id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
/JaqalPaq-1.2.0a1.tar.gz/JaqalPaq-1.2.0a1/src/jaqalpaq/core/locus.py | from .block import LoopStatement
from .branch import BranchStatement, CaseStatement
from .circuit import Circuit
class Locus:
    """A lightweight class capturing a spot inside of a circuit.

    :param obj: What object in the circuit is being referred to.
    :param Locus parent: The owner of the object referred to.
    :param index: The index of the object referred to, among its siblings.
    """

    __slots__ = ("_object", "_parent", "_index")

    @classmethod
    def from_address(klass, circuit, address):
        """Generate a Locus object from a list of indices

        :param Circuit circuit: The circuit the address is looking into.
        :param list address: A list of integers indexing the children at each step.
        """
        obj = klass(circuit)
        for term in address:
            obj = obj[term]
        return obj

    def __init__(self, obj, *, parent=None, index=None):
        self._object = obj
        self._parent = parent
        self._index = index

    @property
    def index(self):
        """The index of the object referred to, among its siblings."""
        return self._index

    @property
    def parent(self):
        """The owner of the object referred to."""
        return self._parent

    @property
    def object(self):
        """What object in the circuit is being referred to."""
        return self._object

    @property
    def lineage(self):
        """An iterator yielding all Locus ancestors, root first, ending with
        this Locus itself."""
        if self._parent:
            yield from self._parent.lineage
        yield self

    @property
    def address(self):
        """A tuple of integers indexing the children at each step.

        The root of the lineage has no index (``None``) and is skipped.
        """
        return tuple(link._index for link in self.lineage if link._index is not None)

    @property
    def children(self):
        """All child objects owned by the Locus's object."""
        obj = self._object
        if isinstance(obj, Circuit):
            return obj.body.statements
        elif isinstance(obj, (LoopStatement, CaseStatement)):
            return obj.statements.statements
        elif isinstance(obj, BranchStatement):
            return obj.cases
        else:
            # Fallback: any other object is expected to expose a plain
            # ``statements`` list directly.
            assert isinstance(obj.statements, list)
            return obj.statements

    def __getitem__(self, index):
        """Returns a new Locus object referring to a child of a particular index."""
        return type(self)(self.children[index], parent=self, index=index)

    def __repr__(self):
        return f"Locus<{self.address}>"

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, Locus):
            # Bug fix: comparing against a non-Locus used to raise
            # AttributeError.  Defer to the other operand instead, so
            # ``locus == 42`` is simply False.
            return NotImplemented
        return (
            (self._object == other._object)
            and (self._parent == other._parent)
            and (self._index == other._index)
        )

    # NOTE: defining __eq__ leaves the class unhashable (as before).
/MO_project-0.0.1.tar.gz/MO_project-0.0.1/README.md | # Project Name
This project is a part of the [Data Science Working Group](http://datascience.codeforsanfrancisco.org) at [Code for San Francisco](http://www.codeforsanfrancisco.org). Other DSWG projects can be found at the [main GitHub repo](https://github.com/sfbrigade/data-science-wg).
#### -- Project Status: [Active, On-Hold, Completed]
## Project Intro/Objective
The purpose of this project is ________. (Describe the main goals of the project and potential civic impact. Limit to a short paragraph, 3-6 Sentences)
### Partner
* [Name of Partner organization/Government department etc..]
* Website for partner
* Partner contact: [Name of Contact], [slack handle of contact if any]
* If you do not have a partner leave this section out
### Methods Used
* Inferential Statistics
* Machine Learning
* Data Visualization
* Predictive Modeling
* etc.
### Technologies
* R
* Python
* D3
* PostGres, MySql
* Pandas, jupyter
* HTML
* JavaScript
* etc.
## Project Description
(Provide more detailed overview of the project. Talk a bit about your data sources and what questions and hypothesis you are exploring. What specific data analysis/visualization and modelling work are you using to solve the problem? What blockers and challenges are you facing? Feel free to number or bullet point things here)
## Needs of this project
- frontend developers
- data exploration/descriptive statistics
- data processing/cleaning
- statistical modeling
- writeup/reporting
- etc. (be as specific as possible)
## Getting Started
1. Clone this repo (for help see this [tutorial](https://help.github.com/articles/cloning-a-repository/)).
2. Raw Data is being kept [here](Repo folder containing raw data) within this repo.
*If using offline data, mention that and explain how readers may obtain the data from the group.*
3. Data processing/transformation scripts are being kept [here](Repo folder containing data processing scripts/notebooks)
4. etc...
*If your project is well underway and setup is fairly complicated (ie. requires installation of many packages) create another "setup.md" file and link to it here*
5. Follow setup [instructions](Link to file)
## Featured Notebooks/Analysis/Deliverables
* [Notebook/Markdown/Slide Deck Title](link)
* [Notebook/Markdown/Slide Deck Title](link)
* [Blog Post](link)
## Contributing DSWG Members
**Team Leads (Contacts) : [Full Name](https://github.com/[github handle])(@slackHandle)**
#### Other Members:
|Name | Slack Handle |
|---------|-----------------|
|[Full Name](https://github.com/[github handle])| @johnDoe |
|[Full Name](https://github.com/[github handle]) | @janeDoe |
## Contact
* If you haven't joined the SF Brigade Slack, [you can do that here](http://c4sf.me/slack).
* Our slack channel is `#datasci-projectname`
* Feel free to contact team leads with any questions or if you are interested in contributing! | PypiClean |
/Kaylee-0.3.tar.gz/Kaylee-0.3/kaylee/project.py | from abc import ABCMeta, abstractmethod
#: Defines auto project mode (see :attr:`Project.mode`)
AUTO_PROJECT_MODE = 0x2
#: Defines manual project mode (see :attr:`Project.mode`)
MANUAL_PROJECT_MODE = 0x4
#: Defines the unknown amount of tasks to be solved by a Kaylee
#: application.
UNKNOWN_AMOUNT = 0x1
# Keys under which the project configuration is published in
# :attr:`Project.client_config` (handed to the client side in pj.init).
KL_PROJECT_MODE = '__kl_project_mode__'
KL_PROJECT_SCRIPT_URL = '__kl_project_script_url__'
KL_PROJECT_STYLES = '__kl_project_styles__'
class Project(object):
    """Kaylee Projects abstract base class.
    :param script_url: The URL of the project's client part (\\*.js file).
    :param mode: defines :attr:`Project.mode <kaylee.Project.mode>`.
    """
    # NOTE(review): this is the Python 2 way of declaring a metaclass; under
    # Python 3 the attribute is ignored and the @abstractmethod decorators
    # below are not enforced.  The module is otherwise Python 2 style, so
    # presumably intentional -- confirm before porting to Python 3.
    __metaclass__ = ABCMeta
    def __init__(self, script_url, mode, **kwargs):
        # Fail fast on anything but the two supported client-side modes.
        if mode not in [AUTO_PROJECT_MODE, MANUAL_PROJECT_MODE]:
            raise ValueError('Wrong project mode: {}'.format(mode))
        #: Indicates the mode in which project works on the client side.
        #: Available modes:
        #:
        #: * :data:`AUTO_PROJECT_MODE <kaylee.project.AUTO_PROJECT_MODE>`
        #: * :data:`MANUAL_PROJECT_MODE <kaylee.project.MANUAL_PROJECT_MODE>`
        #:
        #: For detailed description and usage see :ref:`projects_modes`.
        self.mode = mode
        #: A dictionary which contains the configuration passed to the
        #: client-side of the project in :js:func:`pj.init`.
        #: If the project is loaded from a :ref:`configuration object
        #: <loading>` the base value of ``client_config`` is extended by
        #: ``project.config`` configuration section.
        #:
        #: .. note:: Initially ``Project.client_config`` contains only the
        #:           data necessary to properly initialize the client-side
        #:           of the project.
        self.client_config = {
            KL_PROJECT_SCRIPT_URL: script_url,
            KL_PROJECT_MODE: self.mode,
            # Optional stylesheet URL; None when the project has no styles.
            KL_PROJECT_STYLES: kwargs.get('styles', None),
        }
        #: Indicates whether the project was completed.
        self.completed = False
    @abstractmethod
    def next_task(self):
        """Returns the next task. The returned ``None`` value indicates that
        there will be no more new tasks from the project, but the bound
        controller can still refer to the old tasks via ``project[task_id]``.
        :returns: task :class:`dict` or ``None``.
        """
    @abstractmethod
    def __getitem__(self, task_id):
        """Returns a task with the required id. A task is simply
        a :class:`dict` with at least an 'id' key in it::
            {
                'id' : '10',
                'somedata' : somevalue,
                # etc.
            }
        :rtype: :class:`dict`
        """
    @abstractmethod
    def normalize_result(self, task_id, result):
        """Validates and normalizes the result.
        :param task_id: The ID of the task.
        :param result: The result to be validated and normalized.
        :throws ValueError: If the data is invalid.
        :return: normalized result.
        """
    def result_stored(self, task_id, data, storage):
        """A callback invoked by the bound controller when
        a result is successfully stored to a permanent storage.

        Default implementation is a no-op; override when the project needs
        to react (e.g. track progress or mark itself completed).
        :param task_id: Task ID
        :param data: Normalized task result
        :param storage: The application's permanent results storage
        :type storage: :class:`PermanentStorage`
        """
        pass
    # Commented-out draft of a progress API, kept for reference:
    # @abstractproperty
    # def progress(self):
    #     """A tuple of 2 items indicating progress:
    #     ``(amount_of_tasks_completed, total_amount_of_tasks)``
    #     A negative (:data:`UNKNOWN_AMOUNT`) value of the second item in
    #     the tuple indicates that the project is not able to calculate the
    #     total amount of tasks.
    #     """
    #     pass
def even(number):
    '''return True if number is even else false'''
    # The modulo test is already a boolean; no need for if/else literals.
    return number % 2 == 0
def odd(number):
    '''return True if number is Odd else false'''
    # Python's % always yields a result in [0, 2) for modulus 2, so this
    # also classifies negative integers correctly.
    return number % 2 == 1
def composite(x):
    '''return True if number is a Composite number else false

    Bug fix: previously any non-prime (including 0, 1 and negatives) was
    reported composite.  A composite number is an integer greater than 1
    that is not prime, so the smallest composite is 4.
    '''
    if x < 4:
        return False
    # Trial division up to the square root is enough to find a factor.
    return any(x % d == 0 for d in range(2, int(x ** 0.5) + 1))
def prime(number):
    '''return True if number is a Prime number else false'''
    # Numbers below 2 (including negatives) are not prime; the original
    # divisor-counting loop reported the same but in O(n) time.
    if number < 2:
        return False
    # Trial division up to the square root: any composite number has a
    # factor no larger than sqrt(number).
    for i in range(2, int(number ** 0.5) + 1):
        if number % i == 0:
            return False
    return True
def twin_prime(number1, number2):
    '''return True if two numbers are Twin Prime else false'''
    # Twin primes are two primes exactly two apart, in either order.
    return prime(number1) and prime(number2) and abs(number1 - number2) == 2
def perfect_square(number):
    '''return True if number is a Perfect Square else false'''
    import math  # local import: this module has no import section
    if number < 0:
        # Bug fix: ``(-n) ** 0.5`` yields a complex number and ``int()``
        # then raises TypeError; negatives are simply not perfect squares.
        return False
    # math.isqrt is exact for arbitrarily large integers, unlike the
    # floating-point ``number ** 0.5``.
    root = math.isqrt(number)
    return root * root == number
def emirp(number):
    '''return True if number is an Emirp number else false

    An emirp is a prime that yields a *different* prime when its digits
    are reversed.  Bug fix: palindromic primes (2, 3, 5, 7, 11, ...) were
    previously counted as emirps.
    '''
    if not prime(number):
        # Also guards the reversal below against negative numbers.
        return False
    reversed_number = int(str(number)[::-1])
    return reversed_number != number and prime(reversed_number)
def armstrong(number):
    '''return True if number is a Armstrong number else false

    Uses the classic three-digit definition: the number must equal the sum
    of the *cubes* of its digits (see narcissistic() for the general form).
    '''
    if number < 0:
        return False
    return number == sum(int(digit) ** 3 for digit in str(number))
def factorion(number):
    '''return True if number is a Factorian else false

    A factorion equals the sum of the factorials of its digits
    (1, 2, 145 and 40585).  Bug fix: 0 was previously reported True even
    though 0! = 1 != 0.
    '''
    if number <= 0:
        return False
    # Factorials of the ten possible digits, precomputed.
    digit_factorials = (1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880)
    return number == sum(digit_factorials[int(digit)] for digit in str(number))
def palindrome(number):
    '''return True if number is a Palindrome number else false'''
    digits = str(number)
    # A palindrome reads the same forwards and backwards.
    return digits == digits[::-1]
def narcissistic(number):
    '''return True if number is a Narcissistic number else false

    A narcissistic number equals the sum of its digits, each raised to the
    power of the digit count.
    '''
    if number < 0:
        return False
    text = str(number)
    power = len(text)
    return number == sum(int(ch) ** power for ch in text)
def neon(number):
    '''return True if number is a Neon number else false

    A neon number equals the digit sum of its own square (e.g. 9 -> 81,
    8 + 1 = 9).
    '''
    return number == sum(int(digit) for digit in str(number ** 2))
def spy(number):
    '''return True if number is a Spy number else false

    A spy number's digit sum equals its digit product (e.g. 1412:
    1+4+1+2 = 8 = 1*4*1*2).
    '''
    if number <= 0:
        # Matches the original behaviour: zero and negatives are never spy
        # numbers (sum 0 vs product 1 in the digit-extraction loops).
        return False
    digits = [int(ch) for ch in str(number)]
    digit_product = 1
    for d in digits:
        digit_product *= d
    return sum(digits) == digit_product
def buzz(number):
    '''return True if number is a Buzz number else false'''
    # A buzz number is divisible by 7 or ends with the digit 7.
    return number % 7 == 0 or number % 10 == 7
def automorphic(number):
    '''return True if number is an Automorphic number else false

    An automorphic number's square ends with the number itself
    (5 -> 25, 76 -> 5776).
    '''
    return str(number ** 2).endswith(str(number))
# Range based type counter
def even_in_range(starting, ending=0, shownumbers=False):
    '''Count the even numbers in [starting, ending].
    Prints each matching number when shownumbers is True.'''
    matches = [value for value in range(starting, ending + 1) if even(value)]
    if shownumbers == True:  # strict comparison kept from the original API
        for value in matches:
            print(value)
    return len(matches)
def odd_in_range(starting, ending=0, shownumbers=False):
    '''Count the odd numbers in [starting, ending].
    Prints each matching number when shownumbers is True.'''
    matches = [value for value in range(starting, ending + 1) if odd(value)]
    if shownumbers == True:  # strict comparison kept from the original API
        for value in matches:
            print(value)
    return len(matches)
def prime_in_range(starting, ending=0, shownumbers=False):
    '''Count the prime numbers in [starting, ending].
    Prints each matching number when shownumbers is True.'''
    matches = [value for value in range(starting, ending + 1) if prime(value)]
    if shownumbers == True:  # strict comparison kept from the original API
        for value in matches:
            print(value)
    return len(matches)
def twin_prime_in_range(starting, ending=0, shownumbers=False):
    '''Count the twin prime pairs (p, p + 2) in [starting, ending].
    Prints each matching pair when shownumbers is True.'''
    count = 0
    for low in range(starting, ending + 1):
        high = low + 2
        # Only pairs lying entirely inside the range are counted.
        if high <= ending and twin_prime(low, high):
            count += 1
            if shownumbers == True:  # strict comparison kept from original
                print(low, high)
    return count
def palindrome_in_range(starting, ending=0, shownumbers=False):
    '''Count the palindromic numbers in [starting, ending].
    Prints each matching number when shownumbers is True.'''
    matches = [value for value in range(starting, ending + 1)
               if palindrome(value)]
    if shownumbers == True:  # strict comparison kept from the original API
        for value in matches:
            print(value)
    return len(matches)
def factorion_in_range(starting, ending=0, shownumbers=False):
    '''Count the factorions in [starting, ending].
    Prints each matching number when shownumbers is True.'''
    matches = [value for value in range(starting, ending + 1)
               if factorion(value)]
    if shownumbers == True:  # strict comparison kept from the original API
        for value in matches:
            print(value)
    return len(matches)
def composite_in_range(starting, ending=0, shownumbers=False):
    '''returns number of Composites in a given range
    when shownumbers set to True, prints all numbers also'''
    # Consistency fix: delegate to composite() instead of re-implementing
    # the test as ``prime(i) != True`` so the two definitions cannot drift.
    c = 0
    for i in range(starting, ending + 1):
        if composite(i):
            c += 1
            if shownumbers == True:  # strict comparison kept from original
                print(i)
    return c
def armstrong_in_range(starting, ending=0, shownumbers=False):
    '''Count the Armstrong numbers in [starting, ending].
    Prints each matching number when shownumbers is True.'''
    matches = [value for value in range(starting, ending + 1)
               if armstrong(value)]
    if shownumbers == True:  # strict comparison kept from the original API
        for value in matches:
            print(value)
    return len(matches)
def narcissistic_in_range(starting, ending=0, shownumbers=False):
    '''Count the narcissistic numbers in [starting, ending].
    Prints each matching number when shownumbers is True.'''
    matches = [value for value in range(starting, ending + 1)
               if narcissistic(value)]
    if shownumbers == True:  # strict comparison kept from the original API
        for value in matches:
            print(value)
    return len(matches)
def neon_in_range(starting, ending=0, shownumbers=False):
    '''Count the neon numbers in [starting, ending].
    Prints each matching number when shownumbers is True.'''
    matches = [value for value in range(starting, ending + 1) if neon(value)]
    if shownumbers == True:  # strict comparison kept from the original API
        for value in matches:
            print(value)
    return len(matches)
def spy_in_range(starting, ending=0, shownumbers=False):
    '''Count the spy numbers in [starting, ending].
    Prints each matching number when shownumbers is True.'''
    matches = [value for value in range(starting, ending + 1) if spy(value)]
    if shownumbers == True:  # strict comparison kept from the original API
        for value in matches:
            print(value)
    return len(matches)
def emirp_in_range(starting, ending=0, shownumbers=False):
    '''Count the emirp numbers in [starting, ending].
    Prints each matching number when shownumbers is True.'''
    matches = [value for value in range(starting, ending + 1) if emirp(value)]
    if shownumbers == True:  # strict comparison kept from the original API
        for value in matches:
            print(value)
    return len(matches)
def perfect_square_in_range(starting, ending=0, shownumbers=False):
    '''Count the perfect squares in [starting, ending].
    Prints each matching number when shownumbers is True.'''
    matches = [value for value in range(starting, ending + 1)
               if perfect_square(value)]
    if shownumbers == True:  # strict comparison kept from the original API
        for value in matches:
            print(value)
    return len(matches)
def buzz_in_range(starting, ending=0, shownumbers=False):
    '''Count the buzz numbers in [starting, ending].
    Prints each matching number when shownumbers is True.'''
    matches = [value for value in range(starting, ending + 1) if buzz(value)]
    if shownumbers == True:  # strict comparison kept from the original API
        for value in matches:
            print(value)
    return len(matches)
def automorphic_in_range(starting, ending=0, shownumbers=False):
    '''Count the automorphic numbers in [starting, ending].
    Prints each matching number when shownumbers is True.'''
    matches = [value for value in range(starting, ending + 1)
               if automorphic(value)]
    if shownumbers == True:  # strict comparison kept from the original API
        for value in matches:
            print(value)
    return len(matches)
/DeepSpectrumLite-1.0.2.tar.gz/DeepSpectrumLite-1.0.2/src/deepspectrumlite/lib/model/modules/attention_module.py | import tensorflow as tf
from tensorflow import keras
# code taken from https://github.com/kobiso/CBAM-tensorflow
# Author: Byung Soo Ko
def se_block(residual, name, ratio=8):
    """Contains the implementation of Squeeze-and-Excitation(SE) block.
    As described in https://arxiv.org/abs/1709.01507.

    :param residual: input tensor; channels-last, judging by the reduction
        over axes [1, 2] and the channel index -1.
    :param name: name scope under which the ops are created.
    :param ratio: channel-reduction factor of the bottleneck FC layer.
    :return: the input tensor re-weighted channel-wise.
    """
    kernel_initializer = tf.keras.initializers.VarianceScaling()
    bias_initializer = tf.constant_initializer(value=0.0)
    with tf.name_scope(name):
        channel = residual.get_shape()[-1]
        # Squeeze: global average pooling collapses each feature map to a
        # single per-channel statistic of shape (batch, 1, 1, C).
        squeeze = tf.reduce_mean(residual, axis=[1, 2], keepdims=True)
        assert squeeze.get_shape()[1:] == (1, 1, channel)
        # Excitation, step 1: bottleneck FC reduces C to C // ratio.
        excitation = keras.layers.Dense(
            units=channel // ratio,
            activation=tf.nn.relu,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            name='bottleneck_fc')(squeeze)
        assert excitation.get_shape()[1:] == (1, 1, channel // ratio)
        # Excitation, step 2: recover C channels; sigmoid maps every
        # channel weight into (0, 1).
        excitation = keras.layers.Dense(
            units=channel,
            activation=tf.nn.sigmoid,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            name='recover_fc')(excitation)
        assert excitation.get_shape()[1:] == (1, 1, channel)
        # top = tf.multiply(bottom, se, name='scale')
        # Broadcast multiply: channel-wise re-weighting of the input.
        scale = residual * excitation
        return scale
def cbam_block(input_feature, name, ratio=8):
    """Contains the implementation of Convolutional Block Attention Module(CBAM) block.
    As described in https://arxiv.org/abs/1807.06521.

    :param input_feature: input tensor (channels-last).
    :param name: name scope under which the ops are created.
    :param ratio: channel-reduction factor forwarded to channel_attention.
    :return: the attention-refined tensor.
    """
    with tf.name_scope(name):
        # Channel attention first ("what" to attend to), then spatial
        # attention ("where"), applied sequentially.
        attention_feature = channel_attention(input_feature, 'ch_at', ratio)
        attention_feature = spatial_attention(attention_feature, 'sp_at')
        return attention_feature
def channel_attention(input_feature, name, ratio=8):
    """Channel attention sub-module of CBAM.

    Produces a per-channel gate from average- and max-pooled descriptors
    and multiplies it into the input.

    NOTE(review): the CBAM paper shares one MLP between the avg- and
    max-pooled paths; here two *separate* Dense stacks are created --
    confirm this is intended.
    """
    kernel_initializer = tf.keras.initializers.VarianceScaling()
    bias_initializer = tf.constant_initializer(value=0.0)
    with tf.name_scope(name):
        channel = input_feature.get_shape()[-1]
        # Average-pooled descriptor, shape (batch, 1, 1, C).
        avg_pool = tf.reduce_mean(input_feature, axis=[1, 2], keepdims=True)
        assert avg_pool.get_shape()[1:] == (1, 1, channel)
        # Bottleneck MLP on the average-pooled path.
        avg_pool = keras.layers.Dense(
            units=channel // ratio,
            activation=tf.nn.relu,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer)(avg_pool)
        assert avg_pool.get_shape()[1:] == (1, 1, channel // ratio)
        avg_pool = keras.layers.Dense(
            units=channel,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer)(avg_pool)
        assert avg_pool.get_shape()[1:] == (1, 1, channel)
        # Max-pooled descriptor goes through its own MLP (see NOTE above).
        max_pool = tf.reduce_max(input_feature, axis=[1, 2], keepdims=True)
        assert max_pool.get_shape()[1:] == (1, 1, channel)
        max_pool = keras.layers.Dense(
            units=channel // ratio,
            activation=tf.nn.relu)(max_pool)
        assert max_pool.get_shape()[1:] == (1, 1, channel // ratio)
        max_pool = keras.layers.Dense(
            units=channel)(max_pool)
        assert max_pool.get_shape()[1:] == (1, 1, channel)
        # Fuse the two paths additively, squash to (0, 1) and gate.
        scale = tf.sigmoid(avg_pool + max_pool, 'sigmoid')
        return input_feature * scale
def spatial_attention(input_feature, name):
    """Spatial attention sub-module of CBAM.

    Builds a one-channel spatial gate from channel-wise average and max
    maps, passes it through a 7x7 convolution and a sigmoid, and
    multiplies it into the input.
    """
    kernel_size = 7
    kernel_initializer = tf.keras.initializers.VarianceScaling()
    with tf.name_scope(name):
        # Collapse the channel axis two ways: mean and max, each yielding
        # a single-channel spatial map.
        avg_pool = tf.reduce_mean(input_feature, axis=[3], keepdims=True)
        assert avg_pool.get_shape()[-1] == 1
        max_pool = tf.reduce_max(input_feature, axis=[3], keepdims=True)
        assert max_pool.get_shape()[-1] == 1
        # Stack both maps along the channel axis -> 2 channels.
        concat = tf.concat([avg_pool, max_pool], 3)
        assert concat.get_shape()[-1] == 2
        # 7x7 conv mixes the two maps into one spatial attention map.
        concat = keras.layers.Conv2D(
            filters=1,
            kernel_size=[kernel_size, kernel_size],
            strides=[1, 1],
            padding="same",
            activation=None,
            kernel_initializer=kernel_initializer,
            use_bias=False)(concat)
        assert concat.get_shape()[-1] == 1
        concat = tf.sigmoid(concat, 'sigmoid')
        # Broadcast multiply over the channel axis.
        return input_feature * concat
/CryptoParser-0.10.0.tar.gz/CryptoParser-0.10.0/cryptoparser/ssh/record.py |
import abc
import attr
from cryptoparser.common.parse import ParsableBase, ParserBinary, ComposerBinary
from cryptoparser.common.exception import NotEnoughData
from cryptoparser.ssh.subprotocol import (
SshMessageBase,
SshMessageVariantInit,
SshMessageVariantKexDH,
SshMessageVariantKexDHGroup,
)
@attr.s
class SshRecordBase(ParsableBase):
    """Abstract base for SSH records (binary packet protocol framing).

    A record wraps exactly one message (``packet``); concrete subclasses
    select the message variant class used for parsing.
    """
    #: Bytes preceding the payload: 4-byte packet_length, 1-byte
    #: padding_length, plus one more byte.  NOTE(review): _parse itself
    #: consumes only the 4 + 1 header bytes -- confirm what the sixth
    #: byte accounts for (presumably the minimum one-byte payload).
    HEADER_SIZE = 6
    packet = attr.ib(validator=attr.validators.instance_of(SshMessageBase))
    @classmethod
    @abc.abstractmethod
    def _get_variant_class(cls):
        """Return the message variant class used to parse the payload."""
        raise NotImplementedError()
    @classmethod
    def _parse(cls, parsable):
        parser = ParserBinary(parsable)
        # packet_length counts everything after the length field itself.
        parser.parse_numeric('packet_length', 4)
        if parser['packet_length'] > parser.unparsed_length:
            # Report how many further bytes the caller must supply.
            raise NotEnoughData(parser['packet_length'] - parser.unparsed_length)
        parser.parse_numeric('padding_length', 1)
        parser.parse_parsable('packet', cls._get_variant_class())
        # Padding carries no information; consume and discard it.
        parser.parse_raw('padding', parser['padding_length'])
        return cls(packet=parser['packet']), parser.parsed_length
    def compose(self):
        """Serialize the record: header, payload, then padding."""
        body_composer = ComposerBinary()
        body_composer.compose_parsable(self.packet)
        payload_length = body_composer.composed_length
        # Choose padding so the whole record (4-byte length field + 1-byte
        # padding_length + payload + padding) is a multiple of 8 bytes ...
        padding_length = 8 - ((payload_length + 5) % 8)
        # ... with at least 4 padding bytes, per the SSH binary packet
        # protocol (RFC 4253, section 6).
        if padding_length < 4:
            padding_length += 8
        packet_length = payload_length + padding_length + 1
        # NOTE(review): padding is composed as zero bytes; RFC 4253 calls
        # for random padding -- fine for analysis tooling, confirm before
        # interoperability use.
        for _ in range(padding_length):
            body_composer.compose_numeric(0, 1)
        header_composer = ComposerBinary()
        header_composer.compose_numeric(packet_length, 4)
        header_composer.compose_numeric(padding_length, 1)
        return header_composer.composed + body_composer.composed
class SshRecordInit(SshRecordBase):
    """Record whose payload is parsed with :class:`SshMessageVariantInit`."""
    @classmethod
    def _get_variant_class(cls):
        return SshMessageVariantInit
class SshRecordKexDH(SshRecordBase):
    """Record whose payload is parsed with :class:`SshMessageVariantKexDH`."""
    @classmethod
    def _get_variant_class(cls):
        return SshMessageVariantKexDH
class SshRecordKexDHGroup(SshRecordBase):
    """Record whose payload is parsed with
    :class:`SshMessageVariantKexDHGroup`."""
    @classmethod
    def _get_variant_class(cls):
        return SshMessageVariantKexDHGroup
/NBTParse-0.7.1.tar.gz/NBTParse-0.7.1/nbtparse/minecraft/terrain/chunk.py | from collections import abc as cabc
import functools
import logging
from ...syntax import tags
from ...syntax import ids
from ...semantics import fields
from ...semantics import nbtobject
from .. import entity
from .. import entity_ids
from .. import entityfactory
from . import tile
from . import voxel
logger = logging.getLogger(__name__)
# Dimensions of a section and the number of sections stacked in a chunk.
SECTION_LENGTH = 16
SECTION_HEIGHT = 16
SECTION_WIDTH = 16
SECTIONS_PER_CHUNK = 16


class HeightMap(cabc.MutableMapping):
    """The height map of a chunk.
    Maps coordinates to heights::
        hm[3, 4] = 5
    Keys may not be inserted or deleted, and must be pairs of integers.
    Intlist should be the raw list of integers as saved by Minecraft.
    """
    def __init__(self, intlist: [int]):
        required_len = SECTION_LENGTH * SECTION_WIDTH
        if len(intlist) != required_len:
            raise ValueError('Must have exactly {} entries'
                             .format(required_len))
        # Defensive copy: stored row-major, X fastest (see _fix_idx use).
        self._intlist = list(intlist)
    def __repr__(self):
        return 'HeightMap({!r})'.format(self._intlist)
    def _fix_idx(self, idx: (int, int)) -> (int, int):
        """Validate an (x, z) key, raising IndexError when out of range."""
        x_idx, z_idx = idx
        if x_idx not in range(SECTION_LENGTH):
            raise IndexError('X index out of range')
        if z_idx not in range(SECTION_WIDTH):
            raise IndexError('Z index out of range')
        return idx
    def __getitem__(self, idx: (int, int)) -> int:
        x_idx, z_idx = self._fix_idx(idx)
        return self._intlist[z_idx * SECTION_LENGTH + x_idx]
    def __setitem__(self, idx: (int, int), value: int):
        x_idx, z_idx = self._fix_idx(idx)
        self._intlist[z_idx * SECTION_LENGTH + x_idx] = value
    def __delitem__(self, idx: (int, int)):
        # The key set is fixed; deletion is never meaningful.
        raise TypeError('Cannot delete items.')
    def __iter__(self):
        # Bug fix: a Mapping's iterator must yield *keys*, not values.
        # The previous implementation yielded the stored heights, which
        # broke keys(), items() and ``for coord in hm`` iteration.
        for z_idx in range(SECTION_WIDTH):
            for x_idx in range(SECTION_LENGTH):
                yield (x_idx, z_idx)
    def __len__(self):
        return len(self._intlist)
    def to_raw(self) -> [int]:
        """Returns the raw list used by Minecraft."""
        return list(self._intlist)
class HeightMapField(fields.MutableField, fields.SingleField):
    """Field for :class:`HeightMap`.

    Converts between the raw ``IntArrayTag`` stored in NBT and the
    :class:`HeightMap` wrapper.
    """
    def __init__(self, nbt_name: str, *, default: HeightMap=None):
        super().__init__(nbt_name, default=default)
    def __repr__(self):
        return 'HeightMapField({!r}, default={!r})'.format(self.nbt_name,
                                                           self.default)
    @staticmethod
    def to_python(tag: tags.IntArrayTag) -> HeightMap:
        # The raw NBT integer array uses exactly the layout HeightMap
        # stores, so it can be passed through directly.
        return HeightMap(tag)
    @staticmethod
    def from_python(value: HeightMap) -> tags.IntArrayTag:
        return tags.IntArrayTag(value.to_raw())
class _BlockField(fields.MutableField, fields.MultiField):
    """Exposes the blocks in a section.
    If default is :obj:`None`, a new empty buffer will be created as the
    default.
    """
    def __init__(self, id_name: str, addid_name: str, damage_name: str, *,
                 length: int, height: int, width: int):
        # The additional-id array stores two blocks per byte (see the
        # half-length default in to_python), so the total block count must
        # be even.
        if (length * width * height) % 2 == 1:
            raise ValueError('Cannot create _BlockField with odd length.')
        self.length = length
        self.height = height
        self.width = width
        # Order matters: (ids, add-ids, damages) matches to_python's
        # parameters and from_python's output.
        nbt_names = (id_name, addid_name, damage_name)
        super().__init__(nbt_names)
    def __repr__(self) -> str:
        id_name, addid_name, damage_name = self.nbt_names
        return ('_BlockField({!r}, {!r}, {!r}, length={!r}, height={!r}, '
                'width={!r}, default={!r})'.format(id_name, addid_name,
                                                   damage_name, self.length,
                                                   self.height, self.width,
                                                   self.default))
    def set_default(self, obj):
        """Set this field to its default.
        Always creates a new VoxelBuffer; the default argument is not
        supported.
        """
        default = voxel.VoxelBuffer(self.length, self.height, self.width)
        self.__set__(obj, default)
    def to_python(self, ids: tags.ByteArrayTag, addids: tags.ByteArrayTag,
                  damages: tags.ByteArrayTag) -> voxel.VoxelBuffer:
        length = self.length
        height = self.height
        width = self.width
        if addids is None:
            # The add-id tag is optional in NBT; substitute a zeroed array
            # holding half a byte per block.
            addids = bytearray((length * width * height) // 2)
        return voxel.VoxelBuffer.from_raw(ids, addids, damages,
                                          length=length, height=height,
                                          width=width)
    @staticmethod
    def from_python(vb: voxel.VoxelBuffer):
        # to_raw presumably yields (ids, addids, damages) in the same order
        # as nbt_names -- confirm against VoxelBuffer.to_raw.
        result = vb.to_raw()
        return tuple(tags.ByteArrayTag(x) for x in result)
class Section(nbtobject.NBTObject):
    """A 16x16x16 sub-volume of a chunk."""
    y_index = fields.ByteField('Y')
    y_index.__doc__ = """The Y-index of this section.
    From 0 to 15 inclusive.
    """
    blocks = _BlockField('Blocks', 'Add', 'Data', length=SECTION_LENGTH,
                         width=SECTION_WIDTH, height=SECTION_HEIGHT)
    blocks.__doc__ = """:class:`~.voxel.VoxelBuffer` of this section."""
    def __repr__(self):
        return '<Section: y_index={!r}>'.format(self.y_index)
class _MagicDict(dict):
    """dict that lazily creates an empty :class:`Section` for any valid
    Y-index that is looked up but not yet present."""
    def __missing__(self, key):
        if key not in range(SECTIONS_PER_CHUNK):
            raise KeyError(key)
        section = Section()
        section.y_index = key
        self[key] = section
        return section
    def __repr__(self):
        return '_MagicDict({})'.format(super().__repr__())
class SectionDictField(fields.MutableField, fields.SingleField):
    """Field for the sections of a chunk.
    Keys are y_index, values are sections.
    """
    def __init__(self, nbt_name: str, *, default: dict=None):
        super().__init__(nbt_name, default=default)
    def __repr__(self):
        return 'SectionDictField({!r}, default={!r})'.format(self.nbt_name,
                                                             self.default)
    @staticmethod
    def to_python(sec_list: tags.ListTag) -> dict:
        # _MagicDict lazily creates empty sections for indices missing
        # from the NBT (see Chunk.sections documentation).
        result = _MagicDict()
        for raw_sec in sec_list:
            cooked_sec = Section.from_nbt(raw_sec)
            result[cooked_sec.y_index] = cooked_sec
        return result
    @staticmethod
    def from_python(sec_dict: dict) -> tags.ListTag:
        result = tags.ListTag(content_id=ids.TAG_Compound)
        for cooked_sec_y, cooked_sec in sec_dict.items():
            # The dictionary key is authoritative: write it back into the
            # section before serializing, overriding any stale y_index.
            cooked_sec.y_index = cooked_sec_y
            raw_sec = cooked_sec.to_nbt()
            result.append(raw_sec)
        return result
    def set_default(self, obj: nbtobject.NBTObject):
        # With no explicit default, install a fresh auto-vivifying dict.
        if self.default is None:
            self.__set__(obj, _MagicDict())
        else:
            super().set_default(obj)
class _EntityListField(fields.MutableField, fields.SingleField):
    """Field holding a list of entities, deserialized via the entity
    factory using the owning chunk's namespace."""
    def __init__(self, nbt_name, *, default_class=entity.Entity,
                 default_value: list=None):
        super().__init__(nbt_name, default=default_value)
        # Fallback class handed to the entity factory.
        self._default_class = default_class
    def to_python_ex(self, raw_list: tags.ListTag, cnk: 'Chunk') -> list:
        result = []
        for raw_entity in raw_list:
            # The owning chunk supplies the id namespace used to resolve
            # each entity's concrete class.
            cooked = entityfactory.from_nbt(raw_entity,
                                            default_class=self._default_class,
                                            namespace=cnk._namespace)
            result.append(cooked)
        return result
    def from_python_ex(self, python_list: list, cnk: 'Chunk'):
        # ``cnk`` is unused when serializing: each entity produces its own
        # NBT via to_nbt().
        result = tags.ListTag(content_id=ids.TAG_Compound)
        for cooked in python_list:
            raw_entity = cooked.to_nbt()
            result.append(raw_entity)
        return result
class Chunk(nbtobject.NBTObject):
    """A single chunk: up to ``SECTIONS_PER_CHUNK`` stacked sections plus
    their entities, tile entities and height map."""
    sections = SectionDictField('Sections')
    sections.__doc__ = (
        """Mutable mapping from Y-indices to :class:`Section`\ s.
        Y-indices which are not present in the underlying NBT will be
        automagically created as empty sections upon attempting to retrieve
        them.
        The key in this mapping will override the :attr:`~Section.y_index`
        attribute if they disagree.
        .. note::
            It is acceptable to replace this mapping with an entirely
            different mapping.  If you do so, the magic creation of missing
            sections will very likely not work.  If you prefer creating
            sections explicitly, code like the following will disable the
            magic::
                c = Chunk.from_nbt(...)
                c.sections = dict(c.sections)
        """)
    tiles = _EntityListField('TileEntities',
                             default_class=tile.TileEntity)
    tiles.__doc__ = """List of :class:`~.tile.TileEntity` objects.
    .. note::
        This attribute is generally managed by the :class:`~.region.Region`
        which created this chunk. Manually changing it is usually
        unnecessary.
    """
    # List of entities stored under the 'Entities' NBT tag; resolved with
    # this chunk's entity namespace (see _EntityListField).
    entities = _EntityListField('Entities')
    def __init__(self, *args, namespace=entity_ids.VANILLA, **kwargs):
        super().__init__(*args, **kwargs)
        # Namespace consulted by the entity fields when deserializing.
        self._namespace = namespace
    @classmethod
    def from_bytes(cls, raw: bytes, namespace=entity_ids.VANILLA):
        """Deserialize a chunk, attaching the given entity namespace.

        The base from_bytes knows nothing about namespaces, so the
        attribute is attached after construction.
        """
        result = super().from_bytes(raw)
        result._namespace = namespace
        return result
    height_map = HeightMapField('HeightMap')
    height_map.__doc__ = """The height map for this chunk.
    .. note::
        It is planned that a lighting engine will manage this attribute
        automatically. This is not yet implemented.
    """
    def __repr__(self):
        return '<Chunk at 0x{:x}>'.format(id(self))
    @staticmethod
    def prepare_save(nbt: tags.CompoundTag) -> tags.CompoundTag:
        """Wrap nbt in a singleton CompoundTag."""
        # On disk the chunk data lives under a single 'Level' compound.
        return tags.CompoundTag({'Level': nbt})
    @staticmethod
    def prepare_load(nbt: tags.CompoundTag) -> tags.CompoundTag:
        """Unwrap nbt from a singleton CompoundTag."""
        return nbt['Level']
/EQcorrscan-0.4.4.tar.gz/EQcorrscan-0.4.4/eqcorrscan/doc/build/html/_static/js/jquery-1.12.4.min.js | !function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=a.document,e=c.slice,f=c.concat,g=c.push,h=c.indexOf,i={},j=i.toString,k=i.hasOwnProperty,l={},m="1.12.4",n=function(a,b){return new n.fn.init(a,b)},o=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,p=/^-ms-/,q=/-([\da-z])/gi,r=function(a,b){return b.toUpperCase()};n.fn=n.prototype={jquery:m,constructor:n,selector:"",length:0,toArray:function(){return e.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:e.call(this)},pushStack:function(a){var b=n.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a){return n.each(this,a)},map:function(a){return this.pushStack(n.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(e.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor()},push:g,sort:c.sort,splice:c.splice},n.extend=n.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||n.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d in e)a=g[d],c=e[d],g!==c&&(j&&c&&(n.isPlainObject(c)||(b=n.isArray(c)))?(b?(b=!1,f=a&&n.isArray(a)?a:[]):f=a&&n.isPlainObject(a)?a:{},g[d]=n.extend(j,f,c)):void 0!==c&&(g[d]=c));return g},n.extend({expando:"jQuery"+(m+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new 
Error(a)},noop:function(){},isFunction:function(a){return"function"===n.type(a)},isArray:Array.isArray||function(a){return"array"===n.type(a)},isWindow:function(a){return null!=a&&a==a.window},isNumeric:function(a){var b=a&&a.toString();return!n.isArray(a)&&b-parseFloat(b)+1>=0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},isPlainObject:function(a){var b;if(!a||"object"!==n.type(a)||a.nodeType||n.isWindow(a))return!1;try{if(a.constructor&&!k.call(a,"constructor")&&!k.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}if(!l.ownFirst)for(b in a)return k.call(a,b);for(b in a);return void 0===b||k.call(a,b)},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?i[j.call(a)]||"object":typeof a},globalEval:function(b){b&&n.trim(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(p,"ms-").replace(q,r)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b){var c,d=0;if(s(a)){for(c=a.length;c>d;d++)if(b.call(a[d],d,a[d])===!1)break}else for(d in a)if(b.call(a[d],d,a[d])===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(o,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(s(Object(a))?n.merge(c,"string"==typeof a?[a]:a):g.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(h)return h.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,b){var c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 0!==b[d])a[e++]=b[d++];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,e,g=0,h=[];if(s(a))for(d=a.length;d>g;g++)e=b(a[g],g,c),null!=e&&h.push(e);else for(g in a)e=b(a[g],g,c),null!=e&&h.push(e);return f.apply([],h)},guid:1,proxy:function(a,b){var c,d,f;return"string"==typeof 
b&&(f=a[b],b=a,a=f),n.isFunction(a)?(c=e.call(arguments,2),d=function(){return a.apply(b||this,c.concat(e.call(arguments)))},d.guid=a.guid=a.guid||n.guid++,d):void 0},now:function(){return+new Date},support:l}),"function"==typeof Symbol&&(n.fn[Symbol.iterator]=c[Symbol.iterator]),n.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(a,b){i["[object "+b+"]"]=b.toLowerCase()});function s(a){var b=!!a&&"length"in a&&a.length,c=n.type(a);return"function"===c||n.isWindow(a)?!1:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var t=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=ga(),z=ga(),A=ga(),B=function(a,b){return a===b&&(l=!0),0},C=1<<31,D={}.hasOwnProperty,E=[],F=E.pop,G=E.push,H=E.push,I=E.slice,J=function(a,b){for(var c=0,d=a.length;d>c;c++)if(a[c]===b)return c;return-1},K="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",L="[\\x20\\t\\r\\n\\f]",M="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",N="\\["+L+"*("+M+")(?:"+L+"*([*^$|!~]?=)"+L+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+M+"))|)"+L+"*\\]",O=":("+M+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+N+")*)|.*)\\)|)",P=new RegExp(L+"+","g"),Q=new RegExp("^"+L+"+|((?:^|[^\\\\])(?:\\\\.)*)"+L+"+$","g"),R=new RegExp("^"+L+"*,"+L+"*"),S=new RegExp("^"+L+"*([>+~]|"+L+")"+L+"*"),T=new RegExp("="+L+"*([^\\]'\"]*?)"+L+"*\\]","g"),U=new RegExp(O),V=new RegExp("^"+M+"$"),W={ID:new RegExp("^#("+M+")"),CLASS:new RegExp("^\\.("+M+")"),TAG:new RegExp("^("+M+"|[*])"),ATTR:new RegExp("^"+N),PSEUDO:new RegExp("^"+O),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+L+"*(even|odd|(([+-]|)(\\d*)n|)"+L+"*(?:([+-]|)"+L+"*(\\d+)|))"+L+"*\\)|)","i"),bool:new RegExp("^(?:"+K+")$","i"),needsContext:new 
RegExp("^"+L+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+L+"*((?:-\\d)?\\d*)"+L+"*\\)|)(?=[^-]|$)","i")},X=/^(?:input|select|textarea|button)$/i,Y=/^h\d$/i,Z=/^[^{]+\{\s*\[native \w/,$=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,_=/[+~]/,aa=/'|\\/g,ba=new RegExp("\\\\([\\da-f]{1,6}"+L+"?|("+L+")|.)","ig"),ca=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},da=function(){m()};try{H.apply(E=I.call(v.childNodes),v.childNodes),E[v.childNodes.length].nodeType}catch(ea){H={apply:E.length?function(a,b){G.apply(a,I.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function fa(a,b,d,e){var f,h,j,k,l,o,r,s,w=b&&b.ownerDocument,x=b?b.nodeType:9;if(d=d||[],"string"!=typeof a||!a||1!==x&&9!==x&&11!==x)return d;if(!e&&((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,p)){if(11!==x&&(o=$.exec(a)))if(f=o[1]){if(9===x){if(!(j=b.getElementById(f)))return d;if(j.id===f)return d.push(j),d}else if(w&&(j=w.getElementById(f))&&t(b,j)&&j.id===f)return d.push(j),d}else{if(o[2])return H.apply(d,b.getElementsByTagName(a)),d;if((f=o[3])&&c.getElementsByClassName&&b.getElementsByClassName)return H.apply(d,b.getElementsByClassName(f)),d}if(c.qsa&&!A[a+" "]&&(!q||!q.test(a))){if(1!==x)w=b,s=a;else if("object"!==b.nodeName.toLowerCase()){(k=b.getAttribute("id"))?k=k.replace(aa,"\\$&"):b.setAttribute("id",k=u),r=g(a),h=r.length,l=V.test(k)?"#"+k:"[id='"+k+"']";while(h--)r[h]=l+" "+qa(r[h]);s=r.join(","),w=_.test(a)&&oa(b.parentNode)||b}if(s)try{return H.apply(d,w.querySelectorAll(s)),d}catch(y){}finally{k===u&&b.removeAttribute("id")}}}return i(a.replace(Q,"$1"),b,d,e)}function ga(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function ha(a){return a[u]=!0,a}function ia(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function ja(a,b){var 
c=a.split("|"),e=c.length;while(e--)d.attrHandle[c[e]]=b}function ka(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||C)-(~a.sourceIndex||C);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function la(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function ma(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function na(a){return ha(function(b){return b=+b,ha(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function oa(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}c=fa.support={},f=fa.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=fa.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=n.documentElement,p=!f(n),(e=n.defaultView)&&e.top!==e&&(e.addEventListener?e.addEventListener("unload",da,!1):e.attachEvent&&e.attachEvent("onunload",da)),c.attributes=ia(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ia(function(a){return a.appendChild(n.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=Z.test(n.getElementsByClassName),c.getById=ia(function(a){return o.appendChild(a).id=u,!n.getElementsByName||!n.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c=b.getElementById(a);return c?[c]:[]}},d.filter.ID=function(a){var b=a.replace(ba,ca);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(ba,ca);return function(a){var c="undefined"!=typeof a.getAttributeNode&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return"undefined"!=typeof b.getElementsByTagName?b.getElementsByTagName(a):c.qsa?b.querySelectorAll(a):void 0}:function(a,b){var 
c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return"undefined"!=typeof b.getElementsByClassName&&p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=Z.test(n.querySelectorAll))&&(ia(function(a){o.appendChild(a).innerHTML="<a id='"+u+"'></a><select id='"+u+"-\r\\' msallowcapture=''><option selected=''></option></select>",a.querySelectorAll("[msallowcapture^='']").length&&q.push("[*^$]="+L+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+L+"*(?:value|"+K+")"),a.querySelectorAll("[id~="+u+"-]").length||q.push("~="),a.querySelectorAll(":checked").length||q.push(":checked"),a.querySelectorAll("a#"+u+"+*").length||q.push(".#.+[+~]")}),ia(function(a){var b=n.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+L+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=Z.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ia(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",O)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=Z.test(o.compareDocumentPosition),t=b||Z.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return 
d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===n||a.ownerDocument===v&&t(v,a)?-1:b===n||b.ownerDocument===v&&t(v,b)?1:k?J(k,a)-J(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,e=a.parentNode,f=b.parentNode,g=[a],h=[b];if(!e||!f)return a===n?-1:b===n?1:e?-1:f?1:k?J(k,a)-J(k,b):0;if(e===f)return ka(a,b);c=a;while(c=c.parentNode)g.unshift(c);c=b;while(c=c.parentNode)h.unshift(c);while(g[d]===h[d])d++;return d?ka(g[d],h[d]):g[d]===v?-1:h[d]===v?1:0},n):n},fa.matches=function(a,b){return fa(a,null,null,b)},fa.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(T,"='$1']"),c.matchesSelector&&p&&!A[b+" "]&&(!r||!r.test(b))&&(!q||!q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return fa(b,n,null,[a]).length>0},fa.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},fa.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&D.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},fa.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},fa.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=fa.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=fa.selectors={cacheLength:50,createPseudo:ha,match:W,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," 
":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(ba,ca),a[3]=(a[3]||a[4]||a[5]||"").replace(ba,ca),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||fa.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&fa.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return W.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&U.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(ba,ca).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+L+")"+a+"("+L+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||"undefined"!=typeof a.getAttribute&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=fa.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e.replace(P," ")+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var 
j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h,t=!1;if(q){if(f){while(p){m=b;while(m=m[p])if(h?m.nodeName.toLowerCase()===r:1===m.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){m=q,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n&&j[2],m=n&&q.childNodes[n];while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if(1===m.nodeType&&++t&&m===b){k[a]=[w,n,t];break}}else if(s&&(m=b,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n),t===!1)while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if((h?m.nodeName.toLowerCase()===r:1===m.nodeType)&&++t&&(s&&(l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),k[a]=[w,t]),m===b))break;return t-=e,t===d||t%d===0&&t/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||fa.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?ha(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=J(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:ha(function(a){var b=[],c=[],d=h(a.replace(Q,"$1"));return d[u]?ha(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),b[0]=null,!c.pop()}}),has:ha(function(a){return function(b){return fa(a,b).length>0}}),contains:ha(function(a){return a=a.replace(ba,ca),function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:ha(function(a){return V.test(a||"")||fa.error("unsupported lang: "+a),a=a.replace(ba,ca).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return 
a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Y.test(a.nodeName)},input:function(a){return X.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:na(function(){return[0]}),last:na(function(a,b){return[b-1]}),eq:na(function(a,b,c){return[0>c?c+b:c]}),even:na(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:na(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:na(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:na(function(a,b,c){for(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=la(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=ma(b);function pa(){}pa.prototype=d.filters=d.pseudos,d.setFilters=new pa,g=fa.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){c&&!(e=R.exec(h))||(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=S.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(Q," ")}),h=h.slice(c.length));for(g in d.filter)!(e=W[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?fa.error(a):z(a,i).slice(0)};function qa(a){for(var b=0,c=a.length,d="";c>b;b++)d+=a[b].value;return d}function 
ra(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j,k=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(j=b[u]||(b[u]={}),i=j[b.uniqueID]||(j[b.uniqueID]={}),(h=i[d])&&h[0]===w&&h[1]===f)return k[2]=h[2];if(i[d]=k,k[2]=a(b,c,g))return!0}}}function sa(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function ta(a,b,c){for(var d=0,e=b.length;e>d;d++)fa(a,b[d],c);return c}function ua(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(c&&!c(f,d,e)||(g.push(f),j&&b.push(h)));return g}function va(a,b,c,d,e,f){return d&&!d[u]&&(d=va(d)),e&&!e[u]&&(e=va(e,f)),ha(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||ta(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:ua(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=ua(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?J(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=ua(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):H.apply(g,r)})}function wa(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=ra(function(a){return a===b},h,!0),l=ra(function(a){return J(b,a)>-1},h,!0),m=[function(a,c,d){var e=!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d));return b=null,e}];f>i;i++)if(c=d.relative[a[i].type])m=[ra(sa(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return va(i>1&&sa(m),i>1&&qa(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(Q,"$1"),c,e>i&&wa(a.slice(i,e)),f>e&&wa(a=a.slice(e)),f>e&&qa(a))}m.push(c)}return sa(m)}function xa(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var 
l,o,q,r=0,s="0",t=f&&[],u=[],v=j,x=f||e&&d.find.TAG("*",k),y=w+=null==v?1:Math.random()||.1,z=x.length;for(k&&(j=g===n||g||k);s!==z&&null!=(l=x[s]);s++){if(e&&l){o=0,g||l.ownerDocument===n||(m(l),h=!p);while(q=a[o++])if(q(l,g||n,h)){i.push(l);break}k&&(w=y)}c&&((l=!q&&l)&&r--,f&&t.push(l))}if(r+=s,c&&s!==r){o=0;while(q=b[o++])q(t,u,g,h);if(f){if(r>0)while(s--)t[s]||u[s]||(u[s]=F.call(i));u=ua(u)}H.apply(i,u),k&&!f&&u.length>0&&r+b.length>1&&fa.uniqueSort(i)}return k&&(w=y,j=v),t};return c?ha(f):f}return h=fa.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=wa(b[c]),f[u]?d.push(f):e.push(f);f=A(a,xa(e,d)),f.selector=a}return f},i=fa.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(ba,ca),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=W.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(ba,ca),_.test(j[0].type)&&oa(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&qa(j),!a)return H.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,!b||_.test(a)&&oa(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ia(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),ia(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||ja("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ia(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||ja("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ia(function(a){return 
null==a.getAttribute("disabled")})||ja(K,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),fa}(a);n.find=t,n.expr=t.selectors,n.expr[":"]=n.expr.pseudos,n.uniqueSort=n.unique=t.uniqueSort,n.text=t.getText,n.isXMLDoc=t.isXML,n.contains=t.contains;var u=function(a,b,c){var d=[],e=void 0!==c;while((a=a[b])&&9!==a.nodeType)if(1===a.nodeType){if(e&&n(a).is(c))break;d.push(a)}return d},v=function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c},w=n.expr.match.needsContext,x=/^<([\w-]+)\s*\/?>(?:<\/\1>|)$/,y=/^.[^:#\[\.,]*$/;function z(a,b,c){if(n.isFunction(b))return n.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return n.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(y.test(b))return n.filter(b,a,c);b=n.filter(b,a)}return n.grep(a,function(a){return n.inArray(a,b)>-1!==c})}n.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?n.find.matchesSelector(d,a)?[d]:[]:n.find.matches(a,n.grep(b,function(a){return 1===a.nodeType}))},n.fn.extend({find:function(a){var b,c=[],d=this,e=d.length;if("string"!=typeof a)return this.pushStack(n(a).filter(function(){for(b=0;e>b;b++)if(n.contains(d[b],this))return!0}));for(b=0;e>b;b++)n.find(a,d[b],c);return c=this.pushStack(e>1?n.unique(c):c),c.selector=this.selector?this.selector+" "+a:a,c},filter:function(a){return this.pushStack(z(this,a||[],!1))},not:function(a){return this.pushStack(z(this,a||[],!0))},is:function(a){return!!z(this,"string"==typeof a&&w.test(a)?n(a):a||[],!1).length}});var A,B=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,C=n.fn.init=function(a,b,c){var e,f;if(!a)return this;if(c=c||A,"string"==typeof a){if(e="<"===a.charAt(0)&&">"===a.charAt(a.length-1)&&a.length>=3?[null,a,null]:B.exec(a),!e||!e[1]&&b)return!b||b.jquery?(b||c).find(a):this.constructor(b).find(a);if(e[1]){if(b=b instanceof 
n?b[0]:b,n.merge(this,n.parseHTML(e[1],b&&b.nodeType?b.ownerDocument||b:d,!0)),x.test(e[1])&&n.isPlainObject(b))for(e in b)n.isFunction(this[e])?this[e](b[e]):this.attr(e,b[e]);return this}if(f=d.getElementById(e[2]),f&&f.parentNode){if(f.id!==e[2])return A.find(a);this.length=1,this[0]=f}return this.context=d,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):n.isFunction(a)?"undefined"!=typeof c.ready?c.ready(a):a(n):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),n.makeArray(a,this))};C.prototype=n.fn,A=n(d);var D=/^(?:parents|prev(?:Until|All))/,E={children:!0,contents:!0,next:!0,prev:!0};n.fn.extend({has:function(a){var b,c=n(a,this),d=c.length;return this.filter(function(){for(b=0;d>b;b++)if(n.contains(this,c[b]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=w.test(a)||"string"!=typeof a?n(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&n.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?n.uniqueSort(f):f)},index:function(a){return a?"string"==typeof a?n.inArray(this[0],n(a)):n.inArray(a.jquery?a[0]:a,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(n.uniqueSort(n.merge(this.get(),n(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function F(a,b){do a=a[b];while(a&&1!==a.nodeType);return a}n.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return u(a,"parentNode")},parentsUntil:function(a,b,c){return u(a,"parentNode",c)},next:function(a){return F(a,"nextSibling")},prev:function(a){return F(a,"previousSibling")},nextAll:function(a){return u(a,"nextSibling")},prevAll:function(a){return u(a,"previousSibling")},nextUntil:function(a,b,c){return u(a,"nextSibling",c)},prevUntil:function(a,b,c){return 
u(a,"previousSibling",c)},siblings:function(a){return v((a.parentNode||{}).firstChild,a)},children:function(a){return v(a.firstChild)},contents:function(a){return n.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:n.merge([],a.childNodes)}},function(a,b){n.fn[a]=function(c,d){var e=n.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=n.filter(d,e)),this.length>1&&(E[a]||(e=n.uniqueSort(e)),D.test(a)&&(e=e.reverse())),this.pushStack(e)}});var G=/\S+/g;function H(a){var b={};return n.each(a.match(G)||[],function(a,c){b[c]=!0}),b}n.Callbacks=function(a){a="string"==typeof a?H(a):n.extend({},a);var b,c,d,e,f=[],g=[],h=-1,i=function(){for(e=a.once,d=b=!0;g.length;h=-1){c=g.shift();while(++h<f.length)f[h].apply(c[0],c[1])===!1&&a.stopOnFalse&&(h=f.length,c=!1)}a.memory||(c=!1),b=!1,e&&(f=c?[]:"")},j={add:function(){return f&&(c&&!b&&(h=f.length-1,g.push(c)),function d(b){n.each(b,function(b,c){n.isFunction(c)?a.unique&&j.has(c)||f.push(c):c&&c.length&&"string"!==n.type(c)&&d(c)})}(arguments),c&&!b&&i()),this},remove:function(){return n.each(arguments,function(a,b){var c;while((c=n.inArray(b,f,c))>-1)f.splice(c,1),h>=c&&h--}),this},has:function(a){return a?n.inArray(a,f)>-1:f.length>0},empty:function(){return f&&(f=[]),this},disable:function(){return e=g=[],f=c="",this},disabled:function(){return!f},lock:function(){return e=!0,c||j.disable(),this},locked:function(){return!!e},fireWith:function(a,c){return e||(c=c||[],c=[a,c.slice?c.slice():c],g.push(c),b||i()),this},fire:function(){return j.fireWith(this,arguments),this},fired:function(){return!!d}};return j},n.extend({Deferred:function(a){var b=[["resolve","done",n.Callbacks("once memory"),"resolved"],["reject","fail",n.Callbacks("once memory"),"rejected"],["notify","progress",n.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return 
n.Deferred(function(c){n.each(b,function(b,f){var g=n.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&n.isFunction(a.promise)?a.promise().progress(c.notify).done(c.resolve).fail(c.reject):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?n.extend(a,d):d}},e={};return d.pipe=d.then,n.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=e.call(arguments),d=c.length,f=1!==d||a&&n.isFunction(a.promise)?d:0,g=1===f?a:n.Deferred(),h=function(a,b,c){return function(d){b[a]=this,c[a]=arguments.length>1?e.call(arguments):d,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(d>1)for(i=new Array(d),j=new Array(d),k=new Array(d);d>b;b++)c[b]&&n.isFunction(c[b].promise)?c[b].promise().progress(h(b,j,i)).done(h(b,k,c)).fail(g.reject):--f;return f||g.resolveWith(k,c),g.promise()}});var I;n.fn.ready=function(a){return n.ready.promise().done(a),this},n.extend({isReady:!1,readyWait:1,holdReady:function(a){a?n.readyWait++:n.ready(!0)},ready:function(a){(a===!0?--n.readyWait:n.isReady)||(n.isReady=!0,a!==!0&&--n.readyWait>0||(I.resolveWith(d,[n]),n.fn.triggerHandler&&(n(d).triggerHandler("ready"),n(d).off("ready"))))}});function J(){d.addEventListener?(d.removeEventListener("DOMContentLoaded",K),a.removeEventListener("load",K)):(d.detachEvent("onreadystatechange",K),a.detachEvent("onload",K))}function K(){(d.addEventListener||"load"===a.event.type||"complete"===d.readyState)&&(J(),n.ready())}n.ready.promise=function(b){if(!I)if(I=n.Deferred(),"complete"===d.readyState||"loading"!==d.readyState&&!d.documentElement.doScroll)a.setTimeout(n.ready);else 
if(d.addEventListener)d.addEventListener("DOMContentLoaded",K),a.addEventListener("load",K);else{d.attachEvent("onreadystatechange",K),a.attachEvent("onload",K);var c=!1;try{c=null==a.frameElement&&d.documentElement}catch(e){}c&&c.doScroll&&!function f(){if(!n.isReady){try{c.doScroll("left")}catch(b){return a.setTimeout(f,50)}J(),n.ready()}}()}return I.promise(b)},n.ready.promise();var L;for(L in n(l))break;l.ownFirst="0"===L,l.inlineBlockNeedsLayout=!1,n(function(){var a,b,c,e;c=d.getElementsByTagName("body")[0],c&&c.style&&(b=d.createElement("div"),e=d.createElement("div"),e.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(e).appendChild(b),"undefined"!=typeof b.style.zoom&&(b.style.cssText="display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1",l.inlineBlockNeedsLayout=a=3===b.offsetWidth,a&&(c.style.zoom=1)),c.removeChild(e))}),function(){var a=d.createElement("div");l.deleteExpando=!0;try{delete a.test}catch(b){l.deleteExpando=!1}a=null}();var M=function(a){var b=n.noData[(a.nodeName+" ").toLowerCase()],c=+a.nodeType||1;return 1!==c&&9!==c?!1:!b||b!==!0&&a.getAttribute("classid")===b},N=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,O=/([A-Z])/g;function P(a,b,c){if(void 0===c&&1===a.nodeType){var d="data-"+b.replace(O,"-$1").toLowerCase();if(c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:N.test(c)?n.parseJSON(c):c}catch(e){}n.data(a,b,c)}else c=void 0;
}return c}function Q(a){var b;for(b in a)if(("data"!==b||!n.isEmptyObject(a[b]))&&"toJSON"!==b)return!1;return!0}function R(a,b,d,e){if(M(a)){var f,g,h=n.expando,i=a.nodeType,j=i?n.cache:a,k=i?a[h]:a[h]&&h;if(k&&j[k]&&(e||j[k].data)||void 0!==d||"string"!=typeof b)return k||(k=i?a[h]=c.pop()||n.guid++:h),j[k]||(j[k]=i?{}:{toJSON:n.noop}),"object"!=typeof b&&"function"!=typeof b||(e?j[k]=n.extend(j[k],b):j[k].data=n.extend(j[k].data,b)),g=j[k],e||(g.data||(g.data={}),g=g.data),void 0!==d&&(g[n.camelCase(b)]=d),"string"==typeof b?(f=g[b],null==f&&(f=g[n.camelCase(b)])):f=g,f}}function S(a,b,c){if(M(a)){var d,e,f=a.nodeType,g=f?n.cache:a,h=f?a[n.expando]:n.expando;if(g[h]){if(b&&(d=c?g[h]:g[h].data)){n.isArray(b)?b=b.concat(n.map(b,n.camelCase)):b in d?b=[b]:(b=n.camelCase(b),b=b in d?[b]:b.split(" ")),e=b.length;while(e--)delete d[b[e]];if(c?!Q(d):!n.isEmptyObject(d))return}(c||(delete g[h].data,Q(g[h])))&&(f?n.cleanData([a],!0):l.deleteExpando||g!=g.window?delete g[h]:g[h]=void 0)}}}n.extend({cache:{},noData:{"applet ":!0,"embed ":!0,"object ":"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"},hasData:function(a){return a=a.nodeType?n.cache[a[n.expando]]:a[n.expando],!!a&&!Q(a)},data:function(a,b,c){return R(a,b,c)},removeData:function(a,b){return S(a,b)},_data:function(a,b,c){return R(a,b,c,!0)},_removeData:function(a,b){return S(a,b,!0)}}),n.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=n.data(f),1===f.nodeType&&!n._data(f,"parsedAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=n.camelCase(d.slice(5)),P(f,d,e[d])));n._data(f,"parsedAttrs",!0)}return e}return"object"==typeof a?this.each(function(){n.data(this,a)}):arguments.length>1?this.each(function(){n.data(this,a,b)}):f?P(f,a,n.data(f,a)):void 0},removeData:function(a){return this.each(function(){n.removeData(this,a)})}}),n.extend({queue:function(a,b,c){var d;return 
a?(b=(b||"fx")+"queue",d=n._data(a,b),c&&(!d||n.isArray(c)?d=n._data(a,b,n.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=n.queue(a,b),d=c.length,e=c.shift(),f=n._queueHooks(a,b),g=function(){n.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return n._data(a,c)||n._data(a,c,{empty:n.Callbacks("once memory").add(function(){n._removeData(a,b+"queue"),n._removeData(a,c)})})}}),n.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length<c?n.queue(this[0],a):void 0===b?this:this.each(function(){var c=n.queue(this,a,b);n._queueHooks(this,a),"fx"===a&&"inprogress"!==c[0]&&n.dequeue(this,a)})},dequeue:function(a){return this.each(function(){n.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,b){var c,d=1,e=n.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};"string"!=typeof a&&(b=a,a=void 0),a=a||"fx";while(g--)c=n._data(f[g],a+"queueHooks"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}}),function(){var a;l.shrinkWrapBlocks=function(){if(null!=a)return a;a=!1;var b,c,e;return c=d.getElementsByTagName("body")[0],c&&c.style?(b=d.createElement("div"),e=d.createElement("div"),e.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(e).appendChild(b),"undefined"!=typeof b.style.zoom&&(b.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:1px;width:1px;zoom:1",b.appendChild(d.createElement("div")).style.width="5px",a=3!==b.offsetWidth),c.removeChild(e),a):void 0}}();var T=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,U=new RegExp("^(?:([+-])=|)("+T+")([a-z%]*)$","i"),V=["Top","Right","Bottom","Left"],W=function(a,b){return 
a=b||a,"none"===n.css(a,"display")||!n.contains(a.ownerDocument,a)};function X(a,b,c,d){var e,f=1,g=20,h=d?function(){return d.cur()}:function(){return n.css(a,b,"")},i=h(),j=c&&c[3]||(n.cssNumber[b]?"":"px"),k=(n.cssNumber[b]||"px"!==j&&+i)&&U.exec(n.css(a,b));if(k&&k[3]!==j){j=j||k[3],c=c||[],k=+i||1;do f=f||".5",k/=f,n.style(a,b,k+j);while(f!==(f=h()/i)&&1!==f&&--g)}return c&&(k=+k||+i||0,e=c[1]?k+(c[1]+1)*c[2]:+c[2],d&&(d.unit=j,d.start=k,d.end=e)),e}var Y=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===n.type(c)){e=!0;for(h in c)Y(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,n.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(n(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},Z=/^(?:checkbox|radio)$/i,$=/<([\w:-]+)/,_=/^$|\/(?:java|ecma)script/i,aa=/^\s+/,ba="abbr|article|aside|audio|bdi|canvas|data|datalist|details|dialog|figcaption|figure|footer|header|hgroup|main|mark|meter|nav|output|picture|progress|section|summary|template|time|video";function ca(a){var b=ba.split("|"),c=a.createDocumentFragment();if(c.createElement)while(b.length)c.createElement(b.pop());return c}!function(){var a=d.createElement("div"),b=d.createDocumentFragment(),c=d.createElement("input");a.innerHTML=" <link/><table></table><a href='/a'>a</a><input 
type='checkbox'/>",l.leadingWhitespace=3===a.firstChild.nodeType,l.tbody=!a.getElementsByTagName("tbody").length,l.htmlSerialize=!!a.getElementsByTagName("link").length,l.html5Clone="<:nav></:nav>"!==d.createElement("nav").cloneNode(!0).outerHTML,c.type="checkbox",c.checked=!0,b.appendChild(c),l.appendChecked=c.checked,a.innerHTML="<textarea>x</textarea>",l.noCloneChecked=!!a.cloneNode(!0).lastChild.defaultValue,b.appendChild(a),c=d.createElement("input"),c.setAttribute("type","radio"),c.setAttribute("checked","checked"),c.setAttribute("name","t"),a.appendChild(c),l.checkClone=a.cloneNode(!0).cloneNode(!0).lastChild.checked,l.noCloneEvent=!!a.addEventListener,a[n.expando]=1,l.attributes=!a.getAttribute(n.expando)}();var da={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],area:[1,"<map>","</map>"],param:[1,"<object>","</object>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:l.htmlSerialize?[0,"",""]:[1,"X<div>","</div>"]};da.optgroup=da.option,da.tbody=da.tfoot=da.colgroup=da.caption=da.thead,da.th=da.td;function ea(a,b){var c,d,e=0,f="undefined"!=typeof a.getElementsByTagName?a.getElementsByTagName(b||"*"):"undefined"!=typeof a.querySelectorAll?a.querySelectorAll(b||"*"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||n.nodeName(d,b)?f.push(d):n.merge(f,ea(d,b));return void 0===b||b&&n.nodeName(a,b)?n.merge([a],f):f}function fa(a,b){for(var c,d=0;null!=(c=a[d]);d++)n._data(c,"globalEval",!b||n._data(b[d],"globalEval"))}var ga=/<|&#?\w+;/,ha=/<tbody/i;function ia(a){Z.test(a.type)&&(a.defaultChecked=a.checked)}function ja(a,b,c,d,e){for(var f,g,h,i,j,k,m,o=a.length,p=ca(b),q=[],r=0;o>r;r++)if(g=a[r],g||0===g)if("object"===n.type(g))n.merge(q,g.nodeType?[g]:g);else 
if(ga.test(g)){i=i||p.appendChild(b.createElement("div")),j=($.exec(g)||["",""])[1].toLowerCase(),m=da[j]||da._default,i.innerHTML=m[1]+n.htmlPrefilter(g)+m[2],f=m[0];while(f--)i=i.lastChild;if(!l.leadingWhitespace&&aa.test(g)&&q.push(b.createTextNode(aa.exec(g)[0])),!l.tbody){g="table"!==j||ha.test(g)?"<table>"!==m[1]||ha.test(g)?0:i:i.firstChild,f=g&&g.childNodes.length;while(f--)n.nodeName(k=g.childNodes[f],"tbody")&&!k.childNodes.length&&g.removeChild(k)}n.merge(q,i.childNodes),i.textContent="";while(i.firstChild)i.removeChild(i.firstChild);i=p.lastChild}else q.push(b.createTextNode(g));i&&p.removeChild(i),l.appendChecked||n.grep(ea(q,"input"),ia),r=0;while(g=q[r++])if(d&&n.inArray(g,d)>-1)e&&e.push(g);else if(h=n.contains(g.ownerDocument,g),i=ea(p.appendChild(g),"script"),h&&fa(i),c){f=0;while(g=i[f++])_.test(g.type||"")&&c.push(g)}return i=null,p}!function(){var b,c,e=d.createElement("div");for(b in{submit:!0,change:!0,focusin:!0})c="on"+b,(l[b]=c in a)||(e.setAttribute(c,"t"),l[b]=e.attributes[c].expando===!1);e=null}();var ka=/^(?:input|select|textarea)$/i,la=/^key/,ma=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,na=/^(?:focusinfocus|focusoutblur)$/,oa=/^([^.]*)(?:\.(.+)|)/;function pa(){return!0}function qa(){return!1}function ra(){try{return d.activeElement}catch(a){}}function sa(a,b,c,d,e,f){var g,h;if("object"==typeof b){"string"!=typeof c&&(d=d||c,c=void 0);for(h in b)sa(a,h,c,d,b[h],f);return a}if(null==d&&null==e?(e=c,d=c=void 0):null==e&&("string"==typeof c?(e=d,d=void 0):(e=d,d=c,c=void 0)),e===!1)e=qa;else if(!e)return a;return 1===f&&(g=e,e=function(a){return n().off(a),g.apply(this,arguments)},e.guid=g.guid||(g.guid=n.guid++)),a.each(function(){n.event.add(this,b,e,d,c)})}n.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=n._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=n.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return"undefined"==typeof 
n||a&&n.event.triggered===a.type?void 0:n.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||"").match(G)||[""],h=b.length;while(h--)f=oa.exec(b[h])||[],o=q=f[1],p=(f[2]||"").split(".").sort(),o&&(j=n.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=n.event.special[o]||{},l=n.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&n.expr.match.needsContext.test(e),namespace:p.join(".")},i),(m=g[o])||(m=g[o]=[],m.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent("on"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,l):m.push(l),n.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=n.hasData(a)&&n._data(a);if(r&&(k=r.events)){b=(b||"").match(G)||[""],j=b.length;while(j--)if(h=oa.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=n.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,m=k[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),i=f=m.length;while(f--)g=m[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&("**"!==d||!g.selector)||(m.splice(f,1),g.selector&&m.delegateCount--,l.remove&&l.remove.call(a,g));i&&!m.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||n.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)n.event.remove(a,o+b[j],c,d,!0);n.isEmptyObject(k)&&(delete r.handle,n._removeData(a,"events"))}},trigger:function(b,c,e,f){var g,h,i,j,l,m,o,p=[e||d],q=k.call(b,"type")?b.type:b,r=k.call(b,"namespace")?b.namespace.split("."):[];if(i=m=e=e||d,3!==e.nodeType&&8!==e.nodeType&&!na.test(q+n.event.triggered)&&(q.indexOf(".")>-1&&(r=q.split("."),q=r.shift(),r.sort()),h=q.indexOf(":")<0&&"on"+q,b=b[n.expando]?b:new n.Event(q,"object"==typeof b&&b),b.isTrigger=f?2:3,b.namespace=r.join("."),b.rnamespace=b.namespace?new 
RegExp("(^|\\.)"+r.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=e),c=null==c?[b]:n.makeArray(c,[b]),l=n.event.special[q]||{},f||!l.trigger||l.trigger.apply(e,c)!==!1)){if(!f&&!l.noBubble&&!n.isWindow(e)){for(j=l.delegateType||q,na.test(j+q)||(i=i.parentNode);i;i=i.parentNode)p.push(i),m=i;m===(e.ownerDocument||d)&&p.push(m.defaultView||m.parentWindow||a)}o=0;while((i=p[o++])&&!b.isPropagationStopped())b.type=o>1?j:l.bindType||q,g=(n._data(i,"events")||{})[b.type]&&n._data(i,"handle"),g&&g.apply(i,c),g=h&&i[h],g&&g.apply&&M(i)&&(b.result=g.apply(i,c),b.result===!1&&b.preventDefault());if(b.type=q,!f&&!b.isDefaultPrevented()&&(!l._default||l._default.apply(p.pop(),c)===!1)&&M(e)&&h&&e[q]&&!n.isWindow(e)){m=e[h],m&&(e[h]=null),n.event.triggered=q;try{e[q]()}catch(s){}n.event.triggered=void 0,m&&(e[h]=m)}return b.result}},dispatch:function(a){a=n.event.fix(a);var b,c,d,f,g,h=[],i=e.call(arguments),j=(n._data(this,"events")||{})[a.type]||[],k=n.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=n.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,c=0;while((g=f.handlers[c++])&&!a.isImmediatePropagationStopped())a.rnamespace&&!a.rnamespace.test(g.namespace)||(a.handleObj=g,a.data=g.data,d=((n.event.special[g.origType]||{}).handle||g.handler).apply(f.elem,i),void 0!==d&&(a.result=d)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&("click"!==a.type||isNaN(a.button)||a.button<1))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(d=[],c=0;h>c;c++)f=b[c],e=f.selector+" ",void 0===d[e]&&(d[e]=f.needsContext?n(e,this).index(i)>-1:n.find(e,this,null,[i]).length),d[e]&&d.push(f);d.length&&g.push({elem:i,handlers:d})}return 
h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},fix:function(a){if(a[n.expando])return a;var b,c,e,f=a.type,g=a,h=this.fixHooks[f];h||(this.fixHooks[f]=h=ma.test(f)?this.mouseHooks:la.test(f)?this.keyHooks:{}),e=h.props?this.props.concat(h.props):this.props,a=new n.Event(g),b=e.length;while(b--)c=e[b],a[c]=g[c];return a.target||(a.target=g.srcElement||d),3===a.target.nodeType&&(a.target=a.target.parentNode),a.metaKey=!!a.metaKey,h.filter?h.filter(a,g):a},props:"altKey bubbles cancelable ctrlKey currentTarget detail eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,b){var c,e,f,g=b.button,h=b.fromElement;return null==a.pageX&&null!=b.clientX&&(e=a.target.ownerDocument||d,f=e.documentElement,c=e.body,a.pageX=b.clientX+(f&&f.scrollLeft||c&&c.scrollLeft||0)-(f&&f.clientLeft||c&&c.clientLeft||0),a.pageY=b.clientY+(f&&f.scrollTop||c&&c.scrollTop||0)-(f&&f.clientTop||c&&c.clientTop||0)),!a.relatedTarget&&h&&(a.relatedTarget=h===a.target?b.toElement:h),a.which||void 0===g||(a.which=1&g?1:2&g?3:4&g?2:0),a}},special:{load:{noBubble:!0},focus:{trigger:function(){if(this!==ra()&&this.focus)try{return this.focus(),!1}catch(a){}},delegateType:"focusin"},blur:{trigger:function(){return this===ra()&&this.blur?(this.blur(),!1):void 0},delegateType:"focusout"},click:{trigger:function(){return n.nodeName(this,"input")&&"checkbox"===this.type&&this.click?(this.click(),!1):void 0},_default:function(a){return n.nodeName(a.target,"a")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&a.originalEvent&&(a.originalEvent.returnValue=a.result)}}},simulate:function(a,b,c){var d=n.extend(new 
n.Event,c,{type:a,isSimulated:!0});n.event.trigger(d,null,b),d.isDefaultPrevented()&&c.preventDefault()}},n.removeEvent=d.removeEventListener?function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c)}:function(a,b,c){var d="on"+b;a.detachEvent&&("undefined"==typeof a[d]&&(a[d]=null),a.detachEvent(d,c))},n.Event=function(a,b){return this instanceof n.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.returnValue===!1?pa:qa):this.type=a,b&&n.extend(this,b),this.timeStamp=a&&a.timeStamp||n.now(),void(this[n.expando]=!0)):new n.Event(a,b)},n.Event.prototype={constructor:n.Event,isDefaultPrevented:qa,isPropagationStopped:qa,isImmediatePropagationStopped:qa,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=pa,a&&(a.preventDefault?a.preventDefault():a.returnValue=!1)},stopPropagation:function(){var a=this.originalEvent;this.isPropagationStopped=pa,a&&!this.isSimulated&&(a.stopPropagation&&a.stopPropagation(),a.cancelBubble=!0)},stopImmediatePropagation:function(){var a=this.originalEvent;this.isImmediatePropagationStopped=pa,a&&a.stopImmediatePropagation&&a.stopImmediatePropagation(),this.stopPropagation()}},n.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(a,b){n.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c,d=this,e=a.relatedTarget,f=a.handleObj;return e&&(e===d||n.contains(d,e))||(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),l.submit||(n.event.special.submit={setup:function(){return n.nodeName(this,"form")?!1:void n.event.add(this,"click._submit keypress._submit",function(a){var b=a.target,c=n.nodeName(b,"input")||n.nodeName(b,"button")?n.prop(b,"form"):void 0;c&&!n._data(c,"submit")&&(n.event.add(c,"submit._submit",function(a){a._submitBubble=!0}),n._data(c,"submit",!0))})},postDispatch:function(a){a._submitBubble&&(delete 
a._submitBubble,this.parentNode&&!a.isTrigger&&n.event.simulate("submit",this.parentNode,a))},teardown:function(){return n.nodeName(this,"form")?!1:void n.event.remove(this,"._submit")}}),l.change||(n.event.special.change={setup:function(){return ka.test(this.nodeName)?("checkbox"!==this.type&&"radio"!==this.type||(n.event.add(this,"propertychange._change",function(a){"checked"===a.originalEvent.propertyName&&(this._justChanged=!0)}),n.event.add(this,"click._change",function(a){this._justChanged&&!a.isTrigger&&(this._justChanged=!1),n.event.simulate("change",this,a)})),!1):void n.event.add(this,"beforeactivate._change",function(a){var b=a.target;ka.test(b.nodeName)&&!n._data(b,"change")&&(n.event.add(b,"change._change",function(a){!this.parentNode||a.isSimulated||a.isTrigger||n.event.simulate("change",this.parentNode,a)}),n._data(b,"change",!0))})},handle:function(a){var b=a.target;return this!==b||a.isSimulated||a.isTrigger||"radio"!==b.type&&"checkbox"!==b.type?a.handleObj.handler.apply(this,arguments):void 0},teardown:function(){return n.event.remove(this,"._change"),!ka.test(this.nodeName)}}),l.focusin||n.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){n.event.simulate(b,a.target,n.event.fix(a))};n.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=n._data(d,b);e||d.addEventListener(a,c,!0),n._data(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=n._data(d,b)-1;e?n._data(d,b,e):(d.removeEventListener(a,c,!0),n._removeData(d,b))}}}),n.fn.extend({on:function(a,b,c,d){return sa(this,a,b,c,d)},one:function(a,b,c,d){return sa(this,a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,n(a.delegateTarget).off(d.namespace?d.origType+"."+d.namespace:d.origType,d.selector,d.handler),this;if("object"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return b!==!1&&"function"!=typeof b||(c=b,b=void 
0),c===!1&&(c=qa),this.each(function(){n.event.remove(this,a,c,b)})},trigger:function(a,b){return this.each(function(){n.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?n.event.trigger(a,b,c,!0):void 0}});var ta=/ jQuery\d+="(?:null|\d+)"/g,ua=new RegExp("<(?:"+ba+")[\\s/>]","i"),va=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:-]+)[^>]*)\/>/gi,wa=/<script|<style|<link/i,xa=/checked\s*(?:[^=]|=\s*.checked.)/i,ya=/^true\/(.*)/,za=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,Aa=ca(d),Ba=Aa.appendChild(d.createElement("div"));function Ca(a,b){return n.nodeName(a,"table")&&n.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function Da(a){return a.type=(null!==n.find.attr(a,"type"))+"/"+a.type,a}function Ea(a){var b=ya.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function Fa(a,b){if(1===b.nodeType&&n.hasData(a)){var c,d,e,f=n._data(a),g=n._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)n.event.add(b,c,h[c][d])}g.data&&(g.data=n.extend({},g.data))}}function Ga(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!l.noCloneEvent&&b[n.expando]){e=n._data(b);for(d in e.events)n.removeEvent(b,d,e.handle);b.removeAttribute(n.expando)}"script"===c&&b.text!==a.text?(Da(b).text=a.text,Ea(b)):"object"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),l.html5Clone&&a.innerHTML&&!n.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):"input"===c&&Z.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):"option"===c?b.defaultSelected=b.selected=a.defaultSelected:"input"!==c&&"textarea"!==c||(b.defaultValue=a.defaultValue)}}function Ha(a,b,c,d){b=f.apply([],b);var e,g,h,i,j,k,m=0,o=a.length,p=o-1,q=b[0],r=n.isFunction(q);if(r||o>1&&"string"==typeof q&&!l.checkClone&&xa.test(q))return a.each(function(e){var 
f=a.eq(e);r&&(b[0]=q.call(this,e,f.html())),Ha(f,b,c,d)});if(o&&(k=ja(b,a[0].ownerDocument,!1,a,d),e=k.firstChild,1===k.childNodes.length&&(k=e),e||d)){for(i=n.map(ea(k,"script"),Da),h=i.length;o>m;m++)g=k,m!==p&&(g=n.clone(g,!0,!0),h&&n.merge(i,ea(g,"script"))),c.call(a[m],g,m);if(h)for(j=i[i.length-1].ownerDocument,n.map(i,Ea),m=0;h>m;m++)g=i[m],_.test(g.type||"")&&!n._data(g,"globalEval")&&n.contains(j,g)&&(g.src?n._evalUrl&&n._evalUrl(g.src):n.globalEval((g.text||g.textContent||g.innerHTML||"").replace(za,"")));k=e=null}return a}function Ia(a,b,c){for(var d,e=b?n.filter(b,a):a,f=0;null!=(d=e[f]);f++)c||1!==d.nodeType||n.cleanData(ea(d)),d.parentNode&&(c&&n.contains(d.ownerDocument,d)&&fa(ea(d,"script")),d.parentNode.removeChild(d));return a}n.extend({htmlPrefilter:function(a){return a.replace(va,"<$1></$2>")},clone:function(a,b,c){var d,e,f,g,h,i=n.contains(a.ownerDocument,a);if(l.html5Clone||n.isXMLDoc(a)||!ua.test("<"+a.nodeName+">")?f=a.cloneNode(!0):(Ba.innerHTML=a.outerHTML,Ba.removeChild(f=Ba.firstChild)),!(l.noCloneEvent&&l.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||n.isXMLDoc(a)))for(d=ea(f),h=ea(a),g=0;null!=(e=h[g]);++g)d[g]&&Ga(e,d[g]);if(b)if(c)for(h=h||ea(a),d=d||ea(f),g=0;null!=(e=h[g]);g++)Fa(e,d[g]);else Fa(a,f);return d=ea(f,"script"),d.length>0&&fa(d,!i&&ea(a,"script")),d=h=e=null,f},cleanData:function(a,b){for(var d,e,f,g,h=0,i=n.expando,j=n.cache,k=l.attributes,m=n.event.special;null!=(d=a[h]);h++)if((b||M(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in g.events)m[e]?n.event.remove(d,e):n.removeEvent(d,e,g.handle);j[f]&&(delete j[f],k||"undefined"==typeof d.removeAttribute?d[i]=void 0:d.removeAttribute(i),c.push(f))}}}),n.fn.extend({domManip:Ha,detach:function(a){return Ia(this,a,!0)},remove:function(a){return Ia(this,a)},text:function(a){return Y(this,function(a){return void 0===a?n.text(this):this.empty().append((this[0]&&this[0].ownerDocument||d).createTextNode(a))},null,a,arguments.length)},append:function(){return 
Ha(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=Ca(this,a);b.appendChild(a)}})},prepend:function(){return Ha(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=Ca(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return Ha(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return Ha(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&n.cleanData(ea(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&n.nodeName(a,"select")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return n.clone(this,a,b)})},html:function(a){return Y(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(ta,""):void 0;if("string"==typeof a&&!wa.test(a)&&(l.htmlSerialize||!ua.test(a))&&(l.leadingWhitespace||!aa.test(a))&&!da[($.exec(a)||["",""])[1].toLowerCase()]){a=n.htmlPrefilter(a);try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(n.cleanData(ea(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=[];return Ha(this,arguments,function(b){var c=this.parentNode;n.inArray(this,a)<0&&(n.cleanData(ea(this)),c&&c.replaceChild(b,this))},a)}}),n.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){n.fn[a]=function(a){for(var c,d=0,e=[],f=n(a),h=f.length-1;h>=d;d++)c=d===h?this:this.clone(!0),n(f[d])[b](c),g.apply(e,c.get());return this.pushStack(e)}});var Ja,Ka={HTML:"block",BODY:"block"};function La(a,b){var c=n(b.createElement(a)).appendTo(b.body),d=n.css(c[0],"display");return c.detach(),d}function Ma(a){var b=d,c=Ka[a];return 
c||(c=La(a,b),"none"!==c&&c||(Ja=(Ja||n("<iframe frameborder='0' width='0' height='0'/>")).appendTo(b.documentElement),b=(Ja[0].contentWindow||Ja[0].contentDocument).document,b.write(),b.close(),c=La(a,b),Ja.detach()),Ka[a]=c),c}var Na=/^margin/,Oa=new RegExp("^("+T+")(?!px)[a-z%]+$","i"),Pa=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e},Qa=d.documentElement;!function(){var b,c,e,f,g,h,i=d.createElement("div"),j=d.createElement("div");if(j.style){j.style.cssText="float:left;opacity:.5",l.opacity="0.5"===j.style.opacity,l.cssFloat=!!j.style.cssFloat,j.style.backgroundClip="content-box",j.cloneNode(!0).style.backgroundClip="",l.clearCloneStyle="content-box"===j.style.backgroundClip,i=d.createElement("div"),i.style.cssText="border:0;width:8px;height:0;top:0;left:-9999px;padding:0;margin-top:1px;position:absolute",j.innerHTML="",i.appendChild(j),l.boxSizing=""===j.style.boxSizing||""===j.style.MozBoxSizing||""===j.style.WebkitBoxSizing,n.extend(l,{reliableHiddenOffsets:function(){return null==b&&k(),f},boxSizingReliable:function(){return null==b&&k(),e},pixelMarginRight:function(){return null==b&&k(),c},pixelPosition:function(){return null==b&&k(),b},reliableMarginRight:function(){return null==b&&k(),g},reliableMarginLeft:function(){return null==b&&k(),h}});function k(){var 
k,l,m=d.documentElement;m.appendChild(i),j.style.cssText="-webkit-box-sizing:border-box;box-sizing:border-box;position:relative;display:block;margin:auto;border:1px;padding:1px;top:1%;width:50%",b=e=h=!1,c=g=!0,a.getComputedStyle&&(l=a.getComputedStyle(j),b="1%"!==(l||{}).top,h="2px"===(l||{}).marginLeft,e="4px"===(l||{width:"4px"}).width,j.style.marginRight="50%",c="4px"===(l||{marginRight:"4px"}).marginRight,k=j.appendChild(d.createElement("div")),k.style.cssText=j.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:0",k.style.marginRight=k.style.width="0",j.style.width="1px",g=!parseFloat((a.getComputedStyle(k)||{}).marginRight),j.removeChild(k)),j.style.display="none",f=0===j.getClientRects().length,f&&(j.style.display="",j.innerHTML="<table><tr><td></td><td>t</td></tr></table>",j.childNodes[0].style.borderCollapse="separate",k=j.getElementsByTagName("td"),k[0].style.cssText="margin:0;border:0;padding:0;display:none",f=0===k[0].offsetHeight,f&&(k[0].style.display="",k[1].style.display="none",f=0===k[0].offsetHeight)),m.removeChild(i)}}}();var Ra,Sa,Ta=/^(top|right|bottom|left)$/;a.getComputedStyle?(Ra=function(b){var c=b.ownerDocument.defaultView;return c&&c.opener||(c=a),c.getComputedStyle(b)},Sa=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ra(a),g=c?c.getPropertyValue(b)||c[b]:void 0,""!==g&&void 0!==g||n.contains(a.ownerDocument,a)||(g=n.style(a,b)),c&&!l.pixelMarginRight()&&Oa.test(g)&&Na.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f),void 0===g?g:g+""}):Qa.currentStyle&&(Ra=function(a){return a.currentStyle},Sa=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ra(a),g=c?c[b]:void 0,null==g&&h&&h[b]&&(g=h[b]),Oa.test(g)&&!Ta.test(b)&&(d=h.left,e=a.runtimeStyle,f=e&&e.left,f&&(e.left=a.currentStyle.left),h.left="fontSize"===b?"1em":g,g=h.pixelLeft+"px",h.left=d,f&&(e.left=f)),void 
0===g?g:g+""||"auto"});function Ua(a,b){return{get:function(){return a()?void delete this.get:(this.get=b).apply(this,arguments)}}}var Va=/alpha\([^)]*\)/i,Wa=/opacity\s*=\s*([^)]*)/i,Xa=/^(none|table(?!-c[ea]).+)/,Ya=new RegExp("^("+T+")(.*)$","i"),Za={position:"absolute",visibility:"hidden",display:"block"},$a={letterSpacing:"0",fontWeight:"400"},_a=["Webkit","O","Moz","ms"],ab=d.createElement("div").style;function bb(a){if(a in ab)return a;var b=a.charAt(0).toUpperCase()+a.slice(1),c=_a.length;while(c--)if(a=_a[c]+b,a in ab)return a}function cb(a,b){for(var c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=n._data(d,"olddisplay"),c=d.style.display,b?(f[g]||"none"!==c||(d.style.display=""),""===d.style.display&&W(d)&&(f[g]=n._data(d,"olddisplay",Ma(d.nodeName)))):(e=W(d),(c&&"none"!==c||!e)&&n._data(d,"olddisplay",e?c:n.css(d,"display"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&"none"!==d.style.display&&""!==d.style.display||(d.style.display=b?f[g]||"":"none"));return a}function db(a,b,c){var d=Ya.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[2]||"px"):b}function eb(a,b,c,d,e){for(var f=c===(d?"border":"content")?4:"width"===b?1:0,g=0;4>f;f+=2)"margin"===c&&(g+=n.css(a,c+V[f],!0,e)),d?("content"===c&&(g-=n.css(a,"padding"+V[f],!0,e)),"margin"!==c&&(g-=n.css(a,"border"+V[f]+"Width",!0,e))):(g+=n.css(a,"padding"+V[f],!0,e),"padding"!==c&&(g+=n.css(a,"border"+V[f]+"Width",!0,e)));return g}function fb(a,b,c){var d=!0,e="width"===b?a.offsetWidth:a.offsetHeight,f=Ra(a),g=l.boxSizing&&"border-box"===n.css(a,"boxSizing",!1,f);if(0>=e||null==e){if(e=Sa(a,b,f),(0>e||null==e)&&(e=a.style[b]),Oa.test(e))return e;d=g&&(l.boxSizingReliable()||e===a.style[b]),e=parseFloat(e)||0}return e+eb(a,b,c||(g?"border":"content"),d,f)+"px"}n.extend({cssHooks:{opacity:{get:function(a,b){if(b){var 
c=Sa(a,"opacity");return""===c?"1":c}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":l.cssFloat?"cssFloat":"styleFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=n.camelCase(b),i=a.style;if(b=n.cssProps[h]||(n.cssProps[h]=bb(h)||h),g=n.cssHooks[b]||n.cssHooks[h],void 0===c)return g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b];if(f=typeof c,"string"===f&&(e=U.exec(c))&&e[1]&&(c=X(a,b,e),f="number"),null!=c&&c===c&&("number"===f&&(c+=e&&e[3]||(n.cssNumber[h]?"":"px")),l.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),!(g&&"set"in g&&void 0===(c=g.set(a,c,d)))))try{i[b]=c}catch(j){}}},css:function(a,b,c,d){var e,f,g,h=n.camelCase(b);return b=n.cssProps[h]||(n.cssProps[h]=bb(h)||h),g=n.cssHooks[b]||n.cssHooks[h],g&&"get"in g&&(f=g.get(a,!0,c)),void 0===f&&(f=Sa(a,b,d)),"normal"===f&&b in $a&&(f=$a[b]),""===c||c?(e=parseFloat(f),c===!0||isFinite(e)?e||0:f):f}}),n.each(["height","width"],function(a,b){n.cssHooks[b]={get:function(a,c,d){return c?Xa.test(n.css(a,"display"))&&0===a.offsetWidth?Pa(a,Za,function(){return fb(a,b,d)}):fb(a,b,d):void 0},set:function(a,c,d){var e=d&&Ra(a);return db(a,c,d?eb(a,b,d,l.boxSizing&&"border-box"===n.css(a,"boxSizing",!1,e),e):0)}}}),l.opacity||(n.cssHooks.opacity={get:function(a,b){return Wa.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?.01*parseFloat(RegExp.$1)+"":b?"1":""},set:function(a,b){var c=a.style,d=a.currentStyle,e=n.isNumeric(b)?"alpha(opacity="+100*b+")":"",f=d&&d.filter||c.filter||"";c.zoom=1,(b>=1||""===b)&&""===n.trim(f.replace(Va,""))&&c.removeAttribute&&(c.removeAttribute("filter"),""===b||d&&!d.filter)||(c.filter=Va.test(f)?f.replace(Va,e):f+" "+e)}}),n.cssHooks.marginRight=Ua(l.reliableMarginRight,function(a,b){return b?Pa(a,{display:"inline-block"},Sa,[a,"marginRight"]):void 
0}),n.cssHooks.marginLeft=Ua(l.reliableMarginLeft,function(a,b){return b?(parseFloat(Sa(a,"marginLeft"))||(n.contains(a.ownerDocument,a)?a.getBoundingClientRect().left-Pa(a,{
marginLeft:0},function(){return a.getBoundingClientRect().left}):0))+"px":void 0}),n.each({margin:"",padding:"",border:"Width"},function(a,b){n.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];4>d;d++)e[a+V[d]+b]=f[d]||f[d-2]||f[0];return e}},Na.test(a)||(n.cssHooks[a+b].set=db)}),n.fn.extend({css:function(a,b){return Y(this,function(a,b,c){var d,e,f={},g=0;if(n.isArray(b)){for(d=Ra(a),e=b.length;e>g;g++)f[b[g]]=n.css(a,b[g],!1,d);return f}return void 0!==c?n.style(a,b,c):n.css(a,b)},a,b,arguments.length>1)},show:function(){return cb(this,!0)},hide:function(){return cb(this)},toggle:function(a){return"boolean"==typeof a?a?this.show():this.hide():this.each(function(){W(this)?n(this).show():n(this).hide()})}});function gb(a,b,c,d,e){return new gb.prototype.init(a,b,c,d,e)}n.Tween=gb,gb.prototype={constructor:gb,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||n.easing._default,this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(n.cssNumber[c]?"":"px")},cur:function(){var a=gb.propHooks[this.prop];return a&&a.get?a.get(this):gb.propHooks._default.get(this)},run:function(a){var b,c=gb.propHooks[this.prop];return this.options.duration?this.pos=b=n.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):this.pos=b=a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):gb.propHooks._default.set(this),this}},gb.prototype.init.prototype=gb.prototype,gb.propHooks={_default:{get:function(a){var b;return 
1!==a.elem.nodeType||null!=a.elem[a.prop]&&null==a.elem.style[a.prop]?a.elem[a.prop]:(b=n.css(a.elem,a.prop,""),b&&"auto"!==b?b:0)},set:function(a){n.fx.step[a.prop]?n.fx.step[a.prop](a):1!==a.elem.nodeType||null==a.elem.style[n.cssProps[a.prop]]&&!n.cssHooks[a.prop]?a.elem[a.prop]=a.now:n.style(a.elem,a.prop,a.now+a.unit)}}},gb.propHooks.scrollTop=gb.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},n.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2},_default:"swing"},n.fx=gb.prototype.init,n.fx.step={};var hb,ib,jb=/^(?:toggle|show|hide)$/,kb=/queueHooks$/;function lb(){return a.setTimeout(function(){hb=void 0}),hb=n.now()}function mb(a,b){var c,d={height:a},e=0;for(b=b?1:0;4>e;e+=2-b)c=V[e],d["margin"+c]=d["padding"+c]=a;return b&&(d.opacity=d.width=a),d}function nb(a,b,c){for(var d,e=(qb.tweeners[b]||[]).concat(qb.tweeners["*"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function ob(a,b,c){var d,e,f,g,h,i,j,k,m=this,o={},p=a.style,q=a.nodeType&&W(a),r=n._data(a,"fxshow");c.queue||(h=n._queueHooks(a,"fx"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,m.always(function(){m.always(function(){h.unqueued--,n.queue(a,"fx").length||h.empty.fire()})})),1===a.nodeType&&("height"in b||"width"in b)&&(c.overflow=[p.overflow,p.overflowX,p.overflowY],j=n.css(a,"display"),k="none"===j?n._data(a,"olddisplay")||Ma(a.nodeName):j,"inline"===k&&"none"===n.css(a,"float")&&(l.inlineBlockNeedsLayout&&"inline"!==Ma(a.nodeName)?p.zoom=1:p.display="inline-block")),c.overflow&&(p.overflow="hidden",l.shrinkWrapBlocks()||m.always(function(){p.overflow=c.overflow[0],p.overflowX=c.overflow[1],p.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],jb.exec(e)){if(delete b[d],f=f||"toggle"===e,e===(q?"hide":"show")){if("show"!==e||!r||void 0===r[d])continue;q=!0}o[d]=r&&r[d]||n.style(a,d)}else j=void 
0;if(n.isEmptyObject(o))"inline"===("none"===j?Ma(a.nodeName):j)&&(p.display=j);else{r?"hidden"in r&&(q=r.hidden):r=n._data(a,"fxshow",{}),f&&(r.hidden=!q),q?n(a).show():m.done(function(){n(a).hide()}),m.done(function(){var b;n._removeData(a,"fxshow");for(b in o)n.style(a,b,o[b])});for(d in o)g=nb(q?r[d]:0,d,m),d in r||(r[d]=g.start,q&&(g.end=g.start,g.start="width"===d||"height"===d?1:0))}}function pb(a,b){var c,d,e,f,g;for(c in a)if(d=n.camelCase(c),e=b[d],f=a[c],n.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=n.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function qb(a,b,c){var d,e,f=0,g=qb.prefilters.length,h=n.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=hb||lb(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:n.extend({},b),opts:n.extend(!0,{specialEasing:{},easing:n.easing._default},c),originalProperties:b,originalOptions:c,startTime:hb||lb(),duration:c.duration,tweens:[],createTween:function(b,c){var d=n.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?(h.notifyWith(a,[j,1,0]),h.resolveWith(a,[j,b])):h.rejectWith(a,[j,b]),this}}),k=j.props;for(pb(k,j.opts.specialEasing);g>f;f++)if(d=qb.prefilters[f].call(j,a,k,j.opts))return n.isFunction(d.stop)&&(n._queueHooks(j.elem,j.opts.queue).stop=n.proxy(d.stop,d)),d;return n.map(k,nb,j),n.isFunction(j.opts.start)&&j.opts.start.call(a,j),n.fx.timer(n.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}n.Animation=n.extend(qb,{tweeners:{"*":[function(a,b){var c=this.createTween(a,b);return 
X(c.elem,a,U.exec(b),c),c}]},tweener:function(a,b){n.isFunction(a)?(b=a,a=["*"]):a=a.match(G);for(var c,d=0,e=a.length;e>d;d++)c=a[d],qb.tweeners[c]=qb.tweeners[c]||[],qb.tweeners[c].unshift(b)},prefilters:[ob],prefilter:function(a,b){b?qb.prefilters.unshift(a):qb.prefilters.push(a)}}),n.speed=function(a,b,c){var d=a&&"object"==typeof a?n.extend({},a):{complete:c||!c&&b||n.isFunction(a)&&a,duration:a,easing:c&&b||b&&!n.isFunction(b)&&b};return d.duration=n.fx.off?0:"number"==typeof d.duration?d.duration:d.duration in n.fx.speeds?n.fx.speeds[d.duration]:n.fx.speeds._default,null!=d.queue&&d.queue!==!0||(d.queue="fx"),d.old=d.complete,d.complete=function(){n.isFunction(d.old)&&d.old.call(this),d.queue&&n.dequeue(this,d.queue)},d},n.fn.extend({fadeTo:function(a,b,c,d){return this.filter(W).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=n.isEmptyObject(a),f=n.speed(b,c,d),g=function(){var b=qb(this,n.extend({},a),f);(e||n._data(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=n.timers,g=n._data(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&kb.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));!b&&c||n.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=n._data(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=n.timers,g=d?d.length:0;for(c.finish=!0,n.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),n.each(["toggle","show","hide"],function(a,b){var c=n.fn[b];n.fn[b]=function(a,d,e){return 
null==a||"boolean"==typeof a?c.apply(this,arguments):this.animate(mb(b,!0),a,d,e)}}),n.each({slideDown:mb("show"),slideUp:mb("hide"),slideToggle:mb("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){n.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),n.timers=[],n.fx.tick=function(){var a,b=n.timers,c=0;for(hb=n.now();c<b.length;c++)a=b[c],a()||b[c]!==a||b.splice(c--,1);b.length||n.fx.stop(),hb=void 0},n.fx.timer=function(a){n.timers.push(a),a()?n.fx.start():n.timers.pop()},n.fx.interval=13,n.fx.start=function(){ib||(ib=a.setInterval(n.fx.tick,n.fx.interval))},n.fx.stop=function(){a.clearInterval(ib),ib=null},n.fx.speeds={slow:600,fast:200,_default:400},n.fn.delay=function(b,c){return b=n.fx?n.fx.speeds[b]||b:b,c=c||"fx",this.queue(c,function(c,d){var e=a.setTimeout(c,b);d.stop=function(){a.clearTimeout(e)}})},function(){var a,b=d.createElement("input"),c=d.createElement("div"),e=d.createElement("select"),f=e.appendChild(d.createElement("option"));c=d.createElement("div"),c.setAttribute("className","t"),c.innerHTML=" <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",a=c.getElementsByTagName("a")[0],b.setAttribute("type","checkbox"),c.appendChild(b),a=c.getElementsByTagName("a")[0],a.style.cssText="top:1px",l.getSetAttribute="t"!==c.className,l.style=/top/.test(a.getAttribute("style")),l.hrefNormalized="/a"===a.getAttribute("href"),l.checkOn=!!b.value,l.optSelected=f.selected,l.enctype=!!d.createElement("form").enctype,e.disabled=!0,l.optDisabled=!f.disabled,b=d.createElement("input"),b.setAttribute("value",""),l.input=""===b.getAttribute("value"),b.value="t",b.setAttribute("type","radio"),l.radioValue="t"===b.value}();var rb=/\r/g,sb=/[\x20\t\r\n\f]+/g;n.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=n.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,n(this).val()):a,null==e?e="":"number"==typeof 
e?e+="":n.isArray(e)&&(e=n.map(e,function(a){return null==a?"":a+""})),b=n.valHooks[this.type]||n.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=n.valHooks[e.type]||n.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(rb,""):null==c?"":c)}}}),n.extend({valHooks:{option:{get:function(a){var b=n.find.attr(a,"value");return null!=b?b:n.trim(n.text(a)).replace(sb," ")}},select:{get:function(a){for(var b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],(c.selected||i===e)&&(l.optDisabled?!c.disabled:null===c.getAttribute("disabled"))&&(!c.parentNode.disabled||!n.nodeName(c.parentNode,"optgroup"))){if(b=n(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var c,d,e=a.options,f=n.makeArray(b),g=e.length;while(g--)if(d=e[g],n.inArray(n.valHooks.option.get(d),f)>-1)try{d.selected=c=!0}catch(h){d.scrollHeight}else d.selected=!1;return c||(a.selectedIndex=-1),e}}}}),n.each(["radio","checkbox"],function(){n.valHooks[this]={set:function(a,b){return n.isArray(b)?a.checked=n.inArray(n(a).val(),b)>-1:void 0}},l.checkOn||(n.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})});var tb,ub,vb=n.expr.attrHandle,wb=/^(?:checked|selected)$/i,xb=l.getSetAttribute,yb=l.input;n.fn.extend({attr:function(a,b){return Y(this,n.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){n.removeAttr(this,a)})}}),n.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(3!==f&&8!==f&&2!==f)return"undefined"==typeof a.getAttribute?n.prop(a,b,c):(1===f&&n.isXMLDoc(a)||(b=b.toLowerCase(),e=n.attrHooks[b]||(n.expr.match.bool.test(b)?ub:tb)),void 0!==c?null===c?void n.removeAttr(a,b):e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:(a.setAttribute(b,c+""),c):e&&"get"in e&&null!==(d=e.get(a,b))?d:(d=n.find.attr(a,b),null==d?void 
0:d))},attrHooks:{type:{set:function(a,b){if(!l.radioValue&&"radio"===b&&n.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}},removeAttr:function(a,b){var c,d,e=0,f=b&&b.match(G);if(f&&1===a.nodeType)while(c=f[e++])d=n.propFix[c]||c,n.expr.match.bool.test(c)?yb&&xb||!wb.test(c)?a[d]=!1:a[n.camelCase("default-"+c)]=a[d]=!1:n.attr(a,c,""),a.removeAttribute(xb?c:d)}}),ub={set:function(a,b,c){return b===!1?n.removeAttr(a,c):yb&&xb||!wb.test(c)?a.setAttribute(!xb&&n.propFix[c]||c,c):a[n.camelCase("default-"+c)]=a[c]=!0,c}},n.each(n.expr.match.bool.source.match(/\w+/g),function(a,b){var c=vb[b]||n.find.attr;yb&&xb||!wb.test(b)?vb[b]=function(a,b,d){var e,f;return d||(f=vb[b],vb[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,vb[b]=f),e}:vb[b]=function(a,b,c){return c?void 0:a[n.camelCase("default-"+b)]?b.toLowerCase():null}}),yb&&xb||(n.attrHooks.value={set:function(a,b,c){return n.nodeName(a,"input")?void(a.defaultValue=b):tb&&tb.set(a,b,c)}}),xb||(tb={set:function(a,b,c){var d=a.getAttributeNode(c);return d||a.setAttributeNode(d=a.ownerDocument.createAttribute(c)),d.value=b+="","value"===c||b===a.getAttribute(c)?b:void 0}},vb.id=vb.name=vb.coords=function(a,b,c){var d;return c?void 0:(d=a.getAttributeNode(b))&&""!==d.value?d.value:null},n.valHooks.button={get:function(a,b){var c=a.getAttributeNode(b);return c&&c.specified?c.value:void 0},set:tb.set},n.attrHooks.contenteditable={set:function(a,b,c){tb.set(a,""===b?!1:b,c)}},n.each(["width","height"],function(a,b){n.attrHooks[b]={set:function(a,c){return""===c?(a.setAttribute(b,"auto"),c):void 0}}})),l.style||(n.attrHooks.style={get:function(a){return a.style.cssText||void 0},set:function(a,b){return a.style.cssText=b+""}});var zb=/^(?:input|select|textarea|button|object)$/i,Ab=/^(?:a|area)$/i;n.fn.extend({prop:function(a,b){return Y(this,n.prop,a,b,arguments.length>1)},removeProp:function(a){return a=n.propFix[a]||a,this.each(function(){try{this[a]=void 0,delete 
this[a]}catch(b){}})}}),n.extend({prop:function(a,b,c){var d,e,f=a.nodeType;if(3!==f&&8!==f&&2!==f)return 1===f&&n.isXMLDoc(a)||(b=n.propFix[b]||b,e=n.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){var b=n.find.attr(a,"tabindex");return b?parseInt(b,10):zb.test(a.nodeName)||Ab.test(a.nodeName)&&a.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),l.hrefNormalized||n.each(["href","src"],function(a,b){n.propHooks[b]={get:function(a){return a.getAttribute(b,4)}}}),l.optSelected||(n.propHooks.selected={get:function(a){var b=a.parentNode;return b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex),null},set:function(a){var b=a.parentNode;b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex)}}),n.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){n.propFix[this.toLowerCase()]=this}),l.enctype||(n.propFix.enctype="encoding");var Bb=/[\t\r\n\f]/g;function Cb(a){return n.attr(a,"class")||""}n.fn.extend({addClass:function(a){var b,c,d,e,f,g,h,i=0;if(n.isFunction(a))return this.each(function(b){n(this).addClass(a.call(this,b,Cb(this)))});if("string"==typeof a&&a){b=a.match(G)||[];while(c=this[i++])if(e=Cb(c),d=1===c.nodeType&&(" "+e+" ").replace(Bb," ")){g=0;while(f=b[g++])d.indexOf(" "+f+" ")<0&&(d+=f+" ");h=n.trim(d),e!==h&&n.attr(c,"class",h)}}return this},removeClass:function(a){var b,c,d,e,f,g,h,i=0;if(n.isFunction(a))return this.each(function(b){n(this).removeClass(a.call(this,b,Cb(this)))});if(!arguments.length)return this.attr("class","");if("string"==typeof a&&a){b=a.match(G)||[];while(c=this[i++])if(e=Cb(c),d=1===c.nodeType&&(" "+e+" ").replace(Bb," ")){g=0;while(f=b[g++])while(d.indexOf(" "+f+" ")>-1)d=d.replace(" "+f+" "," ");h=n.trim(d),e!==h&&n.attr(c,"class",h)}}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof 
b&&"string"===c?b?this.addClass(a):this.removeClass(a):n.isFunction(a)?this.each(function(c){n(this).toggleClass(a.call(this,c,Cb(this),b),b)}):this.each(function(){var b,d,e,f;if("string"===c){d=0,e=n(this),f=a.match(G)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else void 0!==a&&"boolean"!==c||(b=Cb(this),b&&n._data(this,"__className__",b),n.attr(this,"class",b||a===!1?"":n._data(this,"__className__")||""))})},hasClass:function(a){var b,c,d=0;b=" "+a+" ";while(c=this[d++])if(1===c.nodeType&&(" "+Cb(c)+" ").replace(Bb," ").indexOf(b)>-1)return!0;return!1}}),n.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){n.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),n.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}});var Db=a.location,Eb=n.now(),Fb=/\?/,Gb=/(,)|(\[|{)|(}|])|"(?:[^"\\\r\n]|\\["\\\/bfnrt]|\\u[\da-fA-F]{4})*"\s*:?|true|false|null|-?(?!0\d)\d+(?:\.\d+|)(?:[eE][+-]?\d+|)/g;n.parseJSON=function(b){if(a.JSON&&a.JSON.parse)return a.JSON.parse(b+"");var c,d=null,e=n.trim(b+"");return e&&!n.trim(e.replace(Gb,function(a,b,e,f){return c&&b&&(d=0),0===d?a:(c=e||b,d+=!f-!e,"")}))?Function("return "+e)():n.error("Invalid JSON: "+b)},n.parseXML=function(b){var c,d;if(!b||"string"!=typeof b)return null;try{a.DOMParser?(d=new a.DOMParser,c=d.parseFromString(b,"text/xml")):(c=new a.ActiveXObject("Microsoft.XMLDOM"),c.async="false",c.loadXML(b))}catch(e){c=void 0}return c&&c.documentElement&&!c.getElementsByTagName("parsererror").length||n.error("Invalid XML: "+b),c};var Hb=/#.*$/,Ib=/([?&])_=[^&]*/,Jb=/^(.*?):[ 
\t]*([^\r\n]*)\r?$/gm,Kb=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Lb=/^(?:GET|HEAD)$/,Mb=/^\/\//,Nb=/^([\w.+-]+:)(?:\/\/(?:[^\/?#]*@|)([^\/?#:]*)(?::(\d+)|)|)/,Ob={},Pb={},Qb="*/".concat("*"),Rb=Db.href,Sb=Nb.exec(Rb.toLowerCase())||[];function Tb(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(G)||[];if(n.isFunction(c))while(d=f[e++])"+"===d.charAt(0)?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function Ub(a,b,c,d){var e={},f=a===Pb;function g(h){var i;return e[h]=!0,n.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"]&&g("*")}function Vb(a,b){var c,d,e=n.ajaxSettings.flatOptions||{};for(d in b)void 0!==b[d]&&((e[d]?a:c||(c={}))[d]=b[d]);return c&&n.extend(!0,a,c),a}function Wb(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 0===e&&(e=a.mimeType||b.getResponseHeader("Content-Type"));if(e)for(g in h)if(h[g]&&h[g].test(e)){i.unshift(g);break}if(i[0]in c)f=i[0];else{for(g in c){if(!i[0]||a.converters[g+" "+i[0]]){f=g;break}d||(d=g)}f=f||d}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function Xb(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to 
"+f}}}return{state:"success",data:b}}n.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:Rb,type:"GET",isLocal:Kb.test(Sb[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Qb,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":n.parseJSON,"text xml":n.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?Vb(Vb(a,n.ajaxSettings),b):Vb(n.ajaxSettings,a)},ajaxPrefilter:Tb(Ob),ajaxTransport:Tb(Pb),ajax:function(b,c){"object"==typeof b&&(c=b,b=void 0),c=c||{};var d,e,f,g,h,i,j,k,l=n.ajaxSetup({},c),m=l.context||l,o=l.context&&(m.nodeType||m.jquery)?n(m):n.event,p=n.Deferred(),q=n.Callbacks("once memory"),r=l.statusCode||{},s={},t={},u=0,v="canceled",w={readyState:0,getResponseHeader:function(a){var b;if(2===u){if(!k){k={};while(b=Jb.exec(g))k[b[1].toLowerCase()]=b[2]}b=k[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===u?g:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return u||(a=t[c]=t[c]||a,s[a]=b),this},overrideMimeType:function(a){return u||(l.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>u)for(b in a)r[b]=[r[b],a[b]];else w.always(a[w.status]);return this},abort:function(a){var b=a||v;return j&&j.abort(b),y(0,b),this}};if(p.promise(w).complete=q.add,w.success=w.done,w.error=w.fail,l.url=((b||l.url||Rb)+"").replace(Hb,"").replace(Mb,Sb[1]+"//"),l.type=c.method||c.type||l.method||l.type,l.dataTypes=n.trim(l.dataType||"*").toLowerCase().match(G)||[""],null==l.crossDomain&&(d=Nb.exec(l.url.toLowerCase()),l.crossDomain=!(!d||d[1]===Sb[1]&&d[2]===Sb[2]&&(d[3]||("http:"===d[1]?"80":"443"))===(Sb[3]||("http:"===Sb[1]?"80":"443")))),l.data&&l.processData&&"string"!=typeof 
l.data&&(l.data=n.param(l.data,l.traditional)),Ub(Ob,l,c,w),2===u)return w;i=n.event&&l.global,i&&0===n.active++&&n.event.trigger("ajaxStart"),l.type=l.type.toUpperCase(),l.hasContent=!Lb.test(l.type),f=l.url,l.hasContent||(l.data&&(f=l.url+=(Fb.test(f)?"&":"?")+l.data,delete l.data),l.cache===!1&&(l.url=Ib.test(f)?f.replace(Ib,"$1_="+Eb++):f+(Fb.test(f)?"&":"?")+"_="+Eb++)),l.ifModified&&(n.lastModified[f]&&w.setRequestHeader("If-Modified-Since",n.lastModified[f]),n.etag[f]&&w.setRequestHeader("If-None-Match",n.etag[f])),(l.data&&l.hasContent&&l.contentType!==!1||c.contentType)&&w.setRequestHeader("Content-Type",l.contentType),w.setRequestHeader("Accept",l.dataTypes[0]&&l.accepts[l.dataTypes[0]]?l.accepts[l.dataTypes[0]]+("*"!==l.dataTypes[0]?", "+Qb+"; q=0.01":""):l.accepts["*"]);for(e in l.headers)w.setRequestHeader(e,l.headers[e]);if(l.beforeSend&&(l.beforeSend.call(m,w,l)===!1||2===u))return w.abort();v="abort";for(e in{success:1,error:1,complete:1})w[e](l[e]);if(j=Ub(Pb,l,c,w)){if(w.readyState=1,i&&o.trigger("ajaxSend",[w,l]),2===u)return w;l.async&&l.timeout>0&&(h=a.setTimeout(function(){w.abort("timeout")},l.timeout));try{u=1,j.send(s,y)}catch(x){if(!(2>u))throw x;y(-1,x)}}else y(-1,"No Transport");function y(b,c,d,e){var k,s,t,v,x,y=c;2!==u&&(u=2,h&&a.clearTimeout(h),j=void 0,g=e||"",w.readyState=b>0?4:0,k=b>=200&&300>b||304===b,d&&(v=Wb(l,w,d)),v=Xb(l,v,w,k),k?(l.ifModified&&(x=w.getResponseHeader("Last-Modified"),x&&(n.lastModified[f]=x),x=w.getResponseHeader("etag"),x&&(n.etag[f]=x)),204===b||"HEAD"===l.type?y="nocontent":304===b?y="notmodified":(y=v.state,s=v.data,t=v.error,k=!t)):(t=y,!b&&y||(y="error",0>b&&(b=0))),w.status=b,w.statusText=(c||y)+"",k?p.resolveWith(m,[s,y,w]):p.rejectWith(m,[w,y,t]),w.statusCode(r),r=void 0,i&&o.trigger(k?"ajaxSuccess":"ajaxError",[w,l,k?s:t]),q.fireWith(m,[w,y]),i&&(o.trigger("ajaxComplete",[w,l]),--n.active||n.event.trigger("ajaxStop")))}return w},getJSON:function(a,b,c){return 
n.get(a,b,c,"json")},getScript:function(a,b){return n.get(a,void 0,b,"script")}}),n.each(["get","post"],function(a,b){n[b]=function(a,c,d,e){return n.isFunction(c)&&(e=e||d,d=c,c=void 0),n.ajax(n.extend({url:a,type:b,dataType:e,data:c,success:d},n.isPlainObject(a)&&a))}}),n._evalUrl=function(a){return n.ajax({url:a,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,"throws":!0})},n.fn.extend({wrapAll:function(a){if(n.isFunction(a))return this.each(function(b){n(this).wrapAll(a.call(this,b))});if(this[0]){var b=n(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&1===a.firstChild.nodeType)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){return n.isFunction(a)?this.each(function(b){n(this).wrapInner(a.call(this,b))}):this.each(function(){var b=n(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=n.isFunction(a);return this.each(function(c){n(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){n.nodeName(this,"body")||n(this).replaceWith(this.childNodes)}).end()}});function Yb(a){return a.style&&a.style.display||n.css(a,"display")}function Zb(a){if(!n.contains(a.ownerDocument||d,a))return!0;while(a&&1===a.nodeType){if("none"===Yb(a)||"hidden"===a.type)return!0;a=a.parentNode}return!1}n.expr.filters.hidden=function(a){return l.reliableHiddenOffsets()?a.offsetWidth<=0&&a.offsetHeight<=0&&!a.getClientRects().length:Zb(a)},n.expr.filters.visible=function(a){return!n.expr.filters.hidden(a)};var $b=/%20/g,_b=/\[\]$/,ac=/\r?\n/g,bc=/^(?:submit|button|image|reset|file)$/i,cc=/^(?:input|select|textarea|keygen)/i;function dc(a,b,c,d){var e;if(n.isArray(b))n.each(b,function(b,e){c||_b.test(a)?d(a,e):dc(a+"["+("object"==typeof e&&null!=e?b:"")+"]",e,c,d)});else if(c||"object"!==n.type(b))d(a,b);else for(e in b)dc(a+"["+e+"]",b[e],c,d)}n.param=function(a,b){var 
c,d=[],e=function(a,b){b=n.isFunction(b)?b():null==b?"":b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};if(void 0===b&&(b=n.ajaxSettings&&n.ajaxSettings.traditional),n.isArray(a)||a.jquery&&!n.isPlainObject(a))n.each(a,function(){e(this.name,this.value)});else for(c in a)dc(c,a[c],b,e);return d.join("&").replace($b,"+")},n.fn.extend({serialize:function(){return n.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=n.prop(this,"elements");return a?n.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!n(this).is(":disabled")&&cc.test(this.nodeName)&&!bc.test(a)&&(this.checked||!Z.test(a))}).map(function(a,b){var c=n(this).val();return null==c?null:n.isArray(c)?n.map(c,function(a){return{name:b.name,value:a.replace(ac,"\r\n")}}):{name:b.name,value:c.replace(ac,"\r\n")}}).get()}}),n.ajaxSettings.xhr=void 0!==a.ActiveXObject?function(){return this.isLocal?ic():d.documentMode>8?hc():/^(get|post|head|put|delete|options)$/i.test(this.type)&&hc()||ic()}:hc;var ec=0,fc={},gc=n.ajaxSettings.xhr();a.attachEvent&&a.attachEvent("onunload",function(){for(var a in fc)fc[a](void 0,!0)}),l.cors=!!gc&&"withCredentials"in gc,gc=l.ajax=!!gc,gc&&n.ajaxTransport(function(b){if(!b.crossDomain||l.cors){var c;return{send:function(d,e){var f,g=b.xhr(),h=++ec;if(g.open(b.type,b.url,b.async,b.username,b.password),b.xhrFields)for(f in b.xhrFields)g[f]=b.xhrFields[f];b.mimeType&&g.overrideMimeType&&g.overrideMimeType(b.mimeType),b.crossDomain||d["X-Requested-With"]||(d["X-Requested-With"]="XMLHttpRequest");for(f in d)void 0!==d[f]&&g.setRequestHeader(f,d[f]+"");g.send(b.hasContent&&b.data||null),c=function(a,d){var f,i,j;if(c&&(d||4===g.readyState))if(delete fc[h],c=void 0,g.onreadystatechange=n.noop,d)4!==g.readyState&&g.abort();else{j={},f=g.status,"string"==typeof 
g.responseText&&(j.text=g.responseText);try{i=g.statusText}catch(k){i=""}f||!b.isLocal||b.crossDomain?1223===f&&(f=204):f=j.text?200:404}j&&e(f,i,j,g.getAllResponseHeaders())},b.async?4===g.readyState?a.setTimeout(c):g.onreadystatechange=fc[h]=c:c()},abort:function(){c&&c(void 0,!0)}}}});function hc(){try{return new a.XMLHttpRequest}catch(b){}}function ic(){try{return new a.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}}n.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(a){return n.globalEval(a),a}}}),n.ajaxPrefilter("script",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET",a.global=!1)}),n.ajaxTransport("script",function(a){if(a.crossDomain){var b,c=d.head||n("head")[0]||d.documentElement;return{send:function(e,f){b=d.createElement("script"),b.async=!0,a.scriptCharset&&(b.charset=a.scriptCharset),b.src=a.url,b.onload=b.onreadystatechange=function(a,c){(c||!b.readyState||/loaded|complete/.test(b.readyState))&&(b.onload=b.onreadystatechange=null,b.parentNode&&b.parentNode.removeChild(b),b=null,c||f(200,"success"))},c.insertBefore(b,c.firstChild)},abort:function(){b&&b.onload(void 0,!0)}}}});var jc=[],kc=/(=)\?(?=&|$)|\?\?/;n.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var a=jc.pop()||n.expando+"_"+Eb++;return this[a]=!0,a}}),n.ajaxPrefilter("json jsonp",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(kc.test(b.url)?"url":"string"==typeof b.data&&0===(b.contentType||"").indexOf("application/x-www-form-urlencoded")&&kc.test(b.data)&&"data");return h||"jsonp"===b.dataTypes[0]?(e=b.jsonpCallback=n.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(kc,"$1"+e):b.jsonp!==!1&&(b.url+=(Fb.test(b.url)?"&":"?")+b.jsonp+"="+e),b.converters["script json"]=function(){return g||n.error(e+" was not 
called"),g[0]},b.dataTypes[0]="json",f=a[e],a[e]=function(){g=arguments},d.always(function(){void 0===f?n(a).removeProp(e):a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,jc.push(e)),g&&n.isFunction(f)&&f(g[0]),g=f=void 0}),"script"):void 0}),n.parseHTML=function(a,b,c){if(!a||"string"!=typeof a)return null;"boolean"==typeof b&&(c=b,b=!1),b=b||d;var e=x.exec(a),f=!c&&[];return e?[b.createElement(e[1])]:(e=ja([a],b,f),f&&f.length&&n(f).remove(),n.merge([],e.childNodes))};var lc=n.fn.load;n.fn.load=function(a,b,c){if("string"!=typeof a&&lc)return lc.apply(this,arguments);var d,e,f,g=this,h=a.indexOf(" ");return h>-1&&(d=n.trim(a.slice(h,a.length)),a=a.slice(0,h)),n.isFunction(b)?(c=b,b=void 0):b&&"object"==typeof b&&(e="POST"),g.length>0&&n.ajax({url:a,type:e||"GET",dataType:"html",data:b}).done(function(a){f=arguments,g.html(d?n("<div>").append(n.parseHTML(a)).find(d):a)}).always(c&&function(a,b){g.each(function(){c.apply(this,f||[a.responseText,b,a])})}),this},n.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(a,b){n.fn[b]=function(a){return this.on(b,a)}}),n.expr.filters.animated=function(a){return n.grep(n.timers,function(b){return a===b.elem}).length};function mc(a){return n.isWindow(a)?a:9===a.nodeType?a.defaultView||a.parentWindow:!1}n.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=n.css(a,"position"),l=n(a),m={};"static"===k&&(a.style.position="relative"),h=l.offset(),f=n.css(a,"top"),i=n.css(a,"left"),j=("absolute"===k||"fixed"===k)&&n.inArray("auto",[f,i])>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),n.isFunction(b)&&(b=b.call(a,c,n.extend({},h))),null!=b.top&&(m.top=b.top-h.top+g),null!=b.left&&(m.left=b.left-h.left+e),"using"in b?b.using.call(a,m):l.css(m)}},n.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){n.offset.setOffset(this,a,b)});var b,c,d={top:0,left:0},e=this[0],f=e&&e.ownerDocument;if(f)return 
b=f.documentElement,n.contains(b,e)?("undefined"!=typeof e.getBoundingClientRect&&(d=e.getBoundingClientRect()),c=mc(f),{top:d.top+(c.pageYOffset||b.scrollTop)-(b.clientTop||0),left:d.left+(c.pageXOffset||b.scrollLeft)-(b.clientLeft||0)}):d},position:function(){if(this[0]){var a,b,c={top:0,left:0},d=this[0];return"fixed"===n.css(d,"position")?b=d.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),n.nodeName(a[0],"html")||(c=a.offset()),c.top+=n.css(a[0],"borderTopWidth",!0),c.left+=n.css(a[0],"borderLeftWidth",!0)),{top:b.top-c.top-n.css(d,"marginTop",!0),left:b.left-c.left-n.css(d,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent;while(a&&!n.nodeName(a,"html")&&"static"===n.css(a,"position"))a=a.offsetParent;return a||Qa})}}),n.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(a,b){var c=/Y/.test(b);n.fn[a]=function(d){return Y(this,function(a,d,e){var f=mc(a);return void 0===e?f?b in f?f[b]:f.document.documentElement[d]:a[d]:void(f?f.scrollTo(c?n(f).scrollLeft():e,c?e:n(f).scrollTop()):a[d]=e)},a,d,arguments.length,null)}}),n.each(["top","left"],function(a,b){n.cssHooks[b]=Ua(l.pixelPosition,function(a,c){return c?(c=Sa(a,b),Oa.test(c)?n(a).position()[b]+"px":c):void 0})}),n.each({Height:"height",Width:"width"},function(a,b){n.each({
padding:"inner"+a,content:b,"":"outer"+a},function(c,d){n.fn[d]=function(d,e){var f=arguments.length&&(c||"boolean"!=typeof d),g=c||(d===!0||e===!0?"margin":"border");return Y(this,function(b,c,d){var e;return n.isWindow(b)?b.document.documentElement["client"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body["scroll"+a],e["scroll"+a],b.body["offset"+a],e["offset"+a],e["client"+a])):void 0===d?n.css(b,c,g):n.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),n.fn.extend({bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,"**"):this.off(b,a||"**",c)}}),n.fn.size=function(){return this.length},n.fn.andSelf=n.fn.addBack,"function"==typeof define&&define.amd&&define("jquery",[],function(){return n});var nc=a.jQuery,oc=a.$;return n.noConflict=function(b){return a.$===n&&(a.$=oc),b&&a.jQuery===n&&(a.jQuery=nc),n},b||(a.jQuery=a.$=n),n}); | PypiClean |
/Helmholtz-0.2.0.tar.gz/Helmholtz-0.2.0/helmholtz/experiment/templatetags/experiments.py | from django import template
from django.db.models import Count
from helmholtz.core.schema import get_subclasses_recursively
from helmholtz.species.models import Species
from helmholtz.preparations.models import Preparation, Animal
from helmholtz.experiment.models import Experiment
from helmholtz.recording.models import ProtocolRecording
from helmholtz.stimulation.models import StimulationType
from helmholtz.equipment.models import DeviceConfiguration
# Module-level tag library; Django discovers this name when the templatetag
# module is loaded via {% load experiments %}.
register = template.Library()
@register.inclusion_tag('experiments_history.html', takes_context=True)
def experiment_list(context, object_list, lab):
    """Render the paginated experiment history list for a lab.

    Note: the rendered object list is taken from ``context['object_list']``;
    the ``object_list`` parameter itself is not read (kept for template
    compatibility).
    """
    return {
        'object_list': context['object_list'],
        'lab': lab,
        'MEDIA_URL': context['MEDIA_URL'],
        'is_paginated': context['is_paginated'],
        'page_range': context['page_range'],
        'page': context['page'],
    }
# having_male_animals = qset.filter(preparation__animal__sex='M').annotate(n_animals=Count("preparation__animal", distinct=True))
@register.inclusion_tag('experiments_statistics.html', takes_context=True)
def experiments_statistics(context):
    """Build the context for the global experiments statistics panel.

    Aggregates counts (experiments, blocks, protocols, files, signals,
    preparations, animals, ...) over the whole database, plus breakdowns by
    stimulation type, species, preparation type and acquisition method.
    """
    _context = dict()
    _context.update({
        'MEDIA_URL':context['MEDIA_URL'],
    })
    # count stimulation types
    # NOTE(review): len() on a queryset evaluates it entirely; acceptable here
    # because the number of stimulation types is small.
    stim_types = StimulationType.objects.filter(protocolrecording__isnull=False).annotate(
        n_experiments=Count("protocolrecording__block__experiment", distinct=True),
        # n_blocks=Count("protocolrecording__block", distinct=True),
        # n_protocols=Count("protocolrecording", distinct=True)
    ).values('name', 'n_experiments')
    _context['n_stim_types'] = len(stim_types)
    _context['stim_types'] = stim_types
    qset = Experiment.objects.all()
    _context['n_experiments'] = len(qset)
    # One aggregate query producing all of the global counters at once.
    experiments = qset.aggregate(
        n_experiments=Count("id", distinct=True),
        n_blocks=Count("recordingblock", distinct=True),
        n_protocols=Count("recordingblock__protocolrecording", distinct=True),
        n_researchers=Count("researchers", distinct=True),
        n_files=Count("recordingblock__protocolrecording__file", distinct=True),
        n_signals=Count("recordingblock__protocolrecording__file__signal", distinct=True),
        n_preparations=Count("preparation", distinct=True),
        n_animals=Count("preparation__animal", distinct=True),
        n_invivo=Count("preparation__invivopreparation__id", distinct=True),
        n_invitro_slices=Count("preparation__invitroslice__id", distinct=True),
        n_invitro_cultures=Count("preparation__invitroculture__id", distinct=True),
    )
    _context.update(experiments)
    # count male animals
    # n_male_animals = qset.filter(preparation__animal__sex='M').count()
    # _context['n_male_animals'] = n_male_animals
    # count female animals
    # n_female_animals = qset.filter(preparation__animal__sex='F').count()
    # _context['n_female_animals'] = n_female_animals
    # count species (only species that actually have animals via a strain)
    species = Species.objects.filter(strain__animal__isnull=False).annotate(
        n_animals=Count("strain__animal")
    ).values('english_name', 'url', 'n_animals')
    _context['species'] = species
    # count preparation types, keyed by the subclass's verbose name
    prep_types = dict()
    subclasses = Preparation.__subclasses__()
    for subclass in subclasses :
        count = subclass.objects.count()
        if count :
            prep_types[subclass._meta.verbose_name] = count
    _context['prep_types'] = prep_types
    # define acquisition method, i.e. sharp or patch
    # Only DeviceConfiguration subclasses exposing a `method` attribute count.
    methods = dict()
    subclasses = get_subclasses_recursively(DeviceConfiguration, strict=True)
    for subclass in subclasses :
        if hasattr(subclass, 'method'):
            count = subclass.objects.filter(recordingconfiguration__block__experiment__id__isnull=False).count()
            if count :
                methods[subclass.method] = count
    _context['methods'] = methods
    _context['n_methods'] = len(methods)
    return _context
@register.inclusion_tag('experiment_statistics.html', takes_context=True)
def experiment_statistics(context, experiment):
    """Build the context for a single experiment's statistics panel.

    Counts stimulation types, recording blocks, protocols and acquisition
    methods restricted to the given *experiment*.
    """
    _context = dict()
    # count stimulation types used within this experiment
    stim_types = StimulationType.objects.filter(protocolrecording__block__experiment=experiment).annotate(
        n_experiments=Count("protocolrecording__block__experiment", distinct=True),
    ).values('name', 'n_experiments')
    _context['n_stim_types'] = len(stim_types)
    _context['stim_types'] = stim_types
    # count methods (DeviceConfiguration subclasses that declare a `method`)
    methods = dict()
    subclasses = get_subclasses_recursively(DeviceConfiguration, strict=True)
    for subclass in subclasses :
        if hasattr(subclass, 'method'):
            count = subclass.objects.filter(recordingconfiguration__block__experiment=experiment).count()
            if count :
                methods[subclass.method] = count
    _context.update({
        'MEDIA_URL':context['MEDIA_URL'],
        'experiment':experiment,
        'n_blocks':experiment.recordingblock_set.count(),
        'n_protocols':experiment.protocols.count(),
        'methods':methods,
        'n_methods':len(methods)
    })
    return _context
@register.inclusion_tag('experiment_perfusions.html', takes_context=True)
def experiment_perfusions(context, perfusions):
    """Render the perfusions panel for an experiment."""
    return {
        'MEDIA_URL': context['MEDIA_URL'],
        'perfusions': perfusions,
    }
@register.inclusion_tag('experiment_injections.html', takes_context=True)
def experiment_injections(context, injections):
    """Render the injections panel for an experiment."""
    return {
        'MEDIA_URL': context['MEDIA_URL'],
        'injections': injections,
    }
/Mopidy-Touchscreen-1.0.0.tar.gz/Mopidy-Touchscreen-1.0.0/mopidy_touchscreen/screen_manager.py | import logging
import traceback
from graphic_utils import DynamicBackground, \
ScreenObjectsManager, TouchAndTextItem
from input import InputManager
from pkg_resources import Requirement, resource_filename
import pygame
from screens import BaseScreen, Keyboard, LibraryScreen, MainScreen, MenuScreen,\
PlaylistScreen, SearchScreen, Tracklist
logger = logging.getLogger(__name__)
# Index of each screen inside ScreenManager.screens. The same integer is the
# suffix of the corresponding "menu_<n>" bottom-bar button key.
search_index = 0
main_screen_index = 1
tracklist_index = 2
library_index = 3
playlist_index = 4
menu_index = 5
class ScreenManager():
    """Central UI coordinator: owns the pygame drawing loop, the bottom menu
    bar and the list of application screens, and forwards Mopidy events
    (volume, playback state, tracklist, ...) to the relevant screen.
    """
    def __init__(self, size, core, cache, resolution_factor):
        # size: (width, height) of the display surface in pixels.
        # core: Mopidy core API proxy.
        # cache: image cache used by MainScreen for cover art.
        # resolution_factor: divisor of the height used to derive base_size.
        self.core = core
        self.cache = cache
        self.fonts = {}
        self.background = None
        self.current_screen = library_index
        # Init variables in init
        self.base_size = None
        self.size = None
        self.screens = None
        self.track = None
        self.input_manager = InputManager(size)
        self.down_bar_objects = ScreenObjectsManager()
        self.down_bar = None
        self.keyboard = None
        self.update_type = BaseScreen.update_all
        self.resolution_factor = resolution_factor
        self.init_manager(size)
    def init_manager(self, size):
        """(Re)build fonts, screens and the bottom bar for the given size.

        Also called from resize(), so everything size-dependent lives here.
        """
        self.size = size
        self.background = DynamicBackground(self.size)
        # base_size is the height of one UI row (bottom bar, buttons, ...).
        self.base_size = self.size[1] / self.resolution_factor
        font = resource_filename(
            Requirement.parse("mopidy-touchscreen"),
            "mopidy_touchscreen/icomoon.ttf")
        self.fonts['base'] = pygame.font.SysFont("arial",
                                                 int(self.base_size*0.9))
        # 'icon' uses the bundled icomoon font; the \ue6xx glyphs below come
        # from this font.
        self.fonts['icon'] = pygame.font.Font(font, int(self.base_size*0.9))
        try:
            # Order must match the *_index module constants.
            self.screens = [
                SearchScreen(size, self.base_size, self, self.fonts),
                MainScreen(size, self.base_size, self, self.fonts,
                           self.cache, self.core, self.background),
                Tracklist(size, self.base_size, self, self.fonts),
                LibraryScreen(size, self.base_size, self, self.fonts),
                PlaylistScreen(size,
                               self.base_size, self, self.fonts),
                MenuScreen(size, self.base_size, self, self.fonts, self.core)]
        except:
            # NOTE(review): bare except keeps the UI alive on a broken screen
            # but leaves self.screens possibly None — later indexing would
            # then fail. Consider narrowing.
            traceback.print_exc()
        self.track = None
        # Menu buttons
        button_size = (self.size[0] / 6, self.base_size)
        # Search button
        button = TouchAndTextItem(self.fonts['icon'], u" \ue986",
                                  (0, self.size[1] - self.base_size),
                                  button_size, center=True)
        self.down_bar_objects.set_touch_object("menu_0", button)
        x = button.get_right_pos()
        # Main button
        button = TouchAndTextItem(self.fonts['icon'], u" \ue600",
                                  (x, self.size[1] - self.base_size),
                                  button_size, center=True)
        self.down_bar_objects.set_touch_object("menu_1", button)
        x = button.get_right_pos()
        # Tracklist button
        button = TouchAndTextItem(self.fonts['icon'], u" \ue60d",
                                  (x, self.size[1] - self.base_size),
                                  button_size, center=True)
        self.down_bar_objects.set_touch_object("menu_2", button)
        x = button.get_right_pos()
        # Library button
        button = TouchAndTextItem(self.fonts['icon'], u" \ue604",
                                  (x, self.size[1] - self.base_size),
                                  button_size, center=True)
        self.down_bar_objects.set_touch_object("menu_3", button)
        x = button.get_right_pos()
        # Playlist button
        button = TouchAndTextItem(self.fonts['icon'], u" \ue605",
                                  (x, self.size[1] - self.base_size),
                                  button_size, center=True)
        self.down_bar_objects.set_touch_object("menu_4", button)
        x = button.get_right_pos()
        # Menu button
        button = TouchAndTextItem(self.fonts['icon'], u" \ue60a",
                                  (x, self.size[1] - self.base_size),
                                  button_size,
                                  center=True)
        self.down_bar_objects.set_touch_object("menu_5", button)
        # Down bar: semi-transparent black strip behind the menu buttons.
        self.down_bar = pygame.Surface(
            (self.size[0], self.size[1] - self.base_size),
            pygame.SRCALPHA)
        self.down_bar.fill((0, 0, 0, 200))
        # Seed the screens with the current Mopidy state.
        self.options_changed()
        self.mute_changed(self.core.playback.mute.get())
        playback_state = self.core.playback.state.get()
        self.playback_state_changed(playback_state,
                                    playback_state)
        self.screens[menu_index].check_connection()
        self.change_screen(self.current_screen)
        self.update_type = BaseScreen.update_all
    def get_update_type(self):
        """Decide how much redrawing the next frame needs.

        A pending update_all request is consumed (reset to no_update) the
        first time it is read.
        """
        if self.update_type == BaseScreen.update_all:
            self.update_type = BaseScreen.no_update
            return BaseScreen.update_all
        else:
            if self.keyboard:
                # The keyboard overlay is static; nothing to redraw.
                return BaseScreen.no_update
            else:
                if self.background.should_update():
                    return BaseScreen.update_all
                else:
                    if self.screens[self.current_screen].should_update():
                        return BaseScreen.update_partial
                    else:
                        return BaseScreen.no_update
    def update(self, screen):
        """Render one frame onto *screen* (the pygame display surface)."""
        update_type = self.get_update_type()
        if update_type != BaseScreen.no_update:
            # rects collects the dirty rectangles for partial updates.
            rects = []
            surface = self.background.draw_background()
            if self.keyboard:
                self.keyboard.update(surface)
            else:
                self.screens[self.current_screen].\
                    update(surface, update_type, rects)
            surface.blit(self.down_bar, (0, self.size[1] - self.base_size))
            self.down_bar_objects.render(surface)
            if update_type == BaseScreen.update_all or len(rects) < 1:
                screen.blit(surface, (0, 0))
                pygame.display.flip()
            else:
                # Only re-blit and flip the dirty areas.
                for rect in rects:
                    screen.blit(surface, rect, area=rect)
                pygame.display.update(rects)
    def track_started(self, track):
        """Mopidy callback: a new track started playing."""
        self.track = track
        self.screens[main_screen_index].track_started(track.track)
        self.screens[tracklist_index].track_started(track)
    def track_playback_ended(self, tl_track, time_position):
        """Mopidy callback: the current track stopped playing."""
        self.screens[main_screen_index].track_playback_ended(
            tl_track, time_position)
    def event(self, event):
        """Route a raw pygame event through InputManager to the UI."""
        event = self.input_manager.event(event)
        if event is not None:
            if self.keyboard is not None:
                self.keyboard.touch_event(event)
            elif not self.manage_event(event):
                # Not handled globally: give it to the active screen.
                self.screens[self.current_screen].touch_event(event)
            self.update_type = BaseScreen.update_all
    def manage_event(self, event):
        """Handle global events (bottom bar clicks, left/right keys).

        Returns True if the event was consumed here.
        """
        if event.type == InputManager.click:
            objects = \
                self.down_bar_objects.get_touch_objects_in_pos(
                    event.current_pos)
            return self.click_on_objects(objects, event)
        else:
            if event.type == InputManager.key and not event.longpress:
                dir = event.direction
                if dir == InputManager.right or dir == InputManager.left:
                    # Let the screen handle horizontal navigation first;
                    # otherwise switch to the neighbouring screen.
                    if not self.screens[self.current_screen]\
                            .change_screen(dir):
                        if dir == InputManager.right:
                            self.change_screen(self.current_screen+1)
                        else:
                            self.change_screen(self.current_screen-1)
                    return True
            return False
    def volume_changed(self, volume):
        """Mopidy callback: playback volume changed."""
        self.screens[main_screen_index].volume_changed(volume)
        self.update_type = BaseScreen.update_all
    def playback_state_changed(self, old_state, new_state):
        """Mopidy callback: playback state changed (playing/paused/...)."""
        self.screens[main_screen_index].playback_state_changed(
            old_state, new_state)
        self.update_type = BaseScreen.update_all
    def mute_changed(self, mute):
        """Mopidy callback: mute flag changed."""
        self.screens[main_screen_index].mute_changed(mute)
        self.update_type = BaseScreen.update_all
    def tracklist_changed(self):
        """Mopidy callback: the tracklist changed."""
        self.screens[tracklist_index].tracklist_changed()
        self.update_type = BaseScreen.update_all
    def options_changed(self):
        """Mopidy callback: playback options (repeat/shuffle/...) changed."""
        self.screens[menu_index].options_changed()
        self.update_type = BaseScreen.update_all
    def change_screen(self, new_screen):
        """Switch the active screen and update the bottom bar highlight."""
        if new_screen > -1 and new_screen < len(self.screens):
            self.down_bar_objects.get_touch_object(
                "menu_" + str(self.current_screen)).set_active(False)
            self.current_screen = new_screen
            self.down_bar_objects.get_touch_object(
                "menu_" + str(new_screen)).set_active(True)
            self.update_type = BaseScreen.update_all
    def click_on_objects(self, objects, event):
        """Handle clicks on bottom-bar buttons ("menu_<n>" keys)."""
        if objects is not None:
            for key in objects:
                # Keys are "menu_<digit>"; the last char is the screen index.
                if key[:-1] == "menu_":
                    self.change_screen(int(key[-1:]))
                    return True
        return False
    def playlists_loaded(self):
        """Mopidy callback: playlists finished loading."""
        self.screens[playlist_index].playlists_loaded()
        self.update_type = BaseScreen.update_all
    def search(self, query, mode):
        """Forward a search request to the search screen."""
        self.screens[search_index].search(query, mode)
        self.update_type = BaseScreen.update_all
    def resize(self, event):
        """Rebuild the whole UI for a new window size."""
        self.init_manager(event.size)
        self.update_type = BaseScreen.update_all
    def open_keyboard(self, input_listener):
        """Show the on-screen keyboard; *input_listener* receives the text."""
        self.keyboard = Keyboard(self.size, self.base_size, self,
                                 self.fonts, input_listener)
        self.update_type = BaseScreen.update_all
    def close_keyboard(self):
        """Hide the on-screen keyboard."""
        self.keyboard = None
        self.update_type = BaseScreen.update_all
/AoikRegistryEditor-0.1.0-py3-none-any.whl/aoikregistryeditor/mediator.py | from __future__ import absolute_import
from argparse import ArgumentParser
import sys
from tkinter import Tk
from traceback import format_exc
from .aoikimportutil import load_obj
from .registry_editor import RegistryEditor
from .tkinterutil.label import LabelVidget
#
def get_cmdargs_parser():
    """
    Create command arguments parser.
    @return: Command arguments parser.
    """
    # Default URIs for the three pluggable hooks (menu config object,
    # UI config function, field editor factory).
    menu_conf_default = 'aoikregistryeditor.menu_config::MENU_CONFIG'
    ui_conf_default = 'aoikregistryeditor.ui_config::configure_ui'
    factory_default = \
        'aoikregistryeditor.field_editor_config::field_editor_factory'
    parser = ArgumentParser()
    # `-m/--menu-conf`: URI of the menu config object.
    parser.add_argument(
        '-m', '--menu-conf',
        dest='menu_config_uri',
        default=menu_conf_default,
        metavar='MENU_CONF',
        help='Menu config object URI. Default is `{}`.'.format(
            menu_conf_default
        ),
    )
    # `--menu-conf-default`: print the default menu config module and exit.
    parser.add_argument(
        '--menu-conf-default',
        dest='print_menu_conf_default',
        action='store_true',
        help='Print default menu config module.',
    )
    # `-u/--ui-conf`: URI of the UI config function.
    parser.add_argument(
        '-u', '--ui-conf',
        dest='ui_config_func_uri',
        default=ui_conf_default,
        metavar='UI_CONF',
        help='UI config function URI. Default is `{}`.'.format(
            ui_conf_default
        ),
    )
    # `--ui-conf-default`: print the default UI config module and exit.
    parser.add_argument(
        '--ui-conf-default',
        dest='print_ui_conf_default',
        action='store_true',
        help='Print default UI config module.',
    )
    # `-f/--field-editor`: URI of the field editor factory.
    parser.add_argument(
        '-f', '--field-editor',
        dest='field_editor_factory_uri',
        default=factory_default,
        metavar='FACTORY',
        help='Field editor factory URI. Default is `{}`.'.format(
            factory_default
        ),
    )
    # `--field-editor-default`: print the default factory config module.
    parser.add_argument(
        '--field-editor-default',
        dest='print_field_editor_config_default',
        action='store_true',
        help='Print default field editor factory config module.',
    )
    return parser
#
def main_core(args=None, step_func=None):
    """
    The main function that implements the core functionality.
    @param args: Command arguments list. Defaults to ``sys.argv[1:]``.
    @param step_func: A function to set step information for the upper context.
        Required; called as ``step_func(title=...)`` before each step so that
        the caller can report which step failed.
    @return: Exit code (implicitly None when the GUI exits normally or a
        ``--*-default`` print option was handled).
    """
    # If step function is not given
    if step_func is None:
        # Raise error
        raise ValueError('Argument `step_func` is not given')
    # If step function is given.
    # Set step info
    step_func(title='Parse command arguments')
    # Create command arguments parser
    args_parser = get_cmdargs_parser()
    # If arguments are not given
    if args is None:
        # Use command arguments
        args = sys.argv[1:]
    # Parse command arguments
    args = args_parser.parse_args(args)
    # If print default menu config module
    if args.print_menu_conf_default:
        # Set step info
        step_func(title='Print default menu config module')
        # Import default menu config module
        from . import menu_config as config_module
        # Print default menu config module's content
        # NOTE(review): the file handle is not closed explicitly; harmless
        # for a short-lived CLI path but a `with` block would be cleaner.
        sys.stdout.write(open(config_module.__file__).read())
        # Exit
        return
    # If not print default menu config module.
    # If print default UI config module
    if args.print_ui_conf_default:
        # Set step info
        step_func(title='Print default UI config module')
        # Import default UI config module
        from . import ui_config as config_module
        # Print default UI config module's content
        sys.stdout.write(open(config_module.__file__).read())
        # Exit
        return
    # If not print default UI config module.
    # If print default field editor factory config module
    if args.print_field_editor_config_default:
        # Set step info
        step_func(title='Print default field editor factory config module')
        # Import default field editor config module
        from . import field_editor_config as config_module
        # Print default field editor config module's content
        sys.stdout.write(open(config_module.__file__).read())
        # Exit
        return
    # If not print default field editor factory config module.
    # Set step info
    step_func(title='Create TK root')
    # Create TK root
    tk = Tk()
    # Add window title
    tk.title('AoikRegistryEditor')
    # Set step info
    step_func(title='Create status bar label')
    # Create status bar label
    status_bar_label = LabelVidget(master=tk)
    # Create status bar set function
    def status_bar_set(text):
        status_bar_label.config(text=text)
    # Set step info
    step_func(title='Load field editor factory')
    # Get field editor factory function URI
    field_editor_factory_uri = args.field_editor_factory_uri
    # Load field editor factory function
    field_editor_config_module, field_editor_factory = load_obj(
        field_editor_factory_uri,
        mod_name='aoikregistryeditor._field_editor_config',
        retn_mod=True,
    )
    # Set step info
    step_func(title='Create registry editor')
    # Create registry editor
    editor = RegistryEditor(
        field_editor_factory=field_editor_factory,
        status_bar_set=status_bar_set,
        master=tk,
    )
    # Set step info
    step_func(title='Load menu config')
    # Get menu config URI
    menu_config_uri = args.menu_config_uri
    # Load menu config
    menu_config_module, menu_config = load_obj(
        menu_config_uri,
        mod_name='aoikregistryeditor._menu_config',
        retn_mod=True,
    )
    # Set step info
    step_func(title='Create menu tree')
    # Create menu tree
    menutree = editor.menutree_create(specs=menu_config)
    # Set step info
    step_func(title='Add menu tree to root window')
    # Add the menu tree's top menu to root window
    tk.config(menu=menutree.menu_top())
    # Set step info
    step_func(title='Get UI config info dict')
    # Get UI config info dict.
    # Exposes the editor's private widgets so the user's UI config function
    # can style or rearrange them.
    ui_info = dict(
        tk=tk,
        menutree=menutree,
        status_bar_label=status_bar_label,
        editor=editor,
        path_bar_label=editor._path_bar_label,
        path_bar=editor._path_bar,
        child_keys_labelframe=editor._child_keys_labelframe,
        child_keys_listbox=editor._child_keys_listbox,
        fields_labelframe=editor._fields_labelframe,
        fields_listbox=editor._fields_listbox,
        field_editor_labelframe=editor._field_editor_labelframe,
        field_add_label=editor._field_add_label,
        field_del_label=editor._field_del_label,
        field_load_label=editor._field_load_label,
        field_save_label=editor._field_save_label,
        field_add_dialog=editor._field_add_dialog,
    )
    # Set step info
    step_func(title='Load UI config function')
    # Get UI config function URI
    ui_config_func_uri = args.ui_config_func_uri
    # Load UI config function
    ui_config_module, ui_config_func = load_obj(
        ui_config_func_uri,
        mod_name='aoikregistryeditor._ui_config',
        retn_mod=True,
    )
    # Set step info
    step_func(title='Call UI config function')
    # Call UI config function
    ui_config_func(ui_info)
    # Set step info
    step_func(title='Run TK event loop')
    # Run TK event loop (blocks until the window is closed)
    tk.mainloop()
#
def main_wrap(args=None):
    """
    The main function that provides exception handling.
    Call "main_core" to implement the core functionality.
    @param args: Command arguments list. Defaults to ``sys.argv[1:]``.
    @return: Exit code. 0 on success or keyboard interrupt; non-zero when an
        exception escaped `main_core`.
    """
    # A dict that contains step info
    step_info = {
        'title': '',
        'exit_code': 0
    }
    # A function that updates step info
    def step_func(title=None, exit_code=None):
        # If title is not None
        if title is not None:
            # Update title
            step_info['title'] = title
        # If exit code is not None
        if exit_code is not None:
            # Update exit code
            step_info['exit_code'] = exit_code
    #
    try:
        # Call "main_core" to implement the core functionality
        return main_core(args=args, step_func=step_func)
    # Catch keyboard interrupt
    except KeyboardInterrupt:
        # Return without error
        return 0
    # Catch other exceptions
    except Exception:
        # Get step title
        step_title = step_info.get('title', '')
        # Get traceback
        tb_msg = format_exc()
        # If step title is not empty
        if step_title:
            # Get message
            msg = '# Error: {}\n---\n{}---\n'.format(step_title, tb_msg)
        else:
            # Get message
            msg = '# Error\n---\n{}---\n'.format(tb_msg)
        # Output message
        sys.stderr.write(msg)
        # Get exit code recorded via `step_func`, if any.
        # Bug fix: `exit_code` is initialized to 0 and `main_core` never sets
        # it, so the error path used to report success (0) to the shell and
        # the `.get(..., 1)` fallback was dead. Fall back to 1 whenever no
        # non-zero code was recorded.
        exit_code = step_info.get('exit_code') or 1
        # Return exit code
        return exit_code
/ACME%20oneM2M%20CSE-0.3.0.tar.gz/ACME oneM2M CSE-0.3.0/acme/Importer.py |
import json, os, fnmatch
from Utils import *
from Configuration import Configuration
from Constants import Constants as C
import CSE
from Logging import Logging
from resources import Resource
class Importer(object):
    """Imports initial resources (CSE base, ACPs, etc.) from JSON files into
    the CSE's resource tree. Importing only happens when the resource
    database is empty; otherwise only the CSE identifiers are re-read into
    the configuration.
    """
    # List of "priority" resources that must be imported first for correct CSE operation
    _firstImporters = [ 'csebase.json', 'acp.admin.json', 'acp.default.json' ]
    def __init__(self):
        Logging.log('Importer initialized')
    def importResources(self, path=None):
        """Import resource files from *path* (defaults to the configured
        `cse.resourcesPath`). Returns True on success, False otherwise.
        """
        # Only import when the DB is empty; otherwise skip importing.
        if CSE.dispatcher.countResources() > 0:
            Logging.log('Resources already imported, skipping importing')
            # But we still need the CSI etc of the CSE
            rss = CSE.dispatcher.retrieveResourcesByType(C.tCSEBase)
            if rss is not None:
                Configuration.set('cse.csi', rss[0]['csi'])
                Configuration.set('cse.ri', rss[0]['ri'])
                Configuration.set('cse.rn', rss[0]['rn'])
                return True
            Logging.logErr('CSE not found')
            return False
        # get the originator for the creator attribute of imported resources
        originator = Configuration.get('cse.originator')
        # Import
        if path is None:
            if Configuration.has('cse.resourcesPath'):
                path = Configuration.get('cse.resourcesPath')
            else:
                Logging.logErr('cse.resourcesPath not set')
                raise RuntimeError('cse.resourcesPath not set')
        if not os.path.exists(path):
            Logging.logWarn('Import directory does not exist: %s' % path)
            return False
        Logging.log('Importing resources from directory: %s' % path)
        self._prepareImporting()
        # first import the priority resources, like CSE, Admin ACP, Default ACP
        hasCSE = False
        hasACP = False
        for rn in self._firstImporters:
            fn = path + '/' + rn
            if os.path.exists(fn):
                Logging.log('Importing resource: %s ' % fn)
                with open(fn) as jfile:
                    r = resourceFromJSON(json.load(jfile), create=True)
                    # Check resource creation
                    if not CSE.registration.checkResourceCreation(r, originator):
                        continue
                    CSE.dispatcher.createResource(r)
                    ty = r.ty
                    if ty == C.tCSEBase:
                        # Remember the CSE identifiers for later use.
                        Configuration.set('cse.csi', r.csi)
                        Configuration.set('cse.ri', r.ri)
                        Configuration.set('cse.rn', r.rn)
                        hasCSE = True
                    elif ty == C.tACP:
                        hasACP = True
        # Check presence of CSE and at least one ACP
        if not (hasCSE and hasACP):
            Logging.logErr('CSE and/or default ACP missing during import')
            self._finishImporting()
            return False
        # then get the filenames of all other files and sort them. Process them in order
        filenames = sorted(os.listdir(path))
        for fn in filenames:
            if fn not in self._firstImporters:
                Logging.log('Importing resource from file: %s' % fn)
                with open(path + '/' + fn) as jfile:
                    # update an existing resource
                    # (filenames containing 'update' hold a single-key dict
                    # whose value carries the target resource's 'ri')
                    if 'update' in fn:
                        j = json.load(jfile)
                        keys = list(j.keys())
                        if len(keys) == 1 and (k := keys[0]) and 'ri' in j[k] and (ri := j[k]['ri']) is not None:
                            (r, _) = CSE.dispatcher.retrieveResource(ri)
                            if r is not None:
                                CSE.dispatcher.updateResource(r, j)
                    # create a new resource
                    else:
                        r = resourceFromJSON(json.load(jfile), create=True)
                        # Try to get parent resource
                        if r is not None:
                            parent = None
                            if (pi := r.pi) is not None:
                                (parent, _) = CSE.dispatcher.retrieveResource(pi)
                            # Check resource creation
                            if not CSE.registration.checkResourceCreation(r, originator):
                                continue
                            # Add the resource
                            CSE.dispatcher.createResource(r, parent)
                        else:
                            Logging.logWarn('Unknown resource in file: %s' % fn)
        self._finishImporting()
        return True
    def _prepareImporting(self):
        """Disable ACP checks for the duration of the import."""
        # temporarily disable access control
        self._oldacp = Configuration.get('cse.enableACPChecks')
        Configuration.set('cse.enableACPChecks', False)
    def _finishImporting(self):
        """Restore the ACP-check setting saved by _prepareImporting()."""
        Configuration.set('cse.enableACPChecks', self._oldacp)
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/mobile/IconItem.js.uncompressed.js | define("dojox/mobile/IconItem", [
"dojo/_base/kernel",
"dojo/_base/array",
"dojo/_base/declare",
"dojo/_base/lang",
"dojo/_base/sniff",
"dojo/_base/window",
"dojo/dom-attr",
"dojo/dom-class",
"dojo/dom-construct",
"dojo/dom-style",
"dijit/registry", // registry.byId
"./common",
"./_ItemBase",
"./TransitionEvent"
], function(dojo, array, declare, lang, has, win, domAttr, domClass, domConstruct, domStyle, registry, common, ItemBase, TransitionEvent){
/*=====
var ItemBase = dojox.mobile._ItemBase;
=====*/
// module:
// dojox/mobile/IconItem
// summary:
// An icon item widget.
return declare("dojox.mobile.IconItem", ItemBase, {
// summary:
// An icon item widget.
// description:
// IconItem represents an item that has an application component
// and its icon image. You can tap the icon to open the
// corresponding application component. You can also use the icon
// to move to a different view by specifying either of the moveTo,
// href or url parameters.
// lazy: String
// If true, the content of the item, which includes dojo markup, is
// instantiated lazily. That is, only when the icon is opened by
// the user, the required modules are loaded and dojo widgets are
// instantiated.
lazy: false,
// requires: String
// Comma-separated required module names to be loaded. All the
// modules specified with dojoType and their depending modules are
// automatically loaded by the IconItem. If you need other extra
// modules to be loaded, use this parameter. If lazy is true, the
// specified required modules are loaded when the user opens the
// icon for the first time.
requires: "",
// timeout: String
// Duration of highlight in seconds.
timeout: 10,
// closeBtnClass: String
// A class name of a DOM button to be used as a close button.
closeBtnClass: "mblDomButtonBlueMinus",
// closeBtnProp: String
// Properties for the close button.
closeBtnProp: null,
templateString: '<div class="mblIconArea" dojoAttachPoint="iconDivNode">'+
'<div><img src="${icon}" dojoAttachPoint="iconNode"></div><span dojoAttachPoint="labelNode1"></span>'+
'</div>',
templateStringSub: '<li class="mblIconItemSub" lazy="${lazy}" style="display:none;" dojoAttachPoint="contentNode">'+
'<h2 class="mblIconContentHeading" dojoAttachPoint="closeNode">'+
'<div class="${closeBtnClass}" style="position:absolute;left:4px;top:2px;" dojoAttachPoint="closeIconNode"></div><span dojoAttachPoint="labelNode2"></span>'+
'</h2>'+
'<div class="mblContent" dojoAttachPoint="containerNode"></div>'+
'</li>',
createTemplate: function(s){
array.forEach(["lazy","icon","closeBtnClass"], function(v){
while(s.indexOf("${"+v+"}") != -1){
s = s.replace("${"+v+"}", this[v]);
}
}, this);
var div = win.doc.createElement("DIV");
div.innerHTML = s;
/*
array.forEach(query("[dojoAttachPoint]", domNode), function(node){
this[node.getAttribute("dojoAttachPoint")] = node;
}, this);
*/
var nodes = div.getElementsByTagName("*");
var i, len, s1;
len = nodes.length;
for(i = 0; i < len; i++){
s1 = nodes[i].getAttribute("dojoAttachPoint");
if(s1){
this[s1] = nodes[i];
}
}
if(this.closeIconNode && this.closeBtnProp){
domAttr.set(this.closeIconNode, this.closeBtnProp);
}
var domNode = div.removeChild(div.firstChild);
div = null;
return domNode;
},
buildRendering: function(){
this.inheritParams();
var node = this.createTemplate(this.templateString);
this.subNode = this.createTemplate(this.templateStringSub);
this.subNode._parentNode = this.domNode; // [custom property]
this.domNode = this.srcNodeRef || domConstruct.create("LI");
domClass.add(this.domNode, "mblIconItem");
if(this.srcNodeRef){
// reparent
for(var i = 0, len = this.srcNodeRef.childNodes.length; i < len; i++){
this.containerNode.appendChild(this.srcNodeRef.firstChild);
}
}
this.domNode.appendChild(node);
},
postCreate: function(){
common.createDomButton(this.closeIconNode, {
top: "-2px",
left: "1px"
});
this.connect(this.iconNode, "onmousedown", "onMouseDownIcon");
this.connect(this.iconNode, "onclick", "iconClicked");
this.connect(this.closeIconNode, "onclick", "closeIconClicked");
this.connect(this.iconNode, "onerror", "onError");
},
highlight: function(){
// summary:
// Shakes the icon 10 seconds.
domClass.add(this.iconDivNode, "mblVibrate");
if(this.timeout > 0){
var _this = this;
setTimeout(function(){
_this.unhighlight();
}, this.timeout*1000);
}
},
unhighlight: function(){
// summary:
// Stops shaking the icon.
domClass.remove(this.iconDivNode, "mblVibrate");
},
instantiateWidget: function(e){
// summary:
// Instantiates the icon content.
// avoid use of query
/*
var list = query('[dojoType]', this.containerNode);
for(var i = 0, len = list.length; i < len; i++){
dojo["require"](list[i].getAttribute("dojoType"));
}
*/
var nodes = this.containerNode.getElementsByTagName("*");
var len = nodes.length;
var s;
for(var i = 0; i < len; i++){
s = nodes[i].getAttribute("dojoType");
if(s){
dojo["require"](s);
}
}
if(len > 0){
dojo.parser.parse(this.containerNode);
}
this.lazy = false;
},
isOpen: function(e){
// summary:
// Returns true if the icon is open.
return this.containerNode.style.display != "none";
},
onMouseDownIcon: function (e){
domStyle.set(this.iconNode, "opacity", this.getParent().pressedIconOpacity);
},
iconClicked: function(e){
if(e){
this.setTransitionPos(e);
setTimeout(lang.hitch(this, function(d){ this.iconClicked(); }), 0);
return;
}
if (this.href && this.hrefTarget) {
common.openWindow(this.href, this.hrefTarget);
dojo.style(this.iconNode, "opacity", 1);
return;
}
var transOpts;
if(this.moveTo || this.href || this.url || this.scene){
transOpts = {moveTo: this.moveTo, href: this.href, url: this.url, scene: this.scene, transitionDir: this.transitionDir, transition: this.transition};
}else if(this.transitionOptions){
transOpts = this.transitionOptions;
}
if(transOpts){
setTimeout(lang.hitch(this, function(d){
domStyle.set(this.iconNode, "opacity", 1);
}), 1500);
}else{
return this.open(e);
}
if(transOpts){
return new TransitionEvent(this.domNode,transOpts,e).dispatch();
}
},
closeIconClicked: function(e){
if(e){
setTimeout(lang.hitch(this, function(d){ this.closeIconClicked(); }), 0);
return;
}
this.close();
},
open: function(e){
// summary:
// Opens the icon content, or makes a transition.
var parent = this.getParent(); // IconContainer
if(this.transition == "below"){
if(parent.single){
parent.closeAll();
domStyle.set(this.iconNode, "opacity", this.getParent().pressedIconOpacity);
}
this._open_1();
}else{
parent._opening = this;
if(parent.single){
this.closeNode.style.display = "none";
parent.closeAll();
var view = registry.byId(parent.id+"_mblApplView");
view._heading._setLabelAttr(this.label);
}
var transOpts = this.transitionOptions || {transition: this.transition, transitionDir: this.transitionDir, moveTo: parent.id + "_mblApplView"};
new TransitionEvent(this.domNode, transOpts, e).dispatch();
}
},
_open_1: function(){
this.contentNode.style.display = "";
this.unhighlight();
if(this.lazy){
if(this.requires){
array.forEach(this.requires.split(/,/), function(c){
dojo["require"](c);
});
}
this.instantiateWidget();
}
this.contentNode.scrollIntoView();
this.onOpen();
},
close: function(){
// summary:
// Closes the icon content.
if(has("webkit")){
var t = this.domNode.parentNode.offsetWidth/8;
var y = this.iconNode.offsetLeft;
var pos = 0;
for(var i = 1; i <= 3; i++){
if(t*(2*i-1) < y && y <= t*(2*(i+1)-1)){
pos = i;
break;
}
}
domClass.add(this.containerNode.parentNode, "mblCloseContent mblShrink"+pos);
}else{
this.containerNode.parentNode.style.display = "none";
}
domStyle.set(this.iconNode, "opacity", 1);
this.onClose();
},
onOpen: function(){
// summary:
// Stub method to allow the application to connect to.
},
onClose: function(){
// summary:
// Stub method to allow the application to connect to.
},
onError: function(){
var icon = this.getParent().defaultIcon;
if(icon){
this.iconNode.src = icon;
}
},
_setIconAttr: function(icon){
    // summary:
    //		Setter for the "icon" attribute; renders the icon image or
    //		CSS-sprite cell into this.iconNode.
    if(!this.getParent()){ return; } // icon may be invalid because inheritParams is not called yet
    this.icon = icon;
    common.createIcon(icon, this.iconPos, this.iconNode, this.alt);
    if(this.iconPos){
        // Sprite icon: iconPos is "left,top,width,height". Size the
        // parent node to the sprite cell and center it vertically.
        domClass.add(this.iconNode, "mblIconItemSpriteIcon");
        var arr = this.iconPos.split(/[ ,]/);
        var p = this.iconNode.parentNode;
        domStyle.set(p, {
            width: arr[2] + "px",
            top: Math.round((p.offsetHeight - arr[3]) / 2) + 1 + "px",
            margin: "auto"
        });
    }
},
_setLabelAttr: function(/*String*/text){
this.label = text;
var s = this._cv ? this._cv(text) : text;
this.labelNode1.innerHTML = s;
this.labelNode2.innerHTML = s;
}
});
});
import argparse
import logging
import sys
import shutil
import os.path
import requests
from tempfile import NamedTemporaryFile
from collections import Counter
from lt3opencorpora.convert import Dictionary, doubleform_signal
sys.path.insert(0, ".")
# Global tally of tag signatures reported by the doubleform signal.
REPEATED_FORMS = Counter()


def log_double_form(sender, tags_signature):
    """Signal handler: count one more occurrence of *tags_signature*.

    *sender* is required by the signal API but is unused here.
    """
    REPEATED_FORMS[tags_signature] += 1
def download_to_tmp(url):
    """Stream *url* into a named temporary file and return its path.

    The file suffix is derived from the response Content-Type so that
    downstream code can recognize bzip2/gzip archives.  Returns ``None``
    (implicitly, by falling through) when the server does not answer
    with HTTP 200.
    """
    response = requests.get(url, stream=True)
    if response.status_code == 200:
        # Map known compressed content types onto a telling suffix.
        suffix_by_type = {
            "application/x-bzip2": ".bz2",
            "application/gzip": ".gzip",
            "application/x-gzip": ".gzip",
        }
        suffix = suffix_by_type.get(
            response.headers.get("content-type"), ".txt")
        with NamedTemporaryFile(suffix=suffix, delete=False) as tmp:
            # Let requests transparently undo any transport encoding.
            response.raw.decode_content = True
            shutil.copyfileobj(response.raw, tmp)
        return tmp.name
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Convert LT dict to OpenCorpora format.')
    parser.add_argument(
        'in_file', help='input file/url (txt gzipped/bzipped txt)')
    parser.add_argument(
        'out_file', help='XML to save OpenCorpora dictionary to')
    parser.add_argument(
        '--debug',
        help="Output debug information and collect some useful stats",
        action='store_true')
    parser.add_argument(
        # Fixed typos in the help text ("relationsheeps and meanigns").
        '--mapping', help="File with tags, their relationships and meanings",
        default='')

    args = parser.parse_args()

    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
        # Track repeated forms only when we are going to report them.
        doubleform_signal.connect(log_double_form)

    # Remote dictionaries are first fetched into a temporary file.
    if args.in_file.startswith(("http://", "https://")):
        args.in_file = download_to_tmp(args.in_file)

    # download_to_tmp returns None on a failed download; guard against
    # passing None to os.path.exists (which would raise TypeError).
    if not args.in_file or not os.path.exists(args.in_file):
        sys.exit("In file doesn't exist or cannot be downloaded")

    d = Dictionary(args.in_file, mapping=args.mapping)
    d.export_to_xml(args.out_file)

    if args.debug:
        logging.debug("=" * 50)
        for term, cnt in REPEATED_FORMS.most_common():
            # Lazy %-formatting: only rendered when DEBUG is enabled.
            logging.debug("%s: %s", term, cnt)
(function (global, factory) {
// UMD dispatch: register the Tamil locale as an AMD module, a CommonJS
// module, or attach it to the global ELEMENT.lang registry.
if (typeof define === "function" && define.amd) {
    define('element/locale/ta', ['module', 'exports'], factory);
} else if (typeof exports !== "undefined") {
    factory(module, exports);
} else {
    var mod = {
        exports: {}
    };
    factory(mod, mod.exports);
    global.ELEMENT.lang = global.ELEMENT.lang || {};
    global.ELEMENT.lang.ta = mod.exports;
}
})(this, function (module, exports) {
    'use strict';

    exports.__esModule = true;
    // Tamil (ta) translation strings for Element UI, keyed by component.
    exports.default = {
        el: {
            colorpicker: {
                confirm: 'உறுதி செய்',
                clear: 'தெளிவாக்கு'
            },
            datepicker: {
                now: 'தற்போது',
                today: 'இன்று',
                cancel: 'ரத்து செய்',
                // NOTE(review): 'சரி' reads as "OK" — other components use
                // 'தெளிவாக்கு' for "clear"; verify this translation.
                clear: 'சரி',
                confirm: 'உறுதி செய்',
                selectDate: 'தேதியை தேர்வு செய்',
                selectTime: 'நேரத்தை தேர்வு செய்',
                startDate: 'தொடங்கும் நாள்',
                startTime: 'தொடங்கும் நேரம்',
                endDate: 'முடியும் தேதி',
                endTime: 'முடியும் நேரம்',
                prevYear: 'Previous Year', // to be translated
                nextYear: 'Next Year', // to be translated
                prevMonth: 'Previous Month', // to be translated
                nextMonth: 'Next Month', // to be translated
                year: 'வருடம்',
                month1: 'ஜனவரி',
                month2: 'பிப்ரவரி',
                month3: 'மார்ச்',
                month4: 'ஏப்ரல்',
                month5: 'மே',
                month6: 'ஜூன்',
                month7: 'ஜூலை',
                month8: 'ஆகஸ்ட்',
                month9: 'செப்டம்பர்',
                month10: 'அக்டோபர்',
                month11: 'நவம்பர்',
                month12: 'டிசம்பர்',
                weeks: {
                    sun: 'ஞாயிறு',
                    mon: 'திங்கள்',
                    tue: 'செவ்வாய்',
                    wed: 'புதன்',
                    thu: 'வியாழன்',
                    fri: 'வெள்ளி',
                    sat: 'சனி'
                },
                months: {
                    jan: 'ஜனவரி',
                    feb: 'பிப்ரவரி',
                    mar: 'மார்ச்',
                    apr: 'ஏப்ரல்',
                    may: 'மே',
                    jun: 'ஜூன்',
                    jul: 'ஜூலை',
                    aug: 'ஆகஸ்ட்',
                    sep: 'செப்டம்பர்',
                    oct: 'அக்டோபர்',
                    nov: 'நவம்பர்',
                    dec: 'டிசம்பர்'
                }
            },
            select: {
                loading: 'தயாராகிக்கொண்டிருக்கிறது',
                noMatch: 'பொருத்தமான தரவு கிடைக்கவில்லை',
                noData: 'தரவு இல்லை',
                placeholder: 'தேர்வு செய்'
            },
            cascader: {
                noMatch: 'பொருத்தமான தரவு கிடைக்கவில்லை',
                loading: 'தயாராகிக்கொண்டிருக்கிறது',
                placeholder: 'தேர்வு செய்',
                noData: 'தரவு இல்லை'
            },
            pagination: {
                goto: 'தேவையான் பகுதிக்கு செல்',
                pagesize: '/page',
                total: 'மொத்தம் {total}',
                pageClassifier: ''
            },
            messagebox: {
                title: 'செய்தி',
                confirm: 'உறுதி செய்',
                cancel: 'ரத்து செய்',
                error: 'பொருத்தாமில்லாத உள்ளீடு'
            },
            upload: {
                deleteTip: 'press delete to remove', // to be translated
                delete: 'நீக்கு',
                preview: 'முன்னோட்டம் பார்',
                continue: 'தொடரு'
            },
            table: {
                emptyText: 'தரவு இல்லை',
                confirmFilter: 'உறுதி செய்',
                resetFilter: 'புதுமாற்றம் செய்',
                clearFilter: 'அனைத்தும்',
                sumText: 'கூட்டு'
            },
            tree: {
                emptyText: 'தரவு இல்லை'
            },
            transfer: {
                noMatch: 'பொருத்தமான தரவு கிடைக்கவில்லை',
                noData: 'தரவு இல்லை',
                titles: ['பட்டியல் 1', 'பட்டியல் 2'],
                filterPlaceholder: 'சொல்லை உள்ளீடு செய்',
                noCheckedFormat: '{total} items', // to be translated
                hasCheckedFormat: '{checked}/{total} தேர்வு செய்யப்பட்டவைகள்'
            },
            image: {
                error: 'FAILED' // to be translated
            },
            pageHeader: {
                title: 'Back' // to be translated
            }
        }
    };
    module.exports = exports['default'];
});
import logging
import math
import weakref
import numpy as np
import matplotlib as mpl
from . import _api, artist, cbook, docstring
from .artist import Artist
from .font_manager import FontProperties
from .patches import FancyArrowPatch, FancyBboxPatch, Rectangle
from .textpath import TextPath # Unused, but imported by others.
from .transforms import (
Affine2D, Bbox, BboxBase, BboxTransformTo, IdentityTransform, Transform)
_log = logging.getLogger(__name__)
# Extracted from Text's method to serve as a function
def get_rotation(rotation):
    """
    Return *rotation* normalized to an angle between 0 and 360 degrees.

    Parameters
    ----------
    rotation : float or {None, 'horizontal', 'vertical'}
        Rotation angle in degrees.  *None* and 'horizontal' equal 0,
        'vertical' equals 90.

    Returns
    -------
    float
    """
    try:
        angle = float(rotation)
    except (ValueError, TypeError) as err:
        # Not numeric: fall back to the symbolic values.
        if rotation is None or cbook._str_equal(rotation, 'horizontal'):
            return 0.
        if cbook._str_equal(rotation, 'vertical'):
            return 90.
        raise ValueError("rotation is {!r}; expected either 'horizontal', "
                         "'vertical', numeric value, or None"
                         .format(rotation)) from err
    return angle % 360
def _get_textbox(text, renderer):
    """
    Calculate the bounding box of the text.

    The bbox position takes text rotation into account, but the width and
    height are those of the unrotated box (unlike `.Text.get_window_extent`).

    Returns a tuple ``(x, y, width, height)``.
    """
    # TODO : This function may move into the Text class as a method. As a
    # matter of fact, the information from the _get_textbox function
    # should be available during the Text._get_layout() call, which is
    # called within the _get_textbox. So, it would better to move this
    # function as a method with some refactoring of _get_layout method.

    projected_xs = []
    projected_ys = []

    theta = np.deg2rad(text.get_rotation())
    # Un-rotate each laid-out line to measure the axis-aligned extent.
    tr = Affine2D().rotate(-theta)

    _, parts, d = text._get_layout(renderer)

    for t, wh, x, y in parts:
        w, h = wh

        xt1, yt1 = tr.transform((x, y))
        yt1 -= d
        xt2, yt2 = xt1 + w, yt1 + h

        projected_xs.extend([xt1, xt2])
        projected_ys.extend([yt1, yt2])

    xt_box, yt_box = min(projected_xs), min(projected_ys)
    w_box, h_box = max(projected_xs) - xt_box, max(projected_ys) - yt_box

    # Rotate the box origin back into display orientation.
    x_box, y_box = Affine2D().rotate(theta).transform((xt_box, yt_box))

    return x_box, y_box, w_box, h_box
@docstring.interpd
@cbook._define_aliases({
"color": ["c"],
"fontfamily": ["family"],
"fontproperties": ["font", "font_properties"],
"horizontalalignment": ["ha"],
"multialignment": ["ma"],
"fontname": ["name"],
"fontsize": ["size"],
"fontstretch": ["stretch"],
"fontstyle": ["style"],
"fontvariant": ["variant"],
"verticalalignment": ["va"],
"fontweight": ["weight"],
})
class Text(Artist):
"""Handle storing and drawing of text in window or data coordinates."""
# Draw text above most other artists by default.
zorder = 3
# Class-level (shared) cache of computed layouts, keyed by
# _get_layout_cache_key(); bounded to 50 entries.
_cached = cbook.maxdict(50)
def __repr__(self):
    # Position first, then the repr of the text itself.
    return f"Text({self._x}, {self._y}, {self._text!r})"
def __init__(self,
             x=0, y=0, text='',
             color=None,  # defaults to rc params
             verticalalignment='baseline',
             horizontalalignment='left',
             multialignment=None,
             fontproperties=None,  # defaults to FontProperties()
             rotation=None,
             linespacing=None,
             rotation_mode=None,
             usetex=None,  # defaults to rcParams['text.usetex']
             wrap=False,
             transform_rotates_text=False,
             *,
             parse_math=True,
             **kwargs
             ):
    """
    Create a `.Text` instance at *x*, *y* with string *text*.

    Valid keyword arguments are:

    %(Text:kwdoc)s
    """
    super().__init__()
    self._x, self._y = x, y
    self._text = ''
    self.set_text(text)
    # Go through the setters (rather than bare assignment) so that
    # validation and rc-default resolution run.
    self.set_color(
        color if color is not None else mpl.rcParams["text.color"])
    self.set_fontproperties(fontproperties)
    self.set_usetex(usetex)
    self.set_parse_math(parse_math)
    self.set_wrap(wrap)
    self.set_verticalalignment(verticalalignment)
    self.set_horizontalalignment(horizontalalignment)
    self._multialignment = multialignment
    self._rotation = rotation
    self._transform_rotates_text = transform_rotates_text
    self._bbox_patch = None  # a FancyBboxPatch instance
    self._renderer = None
    if linespacing is None:
        linespacing = 1.2  # Maybe use rcParam later.
    self._linespacing = linespacing
    self.set_rotation_mode(rotation_mode)
    # Apply remaining keyword arguments last so they can override the
    # defaults set above.
    self.update(kwargs)
def update(self, kwargs):
    # docstring inherited
    # Map aliases (e.g. "ha", "size") onto canonical property names.
    kwargs = cbook.normalize_kwargs(kwargs, Text)
    sentinel = object()  # bbox can be None, so use another sentinel.
    # Update fontproperties first, as it has lowest priority.
    fontproperties = kwargs.pop("fontproperties", sentinel)
    if fontproperties is not sentinel:
        self.set_fontproperties(fontproperties)
    # Update bbox last, as it depends on font properties.
    bbox = kwargs.pop("bbox", sentinel)
    super().update(kwargs)
    if bbox is not sentinel:
        self.set_bbox(bbox)
def __getstate__(self):
    state = super().__getstate__()
    # Renderers hold backend resources and cannot be pickled; drop the
    # cached one (if any) from the pickled state.
    state['_renderer'] = None
    return state
def contains(self, mouseevent):
    """
    Return whether the mouse event occurred inside the axis-aligned
    bounding-box of the text.

    Returns ``(inside, details)``; *details* may carry a
    ``"bbox_patch"`` entry with the hit result for the surrounding
    `.FancyBboxPatch`, when one is set.
    """
    inside, info = self._default_contains(mouseevent)
    if inside is not None:
        return inside, info

    # Without a renderer there is no extent to test against.
    if not self.get_visible() or self._renderer is None:
        return False, {}

    # Explicitly use Text.get_window_extent(self) and not
    # self.get_window_extent() so that Annotation.contains does not
    # accidentally cover the entire annotation bounding box.
    bbox = Text.get_window_extent(self)
    inside = (bbox.x0 <= mouseevent.x <= bbox.x1
              and bbox.y0 <= mouseevent.y <= bbox.y1)

    cattr = {}
    # if the text has a surrounding patch, also check containment for it,
    # and merge the results with the results for the text.
    if self._bbox_patch:
        patch_inside, patch_cattr = self._bbox_patch.contains(mouseevent)
        inside = inside or patch_inside
        cattr["bbox_patch"] = patch_cattr

    return inside, cattr
def _get_xy_display(self):
    """
    Get the (possibly unit converted) transformed x, y in display coords.
    """
    # get_unitless_position already returns an (x, y) pair.
    return self.get_transform().transform(self.get_unitless_position())
def _get_multialignment(self):
    # Fall back to the horizontal alignment when no explicit multi-line
    # alignment has been set.
    if self._multialignment is None:
        return self._horizontalalignment
    return self._multialignment
def get_rotation(self):
    """Return the text angle in degrees between 0 and 360."""
    if self.get_transform_rotates_text():
        # Let the transform rotate the nominal angle at the text position.
        angle = get_rotation(self._rotation)
        x, y = self.get_unitless_position()
        angles = [angle, ]
        pts = [[x, y]]
        return self.get_transform().transform_angles(angles, pts).item(0)
    else:
        return get_rotation(self._rotation)  # string_or_number -> number
def get_transform_rotates_text(self):
    """
    Return whether rotations of the transform affect the text direction.

    Set via the *transform_rotates_text* constructor argument.
    """
    return self._transform_rotates_text
def set_rotation_mode(self, m):
    """
    Set text rotation mode.

    Parameters
    ----------
    m : {None, 'default', 'anchor'}
        If ``None`` or ``"default"``, the text will be first rotated, then
        aligned according to their horizontal and vertical alignments. If
        ``"anchor"``, then alignment occurs before rotation.

    Raises
    ------
    ValueError
        If *m* is not one of the allowed values.
    """
    _api.check_in_list(["anchor", "default", None], rotation_mode=m)
    self._rotation_mode = m
    self.stale = True
def get_rotation_mode(self):
    """Return the text rotation mode (see `.set_rotation_mode`)."""
    return self._rotation_mode
def update_from(self, other):
    # docstring inherited
    super().update_from(other)
    # Copy text-specific style state; position and text string are not
    # copied.
    self._color = other._color
    self._multialignment = other._multialignment
    self._verticalalignment = other._verticalalignment
    self._horizontalalignment = other._horizontalalignment
    # Copy the FontProperties so later changes on either Text do not
    # affect the other.
    self._fontproperties = other._fontproperties.copy()
    self._usetex = other._usetex
    self._rotation = other._rotation
    self._transform_rotates_text = other._transform_rotates_text
    self._picker = other._picker
    self._linespacing = other._linespacing
    self.stale = True
def _get_layout_cache_key(self, renderer=None):
    """
    Return a hashable tuple of properties that lets `_get_layout` know
    whether a previously computed layout can be reused.
    """
    x, y = self.get_unitless_position()
    renderer = renderer or self._renderer
    return (
        x, y, self.get_text(), hash(self._fontproperties),
        self._verticalalignment, self._horizontalalignment,
        self._linespacing,
        self._rotation, self._rotation_mode, self._transform_rotates_text,
        # weakref avoids the cache key keeping dead renderers alive.
        self.figure.dpi, weakref.ref(renderer),
    )
def _get_layout(self, renderer):
    """
    Return the extent (bbox) of the text together with
    multiple-alignment information. Note that it returns an extent
    of a rotated text when necessary.

    Returns ``(bbox, info, descent)`` where *info* is a list of
    ``(line, (width, height), x, y)`` per laid-out line and *descent*
    is that of the last line.
    """
    key = self._get_layout_cache_key(renderer=renderer)
    if key in self._cached:
        return self._cached[key]

    thisx, thisy = 0.0, 0.0
    lines = self.get_text().split("\n")  # Ensures lines is not empty.

    ws = []
    hs = []
    xs = []
    ys = []

    # Full vertical extent of font, including ascenders and descenders:
    _, lp_h, lp_d = renderer.get_text_width_height_descent(
        "lp", self._fontproperties,
        ismath="TeX" if self.get_usetex() else False)
    min_dy = (lp_h - lp_d) * self._linespacing

    for i, line in enumerate(lines):
        clean_line, ismath = self._preprocess_math(line)
        if clean_line:
            w, h, d = renderer.get_text_width_height_descent(
                clean_line, self._fontproperties, ismath=ismath)
        else:
            w = h = d = 0

        # For multiline text, increase the line spacing when the text
        # net-height (excluding baseline) is larger than that of a "l"
        # (e.g., use of superscripts), which seems what TeX does.
        h = max(h, lp_h)
        d = max(d, lp_d)

        ws.append(w)
        hs.append(h)

        # Metrics of the last line that are needed later:
        baseline = (h - d) - thisy

        if i == 0:
            # position at baseline
            thisy = -(h - d)
        else:
            # put baseline a good distance from bottom of previous line
            thisy -= max(min_dy, (h - d) * self._linespacing)

        xs.append(thisx)  # == 0.
        ys.append(thisy)

        thisy -= d

    # Metrics of the last line that are needed later:
    descent = d

    # Bounding box definition:
    width = max(ws)
    xmin = 0
    xmax = width
    ymax = 0
    ymin = ys[-1] - descent  # baseline of last line minus its descent

    height = ymax - ymin

    # get the rotation matrix
    M = Affine2D().rotate_deg(self.get_rotation())

    # now offset the individual text lines within the box
    malign = self._get_multialignment()
    if malign == 'left':
        offset_layout = [(x, y) for x, y in zip(xs, ys)]
    elif malign == 'center':
        offset_layout = [(x + width / 2 - w / 2, y)
                         for x, y, w in zip(xs, ys, ws)]
    elif malign == 'right':
        offset_layout = [(x + width - w, y)
                         for x, y, w in zip(xs, ys, ws)]

    # the corners of the unrotated bounding box
    corners_horiz = np.array(
        [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)])

    # now rotate the bbox
    corners_rotated = M.transform(corners_horiz)
    # compute the bounds of the rotated box
    xmin = corners_rotated[:, 0].min()
    xmax = corners_rotated[:, 0].max()
    ymin = corners_rotated[:, 1].min()
    ymax = corners_rotated[:, 1].max()
    width = xmax - xmin
    height = ymax - ymin

    # Now move the box to the target position offset the display
    # bbox by alignment
    halign = self._horizontalalignment
    valign = self._verticalalignment

    rotation_mode = self.get_rotation_mode()
    if rotation_mode != "anchor":
        # compute the text location in display coords and the offsets
        # necessary to align the bbox with that location
        if halign == 'center':
            offsetx = (xmin + xmax) / 2
        elif halign == 'right':
            offsetx = xmax
        else:
            offsetx = xmin

        if valign == 'center':
            offsety = (ymin + ymax) / 2
        elif valign == 'top':
            offsety = ymax
        elif valign == 'baseline':
            offsety = ymin + descent
        elif valign == 'center_baseline':
            offsety = ymin + height - baseline / 2.0
        else:
            offsety = ymin
    else:
        # "anchor" mode: align using the unrotated corners, then rotate
        # the resulting anchor point.
        xmin1, ymin1 = corners_horiz[0]
        xmax1, ymax1 = corners_horiz[2]

        if halign == 'center':
            offsetx = (xmin1 + xmax1) / 2.0
        elif halign == 'right':
            offsetx = xmax1
        else:
            offsetx = xmin1

        if valign == 'center':
            offsety = (ymin1 + ymax1) / 2.0
        elif valign == 'top':
            offsety = ymax1
        elif valign == 'baseline':
            offsety = ymax1 - baseline
        elif valign == 'center_baseline':
            offsety = ymax1 - baseline / 2.0
        else:
            offsety = ymin1

        offsetx, offsety = M.transform((offsetx, offsety))

    xmin -= offsetx
    ymin -= offsety

    bbox = Bbox.from_bounds(xmin, ymin, width, height)

    # now rotate the positions around the first (x, y) position
    xys = M.transform(offset_layout) - (offsetx, offsety)

    ret = bbox, list(zip(lines, zip(ws, hs), *xys.T)), descent
    self._cached[key] = ret
    return ret
def set_bbox(self, rectprops):
    """
    Draw a bounding box around self.

    Parameters
    ----------
    rectprops : dict with properties for `.patches.FancyBboxPatch`
         The default boxstyle is 'square'. The mutation
         scale of the `.patches.FancyBboxPatch` is set to the fontsize.
         Pass ``None`` to remove an existing bbox.

    Examples
    --------
    ::

        t.set_bbox(dict(facecolor='red', alpha=0.5))
    """
    if rectprops is not None:
        props = rectprops.copy()
        boxstyle = props.pop("boxstyle", None)
        pad = props.pop("pad", None)
        if boxstyle is None:
            boxstyle = "square"
            if pad is None:
                pad = 4  # points
            # The boxstyle "pad" parameter is in fractions of font size.
            pad /= self.get_size()  # to fraction of font size
        else:
            if pad is None:
                pad = 0.3
        # boxstyle could be a callable or a string
        if isinstance(boxstyle, str) and "pad" not in boxstyle:
            boxstyle += ",pad=%0.2f" % pad
        # Placeholder geometry; the real position/size is set in
        # update_bbox_position_size at draw time.
        self._bbox_patch = FancyBboxPatch(
            (0, 0), 1, 1,
            boxstyle=boxstyle, transform=IdentityTransform(), **props)
    else:
        self._bbox_patch = None

    self._update_clip_properties()
def get_bbox_patch(self):
    """
    Return the bbox Patch, or None if the `.patches.FancyBboxPatch`
    is not made (i.e. `.set_bbox` has not been called with a dict).
    """
    return self._bbox_patch
def update_bbox_position_size(self, renderer):
    """
    Update the location and the size of the bbox.

    This method should be used when the position and size of the bbox needs
    to be updated before actually drawing the bbox.
    """
    if self._bbox_patch:
        # don't use self.get_unitless_position here, which refers to text
        # position in Text:
        posx = float(self.convert_xunits(self._x))
        posy = float(self.convert_yunits(self._y))
        posx, posy = self.get_transform().transform((posx, posy))

        x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
        self._bbox_patch.set_bounds(0., 0., w_box, h_box)
        # Rotate the patch with the text, then move it to the rotated
        # box origin in display space.
        self._bbox_patch.set_transform(
            Affine2D()
            .rotate_deg(self.get_rotation())
            .translate(posx + x_box, posy + y_box))
        # Scale the box style (e.g. its pad) with the font size.
        fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
        self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
def _update_clip_properties(self):
    # Keep the bbox patch's clipping state in sync with the text's own.
    if self._bbox_patch:
        self._bbox_patch.update(dict(clip_box=self.clipbox,
                                     clip_path=self._clippath,
                                     clip_on=self._clipon))
def set_clip_box(self, clipbox):
    # docstring inherited.
    super().set_clip_box(clipbox)
    # Propagate the new clipping to the bbox patch as well.
    self._update_clip_properties()
def set_clip_path(self, path, transform=None):
    # docstring inherited.
    super().set_clip_path(path, transform)
    # Propagate the new clipping to the bbox patch as well.
    self._update_clip_properties()
def set_clip_on(self, b):
    # docstring inherited.
    super().set_clip_on(b)
    # Propagate the new clipping to the bbox patch as well.
    self._update_clip_properties()
def get_wrap(self):
    """Return whether the text can be wrapped (see `.set_wrap`)."""
    return self._wrap
def set_wrap(self, wrap):
    """
    Set whether the text can be wrapped.

    Parameters
    ----------
    wrap : bool
        Whether to wrap the text at the parent figure's boundary.

    Notes
    -----
    Wrapping does not work together with
    ``savefig(..., bbox_inches='tight')`` (which is also used internally
    by ``%matplotlib inline`` in IPython/Jupyter). The 'tight' setting
    rescales the canvas to accommodate all content and happens before
    wrapping.
    """
    self._wrap = wrap
def _get_wrap_line_width(self):
    """
    Return the maximum line width for wrapping text based on the current
    orientation.
    """
    x0, y0 = self.get_transform().transform(self.get_position())
    figure_box = self.get_figure().get_window_extent()

    # Calculate available width based on text alignment
    alignment = self.get_horizontalalignment()
    # NOTE(review): this mutates the rotation mode as a side effect —
    # confirm rotation_mode is not relied on after wrapping.
    self.set_rotation_mode('anchor')
    rotation = self.get_rotation()

    # Distance to the figure edge in the text direction and its opposite.
    left = self._get_dist_to_box(rotation, x0, y0, figure_box)
    right = self._get_dist_to_box(
        (180 + rotation) % 360, x0, y0, figure_box)

    if alignment == 'left':
        line_width = left
    elif alignment == 'right':
        line_width = right
    else:
        # Centered text can extend equally on both sides.
        line_width = 2 * min(left, right)

    return line_width
def _get_dist_to_box(self, rotation, x0, y0, figure_box):
    """
    Return the distance from the given points to the boundaries of a
    rotated box, in pixels.

    *rotation* is in degrees in [0, 360); each 90-degree quadrant is
    handled separately, measuring the distance to the two figure edges
    that text running at that angle can hit.
    """
    if rotation > 270:
        quad = rotation - 270
        h1 = y0 / math.cos(math.radians(quad))
        h2 = (figure_box.x1 - x0) / math.cos(math.radians(90 - quad))
    elif rotation > 180:
        quad = rotation - 180
        h1 = x0 / math.cos(math.radians(quad))
        h2 = y0 / math.cos(math.radians(90 - quad))
    elif rotation > 90:
        quad = rotation - 90
        h1 = (figure_box.y1 - y0) / math.cos(math.radians(quad))
        h2 = x0 / math.cos(math.radians(90 - quad))
    else:
        h1 = (figure_box.x1 - x0) / math.cos(math.radians(rotation))
        h2 = (figure_box.y1 - y0) / math.cos(math.radians(90 - rotation))

    # The nearer edge limits the line length.
    return min(h1, h2)
def _get_rendered_text_width(self, text):
    """
    Return the width of a given text string, in pixels.
    """
    width, _, _ = self._renderer.get_text_width_height_descent(
        text, self.get_fontproperties(), False)
    # Round up so wrapped lines never exceed the measured width.
    return math.ceil(width)
def _get_wrapped_text(self):
    """
    Return a copy of the text string with new lines added so that the text
    is wrapped relative to the parent figure (if `get_wrap` is True).
    """
    if not self.get_wrap():
        return self.get_text()

    # Not fit to handle breaking up latex syntax correctly, so
    # ignore latex for now.
    if self.get_usetex():
        return self.get_text()

    # Build the line incrementally, for a more accurate measure of length
    line_width = self._get_wrap_line_width()
    wrapped_lines = []

    # New lines in the user's text force a split
    unwrapped_lines = self.get_text().split('\n')

    # Now wrap each individual unwrapped line
    for unwrapped_line in unwrapped_lines:

        sub_words = unwrapped_line.split(' ')
        # Remove items from sub_words as we go, so stop when empty
        while len(sub_words) > 0:
            if len(sub_words) == 1:
                # Only one word, so just add it to the end
                wrapped_lines.append(sub_words.pop(0))
                continue

            # Try growing the line word by word until it overflows.
            for i in range(2, len(sub_words) + 1):
                # Get width of all words up to and including here
                line = ' '.join(sub_words[:i])
                current_width = self._get_rendered_text_width(line)

                # If all these words are too wide, append all not including
                # last word
                if current_width > line_width:
                    wrapped_lines.append(' '.join(sub_words[:i - 1]))
                    sub_words = sub_words[i - 1:]
                    break

                # Otherwise if all words fit in the width, append them all
                elif i == len(sub_words):
                    wrapped_lines.append(' '.join(sub_words[:i]))
                    sub_words = []
                    break

    return '\n'.join(wrapped_lines)
@artist.allow_rasterization
def draw(self, renderer):
    # docstring inherited
    if renderer is not None:
        self._renderer = renderer
    if not self.get_visible():
        return
    if self.get_text() == '':
        return

    renderer.open_group('text', self.get_gid())

    # Temporarily substitute the wrapped text (a no-op unless wrapping
    # is enabled) for layout and drawing.
    with self._cm_set(text=self._get_wrapped_text()):
        bbox, info, descent = self._get_layout(renderer)
        trans = self.get_transform()

        # don't use self.get_position here, which refers to text
        # position in Text:
        posx = float(self.convert_xunits(self._x))
        posy = float(self.convert_yunits(self._y))
        posx, posy = trans.transform((posx, posy))
        if not np.isfinite(posx) or not np.isfinite(posy):
            _log.warning("posx and posy should be finite values")
            return
        canvasw, canvash = renderer.get_canvas_width_height()

        # Update the location and size of the bbox
        # (`.patches.FancyBboxPatch`), and draw it.
        if self._bbox_patch:
            self.update_bbox_position_size(renderer)
            self._bbox_patch.draw(renderer)

        gc = renderer.new_gc()
        gc.set_foreground(self.get_color())
        gc.set_alpha(self.get_alpha())
        gc.set_url(self._url)
        self._set_gc_clip(gc)

        angle = self.get_rotation()

        for line, wh, x, y in info:

            # Pass self through only for single-line text, so backends
            # can use the full layout information.
            mtext = self if len(info) == 1 else None
            x = x + posx
            y = y + posy
            if renderer.flipy():
                y = canvash - y
            clean_line, ismath = self._preprocess_math(line)

            if self.get_path_effects():
                from matplotlib.patheffects import PathEffectRenderer
                textrenderer = PathEffectRenderer(
                    self.get_path_effects(), renderer)
            else:
                textrenderer = renderer

            if self.get_usetex():
                textrenderer.draw_tex(gc, x, y, clean_line,
                                      self._fontproperties, angle,
                                      mtext=mtext)
            else:
                textrenderer.draw_text(gc, x, y, clean_line,
                                       self._fontproperties, angle,
                                       ismath=ismath, mtext=mtext)

        gc.restore()

    renderer.close_group('text')
    self.stale = False
def get_color(self):
    """Return the color of the text (as set by `.set_color`)."""
    return self._color
def get_fontproperties(self):
    """Return the `.font_manager.FontProperties` controlling this text."""
    return self._fontproperties
def get_fontfamily(self):
    """
    Return the list of font families used for font lookup.

    Returns
    -------
    list of str

    See Also
    --------
    .font_manager.FontProperties.get_family
    """
    return self._fontproperties.get_family()
def get_fontname(self):
    """
    Return the font name as a string.

    Returns
    -------
    str

    See Also
    --------
    .font_manager.FontProperties.get_name
    """
    return self._fontproperties.get_name()
def get_fontstyle(self):
    """
    Return the font style as a string.

    Returns
    -------
    str

    See Also
    --------
    .font_manager.FontProperties.get_style
    """
    return self._fontproperties.get_style()
def get_fontsize(self):
    """
    Return the font size, in points.

    See Also
    --------
    .font_manager.FontProperties.get_size_in_points
    """
    return self._fontproperties.get_size_in_points()
def get_fontvariant(self):
    """
    Return the font variant as a string.

    Returns
    -------
    str

    See Also
    --------
    .font_manager.FontProperties.get_variant
    """
    return self._fontproperties.get_variant()
def get_fontweight(self):
    """
    Return the font weight as a string or a number.

    Returns
    -------
    str or number

    See Also
    --------
    .font_manager.FontProperties.get_weight
    """
    return self._fontproperties.get_weight()
def get_stretch(self):
    """
    Return the font stretch as a string or a number.

    Returns
    -------
    str or number

    See Also
    --------
    .font_manager.FontProperties.get_stretch
    """
    return self._fontproperties.get_stretch()
def get_horizontalalignment(self):
    """
    Return the horizontal alignment as a string. Will be one of
    'left', 'center' or 'right' (see `.set_horizontalalignment`).
    """
    return self._horizontalalignment
def get_unitless_position(self):
    """Return the (x, y) unitless position of the text."""
    # This will get the position with all unit information stripped away.
    # This is here for convenience since it is done in several locations.
    x = float(self.convert_xunits(self._x))
    y = float(self.convert_yunits(self._y))
    return x, y
def get_position(self):
    """Return the (x, y) position of the text, possibly with units."""
    # This should return the same data (possible unitized) as was
    # specified with 'set_x' and 'set_y'.
    return self._x, self._y
# When removing, also remove the hash(color) check in set_color()
@_api.deprecated("3.5")
def get_prop_tup(self, renderer=None):
    """
    Return a hashable tuple of properties.

    Not intended to be human readable, but useful for backends who
    want to cache derived information about text (e.g., layouts) and
    need to know if the text has changed.

    Deprecated; `_get_layout_cache_key` serves the internal use case.
    """
    x, y = self.get_unitless_position()
    renderer = renderer or self._renderer
    return (x, y, self.get_text(), self._color,
            self._verticalalignment, self._horizontalalignment,
            hash(self._fontproperties),
            self._rotation, self._rotation_mode,
            self._transform_rotates_text,
            self.figure.dpi, weakref.ref(renderer),
            self._linespacing
            )
def get_text(self):
    """Return the text string (as set by `.set_text`)."""
    return self._text
def get_verticalalignment(self):
    """
    Return the vertical alignment as a string. Will be one of
    'top', 'center', 'bottom', 'baseline' or 'center_baseline'
    (see `.set_verticalalignment`).
    """
    return self._verticalalignment
def get_window_extent(self, renderer=None, dpi=None):
    """
    Return the `.Bbox` bounding the text, in display units.

    In addition to being used internally, this is useful for specifying
    clickable regions in a png file on a web page.

    Parameters
    ----------
    renderer : Renderer, optional
        A renderer is needed to compute the bounding box.  If the artist
        has already been drawn, the renderer is cached; thus, it is only
        necessary to pass this argument when calling `get_window_extent`
        before the first `draw`.  In practice, it is usually easier to
        trigger a draw first (e.g. by saving the figure).

    dpi : float, optional
        The dpi value for computing the bbox, defaults to
        ``self.figure.dpi`` (*not* the renderer dpi); should be set e.g. if
        to match regions with a figure saved with a custom dpi value.

    Raises
    ------
    RuntimeError
        If no renderer was passed and none has been cached yet.
    """
    if not self.get_visible():
        return Bbox.unit()
    if dpi is None:
        dpi = self.figure.dpi
    if self.get_text() == '':
        # Empty text still has a well-defined anchor point.
        with cbook._setattr_cm(self.figure, dpi=dpi):
            tx, ty = self._get_xy_display()
            return Bbox.from_bounds(tx, ty, 0, 0)

    if renderer is not None:
        self._renderer = renderer
    if self._renderer is None:
        self._renderer = self.figure._cachedRenderer
    if self._renderer is None:
        raise RuntimeError('Cannot get window extent w/o renderer')

    with cbook._setattr_cm(self.figure, dpi=dpi):
        bbox, info, descent = self._get_layout(self._renderer)
        x, y = self.get_unitless_position()
        x, y = self.get_transform().transform((x, y))
        bbox = bbox.translated(x, y)
        return bbox
def set_backgroundcolor(self, color):
    """
    Set the background color of the text by updating the bbox.

    Parameters
    ----------
    color : color

    See Also
    --------
    .set_bbox : To change the position of the bounding box
    """
    if self._bbox_patch is None:
        # No bbox yet: create one filled (and edged) with *color*.
        self.set_bbox(dict(facecolor=color, edgecolor=color))
    else:
        # Existing bbox: only change its face color.
        self._bbox_patch.update(dict(facecolor=color))

    self._update_clip_properties()
    self.stale = True
def set_color(self, color):
    """
    Set the foreground color of the text.

    Parameters
    ----------
    color : color
    """
    # "auto" is only supported by axisartist, but we can just let it error
    # out at draw time for simplicity.
    if not cbook._str_equal(color, "auto"):
        mpl.colors._check_color_like(color=color)
    # Make sure it is hashable, or get_prop_tup will fail (remove this once
    # get_prop_tup is removed).
    try:
        hash(color)
    except TypeError:
        color = tuple(color)
    self._color = color
    self.stale = True
def set_horizontalalignment(self, align):
    """
    Set the horizontal alignment relative to the anchor point.

    Parameters
    ----------
    align : {'center', 'right', 'left'}

    Raises
    ------
    ValueError
        If *align* is not one of the allowed values.
    """
    _api.check_in_list(['center', 'right', 'left'], align=align)
    self._horizontalalignment = align
    self.stale = True
def set_multialignment(self, align):
    """
    Set the text alignment for multiline texts.

    The layout of the bounding box of all the lines is determined by the
    horizontalalignment and verticalalignment properties. This property
    controls the alignment of the text lines within that box.

    Parameters
    ----------
    align : {'left', 'right', 'center'}

    Raises
    ------
    ValueError
        If *align* is not one of the allowed values.
    """
    _api.check_in_list(['center', 'right', 'left'], align=align)
    self._multialignment = align
    self.stale = True
def set_linespacing(self, spacing):
    """
    Set the line spacing as a multiple of the font size.

    The default line spacing is 1.2.

    Parameters
    ----------
    spacing : float (multiple of font size)
    """
    self._linespacing = spacing
    self.stale = True
def set_fontfamily(self, fontname):
    """
    Set the font family.  May be either a single string, or a list of
    strings in decreasing priority.  Each string may be either a real font
    name or a generic font class name.  If the latter, the specific font
    names will be looked up in the corresponding rcParams.

    If a `Text` instance is constructed with ``fontfamily=None``, then the
    font is set to :rc:`font.family`, and the
    same is done when `set_fontfamily()` is called on an existing
    `Text` instance.

    Parameters
    ----------
    fontname : {FONTNAME, 'serif', 'sans-serif', 'cursive', 'fantasy', \
'monospace'}

    See Also
    --------
    .font_manager.FontProperties.set_family
    """
    self._fontproperties.set_family(fontname)
    self.stale = True
def set_fontvariant(self, variant):
    """
    Set the font variant.

    Parameters
    ----------
    variant : {'normal', 'small-caps'}

    See Also
    --------
    .font_manager.FontProperties.set_variant
    """
    self._fontproperties.set_variant(variant)
    self.stale = True
def set_fontstyle(self, fontstyle):
    """
    Set the font style.

    Parameters
    ----------
    fontstyle : {'normal', 'italic', 'oblique'}

    See Also
    --------
    .font_manager.FontProperties.set_style
    """
    # Delegate to the underlying FontProperties.
    self._fontproperties.set_style(fontstyle)
    self.stale = True
def set_fontsize(self, fontsize):
    """
    Set the font size.

    Parameters
    ----------
    fontsize : float or {'xx-small', 'x-small', 'small', 'medium', \
'large', 'x-large', 'xx-large'}
        If float, the fontsize in points. The string values denote sizes
        relative to the default font size.

    See Also
    --------
    .font_manager.FontProperties.set_size
    """
    # Delegate size handling (including named sizes) to FontProperties.
    self._fontproperties.set_size(fontsize)
    self.stale = True
def get_math_fontfamily(self):
    """
    Return the font family name used for math text rendered by Matplotlib.

    The default value is :rc:`mathtext.fontset`.

    See Also
    --------
    set_math_fontfamily
    """
    return self._fontproperties.get_math_fontfamily()
def set_math_fontfamily(self, fontfamily):
    """
    Set the font family for math text rendered by Matplotlib.

    This does only affect Matplotlib's own math renderer. It has no effect
    when rendering with TeX (``usetex=True``).

    Parameters
    ----------
    fontfamily : str
        The name of the font family.

        Available font families are defined in the
        :ref:`matplotlibrc.template file
        <customizing-with-matplotlibrc-files>`.

    See Also
    --------
    get_math_fontfamily
    """
    self._fontproperties.set_math_fontfamily(fontfamily)
    # Every other font setter invalidates the artist; without this, changing
    # the math font family would not trigger a redraw.
    self.stale = True
def set_fontweight(self, weight):
    """
    Set the font weight.

    Parameters
    ----------
    weight : {a numeric value in range 0-1000, 'ultralight', 'light', \
'normal', 'regular', 'book', 'medium', 'roman', 'semibold', 'demibold', \
'demi', 'bold', 'heavy', 'extra bold', 'black'}

    See Also
    --------
    .font_manager.FontProperties.set_weight
    """
    # Weight interpretation (names vs. numbers) lives in FontProperties.
    self._fontproperties.set_weight(weight)
    self.stale = True
def set_fontstretch(self, stretch):
    """
    Set the font stretch (horizontal condensation or expansion).

    Parameters
    ----------
    stretch : {a numeric value in range 0-1000, 'ultra-condensed', \
'extra-condensed', 'condensed', 'semi-condensed', 'normal', 'semi-expanded', \
'expanded', 'extra-expanded', 'ultra-expanded'}

    See Also
    --------
    .font_manager.FontProperties.set_stretch
    """
    # Delegate to the underlying FontProperties.
    self._fontproperties.set_stretch(stretch)
    self.stale = True
def set_position(self, xy):
    """
    Set the (*x*, *y*) position of the text.

    Parameters
    ----------
    xy : (float, float)
    """
    # Route through the individual setters so each marks the artist stale.
    self.set_x(xy[0])
    self.set_y(xy[1])
def set_x(self, x):
    """
    Set the *x* position of the text.

    Parameters
    ----------
    x : float
    """
    self._x = x
    # Mark the artist as modified so it is redrawn.
    self.stale = True
def set_y(self, y):
    """
    Set the *y* position of the text.

    Parameters
    ----------
    y : float
    """
    self._y = y
    # Mark the artist as modified so it is redrawn.
    self.stale = True
def set_rotation(self, s):
    """
    Set the rotation of the text.

    Parameters
    ----------
    s : float or {'vertical', 'horizontal'}
        The rotation angle in degrees in mathematically positive direction
        (counterclockwise). 'horizontal' equals 0, 'vertical' equals 90.
    """
    # Stored as given; interpretation happens when the text is laid out.
    self._rotation = s
    self.stale = True
def set_transform_rotates_text(self, t):
    """
    Whether rotations of the transform affect the text direction.

    Parameters
    ----------
    t : bool
    """
    self._transform_rotates_text = t
    # Redraw needed: the effective text angle may have changed.
    self.stale = True
def set_verticalalignment(self, align):
    """
    Set the vertical alignment.

    Parameters
    ----------
    align : {'center', 'top', 'bottom', 'baseline', 'center_baseline'}
    """
    # Validate before storing so an invalid value fails immediately.
    _api.check_in_list(
        ['top', 'bottom', 'center', 'baseline', 'center_baseline'],
        align=align)
    self._verticalalignment = align
    self.stale = True
def set_text(self, s):
    r"""
    Set the text string *s*.

    It may contain newlines (``\n``) or math in LaTeX syntax.

    Parameters
    ----------
    s : object
        Any object gets converted to its `str` representation, except for
        ``None`` which is converted to an empty string.
    """
    s = '' if s is None else s
    # Compare *before* str() conversion, matching the stored value's type
    # semantics; an equal value leaves the artist untouched (not stale).
    if s == self._text:
        return
    self._text = str(s)
    self.stale = True
def _preprocess_math(self, s):
"""
Return the string *s* after mathtext preprocessing, and the kind of
mathtext support needed.
- If *self* is configured to use TeX, return *s* unchanged except that
a single space gets escaped, and the flag "TeX".
- Otherwise, if *s* is mathtext (has an even number of unescaped dollar
signs) and ``parse_math`` is not set to False, return *s* and the
flag True.
- Otherwise, return *s* with dollar signs unescaped, and the flag
False.
"""
if self.get_usetex():
if s == " ":
s = r"\ "
return s, "TeX"
elif not self.get_parse_math():
return s, False
elif cbook.is_math_text(s):
return s, True
else:
return s.replace(r"\$", "$"), False
def set_fontproperties(self, fp):
    """
    Set the font properties that control the text.

    Parameters
    ----------
    fp : `.font_manager.FontProperties` or `str` or `pathlib.Path`
        If a `str`, it is interpreted as a fontconfig pattern parsed by
        `.FontProperties`.  If a `pathlib.Path`, it is interpreted as the
        absolute path to a font file.
    """
    # Defensive copy: later mutation of the caller's FontProperties must
    # not silently change this Text.
    self._fontproperties = FontProperties._from_any(fp).copy()
    self.stale = True
def set_usetex(self, usetex):
    """
    Parameters
    ----------
    usetex : bool or None
        Whether to render using TeX, ``None`` means to use
        :rc:`text.usetex`.
    """
    # None defers to the rcParam; anything else is coerced to bool.
    self._usetex = (mpl.rcParams['text.usetex'] if usetex is None
                    else bool(usetex))
    self.stale = True
def get_usetex(self):
    """Return whether TeX is used for rendering this `Text` object."""
    return self._usetex
def set_parse_math(self, parse_math):
    """
    Override switch to disable any mathtext parsing for this `Text`.

    Parameters
    ----------
    parse_math : bool
        If False, this `Text` will never use mathtext.  If True, mathtext
        will be used if there is an even number of unescaped dollar signs.
    """
    # Note: deliberately does not touch the stale flag; the rendered
    # output only changes the next time the text itself is processed.
    self._parse_math = bool(parse_math)
def get_parse_math(self):
    """Return whether mathtext parsing is considered for this `Text`."""
    return self._parse_math
def set_fontname(self, fontname):
    """
    Alias for `set_family`.

    One-way alias only: the getter differs.

    Parameters
    ----------
    fontname : {FONTNAME, 'serif', 'sans-serif', 'cursive', 'fantasy', \
'monospace'}

    See Also
    --------
    .font_manager.FontProperties.set_family
    """
    # Forward directly; keep set_family's return value (one-way alias).
    return self.set_family(fontname)
class OffsetFrom:
    """Callable helper class for working with `Annotation`."""

    def __init__(self, artist, ref_coord, unit="points"):
        """
        Parameters
        ----------
        artist : `.Artist` or `.BboxBase` or `.Transform`
            The object to compute the offset from.
        ref_coord : (float, float)
            If *artist* is an `.Artist` or `.BboxBase`, this is the offset
            origin, in fractions of the *artist* bounding box.  If *artist*
            is a transform, the offset origin is the transform applied to
            this value.
        unit : {'points', 'pixels'}, default: 'points'
            The screen units to use (pixels or points) for the offset input.
        """
        self._artist = artist
        self._ref_coord = ref_coord
        self.set_unit(unit)

    def set_unit(self, unit):
        """
        Set the unit for input to the transform used by ``__call__``.

        Parameters
        ----------
        unit : {'points', 'pixels'}
        """
        _api.check_in_list(["points", "pixels"], unit=unit)
        self._unit = unit

    def get_unit(self):
        """Return the unit for input to the transform used by ``__call__``."""
        return self._unit

    def _get_scale(self, renderer):
        # Pixels map one-to-one; points are converted via the renderer dpi.
        if self.get_unit() == "pixels":
            return 1.
        return renderer.points_to_pixels(1.)

    def __call__(self, renderer):
        """
        Return the offset transform.

        Parameters
        ----------
        renderer : `RendererBase`
            The renderer to use to compute the offset

        Returns
        -------
        `Transform`
            Maps (x, y) in pixel or point units to screen units
            relative to the given artist.
        """
        # Resolve the offset origin (x, y) in display coordinates.
        artist = self._artist
        bbox = None
        if isinstance(artist, Artist):
            bbox = artist.get_window_extent(renderer)
        elif isinstance(artist, BboxBase):
            bbox = artist
        elif isinstance(artist, Transform):
            x, y = artist.transform(self._ref_coord)
        else:
            raise RuntimeError("unknown type")
        if bbox is not None:
            # ref_coord is a fractional position within the bounding box.
            xf, yf = self._ref_coord
            x = bbox.x0 + bbox.width * xf
            y = bbox.y0 + bbox.height * yf
        scale = self._get_scale(renderer)
        return Affine2D().scale(scale).translate(x, y)
class _AnnotationBase:
    # Shared machinery for artists anchored to a point *xy* expressed in a
    # coordinate system *xycoords*: coordinate resolution, clipping, and
    # draggability.

    def __init__(self,
                 xy,
                 xycoords='data',
                 annotation_clip=None):
        self.xy = xy
        self.xycoords = xycoords
        self.set_annotation_clip(annotation_clip)

        self._draggable = None

    def _get_xy(self, renderer, x, y, s):
        # Return (x, y) in display (pixel) coordinates; *s* is the
        # coordinate system, possibly an (xcoords, ycoords) pair.
        if isinstance(s, tuple):
            s1, s2 = s
        else:
            s1, s2 = s, s
        # Unit conversion (dates, categoricals, ...) only applies to data
        # coordinates.
        if s1 == 'data':
            x = float(self.convert_xunits(x))
        if s2 == 'data':
            y = float(self.convert_yunits(y))
        return self._get_xy_transform(renderer, s).transform((x, y))

    def _get_xy_transform(self, renderer, s):
        # Resolve a coordinate specification *s* into a `Transform` mapping
        # annotated values to display coordinates.
        if isinstance(s, tuple):
            # Separate systems for x and y: blend the two transforms.
            s1, s2 = s
            from matplotlib.transforms import blended_transform_factory
            tr1 = self._get_xy_transform(renderer, s1)
            tr2 = self._get_xy_transform(renderer, s2)
            tr = blended_transform_factory(tr1, tr2)
            return tr
        elif callable(s):
            # A callable may yield either a bbox or a ready-made transform.
            tr = s(renderer)
            if isinstance(tr, BboxBase):
                return BboxTransformTo(tr)
            elif isinstance(tr, Transform):
                return tr
            else:
                raise RuntimeError("unknown return type ...")
        elif isinstance(s, Artist):
            bbox = s.get_window_extent(renderer)
            return BboxTransformTo(bbox)
        elif isinstance(s, BboxBase):
            return BboxTransformTo(s)
        elif isinstance(s, Transform):
            return s
        elif not isinstance(s, str):
            raise RuntimeError("unknown coordinate type : %s" % s)

        # From here on, *s* is a string.
        if s == 'data':
            return self.axes.transData
        elif s == 'polar':
            from matplotlib.projections import PolarAxes
            tr = PolarAxes.PolarTransform()
            trans = tr + self.axes.transData
            return trans

        # Remaining forms are "<bbox_name> <unit>", e.g. "axes fraction"
        # or "offset points".
        s_ = s.split()
        if len(s_) != 2:
            raise ValueError("%s is not a recognized coordinate" % s)

        bbox0, xy0 = None, None

        bbox_name, unit = s_
        # if unit is offset-like
        if bbox_name == "figure":
            bbox0 = self.figure.figbbox
        elif bbox_name == "subfigure":
            bbox0 = self.figure.bbox
        elif bbox_name == "axes":
            bbox0 = self.axes.bbox
        # elif bbox_name == "bbox":
        #     if bbox is None:
        #         raise RuntimeError("bbox is specified as a coordinate but "
        #                            "never set")
        #     bbox0 = self._get_bbox(renderer, bbox)

        if bbox0 is not None:
            xy0 = bbox0.p0
        elif bbox_name == "offset":
            # Offsets are measured from the annotated point itself.
            xy0 = self._get_ref_xy(renderer)

        if xy0 is not None:
            # reference x, y in display coordinate
            ref_x, ref_y = xy0
            if unit == "points":
                # dots per points
                dpp = self.figure.dpi / 72
                tr = Affine2D().scale(dpp)
            elif unit == "pixels":
                tr = Affine2D()
            elif unit == "fontsize":
                fontsize = self.get_size()
                dpp = fontsize * self.figure.dpi / 72
                tr = Affine2D().scale(dpp)
            elif unit == "fraction":
                w, h = bbox0.size
                tr = Affine2D().scale(w, h)
            else:
                raise ValueError("%s is not a recognized coordinate" % s)

            return tr.translate(ref_x, ref_y)
        else:
            raise ValueError("%s is not a recognized coordinate" % s)

    def _get_ref_xy(self, renderer):
        """
        Return x, y (in display coordinates) that is to be used for a reference
        of any offset coordinate.
        """
        return self._get_xy(renderer, *self.xy, self.xycoords)

    # def _get_bbox(self, renderer):
    #     if hasattr(bbox, "bounds"):
    #         return bbox
    #     elif hasattr(bbox, "get_window_extent"):
    #         bbox = bbox.get_window_extent()
    #         return bbox
    #     else:
    #         raise ValueError("A bbox instance is expected but got %s" %
    #                          str(bbox))

    def set_annotation_clip(self, b):
        """
        Set the annotation's clipping behavior.

        Parameters
        ----------
        b : bool or None
            - True: the annotation will only be drawn when ``self.xy`` is
              inside the axes.
            - False: the annotation will always be drawn regardless of its
              position.
            - None: the ``self.xy`` will be checked only if *xycoords* is
              "data".
        """
        self._annotation_clip = b

    def get_annotation_clip(self):
        """
        Return the annotation's clipping behavior.

        See `set_annotation_clip` for the meaning of return values.
        """
        return self._annotation_clip

    def _get_position_xy(self, renderer):
        """Return the pixel position of the annotated point."""
        x, y = self.xy
        return self._get_xy(renderer, x, y, self.xycoords)

    def _check_xy(self, renderer):
        """Check whether the annotation at *xy_pixel* should be drawn."""
        b = self.get_annotation_clip()
        if b or (b is None and self.xycoords == "data"):
            # check if self.xy is inside the axes.
            xy_pixel = self._get_position_xy(renderer)
            return self.axes.contains_point(xy_pixel)
        return True

    def draggable(self, state=None, use_blit=False):
        """
        Set whether the annotation is draggable with the mouse.

        Parameters
        ----------
        state : bool or None
            - True or False: set the draggability.
            - None: toggle the draggability.

        Returns
        -------
        DraggableAnnotation or None
            If the annotation is draggable, the corresponding
            `.DraggableAnnotation` helper is returned.
        """
        from matplotlib.offsetbox import DraggableAnnotation
        is_draggable = self._draggable is not None

        # if state is None we'll toggle
        if state is None:
            state = not is_draggable

        if state:
            if self._draggable is None:
                self._draggable = DraggableAnnotation(self, use_blit)
        else:
            if self._draggable is not None:
                self._draggable.disconnect()
            self._draggable = None

        return self._draggable
class Annotation(Text, _AnnotationBase):
    """
    An `.Annotation` is a `.Text` that can refer to a specific position *xy*.
    Optionally an arrow pointing from the text to *xy* can be drawn.

    Attributes
    ----------
    xy
        The annotated position.
    xycoords
        The coordinate system for *xy*.
    arrow_patch
        A `.FancyArrowPatch` to point from *xytext* to *xy*.
    """

    def __str__(self):
        return "Annotation(%g, %g, %r)" % (self.xy[0], self.xy[1], self._text)

    def __init__(self, text, xy,
                 xytext=None,
                 xycoords='data',
                 textcoords=None,
                 arrowprops=None,
                 annotation_clip=None,
                 **kwargs):
        """
        Annotate the point *xy* with text *text*.

        In the simplest form, the text is placed at *xy*.

        Optionally, the text can be displayed in another position *xytext*.
        An arrow pointing from the text to the annotated point *xy* can then
        be added by defining *arrowprops*.

        Parameters
        ----------
        text : str
            The text of the annotation.

        xy : (float, float)
            The point *(x, y)* to annotate. The coordinate system is determined
            by *xycoords*.

        xytext : (float, float), default: *xy*
            The position *(x, y)* to place the text at. The coordinate system
            is determined by *textcoords*.

        xycoords : str or `.Artist` or `.Transform` or callable or \
(float, float), default: 'data'

            The coordinate system that *xy* is given in. The following types
            of values are supported:

            - One of the following strings:

              ==================== ============================================
              Value                Description
              ==================== ============================================
              'figure points'      Points from the lower left of the figure
              'figure pixels'      Pixels from the lower left of the figure
              'figure fraction'    Fraction of figure from lower left
              'subfigure points'   Points from the lower left of the subfigure
              'subfigure pixels'   Pixels from the lower left of the subfigure
              'subfigure fraction' Fraction of subfigure from lower left
              'axes points'        Points from lower left corner of axes
              'axes pixels'        Pixels from lower left corner of axes
              'axes fraction'      Fraction of axes from lower left
              'data'               Use the coordinate system of the object
                                   being annotated (default)
              'polar'              *(theta, r)* if not native 'data'
                                   coordinates
              ==================== ============================================

              Note that 'subfigure pixels' and 'figure pixels' are the same
              for the parent figure, so users who want code that is usable in
              a subfigure can use 'subfigure pixels'.

            - An `.Artist`: *xy* is interpreted as a fraction of the artist's
              `~matplotlib.transforms.Bbox`. E.g. *(0, 0)* would be the lower
              left corner of the bounding box and *(0.5, 1)* would be the
              center top of the bounding box.

            - A `.Transform` to transform *xy* to screen coordinates.

            - A function with one of the following signatures::

                def transform(renderer) -> Bbox
                def transform(renderer) -> Transform

              where *renderer* is a `.RendererBase` subclass.

              The result of the function is interpreted like the `.Artist` and
              `.Transform` cases above.

            - A tuple *(xcoords, ycoords)* specifying separate coordinate
              systems for *x* and *y*. *xcoords* and *ycoords* must each be
              of one of the above described types.

            See :ref:`plotting-guide-annotation` for more details.

        textcoords : str or `.Artist` or `.Transform` or callable or \
(float, float), default: value of *xycoords*
            The coordinate system that *xytext* is given in.

            All *xycoords* values are valid as well as the following
            strings:

            ================= =========================================
            Value             Description
            ================= =========================================
            'offset points'   Offset (in points) from the *xy* value
            'offset pixels'   Offset (in pixels) from the *xy* value
            ================= =========================================

        arrowprops : dict, optional
            The properties used to draw a `.FancyArrowPatch` arrow between the
            positions *xy* and *xytext*. Defaults to None, i.e. no arrow is
            drawn.

            For historical reasons there are two different ways to specify
            arrows, "simple" and "fancy":

            **Simple arrow:**

            If *arrowprops* does not contain the key 'arrowstyle' the
            allowed keys are:

            ========== ======================================================
            Key        Description
            ========== ======================================================
            width      The width of the arrow in points
            headwidth  The width of the base of the arrow head in points
            headlength The length of the arrow head in points
            shrink     Fraction of total length to shrink from both ends
            ?          Any key to :class:`matplotlib.patches.FancyArrowPatch`
            ========== ======================================================

            The arrow is attached to the edge of the text box, the exact
            position (corners or centers) depending on where it's pointing to.

            **Fancy arrow:**

            This is used if 'arrowstyle' is provided in the *arrowprops*.

            Valid keys are the following `~matplotlib.patches.FancyArrowPatch`
            parameters:

            =============== ==================================================
            Key             Description
            =============== ==================================================
            arrowstyle      the arrow style
            connectionstyle the connection style
            relpos          see below; default is (0.5, 0.5)
            patchA          default is bounding box of the text
            patchB          default is None
            shrinkA         default is 2 points
            shrinkB         default is 2 points
            mutation_scale  default is text size (in points)
            mutation_aspect default is 1.
            ?               any key for :class:`matplotlib.patches.PathPatch`
            =============== ==================================================

            The exact starting point position of the arrow is defined by
            *relpos*. It's a tuple of relative coordinates of the text box,
            where (0, 0) is the lower left corner and (1, 1) is the upper
            right corner. Values <0 and >1 are supported and specify points
            outside the text box. By default (0.5, 0.5) the starting point is
            centered in the text box.

        annotation_clip : bool or None, default: None
            Whether to draw the annotation when the annotation point *xy* is
            outside the axes area.

            - If *True*, the annotation will only be drawn when *xy* is
              within the axes.
            - If *False*, the annotation will always be drawn.
            - If *None*, the annotation will only be drawn when *xy* is
              within the axes and *xycoords* is 'data'.

        **kwargs
            Additional kwargs are passed to `~matplotlib.text.Text`.

        Returns
        -------
        `.Annotation`

        See Also
        --------
        :ref:`plotting-guide-annotation`
        """
        _AnnotationBase.__init__(self,
                                 xy,
                                 xycoords=xycoords,
                                 annotation_clip=annotation_clip)
        # warn about wonky input data
        if (xytext is None and
                textcoords is not None and
                textcoords != xycoords):
            _api.warn_external("You have used the `textcoords` kwarg, but "
                               "not the `xytext` kwarg. This can lead to "
                               "surprising results.")

        # clean up textcoords and assign default
        if textcoords is None:
            textcoords = self.xycoords
        self._textcoords = textcoords

        # cleanup xytext defaults
        if xytext is None:
            xytext = self.xy
        x, y = xytext

        self.arrowprops = arrowprops
        if arrowprops is not None:
            # Keep the original dict intact; we pop consumed keys below.
            arrowprops = arrowprops.copy()
            if "arrowstyle" in arrowprops:
                self._arrow_relpos = arrowprops.pop("relpos", (0.5, 0.5))
            else:
                # modified YAArrow API to be used with FancyArrowPatch
                for key in [
                        'width', 'headwidth', 'headlength', 'shrink', 'frac']:
                    arrowprops.pop(key, None)
            # Dummy endpoints; the real ones are set in update_positions().
            self.arrow_patch = FancyArrowPatch((0, 0), (1, 1), **arrowprops)
        else:
            self.arrow_patch = None

        # Must come last, as some kwargs may be propagated to arrow_patch.
        Text.__init__(self, x, y, text, **kwargs)

    def contains(self, event):
        # Hit-test both the text and (if any) the arrow patch.
        inside, info = self._default_contains(event)
        if inside is not None:
            return inside, info
        contains, tinfo = Text.contains(self, event)
        if self.arrow_patch is not None:
            in_patch, _ = self.arrow_patch.contains(event)
            contains = contains or in_patch
        return contains, tinfo

    @property
    def xycoords(self):
        return self._xycoords

    @xycoords.setter
    def xycoords(self, xycoords):
        # "offset ..." systems are only meaningful for the text position
        # (they are offsets *from* xy), so reject them for xy itself.
        def is_offset(s):
            return isinstance(s, str) and s.startswith("offset")

        if (isinstance(xycoords, tuple) and any(map(is_offset, xycoords))
                or is_offset(xycoords)):
            raise ValueError("xycoords cannot be an offset coordinate")
        self._xycoords = xycoords

    @property
    def xyann(self):
        """
        The text position.

        See also *xytext* in `.Annotation`.
        """
        return self.get_position()

    @xyann.setter
    def xyann(self, xytext):
        self.set_position(xytext)

    def get_anncoords(self):
        """
        Return the coordinate system to use for `.Annotation.xyann`.

        See also *xycoords* in `.Annotation`.
        """
        return self._textcoords

    def set_anncoords(self, coords):
        """
        Set the coordinate system to use for `.Annotation.xyann`.

        See also *xycoords* in `.Annotation`.
        """
        self._textcoords = coords

    anncoords = property(get_anncoords, set_anncoords, doc="""
        The coordinate system to use for `.Annotation.xyann`.""")

    def set_figure(self, fig):
        # docstring inherited
        # Keep the arrow patch on the same figure as the text.
        if self.arrow_patch is not None:
            self.arrow_patch.set_figure(fig)
        Artist.set_figure(self, fig)

    def update_positions(self, renderer):
        """
        Update the pixel positions of the annotation text and the arrow patch.
        """
        x1, y1 = self._get_position_xy(renderer)  # Annotated position.
        # generate transformation,
        self.set_transform(self._get_xy_transform(renderer, self.anncoords))

        if self.arrowprops is None:
            return

        bbox = Text.get_window_extent(self, renderer)

        d = self.arrowprops.copy()
        ms = d.pop("mutation_scale", self.get_size())
        self.arrow_patch.set_mutation_scale(ms)

        if "arrowstyle" not in d:
            # Approximately simulate the YAArrow.
            # Pop its kwargs:
            shrink = d.pop('shrink', 0.0)
            width = d.pop('width', 4)
            headwidth = d.pop('headwidth', 12)
            # Ignore frac--it is useless.
            frac = d.pop('frac', None)
            if frac is not None:
                _api.warn_external(
                    "'frac' option in 'arrowprops' is no longer supported;"
                    " use 'headlength' to set the head length in points.")
            headlength = d.pop('headlength', 12)

            # NB: ms is in pts
            stylekw = dict(head_length=headlength / ms,
                           head_width=headwidth / ms,
                           tail_width=width / ms)

            self.arrow_patch.set_arrowstyle('simple', **stylekw)

            # using YAArrow style:
            # pick the corner of the text bbox closest to annotated point.
            xpos = [(bbox.x0, 0), ((bbox.x0 + bbox.x1) / 2, 0.5), (bbox.x1, 1)]
            ypos = [(bbox.y0, 0), ((bbox.y0 + bbox.y1) / 2, 0.5), (bbox.y1, 1)]
            x, relposx = min(xpos, key=lambda v: abs(v[0] - x1))
            y, relposy = min(ypos, key=lambda v: abs(v[0] - y1))
            self._arrow_relpos = (relposx, relposy)
            r = np.hypot(y - y1, x - x1)
            shrink_pts = shrink * r / renderer.points_to_pixels(1)
            self.arrow_patch.shrinkA = self.arrow_patch.shrinkB = shrink_pts

        # adjust the starting point of the arrow relative to the textbox.
        # TODO : Rotation needs to be accounted.
        relposx, relposy = self._arrow_relpos
        x0 = bbox.x0 + bbox.width * relposx
        y0 = bbox.y0 + bbox.height * relposy

        # The arrow will be drawn from (x0, y0) to (x1, y1). It will be first
        # clipped by patchA and patchB. Then it will be shrunk by shrinkA and
        # shrinkB (in points). If patch A is not set, self.bbox_patch is used.
        self.arrow_patch.set_positions((x0, y0), (x1, y1))

        if "patchA" in d:
            self.arrow_patch.set_patchA(d.pop("patchA"))
        else:
            if self._bbox_patch:
                self.arrow_patch.set_patchA(self._bbox_patch)
            else:
                if self.get_text() == "":
                    self.arrow_patch.set_patchA(None)
                    return
                # Clip the arrow against a padded rectangle around the text.
                pad = renderer.points_to_pixels(4)
                r = Rectangle(xy=(bbox.x0 - pad / 2, bbox.y0 - pad / 2),
                              width=bbox.width + pad, height=bbox.height + pad,
                              transform=IdentityTransform(), clip_on=False)
                self.arrow_patch.set_patchA(r)

    @artist.allow_rasterization
    def draw(self, renderer):
        # docstring inherited
        if renderer is not None:
            self._renderer = renderer
        if not self.get_visible() or not self._check_xy(renderer):
            return
        # Update text positions before `Text.draw` would, so that the
        # FancyArrowPatch is correctly positioned.
        self.update_positions(renderer)
        self.update_bbox_position_size(renderer)
        if self.arrow_patch is not None:  # FancyArrowPatch
            if self.arrow_patch.figure is None and self.figure is not None:
                self.arrow_patch.figure = self.figure
            self.arrow_patch.draw(renderer)
        # Draw text, including FancyBboxPatch, after FancyArrowPatch.
        # Otherwise, a wedge arrowstyle can land partly on top of the Bbox.
        Text.draw(self, renderer)

    def get_window_extent(self, renderer=None):
        # docstring inherited
        # This block is the same as in Text.get_window_extent, but we need to
        # set the renderer before calling update_positions().
        if not self.get_visible() or not self._check_xy(renderer):
            return Bbox.unit()
        if renderer is not None:
            self._renderer = renderer
        if self._renderer is None:
            self._renderer = self.figure._cachedRenderer
        if self._renderer is None:
            raise RuntimeError('Cannot get window extent w/o renderer')

        self.update_positions(self._renderer)

        # The extent includes the arrow patch, if present.
        text_bbox = Text.get_window_extent(self)
        bboxes = [text_bbox]

        if self.arrow_patch is not None:
            bboxes.append(self.arrow_patch.get_window_extent())

        return Bbox.union(bboxes)

    def get_tightbbox(self, renderer):
        # docstring inherited
        if not self._check_xy(renderer):
            return Bbox.null()
        return super().get_tightbbox(renderer)
# Register the Annotation docstring for %-interpolation into other
# docstrings.  (A dataset-concatenation artifact "| PypiClean" that had been
# fused onto this line — a NameError at runtime — has been removed.)
docstring.interpd.update(Annotation=Annotation.__init__.__doc__)
/*!
 * bsCustomFileInput (UMD build)
 *
 * Keeps Bootstrap custom file input labels in sync with the file(s) the user
 * selects, and restores the default label text when the enclosing form is
 * reset.  (A dataset-concatenation artifact — a tarball path fused onto the
 * first line — has been removed; the code itself is unchanged.)
 */
(function (global, factory) {
  // Standard UMD dance: CommonJS, then AMD, then a browser global.
  typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
  typeof define === 'function' && define.amd ? define(factory) :
  (global = global || self, global.bsCustomFileInput = factory());
}(this, function () {
  'use strict';

  var Selector = {
    CUSTOMFILE: '.custom-file input[type="file"]',
    CUSTOMFILELABEL: '.custom-file-label',
    FORM: 'form',
    INPUT: 'input'
  };
  // Node.TEXT_NODE
  var textNodeType = 3;

  // The default label text is whatever the markup shipped inside
  // .custom-file-label.
  var getDefaultText = function getDefaultText(input) {
    var defaultText = '';
    var label = input.parentNode.querySelector(Selector.CUSTOMFILELABEL);

    if (label) {
      defaultText = label.innerHTML;
    }

    return defaultText;
  };

  // Return the first non-text child of *element*, or *element* itself.
  // Some themes wrap the label text in an inner element.
  var findFirstChildNode = function findFirstChildNode(element) {
    if (element.childNodes.length > 0) {
      var childNodes = [].slice.call(element.childNodes);

      for (var i = 0; i < childNodes.length; i++) {
        var node = childNodes[i];

        if (node.nodeType !== textNodeType) {
          return node;
        }
      }
    }

    return element;
  };

  // Put back the label text captured at init() time.
  var restoreDefaultText = function restoreDefaultText(input) {
    var defaultText = input.bsCustomFileInput.defaultText;
    var label = input.parentNode.querySelector(Selector.CUSTOMFILELABEL);

    if (label) {
      var element = findFirstChildNode(label);
      element.innerHTML = defaultText;
    }
  };

  var fileApi = !!window.File;
  // Browsers report selected paths as "C:\fakepath\<name>" for privacy.
  var FAKE_PATH = 'fakepath';
  var FAKE_PATH_SEPARATOR = '\\';

  // Build the label text for the current selection: a comma-separated list
  // for multiple files, otherwise the bare file name.
  var getSelectedFiles = function getSelectedFiles(input) {
    if (input.hasAttribute('multiple') && fileApi) {
      return [].slice.call(input.files).map(function (file) {
        return file.name;
      }).join(', ');
    }

    if (input.value.indexOf(FAKE_PATH) !== -1) {
      var splittedValue = input.value.split(FAKE_PATH_SEPARATOR);
      return splittedValue[splittedValue.length - 1];
    }

    return input.value;
  };

  // "change" handler: reflect the selection in the label, or restore the
  // default text when the selection is cleared.
  function handleInputChange() {
    var label = this.parentNode.querySelector(Selector.CUSTOMFILELABEL);

    if (label) {
      var element = findFirstChildNode(label);
      var inputValue = getSelectedFiles(this);

      if (inputValue.length) {
        element.innerHTML = inputValue;
      } else {
        restoreDefaultText(this);
      }
    }
  }

  // "reset" handler: restore every initialized input inside the form.
  function handleFormReset() {
    var customFileList = [].slice.call(this.querySelectorAll(Selector.INPUT)).filter(function (input) {
      return !!input.bsCustomFileInput;
    });

    for (var i = 0, len = customFileList.length; i < len; i++) {
      restoreDefaultText(customFileList[i]);
    }
  }

  // Marker property used to recognize initialized inputs/forms.
  var customProperty = 'bsCustomFileInput';
  var Event = {
    FORMRESET: 'reset',
    INPUTCHANGE: 'change'
  };

  var bsCustomFileInput = {
    init: function init(inputSelector, formSelector) {
      if (inputSelector === void 0) {
        inputSelector = Selector.CUSTOMFILE;
      }

      if (formSelector === void 0) {
        formSelector = Selector.FORM;
      }

      var customFileInputList = [].slice.call(document.querySelectorAll(inputSelector));
      var formList = [].slice.call(document.querySelectorAll(formSelector));

      for (var i = 0, len = customFileInputList.length; i < len; i++) {
        var input = customFileInputList[i];
        // Remember the default label text so it can be restored later.
        Object.defineProperty(input, customProperty, {
          value: {
            defaultText: getDefaultText(input)
          },
          writable: true
        });
        // Sync immediately in case the browser restored a selection.
        handleInputChange.call(input);
        input.addEventListener(Event.INPUTCHANGE, handleInputChange);
      }

      for (var _i = 0, _len = formList.length; _i < _len; _i++) {
        formList[_i].addEventListener(Event.FORMRESET, handleFormReset);
        Object.defineProperty(formList[_i], customProperty, {
          value: true,
          writable: true
        });
      }
    },
    destroy: function destroy() {
      var formList = [].slice.call(document.querySelectorAll(Selector.FORM)).filter(function (form) {
        return !!form.bsCustomFileInput;
      });
      var customFileInputList = [].slice.call(document.querySelectorAll(Selector.INPUT)).filter(function (input) {
        return !!input.bsCustomFileInput;
      });

      for (var i = 0, len = customFileInputList.length; i < len; i++) {
        var input = customFileInputList[i];
        restoreDefaultText(input);
        input[customProperty] = undefined;
        input.removeEventListener(Event.INPUTCHANGE, handleInputChange);
      }

      for (var _i2 = 0, _len2 = formList.length; _i2 < _len2; _i2++) {
        formList[_i2].removeEventListener(Event.FORMRESET, handleFormReset);
        formList[_i2][customProperty] = undefined;
      }
    }
  };

  return bsCustomFileInput;
}));
//# sourceMappingURL=bs-custom-file-input.js.map
// Auto-initialize all custom file inputs once the DOM has been parsed.
document.addEventListener("DOMContentLoaded", function () {
    bsCustomFileInput.init()
});
Python types
############
.. _wrappers:
Available wrappers
==================
All major Python types are available as thin C++ wrapper classes. These
can also be used as function parameters -- see :ref:`python_objects_as_args`.
Available types include :class:`handle`, :class:`object`, :class:`bool_`,
:class:`int_`, :class:`float_`, :class:`str`, :class:`bytes`, :class:`tuple`,
:class:`list`, :class:`dict`, :class:`slice`, :class:`none`, :class:`capsule`,
:class:`iterable`, :class:`iterator`, :class:`function`, :class:`buffer`,
:class:`array`, and :class:`array_t`.
.. warning::
Be sure to review the :ref:`pytypes_gotchas` before using this heavily in
your C++ API.
.. _instantiating_compound_types:
Instantiating compound Python types from C++
============================================
Dictionaries can be initialized in the :class:`dict` constructor:
.. code-block:: cpp
using namespace pybind11::literals; // to bring in the `_a` literal
py::dict d("spam"_a=py::none(), "eggs"_a=42);
A tuple of python objects can be instantiated using :func:`py::make_tuple`:
.. code-block:: cpp
py::tuple tup = py::make_tuple(42, py::none(), "spam");
Each element is converted to a supported Python type.
A `simple namespace`_ can be instantiated using
.. code-block:: cpp
using namespace pybind11::literals; // to bring in the `_a` literal
py::object SimpleNamespace = py::module_::import("types").attr("SimpleNamespace");
py::object ns = SimpleNamespace("spam"_a=py::none(), "eggs"_a=42);
Attributes on a namespace can be modified with the :func:`py::delattr`,
:func:`py::getattr`, and :func:`py::setattr` functions. Simple namespaces can
be useful as lightweight stand-ins for class instances.
.. _simple namespace: https://docs.python.org/3/library/types.html#types.SimpleNamespace
.. _casting_back_and_forth:
Casting back and forth
======================
In this kind of mixed code, it is often necessary to convert arbitrary C++
types to Python, which can be done using :func:`py::cast`:
.. code-block:: cpp
MyClass *cls = ...;
py::object obj = py::cast(cls);
The reverse direction uses the following syntax:
.. code-block:: cpp
py::object obj = ...;
MyClass *cls = obj.cast<MyClass *>();
When conversion fails, both directions throw the exception :class:`cast_error`.
.. _python_libs:
Accessing Python libraries from C++
===================================
It is also possible to import objects defined in the Python standard
library or available in the current Python environment (``sys.path``) and work
with these in C++.
This example obtains a reference to the Python ``Decimal`` class.
.. code-block:: cpp
// Equivalent to "from decimal import Decimal"
py::object Decimal = py::module_::import("decimal").attr("Decimal");
.. code-block:: cpp
// Try to import scipy
py::object scipy = py::module_::import("scipy");
return scipy.attr("__version__");
.. _calling_python_functions:
Calling Python functions
========================
It is also possible to call Python classes, functions and methods
via ``operator()``.
.. code-block:: cpp
// Construct a Python object of class Decimal
py::object pi = Decimal("3.14159");
.. code-block:: cpp
// Use Python to make our directories
py::object os = py::module_::import("os");
py::object makedirs = os.attr("makedirs");
makedirs("/tmp/path/to/somewhere");
One can convert the result obtained from Python to a pure C++ version
if a ``py::class_`` or type conversion is defined.
.. code-block:: cpp
py::function f = <...>;
py::object result_py = f(1234, "hello", some_instance);
MyClass &result = result_py.cast<MyClass>();
.. _calling_python_methods:
Calling Python methods
========================
To call an object's method, one can again use ``.attr`` to obtain access to the
Python method.
.. code-block:: cpp
// Calculate e^π in decimal
py::object exp_pi = pi.attr("exp")();
py::print(py::str(exp_pi));
In the example above ``pi.attr("exp")`` is a *bound method*: it will always call
the method for that same instance of the class. Alternately one can create an
*unbound method* via the Python class (instead of instance) and pass the ``self``
object explicitly, followed by other arguments.
.. code-block:: cpp
py::object decimal_exp = Decimal.attr("exp");
// Compute the e^n for n=0..4
for (int n = 0; n < 5; n++) {
py::print(decimal_exp(Decimal(n)));
}
Keyword arguments
=================
Keyword arguments are also supported. In Python, there is the usual call syntax:
.. code-block:: python
def f(number, say, to):
... # function code
f(1234, say="hello", to=some_instance) # keyword call in Python
In C++, the same call can be made using:
.. code-block:: cpp
using namespace pybind11::literals; // to bring in the `_a` literal
f(1234, "say"_a="hello", "to"_a=some_instance); // keyword call in C++
Unpacking arguments
===================
Unpacking of ``*args`` and ``**kwargs`` is also possible and can be mixed with
other arguments:
.. code-block:: cpp
// * unpacking
py::tuple args = py::make_tuple(1234, "hello", some_instance);
f(*args);
// ** unpacking
py::dict kwargs = py::dict("number"_a=1234, "say"_a="hello", "to"_a=some_instance);
f(**kwargs);
// mixed keywords, * and ** unpacking
py::tuple args = py::make_tuple(1234);
py::dict kwargs = py::dict("to"_a=some_instance);
f(*args, "say"_a="hello", **kwargs);
Generalized unpacking according to PEP448_ is also supported:
.. code-block:: cpp
py::dict kwargs1 = py::dict("number"_a=1234);
py::dict kwargs2 = py::dict("to"_a=some_instance);
f(**kwargs1, "say"_a="hello", **kwargs2);
.. seealso::
The file :file:`tests/test_pytypes.cpp` contains a complete
example that demonstrates passing native Python types in more detail. The
file :file:`tests/test_callbacks.cpp` presents a few examples of calling
Python functions from C++, including keyword arguments and unpacking.
.. _PEP448: https://www.python.org/dev/peps/pep-0448/
.. _implicit_casting:
Implicit casting
================
When using the C++ interface for Python types, or calling Python functions,
objects of type :class:`object` are returned. It is possible to invoke implicit
conversions to subclasses like :class:`dict`. The same holds for the proxy objects
returned by ``operator[]`` or ``obj.attr()``.
Casting to subtypes improves code readability and allows values to be passed to
C++ functions that require a specific subtype rather than a generic :class:`object`.
.. code-block:: cpp
#include <pybind11/numpy.h>
using namespace pybind11::literals;
py::module_ os = py::module_::import("os");
py::module_ path = py::module_::import("os.path"); // like 'import os.path as path'
py::module_ np = py::module_::import("numpy"); // like 'import numpy as np'
py::str curdir_abs = path.attr("abspath")(path.attr("curdir"));
py::print(py::str("Current directory: ") + curdir_abs);
py::dict environ = os.attr("environ");
py::print(environ["HOME"]);
py::array_t<float> arr = np.attr("ones")(3, "dtype"_a="float32");
py::print(py::repr(arr + py::int_(1)));
These implicit conversions are available for subclasses of :class:`object`; there
is no need to call ``obj.cast()`` explicitly as for custom classes, see
:ref:`casting_back_and_forth`.
.. note::
If a trivial conversion via move constructor is not possible, both implicit and
explicit casting (calling ``obj.cast()``) will attempt a "rich" conversion.
For instance, ``py::list env = os.attr("environ");`` will succeed and is
equivalent to the Python code ``env = list(os.environ)`` that produces a
list of the dict keys.
.. TODO: Adapt text once PR #2349 has landed
Handling exceptions
===================
Python exceptions from wrapper classes will be thrown as a ``py::error_already_set``.
See :ref:`Handling exceptions from Python in C++
<handling_python_exceptions_cpp>` for more information on handling exceptions
raised when calling C++ wrapper classes.
.. _pytypes_gotchas:
Gotchas
=======
Default-Constructed Wrappers
----------------------------
When a wrapper type is default-constructed, it is **not** a valid Python object (i.e. it is not ``py::none()``). It is simply the same as
``PyObject*`` null pointer. To check for this, use
``static_cast<bool>(my_wrapper)``.
Assigning py::none() to wrappers
--------------------------------
You may be tempted to use types like ``py::str`` and ``py::dict`` in C++
signatures (either pure C++, or in bound signatures), and assign them default
values of ``py::none()``. However, in a best case scenario, it will fail fast
because ``None`` is not convertible to that type (e.g. ``py::dict``), or in a
worse case scenario, it will silently work but corrupt the types you want to
work with (e.g. ``py::str(py::none())`` will yield ``"None"`` in Python).
| PypiClean |
/4quila-0.36.200302-py3-none-any.whl/_4server/web.py | import json
import os
import inspect
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler, Application
from tornado.websocket import WebSocketHandler
from _4helper import _4ssert
class WebServer:
    """Small Tornado-based web server.

    ``start`` blocks the calling thread and serves two kinds of endpoints:
    a debug/echo websocket at ``/websocket``, and HTTP GET/POST requests
    dispatched by longest path-prefix match to the handler classes given
    in ``config["routes"]``.
    """

    @classmethod
    def parse_ip_port(cls, ip_port):
        """Parse an address spec into an (ip, port) pair.

        Accepts a bare port (int or digit string), served on localhost,
        or an ``"ip:port"`` string.
        """
        if isinstance(ip_port, int) or ":" not in ip_port:
            return "127.0.0.1", int(ip_port)
        else:
            ip, port = ip_port.split(":")
            return ip, int(port)

    @classmethod
    def start(cls, config):
        """Configure and run the server; this call never returns.

        :param config: dict with optional keys ``"ip"`` (default
            127.0.0.1), ``"port"`` (default 80) and ``"routes"``, a
            mapping from path prefix to handler class (default
            ``{"/": cls}``).
        """
        ip = config.get("ip", "127.0.0.1")
        port = int(config.get("port", "80"))
        routes = config.get("routes", {"/": cls})

        class _WebSocketHandler(WebSocketHandler):
            # Debug/echo websocket: logs lifecycle events and echoes
            # every message back prefixed with "got ".
            async def open(self, *args, **kwargs):
                print(f"open {args} {kwargs}")

            async def on_close(self):
                print("close")

            async def on_message(self, message):
                print(f"handling {message}")
                self.write_message(f"got {message}")

        class _Handler(RequestHandler):
            SUPPORTED_METHODS = ["GET", "POST"]

            async def get(self):
                await self.handle()

            async def post(self):
                await self.handle(True)

            async def handle(self, is_post=False):
                # NOTE(review): is_post is currently unused in the body;
                # GET and POST are dispatched identically.
                # Pick the route whose path prefix has the longest match
                # against the request path.
                match_handler = None
                max_match_length = 0
                for path, handler in routes.items():
                    if self.request.path.startswith(path):
                        match_length = len(path)
                        if match_length > max_match_length:
                            max_match_length = match_length
                            match_handler = handler
                if match_handler is None:
                    self.set_status(404)
                    self.finish()
                    return
                # The remainder of the path selects a handle_<name> method
                # on the matched handler class.
                func_name = "handle_%s" % self.request.path[max_match_length:]
                func = getattr(match_handler, func_name, None)
                if func is None:
                    self.set_status(404)
                    self.finish()
                    return
                # Request parameters come either from query/form arguments
                # or, failing that, from a JSON object body; all values
                # are coerced to str.
                if self.request.arguments:
                    request = dict(
                        (i, j[0].decode()) for i, j in self.request.arguments.items()
                    )
                else:
                    request = json.loads(self.request.body or "{}")
                    request = dict((i, str(j)) for i, j in request.items())
                # Expose raw headers/body only to handlers that declare
                # them as parameters; they must not collide with request
                # parameter names.
                func_parameters = inspect.signature(func).parameters
                for key, value in (
                    ("headers", self.request.headers),
                    ("body", self.request.body),
                ):
                    _4ssert(key not in request)
                    if key in func_parameters:
                        request[key] = value
                response = await func(**request)
                # dict responses are serialized as JSON; anything else is
                # written as-is.
                if isinstance(response, dict):
                    self.write(json.dumps(response))
                else:
                    self.write(response)
                self.finish()

        Application(
            [(r"/websocket", _WebSocketHandler), (r".*", _Handler,)],
            static_path=os.path.join(os.getcwd(), "static"),
        ).listen(port, address=ip)
        IOLoop.current().start()
/Mynus-0.1.0.tar.gz/Mynus-0.1.0/README.md | Mynus, the minimalist wiki
==========================
The purpose of Mynus is to demonstrate the philosophy of "batteries included".
Thus, Mynus will never depend on anything other than Python's standard
library.
Features
========
* Provides a tiny wiki engine to launch everywhere.
* No other dependencies than Python itself.
* Offers a `mynus` script which allows you to transform any directory into a
wiki repository.
* WSGI Compliant.
License
=======
Mynus is released under the terms of the GNU Affero General Public License v3
or later.
TODO
====
* Add showdown.js : http://attacklab.net/showdown/
* Improve the mynus command line tool
* Document the project
* Make beautiful templates
| PypiClean |
/netket-3.9.2.tar.gz/netket-3.9.2/netket/operator/_kinetic.py | from typing import Optional, Callable, Union, List
from functools import partial
import numpy as np
import jax
import jax.numpy as jnp
from netket.utils.types import DType, PyTree, Array
import netket.jax as nkjax
from netket.hilbert import AbstractHilbert
from netket.operator import ContinuousOperator
from netket.utils import HashableArray
def jacrev(f):
    """Reverse-mode Jacobian operator for ``f``.

    Returns a function mapping ``x`` to the Jacobian of ``f`` at ``x``,
    obtained by pulling cotangent basis vectors back through a
    vector-Jacobian product.
    """

    def jacfun(x):
        y, pullback = nkjax.vjp(f, x)
        if y.size == 1:
            # Scalar output: pull back a single basis covector.
            cotangents = jnp.eye(y.size, dtype=x.dtype)[0]
        else:
            # One row of the identity per output component.
            cotangents = jnp.eye(y.size, dtype=x.dtype)
        return jax.vmap(pullback, in_axes=0)(cotangents)

    return jacfun
def jacfwd(f):
    """Forward-mode Jacobian operator for ``f``.

    Returns a function mapping ``x`` to the Jacobian of ``f`` at ``x``,
    computed by pushing each canonical basis vector through a
    Jacobian-vector product.
    """

    def jacfun(x):
        def push_forward(tangent):
            # Directional derivative of f at x along `tangent`.
            return jax.jvp(f, (x,), (tangent,))[1]

        basis = jnp.eye(len(x), dtype=x.dtype)
        # One JVP per basis direction, batched with vmap.
        return jax.vmap(push_forward, in_axes=0)(basis)

    return jacfun
class KineticEnergy(ContinuousOperator):
    r"""This is the kinetic energy operator (hbar = 1). The local value is given by:
    :math:`E_{kin} = -1/2 ( \sum_i \frac{1}{m_i} (\log(\psi))'^2 + (\log(\psi))'' )`
    """

    def __init__(
        self,
        hilbert: AbstractHilbert,
        mass: Union[float, List[float]],
        dtype: Optional[DType] = None,
    ):
        r"""Args:
        hilbert: The underlying Hilbert space on which the operator is defined
        mass: float if all masses are the same, list indicating the mass of each particle otherwise
        dtype: Data type of the matrix elements. Defaults to `np.float64`
        """
        self._mass = jnp.asarray(mass, dtype=dtype)
        # Hermitian iff every mass is (numerically) real.
        self._is_hermitian = np.allclose(self._mass.imag, 0.0)
        # Lazily-built hashable identity tuple; see `_attrs`.
        self.__attrs = None
        super().__init__(hilbert, self._mass.dtype)

    @property
    def mass(self):
        """Particle mass(es) as a jax array."""
        return self._mass

    @property
    def is_hermitian(self):
        return self._is_hermitian

    def _expect_kernel_single(
        self, logpsi: Callable, params: PyTree, x: Array, mass: Optional[PyTree]
    ):
        # Local kinetic energy of a single configuration x:
        # -1/2 * sum_i coeff_i * ( d^2 log psi / dx_i^2 + (d log psi / dx_i)^2 )
        # NOTE(review): `mass` here is actually the packed coefficient 1/m
        # produced by `_pack_arguments`, not the raw mass.
        def logpsi_x(x):
            return logpsi(params, x)

        dlogpsi_x = jacrev(logpsi_x)
        # Diagonal of the Hessian of log psi at x.
        dp_dx2 = jnp.diag(jacfwd(dlogpsi_x)(x)[0].reshape(x.shape[0], x.shape[0]))
        # Elementwise square of the gradient of log psi at x.
        dp_dx = dlogpsi_x(x)[0][0] ** 2
        return -0.5 * jnp.sum(mass * (dp_dx2 + dp_dx), axis=-1)

    @partial(jax.vmap, in_axes=(None, None, None, 0, None))
    def _expect_kernel(
        self, logpsi: Callable, params: PyTree, x: Array, coefficient: Optional[PyTree]
    ):
        # Batched version of _expect_kernel_single: vmapped over the
        # sample axis of x only.
        return self._expect_kernel_single(logpsi, params, x, coefficient)

    def _pack_arguments(self) -> PyTree:
        # The kernel consumes the inverse mass 1/m rather than m itself.
        return 1.0 / self._mass

    @property
    def _attrs(self):
        # Hashable identity tuple used for caching/equality by the
        # ContinuousOperator machinery; built on first access.
        if self.__attrs is None:
            self.__attrs = (self.hilbert, self.dtype, HashableArray(self.mass))
        return self.__attrs

    def __repr__(self):
        return f"KineticEnergy(m={self._mass})"
/EvidentialToolBus-1.0.1.tar.gz/EvidentialToolBus-1.0.1/etb/terms.py | from __future__ import unicode_literals
import sys
import re
import weakref
import collections
import itertools
import json
import parser
# ----------------------------------------------------------------------
# terms
# ----------------------------------------------------------------------
def id_match(id):
    """Return a regex match object when ``id`` is a valid identifier
    string, or None otherwise.

    Used to decide whether a string should become an IdConst or a
    StringConst.
    """
    # First char: no brackets/punctuation/space/backslash/digits;
    # remaining chars: the same set but digits are allowed.
    pattern = r"[^][(){}=:`'\".,~?% \\0-9][^][(){}=:`'\".,~?% \\]*$"
    return re.match(pattern, id)
def mk_idconst(val):
    """
    Make an id constant term out of the given string value.

    :parameters:
        - `val`: a basestring or an IdConst
    :returntype:
        - a hashconsed `IdConst` instance
    """
    term = val if isinstance(val, IdConst) else IdConst(val)
    return term.hashcons()
def mk_stringconst(val):
    """
    Make a string constant term out of the given value.

    :parameters:
        - `val`: a basestring or a StringConst
    :returntype:
        - a hashconsed `StringConst` instance
    """
    term = val if isinstance(val, StringConst) else StringConst(val)
    return term.hashcons()
def mk_boolconst(val):
    """
    Make a boolean constant term out of the given value.

    :parameters:
        - `val`: a bool, basestring or a BoolConst
    :returntype:
        - a hashconsed `BoolConst` instance
    """
    term = val if isinstance(val, BoolConst) else BoolConst(val)
    return term.hashcons()
def mk_numberconst(val):
    """
    Make a number constant term out of the given value.

    :parameters:
        - `val`: a basestring or a NumberConst
    :returntype:
        - a hashconsed `NumberConst` instance
    """
    term = val if isinstance(val, NumberConst) else NumberConst(val)
    return term.hashcons()
def mk_var(val):
    """
    Make a variable term.

    :parameters:
        - `val`: a basestring or Var
    :returntype:
        - a hashconsed `Var` instance
    """
    term = val if isinstance(val, Var) else Var(val)
    return term.hashcons()
def mk_array(elems):
    """
    Make an array term from the given Term elements.
    """
    term = elems if isinstance(elems, Array) else Array(elems)
    return term.hashcons()
def mk_map(entries):
    """
    Make a map term from the given pairs. Pairs are converted to
    pairs of terms if needed.
    """
    term = entries if isinstance(entries, Map) else Map(entries)
    return term.hashcons()
def mk_literal(pred, args):
    """
    Make a hashconsed literal from pred (a string) and args (a list or
    tuple of terms).
    """
    return Literal(pred, args).hashcons()
# Global counter used to generate fresh, never-before-seen variables.
_count = 0


def mk_fresh_var():
    """
    Create a new variable.

    >>> mk_fresh_var() != mk_fresh_var()
    True
    """
    global _count
    fresh = mk_var(_count)
    _count += 1
    return fresh
def _occur_check(var, t):
    """Occur check: True iff ``var`` occurs among the free variables of ``t``."""
    return var in t.free_vars()
def is_fileref(term):
    """
    Check whether term is a valid fileref, i.e. a Map that carries both
    a `file` and a `sha1` slot.
    """
    if not term.is_map():
        return False
    items = term.get_args()
    return (mk_stringconst("file") in items
            and mk_stringconst("sha1") in items)
def is_filerefs(term):
    """
    Check whether term is an array whose elements are all filerefs or,
    recursively, arrays of filerefs.
    """
    if not term.is_array():
        return False
    return all(is_fileref(elem) or is_filerefs(elem)
               for elem in term.get_args())
def get_fileref(term):
    """
    If term is a fileref (a map with 'file' and 'sha1' fields), return
    it as a plain dict; otherwise return None.

    Note that a fileref may have other fields - these are (will be)
    simply carried along.
    """
    if not is_fileref(term):
        return None
    args = term.get_args()
    filestr = args[mk_stringconst('file')].val
    sha1 = args[mk_stringconst('sha1')].val
    return {'file'.encode('utf8'): filestr, 'sha1'.encode('utf8'): sha1}
def get_filerefs(term):
    """
    If term is (recursively) a list of filerefs, return a Python list of
    the same shape with dict filerefs at the leaves; if it is a single
    fileref, return that dict; otherwise return None.

    Note that a fileref may have other fields - these are (will be)
    simply carried along.
    """
    if is_filerefs(term):
        return [get_filerefs(elem) for elem in term.get_args()]
    if is_fileref(term):
        return get_fileref(term)
    return None
def mk_term(obj):
    """Converts a mix of python and Term to Term

    Deals with arrays, tuples, dicts, strings, and Terms.

    This is useful in wrappers, where it is convenient to build up Maps and Arrays
    using dicts and lists (or tuples) in Python.

    This will try to guess what to do with strings, but it's probably best to construct
    the corresponding terms or call the parser directly.
    """
    if obj is None:
        pass
    if isinstance(obj, Term):
        # Already a term: pass through unchanged.
        return obj
    elif isinstance(obj, (list, tuple)):
        # Sequences become Arrays, converting each element recursively.
        return mk_array(map(lambda x: mk_term(x), obj))
    elif isinstance(obj, dict):
        # Dicts become Maps; keys become string constants, values are
        # converted recursively.  (Python 2 tuple-parameter lambda.)
        return mk_map(map(lambda (x, y): (mk_stringconst(x), mk_term(y)), obj.iteritems()))
    elif isinstance(obj, bool):
        # bool is tested before int: bool is a subclass of int in Python.
        return mk_boolconst(obj)
    elif isinstance(obj, (int, float)):
        return mk_numberconst(obj)
    elif isinstance(obj, basestring):
        if obj == '':
            return mk_stringconst(obj)
        # A string that parses as a number becomes a NumberConst.
        try:
            float(obj)
            return mk_numberconst(obj)
        except:
            pass
        idmatch = id_match(obj)
        if idmatch:
            # Identifier syntax: capitalized names are variables,
            # everything else is an id constant.
            if obj[0].isupper():
                return mk_var(obj)
            else:
                return mk_idconst(obj)
        else:
            sobj = obj.strip()
            if sobj == '':
                return mk_stringconst(obj)
            elif ((sobj[0] == '[' and sobj[-1] == ']') or
                  (sobj[0] == '{' and sobj[-1] == '}')):
                # Looks like array/map concrete syntax: delegate to the
                # term parser.
                return parser.parse_term(sobj)
            else:
                return mk_stringconst(obj)
class Term(object):
    """
    A datalog+JSON term, with hashconsing

    Note that Term instances should not be created directly, instead use
    IdConst, StringConst, BoolConst, NumberConst, Var, Array, or Map
    """

    # hashconsing of terms: maps each term to its canonical representative.
    # A WeakValueDictionary so unused terms can be garbage-collected.
    __terms = weakref.WeakValueDictionary()

    # chars to escape
    escapestr = re.compile("""[\\\n\t:[]{} ]""")

    # _hash, _fvars, _normal_form are lazily-computed caches.
    __slots__ = ['val', 'args', '_hash', '_fvars',
                 '_volatile', '_normal_form', '__weakref__']

    def __lt__(self, other):
        """
        Arbitrary order (with hash...)
        """
        # TODO a better (total?) ordering
        return self != other and hash(self) < hash(other)

    def __init__(self):
        """
        Initialize the term.
        """
        self._hash = None
        self._fvars = None
        self._normal_form = None
        self._volatile = False

    def __nonzero__(self):
        """
        Checks if term is nonzero, e.g., empty Array or Map, nonzero NumberConst
        """
        # Overridden below for some term subclasses
        return False

    def hashcons(self):
        """
        Returns the term that is representative for the equivalence
        class of terms that are equal to self.

        >>> t = mk_stringconst('foo')
        >>> t.hashcons() == t
        True
        >>> t.hashcons() is mk_stringconst('foo').hashcons()
        True
        """
        # setdefault makes self the representative if none exists yet.
        return Term.__terms.setdefault(self, self)

    def is_var(self):
        """Check whether the term is a variable."""
        return isinstance(self, Var)

    def is_const(self):
        """Check whether the term is a constant."""
        return isinstance(self, Const)

    def is_idconst(self):
        """Check whether the term is an id constant."""
        return isinstance(self, IdConst)

    def is_stringconst(self):
        """Check whether the term is a string constant."""
        return isinstance(self, StringConst)

    def is_boolconst(self):
        """Check whether the term is a boolean constant."""
        return isinstance(self, BoolConst)

    def is_numconst(self):
        """Check whether the term is a numeric constant."""
        return isinstance(self, NumberConst)

    def is_map(self):
        """Check whether the term is a map."""
        return isinstance(self, Map)

    def is_array(self):
        """Check whether the term is an array."""
        return isinstance(self, Array)

    @staticmethod
    def all_terms():
        """
        Iterate through all current terms.
        """
        for t in Term.__terms.itervalues():
            yield t

    def unify(self, other):
        """
        Unify this term against the other. In case of success,
        returns a substitution (even empty), in case of failure, returns None.

        The two terms must not share free variables (callers rename apart
        first; see `rename` / `negative_rename`).

        >>> mk_var(1).unify(mk_idconst('p'))
        subst(X1 = p)
        >>> mk_idconst('p').unify(mk_idconst('q'))
        >>> mk_array([mk_idconst('p'), mk_idconst('a'), mk_idconst('b'), mk_var(1)]).unify(
        ...     mk_array([mk_idconst('p'), mk_idconst('a'), mk_idconst('b'), mk_idconst('c')]))
        subst(X1 = c)
        >>> mk_array([mk_idconst('p'), mk_map({ mk_stringconst('a'): mk_idconst('b'), mk_stringconst('c'): mk_var(1) })]).unify(
        ...     mk_array([mk_idconst('p'), mk_map({ mk_stringconst('a'): mk_idconst('b'), mk_stringconst('c'): mk_idconst('d')})]))
        subst(X1 = d)
        """
        assert isinstance(other, Term), 'Unify only works for terms'
        assert not self.free_vars().intersection(other.free_vars()), 'Unify unhappy with the free vars'
        # create a substitution
        bindings = Subst()
        # create a stack of pairs of terms to unify
        stack = [(self, other)]
        while stack:
            left, right = stack.pop()
            # apply the substitution to terms
            left = bindings(left)
            right = bindings(right)
            if left == right:
                continue
            elif left.is_var():
                # bind the variable, after the occur check
                if _occur_check(left, right):
                    return None
                bindings.bind(left, right)
            elif right.is_var():
                if _occur_check(right, left):
                    return None
                bindings.bind(right, left)
            # elif left.is_apply() and right.is_apply():
            #     if len(left.args) != len(right.args):
            #         return None # failure
            #     # otherwise, just unify preds and arguments pairwise
            #     stack.append( (left.val, right.val) )
            #     for l, r in itertools.izip(left.args, right.args):
            #         stack.append( (l, r) )
            elif left.is_array() and right.is_array() and \
                    len(left.elems) == len(right.elems):
                # arrays unify elementwise
                for l, r in itertools.izip(left.elems, right.elems):
                    stack.append((l, r))
            elif left.is_map() and right.is_map():
                # most interesting case: unify keys pairwise
                # only ground keys are authorized.
                if not left.items.viewkeys() == right.items.viewkeys():
                    return None
                for k, v in left.items.iteritems():
                    assert k.is_ground(), 'k is not ground; unify unhappy'
                    stack.append((v, right.items[k]))
            else:
                return None  # failure
        return bindings

    def is_volatile(self):
        """
        Check whether the term is volatile
        """
        return self._volatile

    def set_volatile(self):
        """
        Mark the symbol as volatile.
        """
        self._volatile = True

    def rename(self, offset=None):
        """
        Performs an alpha-renaming of this term, obtained by replacing
        all variables in it by fresh variables.

        Returns (renaming, renamed_term)

        >>> t = mk_array([mk_idconst("p"), mk_var(1),
        ...               mk_map( { mk_stringconst("foo"): mk_var(2) } ) ] )
        >>> t
        [p, X1, {"foo": X2}]
        >>> t.free_vars() == frozenset((mk_var(1), mk_var(2)))
        True
        >>> renaming, t2 = t.rename()
        >>> t == t2
        False
        >>> t.unify(t2).is_renaming()
        True
        """
        free_vars = self.free_vars()
        if offset is None:
            # Fresh variables start just above the largest integer index
            # currently in use.
            offset = max(v.val for v in free_vars if isinstance(v.val, int)) + 1
        renaming = Subst()
        for i, v in enumerate(free_vars):
            renaming.bind(v, mk_var(i + offset))
        assert renaming.is_renaming(), 'renaming not a renaming; rename unhappy'
        return (renaming, renaming(self))

    def negative_rename(self):
        """
        Performs an alpha-renaming of the term, using
        only negative variables.

        >>> t = mk_array([mk_idconst("p"), mk_var(1), mk_map({mk_stringconst("foo"): mk_var(2)})])
        >>> t
        [p, X1, {"foo": X2}]
        >>> t.negative_rename()[1]
        [p, X-3, {"foo": X-2}]
        """
        free_vars = self.free_vars()
        offset = max(v.val for v in free_vars if isinstance(v.val, int)) + 1
        return self.rename(offset=-offset)

    def is_ground(self):
        """
        Checks whether the term is ground.

        >>> t = mk_array([mk_idconst('p'), mk_var(1), mk_idconst('q')] )
        >>> t.is_ground()
        False
        >>> mk_array([mk_idconst("p"), mk_stringconst("q"), mk_map({mk_stringconst("foo"): mk_numberconst(42) })]).is_ground()
        True
        """
        return not self.free_vars()

    def free_vars(self):
        """Returns the set of free variables of this term.
        """
        # Computed once, then cached in _fvars.
        if self._fvars is None:
            vars = set()
            self._compute_free_vars(vars)
            self._fvars = frozenset(vars)
        return self._fvars

    def _compute_free_vars(self, vars):
        """
        Adds the free vars of the term to vars
        """
        if self.is_var():
            vars.add(self)
            return
        elif self.is_const():
            return
        if self.is_array():
            for t in self.elems:
                t._compute_free_vars(vars)
        elif self.is_map():
            # both keys and values may contain variables
            for k, v in self.items.iteritems():
                k._compute_free_vars(vars)
                v._compute_free_vars(vars)

    def normalize(self):
        """
        Returns a normalized version of the term. Variables in it
        are replaced by X0...Xn-1) where n is the number of free
        variables in the term.

        Returns (renaming, term) where renaming is used to normalize.

        >>> t = mk_array([mk_idconst("p"), mk_var(3),
        ...               mk_map({ mk_stringconst("foo"): mk_var(2) })] )
        >>> t
        [p, X3, {"foo": X2}]
        >>> t.normalize()[1]
        [p, X0, {"foo": X1}]
        >>> t = mk_array([mk_idconst("p"), mk_var(2),
        ...               mk_map( { mk_stringconst("foo"): mk_var(1)} )] )
        >>> t
        [p, X2, {"foo": X1}]
        >>> t.normalize()
        (subst(X2 = X0), [p, X0, {"foo": X1}])
        """
        if self.is_ground():
            return (Subst(), self)
        # Variables are numbered in prefix-traversal order.
        fvars = self.ordered_free_vars()
        renaming = Subst(dict((v, mk_var(i)) for
                              i, v in enumerate(fvars)))
        return (renaming, renaming(self))

    def is_normalized(self):
        """
        Checks whether the term is normalized
        """
        # The normal form is computed once and cached.
        if self._normal_form is None:
            self._normal_form = self.normalize()[1]
        return self._normal_form == self

    def ordered_free_vars(self, l=None):
        """
        Returns the list of variables in the term, by order of prefix
        traversal. Free vars may occur several times in the list
        """
        # NOTE(review): despite the docstring, duplicates are skipped below.
        if l is None:
            l = []
        if self.is_var():
            if self not in l:  # avoid duplicates
                l.append(self)
        # elif self.is_apply():
        #     for t in self.args:
        #         t.ordered_free_vars(l)
        elif self.is_array():
            for t in self.elems:
                t.ordered_free_vars(l)
        elif self.is_map():
            for k, v in self.items.iteritems():
                k.ordered_free_vars(l)
                v.ordered_free_vars(l)
        return l

    def first_symbol(self):
        """
        Finds the first symbol in the object

        >>> mk_idconst('a').first_symbol()
        a
        >>> mk_array([ mk_idconst('a'), mk_idconst('b')]).first_symbol()
        a
        """
        if self.is_const() or self.is_var():
            return self
        elif self.is_array():
            return self.elems[0].first_symbol()
        elif self.is_map():
            raise AssertionError('map term has no first symbol')
        else:
            raise ValueError('unhandled case for first_symbol: ' +
                             repr(self))

    def reduce_access(self, access):
        # Base case of path-based access: only an empty access path is
        # legal on a non-compound term (Array and Map override this).
        if not isinstance(access, list):
            raise ValueError('Illegal access: {0}: {1} should be a list'
                             .format(access, type(access)))
        if access:
            raise ValueError('Illegal access: {0}: {1} should be a map or array'
                             .format(self, type(self)))
        else:
            return self

# Term subclasses: Const, Var, Map, Array
class Const(Term):
    """
    Base class for constant terms: ids, strings, booleans and numbers.
    """

    def get_val(self):
        """Accessor for the wrapped value."""
        return self.val

    def to_python(self):
        '''Convert ground terms to python'''
        return self.val

    def __nonzero__(self):
        # Truthiness mirrors the wrapped value (Python 2 bool protocol).
        return bool(self.val)
class IdConst(Const):
    """
    Id consts. Like StringConsts, the val is a string, but must not start
    with a capital letter, and is printed without string quotes.

    Note that IdConst("foo") != StringConst("foo") (e.g., foo != "foo")
    """

    def __init__(self, idstr):
        """
        Initialize an IdConst with idstr.

        Raises ValueError if idstr is not a string, or starts with an
        uppercase char (reserved for variables) or a digit.
        """
        if not isinstance(idstr, basestring):
            raise ValueError('IdConst: string expected for {0} of type {1}'
                             .format(idstr, type(idstr)))
        if idstr[0].isupper():
            raise ValueError('IdConst: {0} must not start with an uppercase char'
                             .format(idstr))
        if idstr[0].isdigit():
            raise ValueError('IdConst: {0} must not start with a digit'
                             .format(idstr))
        Term.__init__(self)
        self.val = idstr.encode('utf8')

    def __eq__(self, other):
        # Also compares equal to a plain string with the same value.
        if isinstance(other, IdConst):
            return hash(self) == hash(other) and self.val == other.val
        elif isinstance(other, basestring):
            return self.val == other
        else:
            return False

    def __lt__(self, other):
        if isinstance(other, IdConst):
            return self.val < other.val
        else:
            # Ids sort after variables and numbers, before other kinds.
            return not (isinstance(other, Var) or isinstance(other, NumberConst))

    def __repr__(self):
        return '{0}'.format(self.val.encode('utf8'))

    def __hash__(self):
        # Compute the hash once and cache it (Term.__init__ sets the
        # _hash slot to None); previously it was recomputed every call.
        if self._hash is None:
            self._hash = hash(self.val)
        return self._hash

    def to_dot(self):
        return str(self.val)
class StringConst(Const):
    """String consts"""

    def __init__(self, text):
        # NOTE(review): this guard looks like debugging residue -- any
        # StringConst(u"dummy") is rejected.  Kept for behavior
        # compatibility; confirm whether it can be removed.
        if text == u"dummy":
            raise ValueError(text)
        Term.__init__(self)
        self.val = text.encode('utf8')

    def __eq__(self, other):
        # Also compares equal to a plain string with the same value.
        if isinstance(other, StringConst):
            return hash(self) == hash(other) and self.val == other.val
        elif isinstance(other, basestring):
            return self.val == other
        else:
            return False

    def __lt__(self, other):
        if isinstance(other, StringConst):
            return self.val < other.val
        else:
            # Strings sort before arrays and maps only.
            return isinstance(other, Array) or isinstance(other, Map)

    def __repr__(self):
        return '"{0}"'.format(self.val.encode('utf8'))

    def __hash__(self):
        # Compute the hash once and cache it (Term.__init__ sets the
        # _hash slot to None); previously it was recomputed every call.
        if self._hash is None:
            self._hash = hash(self.val)
        return self._hash

    def to_dot(self):
        return str(self.val)
class BoolConst(Const):
    """Boolean Consts"""

    def __init__(self, val):
        Term.__init__(self)
        # Accept the strings 'true'/'false' as well as real booleans.
        if isinstance(val, basestring):
            if val == 'true':
                val = True
            elif val == 'false':
                val = False
        if isinstance(val, bool):
            self.val = val
        else:
            raise TypeError('Boolean (or string "true"/"false") expected for BoolConst')

    def __eq__(self, other):
        return (isinstance(other, BoolConst)
                and hash(self) == hash(other)
                and self.val == other.val)

    def __lt__(self, other):
        if isinstance(other, BoolConst):
            # False < True
            return self.val < other.val
        else:
            # Booleans sort after variables, before everything else.
            return not isinstance(other, Var)

    def __repr__(self):
        return '{0}'.format('true' if self.val else 'false')

    def __hash__(self):
        # Compute the hash once and cache it (Term.__init__ sets the
        # _hash slot to None); previously it was recomputed every call.
        if self._hash is None:
            self._hash = hash(self.val)
        return self._hash

    def to_dot(self):
        return 'true' if self.val else 'false'
class NumberConst(Const):
    """Number consts"""

    def __init__(self, val):
        Term.__init__(self)
        if isinstance(val, int) or isinstance(val, float):
            # Keep both the canonical string form and the numeric value.
            self.val = str(val)
            self.num = val
        elif isinstance(val, basestring):
            self.val = val
            # A decimal point or exponent marker means a float literal.
            # (The membership string previously contained a stray
            # backslash, '\.eE'; the backslash was never meaningful.)
            if any(i in '.eE' for i in val):
                self.num = float(val)
            else:
                self.num = int(val)
        else:
            raise TypeError('Number or string expected for NumberConst')

    def __eq__(self, other):
        # Equality is on the string form, so e.g. "1" and "1.0" differ.
        return (isinstance(other, NumberConst)
                and hash(self) == hash(other)
                and self.val == other.val)

    def __lt__(self, other):
        if isinstance(other, NumberConst):
            # Ordering is on the numeric value.
            return self.num < other.num
        else:
            # Numbers sort after variables, before everything else.
            return not isinstance(other, Var)

    def __repr__(self):
        return '{0}'.format(self.val)

    def __hash__(self):
        # Compute the hash once and cache it (Term.__init__ sets the
        # _hash slot to None); previously it was recomputed every call.
        if self._hash is None:
            self._hash = hash(self.val)
        return self._hash

    def to_dot(self):
        return str(self.val)
class Var(Term):
    """Variable terms, start with capital letter; integer also possible

    >>> Var('Foo')
    Foo
    >>> Var(1)
    X1
    """

    def __init__(self, val):
        Term.__init__(self)
        assert isinstance(val, (basestring, int)),\
            'Var must be a string or int: {0}: {1}'.format(val, type(val))
        self.val = val

    def __eq__(self, other):
        """See if the self var is eq to the other Term

        >>> Var('X') == Var('X')
        True
        >>> Var(3) == Var(3)
        True
        >>> Var('X') == Var(3)
        False
        """
        return (isinstance(other, Var)
                and hash(self) == hash(other)
                and self.val == other.val)

    def __lt__(self, other):
        # Variables sort before every other kind of term.
        if isinstance(other, Var):
            return self.val < other.val
        else:
            return False

    def __hash__(self):
        # Compute the hash once and cache it (Term.__init__ sets the
        # _hash slot to None); previously it was recomputed every call.
        if self._hash is None:
            self._hash = hash(self.val)
        return self._hash

    def __repr__(self):
        # Integer-named variables print as X<n>; string names print as-is.
        return "X%d" % self.val if isinstance(self.val, int) else unicode(self.val)

    def to_python(self):
        '''Convert ground terms to python'''
        return self.val

    def to_dot(self):
        return "X%d" % self.val if isinstance(self.val, int) else unicode(self.val)

    def get_val(self):
        return self.val
class Array(Term):
    """Array (list) terms"""

    def __init__(self, elems):
        Term.__init__(self)
        # A bare Term is promoted to a singleton array.
        if isinstance(elems, Term):
            elems = (elems,)
        assert all(isinstance(e, Term) for e in elems),\
            'Array: elems {0} should be a Term or Terms'.format(elems)
        self.elems = tuple(elems)

    def __eq__(self, other):
        return (isinstance(other, Array)
                and hash(self) == hash(other)
                and self.elems == other.elems)

    def __lt__(self, other):
        if isinstance(other, Array):
            # Lexicographic comparison of the element tuples.
            return self.elems < other.elems
        else:
            # Arrays sort before maps only.
            return isinstance(other, Map)

    def __hash__(self):
        # Compute the hash once and cache it (Term.__init__ sets the
        # _hash slot to None); previously it was recomputed every call.
        if self._hash is None:
            self._hash = hash(self.elems)
        return self._hash

    def __repr__(self):
        return repr(list(self.elems))

    # def __str__(self):
    #     return "{0}".format([str(x) for x in list(self.elems)])

    def __getitem__(self, index):
        # Allow indexing with a NumberConst term as well as a plain int.
        if isinstance(index, NumberConst):
            return self.elems[index.num]
        else:
            return self.elems[index]

    def to_python(self):
        '''Convert ground terms to python'''
        return [a.to_python() for a in self.elems]

    def to_dot(self):
        return "[%s]" % ', '.join(a.to_dot() for a in self.elems)

    def __nonzero__(self):
        return bool(self.elems)

    def get_args(self):
        return self.elems

    def reduce_access(self, access):
        """Follow an access path (a list of indices) into nested terms."""
        if isinstance(access, Term):
            access = [access]
        elif not isinstance(access, list):
            raise ValueError('Illegal access: {0}: {1} should be a list'
                             .format(access, type(access)))
        if access:
            # The head of the path must be an int or a NumberConst index.
            if isinstance(access[0], int):
                idx = access[0]
            elif isinstance(access[0], NumberConst):
                idx = access[0].num
            else:
                raise ValueError('Illegal access for term {0}: {1} should be a number'
                                 .format(self, access))
            if 0 <= idx and idx < len(self.elems):
                return self.elems[idx].reduce_access(access[1:])
            else:
                raise ValueError('Illegal access for term {0}: {1} should be between 0 and {2}'
                                 .format(self, access[0], len(self.elems)))
        else:
            return self
class Map(Term):
    """Map (dict) terms.

    Keys are normalized to StringConst terms; entries are kept in a sorted
    OrderedDict so equal maps hash and compare equal.
    """
    def __init__(self, items):
        """items is a list of tuple pairs (or something with an iteritems method)"""
        if not isinstance(items, (dict, tuple, list)):
            raise TypeError('terms.Map needs a dict, tuple, or list, given {0} of type {1}'
                            .format(items, type(items)))
        if isinstance(items, dict):
            litems = items.items()
        else:
            # A sequence must consist of (key, value) pairs.
            if not all(isinstance(x, (tuple, list)) and len(x) == 2 for x in items):
                raise TypeError('terms.Map items must be lists or tuples of length 2, given {0}'
                                .format(items))
            litems = items
        if not all(isinstance(k, (Const, basestring)) and isinstance(v, (Term, basestring))
                   for k, v in litems):
            raise TypeError('terms.Map: items {0} should be a list of (Const, Term) tuples'
                            .format(items))
        # Normalize: keys become utf8-encoded StringConsts; plain-string or
        # Const values are also coerced to StringConsts (Python 2 encoding).
        sitems = [(mk_stringconst(k.val.encode('utf8') if isinstance(k, Const) else k.encode('utf8')),
                   mk_stringconst(v if isinstance(v, basestring) else v.val) \
                   if isinstance(v, (basestring, Const)) else v)
                  for k, v in litems]
        Term.__init__(self)
        # Only allow stringconst keys; easier to ensure equality
        self.items = collections.OrderedDict(sorted(sitems))
    def __eq__(self, other):
        # Hash compared first as a cheap filter before the item-wise check.
        return (isinstance(other, Map)
                and hash(self) == hash(other)
                and self.items == other.items)
    def __lt__(self, other):
        # Maps only compare among themselves (ordered-dict comparison).
        return (isinstance(other, Map)
                and self.items < other.items)
    def __hash__(self):
        # NOTE(review): recomputed (and re-stored) on every call; no cache check.
        self._hash = hash(tuple(self.items.iteritems()))
        return self._hash
    def __repr__(self):
        return "{" + ", ".join('%r: %r' % (key, self.items[key]) for key in sorted(self.items)) + "}"
    # def __str__(self):
    #     if is_fileref(self) and False:
    #         fstr = mk_stringconst("file")
    #         file = self.get_args()[fstr]
    #         filestr = file.val
    #         return "FH:" + filestr
    #     else:
    #         return "{" + ", ".join('%s: %s' % (key, self.items[key]) for key in sorted(self.items)) + "}"
    def __getitem__(self, key):
        # Plain strings are promoted to StringConst keys.
        # NOTE: returns None (implicitly) for a missing key rather than raising.
        if isinstance(key, basestring):
            key = mk_stringconst(key)
        if key in self.items:
            return self.items[key]
    def __contains__(self, key):
        if isinstance(key, basestring):
            key = mk_stringconst(key)
        return key in self.items
    def to_python(self):
        '''Convert ground terms to python: a plain dict of converted pairs.'''
        return dict([(k.to_python(), v.to_python())
                     for k, v in self.items.iteritems()])
    def to_dot(self):
        # File references render as just the file name for readability.
        if mk_stringconst('file') in self.items:
            return str(self.items[mk_stringconst('file')])
        return "[%s]" % ', '.join(
            '%s: %s' % (k, v) for k, v in self.items.iteritems())
    def __nonzero__(self):
        # Python 2 truthiness: an empty map is falsy.
        return bool(self.items)
    def get_args(self):
        return self.items
    def reduce_access(self, access):
        """Follow a path of string keys (strings, StringConsts, or IdConsts)
        into nested terms, returning the addressed sub-term.

        Raises ValueError for a non-list access, a non-string step, or a
        missing key.
        """
        if isinstance(access, Term):
            access = [access]
        elif not isinstance(access, list):
            raise ValueError('Illegal access: {0}: {1} should be a list'
                             .format(access, type(access)))
        if access:
            if isinstance(access[0], basestring):
                key = mk_stringconst(access[0])
            elif isinstance(access[0], StringConst):
                key = access[0]
            elif isinstance(access[0], IdConst):
                key = mk_stringconst(access[0].val)
            else:
                raise ValueError('Illegal access for term {0}: {1} should be a string'
                                 .format(self, access[0]))
            if key in self.items:
                # Recurse with the remaining path.
                return self.items[key].reduce_access(access[1:])
            else:
                raise ValueError('Illegal access for term {0}: {1} not a valid key'
                                 .format(self, access[0]))
        else:
            # Empty path: the term itself is the result.
            return self
# literals
class Literal(object):
    """
    Literals, e.g., ``'p(\"this\", 3, V)'``

    A literal is a predicate symbol applied to a tuple of Terms.  Instances
    are logically immutable; equal literals can share one representative via
    :meth:`hashcons`.
    """
    # hashconsing of Literals: canonical representative per equivalence class
    __lits = weakref.WeakValueDictionary()

    __slots__ = ['pred', 'args', '_hash', '_fvars',
                 '_volatile', '_normal_form', '__weakref__']

    def __init__(self, pred, args):
        """
        Create a Literal object from a `pred` and `args`.

        :parameters:
          - `pred`: an instance of :class:`IdConst` or :class:`StringConst`
            (a plain string is converted to one of those)
          - `args`: a list or tuple of :class:`Terms`
        """
        assert isinstance(pred, (IdConst, StringConst, basestring)),\
            'mk_literal: pred must be an id or string'
        if isinstance(pred, basestring):
            # Identifier-shaped strings become IdConsts (must be lower-case);
            # anything else becomes a StringConst.
            if id_match(pred):
                if pred[0].isupper():
                    raise ValueError('Literal predicate ids must start with lower case: {0}'
                                     .format(pred))
                else:
                    pred = mk_idconst(pred)
            else:
                pred = mk_stringconst(pred)
        self.pred = pred
        self.args = tuple([mk_term(a) for a in args])
        self._hash = None
        self._fvars = None
        self._normal_form = None
        self._volatile = False

    def __eq__(self, other):
        return self is other or self._equal(other)

    def _equal(self, other):
        # Hash compared first as a cheap filter before the structural check.
        return (isinstance(other, Literal)
                and hash(self) == hash(other)
                and self.pred == other.pred
                and self.args == other.args)

    def __lt__(self, other):
        """Arbitrary order (with hash...)"""
        # TODO a better (total?) ordering
        return self != other and hash(self) < hash(other)

    def __nonzero__(self):
        # NOTE(review): literals are always falsy in Python 2 boolean
        # context; presumably deliberate — callers must test `is not None`.
        return False

    def __repr__(self):
        if self.args:
            return '{0}({1})'.format(self.pred, ', '.join(map(repr, self.args)))
        else:
            return '{0}'.format(self.pred)
    # def __str__(self):
    #     if self.args:
    #         return '{0}({1})'.format(self.pred, ', '.join(map(str, self.args)))
    #     else:
    #         return '{0}'.format(self.pred)

    def to_python(self):
        return {'pred': self.pred.to_python(),
                'args': [a.to_python() for a in self.args]}

    def to_dot(self):
        # BUG FIX: the original mixed %-templates with str.format
        # ("%s(%s)".format(...)), which has no {} placeholders and thus
        # returned the literal string "%s(%s)".  Use %-interpolation.
        return "%s(%s)" % (self.pred, ', '.join(a.to_dot() for a in self.args))

    def __hash__(self):
        # Literals are immutable, so cache the hash (the original always
        # recomputed despite initializing the _hash slot to None).
        if self._hash is None:
            self._hash = hash((self.pred, self.args))
        return self._hash

    def hashcons(self):
        """Returns the literal that is representative for the equivalence
        class of literals that are equal to self.
        >>> t = mk_literal(mk_idconst('p'), [mk_stringconst("foo")])
        >>> t.hashcons() == t
        True
        >>> t.hashcons() is mk_literal(mk_idconst('p'), [mk_stringconst("foo")]).hashcons()
        True
        """
        return Literal.__lits.setdefault(self, self)

    def unify(self, other):
        """Unify two literals with the same predicate and arity.

        Returns a Subst on success; returns None (implicitly) when the
        predicates or arities differ.
        """
        if (isinstance(other, Literal)
            and self.pred == other.pred
            and len(self.args) == len(other.args)):
            if len(self.args) == 0:
                return Subst()
            elif len(self.args) == 1:
                return self.args[0].unify(other.args[0])
            else:
                # Unify argument tuples pairwise by wrapping them in Arrays.
                left = Array(self.args)
                right = Array(other.args)
                return left.unify(right)

    def is_volatile(self):
        """Check whether the literal is volatile"""
        return self._volatile

    def set_volatile(self):
        """Mark the symbol as volatile."""
        self._volatile = True

    def get_pred(self):
        return self.pred

    def is_ground(self):
        return not self.free_vars()

    def free_vars(self):
        """Returns the set of free variables of this literal.
        """
        if self._fvars is None:
            vars = set()
            self._compute_free_vars(vars)
            self._fvars = frozenset(vars)
        return self._fvars

    def _compute_free_vars(self, vars):
        """Adds the free vars of the literal to vars"""
        for t in self.args:
            t._compute_free_vars(vars)

    def normalize(self):
        """Return (renaming, normalized_literal) where variables are renamed
        to X0, X1, ... in prefix-traversal order."""
        if self.is_ground():
            return (Subst(), self)
        fvars = self.ordered_free_vars()
        renaming = Subst(dict( (v, mk_var(i)) for \
                               i, v in enumerate(fvars) ))
        return (renaming, renaming(self))

    def is_normalized(self):
        """Checks whether the term is normalized"""
        if self._normal_form is None:
            self._normal_form = self.normalize()[1]
        return self._normal_form == self

    def ordered_free_vars(self, l=None):
        """Returns the list of variables in the term, by order of prefix
        traversal. Free vars may occur several times in the list
        """
        # BUG FIX: the original only recursed into the args when a list was
        # passed in, and never returned the accumulator, so calls such as
        # normalize()'s `self.ordered_free_vars()` received None.
        if l is None:
            l = []
        for t in self.args:
            t.ordered_free_vars(l)
        return l

    def first_symbol(self):
        return self.pred.first_symbol()

    def get_args(self):
        return self.args
class InfixLiteral(Literal):
    """Literal rendered in infix notation, e.g. comparison predicates (<, etc.)."""
    def __repr__(self):
        # Binary form: "<lhs> <pred> <rhs>".
        lhs, rhs = self.args[0], self.args[1]
        return '%s %s %s' % (lhs, self.pred, rhs)
# ----------------------------------------------------------------------
# clauses
# ----------------------------------------------------------------------
def mk_clause(head, body):
    """Constructor for clauses.

    :param head: the head Literal.
    :param body: iterable of body items (may be empty).
    """
    return Clause(head, body)
class Clause(object):
    """A Horn clause, with a head and a (possibly empty) body.

    Instances are frozen after construction (see __setattr__).

    >>> Clause(mk_literal(mk_idconst('p'), []), [])
    p.
    >>> Clause(mk_literal(mk_idconst('p'), []), [mk_var(1), mk_map({mk_stringconst('foo'): mk_numberconst('42')})])
    p :- X1, {"foo": 42}.
    """
    def __init__(self, head, body, temp=False):
        """Create a clause.  Unless `temp`, enforce the datalog restriction
        that every head variable is bound in the body."""
        self.head = head
        self.body = tuple(body)
        # compute the free variables (used to be local to the non temp case)
        self._free_vars = frozenset([]).union(* (x.free_vars() for x in body))
        # check that the clause is well-formed: all variables in the head are
        # bound in the body
        if not temp:
            if not (head.free_vars() <= self._free_vars):
                print >>sys.stderr, head
                for b in body:
                    print >>sys.stderr, b
                print >>sys.stderr, head.free_vars()
                print >>sys.stderr, self._free_vars
                assert False, 'datalog restriction fails! Clause __init__ unhappy'
        self._is_ground = head.is_ground() and all(x.is_ground() for x in body)
        self._done = True # freeze
    def __setattr__(self, attr, val):
        # Once _done is set, the clause is immutable.
        if getattr(self, '_done', False):
            raise ValueError('immutable term')
        super(Clause, self).__setattr__(attr, val)
    def __hash__(self):
        # Fold the body into the head's hash, order-sensitively.
        h = hash(self.head)
        for x in self.body:
            h = hash( (h, x) )
        return h
    def __eq__(self, other):
        return (isinstance(other, Clause)
                and self.head == other.head
                and self.body == other.body)
    def __repr__(self):
        if self.body:
            return '{0} :- {1}.'.format(self.head,
                                        ', '.join(repr(x) for x in self.body))
        else:
            return '{0}.'.format(self.head)
    # def __str__(self):
    #     if self.body:
    #         return '{0!s} :- {1}.'.format(self.head,
    #                                       ', '.join(str(x) for x in self.body))
    #     else:
    #         return '{0!s}.'.format(self.head)
    def is_ground(self):
        """Checks whether the clause contains no variables"""
        return self._is_ground
    def free_vars(self):
        """Free variables of the clause (equivalent to the free variables
        of the body of the clause).
        """
        return self._free_vars
    def rename(self, offset=None):
        """Perform a renaming of the variables in the clause, using
        variables that do not occur in the clause.
        If an offset is provided, it will be used instead of finding one
        that ensures the absence of variables collisions.
        Returns (renaming, renamed_clause)
        >>> c = Clause(mk_var(1), [mk_var(1), mk_var('X')])
        >>> _, c2 = c.rename()
        >>> c2.free_vars().intersection(c.free_vars())
        frozenset([])
        >>> c2.head.free_vars() <= c2.free_vars()
        True
        >>> c.rename(offset=4)
        (subst(X1 = X4, X = X5), X4 :- X4, X5.)
        """
        fvars = self.free_vars()
        if not fvars:
            # NOTE(review): returns a list, not a Subst, in the no-op case;
            # kept for backward compatibility with existing callers.
            return [], self
        elif offset is None:
            # compute an offset: by adding this number to the variables,
            # we are sure never to collide with fvars
            # ROBUSTNESS FIX: the original applied max() directly, which
            # raised ValueError when no variable had an integer name; fresh
            # numbered variables cannot collide with string-named ones, so
            # offset 0 is safe in that case.
            int_vals = [v.get_val() for v in fvars if isinstance(v.get_val(), int)]
            offset = max(int_vals) + 1 if int_vals else 0
        renaming = Subst()
        for i, v in enumerate(fvars):
            renaming.bind(v, mk_var(i + offset))
        # TYPO FIX in the assertion message ("no a" -> "not a").
        assert renaming.is_renaming(), 'renaming not a renaming; rename unhappy'
        return (renaming, renaming(self))
def mk_fact_rule(head):
    """Constructor for FactRules (clauses with an empty body)."""
    return FactRule(head)
def mk_derivation_rule(head, body):
    """Constructor for DerivationRules"""
    return DerivationRule(head, body)
def mk_inference_rule(head, body):
    """Constructor for InferenceRules (head and body must be Literals)."""
    return InferenceRule(head, body)
class FactRule(Clause):
    """A clause with an empty body: a plain fact."""
    def __init__(self, head, temp=False):
        super(FactRule, self).__init__(head, [], temp)
class DerivationRule(Clause):
    """A clause used for derivation; no extra constraints over Clause."""
    def __init__(self, head, body, temp=False):
        super(DerivationRule, self).__init__(head, body, temp)
class InferenceRule(Clause):
    """A clause whose head and every body element must be Literals."""
    def __init__(self, head, body, temp=False):
        # Validate argument kinds before delegating to Clause.
        assert isinstance(head, Literal), 'Bad head: {0}: {1}'.format(head, type(head))
        assert all(isinstance(b, Literal) for b in body), 'Bad body: {0}'.format(body)
        super(InferenceRule, self).__init__(head, body, temp)
# ----------------------------------------------------------------------
# claims
# ----------------------------------------------------------------------
def mk_claim(lit, reason):
    """Constructor for claims (a ground literal plus its explanation)."""
    return Claim(lit, reason)
class Claim(object):
    """
    A claim is a pair of a ground literal and an explanation.

    The explanation can be a derivation rule, an inference rule, or an
    application of an interpreted predicate.
    """
    def __init__(self, literal, reason):
        """Create the claim. reason can be a string or a Clause."""
        assert isinstance(literal, Literal), 'Literal expected'
        assert literal.is_ground(), 'Non-ground claim {0}, free_vars {1}'.format(literal, literal.free_vars())
        self.literal = literal
        # ROBUSTNESS FIX: always define .subst so attribute access is safe;
        # the original only assigned it when reason was a Clause.
        self.subst = None
        if isinstance(reason, Clause):
            # Record how the rule head matches the claimed literal.
            self.subst = reason.head.unify(literal)
        self.reason = reason
        self._hash = hash((self.literal, self.reason))
    def __hash__(self):
        return self._hash
    def __eq__(self, other):
        # Subclass-sensitive equality: compares literal and reason.
        return (isinstance(other, self.__class__)
                and other.literal == self.literal
                and self.reason == other.reason)
    def __lt__(self, other):
        return self != other and self.literal < other.literal
    def __repr__(self):
        if isinstance(self.reason, basestring):
            reason = '"' + self.reason + '"'
        else:
            reason = self.reason
        return 'claim({0}, reason={1})'.format(self.literal, reason)
    # def __str__(self):
    #     return 'claim({0}, reason=NP)'.format(self.literal)
def mk_derived_claim(lit, reason):
    """Makes a DerivedClaim (reason must be a DerivationRule)."""
    return DerivedClaim(lit, reason)
class DerivedClaim(Claim):
    '''
    Claim established by derivation (its reason is a DerivationRule).
    '''
    def __init__(self, literal, reason):
        assert isinstance(reason, DerivationRule), 'reason not a DerivationRule, DerivedClaim __init__ unhappy.'
        Claim.__init__(self, literal, reason)
    def __repr__(self):
        return 'derivedClaim({0}, reason={1})'.format(self.literal, self.reason)
def mk_proved_claim(lit, reason):
    """Makes a ProvedClaim (reason must be an InferenceRule)."""
    return ProvedClaim(lit, reason)
class ProvedClaim(Claim):
    '''
    Claim established by inference (its reason is an InferenceRule).
    '''
    def __init__(self, literal, reason):
        assert isinstance(reason, InferenceRule), 'reason not a InferenceRule, ProvedClaim __init__ unhappy.'
        Claim.__init__(self, literal, reason)
    def __repr__(self):
        return 'provedClaim({0}, reason={1})'.format(self.literal, self.reason)
def mk_interpreted_claim(lit, reason):
    """Makes a InterpretedClaim"""
    return InterpretedClaim(lit, reason)
class InterpretedClaim(Claim):
    '''
    Claim established by an interpretation (e.g. an interpreted predicate).
    '''
    def __init__(self, literal, reason):
        super(InterpretedClaim, self).__init__(literal, reason)
    def __repr__(self):
        return 'interpretedClaim(%s, reason=%s)' % (self.literal, self.reason)
# ----------------------------------------------------------------------
# substitutions and renamings
# ----------------------------------------------------------------------
def mk_subst(**bindings):
    """Named arguments constructor for substitutions. It builds
    terms from its named arguments (each keyword becomes a variable name).
    >>> mk_subst(X=mk_numberconst(42), Y=mk_array([mk_numberconst(1),mk_numberconst(2),mk_numberconst(3)]))
    subst(X = 42, Y = [1, 2, 3])
    """
    term_bindings = dict((mk_var(k), v) for k, v in bindings.iteritems())
    return Subst(term_bindings)
class Subst(object):
    """A substitution: a sorted list of (variable, term) bindings.

    Bindings are kept in a sorted list; `introduced()` results are cached
    and invalidated via a modification timestamp.

    >>> s = Subst( { mk_var(1): mk_stringconst('p'),
    ...              mk_var(2): mk_stringconst('u') } )
    >>> s
    subst(X1 = "p", X2 = "u")
    >>> sorted(s.domain())
    [X1, X2]
    >>> sorted(s.range())
    ["p", "u"]
    >>> s.is_empty()
    False
    >>> len(s)
    2
    >>> s.restrict( [mk_var(1)] )
    subst(X1 = "p")
    """
    __slots__ = ['_bindings', '_hash', '_introduced',
                 '_timestamp', '_introduced_timestamp', '__weakref__']
    def __init__(self, bindings=None):
        """Initialize the substitution from a dict, list, or tuple of
        (variable, term) pairs (or empty when None)."""
        self._bindings = []
        self._hash = None
        self._timestamp = 0 # modification timestamp
        self._introduced_timestamp = 0 # last update of introduced
        self._introduced = set() # set of introduced variables
        if bindings is not None:
            if isinstance(bindings, dict):
                for k, v in bindings.iteritems():
                    self.bind(k, v)
            elif isinstance(bindings, list) or isinstance(bindings, tuple):
                for k, v in bindings:
                    self.bind(k, v)
            else:
                assert False, 'unknown kind of substitution'
    def __eq__(self, other):
        """Equality between substitutions (bindings are kept sorted,
        so list equality is order-insensitive in effect)."""
        return isinstance(other, Subst) and self._bindings == other._bindings
    def __hash__(self):
        return hash(frozenset(self._bindings))
    def __repr__(self):
        """Representation of the subst"""
        return "subst(%s)" % ', '.join(
            '%s = %s' % (k, v) for k, v in self._bindings)
    def __lt__(self, other):
        """Lexicographic order on the sorted bindings"""
        return self._bindings < other._bindings
    def __len__(self):
        """Number of bindings in the subst."""
        return len(self._bindings)
    def __getitem__(self, t):
        """Apply a substitution to a term (a plain string is first
        promoted to a variable)."""
        if isinstance(t, basestring):
            t = mk_var(t)
        return self.__call__(t)
    def __call__(self, t):
        """Apply the substitution to a term.
        >>> s = Subst( {mk_var(1): mk_stringconst('foo')} )
        >>> s
        subst(X1 = "foo")
        >>> s(mk_var(1))
        "foo"
        >>> s(mk_var(2))
        X2
        >>> t = mk_array( [mk_stringconst("foo"), mk_numberconst('42'), mk_map({mk_stringconst("bar"): mk_var(1)}) ] )
        >>> t.is_ground()
        False
        >>> t
        ["foo", 42, {"bar": X1}]
        >>> s(t)
        ["foo", 42, {"bar": "foo"}]
        >>> s(t) != t
        True
        """
        if isinstance(t, Term):
            # Ground terms cannot contain variables: return unchanged.
            if t.is_ground():
                return t
            elif t.is_var():
                # Linear scan of the sorted binding list.
                for i in xrange(len(self._bindings)):
                    if self._bindings[i][0] == t:
                        return self._bindings[i][1]
                return t
            elif t.is_const():
                return t
            # elif t.is_apply():
            #     return mk_apply(self(t.val), map(self, t.args))
            elif t.is_array():
                # Rebuild structurally, applying to each element.
                return mk_array(map(self, t.elems))
            elif t.is_map():
                return mk_map(dict((self(k), self(v)) for k, v in \
                                   t.items.iteritems()))
            else:
                assert False, 'unknown kind of term in __call__'
        elif isinstance(t, Literal):
            # Preserve the concrete Literal/Clause subclass on rebuild.
            return t.__class__(self(t.pred), map(self, t.args))
        elif isinstance(t, Clause):
            return t.__class__(self(t.head), map(self, t.body))
        else:
            print t.__class__
            print t
            assert False, 'bad arg %s of class %s; __call__ unhappy' % (t, t.__class__)
    def __nonzero__(self):
        """A substitution, even empty, is to be considered as a true value"""
        return True
    def __contains__(self, var):
        """Checks whether var is bound by the substitution
        >>> s = Subst({ mk_var(1): mk_numberconst(42)})
        >>> mk_var(1) in s
        True
        >>> mk_var(2) in s
        False
        """
        assert var.is_var(), 'var ain\'t a var; __contains__ unhappy'
        for k, _ in self._bindings:
            if k == var:
                return True
        return False
    def get(self, str):
        # Look up the variable named `str` and return its JSON dump.
        # NOTE(review): the parameter shadows the builtin `str`.
        t = self(mk_var(str))
        return dumps(t)
    def get_bindings(self):
        return self._bindings
    def clone(self):
        """Return a copy of the substitution."""
        s = Subst()
        for k, v in self._bindings:
            s.bind(k, v)
        return s
    def bind(self, var, t):
        """Bind var to t in the substitution. Var must not
        be already bound.
        """
        assert var.is_var(), 'var ain\'t a var; bind unhappy'
        assert isinstance(t, Term), '{0}: {1} not a term; bind unhappy'.format(t, type(t))
        # Binding a variable to itself is a no-op.
        if var == t:
            return # no-op
        # Asserts that var is currently unbound (self(var) returns var itself).
        assert self(var) == var, 'var not bound; bind unhappy'
        self._bindings.append( (var, t) )
        # Keep the binding list sorted so __eq__/__lt__/__repr__ are canonical.
        self._bindings.sort()
        self._timestamp += 1 # update timestamp
    def is_empty(self):
        """Checks whether the substitution is empty."""
        return not self._bindings
    def range(self):
        """Values of the substitution"""
        for _, v in self._bindings:
            yield v
    def domain(self):
        """Variables bound by the substitution"""
        for k, _ in self._bindings:
            yield k
    def introduced(self):
        """Variables introduced by the substitution (iterator)"""
        # Lazily recompute the cache when bindings changed since last time.
        if self._timestamp > self._introduced_timestamp:
            # must update the set of introduced variables
            self._introduced.clear()
            for t in self.range():
                self._introduced.update(t.free_vars())
            self._introduced_timestamp = self._timestamp
        for var in self._introduced:
            yield var # yield content of the set
    def compose(self, other):
        """Composes the two substitutions, self o other.
        The resulting substitution
        is { x -> other(self(x)) } for x in
        domain(self) union domain(other)
        be careful that this is backward w.r.t. function composition,
        since t \sigma \theta = t (\sigma o \theta)
        >>> s = Subst({mk_var(1): mk_var(3)})
        >>> t = Subst({mk_var(2): mk_array([mk_idconst('p'), mk_var(1)]),
        ...            mk_var(3): mk_idconst('b')})
        >>> s.compose(t) == Subst({mk_var(1): mk_idconst('b'),
        ...                        mk_var(2): mk_array([mk_idconst('p'), mk_var(1)]),
        ...                        mk_var(3): mk_idconst('b')})
        True
        >>> s.compose(s) == s
        True
        """
        assert isinstance(other, Subst)
        s = Subst()
        for var in self.domain():
            s.bind(var, other(self(var)))
        for var in other.domain():
            if var not in self:
                s.bind(var, other(var))
        return s
    def join(self, other):
        """Take the join of the two substitutions, self . other,
        the resulting substitution is::
            { x -> other(self(x)) for x in domain(self) } union
            { x -> other(x) } for x in domain(other) vars(range(self)).
        >>> s = Subst({mk_var(1): mk_var(3)})
        >>> t = Subst({mk_var(2): mk_array([mk_idconst('p'), mk_var(1)]),
        ...            mk_var(3): mk_idconst('b')})
        >>> s.join(t) == Subst({mk_var(1): mk_idconst('b'),
        ...                     mk_var(2): mk_array([mk_idconst('p'), mk_var(1)]) })
        True
        """
        assert isinstance(other, Subst)
        s = Subst()
        for var, t in self._bindings:
            s.bind(var, other(t))
        for var in other.domain():
            # Skip variables that this substitution's range introduced.
            if var not in self.introduced():
                s.bind(var, other(var))
        return s
    def is_renaming(self):
        """Checks whether the substitution is a renaming.
        >>> Subst( { mk_var(1): mk_stringconst("a") } ).is_renaming()
        False
        >>> Subst( { mk_var(1): mk_var(2) } ).is_renaming()
        True
        """
        return all(x.is_var() for x in self.range()) and \
            len(list(self.domain())) == len(list(self.range()))
    def restrict(self, domain):
        """Returns a new substitution, which is the same but
        restricted to the given domain.
        >>> s = Subst({mk_var(2): mk_array([mk_idconst('p'), mk_var(1)]),
        ...            mk_var(3): mk_idconst('b')})
        >>> s.restrict([mk_var(2)])
        subst(X2 = [p, X1])
        """
        s = Subst()
        for var, t in self._bindings:
            if var in domain:
                s.bind(var, t)
        return s
# ----------------------------------------------------------------------
# json parsing and printing
# ----------------------------------------------------------------------
class TermJSONEncoder(json.JSONEncoder):
    """Custom encoder in JSON. It deals with terms, clauses,
    substitutions and claims.

    Each kind is wrapped in a one-key dict (e.g. {"__Var": ...}) so that
    `term_object_hook` can reconstruct it on load.
    """
    def default(self, o):
        "try to encode terms"
        # Objects that provide their own to_json take precedence.
        if 'to_json' in dir(o):
            return o.to_json()
        elif isinstance(o, Term):
            if o.is_var():
                return { '__Var': o.get_val() }
            elif o.is_stringconst():
                return { '__StringConst': o.get_val() }
            elif o.is_idconst():
                return { '__IdConst': o.get_val() }
            elif o.is_boolconst():
                return { '__BoolConst': o.get_val() }
            elif o.is_numconst():
                return { '__NumberConst': o.get_val() }
            elif o.is_array():
                return { '__Array': list(o.get_args()) }
            elif o.is_map():
                # Keys are StringConsts: serialize their raw string value.
                return { '__Map': dict((k.val, v) for k, v in o.get_args().iteritems()) }
        elif isinstance(o, Literal):
            # Encoded as one list: predicate first, then the arguments.
            return {'__Literal': [o.pred] + list(o.args)}
        elif isinstance(o, Clause):
            # Encoded as one list: head first, then the body literals.
            return {'__Clause': [o.head] + list(o.body)}
        elif isinstance(o, Subst):
            return {'__Subst': list(o.get_bindings())}
        elif isinstance(o, Claim):
            return {'__Claim': o.literal,
                    '__Reason': o.reason }
        print 'Should have to_json defined for {0}'.format(o.__class__)
        return json.JSONEncoder.default(self, o) # defer to default
class TermReadableJSONEncoder(json.JSONEncoder):
    """Custom encoder in JSON. It deals with terms, clauses,
    substitutions and claims, but prints them more readably for clients.

    Unlike TermJSONEncoder, the output is not round-trippable: constants
    are emitted as bare values and maps use repr'd keys.
    """
    def default(self, o):
        "try to encode terms"
        if isinstance(o, Term):
            if o.is_var():
                return {'name': o.get_val()}
            elif o.is_const():
                return o.get_val()
            # elif o.is_apply():
            #     return {'pred': o.get_pred(), 'args': list(o.get_args())}
            elif o.is_array():
                return list(o.get_args())
            elif o.is_map():
                return dict((repr(k), v) for k, v in o.get_args().iteritems())
            # NOTE(review): a Term matching none of the branches falls
            # through and is encoded as JSON null (implicit None).
        elif isinstance(o, Literal):
            return {'pred': o.pred, 'args': list(o.args)}
        elif isinstance(o, Clause):
            return {'head': o.head, 'body': list(o.body)}
        elif isinstance(o, Subst):
            return {'__Subst': list(o.get_bindings())}
        # DEAD-CODE FIX: a second `isinstance(o, Literal)` branch stood here;
        # it was unreachable (shadowed by the Literal branch above) and has
        # been removed.
        elif isinstance(o, Claim):
            return {'__Claim': o.literal,
                    '__Reason': o.reason }
        return json.JSONEncoder.default(self, o) # defer to default
def term_object_hook(o):
    """Given the JSON object o (a dict), tries to parse terms,
    claims, clauses and substs from it.

    Inverse of TermJSONEncoder: each wrapper key ("__Var", "__Array", ...)
    selects the constructor; unrecognized dicts are returned unchanged.
    json.loads applies this hook bottom-up, so nested values are already
    decoded terms by the time the enclosing object is seen.
    """
    # detect special kinds of maps
    if '__Var' in o:
        return mk_var(o['__Var'])
    elif '__IdConst' in o:
        l = o['__IdConst']
        return mk_idconst(l)
    elif '__StringConst' in o:
        l = o['__StringConst']
        return mk_stringconst(l)
    elif '__BoolConst' in o:
        l = o['__BoolConst']
        return mk_boolconst(l)
    elif '__NumberConst' in o:
        l = o['__NumberConst']
        return mk_numberconst(l)
    elif '__Array' in o:
        l = o['__Array']
        return Array(l)
    elif '__Map' in o:
        l = o['__Map']
        return Map(dict([(mk_stringconst(k), v) for k, v in l.iteritems()]))
    elif '__Clause' in o:
        # Head first, body is the rest.
        l = o['__Clause']
        assert len(l) >= 1
        return Clause(l[0], l[1:])
    elif '__Subst' in o:
        l = o['__Subst']
        return Subst( [(k, v) for k, v in l] )
    elif '__Literal' in o:
        # Predicate first, args are the rest.
        l = o['__Literal']
        return Literal(l[0], l[1:])
    elif '__Claim' in o and '__Reason' in o:
        lit = o['__Claim']
        reason = o['__Reason']
        return Claim(lit, reason=reason)
    # DEAD-CODE FIX: a second `elif '__Subst' in o:` branch stood here; it
    # was unreachable (the identical test above already matched) and has
    # been removed.
    # default choice: just return the object
    return o
def remove_unicode(input):
    """json.loads will read in strings as unicode, hence creates u'foo' forms,
    which are difficult to work with. This function rebuilds the structures as
    plain utf8 strings.

    Recurses through dicts and lists; non-string leaves are returned as-is.
    NOTE(review): the parameter shadows the builtin `input`.
    """
    if isinstance(input, dict):
        return {remove_unicode(key): remove_unicode(value) for key, value in input.iteritems()}
    elif isinstance(input, list):
        return [remove_unicode(element) for element in input]
    elif isinstance(input, unicode):
        # Python 2: encode unicode objects down to utf-8 byte strings.
        return input.encode('utf-8')
    else:
        return input
# The dump functions convert obj to JSON, load converts JSON to obj
# Thus we should have load(dump(obj) == obj and dump(load(json)) == json
# For any obj in terms, and any legitimate json string
def dump(obj, filedesc, *args, **kwargs):
    """Print the object in JSON on the given file descriptor
    (uses TermJSONEncoder so terms/clauses/claims round-trip)."""
    json.dump(obj, filedesc, cls=TermJSONEncoder, *args, **kwargs)
def dumps(obj, *args, **kwargs):
    """Print the object in JSON into a string
    (uses TermJSONEncoder so terms/clauses/claims round-trip)."""
    return json.dumps(obj, cls=TermJSONEncoder, *args, **kwargs)
def dumps_readably(obj, *args, **kwargs):
    """Print the object in JSON into a string, old form of term
    (uses TermReadableJSONEncoder; output is NOT round-trippable).
    """
    return json.dumps(obj, cls=TermReadableJSONEncoder, *args, **kwargs)
def load(filedesc, *args, **kwargs):
    """Parse JSON from the given file descriptor, reconstructing
    terms/clauses/claims via term_object_hook.
    """
    return json.load(filedesc, object_hook=term_object_hook, *args, **kwargs)
def loads(s, *args, **kwargs):
    """Converts a JSON string to term classes
    >>> pid = mk_idconst('p')
    >>> loads(dumps(pid)) == pid
    True
    >>> dumps(loads('{"__IdConst": "p"}')) == '{"__IdConst": "p"}'
    True
    >>> arr = mk_array([mk_idconst('a'), mk_var('V'), mk_var(1), mk_numberconst(3)])
    >>> loads(dumps(arr)) == arr
    True
    >>> arstr = '{"__Array": [{"__IdConst": "a"}, {"__Var": "V"}, {"__Var": 1}, {"__NumberConst": "3"}]}'
    >>> dumps(loads(arstr)) == arstr
    True
    >>> fref = mk_map({mk_stringconst('file'): mk_stringconst('doc.pdf'),
    ...                mk_stringconst('sha1'): mk_stringconst("9af")})
    >>> loads(dumps(fref)) == fref
    True
    >>> fstr = '{"__Map": {"sha1": {"__StringConst": "9af"}, "file": {"__StringConst": "doc.pdf"}}}'
    >>> dumps(loads(fstr)) == fstr
    True
    >>> lit = mk_literal(mk_idconst('p'), [mk_idconst('a'), mk_var('V'), mk_var(1), mk_numberconst(3)])
    >>> loads(dumps(lit)) == lit
    True
    >>> litstr = '{"__Literal": [{"__IdConst": "p"}, {"__IdConst": "a"}, {"__Var": "V"}, {"__Var": 1}, {"__NumberConst": "3"}]}'
    >>> dumps(loads(litstr)) == litstr
    True
    >>> cls = Clause(mk_literal(mk_idconst('p'), []), [])
    >>> loads(dumps(cls)) == cls
    True
    >>> clstr = '{"__Clause": [{"__Literal": [{"__IdConst": "p"}]}]}'
    >>> dumps(loads(clstr)) == clstr
    True
    >>> cls2 = Clause(mk_literal(mk_idconst('p'), []), [mk_var(1), mk_map({mk_stringconst('foo'): mk_numberconst('42')})])
    >>> loads(dumps(cls2)) == cls2
    True
    >>> clstr2 = '{"__Clause": [{"__Literal": [{"__IdConst": "p"}]}, {"__Var": 1}, {"__Map": {"foo": {"__NumberConst": "42"}}}]}'
    >>> dumps(loads(clstr2)) == clstr2
    True
    >>> sbst = Subst( { mk_var(1): mk_stringconst('p'),
    ...                 mk_var(2): mk_stringconst('u') } )
    >>> loads(dumps(sbst)) == sbst
    True
    >>> sbstr = '{"__Subst": [[{"__Var": 1}, {"__StringConst": "p"}], [{"__Var": 2}, {"__StringConst": "u"}]]}'
    >>> dumps(loads(sbstr)) == sbstr
    True
    """
    # Parse with the term hook, then strip py2 unicode wrappers so callers
    # see plain utf-8 strings.  (GARBLING FIX: the original line carried a
    # trailing extraction artifact that broke the syntax.)
    return remove_unicode(json.loads(s, object_hook=term_object_hook, *args, **kwargs))
Translating, Transcribing, and Investigating Genetic Sequences and their corresponding Proteins
--------------------------------------------------------------------------------------------------------
|PyPI version| |Actions Status| |Downloads| |License|
.. |PyPI version| image:: https://img.shields.io/pypi/v/codons.svg?logo=PyPI&logoColor=brightgreen
:target: https://pypi.org/project/codons/
:alt: PyPI version
.. |Actions Status| image:: https://github.com/freiburgermsu/codons/workflows/Test%20Codons/badge.svg
:target: https://github.com/freiburgermsu/codons/actions
:alt: Actions Status
.. |License| image:: https://img.shields.io/badge/License-MIT-blue.svg
:target: https://opensource.org/licenses/MIT
:alt: License
.. |Downloads| image:: https://pepy.tech/badge/Codons
:target: https://pepy.tech/project/Codons
:alt: Downloads
The Codons module is a lightweight tool for 1) conducting transcription and translation of genetic sequences, either from a FASTA format or a string; 2) making and reading FASTA or multi-FASTA files; and 3) conducting BLAST searches of protein and nucleotide sequences. Example Notebooks of these features are offered in the "examples" directory of the `Codons GitHub repository <https://github.com/freiburgermsu/codons/tree/main/examples>`_.
Installation
+++++++++++++
The following command installs ``Codons`` in a command prompt/terminal environment::
pip install codons
The full documentation is available on `ReadTheDocs <https://codons.readthedocs.io/en/latest/>`_.
import BuildSimHubAPI as bsh_api
import pandas as pd
import numpy as np
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import learning_curve
"""
User Inputs
"""
project_api_key = 'ec9ea39e-4c2a-47e3-baf2-c42a1a8210fa'
model_api_key = '453b2cf5-c443-40a3-84b6-30f4f5ab087a'
# User Input hyper-parameters
test_size = 0.2
C = 10
lw = 1
gamma_num = 0.05
"""
SCRIPT!!
"""
bsh = bsh_api.BuildSimHubAPIClient()
results = bsh.parametric_results(project_api_key, model_api_key)
# Collect results: flatten each model's parameter string ("key: value, ...")
# into one column per parameter, keeping the scalar result columns as-is.
# (DEAD-CODE FIX: a no-op loop that repeatedly assigned a throwaway variable
# was removed; the shadowed builtin name `dict` was renamed.)
result_dict = results.net_site_eui()
columns = {}
for key in result_dict:
    if key == "model":
        param_columns = {}
        for model_str in result_dict["model"]:
            for pair_str in model_str.split(','):
                pair = pair_str.split(': ')
                if pair[0] not in param_columns:
                    param_columns[pair[0]] = []
                param_columns[pair[0]].append(pair[1])
        columns.update(param_columns)
    else:
        columns[key] = result_dict[key]
df = pd.DataFrame(columns)
df.head(5)
# Work on a copy: drop the non-numeric plot-id column and cast to float.
df_copy = df.copy()
df_copy = df_copy.drop('model_plot', axis=1).astype(float)
print(df_copy.describe(include='all'))
# Targets are the 'value' column; everything else is a feature.
values = np.array(df_copy['value'])
features = df_copy.drop('value', axis=1)  # axis 1 refers to the columns
feature_list = list(features.columns)
features = np.array(features)
# SVM model: hold out `test_size` of the rows for evaluation.
X_train, X_test, y_train, y_test = train_test_split(features, values, test_size=test_size)
# Sanity-check the split shapes.
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
# SVM linear kernel model: fit on the training split, predict the test split.
svr_lin = SVR(kernel='linear', C=C)
pred_lin = svr_lin.fit(X_train, y_train).predict(X_test)
plt.scatter(y_test, pred_lin, label='Linear model')
# Diagonal reference line between the extreme points.
# BUG FIX: `lw` was passed positionally after the color, which matplotlib
# parses as the start of a new data set rather than a line width; it must
# be the `lw=` keyword (as the rbf plot below already does).
plt.plot([min(y_test), max(y_test)], [min(pred_lin), max(pred_lin)], "orange", lw=lw)
print('linear model R-squared = {}'.format(round(float(svr_lin.score(X_test, y_test)), 4)))
print('linear MSE = {}'.format(round(float(mean_squared_error(y_test, pred_lin)), 4)))
# SVM rbf kernel model
svr_rbf = SVR(kernel='rbf', C=C, gamma=gamma_num)
pred_rbf = svr_rbf.fit(X_train, y_train).predict(X_test)
plt.scatter(y_test, pred_rbf, label='rbf model')
plt.plot([min(y_test), max(y_test)], [min(pred_rbf), max(pred_rbf)], "lightblue", lw=1)
plt.xlabel("True value")
plt.ylabel("Predicted value")
plt.legend()
print('rbf model R-squared = {}'.format(round(float(svr_rbf.score(X_test, y_test)), 4)))
print('rbf model MSE = {}'.format(round(float(mean_squared_error(y_test, pred_rbf)), 4)))
plt.figure()
# features importance for linear kerner in SVM
svm = svr_lin.fit(X_train, y_train)
def f_importances(coef, names):
    """Plot feature importances (SVM coefficients) as a horizontal bar chart.

    Bars are ordered by ascending coefficient value and labelled with the
    corresponding feature names.
    """
    pairs = sorted(zip(coef, names))
    sorted_imp = [weight for weight, _ in pairs]
    sorted_names = [label for _, label in pairs]
    positions = range(len(sorted_names))
    plt.barh(positions, sorted_imp, align='center')
    plt.yticks(positions, sorted_names)
    plt.show()
# Plot and report the linear model's coefficients.
f_importances(svm.coef_[0], feature_list)
print('Coefficients:{0}, intercept {1}'.format(svm.coef_, svm.intercept_))
print('Score: {0}' .format(svm.score(X_test, y_test)) + ', which is a bit low.')
plt.figure()
# Evaluate linear model performance.
print('SVR linear Mean Absolute Error:', metrics.mean_absolute_error(y_test, pred_lin))
print('SVR linear Mean Squared Error:', metrics.mean_squared_error(y_test, pred_lin))
print('SVR linear Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, pred_lin)))
# Evaluate rbf model performance.
print('SVR rbf Mean Absolute Error:', metrics.mean_absolute_error(y_test, pred_rbf))
print('SVR rbf Mean Squared Error:', metrics.mean_squared_error(y_test, pred_rbf))
print('SVR rbf Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, pred_rbf)))
# Fix: "The the" typo in the user-facing message.
print('The difference between RMSE and MAE is small, so the error size is relatively consistent.')
# We can use the function learning_curve to generate the values that are required to plot such a
# learning curve (number of samples that have been used, the average scores on the training sets
# and the average scores on the validation sets).
# NOTE: this refit duplicates the earlier rbf fit; kept for parity with the
# original notebook flow.
svr_rbf = SVR(kernel='rbf', C=C, gamma=gamma_num)
pred_rbf = svr_rbf.fit(X_train, y_train).predict(X_test)
plt.xlabel("True value")
plt.ylabel("Predicted value")
# Fix: "brf" -> "rbf" in the printed labels.
print('SVR rbf R-squared = {}'.format(round(float(svr_rbf.score(X_test, y_test)), 4)))
print('SVR rbf MSE = {}'.format(round(float(mean_squared_error(y_test, pred_rbf)), 4)))
svr = SVR(kernel='rbf', gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = learning_curve(svr, X_train, y_train,
                                                                train_sizes=np.linspace(0.1, 1, 10),
                                                                scoring="neg_mean_squared_error", cv=5)
svr2 = SVR(kernel='linear')
train_sizes2, train_scores_svr2, test_scores_svr2 = learning_curve(svr2, X_train, y_train,
                                                                   train_sizes=np.linspace(0.1, 1, 10),
                                                                   scoring="neg_mean_squared_error", cv=5)
# learning_curve returns *negative* MSE, hence the sign flips below.
plt.plot(train_sizes, -test_scores_svr.mean(1), 'o-', color="r",
         label="SVR rbf testing error")
plt.plot(train_sizes, -train_scores_svr.mean(1), 'o-', color="b",
         label="SVR rbf training error")
plt.plot(train_sizes2, -test_scores_svr2.mean(1), 'o-', color="g",
         label="SVR linear testing error")
plt.plot(train_sizes2, -train_scores_svr2.mean(1), 'o-', color="y",
         label="SVR linear training error")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
print('From the graph, we can know lots of information. SVR linear model has relatively low bias, '
      'compared to SVR rbf model, which indicates the model fits training data very well. '
      'The SVR linear model has lower variance, which indicates that our model prediction '
      'and true data are homogenous, which is pretty good, but on the other hand, '
      'it is possible for the model to be a bit underfitted. Both models has their advantages in our case, '
      'but after trade-off, we might prefer linear model')
plt.show()
/HolmesV-2021.10.8.tar.gz/HolmesV-2021.10.8/mycroft/util/parse.py | from warnings import warn
from difflib import SequenceMatcher
# lingua_franca is optional, both lingua_franca and lingua_nostra are supported
# if both are installed preference is given to LN
# "setters" will be set in both lbs
# LN should be functionality equivalent to LF
from mycroft.util.time import now_local
from mycroft.util.log import LOG
# Prefer lingua_nostra over lingua_franca when both are installed; if
# neither is available, fall back to stubs that raise ImportError on use.
try:
    try:
        # lingua_nostra is tried first (see the module comment above).
        from lingua_nostra.parse import extract_number, extract_numbers, \
            extract_duration, get_gender, normalize
        from lingua_nostra.parse import extract_datetime as lf_extract_datetime
        from lingua_nostra.time import now_local
    except ImportError:
        from lingua_franca.parse import extract_number, extract_numbers, \
            extract_duration, get_gender, normalize
        from lingua_franca.parse import extract_datetime as lf_extract_datetime
        from lingua_franca.time import now_local
except ImportError:
    # Neither library is installed: expose stand-ins so importing this
    # module still succeeds; calling any of them raises ImportError.
    def lingua_franca_error(*args, **kwargs):
        raise ImportError("lingua_franca is not installed")

    extract_number = extract_numbers = extract_duration = get_gender = \
        normalize = lf_extract_datetime = lingua_franca_error
def fuzzy_match(x, against):
    """Score how similar two strings are.

    Returns:
        float: similarity ratio in [0.0, 1.0]; 1.0 means a perfect match,
        0.0 means no match at all.
    """
    matcher = SequenceMatcher(None, x, against)
    return matcher.ratio()
def match_one(query, choices):
    """Pick the choice that best fuzzy-matches a query string.

    Arguments:
        query: string to test
        choices: a list of candidate strings, or a dict whose keys are the
            candidates and whose values are returned in place of the key

    Returns:
        tuple: (best matching choice or its mapped value, score)
    """
    if isinstance(choices, dict):
        candidates = list(choices.keys())
    elif isinstance(choices, list):
        candidates = choices
    else:
        raise ValueError('a list or dict of choices must be provided')
    # Seed with the first candidate, then keep the highest-scoring one
    # (ties keep the earliest candidate, as before).
    best_key = candidates[0]
    best_score = fuzzy_match(query, candidates[0])
    for candidate in candidates[1:]:
        candidate_score = fuzzy_match(query, candidate)
        if candidate_score > best_score:
            best_key, best_score = candidate, candidate_score
    if isinstance(choices, dict):
        return (choices[best_key], best_score)
    return (best_key, best_score)
def _log_unsupported_language(language, supported_languages):
    """Log a warning about an unsupported language.

    Args:
        language (str): the language that was supplied.
        supported_languages (list of str): the languages that are supported.
    """
    supported = ' '.join(supported_languages)
    message = ('Language "{language}" not recognized! Please make sure your '
               'language is one of the following: {supported}.'
               .format(language=language, supported=supported))
    LOG.warning(message)
def extract_datetime(text, anchorDate="DEFAULT", lang=None,
                     default_time=None):
    """Extracts date and time information from a sentence.

    Parses many of the common ways that humans express dates and times,
    including relative dates like "5 days from today", "tomorrow", and
    "Tuesday".

    Vague terminology are given arbitrary values, like:

    * morning = 8 AM
    * afternoon = 3 PM
    * evening = 7 PM

    If a time isn't supplied or implied, the function defaults to 12 AM

    Args:
        text (str): the text to be interpreted
        anchorDate (:obj:`datetime`, optional): the date to be used for
            relative dating (for example, what does "tomorrow" mean?).
            Defaults to the current local date/time.
        lang (str): the BCP-47 code for the language to use, None uses default
        default_time (datetime.time): time to use if none was found in
            the input string.

    Returns:
        [:obj:`datetime`, :obj:`str`]: 'datetime' is the extracted date
            as a datetime object in the user's local timezone.
            'leftover_string' is the original phrase with all date and time
            related keywords stripped out. See examples for further
            clarification

        Returns 'None' if no date or time related text is found.

    Examples:

        >>> extract_datetime(
        ... "What is the weather like the day after tomorrow?",
        ... datetime(2017, 06, 30, 00, 00)
        ... )
        [datetime.datetime(2017, 7, 2, 0, 0), 'what is weather like']

        >>> extract_datetime(
        ... "Set up an appointment 2 weeks from Sunday at 5 pm",
        ... datetime(2016, 02, 19, 00, 00)
        ... )
        [datetime.datetime(2016, 3, 6, 17, 0), 'set up appointment']

        >>> extract_datetime(
        ... "Set up an appointment",
        ... datetime(2016, 02, 19, 00, 00)
        ... )
        None
    """
    if anchorDate is None:
        # Passing None explicitly is deprecated; callers should simply omit
        # the parameter (the "DEFAULT" sentinel below detects omission).
        warn(DeprecationWarning("extract_datetime(anchorDate=None) is "
                                "deprecated. This parameter can be omitted."))

    # "DEFAULT" is a sentinel distinguishing "argument omitted" from an
    # explicit None; both resolve to the current local time.
    if anchorDate is None or anchorDate == "DEFAULT":
        anchorDate = now_local()
    if not lang:
        # Imported lazily -- presumably to avoid a circular import at module
        # load time; TODO confirm.
        from mycroft.configuration.locale import get_default_lang
        lang = get_default_lang()
    return lf_extract_datetime(text,
                               anchorDate,
                               lang,
                               default_time)
/4ch-1.0.0.tar.gz/4ch-1.0.0/README.rst | fourch
======
.. _docs: https://4ch.readthedocs.org
.. _repo: https://github.com/plausibility/4ch
fourch (stylized as 4ch) is a wrapper around the 4chan JSON API, provided by moot. It allows you to interact with 4chan (in a read-only way) easily through your scripts.
Originally <strike>stolen</strike> forked from `e000/py-4chan <https://github.com/e000/py-4chan>`_, but then I moved repos and renamed stuff since I'm pretty bad about that.
Requirements
------------
- Python 2.7 (what I test with, 2.x might work)
- requests
Notes
-----
- This isn't guaranteed to work all the time; after all, the API may change, and 4ch will have to be updated accordingly.
- If a feature is missing, open an issue on the `repo`_, and it may well be implemented.
Running / Usage
---------------
- Install & import: ``$ pip install 4ch``, ``import fourch``
- See the `docs`_
Contributing
------------
If you're interested in contributing to the usability of 4ch, or just want to give away stars, you can visit the 4ch github `repo`_.
| PypiClean |
/Office365-REST-Python-Client-2.4.3.tar.gz/Office365-REST-Python-Client-2.4.3/office365/runtime/auth/providers/acs_token_provider.py | import requests
import office365.logger
from office365.runtime.auth.authentication_provider import AuthenticationProvider
from office365.runtime.auth.token_response import TokenResponse
from office365.runtime.compat import urlparse
class ACSTokenProvider(AuthenticationProvider, office365.logger.LoggerContext):
    """Acquires app-only access tokens from Microsoft Azure Access Control
    Service (ACS) and attaches them to outgoing requests."""

    def __init__(self, url, client_id, client_secret):
        """
        Provider to acquire the access token from a Microsoft Azure Access Control Service (ACS)

        :param str client_id: The OAuth client id of the calling application.
        :param str client_secret: Secret string that the application uses to prove its identity when requesting a token
        :param str url: SharePoint web or site url
        """
        self.url = url
        self.redirect_url = None
        self.error = None
        # Well-known principal id of the SharePoint Online service.
        self.SharePointPrincipal = "00000003-0000-0ff1-ce00-000000000000"
        self._client_id = client_id
        self._client_secret = client_secret
        self._cached_token = None

    def authenticate_request(self, request):
        """Attach a Bearer ``Authorization`` header to an outgoing request.

        :type request: office365.runtime.http.request_options.RequestOptions
        """
        self.ensure_app_only_access_token()
        request.set_header('Authorization', self._get_authorization_header())

    def ensure_app_only_access_token(self):
        """Make sure a usable token is cached, acquiring or refreshing it
        as needed.

        :return: True if a valid token is available.
        """
        # Fix: also refresh when the cached token is no longer valid.
        # Previously a token was only fetched when the cache was empty, so
        # an expired token was reused for the lifetime of the provider.
        if self._cached_token is None or not self._cached_token.is_valid:
            self._cached_token = self.get_app_only_access_token()
        return self._cached_token and self._cached_token.is_valid

    def get_app_only_access_token(self):
        """
        Retrieves an app-only access token from ACS.

        :raises ValueError: when the token request fails; the underlying
            HTTP error text is recorded in ``self.error``.
        """
        try:
            realm = self._get_realm_from_target_url()
            url_info = urlparse(self.url)
            return self._get_app_only_access_token(url_info.hostname, realm)
        except requests.exceptions.RequestException as e:
            self.error = e.response.text if e.response is not None else "Acquire app-only access token failed."
            raise ValueError(self.error)

    def _get_app_only_access_token(self, target_host, target_realm):
        """
        Retrieves an app-only access token from ACS to call the specified principal
        at the specified targetHost. The targetHost must be registered for target principal.

        :param str target_host: Url authority of the target principal
        :param str target_realm: Realm to use for the access token's nameid and audience
        """
        resource = self.get_formatted_principal(self.SharePointPrincipal, target_host, target_realm)
        principal_id = self.get_formatted_principal(self._client_id, None, target_realm)
        sts_url = self.get_security_token_service_url(target_realm)
        oauth2_request = {
            'grant_type': 'client_credentials',
            'client_id': principal_id,
            'client_secret': self._client_secret,
            'scope': resource,
            'resource': resource
        }
        response = requests.post(url=sts_url, headers={'Content-Type': 'application/x-www-form-urlencoded'},
                                 data=oauth2_request)
        response.raise_for_status()
        return TokenResponse.from_json(response.json())

    def _get_realm_from_target_url(self):
        """Determine the ACS realm (tenant id) for the site url via an
        unauthenticated probe request."""
        response = requests.head(url=self.url, headers={'Authorization': 'Bearer'})
        return self.process_realm_response(response)

    @staticmethod
    def process_realm_response(response):
        """Extract the realm from a ``WWW-Authenticate`` challenge header.

        :type response: requests.Response
        :return: the realm string, or None when the header is absent.
        """
        header_key = "WWW-Authenticate"
        if header_key in response.headers:
            # Header looks like: Bearer realm="<realm>",client_id="..."
            auth_values = response.headers[header_key].split(",")
            bearer = auth_values[0].split("=")
            return bearer[1].replace('"', '')
        return None

    @staticmethod
    def get_formatted_principal(principal_name, host_name, realm):
        """Build a principal identifier of the form ``name[/host]@realm``."""
        if host_name:
            return "{0}/{1}@{2}".format(principal_name, host_name, realm)
        return "{0}@{1}".format(principal_name, realm)

    @staticmethod
    def get_security_token_service_url(realm):
        """Return the ACS OAuth2 token endpoint for the given realm."""
        return "https://accounts.accesscontrol.windows.net/{0}/tokens/OAuth/2".format(realm)

    def _get_authorization_header(self):
        # TokenResponse exposes the raw token as `accessToken`.
        return 'Bearer {0}'.format(self._cached_token.accessToken)

    def get_last_error(self):
        """Return the last recorded error message, if any."""
        return self.error
/ExpectoCastellum-0.5.tar.gz/ExpectoCastellum-0.5/expectocastellum/engine.py | import rooms
import thesaurus
import errors
import things
import people
import add_words
import json
import os
import dictionary
class Engine(object):
    """Game engine: loads a game's JSON data and runs the interactive loop.

    :param name: directory name containing the game's JSON repositories
        (``rooms.json``, ``things.json``, ``people.json``).
    """

    def __init__(self, name):
        self.name = name
        # Populate the module-level registries from the game's JSON files.
        rooms.make_rooms_from_json(self.name)
        things.make_things_from_json(self.name)
        people.make_people_from_json(self.name)
        self.player = rooms.Player()
        self.death = rooms.Death()
        self.start_location = ''
        # References to (not copies of) the module-level registries.
        self.roomdict = rooms.phonebook
        self.thingdict = things.objectlist
        self.npcdict = people.npclist
        self.mirror_paths = True

    def new(self, type, **attrs):
        """Create, register and persist a new game object.

        :param type: one of ``'room'``, ``'thing'`` or ``'npc'``
            (case-insensitive).
        :param attrs: attributes set on the new object; ``name`` is
            effectively required and is prompted for if missing.
        :return: the new object, or ``None`` for an unknown type.
        """
        if type.lower() == 'room':
            to_build = rooms.Room()
            dictname = self.roomdict
        elif type.lower() == 'thing':
            to_build = things.Thing()
            dictname = self.thingdict
        elif type.lower() == 'npc':
            to_build = people.Person()
            dictname = self.npcdict
        else:
            errors.unknown_type(type)
            return
        if not attrs or 'name' not in attrs:
            to_build.name = errors.nameless_item(type.lower())
        to_build.setprops(**attrs)
        dictname[to_build.name] = to_build
        # Things and NPCs need a parser-friendly reference name; default to
        # the display name with the spaces stripped.
        if (type.lower() == 'thing' or type.lower() == 'npc') and not to_build.ref_name:
            defaultname = to_build.name.replace(' ', '')
            to_build.ref_name = defaultname
            errors.no_ref_name(defaultname)
        self.save_type(type.lower())
        return to_build

    def update_game_dictionary(self, type):
        """Re-key one registry so each entry's key matches its object's
        current ``name`` (needed after an object is renamed in place)."""
        if type.lower() == 'room':
            dictname = self.roomdict
        elif type.lower() == 'thing':
            dictname = self.thingdict
        elif type.lower() == 'npc':
            dictname = self.npcdict
        else:
            errors.unknown_type()
            # Fix: bail out here; falling through used to hit the loop below
            # with `dictname` undefined (NameError).
            return
        # Fix: iterate over a snapshot -- the original mutated the dict while
        # iterating it, which raises RuntimeError.
        for name, instance in list(dictname.items()):
            if name != instance.name:
                dictname[instance.name] = instance
                del dictname[name]

    def update_game_dictionaries(self):
        """Re-key all three registries (rooms, things, NPCs)."""
        self.update_game_dictionary('room')
        self.update_game_dictionary('thing')
        self.update_game_dictionary('npc')

    def save_type(self, type):
        """Serialize one registry to ``./<game>/<rooms|things|people>.json``.

        Existing file contents are merged with (and overridden by) the
        in-memory objects; empty attributes are omitted.
        """
        existing = dict()
        if type.lower() == 'room':
            pathextend = 'rooms'
            dictname = self.roomdict
            if self.mirror_paths:
                # Keep two-way exits consistent before persisting.
                rooms.mirror_paths()
        elif type.lower() == 'thing':
            pathextend = 'things'
            dictname = self.thingdict
        elif type.lower() == 'npc':
            pathextend = 'people'
            dictname = self.npcdict
        else:
            errors.unknown_type()
            return
        try:
            with open(os.getcwd() + '/' + self.name + '/' + pathextend + '.json') as json_repo:
                existing = json.load(json_repo)
        except (IOError, ValueError):
            # Missing or malformed repository file: start from scratch.
            # (Narrowed from a bare `except:` that hid unrelated errors.)
            pass
        for name, instance in dictname.items():
            if dictname == self.thingdict or dictname == self.npcdict:
                if not instance.ref_name:
                    defaultname = instance.name.replace(' ', '')
                    instance.ref_name = defaultname
                    errors.no_ref_name(defaultname)
                else:
                    self.parser_words_update(type, instance)
            # Drop falsy attributes so the JSON stays compact.
            existing[name] = {k: v for k, v in instance.__dict__.items() if v}
        with open(os.getcwd() + '/' + self.name + '/' + pathextend + '.json', 'w') as json_repo:
            json.dump(existing, json_repo)

    def save(self):
        """Persist all three registries."""
        self.save_type('room')
        self.save_type('thing')
        self.save_type('npc')

    def parser_words_update(self, type, gameobject):
        """Register a game object's reference name with the parser
        vocabulary."""
        if type.lower() == 'thing':
            add_words.add_noun(gameobject.ref_name)
        elif type.lower() == 'npc':
            add_words.add_people(gameobject.ref_name)
        elif type.lower() == 'room':
            errors.adding_room_to_parser_dict()
        else:
            errors.unknown_type()

    def new_room(self, **attrs):
        """Convenience wrapper for ``new('room', ...)``."""
        return self.new('room', **attrs)

    def new_thing(self, **attrs):
        """Convenience wrapper for ``new('thing', ...)``."""
        return self.new('thing', **attrs)

    def new_npc(self, **attrs):
        """Convenience wrapper for ``new('npc', ...)``."""
        return self.new('npc', **attrs)

    def play(self):
        """Run the interactive game loop until the player quits."""
        # Merge the generated word lists into the parser vocabulary,
        # skipping duplicates.
        thesaurus.canonwords.extend([item for item in dictionary.directions if item not in thesaurus.canonwords])
        thesaurus.canonwords.extend([item for item in dictionary.nouns if item not in thesaurus.canonwords])
        thesaurus.canonwords.extend([item for item in dictionary.people if item not in thesaurus.canonwords])
        thesaurus.canonwords.extend([item for item in dictionary.spells if item not in thesaurus.canonwords])
        self.start_location = [room.name for room in rooms.phonebook.values() if room.start_location]
        if len(self.start_location) > 1:
            errors.too_many_start_locations(self.start_location[0])
        elif not self.start_location:
            # Fix: the original compared the list against '' (its pre-scan
            # value), so the "no start location" branch could never trigger
            # and an empty list crashed with IndexError below.
            errors.no_start_location()
        else:
            self.player.location = self.start_location[0]
            rooms.phonebook[self.player.location].look(self.player)
        # NOTE(review): the loop below runs even when no unique start
        # location was found, leaving player.location unset -- presumably
        # the errors module handles that; TODO confirm.
        rooms_to_init = [room.name for room in rooms.phonebook.values() if room.stairrooms]
        for room in rooms_to_init:
            rooms.phonebook[room].shuffle_stairs()
        while True:
            user_input = raw_input("> ").lower()
            outcome = thesaurus.process(user_input, self.player, self.name)
            if outcome == 'break':
                break
/AstroAugmentations-0.1.0.tar.gz/AstroAugmentations-0.1.0/README.md | # AstroAugmentations
Custom image augmentations specifically designed around astronomical
instruments and data. Please open an
[issue](https://github.com/mb010/AstroAugmentations/issues) to highlight missing augmentations and / or datasets. This is an open source project, so feel free to fork, make changes and submit a [pull request](https://github.com/mb010/AstroAugmentations/pulls) of your additions and modifications!
This package is based on [Albumentations](https://github.com/albumentations-team/albumentations/).
This should allow scalability and applicability in a multitude of cases,
including both TensorFlow and PyTorch.
# Features
- Augmentations designed for specific astronomical domains and data formats.
- Access to standardized default data sets.
- Most recent version covers:
- Radio image augmentations (designed with interferometers in mind)
# Installation
**Install**:\
`pip install AstroAugmentations`\
**Import format**:\
`import astroaugmentations as AA`.
:warning: **v0.1.0 requires torch and torchvision which are not automatically installed!**
The version you install depends on your system.
Please see the official [PyTorch](https://pytorch.org/) site to download
an appropriate configuration. These are currently used in the example datasets.\
Developed using: `torch>=1.10.2+cu113` and `torchvision>=0.11.3+cu113`.
# Usage
Augmentations for all modalities and domains supported are provided within the `AA.AstroAugmentations()` class in [this file](https://github.com/mb010/AstroAugmentations/tree/main/astroaugmentations/augmentations.py).
`AA.CustomKernelConvolution()` requires a kernel to be available in a directory as a saved numpy array (e.g. `./kernels/FIRST_kernel.npy`). We provide a kernel we generated
[here](https://github.com/mb010/AstroAugmentations/tree/main/astroaugmentations/kernels)
(designed for the [FIRST Survey](http://sundog.stsci.edu/)).
# Demo / Examples
Please see the ipython notebooks provided for demonstrations of the
various augmentations. These are implemented using Torch.
The interaction with the Albumentations package should allow for
AstroAugmentations to be applied to other frameworks.
See examples of their implementations [here](https://albumentations.ai/docs/examples/).
# Using the in-built datasets
Data sets are called using the scripts provided in
[astroaugmentations/datasets](https://github.com/mb010/AstroAugmentations/tree/main/astroaugmentations/datasets).
See use examples in the demonstration ipython notebooks.
# Adapting Data Loaders (PyTorch)
Following Albumentions notation, we adapt respective torch data loaders from a functional call to an Albumnetations call as shown in their [PyTorch Example](https://albumentations.ai/docs/examples/pytorch_semantic_segmentation/#Define-a-PyTorch-dataset-class) which allows respective transformations to be applied simultaneously to segmentation masks. We present an example of what this can look like.
Assuming there is a `self.transform` attribute as a parameter in our data class. In which case, normally inside the `__getitem__` method, a conditional application of the transform is made:
```
if self.transform is not None:
image = self.transform(image)
```
For Albumentations, and thus our package, we need to adapt this notation. In the case of image augmentations (no mask augmentations) we write:
```
if self.transform is not None:
image = self.transform(image=image)["image"]
```
This seems unnecessary, until we consider an example of what happens when we try to apply our transformations to masks as well as the input:
```
if self.transform is not None:
transformed = self.transform(image=image, mask=mask)
image = transformed["image"]
mask = transformed["mask"]
```
# Package Structure:
```
AstroAugmentations
├── LICENSE
├── astroaugmentations
│ ├── __init__.py
│ ├── augmentations.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── VLA_raw_antenna_position.py
│ │ └── kernel_creation.py
│ ├── datasets
│ │ ├── __init__.py
│ │ ├── galaxy_mnist.py
│ │ └── MiraBest_F.py
│ └── module_numpy_2.py
├── README.md
└── setup.py
```
# Citation
Relevant publication in prep. Please reach out to the author for updates.
# Contact
For questions please contact: micah.bowles@postgrad.manchester.ac.uk
For bugs or any issues with implementing this package, please open an [issue](https://github.com/mb010/AstroAugmentations/issues).
| PypiClean |
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/standard/plugins/a11yhelp/dialogs/lang/fo.js | /*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/
// NOTE(review): machine-generated, minified CKEditor locale bundle for "fo"
// (Faroese). Most strings are still the untranslated English fallbacks;
// translate in the upstream a11yhelp plugin sources rather than editing
// this minified file.
CKEDITOR.plugins.setLang("a11yhelp","fo",{title:"Accessibility Instructions",contents:"Help Contents. To close this dialog press ESC.",legend:[{name:"General",items:[{name:"Editor Toolbar",legend:"Press ${toolbarFocus} to navigate to the toolbar. Move to the next and previous toolbar group with TAB and SHIFT+TAB. Move to the next and previous toolbar button with RIGHT ARROW or LEFT ARROW. Press SPACE or ENTER to activate the toolbar button."},{name:"Editor Dialog",legend:"Inside a dialog, press TAB to navigate to the next dialog element, press SHIFT+TAB to move to the previous dialog element, press ENTER to submit the dialog, press ESC to cancel the dialog. When a dialog has multiple tabs, the tab list can be reached either with ALT+F10 or with TAB as part of the dialog tabbing order. With tab list focused, move to the next and previous tab with RIGHT and LEFT ARROW, respectively."},
{name:"Editor Context Menu",legend:"Press ${contextMenu} or APPLICATION KEY to open context-menu. Then move to next menu option with TAB or DOWN ARROW. Move to previous option with SHIFT+TAB or UP ARROW. Press SPACE or ENTER to select the menu option. Open sub-menu of current option with SPACE or ENTER or RIGHT ARROW. Go back to parent menu item with ESC or LEFT ARROW. Close context menu with ESC."},{name:"Editor List Box",legend:"Inside a list-box, move to next list item with TAB OR DOWN ARROW. Move to previous list item with SHIFT+TAB or UP ARROW. Press SPACE or ENTER to select the list option. Press ESC to close the list-box."},
{name:"Editor Element Path Bar",legend:"Press ${elementsPathFocus} to navigate to the elements path bar. Move to next element button with TAB or RIGHT ARROW. Move to previous button with SHIFT+TAB or LEFT ARROW. Press SPACE or ENTER to select the element in editor."}]},{name:"Commands",items:[{name:" Undo command",legend:"Press ${undo}"},{name:" Redo command",legend:"Press ${redo}"},{name:" Bold command",legend:"Press ${bold}"},{name:" Italic command",legend:"Press ${italic}"},{name:" Underline command",
legend:"Press ${underline}"},{name:" Link command",legend:"Press ${link}"},{name:" Toolbar Collapse command",legend:"Press ${toolbarCollapse}"},{name:" Access previous focus space command",legend:"Press ${accessPreviousSpace} to access the closest unreachable focus space before the caret, for example: two adjacent HR elements. Repeat the key combination to reach distant focus spaces."},{name:" Access next focus space command",legend:"Press ${accessNextSpace} to access the closest unreachable focus space after the caret, for example: two adjacent HR elements. Repeat the key combination to reach distant focus spaces."},
{name:" Accessibility Help",legend:"Press ${a11yHelp}"},{name:" Paste as plain text",legend:"Press ${pastetext}",legendEdge:"Press ${pastetext}, followed by ${paste}"}]}],tab:"Tab",pause:"Pause",capslock:"Caps Lock",escape:"Escape",pageUp:"Page Up",pageDown:"Page Down",leftArrow:"Left Arrow",upArrow:"Up Arrow",rightArrow:"Right Arrow",downArrow:"Down Arrow",insert:"Insert",leftWindowKey:"Left Windows key",rightWindowKey:"Right Windows key",selectKey:"Select key",numpad0:"Numpad 0",numpad1:"Numpad 1",
numpad2:"Numpad 2",numpad3:"Numpad 3",numpad4:"Numpad 4",numpad5:"Numpad 5",numpad6:"Numpad 6",numpad7:"Numpad 7",numpad8:"Numpad 8",numpad9:"Numpad 9",multiply:"Falda",add:"Pluss",subtract:"Frádráttar",decimalPoint:"Decimal Point",divide:"Býta",f1:"F1",f2:"F2",f3:"F3",f4:"F4",f5:"F5",f6:"F6",f7:"F7",f8:"F8",f9:"F9",f10:"F10",f11:"F11",f12:"F12",numLock:"Num Lock",scrollLock:"Scroll Lock",semiColon:"Semikolon",equalSign:"Javnatekn",comma:"Komma",dash:"Dash",period:"Punktum",forwardSlash:"Forward Slash",
graveAccent:"Grave Accent",openBracket:"Open Bracket",backSlash:"Backslash",closeBracket:"Close Bracket",singleQuote:"Single Quote"});
/Mathics3-6.0.2.tar.gz/Mathics3-6.0.2/FUTURE.rst | *One can always dream...*
.. contents::
The following 2023 road map, which appears in the 6.0.0 release, hasn't gone through enough discussion; it is provisional.
Check the github repository for updates.
2023 Roadmap
============
When the release settles, "Forms, Boxing, and Formatting" is the next
large refactor slated. Having this will allow us to support Jupyter or other front
ends. And it is something that is most visibly wrong in Mathics3 output.
See ``PAST.rst`` for how the 2023 Roadmap compares to the 2022 Roadmap.
Forms, Boxing and Formatting
----------------------------
This remains the biggest holdover item from 2022, and seems easily doable.
It hinders interaction with Jupyter or other front ends.
Right now "Form" (a high-level specification of how to format) and
"format" (a low level specification of how output is encoded) are sometimes muddied.
For example, TeXForm may be a "Form", but output encoded for AMS-LaTeX is done by a *formatter*.
So AMS-LaTeX rendering and other kinds of rendering should be split into its own rendering for formatter module.
Currently we have asymptote, and svg "format" modules.
Back to high-level again, Boxing is something that can be written in Mathics3, and doing this at
least initially ensures that we have design that fits more naturally
into the Wolfram Language philosophy.
Performance
-----------
While this is probably more of an overall concern, for now, big refactoring needed here, such as
going over pattern matching, will get done after Forms, Boxing and Formatting .
Forms, Boxing and Formatting will however contain one improvement that
should speed up our performance: separating M-Expression evaluation from
Box "evaluations).
We expect there will be other little opportunities here and there as we have seen in the past.
More Custom kinds of (compound) Expressions
+++++++++++++++++++++++++++++++++++++++++++
We scratched the surface here with ListExpression. Associations and Python/Sympy/numpy literals can be customized with an aim towards reducing conversions from and to M-expressions.
A number of compound expressions, especially those which involve literals are more efficiently represented in some other way. For example,
representing a Mathics3 Association as a Python ordered dictionary, a Mathics3 List as a Python list or tuple, or as a numpy array.
Further Code Reorganization in Core and Eval
--------------------------------------------
Core object like ``BaseElement`` and possibly ``Symbol``, (and
probably others) are too "fat": they have too many custom methods that
are not applicable for most of the subclasses support. It is likely
another pass will be made over this.
We have started moving "eval" code out of the "eval" methods and into its own module.
Mathics3 Module Enhancement
---------------------------
While we have put in quite a bit of effort to get these to be 6.0.0 compliant. There is still more work to do, and there are numerous bugs there.
Refactoring code to generate Graphs in ``pymathics.graph`` might happen. Porting the ``pymathics.graph`` code to use NetworkX 3.0 would be nice;
``pymathics.natlang`` could also use a look over in terms of the libraries we are using.
Python upgrades
---------------
After Mathics3 Version 6.0.0, Python 3.6 will be dropped and possibly 3.7. Changes are needed to support 3.11 so we will be focusing on 3.8 to 3.11.
We have gradually been using a more modern Python programming style
and idioms: more type annotation, use of ``isort`` (order Python
imports), ``black`` (code formatting), and ``flake8`` (Python lint
checking).
Deferred
--------
As mentioned before, pattern-matching revision is for later. `This
discussion
<https://github.com/Mathics3/mathics-core/discussions/800>`_ is a
placeholder for this discussion.
Overhauling the documentation to use something better supported and
more mainstream like sphinx is deferred. This would really be nice to
have, but it will require a bit of effort and detracts from all of the other work that is needed.
We will probably try this out in a limited basis in one of the Mathics3 modules.
Speaking of Mathics3 Modules, there are probably various scoping/context issues that Mathics3 modules make more apparent.
This will is deferred for now.
Way down the line, is converting to a more sequence-based interpreter which is needed for JIT'ing and better Compilation support.
Likewise, speeding up startup time via saving and loading an image is something that is more of a long-term goal.
Things in this section can change, depending on the help we can get.
Miscellaneous
-------------
No doubt there will be numerous bug fixes, and builtin-function additions especially now that we have a better framework to support this kind of growth.
Some of the smaller deferred issues refactorings may get addressed.
As always, where and how fast things grow here depends on help available.
2022 Roadmap
=============
Code reorganization and Refactoring
-----------------------------------
This has been the biggest impediment to doing just about anything else.
Boxing and Formatting
+++++++++++++++++++++
We will isolate and make more scalable how boxing and top-level formatting is done. This will happen right after release 5.0.0
API Expansion
+++++++++++++
We have an API for graphics3d which is largely used for many Graphics 3D objects like spheres and regular polyhedra. However, this needs to get expanded for Plotting.
An API for JSON 2D plotting is needed too.
Execution Performance
----------------------
While we have made a start on this in 5.0, much more is needed.
We have only gone over the top-level evaluation for compound expressions.
The following evaluation phases need to be gone over and revised:
* pattern-matching and rewrite rules
* apply steps
With respect to top-level evaluation, we have only scratched the surface of what can be done with evaluation specialization. We currently have a kind of specialization for Lists. Possibly the same is needed for Associations.
This work will continue after the 5.0.0 release. We expect plotting will be faster by the next release or major release.
Being able to run existing WMA packages
----------------------------------------
Sadly, Mathics cannot run most of the open-source WMA packages.
In particular we would like to see the following run:
* Rubi
* KnotTheory
This is a longer-term goal.
Documentation System
--------------------
The current home-grown documentation should be replaced with Sphynx and autodoc.
Compilation
-----------
Compilation is a rather unsophisticated process by trying to speed up Python code using llvmlite. The gains here will always be small compared the kinds of gains a compiler can get. However in order to even be able to contemplate writing a compiler (let alone say a JIT compiler), the code base needs to be made to work more like a traditional interpreter. Some work will be needed just to be able or create a sequence of instructions to run.
Right now the interpreter is strictly a tree interpreter.
Simpler Things
---------------
There have been a number of things that have been deferred:
* Using unicode symbols in output
* Making StandardOutput of polynomials match WMA
* Finish reorganizing Builtin Functions so that the structure is more logical
* Adding more Graphics Primitives
* Working on Jupyter integrations
In some cases like the first two items these are easy, and more important things have prevented doing this. In some cases like the last two, there are more foundational work that should be done first.
2021 Roadmap
=============
Graphics3D
----------
With 4.0.0, we have started defining a Graphics3D protocol. It is
currently expressed in JSON. There is an independent `threejs-based
module
<https://www.npmjs.com/package/@mathicsorg/mathics-threejs-backend>`_
to implement this. Tiago Cavalcante Trindade is responsible for this
code and for modernizing our JavaScript, and it use in threejs.
We expect a lot more to come. For example UniformPolyhedra is too new
to have been able to make this release.
We also need to define a protocol and implementation for 2D Graphics.
Boxing, Formatting, Forms
-------------------------
While we have started to segregate boxing (bounding-box layout) and
formatting (translation to a conventional rendering format or
language), a lot more work needs to be done.
Also, a lot more Forms should be defined. And those that exist, like
TeXForm, and StandardForm, could use improvement.
This area is still a big mess.
Jupyter and other Front Ends
----------------------------
Although we had planned to move forward on this previously, it now
appears that we should nail down some of the above better, before
undertaking. Jupyter uses a wire protocol, and we still have
work to do in defining the interfaces mentioned above.
That said, this is still on the horizon.
Interest has also been expressed in WebGL, and Flask front ends. But
these too will require use to have better protocols defined and in
place.
Documentation
-------------
Sometime around release 4.0.0, all of the code related to producing
documentation in LaTeX and in Mathics Django, and running doctests
will be split off and put into its own git repository.
I've spent a lot of time banging on this to try to get to to be be
less fragile, more modular, more intelligible, but it still needs a
*lot* more work and still is very fragile.
Also there is much to do on the editor side of things in terms of
reorganizing sections (which also implies reorganizing the builtin
module structure, since those are tightly bound together).
We still need to convert this into Sphinx-based, with its doctest. We
also need to be able to extract information in sphinx/RsT format
rather than its home-brew markup language which is sort of XML like.
Performance
-----------
This is one area where we know a lot about what *kinds* of things need
to be done, but have barely scratched the surface here.
The current implementation is pretty bare bones.
We have problems with recursion, memory consumption, loading time, and
overall speed in computation.
Support for External Packages
-----------------------------
I would have liked to have seen this going earlier. However right now
Mathics is still at too primitive a level for any serious package to
be run on it. This will change at some point though.
Support for Mathematica Language Levels
---------------------------------------
This is something that I think would be extremely useful and is
straightforward to do for someone who has used Mathematica over the
years and knows it well. I think most of this could be supported in
Mathics code itself and loaded as packages. Any takers?
| PypiClean |
/GraphLab_Create-2.1-cp27-none-macosx_10_5_x86_64.macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.macosx_10_11_intel.macosx_10_11_x86_64.whl/graphlab/meta/bytecodetools/instruction.py | from __future__ import print_function
import opcode
import sys
# True when running under Python 3, where indexing a bytes object
# already yields integers.
py3 = sys.version_info.major >= 3

if py3:
    # Python 3: bytecode bytes index to ints, so no conversion is needed.
    def co_ord(c):
        return c
else:
    # Python 2: code strings yield 1-character strs; map them to ordinals.
    co_ord = ord
class Instruction(object):
    '''
    A Python byte-code instruction.

    Attributes mirror the fields of a ``dis``-style listing: ``i`` is the
    byte offset, ``op`` the numeric opcode, ``lineno`` the source line,
    ``oparg`` the raw numeric argument and ``arg`` the resolved argument
    (constant, name, jump target, ...).
    '''

    def __init__(self, i= -1, op=None, lineno=None):
        self.i = i
        self.op = op
        self.lineno = lineno
        self.oparg = None
        self.arg = None
        self.extended_arg = 0
        self.linestart = False
        # BUGFIX: __str__ reads these two flags, but they were previously
        # only ever set externally by the disassembler, so printing a
        # freshly constructed Instruction raised AttributeError.  Default
        # them here; the disassembler may still overwrite them.
        self.lasti = False
        self.label = False

    @property
    def opname(self):
        '''Human-readable mnemonic for this opcode (e.g. ``LOAD_FAST``).'''
        return opcode.opname[self.op]

    @property
    def is_jump(self):
        '''True if this instruction is a relative or absolute jump.'''
        return self.op in opcode.hasjrel or self.op in opcode.hasjabs

    @property
    def to(self):
        '''Jump target; raises if this is not a jump instruction.'''
        if self.op in opcode.hasjrel:
            return self.arg
        elif self.op in opcode.hasjabs:
            return self.oparg
        else:
            raise Exception("this is not a jump op (%s)" % (self.opname,))

    def __repr__(self):
        res = '<%s(%i)' % (opcode.opname[self.op], self.i,)
        if self.arg is not None:
            res += ' arg=%r' % (self.arg,)
        elif self.oparg is not None:
            res += ' oparg=%r' % (self.oparg,)
        return res + '>'

    def __str__(self):
        # Render one dis-style listing line: line number, current-instruction
        # marker, jump-target marker, offset, mnemonic and argument.
        result = []
        if self.linestart:
            result.append("%3d" % self.lineno)
        else:
            result.append("   ")
        if self.lasti:
            result.append('-->')
        else:
            result.append('   ')
        if self.label:
            result.append('>>')
        else:
            result.append('  ')
        result.append(repr(self.i).rjust(4))
        result.append(opcode.opname[self.op].ljust(20))
        if self.op >= opcode.HAVE_ARGUMENT:
            result.append(repr(self.oparg).rjust(5))
            if self.op in opcode.hasconst:
                result.append('(' + repr(self.arg) + ')')
            elif self.op in opcode.hasname:
                result.append('(' + repr(self.arg) + ')')
            elif self.op in opcode.hasjrel:
                result.append('(to ' + repr(self.arg) + ')')
            elif self.op in opcode.haslocal:
                result.append('(' + repr(self.arg) + ')')
            elif self.op in opcode.hascompare:
                result.append('(' + repr(self.arg) + ')')
            elif self.op in opcode.hasfree:
                result.append('(' + repr(self.arg) + ')')
        return ' '.join(result)
/Flask-Philo-3.7.1.tar.gz/Flask-Philo-3.7.1/flask_philo/admin.py | import os
import argparse
from jinja2 import Template
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
def create_from_template(**data):
    """Render one bundled Jinja2 template and write the result.

    Expects ``project_name``, ``path`` (target directory) and ``filename``
    (template name, reused as the output file name) in *data*.  A trailing
    newline is appended when the rendered content is non-empty.
    """
    source_path = os.path.join(BASE_DIR, 'templates', data['filename'])
    with open(source_path, 'r') as source:
        rendered = Template(source.read()).render(
            project_name=data['project_name'])
    target_path = os.path.join(data['path'], data['filename'])
    with open(target_path, 'w') as target:
        target.write(rendered)
        if rendered:
            target.write('\n')
def start_project():
    """Create the skeleton directory tree for a new Flask-Philo project.

    Reads the command and project name from the command line, lays out the
    documentation/src folder structure under the current working directory,
    and renders the starter files from the bundled templates.  Exits with
    status 1 if the target directory already exists.
    """
    parser = argparse.ArgumentParser(
        description='Admin tool for Flask-Philo projects')
    parser.add_argument('command', help='command to be executed')
    parser.add_argument('name', help='name of project')
    args = parser.parse_args()
    project_name = args.name
    project_dir = os.path.join(os.getcwd(), project_name)

    if os.path.exists(project_dir):
        print('Directory {} already exists\n'.format(project_dir))
        exit(1)
    os.mkdir(project_dir)

    def touch(*segments):
        # Create an empty file, failing loudly ('x' mode) if it exists.
        with open(os.path.join(*segments), 'x'):
            pass

    def render(target_path, filename):
        # Render one bundled template into target_path/filename.
        create_from_template(**{
            'project_name': project_name,
            'path': target_path,
            'filename': filename,
        })

    for folder in ('documentation', 'src'):
        os.mkdir(os.path.join(project_dir, folder))
    render(project_dir, 'README.md')

    # ./src
    src_dir = os.path.join(project_dir, 'src')
    for folder in ('app', 'config', 'console_commands', 'tests', 'tools'):
        os.mkdir(os.path.join(src_dir, folder))
    render(src_dir, 'manage.py')

    # ./src/app
    app_dir = os.path.join(src_dir, 'app')
    for folder in ('models', 'serializers', 'views'):
        os.mkdir(os.path.join(app_dir, folder))
        touch(app_dir, folder, '__init__.py')
    touch(app_dir, '__init__.py')
    render(app_dir, 'urls.py')
    render(os.path.join(app_dir, 'views'), 'example_views.py')

    # ./src/config
    for config_file in ('development.py', 'test.py'):
        render(os.path.join(src_dir, 'config'), config_file)

    # ./src/console_commands
    render(os.path.join(src_dir, 'console_commands'), '__init__.py')

    # ./src/tests
    touch(src_dir, 'tests', '__init__.py')
    render(os.path.join(src_dir, 'tests'), 'test_views.py')

    # ./src/tools
    requirements_dir = os.path.join(src_dir, 'tools', 'requirements')
    os.mkdir(requirements_dir)
    for requirements_file in ('dev.txt', 'base.txt'):
        render(requirements_dir, requirements_file)
def main():
    """Entry point: dispatch the first CLI argument to its handler.

    Unknown commands print the list of valid commands and exit with 1.
    """
    commands = {
        'startproject': start_project
    }
    parser = argparse.ArgumentParser(
        description='Admin tool for Flask-Philo projects')
    parser.add_argument('command', help='command to be executed')
    known_args, _extra = parser.parse_known_args()
    handler = commands.get(known_args.command)
    if handler is None:
        print('Invalid command. Valid commands are:')
        for command_name in commands.keys():
            print('\n * {}'.format(command_name))
        exit(1)
    handler()


if __name__ == '__main__':
    main()
/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/DjangoAppCenter/simpleui/forms.py | from django import forms
import uuid
from django.core import validators
from django.core.exceptions import ValidationError
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES # NOQA
from django.forms.boundfield import BoundField
from django.forms.forms import DeclarativeFieldsMetaclass, BaseForm
from django.forms.utils import from_current_timezone, to_current_timezone
from django.forms.widgets import (
FILE_INPUT_CONTRADICTION, CheckboxInput, ClearableFileInput, DateInput,
DateTimeInput, EmailInput, FileInput, HiddenInput, MultipleHiddenInput,
NullBooleanSelect, NumberInput, Select, SelectMultiple,
SplitDateTimeWidget, SplitHiddenDateTimeWidget, TextInput, TimeInput,
URLInput,
)
from django.utils import formats
from django.utils.dateparse import parse_duration
from django.utils.duration import duration_string
from django.utils.ipv6 import clean_ipv6_address
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _, ngettext_lazy
import copy
from django.core.exceptions import NON_FIELD_ERRORS, ValidationError
# BoundField is imported for backwards compatibility in Django 1.9
from django.forms.boundfield import BoundField # NOQA
from django.forms.fields import Field, FileField
# pretty_name is imported for backwards compatibility in Django 1.9
from django.forms.utils import ErrorDict, ErrorList, pretty_name # NOQA
from django.forms.widgets import Media, MediaDefiningClass
from django.utils.functional import cached_property
from django.utils.html import conditional_escape, html_safe
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
from widgets import *
class SimpleForm(BaseForm, metaclass=DeclarativeFieldsMetaclass):
    """Django form that renders itself as a Vue.js application.

    ``as_element`` wraps the usual Django field output in a container div
    and appends a generated <script> that mounts a Vue instance on it,
    with one reactive data entry per form field.
    """

    def get_vue_app_js(self, app_id):
        """Render the <script> fragment that bootstraps the Vue app.

        :param app_id: DOM id of the element the Vue instance mounts on.
        :return: HTML <script> string with one data entry per field.
        """
        base_vue_app = """
        <script>
            var %(app_name)s = new Vue({
                el: "#%(app_name)s",
                data() {
                    return {
                        %(data)s
                    }
                }
            })
        </script>
        """
        # Every declared field starts out empty; bound data overrides it.
        data = {}
        for name, field in self.fields.items():
            data[name] = ''
        data.update(self.data)
        # Serialize the dict as "key:'value'," pairs for the Vue data().
        data_s = ""
        for key, value in data.items():
            data_s += "%(key)s:'%(value)s'" % {
                'key': key,
                'value': value
            } + ','
        return base_vue_app % {"app_name": app_id, "data": data_s}

    def submit_button(self):
        """Render a submit control for the form template.

        Currently a no-op; kept as an extension point.
        :return: None
        """
        # return '<input type="submit">'
        pass

    def as_element(self):
        """Return this form rendered as HTML <div> rows wrapped in a Vue
        app container (``flag=True`` enables the Vue wrapping)."""
        x = self._html_output(
            normal_row='<div %(html_class_attr)s>%(label)s %(field)s%(help_text)s</div>',
            error_row='%s',
            row_ender='</div>',
            help_text_html=' <span class="helptext">%s</span>',
            errors_on_separate_row=True, flag=True
        )
        return x

    def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row, flag=False):
        "Output HTML. Used by as_table(), as_ul(), as_p()."
        # NOTE(review): adapted from Django's BaseForm._html_output; the
        # additions are the ``flag`` parameter and the Vue wrapping below.
        top_errors = self.non_field_errors()  # Errors that should be displayed above all fields.
        output, hidden_fields = [], []
        for name, field in self.fields.items():
            html_class_attr = ''
            bf = self[name]
            if flag:
                # Tell the simpleui widget to emit Vue bindings.
                bf.field.widget.flag = True
            bf_errors = self.error_class(bf.errors)
            if bf.is_hidden:
                if bf_errors:
                    top_errors.extend(
                        [_('(Hidden field %(name)s) %(error)s') % {'name': name, 'error': str(e)}
                         for e in bf_errors])
                hidden_fields.append(str(bf))
            else:
                # Create a 'class="..."' attribute if the row should have any
                # CSS classes applied.
                css_classes = bf.css_classes()
                if css_classes:
                    html_class_attr = ' class="%s"' % css_classes
                if errors_on_separate_row and bf_errors:
                    output.append(error_row % str(bf_errors))
                if bf.label:
                    label = conditional_escape(bf.label)
                    label = bf.label_tag(label) or ''
                else:
                    label = ''
                if field.help_text:
                    help_text = help_text_html % field.help_text
                else:
                    help_text = ''
                output.append(normal_row % {
                    'errors': bf_errors,
                    'label': label,
                    'field': bf,
                    'help_text': help_text,
                    'html_class_attr': html_class_attr,
                    'css_classes': css_classes,
                    'field_name': bf.html_name,
                })
        if top_errors:
            output.insert(0, error_row % top_errors)
        if hidden_fields:  # Insert any hidden fields in the last row.
            str_hidden = ''.join(hidden_fields)
            if output:
                last_row = output[-1]
                # Chop off the trailing row_ender (e.g. '</td></tr>') and
                # insert the hidden fields.
                if not last_row.endswith(row_ender):
                    # This can happen in the as_p() case (and possibly others
                    # that users write): if there are only top errors, we may
                    # not be able to conscript the last row for our purposes,
                    # so insert a new, empty row.
                    last_row = (normal_row % {
                        'errors': '',
                        'label': '',
                        'field': '',
                        'help_text': '',
                        'html_class_attr': html_class_attr,
                        'css_classes': '',
                        'field_name': '',
                    })
                    output.append(last_row)
                output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
            else:
                # If there aren't any rows in the output, just append the
                # hidden fields.
                output.append(str_hidden)
        if flag:
            # Wrap everything in a container div with a random id and append
            # the script that mounts the Vue app on it.
            app_id = "x" + str(uuid.uuid4())[0:5]
            output.insert(0, "<div id='%s'>" % app_id)
            output.append('</div>')
            output.append(self.get_vue_app_js(app_id))
        return mark_safe('\n'.join(output))
class SCharField(forms.CharField):
    """CharField rendered with the simpleui ``STextInput`` widget."""
    widget = STextInput
class SIntegerField(forms.IntegerField):
    """IntegerField rendered with the simpleui ``SNumberInput`` widget."""

    widget = SNumberInput

    def widget_attrs(self, widget):
        """Propagate ``min_value``/``max_value`` to the widget.

        Works around min/max not being forwarded to the custom number
        input widget by the stock IntegerField.
        """
        attrs = super().widget_attrs(widget)
        if isinstance(widget, (NumberInput, SNumberInput)):
            if self.min_value is not None:
                attrs['min'] = self.min_value
            if self.max_value is not None:
                attrs['max'] = self.max_value
        return attrs
class SEmailField(forms.EmailField):
    """EmailField rendered with the simpleui ``SEmailInput`` widget."""
    widget = SEmailInput
class SURLField(forms.URLField):
    """URLField rendered with the simpleui ``SURLInput`` widget."""
    widget = SURLInput
/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/DjangoAppCenter/simpleui/static/admin/simpleui-x/elementui/umd/locale/sv-SE.js | (function (global, factory) {
if (typeof define === "function" && define.amd) {
define('element/locale/sv-SE', ['module', 'exports'], factory);
} else if (typeof exports !== "undefined") {
factory(module, exports);
} else {
var mod = {
exports: {}
};
factory(mod, mod.exports);
global.ELEMENT.lang = global.ELEMENT.lang || {};
global.ELEMENT.lang.svSE = mod.exports;
}
})(this, function (module, exports) {
'use strict';
exports.__esModule = true;
exports.default = {
el: {
colorpicker: {
confirm: 'OK',
clear: 'Töm'
},
datepicker: {
now: 'Nu',
today: 'Idag',
cancel: 'Avbryt',
clear: 'Töm',
confirm: 'OK',
selectDate: 'Välj datum',
selectTime: 'Välj tid',
startDate: 'Startdatum',
startTime: 'Starttid',
endDate: 'Slutdatum',
endTime: 'Sluttid',
prevYear: 'Previous Year', // to be translated
nextYear: 'Next Year', // to be translated
prevMonth: 'Previous Month', // to be translated
nextMonth: 'Next Month', // to be translated
year: 'År',
month1: 'Januari',
month2: 'Februari',
month3: 'Mars',
month4: 'April',
month5: 'Maj',
month6: 'Juni',
month7: 'Juli',
month8: 'Augusti',
month9: 'September',
month10: 'Oktober',
month11: 'November',
month12: 'December',
// week: 'week',
weeks: {
sun: 'Sön',
mon: 'Mån',
tue: 'Tis',
wed: 'Ons',
thu: 'Tor',
fri: 'Fre',
sat: 'Lör'
},
months: {
jan: 'Jan',
feb: 'Feb',
mar: 'Mar',
apr: 'Apr',
may: 'Maj',
jun: 'Jun',
jul: 'Jul',
aug: 'Aug',
sep: 'Sep',
oct: 'Okt',
nov: 'Nov',
dec: 'Dec'
}
},
select: {
loading: 'Laddar',
noMatch: 'Hittade inget',
noData: 'Ingen data',
placeholder: 'Välj'
},
cascader: {
noMatch: 'Hittade inget',
loading: 'Laddar',
placeholder: 'Välj',
noData: 'Ingen data'
},
pagination: {
goto: 'Gå till',
pagesize: '/sida',
total: 'Total {total}',
pageClassifier: ''
},
messagebox: {
title: 'Meddelande',
confirm: 'OK',
cancel: 'Avbryt',
error: 'Felaktig inmatning'
},
upload: {
deleteTip: 'press delete to remove', // to be translated
delete: 'Radera',
preview: 'Förhandsvisa',
continue: 'Fortsätt'
},
table: {
emptyText: 'Inga Data',
confirmFilter: 'Bekräfta',
resetFilter: 'Återställ',
clearFilter: 'Alla',
sumText: 'Sum' // to be translated
},
tree: {
emptyText: 'Inga Data'
},
transfer: {
noMatch: 'Hittade inget',
noData: 'Ingen data',
titles: ['List 1', 'List 2'], // to be translated
filterPlaceholder: 'Enter keyword', // to be translated
noCheckedFormat: '{total} items', // to be translated
hasCheckedFormat: '{checked}/{total} checked' // to be translated
},
image: {
error: 'FAILED' // to be translated
},
pageHeader: {
title: 'Back' // to be translated
}
}
};
module.exports = exports['default'];
}); | PypiClean |
/AQoPA-0.9.5.tar.gz/AQoPA-0.9.5/aqopa/simulator/algorithm.py | from aqopa.model import AlgWhile, TupleExpression, AlgCallFunction, AlgReturn, AlgIf, AlgAssignment
from aqopa.simulator.error import RuntimeException
class AlgorithmResolver():
    """Registry of communication algorithms, keyed by name.

    ``calculate`` evaluates a registered algorithm via
    ``AlgorithmCalculator``; unknown algorithm names yield 0.
    """

    def __init__(self):
        self.algorithms = {}

    def add_algorithm(self, name, algorithm):
        """Register (or replace) the algorithm stored under *name*."""
        self.algorithms[name] = algorithm

    def has_algorithm(self, name):
        """Return True if an algorithm is registered under *name*."""
        return name in self.algorithms

    def get_algorithm(self, name):
        """Return the algorithm registered under *name* (KeyError if absent)."""
        return self.algorithms[name]

    def calculate(self, context, host, alg_name, variables=None):
        """Evaluate the named algorithm and return its numeric result.

        Returns 0 when *alg_name* is not registered.
        """
        if not self.has_algorithm(alg_name):
            return 0
        effective_variables = {} if variables is None else variables
        calculator = AlgorithmCalculator(
            context, host, alg_name, effective_variables,
            self.algorithms[alg_name])
        return calculator.calculate()
class AlgorithmCalculator():
    """Interpreter for one communication algorithm.

    Executes the algorithm's instruction tree (assignments, if, while,
    return) with an explicit stack of instruction lists, and evaluates
    arithmetic/boolean expressions by converting infix token lists to
    reverse Polish notation (shunting-yard) and reducing them.

    NOTE(review): this is Python 2-era code (``basestring``/``unicode``),
    consistent with the rest of the module.
    """

    def __init__(self, context, host, algorithm_name, variables, algorithm):
        self.context = context
        self.host = host
        self.algorithm_name = algorithm_name
        self.variables = variables
        self.algorithm = algorithm
        self.instructions = algorithm['instructions']
        # Link quality defaults to 1 when the caller does not supply it.
        self.link_quality = variables['link_quality'] if 'link_quality' in variables else 1
        # Stack of instruction lists (nested while/if bodies) plus, per
        # stack level, the index of the next instruction to execute.
        self.instructions_stack = [algorithm['instructions']]
        self.instructions_stack_index = 0
        self.instructions_lists_indexes = {0: 0}
        # '--' is the unary-minus token produced by the parser.
        self.left_associative_operators = ['--', '-', '+', '*', '/', '==', '!=', '<=', '>=', '>', '<', '&&', '||']
        self.right_associative_operators = []
        self.all_operators = self.left_associative_operators + self.right_associative_operators
        # Set by an AlgReturn instruction; also serves as the stop flag.
        self.return_value = None

    def get_index_in_current_list(self):
        """ Returns the index of instruction in current list """
        return self.instructions_lists_indexes[self.instructions_stack_index]

    def in_main_stack(self):
        """ Returns True when current instruction is in main stack """
        return self.instructions_stack_index == 0

    def has_current_instruction(self):
        """Return True while the current list still has instructions left."""
        return self.get_index_in_current_list() < len(self.instructions_stack[self.instructions_stack_index])

    def get_current_instruction(self):
        """ Returns the instruction that should be executed next """
        return self.instructions_stack[self.instructions_stack_index][self.get_index_in_current_list()]

    def finished(self):
        """ Returns True when algorithm is finished """
        return self.return_value is not None or (not self.has_current_instruction() and self.in_main_stack())

    def goto_next_instruction(self):
        """Advance to the next instruction, popping finished nested lists."""
        self.instructions_lists_indexes[self.instructions_stack_index] += 1
        # Unwind every nested instruction list that has been exhausted.
        while not self.finished() and not self.has_current_instruction():
            self.instructions_stack.pop()
            del self.instructions_lists_indexes[self.instructions_stack_index]
            self.instructions_stack_index -= 1
            if not self.finished():
                # Advance past the parent instruction — except for a while
                # loop, whose condition must be re-evaluated.
                if not isinstance(self.get_current_instruction(), AlgWhile):
                    self.instructions_lists_indexes[self.instructions_stack_index] += 1

    def add_instructions_list(self, instructions):
        """ Adds new instructions list to the stack """
        self.instructions_stack.append(instructions)
        self.instructions_stack_index += 1
        self.instructions_lists_indexes[self.instructions_stack_index] = 0

    def calculate_function_value(self, call_function_instruction):
        """Evaluate a built-in function call (``size`` or ``quality``)."""
        if call_function_instruction.function_name == 'size':
            var_name = call_function_instruction.args[0]
            if var_name not in self.variables:
                raise RuntimeException("Variable {0} not defined in communication algorithm {1}."
                                       .format(var_name, self.algorithm_name))
            value = self.variables[var_name]
            # If tuple element expression
            if len(call_function_instruction.args) > 1:
                if not isinstance(value, TupleExpression):
                    raise RuntimeException("Variable {0} in communication algorithm {1} is not tuple, it is: {2}."
                                           .format(var_name, self.algorithm_name, unicode(value)))
                index = call_function_instruction.args[1]
                if len(value.elements) <= index:
                    raise RuntimeException("Variable {0} in communication algorithm {1} has "
                                           "{2} elements while index {3} is asked."
                                           .format(var_name, self.algorithm_name, len(value.elements), index))
                value = value.elements[index]
            return self.context.metrics_manager.get_expression_size(value, self.context, self.host)
        elif call_function_instruction.function_name == 'quality':
            if self.link_quality is None:
                raise RuntimeException("Link quality is undefined in {0} algorithm. "
                                       .format(self.algorithm_name))
            return self.link_quality
        raise RuntimeException("Unresolved reference to function {0}() in algorithm {1}."
                               .format(call_function_instruction.function_name, self.algorithm_name))

    def _is_operation_token(self, token):
        # Operators arrive as plain strings; operands may be numbers,
        # variable names or AlgCallFunction nodes.
        return isinstance(token, basestring) and token in self.all_operators

    def _operator_order(self, operator):
        """
        Returns the order of operator as number.
        """
        # Precedence groups, lowest first: comparisons/boolean, then
        # additive (incl. unary minus), then multiplicative.
        orders = [['==', '!=', '<=', '>=', '>', '<', '&&', '||'], ['--', '-', '+'], ['*', '/']]
        for i in range(0, len(orders)):
            if operator in orders[i]:
                return i
        raise RuntimeException("Operator {0} undefined in algorithm {1}.".format(operator, self.algorithm_name))

    def _make_rpn(self, expression):
        """Convert an infix token list to RPN via the shunting-yard algorithm,
        resolving variables and function calls to numbers on the way."""
        stack = []
        rpn = []
        for token in expression:
            # if operator
            if self._is_operation_token(token):
                while len(stack) > 0:
                    top_operator = stack[len(stack)-1]
                    # if current operator is left-associative and its order is lower or equal than top operator
                    # or current operator is right-associative and its order is lower than top operator
                    if (token in self.left_associative_operators
                            and self._operator_order(token) <= self._operator_order(top_operator))\
                            or (token in self.right_associative_operators
                                and self._operator_order(token) < self._operator_order(top_operator)):
                        rpn.append(stack.pop())
                    else:
                        break
                stack.append(token)
            elif token == '(':
                stack.append(token)
            elif token == ')':
                found_paran = False
                while len(stack) > 0:
                    top_operator = stack[len(stack)-1]
                    if top_operator == '(':
                        found_paran = True
                        stack.pop()
                        break
                    else:
                        rpn.append(stack.pop())
                if not found_paran:
                    raise RuntimeException("Incorrect number of brackets in algorithm {0}.".format(self.algorithm_name))
            else:  # else number
                # Operands: resolve function calls and variable names to
                # numeric values before pushing them onto the output.
                if isinstance(token, AlgCallFunction):
                    token = self.calculate_function_value(token)
                elif isinstance(token, basestring):
                    if token not in self.variables:
                        raise RuntimeException("Variable {0} not defined in communication algorithm {1}."
                                               .format(token, self.algorithm_name))
                    token = self.variables[token]
                rpn.append(float(token))
        # Flush any operators still waiting on the stack.
        while len(stack) > 0:
            rpn.append(stack.pop())
        return rpn

    def _calculate_operation(self, operator, left, right):
        """Apply one binary operator to two numeric operands."""
        if operator == '+':
            return left + right
        elif operator == '-':
            return left - right
        elif operator == '*':
            return left * right
        elif operator == '/':
            return left / right
        elif operator == '==':
            return left == right
        elif operator == '!=':
            return left != right
        elif operator == '>=':
            return left >= right
        elif operator == '>':
            return left > right
        elif operator == '<=':
            return left <= right
        elif operator == '<':
            return left < right
        else:
            raise RuntimeException("Incorrect operator {0} of brackets in algorithm {1}."
                                   .format(operator, self.algorithm_name))

    def _calculate_rpn(self, rpn_elements):
        """Reduce an RPN token list to a single value."""
        stack = []
        for token in rpn_elements:
            # if operator
            if self._is_operation_token(token):
                if token == '--':
                    # Unary minus: negate the single top operand.
                    value = stack.pop()
                    value = - value
                    stack.append(value)
                else:
                    a = stack.pop()
                    b = stack.pop()
                    # b is the left operand (pushed first), a the right.
                    stack.append(self._calculate_operation(token, b, a))
            else:  # number
                stack.append(token)
        return stack.pop()

    def calculate_value(self, expression):
        """Evaluate one infix expression (token list) to a value."""
        rpn_elements = self._make_rpn(expression)
        return self._calculate_rpn(rpn_elements)

    def execute_current_instruction(self):
        """Execute a single instruction and advance the program counter."""
        current_instruction = self.get_current_instruction()
        if isinstance(current_instruction, AlgReturn):
            self.return_value = self.calculate_value(current_instruction.expression)
            self.goto_next_instruction()
        elif isinstance(current_instruction, AlgWhile):
            if len(current_instruction.instructions) > 0 and self.calculate_value(current_instruction.condition):
                self.add_instructions_list(current_instruction.instructions)
            else:
                self.goto_next_instruction()
        elif isinstance(current_instruction, AlgIf):
            if self.calculate_value(current_instruction.condition):
                instructions = current_instruction.true_instructions
            else:
                instructions = current_instruction.false_instructions
            if len(instructions) > 0:
                self.add_instructions_list(instructions)
            else:
                self.goto_next_instruction()
        elif isinstance(current_instruction, AlgAssignment):
            self.variables[current_instruction.identifier] = self.calculate_value(current_instruction.expression)
            self.goto_next_instruction()

    def calculate(self):
        """Run the algorithm to completion and return its return value."""
        while not self.finished():
            self.execute_current_instruction()
        if self.return_value is None:
            raise RuntimeException("Algorithm {0} has no return value. Did you forget to use return instruction?"
                                   .format(self.algorithm_name))
        return self.return_value
/LDtoolsets-0.0.14.tar.gz/LDtoolsets-0.0.14/nbs/01_Sumstat.ipynb | ```
# default_exp sumstat
```
# Sumstat module
> Read and extract GWAS summary statistics (sumstats).
```
#hide
from nbdev.showdoc import *
#export
import yaml
import numpy as np
import pandas as pd
from scipy.stats import norm
from LDtools.utils import *
#export
def p2z(pval, beta, twoside=True):
    """Convert p-values to z-scores signed by the direction of effect.

    Parameters
    ----------
    pval : scalar or array-like
        P-values of the association tests.
    beta : scalar or array-like
        Effect sizes; a negative beta yields a negative z-score.
    twoside : bool
        If True (default), treat p-values as two-sided and halve them
        before inverting the normal CDF.

    Returns
    -------
    numpy.ndarray
        Signed z-scores with the same shape as ``pval``.
    """
    # Coerce to arrays so boolean masking also works for scalars and lists
    # (the previous in-place indexing crashed on scalar input).
    pval = np.asarray(pval, dtype=float)
    beta = np.asarray(beta, dtype=float)
    if twoside:
        pval = pval / 2
    z = np.abs(norm.ppf(pval))
    # Flip the sign wherever the effect direction is negative.
    return np.where(beta < 0, -z, z)
class Sumstat:
    """Container for a summary-statistics table with region/variant filters."""

    def __init__(self, sumstat_path, config_file=None, rename=True):
        self.ss = self.read_sumstat(sumstat_path, config_file, rename)

    def __repr__(self):
        return "sumstat:% s" % (self.ss)

    # functions to read sumstats
    def read_sumstat(self, file, config_file, rename):
        """Load sumstats, optionally remapping columns via a YAML config."""
        if config_file is not None:
            config_file = yaml.safe_load(open(config_file, 'r'))
        return read_sumstat(file, config_file, rename)

    def extractbyregion(self, region):
        """Keep only SNPs inside ``region`` = (chrom, start, end), inclusive."""
        sumstats = self.ss
        idx = (sumstats.CHR == region[0]) & (sumstats.POS >= region[1]) & (sumstats.POS <= region[2])
        print('this region', region, 'has', sum(idx), 'SNPs in Sumstat')
        self.ss = sumstats[idx]

    def extractbyvariants(self, variants, notin=False):
        """Keep SNPs whose ids are in ``variants`` (or drop them if ``notin``)."""
        idx = self.ss.SNP.isin(variants)
        if notin:
            idx = idx == False
        # update sumstats
        self.ss = self.ss[idx]

    def calculateZ(self):
        """Add a signed Z column derived from the P and BETA columns."""
        self.ss['Z'] = list(p2z(self.ss.P, self.ss.BETA))

    def match_ss(self, bim):
        """Align sumstat rows against a PLINK bim table via ``check_ss1``."""
        self.ss = check_ss1(self.ss, bim)
#export
def read_sumstat(file, config, rename=True):
    """Read a tab-separated summary-statistics table.

    Parameters
    ----------
    file : str or file-like
        Path to a (possibly gzip-compressed) TSV file.
    config : dict or None
        Optional column mapping with an ``ID`` entry (comma-separated
        column names used to build the index) plus target->source column
        pairs; when given, columns are selected and renamed accordingly.
    rename : bool
        If True (default), rebuild the SNP column as
        ``chr<CHR>:<POS>:<A0>:<A1>`` and cast CHR/POS to int.

    Returns
    -------
    pandas.DataFrame
    """
    try:
        sumstats = pd.read_csv(file, compression='gzip', header=0, sep='\t', quotechar='"')
    except Exception:
        # Not gzip-compressed (or unreadable as such); retry as plain text.
        sumstats = pd.read_csv(file, header=0, sep='\t', quotechar='"')
    if config is not None:
        # Work on a copy: popping 'ID' below must not mutate the caller's
        # dict (previously a second call with the same config would fail).
        config = dict(config)
        try:
            ID = config.pop('ID').split(',')
            sumstats = sumstats.loc[:, list(config.values())]
            sumstats.columns = list(config.keys())
            sumstats.index = namebyordA0_A1(sumstats[ID], cols=ID)
        except Exception:
            raise ValueError('According to config_file, input summary statistics should have the following columns: %s' % list(config.values()))
    if rename:
        sumstats.SNP = 'chr'+sumstats.CHR.astype(str) + ':' + sumstats.POS.astype(str) + ':' + sumstats.A0.astype(str) + ':' + sumstats.A1.astype(str)
        sumstats.CHR = sumstats.CHR.astype(int)
        sumstats.POS = sumstats.POS.astype(int)
    return sumstats
```
| PypiClean |
/NAStools-0.2.1.tar.gz/NAStools-0.2.1/src/nastools/ict.py |
import datetime
# Functions for working with ICARTT files
# TODO: create similar to parse NAS header; there are a few subtle differences between the NAS and ICT, so a seperate nas function is best
# Operates on a Naspy.header object
def parse_header(self):
    """Parse an ICARTT (.ict) header, populating attributes on *self*.

    Reads sequentially from ``self._fileObj_`` following the ICARTT FFI
    1001 header layout: counts, PI/mission metadata, dates, interval,
    independent variable, dependent variables, then special and normal
    comments.  Operates on a naspy.header-like object; returns None.
    """
    f = self._fileObj_
    self.HEADER_LINES, self.FFI = map(int, f.readline().split(','))
    self.PI = f.readline().strip()
    self.ORGANIZATION = f.readline().strip()
    self.DATA_DESCRIPTION = f.readline().strip()
    self.MISSION = f.readline().strip()
    self.FILE_VOL_NO, self.NO_VOL = map(int, f.readline().split(','))
    # Materialize the map() so it can be indexed (works on Python 2 AND 3;
    # previously `i[0]` failed on Python 3's lazy map object).
    i = list(map(int, f.readline().split(',')))
    self.START_UTC = datetime.datetime(i[0], i[1], i[2])  # UTC date when data begin
    self.REV_UTC = datetime.date(i[3], i[4], i[5])  # UTC date of data red or rev
    self.DATA_INTERVAL = float(f.readline())
    # Independent variable line: NAME,UNITS,DESC; pad missing trailing
    # fields with the string "None" (must be a string).
    j = f.readline().strip().split(',')
    while len(j) < 3:
        j.append("None")
    self.INDEPENDENT_VARIABLE = {'NAME': j[0], 'UNITS': j[1], 'DESC': j[2]}
    # Total includes the independent variable, hence the +1.
    self.TOTAL_NUM_VARIABLES = int(f.readline().strip()) + 1
    # Fixed duplicated `self.SCALE_FACTORS = self.SCALE_FACTORS =` typo and
    # materialized the list for Python 3.
    self.SCALE_FACTORS = list(map(float, f.readline().split(',')))
    self.MISSING_DATA_FLAGS = f.readline().strip().replace(" ", "").split(',')
    # One NAME,UNITS,DESC line per dependent variable.
    DEP_VAR = []
    for _ in range(1, self.TOTAL_NUM_VARIABLES):
        j = f.readline().strip().split(',')
        while len(j) < 3:
            j.append("None")  # must be a string
        DEP_VAR.append({'NAME': j[0], 'UNITS': j[1], 'DESC': j[2]})
    self.DEPENDENT_VARIABLE = DEP_VAR
    self.SPECIAL_COMMENT_LINES = int(f.readline().strip())
    SPECIAL_COMMENTS = []
    for _ in range(self.SPECIAL_COMMENT_LINES):
        SPECIAL_COMMENTS.append(f.readline().strip())
    self.SPECIAL_COMMENTS = SPECIAL_COMMENTS
    self.NORMAL_COMMENT_LINES = int(f.readline().strip())
    NORMAL_COMMENTS = []
    for _ in range(self.NORMAL_COMMENT_LINES):
        NORMAL_COMMENTS.append(f.readline().strip())
    self.NORMAL_COMMENTS = NORMAL_COMMENTS
    if len(NORMAL_COMMENTS) != 0:
        parse_normal_comments(self)
        # The last normal comment conventionally holds the short column
        # names; only consume it when the count matches.  (Previously this
        # indexed NORMAL_COMMENTS[-1] unconditionally and crashed when
        # there were zero normal comment lines.)
        COL_VARS = NORMAL_COMMENTS[-1].strip().split(',')  # last column line
        if len(COL_VARS) == self.TOTAL_NUM_VARIABLES:
            self.COLUMN_VARIABLES = COL_VARS
            self.NORMAL_COMMENTS.pop()  # pop off the variable names, since they're here
    return None
def parse_normal_comments(self):
    """Turn "KEY: value" normal-comment lines into attributes on *self*.

    Each comment containing a colon becomes an attribute whose name is the
    upper-cased, stripped key; lines without a colon are ignored.
    """
    for line in self.NORMAL_COMMENTS:
        parts = line.split(':', 1)
        if len(parts) == 2:
            key, value = parts
            setattr(self, key.upper().strip(), value.strip())
    return None
/ExtensysPlots-1.0.1.tar.gz/ExtensysPlots-1.0.1/README.md | # Extensys Plots
[](https://doi.org/10.5281/zenodo.4572436)
Matplotlib extensys style for making figures
This repo has Matplotlib Extensys style to format your figure for scientific publications and presentation.
## Getting Started
The easiest way to install ExtensysPlots is to use [pip](https://pip.pypa.io/en/stable/):
```
# to install the latest release (from PyPI)
pip install ExtensysPlots
# in Ubuntu/Debian
python3 -m pip install ExtensysPlots
# to install latest commit (from GitHub)
pip install git+https://github.com/mcekwonu/ExtensysPlots.git
```
The pip installation will automatically move all of the Matplotlib style files ```*.mplstyle``` into the appropriate directory on your computer.
Please see the [FAQ](https://github.com/mcekwonu/ExtensysPlots#faq) section for more information and troubleshooting.
## Using the Style
"extensys" is the main style from this repo. Whenever you want to use it, simply add the following to the top of your python script:
```python
import matplotlib.pyplot as plt
plt.style.use('extensys')
```
To use any of the styles temporarily, you can use:
```python
with plt.style.context(['extensys']):
plt.figure()
plt.plot(x, y)
plt.show()
```
The default format for saving figures is ```.png``` with ```dpi=500```. Other formats can be obtained by passing them to ```plt.savefig```, along with the desired ```dpi```. For example:
```python
plt.savefig("figures/fig1" + ".pdf", dpi=1000)
```
## Help and Contribution
Please feel free to contribute to the ExtensysPlots repo! Before starting a new style or making any changes, please create an issue through the [GitHub issue tracker](https://github.com/mcekwonu/ExtensysPlots/issues).
If you need any help with ExtensysPlots, please first check the [FAQ](https://github.com/mcekwonu/ExtensysPlots#faq) and search through the [previous GitHub issues](https://github.com/mcekwonu/ExtensysPlots/issues). If you can't find an answer, create a new issue through the [GitHub issue tracker](https://github.com/mcekwonu/ExtensysPlots/issues).
You can checkout [Matplotlib's documentation](https://matplotlib.org) for more information on plotting settings.
## FAQ
1. Installing ExtensysPlots manually
* If you like, you can install the ```*.mplstyle``` files manually. First, clone the repository and then copy all of the ```*.mplstyle``` files into your Matplotlib style directory.
If you're not sure where this is, in an interactive python console type:
```python
import matplotlib
print(matplotlib.get_configdir())
```
In my case it returned ```/home/mce/.config/matplotlib```
* You should get back something like ```/home/mce/.config/matplotlib```. You would then put the ```*.mplstyle``` files in ```/home/mce/.config/matplotlib/stylelib/``` (you need to create the stylelib directory):
```python
cp styles/*.mplstyle ~/.config/matplotlib/stylelib/
```
2. Using different fonts:
* ExtensysPlots uses the default sans-serif font. If you would like to specify a different font, you can use:
```python
import matplotlib.pyplot as plt
plt.style.use('extensys')
plt.rcParams.update({
"font.family": "serif", # specify font family here
"font.serif": ["Times"], # specify font here
"font.size":12}) # specify font size here
```
3. Adjusting the legend placement:
* You can adjust the legend borderpad when you have more than four legend parameters, for proper placement. You will need to try different values manually and see that it is placed correctly.
```python
import matplotlib.pyplot as plt
plt.style.use('extensys')
plt.rcParams.update({"legend.borderaxespad": -4.0})
```
4. Installing ExtensysPlots within Google Colab, IPython, Jupyter Notebooks, etc.:
* After installing ExtensysPlots within one of these environments, you may need to reload the Matplotlib style library. For example:
```python
!pip install ExtensysPlots
import matplotlib.pyplot as plt
plt.style.reload_library()
plt.style.use('extensys')
```
## ExtensysPlots in Academic Papers
If you use ```ExtensysPlots``` in your paper/thesis, feel free to add it to the list!
## Citation
You don't have to cite ExtensysPlots if you use it but it's nice if you do:
```latex
@article{ExtensysPlots,
author = {Michael Chukwuemeka Ekwonu},
title = {{mcekwonu/ExtensysPlots}},
month = {mar},
year = {2021},
publisher = {},
version = {1.0.0},
doi = {10.5281/zenodo.4572436},
url = {https://doi.org/10.5281/zenodo.4572436}
}
```
## License
[MIT](https://choosealicense.com/licenses/mit/)
| PypiClean |
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/bower_components/bootstrap/site/content/docs/5.0/components/accordion.md | ---
layout: docs
title: Accordion
description: Build vertically collapsing accordions in combination with our Collapse JavaScript plugin.
group: components
aliases:
- "/components/"
- "/docs/5.0/components/"
toc: true
---
## How it works
The accordion uses [collapse]({{< docsref "/components/collapse" >}}) internally to make it collapsible. To render an accordion that's expanded, add the `.open` class on the `.accordion`.
{{< callout info >}}
{{< partial "callout-info-prefersreducedmotion.md" >}}
{{< /callout >}}
## Example
Click the accordions below to expand/collapse the accordion content.
{{< example >}}
<div class="accordion" id="accordionExample">
<div class="accordion-item">
<h2 class="accordion-header" id="headingOne">
<button class="accordion-button" type="button" data-bs-toggle="collapse" data-bs-target="#collapseOne" aria-expanded="true" aria-controls="collapseOne">
Accordion Item #1
</button>
</h2>
<div id="collapseOne" class="accordion-collapse collapse show" aria-labelledby="headingOne" data-bs-parent="#accordionExample">
<div class="accordion-body">
<strong>This is the first item's accordion body.</strong> It is shown by default, until the collapse plugin adds the appropriate classes that we use to style each element. These classes control the overall appearance, as well as the showing and hiding via CSS transitions. You can modify any of this with custom CSS or overriding our default variables. It's also worth noting that just about any HTML can go within the <code>.accordion-body</code>, though the transition does limit overflow.
</div>
</div>
</div>
<div class="accordion-item">
<h2 class="accordion-header" id="headingTwo">
<button class="accordion-button collapsed" type="button" data-bs-toggle="collapse" data-bs-target="#collapseTwo" aria-expanded="false" aria-controls="collapseTwo">
Accordion Item #2
</button>
</h2>
<div id="collapseTwo" class="accordion-collapse collapse" aria-labelledby="headingTwo" data-bs-parent="#accordionExample">
<div class="accordion-body">
<strong>This is the second item's accordion body.</strong> It is hidden by default, until the collapse plugin adds the appropriate classes that we use to style each element. These classes control the overall appearance, as well as the showing and hiding via CSS transitions. You can modify any of this with custom CSS or overriding our default variables. It's also worth noting that just about any HTML can go within the <code>.accordion-body</code>, though the transition does limit overflow.
</div>
</div>
</div>
<div class="accordion-item">
<h2 class="accordion-header" id="headingThree">
<button class="accordion-button collapsed" type="button" data-bs-toggle="collapse" data-bs-target="#collapseThree" aria-expanded="false" aria-controls="collapseThree">
Accordion Item #3
</button>
</h2>
<div id="collapseThree" class="accordion-collapse collapse" aria-labelledby="headingThree" data-bs-parent="#accordionExample">
<div class="accordion-body">
<strong>This is the third item's accordion body.</strong> It is hidden by default, until the collapse plugin adds the appropriate classes that we use to style each element. These classes control the overall appearance, as well as the showing and hiding via CSS transitions. You can modify any of this with custom CSS or overriding our default variables. It's also worth noting that just about any HTML can go within the <code>.accordion-body</code>, though the transition does limit overflow.
</div>
</div>
</div>
</div>
{{< /example >}}
### Flush
Add `.accordion-flush` to remove the default `background-color`, some borders, and some rounded corners to render accordions edge-to-edge with their parent container.
{{< example class="bg-light" >}}
<div class="accordion accordion-flush" id="accordionFlushExample">
<div class="accordion-item">
<h2 class="accordion-header" id="flush-headingOne">
<button class="accordion-button collapsed" type="button" data-bs-toggle="collapse" data-bs-target="#flush-collapseOne" aria-expanded="false" aria-controls="flush-collapseOne">
Accordion Item #1
</button>
</h2>
<div id="flush-collapseOne" class="accordion-collapse collapse" aria-labelledby="flush-headingOne" data-bs-parent="#accordionFlushExample">
<div class="accordion-body">Placeholder content for this accordion, which is intended to demonstrate the <code>.accordion-flush</code> class. This is the first item's accordion body.</div>
</div>
</div>
<div class="accordion-item">
<h2 class="accordion-header" id="flush-headingTwo">
<button class="accordion-button collapsed" type="button" data-bs-toggle="collapse" data-bs-target="#flush-collapseTwo" aria-expanded="false" aria-controls="flush-collapseTwo">
Accordion Item #2
</button>
</h2>
<div id="flush-collapseTwo" class="accordion-collapse collapse" aria-labelledby="flush-headingTwo" data-bs-parent="#accordionFlushExample">
<div class="accordion-body">Placeholder content for this accordion, which is intended to demonstrate the <code>.accordion-flush</code> class. This is the second item's accordion body. Let's imagine this being filled with some actual content.</div>
</div>
</div>
<div class="accordion-item">
<h2 class="accordion-header" id="flush-headingThree">
<button class="accordion-button collapsed" type="button" data-bs-toggle="collapse" data-bs-target="#flush-collapseThree" aria-expanded="false" aria-controls="flush-collapseThree">
Accordion Item #3
</button>
</h2>
<div id="flush-collapseThree" class="accordion-collapse collapse" aria-labelledby="flush-headingThree" data-bs-parent="#accordionFlushExample">
<div class="accordion-body">Placeholder content for this accordion, which is intended to demonstrate the <code>.accordion-flush</code> class. This is the third item's accordion body. Nothing more exciting happening here in terms of content, but just filling up the space to make it look, at least at first glance, a bit more representative of how this would look in a real-world application.</div>
</div>
</div>
</div>
{{< /example >}}
### Always open
Omit the `data-bs-parent` attribute on each `.accordion-collapse` to make accordion items stay open when another item is opened.
{{< example >}}
<div class="accordion" id="accordionPanelsStayOpenExample">
<div class="accordion-item">
<h2 class="accordion-header" id="panelsStayOpen-headingOne">
<button class="accordion-button" type="button" data-bs-toggle="collapse" data-bs-target="#panelsStayOpen-collapseOne" aria-expanded="true" aria-controls="panelsStayOpen-collapseOne">
Accordion Item #1
</button>
</h2>
<div id="panelsStayOpen-collapseOne" class="accordion-collapse collapse show" aria-labelledby="panelsStayOpen-headingOne">
<div class="accordion-body">
<strong>This is the first item's accordion body.</strong> It is shown by default, until the collapse plugin adds the appropriate classes that we use to style each element. These classes control the overall appearance, as well as the showing and hiding via CSS transitions. You can modify any of this with custom CSS or overriding our default variables. It's also worth noting that just about any HTML can go within the <code>.accordion-body</code>, though the transition does limit overflow.
</div>
</div>
</div>
<div class="accordion-item">
<h2 class="accordion-header" id="panelsStayOpen-headingTwo">
<button class="accordion-button collapsed" type="button" data-bs-toggle="collapse" data-bs-target="#panelsStayOpen-collapseTwo" aria-expanded="false" aria-controls="panelsStayOpen-collapseTwo">
Accordion Item #2
</button>
</h2>
<div id="panelsStayOpen-collapseTwo" class="accordion-collapse collapse" aria-labelledby="panelsStayOpen-headingTwo">
<div class="accordion-body">
<strong>This is the second item's accordion body.</strong> It is hidden by default, until the collapse plugin adds the appropriate classes that we use to style each element. These classes control the overall appearance, as well as the showing and hiding via CSS transitions. You can modify any of this with custom CSS or overriding our default variables. It's also worth noting that just about any HTML can go within the <code>.accordion-body</code>, though the transition does limit overflow.
</div>
</div>
</div>
<div class="accordion-item">
<h2 class="accordion-header" id="panelsStayOpen-headingThree">
<button class="accordion-button collapsed" type="button" data-bs-toggle="collapse" data-bs-target="#panelsStayOpen-collapseThree" aria-expanded="false" aria-controls="panelsStayOpen-collapseThree">
Accordion Item #3
</button>
</h2>
<div id="panelsStayOpen-collapseThree" class="accordion-collapse collapse" aria-labelledby="panelsStayOpen-headingThree">
<div class="accordion-body">
<strong>This is the third item's accordion body.</strong> It is hidden by default, until the collapse plugin adds the appropriate classes that we use to style each element. These classes control the overall appearance, as well as the showing and hiding via CSS transitions. You can modify any of this with custom CSS or overriding our default variables. It's also worth noting that just about any HTML can go within the <code>.accordion-body</code>, though the transition does limit overflow.
</div>
</div>
</div>
</div>
{{< /example >}}
## Accessibility
Please read the [collapse accessibility section]({{< docsref "/components/collapse#accessibility" >}}) for more information.
## Sass
### Variables
{{< scss-docs name="accordion-variables" file="scss/_variables.scss" >}}
| PypiClean |
/Epubzilla-0.1.1.tar.gz/Epubzilla-0.1.1/epubzilla/epubzilla.py | from lxml import etree
import os.path
import zipfile
PATH_TO_CONTAINER_XML = "META-INF/container.xml"
XML_NAMESPACES = {
'n':'urn:oasis:names:tc:opendocument:xmlns:container',
'opf':'http://www.idpf.org/2007/opf',
'dc':'http://purl.org/dc/elements/1.1/'}
class Epub(object):
    """An EPUB archive: exposes its package, metadata, manifest, spine and
    guide, along with convenience accessors for title, author and cover."""

    def __init__(self):
        self.filename = None
        self.package = Package()
        self.metadata = MetaData()
        self.manifest = Manifest()
        self.spine = Spine()
        self.guide = Guide()
        self.parts = []     # manifest elements in spine (reading) order
        self.images = []    # manifest elements whose media type is an image
        self.css = []       # manifest elements whose media type is text/css
        self._cover = None  # cache for the lazily-resolved cover element

    @property
    def title(self):
        """Returns the EPUB's title."""
        return self.metadata.get('title')

    @property
    def author(self):
        """Returns the value of the 'file-as' attribute of the first creator
        listed in the metadata. If the attribute is not present, it returns
        the text value enclosed by the creator tag."""
        for item in self.metadata:
            if item.tag.localname == "creator":
                if 'file-as' in item.tag:
                    return item.tag['file-as']
                else:
                    return item.tag.text

    @property
    def cover(self):
        """Returns the manifest element named by the <meta name="cover">
        metadata entry, or None when no cover is declared."""
        if self._cover:
            return self._cover
        for element in self.metadata:
            if element.tag.localname == 'meta' and 'name' in element.tag.attributes:
                if element.tag['name'] == 'cover':
                    self._cover = self.manifest.getElementById(element.tag['content'])
                    return self._cover
        return None

    @staticmethod
    def from_file(epub_file):
        """Creates an instance of Epub from an epub file

        Accepts epub_file as the fullpath to file or a file object
        """
        self = Epub()
        #TODO: zipfile.ZipFile accepts a file or a fileobject.
        # That seems ambiguous. We should probably create a
        # separate method to create an EPUB from a file object to be more
        # clear.
        if isinstance(epub_file, file):
            # BUG FIX: previously read `file.name` -- an attribute of the
            # builtin `file` *type* -- which never yielded the filename.
            self.filename = epub_file.name
        if isinstance(epub_file, str):
            self.filename = epub_file
        try:
            archive = zipfile.ZipFile(epub_file)
        except Exception as e:
            print('Could not open zipfile "%s" \n' % self.filename)
            print(e)
            # BUG FIX: execution used to fall through and crash later with a
            # NameError on `archive`; re-raise so callers see the real error.
            raise
        # parse container.xml for full path to content.opf file
        container_xml = archive.read(PATH_TO_CONTAINER_XML)
        container_xml_tree = etree.fromstring(container_xml)
        fullpath = container_xml_tree.xpath('n:rootfiles/n:rootfile/@full-path',
                                            namespaces=XML_NAMESPACES)[0]
        # Each major XML element in the content.opf file is mapped to its own
        # class.  This list maps those classes to the XPaths that point to the
        # corresponding XML element.
        #
        # for example: the XPath "opf:package" points to the '<package>' XML
        # element which is mapped to the Package class
        element_map = [{'name': 'package',
                        'class': Package,
                        'element_xpath': '/opf:package'},
                       {'name': 'metadata',
                        'class': MetaData,
                        'element_xpath': '/opf:package/opf:metadata',
                        'sub_element_class': Element,
                        'sub_element_xpath': "./*"},
                       {'name': 'manifest',
                        'class': Manifest,
                        'element_xpath': '/opf:package/opf:manifest',
                        'sub_element_class': ManifestElement,
                        'sub_element_xpath': 'opf:item'},
                       {'name': 'spine',
                        'class': Spine,
                        'element_xpath': '/opf:package/opf:spine',
                        'sub_element_class': Element,
                        'sub_element_xpath': 'opf:itemref'},
                       {'name': 'guide',
                        'class': Guide,
                        'element_xpath': '/opf:package/opf:guide',
                        'sub_element_class': Element,
                        'sub_element_xpath': 'opf:reference',
                        'optional': True}]
        tree = etree.fromstring(archive.read(fullpath))
        for element in element_map:
            try:
                element_tree = tree.xpath(element['element_xpath'],
                                          namespaces=XML_NAMESPACES)[0]
            except IndexError:
                # BUG FIX: use .get() -- most map entries have no 'optional'
                # key, so element['optional'] raised KeyError here.
                if element.get('optional'):
                    continue
                else:
                    # BUG FIX: previously only printed the map entry and fell
                    # through to use the undefined `element_tree`; re-raise.
                    print(element)
                    raise
            element_class = element['class']()
            element_class.as_xhtml = etree.tostring(element_tree)
            # Step through the attrib dict and replace each key with its
            # localname version, i.e. if the key is '{namespace}event',
            # replace it with 'event'.  There *shouldn't* be any collisions.
            element_class.tag.attributes = {etree.QName(key).localname: value
                                            for key, value in element_tree.attrib.iteritems()}
            element_class.tag.localname = etree.QName(element_tree).localname
            element_class.tag.namespace = etree.QName(element_tree).namespace
            element_class.text = element_tree.text
            if 'sub_element_class' in element:
                sub_element_tree = element_tree.xpath(element['sub_element_xpath'],
                                                      namespaces=XML_NAMESPACES)
                for k in sub_element_tree:
                    sub_element_class = element['sub_element_class']()
                    sub_element_class.as_xhtml = etree.tostring(k)
                    sub_element_class.tag.attributes = {etree.QName(key).localname: value
                                                        for key, value in k.attrib.iteritems()}
                    sub_element_class.tag.localname = etree.QName(k.tag).localname
                    sub_element_class.tag.namespace = etree.QName(k.tag).namespace
                    sub_element_class.tag.text = k.text
                    element_class.append(sub_element_class)
                    # if we just created a ManifestElement, we need to
                    # additionally pass it a reference to the epub archive and
                    # the dirname contained in the fullpath in order for it to
                    # access the file it points to
                    if type(sub_element_class) == ManifestElement:
                        # fullpath is the path to the content.opf file.
                        # This should also be the path to the manifest item files.
                        sub_element_class.basedir = os.path.dirname(fullpath)
                        sub_element_class.archive = archive
            # Assigns the class we just created as an attribute of the Epub
            # object.  The attr name is taken from the 'name' value in the
            # element_map above.
            setattr(self, element['name'], element_class)
            # If we just created the spine element, we need to pass it a
            # reference to the manifest.  This will enable the spine element
            # to access manifest elements directly.
            # note: this assumes the manifest element has already been created
            if element['name'] == 'spine':
                self.spine.manifest = self.manifest
        # Classify the manifest items we care about (dead isDocument/isTOC
        # branches removed -- they only executed `pass`).
        for element in self.manifest:
            if element.isImage():
                self.images.append(element)
            if element.isCSS():
                self.css.append(element)
        # create an array called parts that references elements
        # listed in the spine
        for itemref in self.spine.list:
            self.parts.append(self.manifest.getElementById(itemref.tag.attributes['idref']))
        return self
class Tag(object):
    """A lightweight XML tag: local name, namespace, attribute dict and
    enclosed text.  Attribute access is forwarded to the attributes dict via
    the mapping dunders."""

    def __init__(self):
        self.localname = ""   # tag name without namespace
        self.namespace = ""   # namespace URI, empty when unqualified
        self.attributes = {}  # attribute name -> value
        self._text = ""       # backing store for the `text` property

    def __repr__(self):
        return "class <Epub.Tag>"

    @property
    def text(self):
        """The tag's enclosed text; never None (None is coerced to "")."""
        return self._text

    @text.setter
    def text(self, value):
        # lxml reports empty elements with text=None; normalise to "" so
        # callers can concatenate/compare without None checks.
        # (Fix: identity test `is None` instead of the `== None` anti-idiom.)
        self._text = "" if value is None else value

    @property
    def name(self):
        """The fully qualified name in Clark notation: '{namespace}localname'."""
        return '{%s}%s' % (self.namespace, self.localname)

    def __getitem__(self, key):
        return self.attributes[key]

    def __setitem__(self, key, value):
        self.attributes[key] = value

    def iteritems(self):
        # Python 2 dict API, kept for backward compatibility.
        return self.attributes.iteritems()

    def __contains__(self, key):
        return key in self.attributes

    def iterkeys(self):
        # Python 2 dict API, kept for backward compatibility.
        return self.attributes.iterkeys()
class Element(object):
    """Base wrapper around an XML element: holds its Tag, its serialized
    XHTML, and an ordered list of child elements."""

    def __init__(self):
        self.as_xhtml = ""  # serialized form of the underlying XML element
        self.tag = Tag()
        self.list = []      # child Element instances, in document order

    def __repr__(self):
        return "class <Epub.Element>"

    def append(self, i):
        """Adds a child element to the end of the child list."""
        self.list.append(i)

    def __len__(self):
        return len(self.list)

    def __getitem__(self, key):
        return self.list[key]

    def __setitem__(self, key, value):
        self.list[key] = value

    def __getattr__(self, name):
        # Fallback attribute lookup: treat the attribute name as a tag
        # localname and return every child whose tag matches it (an empty
        # list when nothing matches).
        return [child for child in self.list if child.tag.localname == name]
class ManifestElement(Element):
    """A class representing the 'item' XHTML element in an epub manifest.
    """
    def __init__(self):
        """
        ManifestElement constructor

        @data_member archive: an instance of the epub archive as returned by
            zipfile.ZipFile(file.epub).
        @data_member basedir: dirname contained in the epub's fullpath
            attribute found in the container.xml file (the path to the epub
            content.opf file); manifest hrefs are resolved relative to it.
        """
        super(ManifestElement, self).__init__()
        self.archive = None
        self.basedir = ""

    # The is* predicates check whether ANY attribute value starts with the
    # given prefix (in practice this matches the media-type attribute).

    def isImage(self):
        """True when this item's media type is an image format."""
        return any(value.startswith('image')
                   for value in self.tag.attributes.values())

    def isDocument(self):
        """True when this item is an XHTML content document."""
        return any(value.startswith('application/xhtml+xml')
                   for value in self.tag.attributes.values())

    def isTOC(self):
        """True when this item is the NCX table of contents."""
        return any(value.startswith('application/x-dtbncx+xml')
                   for value in self.tag.attributes.values())

    def isCSS(self):
        """True when this item is a CSS stylesheet."""
        return any(value.startswith('text/css')
                   for value in self.tag.attributes.values())

    def get_file(self):
        """
        @return: the file referenced in the manifest as an str
        """
        path = self.tag['href']
        # BUG FIX: when content.opf sits at the archive root, basedir is ""
        # and the old `basedir + "/" + href` produced a bogus leading slash
        # that ZipFile.read() cannot resolve.
        if self.basedir:
            path = self.basedir + "/" + path
        return self.archive.read(path)

    def get_file_stripped(self):
        """Returns the text content of an XHTML document with all markup
        stripped.  Raises Exception when the item is not an XHTML document."""
        if self.isDocument():
            # strip all tags from the xhtml
            tree = etree.fromstring(self.get_file())
            return tree.xpath("string()")
        else:
            raise Exception("Element is not an XHTML document")
class Package(Element):
    """Wraps the <package> root element of an epub's contents.opf file."""

    def __init__(self):
        super(Package, self).__init__()

    def __repr__(self):
        return "class <Epub.Package>"
class MetaData(Element):
    """Wraps the <metadata> element of an epub's contents.opf file."""

    def __init__(self):
        super(MetaData, self).__init__()

    def __repr__(self):
        return "class <Epub.MetaData>"

    def get(self, tag):
        """Returns the text of the first metadata child whose localname
        equals *tag*, or None when no such child exists."""
        for child in self.list:
            if child.tag.localname == tag:
                return child.tag.text
        return None
class Spine(Element):
    """Wraps the <spine> element of an epub's contents.opf file.  Holds a
    reference to the manifest so itemrefs can be resolved to real items."""

    def __init__(self):
        super(Spine, self).__init__()
        self.manifest = None  # wired up by Epub.from_file once built

    def __repr__(self):
        return "class <Epub.Spine>"

    def get_manifest_element(self, element):
        """Resolves a spine <itemref> to its manifest item, or None."""
        matches = (item for item in self.manifest
                   if element.tag['idref'] == item.tag['id'])
        return next(matches, None)
class Guide(Element):
    """Wraps the (optional) <guide> element of an epub's contents.opf file."""

    def __init__(self):
        super(Guide, self).__init__()

    def __repr__(self):
        return "class <Epub.Guide>"
class Manifest(Element):
    """Wraps the <manifest> element of an epub's contents.opf file."""

    def __init__(self):
        super(Manifest, self).__init__()

    def __repr__(self):
        return "class <Epub.Manifest>"

    def getElementById(self, element_id):
        """Returns the manifest item whose 'id' attribute equals
        *element_id*.  Raises Exception when no such item exists."""
        for element in self.list:
            # BUG FIX: guard the lookup -- a malformed item lacking an 'id'
            # attribute used to raise KeyError here instead of being skipped.
            if 'id' in element.tag and element.tag['id'] == element_id:
                return element
        raise Exception("Could not find element with id=%s in the manifest" % element_id)
/Delorean-1.0.0.tar.gz/Delorean-1.0.0/docs/quickstart.rst | Usage
=====
`Delorean` aims to provide you with convenient ways to get significant dates and times and easy ways to move dates from state to state.
In order to get the most of the documentation we will define some terminology.
1. **naive datetime** -- a datetime object without a timezone.
2. **localized datetime** -- a datetime object with a timezone.
3. **localizing** -- associating a naive datetime object with a timezone.
4. **normalizing** -- shifting a localized datetime object from one timezone to another, this changes both tzinfo and datetime object.
Making Some Time
^^^^^^^^^^^^^^^^
Making time with `delorean` is much easier than in life.
Start with importing delorean::
>>> from delorean import Delorean
Now lets create a create `datetime` with the current datetime and UTC timezone
::
>>> d = Delorean()
>>> d
Delorean(datetime=datetime.datetime(2013, 1, 12, 6, 10, 33, 110674), timezone='UTC')
Do you want to normalize this timezone to another timezone? Simply do the following
::
>>> d = d.shift("US/Eastern")
>>> d
Delorean(datetime=datetime.datetime(2013, 1, 12, 1, 10, 38, 102223), timezone='US/Eastern')
Now that you have successfully shifted the timezone you can easily return a localized datetime object or date with ease.
::
>>> d.datetime
datetime.datetime(2013, 1, 12, 01, 10, 38, 102223, tzinfo=<DstTzInfo 'US/Eastern' EST-1 day, 19:00:00 STD>)
>>> d.date
datetime.date(2013, 1, 12)
For the purists out there you can do things like so.
::
>>> d.naive
datetime.datetime(2013, 1, 12, 1, 10, 38, 102223)
>>> d.epoch
1357971038.102223
You can also create Delorean object using unix timestamps.
::
from delorean import epoch
>>> epoch(1357971038.102223).shift("US/Eastern")
Delorean(datetime=datetime.datetime(2013, 1, 12, 1, 10, 38, 102223), timezone='US/Eastern')
As you can see `delorean` returns a Delorean object which you can shift to the appropriate timezone to get back your original datetime object from above.
.. note::
If you are comparing Delorean objects the time since epoch will be used internally
for comparison. This allows for the greatest accuracy when comparing Delorean
objects from different timezones!
`Delorean` also now accepts localized datetimes. This means if you had a previously localized datetime object, Delorean will now accept these values and set the associated timezone and datetime information on the Delorean object.
.. note::
If you pass in a timezone with a localized datetime the timezone will be ignored, since the datetime object you are passing already has timezone information already associated with it.
::
>>> tz = timezone("US/Pacific")
>>> dt = tz.localize(datetime.utcnow())
datetime.datetime(2013, 3, 16, 5, 28, 11, 536818, tzinfo=<DstTzInfo 'US/Pacific' PDT-1 day, 17:00:00 DST>)
>>> d = Delorean(datetime=dt)
>>> d
Delorean(datetime=datetime.datetime(2013, 3, 16, 5, 28, 11, 536818), timezone='US/Pacific')
>>> d = Delorean(datetime=dt, timezone="US/Eastern")
>>> d
Delorean(datetime=datetime.datetime(2013, 3, 16, 5, 28, 11, 536818), timezone='US/Pacific')
Time Arithmetic
^^^^^^^^^^^^^^^
`Delorean` can also handle timedelta arithmetic. A timedelta may be added to or subtracted from a `Delorean` object.
Additionally, you may subtract a `Delorean` object from another Delorean object to obtain the timedelta between them.
::
>>> d = Delorean()
>>> d
Delorean(datetime=datetime.datetime(2014, 6, 3, 19, 22, 59, 289779), timezone='UTC')
>>> d += timedelta(hours=2)
>>> d
Delorean(datetime=datetime.datetime(2014, 6, 3, 21, 22, 59, 289779), timezone='UTC')
>>> d - timedelta(hours=2)
Delorean(datetime=datetime.datetime(2014, 6, 3, 19, 22, 59, 289779), timezone='UTC')
>>> d2 = d + timedelta(hours=2)
>>> d2 - d
datetime.timedelta(0, 7200)
`Delorean` objects are considered equal if they represent the same time in UTC.
::
>>> d1 = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d2 = Delorean(datetime(2015, 1, 1, 8), timezone='UTC')
>>> d1 == d2
True
Natural Language
^^^^^^^^^^^^^^^^
`Delorean` provides many ways to get certain date relative to another, often getting something simple like the next year or the next thursday can be quite troublesome.
`Delorean` provides several conveniences for this type of behaviour. For example if you wanted to get next Tuesday from today you would simply do the following
::
>>> d = Delorean()
>>> d
Delorean(datetime=datetime.datetime(2013, 1, 20, 19, 41, 6, 207481), timezone='UTC')
>>> d.next_tuesday()
Delorean(datetime=datetime.datetime(2013, 1, 22, 19, 41, 6, 207481), timezone='UTC')
Last Tuesday? Two Tuesdays ago at midnight? No problem.
::
>>> d.last_tuesday()
Delorean(datetime=datetime.datetime(2013, 1, 15, 19, 41, 6, 207481), timezone='UTC')
>>> d.last_tuesday(2).midnight
datetime.datetime(2013, 1, 8, 0, 0, tzinfo=<UTC>)
Replace Parts
^^^^^^^^^^^^^
Using the `replace` method on `Delorean` objects, we can replace the `hour`, `minute`, `second`, `year` etc
like the the `replace` method on `datetime`.
::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.replace(hour=8)
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 15), timezone='UTC')
Truncation
^^^^^^^^^^
Often we don't care how many milliseconds or even seconds are present in our datetime object. For example, it is a nuisance to retrieve `datetimes` that occur in the same minute. You would have to go through the annoying process of replacing zero for the units you don't care about before doing a comparison.
`Delorean` comes with a method that allows you to easily truncate to different unit of time: millisecond, second, minute, hour, etc.
::
>>> d = Delorean()
>>> d
Delorean(datetime=datetime.datetime(2013, 1, 21, 3, 34, 30, 418069), timezone='UTC')
>>> d.truncate('second')
Delorean(datetime=datetime.datetime(2013, 1, 21, 3, 34, 30), timezone='UTC')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2013, 1, 21, 3, 0), timezone='UTC')
Though it might seem obvious `delorean` also provides truncation to the month and year levels as well.
::
>>> d = Delorean(datetime=datetime(2012, 5, 15, 03, 50, 00, 555555), timezone="US/Eastern")
>>> d
Delorean(datetime=datetime.datetime(2012, 5, 15, 3, 50, 0, 555555), timezone='US/Eastern')
>>> d.truncate('month')
Delorean(datetime=datetime.datetime(2012, 5, 1), timezone='US/Eastern')
>>> d.truncate('year')
Delorean(datetime=datetime.datetime(2012, 1, 1), timezone='US/Eastern')
Strings and Parsing
^^^^^^^^^^^^^^^^^^^
Another pain is dealing with strings of datetimes. `Delorean` can help you parse all the datetime strings you get from various APIs.
::
>>> from delorean import parse
>>> parse("2011/01/01 00:00:00 -0700")
Delorean(datetime=datetime.datetime(2011, 1, 1, 7), timezone='UTC')
As shown above if the string passed has offset data `delorean` will convert the resulting object to UTC, if there is no timezone information passed in UTC is assumed.
Ambiguous cases
"""""""""""""""
There might be cases where the string passed to parse is a bit ambiguous for example. In the case where `2013-05-06` is passed is this May 6th, 2013 or is June 5th, 2013?
`Delorean` makes the assumptions that ``dayfirst=True`` and ``yearfirst=True`` this will lead to the following precedence.
If dayfirst is True and yearfirst is True:
- YY-MM-DD
- DD-MM-YY
- MM-DD-YY
So for example with default parameters `Delorean` will return '2013-05-06' as May 6th, 2013.
::
>>> parse("2013-05-06")
Delorean(datetime=datetime.datetime(2013, 5, 6), timezone='UTC')
Here are the precedence for the remaining combinations of ``dayfirst`` and ``yearfirst``.
If dayfirst is False and yearfirst is False:
- MM-DD-YY
- DD-MM-YY
- YY-MM-DD
If dayfirst is True and yearfirst is False:
- DD-MM-YY
- MM-DD-YY
- YY-MM-DD
If dayfirst is False and yearfirst is True:
- YY-MM-DD
- MM-DD-YY
- DD-MM-YY
Making A Few Stops
^^^^^^^^^^^^^^^^^^
Delorean wouldn't be complete without making a few stops in all the right places.
::
>>> import delorean
>>> from delorean import stops
>>> for stop in stops(freq=delorean.HOURLY, count=10): print stop
...
Delorean(datetime=datetime.datetime(2013, 1, 21, 6, 25, 33), timezone='UTC')
Delorean(datetime=datetime.datetime(2013, 1, 21, 7, 25, 33), timezone='UTC')
Delorean(datetime=datetime.datetime(2013, 1, 21, 8, 25, 33), timezone='UTC')
Delorean(datetime=datetime.datetime(2013, 1, 21, 9, 25, 33), timezone='UTC')
Delorean(datetime=datetime.datetime(2013, 1, 21, 10, 25, 33), timezone='UTC')
Delorean(datetime=datetime.datetime(2013, 1, 21, 11, 25, 33), timezone='UTC')
Delorean(datetime=datetime.datetime(2013, 1, 21, 12, 25, 33), timezone='UTC')
Delorean(datetime=datetime.datetime(2013, 1, 21, 13, 25, 33), timezone='UTC')
Delorean(datetime=datetime.datetime(2013, 1, 21, 14, 25, 33), timezone='UTC')
Delorean(datetime=datetime.datetime(2013, 1, 21, 15, 25, 33), timezone='UTC')
This allows you to do clever composition like daily, hourly, etc. This method is a generator that produces `Delorean` objects. Excellent for things like getting every Tuesday for the next 10 weeks, or every other hour for the next three months.
With Power Comes
""""""""""""""""
Now that you can do this you can also specify ``timezones`` as well ``start`` and ``stop`` dates for iteration.
::
>>> import delorean
>>> from delorean import stops
>>> from datetime import datetime
>>> d1 = datetime(2012, 5, 06)
>>> d2 = datetime(2013, 5, 06)
.. note::
The ``stops`` method only accepts naive datetime ``start`` and ``stop`` values.
Now in the case where you provide `timezone`, `start`, and `stop` all is good in the world!
::
>>> for stop in stops(freq=delorean.DAILY, count=10, timezone="US/Eastern", start=d1, stop=d2): print stop
...
Delorean(datetime=datetime.datetime(2012, 5, 6), timezone='US/Eastern')
Delorean(datetime=datetime.datetime(2012, 5, 7), timezone='US/Eastern')
Delorean(datetime=datetime.datetime(2012, 5, 8), timezone='US/Eastern')
Delorean(datetime=datetime.datetime(2012, 5, 9), timezone='US/Eastern')
Delorean(datetime=datetime.datetime(2012, 5, 10), timezone='US/Eastern')
Delorean(datetime=datetime.datetime(2012, 5, 11), timezone='US/Eastern')
Delorean(datetime=datetime.datetime(2012, 5, 12), timezone='US/Eastern')
Delorean(datetime=datetime.datetime(2012, 5, 13), timezone='US/Eastern')
Delorean(datetime=datetime.datetime(2012, 5, 14), timezone='US/Eastern')
Delorean(datetime=datetime.datetime(2012, 5, 15), timezone='US/Eastern')
.. note::
if no ``start`` or ``timezone`` value is specified start is assumed to be localized UTC object. If timezone is provided
a normalized UTC to the correct timezone.
Now in the case where a naive stop value is provided you can see why the follow error occurs if you take into account the above note.
.. doctest::
:options: +SKIP
>>> for stop in stops(freq=delorean.DAILY, timezone="US/Eastern", stop=d2): print stop
...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "delorean/interface.py", line 63, in stops
bysecond=None, until=until, dtstart=start):
TypeError: can't compare offset-naive and offset-aware datetimes
You will be better off in scenarios of this nature to skip using either and use count to limit the range of the values returned.
.. doctest::
:options: +SKIP
>>> from delorean import stops
>>> for stop in stops(freq=delorean.DAILY, count=2, timezone="US/Eastern"): print stop
...
Delorean(datetime=datetime.datetime(2013, 1, 22, 0, 10, 10), timezone='US/Eastern')
Delorean(datetime=datetime.datetime(2013, 1, 23, 0, 10, 10), timezone='US/Eastern')
| PypiClean |
/DOMinclude-1.0.1.zip/DOMinclude-1.0.1/dominclude/widgets.py | import pkg_resources
from turbogears import expose
from turbogears.widgets import CSSLink, JSLink, Widget, WidgetDescription, \
register_static_directory, JSSource
# Locate this package's bundled static assets and register them with
# TurboGears so they are served under the "dominclude" URL prefix.
static_dir = pkg_resources.resource_filename("dominclude",
                                             "static")
register_static_directory("dominclude", static_dir)

# Links to the stylesheet and script shipped with the widget.
dominc_css = CSSLink("dominclude", "css/DOMinclude.css")
dominc_js = JSLink("dominclude", "javascript/DOMinclude.js")
class DOMincConfig(JSSource):
    """Configuration for DOMinclude. When creating this widget,
    you can specify frame_size (example '[320,180]' for 320 width,
    and 180 height), display_prefix which is shown when the popup
    is displayed (default "Hide "), popup_class, open_popup_link_class,
    image_types (default "jpg|JPG|JPEG|jpeg|gif|GIF|png|PNG") and
    trigger_class (the name of the CSS class that is used to find links
    to change).
    """
    def __init__(self, trigger_class="DOMpop", popup_class="popup",
                 open_popup_link_class="popuplink", display_prefix="Hide ",
                 image_types="jpg|JPG|JPEG|jpeg|gif|GIF|png|PNG",
                 frame_size="[320,180]"):
        # Render the settings as a JavaScript object literal; DOMinclude.js
        # reads the global `DOMinccfg` when it initializes, so this source
        # must be emitted before the script (see DOMinclude.__init__).
        src = """
        DOMinccfg={
        // CSS classes
        // trigger DOMinclude
        triggerClass:'%s',
        // class of the popup
        popupClass:'%s',
        // class of the link when the popup
        // is open
        openPopupLinkClass:'%s',
        // text to add to the link when the
        // popup is open
        displayPrefix:'%s',
        // filter to define which files should
        // not open in an iframe
        imagetypes:'%s',
        // dimensions of the popup
        frameSize:%s
        }
        """ % (trigger_class, popup_class, open_popup_link_class, display_prefix,
               image_types, frame_size)
        super(DOMincConfig, self).__init__(src)
class DOMincConfigDesc(WidgetDescription):
    # Entry shown in the TurboGears widget browser for DOMincConfig.
    for_widget = DOMincConfig()
    template = "<div>Configuration for the DOMinclude widget.</div>"
    full_class_name = "dominclude.DOMincConfig"


# Shared default configuration used when DOMinclude callers don't pass one.
default_config = DOMincConfig()
class DOMinclude(Widget):
    """Creates a DOM-based "popup" window when a link is clicked.

    You can pass in a DOMincConfig instance as 'config' to change
    the settings. You can also pass in your own CSS. You must only
    use one DOMincConfig per page, otherwise you will get
    unpredictable results.
    """
    params = ["href", "text"]
    params_doc = dict(href="URL of the resource to display on click",
                      text="Text of the link to be displayed")
    template = """<a href="${href}" class="DOMpop">${text}</a>"""

    def __init__(self, config=default_config, css=dominc_css, **params):
        # Accept either a single CSS widget or a list of them.
        if isinstance(css, Widget):
            css = [css]
        self.css = css
        # The config source must precede DOMinclude.js so the script finds
        # its DOMinccfg global when it runs.
        self.javascript = [config, dominc_js]
        super(DOMinclude, self).__init__(**params)
class DOMincludeDesc(WidgetDescription):
    # Demo entry for the TurboGears widget browser; renders two sample links.
    for_widget = DOMinclude()
    template = """<div>Need to do a
    ${for_widget.display(href='http://www.google.com/', text='Google search')}?
    How about the ${for_widget.display(href='dominclude.DOMinclude/answer', text='answer')} to the
    ultimate question of life, the universe and everything?
    </div>
    """
    full_class_name = "dominclude.DOMinclude"

    @expose()
    def answer(self):
        # Target of the demo's second link.
        return "42"
/CloseableQueue-0.9.1.1.tar.gz/CloseableQueue-0.9.1.1/test/__init__.py | from CloseableQueue import CloseableQueue, Closed
from CloseableQueue import CloseableLifoQueue, CloseablePriorityQueue
from test_queue import BlockingTestMixin, BaseQueueTest
from test_queue import FailingQueue, FailingQueueTest
import unittest
# Because the method queue_test.BaseQueueTest.simple_queue_test
# uses the queue class name,
# it has to be the name of one of the Queue classes.
# In order to avoid Heisenbugs, we don't create new classes;
# we just rename the existing ones during the test.
def base_queue_class_name(cls):
    """Provide the base queue class name by removing "*able".

    E.g. "CloseableQueue" -> "Queue": everything up to and including the
    first occurrence of "able" in the class name is stripped.
    """
    name = cls.__name__
    start = name.index('able') + len('able')
    return name[start:]
class RenamingBaseQueueTest(BaseQueueTest):
    """Rename Queue.*-derived class instances for the duration of the tests."""
    def setUp(self):
        assert not hasattr(self, '_old_typename')
        # Temporarily masquerade as the corresponding stdlib Queue class,
        # since test_queue.BaseQueueTest.simple_queue_test keys on the name.
        self._old_typename = self.type2test.__name__
        self.type2test.__name__ = base_queue_class_name(self.type2test)
        super(RenamingBaseQueueTest, self).setUp()

    def tearDown(self):
        super(RenamingBaseQueueTest, self).tearDown()
        # Restore the real class name so other tests see it unchanged.
        self.type2test.__name__ = self._old_typename
        del self._old_typename
# Run the stdlib queue regression suite against each Closeable* class.
class RegressionCloseableQueueTest(RenamingBaseQueueTest):
    type2test = CloseableQueue


class RegressionCloseableLifoQueueTest(RenamingBaseQueueTest):
    type2test = CloseableLifoQueue


class RegressionCloseablePriorityQueueTest(RenamingBaseQueueTest):
    type2test = CloseablePriorityQueue
# The next two classes implement a different regression test,
# this one based on test_queue.FailingQueueTest.
class FailingCloseableQueue(CloseableQueue):
    """Derivation of CloseableQueue analogous to `test_queue.FailingQueue`."""
    def __init__(self, *args):
        # When set, the next _put/_get raises FailingQueueException once.
        self.fail_next_put = False
        self.fail_next_get = False
        CloseableQueue.__init__(self, *args)

    def _put(self, item):
        if self.fail_next_put:
            self.fail_next_put = False
            # FailingQueueException comes from test_queue (imported at
            # module level).
            raise FailingQueueException, "You Lose"
        return CloseableQueue._put(self, item)

    def _get(self):
        if self.fail_next_get:
            self.fail_next_get = False
            raise FailingQueueException, "You Lose"
        return CloseableQueue._get(self)
class FailingCloseableQueueTest(FailingQueueTest):
    """Another regression test class.

    test_queue doesn't implement this for the Lifo and Priority queues,
    so we don't either.
    """
    def test_failing_queue(self):
        # Test to make sure a queue is functioning correctly.
        # Done twice to the same instance.
        # QUEUE_SIZE comes from test_queue (imported at module level).
        q = FailingCloseableQueue(QUEUE_SIZE)
        self.failing_queue_test(q)
        self.failing_queue_test(q)
# Non-regression testing code starts here, with some utility functions.
def put_iterable(q, it, putargs={}, close=-1, last=-1):
    """Puts the iterable to the queue `q`.

    `last` and `close`, as non-negative integers,
    indicate the number of puts before, respectively,
    the `last` parameter is passed to `put`
    or a `close` is called after `put`.
    Negative values (the default) disable the corresponding behaviour.

    Returns the value of the final `q.put` call,
    or None if the iterable was empty.
    """
    # Initialized up front so an empty iterable returns None instead of
    # raising UnboundLocalError.
    ret = None
    for i in iter(it):
        ret = q.put(i, last=last == 0, **putargs)
        if close == 0:
            # Fixed: previously `q.close(*closeargs)` -- `closeargs` was
            # never defined anywhere, so reaching this line raised NameError.
            q.close()
        close -= 1
        last -= 1
    return ret
def get_iterable(q, getargs={}, count=-1):
    """The converse of put_iterable; also used in test functions.

    Yields successive `q.get(**getargs)` results.  A non-negative `count`
    limits how many items are yielded; a negative count yields forever.
    """
    remaining = count
    while remaining != 0:
        yield q.get(**getargs)
        remaining -= 1
def get_tuple(q, getargs={}, count=-1):
    """Wrapper function for get_iterable to be passed to threads and such."""
    items = get_iterable(q, getargs, count)
    return tuple(items)
class CloseableQueueTest(unittest.TestCase, BlockingTestMixin):
    """The main test suite for the closeability functionality."""
    type2test = CloseableQueue

    # Sorting method to accomodate Lifo/Priority queues in a more sensible way
    # than that used in `test_queue.BaseTestCase`.
    # This method is applied to tuples of expected values
    # so they will match the result of putting and then getting those values.
    tuple_sort = tuple

    def setUp(self):
        # set up cumulative counts for `test_join_after_close`
        import threading
        self.cum = 0
        self.cumlock = threading.Lock()

    def test_take_until_before_last(self):
        """Close the queue with `last` and then get its stored values."""
        q = self.type2test()
        # To ensure that the last is actually put before we start the get,
        # we do this manually, without threads.
        q.put(2)
        q.put(1)
        q.put(3, last=True)
        result = get_tuple(q, {'block': False}, 3)
        self.assertEqual(self.tuple_sort((2, 1, 3)), result)

    def test_take_until_after_last(self):
        """`Get` after a last `put`.

        Since the second `get` doesn't block, this should verify
        (as well as possible) that
        `put(last=True)` closes the queue atomically.

        In practice this is mostly useful as a regression test, since
        it's pretty obvious by reading `put` that it's working atomically.
        """
        def get_then_get(q):
            """Serves to verify the atomicity of `put(last=True)`."""
            q.get(timeout=2)
            q.get(block=False)
        q = self.type2test()
        try:
            # do_exceptional_blocking_test is provided by BlockingTestMixin.
            self.do_exceptional_blocking_test(get_then_get, (q,),
                                              q.put, (1, False, None, True),
                                              Closed)
        except Closed:
            pass
        else:
            self.fail('Closed exception not raised.')

    def test_put_after_last(self):
        # put(last=True) closes the queue, so a further put must fail.
        q = self.type2test()
        q.put(1, last=True)
        try:
            q.put(2)
        except Closed:
            pass
        else:
            self.fail('Closed exception not raised.')

    def test_get_after_close_on_empty_queue(self):
        """Test that `get` calls made after a `close` raise `Closed`."""
        q = self.type2test()
        q.close()
        try:
            q.get(timeout=0.1)
        except Closed:
            pass
        else:
            self.fail('Closed exception not raised.')

    def test_get_after_close_on_nonempty_queue(self):
        # Items stored before the close must still be retrievable.
        q = self.type2test()
        q.put(1)
        q.close()
        self.assertEqual(1, q.get(block=False))

    def test_put_after_close(self):
        q = self.type2test()
        q.close()
        try:
            q.put(1, timeout=0.1)
        except Closed:
            pass
        else:
            self.fail('Closed exception not raised.')

    def test_close_after_get_on_empty_queue(self):
        """Test that calling `close` raises `Closed` in a blocked thread."""
        q = self.type2test()
        try:
            self.do_exceptional_blocking_test(q.get, (True, 2), q.close, (),
                                              Closed)
        except Closed:
            pass
        else:
            self.fail('Closed exception not raised.')

    def test_close_after_put_on_full_queue(self):
        """This should also cause a release with a `Closed` exception."""
        q = self.type2test(1)
        q.put(1)
        try:
            self.do_exceptional_blocking_test(q.put, (2, True, 0.4),
                                              q.close, (), Closed)
        except Closed:
            pass
        else:
            self.fail('Closed exception not raised.')

    def worker(self, q):
        """Worker based on `test_queue.BaseQueueTest.worker`.

        Only used for `test_join_after_close`.
        """
        try:
            while True:
                x = q.get()
                if x is None:
                    q.task_done()
                    return
                with self.cumlock:
                    self.cum += x
                q.task_done()
        except Closed:
            pass

    def test_join_after_close(self):
        """Based on `test_queue.BaseQueueTest.queue_join_test`."""
        import threading
        q = self.type2test()
        self.cum = 0
        for i in (0, 1):
            threading.Thread(target=self.worker, args=(q,)).start()
        for i in xrange(100):
            q.put(i)
        q.close()
        q.join()
        self.assertEquals(self.cum, sum(range(100)),
                          "q.join() did not block until all tasks were done")
        try:
            for i in (0, 1):
                q.put(None)   # instruct the threads to close
        except Closed:
            pass
        else:
            self.fail('Closed exception not raised.')
        q.join()              # verify that you can join twice
        try:
            q.task_done()
        except ValueError:
            pass
        else:
            self.fail("Did not detect task count going negative")
class CloseableLifoQueueTest(CloseableQueueTest):
    type2test = CloseableLifoQueue
    # LIFO: expected tuples are reversed before comparison.
    tuple_sort = lambda self, it: tuple(reversed(it))


class CloseablePriorityQueueTest(CloseableQueueTest):
    type2test = CloseablePriorityQueue
    # Priority queue: expected tuples are sorted before comparison.
    tuple_sort = lambda self, it: tuple(sorted(it))
class CloseableQueueIterationTest(unittest.TestCase, BlockingTestMixin):
    """Tests the `enqueue` and `dequeue` functions."""
    type2test = CloseableQueue
    tuple_sort = tuple

    @staticmethod
    def dequeue_to_tuple(q, getargs={'timeout': 0.2}, on_empty='raise'):
        # Drain the queue through CloseableQueue.dequeue into a tuple.
        from CloseableQueue import dequeue
        return tuple(dequeue(q, getargs, on_empty))

    def do_iterable_test(self, it, q=None,
                         getargs={'timeout': 0.2}, putargs={'timeout': 0.2},
                         on_empty='raise', join=False, close=True):
        """Verifies that the iterable is the same after being en/dequeued."""
        from CloseableQueue import enqueue, dequeue
        if q is None:
            q = self.type2test()
        tup = tuple(it)
        result = self.do_blocking_test(self.dequeue_to_tuple, (q, getargs, on_empty),
                                       enqueue, (it, q, putargs, join, close))
        self.assertEqual(self.tuple_sort(tup), result)
        if close:
            # After a closing enqueue, further gets must raise Closed.
            try:
                q.get(timeout=0.2)
            except Closed:
                pass
            else:
                self.fail('Closed exception not raised.')
        return result

    def test_empty_iterable(self):
        self.do_iterable_test(())

    def test_nonempty_iterable(self):
        self.do_iterable_test((2, 1, 3))

    def test_timeout_iterable(self):
        # Reuse one queue across calls; only the final call closes it.
        q = self.type2test()
        self.do_iterable_test((2, 1, 3), q, on_empty='stop', close=False)
        self.do_iterable_test((6, 4, 5), q, on_empty='stop', close=False)
        self.do_iterable_test((9, 8, 7), q, on_empty='stop', close=True)

    def test_EnqueueThread(self):
        """Perfunctory test of the EnqueueThread convenience function."""
        from CloseableQueue import EnqueueThread
        q = self.type2test()
        result = self.do_blocking_test(self.dequeue_to_tuple, (q, {'timeout': 0.2}),
                                       EnqueueThread, ((3, 1, 2), q))
        self.assertEqual(self.tuple_sort((3, 1, 2)), result)
class CloseableLifoQueueIterationTest(CloseableQueueIterationTest):
    type2test = CloseableLifoQueue
    # LIFO: expected tuples are reversed before comparison.
    tuple_sort = lambda self, it: tuple(reversed(it))


class CloseablePriorityQueueIterationTest(CloseableQueueIterationTest):
    type2test = CloseablePriorityQueue
    # Priority queue: expected tuples are sorted before comparison.
    tuple_sort = lambda self, it: tuple(sorted(it))
def make_test_suite():
    """Build the combined suite: regression tests plus new-functionality tests."""
    from unittest import TestSuite, defaultTestLoader
    from itertools import chain
    load = defaultTestLoader.loadTestsFromTestCase
    # Fixed: this tuple previously listed FailingCloseableQueue (a queue
    # class, not a TestCase), which loadTestsFromTestCase rejects with a
    # TypeError; the intended entry is FailingCloseableQueueTest.
    regression_cases = (RegressionCloseableQueueTest,
                        RegressionCloseableLifoQueueTest,
                        RegressionCloseablePriorityQueueTest,
                        FailingCloseableQueueTest)
    regression_suite = TestSuite(load(case) for case in regression_cases)
    closeability_cases = (CloseableQueueTest,
                          CloseableLifoQueueTest,
                          CloseablePriorityQueueTest)
    iteration_cases = (CloseableQueueIterationTest,
                       CloseableLifoQueueIterationTest,
                       CloseablePriorityQueueIterationTest)
    new_functionality_cases = chain(closeability_cases, iteration_cases)
    new_functionality_suite = TestSuite(load(case)
                                        for case in new_functionality_cases)
    return TestSuite((regression_suite, new_functionality_suite))
def test_main():
    # Run the combined suite with the plain-text runner.
    from unittest import TextTestRunner
    TextTestRunner().run(make_test_suite())


if __name__ == "__main__":
    test_main()
/BlueWhale3-3.31.3.tar.gz/BlueWhale3-3.31.3/Orange/util.py | import logging
import os
import inspect
import datetime
from contextlib import contextmanager
import pkg_resources
from enum import Enum as _Enum
from functools import wraps, partial
from operator import attrgetter
from itertools import chain, count, repeat
from collections import OrderedDict, namedtuple
import warnings
# Exposed here for convenience. Prefer patching to try-finally blocks
from unittest.mock import patch # pylint: disable=unused-import
# Backwards-compat
from Orange.data.util import scale # pylint: disable=unused-import
log = logging.getLogger(__name__)
class OrangeWarning(UserWarning):
    # Base class for all warnings issued by Orange.
    pass


class OrangeDeprecationWarning(OrangeWarning, DeprecationWarning):
    # Category used by the `deprecated` decorator below.
    pass


# Orange warnings are always shown by default; deprecations can be promoted
# to hard errors (e.g. on CI) via the ORANGE_DEPRECATIONS_ERROR env var.
warnings.simplefilter('default', OrangeWarning)

if os.environ.get('ORANGE_DEPRECATIONS_ERROR'):
    warnings.simplefilter('error', OrangeDeprecationWarning)
def _log_warning(msg):
    """
    Replacement for `warnings._showwarnmsg_impl` that logs the warning

    Logs the warning in the appropriate list, or passes it to the original
    function if the warning wasn't issued within the log_warnings context.
    """
    # Walk up the call stack: if some frame opened a log_warnings() context,
    # append the message to that context's list instead of displaying it.
    for frame in inspect.stack():
        if frame.frame in warning_loggers:
            warning_loggers[frame.frame].append(msg)
            break
    else:
        # Not inside any log_warnings context; defer to the implementation
        # saved at module load time (see the patching code below).
        __orig_showwarnmsg_impl(msg)
@contextmanager
def log_warnings():
    """
    logs all warnings that occur within context, including warnings from calls.

    ```python
    with log_warnings() as warnings:
        ...
    ```

    Unlike `warnings.catch_warnings(record=True)`, this manager is thread-safe
    and will only log warning from this thread. It does so by storing the
    stack frame within which the context is created, and then checking the
    stack when the warning is issued.

    Nesting of `log_warnings` within the same function will raise an error.
    If `log_warnings` are nested within function calls, the warning is logged
    in the inner-most context.

    If `catch_warnings` is used within the `log_warnings` context, logging is
    disabled until the `catch_warnings` exits. This looks inevitable (without
    patching `catch_warnings`, which I'd prefer not to do).

    If `catch_warnings` is used outside this context, everything, including
    warning filtering, should work as expected.

    Note: the method imitates `catch_warnings` by patching the `warnings`
    module's internal function `_showwarnmsg_impl`. Python (as of version 3.9)
    doesn't seem to offer any other way of catching the warnings. This function
    was introduced in Python 3.6, so we cover all supported versions. If it is
    ever removed, unittests will crash, so we'll know. :)
    """
    # currentframe().f_back is `contextmanager`'s __enter__
    frame = inspect.currentframe().f_back.f_back
    if frame in warning_loggers:
        raise ValueError("nested log_warnings")
    try:
        # Register the caller's frame; _log_warning routes warnings here.
        warning_loggers[frame] = []
        yield warning_loggers[frame]
    finally:
        del warning_loggers[frame]
# pylint: disable=protected-access
# Maps stack frames that opened a log_warnings() context to the list that
# collects their warnings (see _log_warning above).
warning_loggers = {}
# Save the original implementation, then patch the warnings module so every
# displayed warning is routed through _log_warning.
__orig_showwarnmsg_impl = warnings._showwarnmsg_impl
warnings._showwarnmsg_impl = _log_warning
def resource_filename(path):
    """
    Return the resource filename path relative to the Orange package.

    `path` is a path relative to the "Orange" package root, e.g.
    "datasets/iris.tab".
    """
    return pkg_resources.resource_filename("Orange", path)
def get_entry_point(dist, group, name):
    """
    Load and return the entry point from the distribution.

    Unlike `pkg_resources.load_entry_point`, this function does not check
    for requirements. Calling this function is preferred because of developers
    who experiment with different versions and have inconsistent configurations.
    """
    dist = pkg_resources.get_distribution(dist)
    ep = dist.get_entry_info(group, name)
    # resolve() imports the target without validating its requirements.
    return ep.resolve()
def deprecated(obj):
    """
    Decorator. Mark called object deprecated.

    Parameters
    ----------
    obj: callable or str
        If callable, it is marked as deprecated and its calling raises
        OrangeDeprecationWarning. If str, it is the alternative to be used
        instead of the decorated function.

    Returns
    -------
    f: wrapped callable or decorator
        Returns decorator if obj was str.

    Examples
    --------
    >>> @deprecated
    ... def old():
    ...     return 'old behavior'
    >>> old()  # doctest: +SKIP
    /... OrangeDeprecationWarning: Call to deprecated ... old ...
    'old behavior'
    >>> class C:
    ...     @deprecated('C.new()')
    ...     def old(self):
    ...         return 'old behavior'
    ...     def new(self):
    ...         return 'new behavior'
    >>> C().old()  # doctest: +SKIP
    /... OrangeDeprecationWarning: Call to deprecated ... C.old ...
      Instead, use C.new() ...
    'old behavior'
    """
    if isinstance(obj, str):
        suffix = '; Instead, use ' + obj
    else:
        suffix = ''

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Bound methods get a "Class.method" label; plain callables are
            # shown via their default str() representation.
            if hasattr(func, '__self__'):
                target = '{}.{}'.format(func.__self__.__class__, func.__name__)
            else:
                target = func
            warnings.warn('Call to deprecated {}{}'.format(target, suffix),
                          OrangeDeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper

    # @deprecated('alt') yields a decorator; bare @deprecated decorates now.
    return decorator if suffix else decorator(obj)
def literal_eval(literal):
    """Like `ast.literal_eval`, but also understands the empty-set literal.

    `ast.literal_eval` cannot parse "set()" (there is no empty-set literal
    in Python), so that one spelling is special-cased here.
    """
    import ast
    return set() if literal == "set()" else ast.literal_eval(literal)
# Comparison operators recognized in requirement strings such as "count>=2".
# Order matters: two-character operators must be tried before '>' and '<'
# (requirementsSatisfied iterates this dict in insertion order).
op_map = {
    '==': lambda a, b: a == b,
    '>=': lambda a, b: a >= b,
    '<=': lambda a, b: a <= b,
    '>': lambda a, b: a > b,
    '<': lambda a, b: a < b
}

# Parsed form of one requirement: attribute name, comparison fn, raw value.
_Requirement = namedtuple("_Requirement", ["name", "op", "value"])

# Accepted spellings of booleans in requirement values; bool("False") would
# be True, so values are mapped through this table instead of cast.
bool_map = {
    "True": True,
    "true": True,
    1: True,
    "False": False,
    "false": False,
    0: False
}
def requirementsSatisfied(required_state, local_state, req_type=None):
    """
    Checks a list of requirements against a dictionary representing local state.

    Returns False on the first requirement that fails to parse or compare.

    Args:
        required_state ([str]): List of strings representing required state
            using comparison operators
        local_state (dict): Dictionary representing current state
        req_type (type): Casts values to req_type before comparing them.
            Defaults to local_state type.
    """
    for req_string in required_state:
        # parse requirement
        req = None
        for op_str in op_map:
            # op_map's insertion order tries '==', '>=', '<=' before '>', '<',
            # so e.g. "a>=1" is not mis-split on '>'.
            split = req_string.split(op_str)
            # if operation is not in req_string, continue
            if len(split) == 2:
                req = _Requirement(split[0], op_map[op_str], split[1])
                break

        if req is None:
            log.error("Invalid requirement specification: %s", req_string)
            return False

        compare_type = req_type or type(local_state[req.name])
        # check if local state satisfies required state (specification)
        if compare_type is bool:
            # boolean is a special case, where simply casting to bool does not produce target result
            required_value = bool_map[req.value]
        else:
            required_value = compare_type(req.value)
        local_value = compare_type(local_state[req.name])

        # finally, compare the values
        if not req.op(local_value, required_value):
            return False
    return True
def try_(func, default=None):
    """Call `func` and return its result; on any exception return `default`."""
    try:
        result = func()
    except Exception:  # pylint: disable=broad-except
        result = default
    return result
def flatten(lst):
    """Flatten iterable a single level (lazily)."""
    for sub in lst:
        yield from sub
class Registry(type):
    """Metaclass that registers subtypes."""
    def __new__(mcs, name, bases, attrs):
        cls = type.__new__(mcs, name, bases, attrs)
        if not hasattr(cls, 'registry'):
            # First class created with this metaclass is the base; it owns
            # the registry shared by all of its subclasses.
            cls.registry = OrderedDict()
        else:
            cls.registry[name] = cls
        return cls

    def __iter__(cls):
        # Iterating a registered class yields registered subclass names.
        return iter(cls.registry)

    def __str__(cls):
        if cls in cls.registry.values():
            return cls.__name__
        # The base class prints itself with all registered subclass names.
        return '{}({{{}}})'.format(cls.__name__, ', '.join(cls.registry))
def namegen(prefix='_', *args, spec_count=count, **kwargs):
    """Continually generate names with `prefix`, e.g. '_1', '_2', ...

    `spec_count(*args, **kwargs)` supplies the number sequence (default:
    `itertools.count()`).

    Fixed: the previous implementation called `next()` on the sequence inside
    the generator, so a *finite* `spec_count` (e.g. `range`) raised
    RuntimeError under PEP 479 when exhausted; iterating with `for` lets the
    generator end cleanly instead.
    """
    for num in spec_count(*args, **kwargs):
        yield prefix + str(num)
def export_globals(globals, module_name):
    """
    Return list of important for export globals (callables, constants) from
    `globals` dict, defined in module `module_name`.

    Usage
    -----
    In some module, on the second-to-last line:

    __all__ = export_globals(globals(), __name__)
    """
    return [getattr(v, '__name__', k)
            for k, v in globals.items()                          # export
            if ((callable(v) and v.__module__ == module_name     # callables from this module
                 or k.isupper()) and                             # or CONSTANTS
                not getattr(v, '__name__', k).startswith('_'))]  # neither marked internal
# Sentinel distinguishing "no default supplied" from a default of None.
_NOTSET = object()


def deepgetattr(obj, attr, default=_NOTSET):
    """getattr() that accepts a dotted, nested attribute path.

    `deepgetattr(o, "a.b.c")` is equivalent to `o.a.b.c`.  When any link in
    the chain is missing, `default` is returned if one was supplied;
    otherwise the AttributeError propagates.
    """
    getter = attrgetter(attr)
    try:
        return getter(obj)
    except AttributeError:
        if default is not _NOTSET:
            return default
        raise
def color_to_hex(color):
    """Format an (r, g, b) triple of 0-255 ints as an upper-case '#RRGGBB'."""
    return "#{0:02X}{1:02X}{2:02X}".format(*color)
def hex_to_color(s):
    """Parse an '#RRGGBB' string into an (r, g, b) tuple of ints."""
    return tuple(int(s[i:i + 2], 16) for i in (1, 3, 5))
def inherit_docstrings(cls):
    """Inherit methods' docstrings from first superclass that defines them.

    Fixed: `getattr(parent, name, None)` used to fall back to None, and
    `None.__doc__` is NoneType's (truthy) class docstring, so a method with
    no documented ancestor was silently given NoneType's docstring.  Parents
    that lack the attribute are now skipped instead.
    """
    for method in cls.__dict__.values():
        if inspect.isfunction(method) and method.__doc__ is None:
            for parent in cls.__mro__[1:]:
                parent_method = getattr(parent, method.__name__, None)
                if parent_method is None:
                    continue
                __doc__ = parent_method.__doc__
                if __doc__:
                    method.__doc__ = __doc__
                    break
    return cls
class Enum(_Enum):
    """Enum that represents itself with the qualified name, e.g. Color.red"""
    # Default repr looks like <Color.red: 1>; reuse __str__ ("Color.red").
    __repr__ = _Enum.__str__
def interleave(seq1, seq2):
    """
    Interleave elements of `seq2` between consecutive elements of `seq1`.

    Iteration ends as soon as either input runs dry: the trailing element of
    `seq1` is only emitted when a separator was available for the pair
    before it.

    Example
    -------
    >>> list(interleave([1, 3, 5], [2, 4]))
    [1, 2, 3, 4, 5]
    >>> list(interleave([1, 2, 3, 4], repeat("<")))
    [1, '<', 2, '<', 3, '<', 4]
    """
    items, separators = iter(seq1), iter(seq2)
    try:
        pending = next(items)
    except StopIteration:
        return
    for upcoming in items:
        yield pending
        try:
            yield next(separators)
        except StopIteration:
            return
        pending = upcoming
    yield pending
def Reprable_repr_pretty(name, itemsiter, printer, cycle):
    # type: (str, Iterable[Tuple[str, Any]], Ipython.lib.pretty.PrettyPrinter, bool) -> None
    """IPython pretty-print helper used by Reprable._repr_pretty_.

    Renders `name(field=value, ...)` through the printer's group/breakable
    machinery; on a reference cycle renders `name(...)`.
    """
    if cycle:
        # Fixed: this was `"{0}(...)".format("name")`, which printed the
        # literal string "name(...)" instead of the actual class name.
        printer.text("{0}(...)".format(name))
    else:
        def printitem(field, value):
            # One "field=value" entry.
            printer.text(field + "=")
            printer.pretty(value)

        def printsep():
            # Comma plus a potential line break between entries.
            printer.text(",")
            printer.breakable()

        itemsiter = (partial(printitem, *item) for item in itemsiter)
        sepiter = repeat(printsep)
        with printer.group(len(name) + 1, "{0}(".format(name), ")"):
            for part in interleave(itemsiter, sepiter):
                part()
class _Undef:
    # Placeholder repr'd as "<?>" for attributes missing at repr time.
    def __repr__(self):
        return "<?>"


# Shared sentinel instance used by Reprable._reprable_items.
_undef = _Undef()
class Reprable:
    """A type that inherits from this class has its __repr__ string
    auto-generated so that it "[...] should look like a valid Python
    expression that could be used to recreate an object with the same
    value [...]" (see See Also section below).

    This relies on the instances of type to have attributes that
    match the arguments of the type's constructor. Only the values that
    don't match the arguments' defaults are printed, i.e.:

    >>> class C(Reprable):
    ...     def __init__(self, a, b=2):
    ...         self.a = a
    ...         self.b = b
    >>> C(1, 2)
    C(a=1)
    >>> C(1, 3)
    C(a=1, b=3)

    If Reprable instances define `_reprable_module`, that string is used
    as a fully-qualified module name and is printed. `_reprable_module`
    can also be True in which case the type's home module is used.

    >>> class C(Reprable):
    ...     _reprable_module = True
    >>> C()
    Orange.util.C()
    >>> class C(Reprable):
    ...     _reprable_module = 'something_else'
    >>> C()
    something_else.C()
    >>> class C(Reprable):
    ...     class ModuleResolver:
    ...         def __str__(self):
    ...             return 'magic'
    ...     _reprable_module = ModuleResolver()
    >>> C()
    magic.C()

    See Also
    --------
    https://docs.python.org/3/reference/datamodel.html#object.__repr__
    """
    _reprable_module = ''

    def _reprable_fields(self):
        # type: () -> Iterable[Tuple[str, Any]]
        # Yield (name, default) for each constructor parameter, in order.
        cls = self.__class__
        sig = inspect.signature(cls.__init__)
        for param in sig.parameters.values():
            # Skip self, *args, **kwargs
            if param.name != 'self' and \
                    param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
                yield param.name, param.default

    def _reprable_omit_param(self, name, default, value):
        # A parameter is omitted from the repr when its current value is
        # identical to, or compares equal to, its declared default.
        if default is value:
            return True
        if type(default) is type(value):
            try:
                return default == value
            except (ValueError, TypeError):
                # Some values (e.g. arrays) don't reduce to a single bool.
                return False
        else:
            return False

    def _reprable_items(self):
        # Yield (name, default, value) for each parameter worth printing.
        # Deprecation warnings raised while reading an attribute are treated
        # as "attribute unavailable" rather than surfaced to the user.
        with warnings.catch_warnings():
            warnings.simplefilter("error", DeprecationWarning)
            warnings.simplefilter("error", PendingDeprecationWarning)
            for name, default in self._reprable_fields():
                try:
                    value = getattr(self, name)
                except (DeprecationWarning, PendingDeprecationWarning):
                    continue
                except AttributeError:
                    value = _undef
                if not self._reprable_omit_param(name, default, value):
                    yield name, default, value

    def _repr_pretty_(self, p, cycle):
        """IPython pretty print hook."""
        module = self._reprable_module
        if module is True:
            module = self.__class__.__module__
        nameparts = (([str(module)] if module else []) +
                     [self.__class__.__name__])
        name = ".".join(nameparts)
        Reprable_repr_pretty(
            name, ((f, v) for f, _, v in self._reprable_items()),
            p, cycle)

    def __repr__(self):
        # Plain repr: "module.Class(field=value, ...)".
        module = self._reprable_module
        if module is True:
            module = self.__class__.__module__
        nameparts = (([str(module)] if module else []) +
                     [self.__class__.__name__])
        name = ".".join(nameparts)
        return "{}({})".format(
            name, ", ".join("{}={!r}".format(f, v) for f, _, v in self._reprable_items())
        )
def wrap_callback(progress_callback, start=0, end=1):
    """
    Wrap a progress callback so its [0, 1] progress is mapped onto the
    sub-interval [start, end] of the overall task's execution time.

    :param progress_callback: callable
    :param start: float
    :param end: float
    :return: callable
    """
    @wraps(progress_callback)
    def scaled(progress, *args, **kwargs):
        return progress_callback(start + progress * (end - start),
                                 *args, **kwargs)
    return scaled
def dummy_callback(*args, **kwargs):  # pylint: disable=unused-argument
    """A no-op stand-in for a progress callback; ignores all arguments."""
    return 1
def utc_from_timestamp(timestamp) -> datetime.datetime:
    """
    Return the UTC datetime corresponding to the POSIX timestamp.

    Computed as epoch + timedelta rather than via fromtimestamp(), so
    negative timestamps work on every platform.
    """
    epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
    return epoch + datetime.timedelta(seconds=float(timestamp))
# For best result, keep this at the bottom:
# export_globals inspects everything defined above this line.
__all__ = export_globals(globals(), __name__)

# ONLY NON-EXPORTED VALUES BELOW HERE
/observations-0.1.4.tar.gz/observations-0.1.4/observations/r/athlet1.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def athlet1(path):
  """athlet1

  Data loads lazily. Type data(athlet1) into the console.

  A data.frame with 118 rows and 23 variables:

  - year. 1992 or 1993
  - apps. # applics for admission
  - top25. perc frsh class in 25 hs perc
  - ver500. perc frsh >= 500 on verbal SAT
  - mth500. perc frsh >= 500 on math SAT
  - stufac. student-faculty ratio
  - bowl. = 1 if bowl game in prev yr
  - btitle. = 1 if men's cnf chmps prv yr
  - finfour. = 1 if men's final 4 prv yr
  - lapps. log(apps)
  - d93. =1 if year = 1993
  - avg500. (ver500+mth500)/2
  - cfinfour. change in finfour
  - clapps. change in lapps
  - cstufac. change in stufac
  - cbowl. change in bowl
  - cavg500. change in avg500
  - cbtitle. change in btitle
  - lapps\_1. lapps lagged
  - school. name of university
  - ctop25. change in top25
  - bball. =1 if btitle or finfour
  - cbball. change in bball

  Source:
  https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041

  Args:
    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there.
      Filename is `athlet1.csv`.

  Returns:
    Tuple of np.ndarray `x_train` with 118 rows and 23 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd
  path = os.path.expanduser(path)
  filename = 'athlet1.csv'
  filepath = os.path.join(path, filename)

  # Fetch the CSV on first use only; later calls read the cached copy.
  if not os.path.exists(filepath):
    url = 'http://dustintran.com/data/r/wooldridge/athlet1.csv'
    maybe_download_and_extract(path, url,
                               save_file_name='athlet1.csv',
                               resume=False)

  data = pd.read_csv(filepath, index_col=0, parse_dates=True)
  x_train = data.values
  metadata = {'columns': data.columns}
  return x_train, metadata
/Mopidy-Jellyfin-1.0.4.tar.gz/Mopidy-Jellyfin-1.0.4/mopidy_jellyfin/playlists.py | from __future__ import unicode_literals
import logging
import operator
from mopidy import backend
from mopidy.models import Playlist, Ref
logger = logging.getLogger(__name__)
class JellyfinPlaylistsProvider(backend.PlaylistsProvider):
    """Mopidy playlists provider backed by a Jellyfin media server.

    A local cache of `mopidy.models.Playlist` objects, keyed by Mopidy URI,
    is kept in `self._playlists` and rebuilt by `refresh()`.
    """

    def __init__(self, *args, **kwargs):
        super(JellyfinPlaylistsProvider, self).__init__(*args, **kwargs)
        # uri -> Playlist cache; populated by the initial refresh().
        self._playlists = {}
        self.refresh()

    def as_list(self):
        '''
        Returns a list of playlist refs sorted by name in alpha order
        '''
        refs = [
            Ref.playlist(uri=i.uri, name=i.name)
            for i in self._playlists.values()]

        return sorted(refs, key=operator.attrgetter('name'))

    def get_items(self, uri):
        '''
        Query local playlist cache for a given playlist, returns track refs
        (or None when the uri is unknown)
        '''
        playlist = self._playlists.get(uri)

        if not playlist:
            logger.info('Jellyfin: No playlists found')
            return None

        return [Ref.track(uri=i.uri, name=i.name) for i in playlist.tracks]

    def lookup(self, uri):
        '''
        Query playlist cache for a given playlist, return full object
        (or None when the uri is unknown)
        '''
        playlist = self._playlists.get(uri)

        # Bug fix: an unknown uri previously fell through to playlist.uri
        # and raised AttributeError on None; mirror get_items() instead.
        if not playlist:
            logger.info('Jellyfin: No playlists found')
            return None

        return Playlist(
            uri=playlist.uri,
            name=playlist.name,
            tracks=playlist.tracks
        )

    def refresh(self):
        '''
        Generates a list of the playlists stored on the server and their
        contents and caches it locally
        '''
        playlists = {}

        raw_playlists = self.backend.remote.get_playlists()
        if raw_playlists:
            for playlist in raw_playlists:
                playlist_id = playlist.get('Id')
                playlist_uri = 'jellyfin:playlist:%s' % playlist.get('Id')
                contents = self.backend.remote.get_playlist_contents(
                    playlist_id
                )

                # Create local Mopidy tracks for audio and book files
                tracks = [
                    self.backend.remote.create_track(track)
                    for track in contents if track['Type'] in ['Audio', 'Book']
                ]

                # Only create a playlist if it has valid tracks
                if tracks:
                    playlists[playlist_uri] = Playlist(
                        uri='jellyfin:playlist:%s' % playlist.get('Id'),
                        name=playlist.get('Name'),
                        tracks=tracks
                    )

        # Dynamically generated favorite playlists are merged in as well.
        playlists.update(self.favorites())

        self._playlists = playlists
        # Notify the core (and frontends) that the playlists changed.
        backend.BackendListener.send('playlists_loaded')

        return []

    def create(self, name):
        '''
        Creates a new playlist on the server, refreshes the local cache
        '''
        playlist = self.backend.remote.create_playlist(name)

        self.refresh()

        return Playlist(
            uri='jellyfin:playlist:{}'.format(playlist.get('Id')),
            name=name,
            tracks=[]
        )

    def delete(self, uri):
        '''
        Deletes a playlist from the server and local cache.
        Returns True on success, False otherwise.
        '''
        playlist_id = uri.split(':')[-1]

        if 'favorite-' in playlist_id:
            logger.warning('Favorite playlists are dynamically generated and cannot be deleted')
        else:
            result = self.backend.remote.delete_playlist(playlist_id)

            # True if the delete succeeded, False if there was an error
            if result:
                del self._playlists[uri]
                self.refresh()
                return True

        return False

    def save(self, playlist):
        '''
        Update the remote playlist when it's modified locally
        '''
        playlist_id = playlist.uri.split(':')[-1]

        if 'favorite-' in playlist_id:
            logger.warning('Favorite playlists cannot be modified')
            return None

        # Get the list of Jellyfin Ids for each track of the playlist
        new_track_ids = [
            i.uri.split(':')[-1] for i in playlist.tracks
        ]

        self.backend.remote.update_playlist(
            playlist_id, new_track_ids
        )

        # Update the playlist views
        self.refresh()

        return playlist

    def favorites(self):
        '''
        Get dynamically generated playlists based on favorited items
        '''
        playlists = {}

        favorite_playlists = self.backend.remote.get_favorites()

        for name, contents in favorite_playlists.items():
            if contents:
                uri = f'jellyfin:playlist:favorite-{name}'
                playlists[uri] = Playlist(
                    uri=uri, name=f'Favorites - {name}', tracks=contents)

        return playlists
/Figures-0.4.4-py3-none-any.whl/figures/management/commands/repair_figures_backfilled_progress.py | from __future__ import absolute_import, print_function
from datetime import timedelta
from textwrap import dedent
from django.contrib.sites.models import Site
from django.db.models import Count, F
from figures.management.base import BaseBackfillCommand
from figures.models import CourseDailyMetrics
class Command(BaseBackfillCommand):
    '''Set all CourseDailyMetrics average_progress values to None where CDM was created
    more than one day after the date_for value. See module docstring for rationale.
    '''
    help = dedent(__doc__).strip()
    def add_arguments(self, parser):
        # Command-specific flag; the --site option comes from BaseBackfillCommand.
        parser.add_argument(
            '--dry-run',
            action='store_true',
            default=False,
            help=('Dry run. Output but don\'t save changes')
        )
        super(Command, self).add_arguments(parser)
    def handle(self, *args, **options):
        '''Find backfilled CourseDailyMetrics rows for the selected site and
        null out their average_progress, unless --dry-run is given.
        '''
        # Only the first matching site id is used; the command repairs one
        # site per invocation.
        site_id = self.get_site_ids(options['site'])[0]
        site = Site.objects.get(id=site_id)
        print('FIGURES: Repairing backfilled CDM.average_progress for site {}'.format(site))
        # NOTE(review): the class docstring says "more than one day", but the
        # filter uses a two-day offset (created > date_for + 2 days).  Since
        # date_for is a date and created a datetime this is roughly "more than
        # one day after the end of date_for" — confirm the intended threshold.
        backfills = CourseDailyMetrics.objects.filter(
            site=site, created__gt=F('date_for') + timedelta(days=2),
            average_progress__isnull=False
        ).annotate(courses_count=Count('course_id', distinct=True))
        num_backfills = backfills.count()
        if num_backfills == 0:
            print('FIGURES: Found no CDM records with average_progress to repair.')
            return
        logmsg = (
            'FIGURES: Found {count} records from dates between {date_start} and {date_end} '
            'to update with None values for average_progress, from courses:\n\n{courses}.'
            '{dry_run_msg}'.format(
                count=num_backfills,
                date_start=backfills.earliest('date_for').date_for,
                date_end=backfills.latest('date_for').date_for,
                courses='\n'.join(set(backfills.values_list('course_id', flat=True))),
                dry_run_msg='\n\nDRY RUN. Not updating records.' if options['dry_run'] else ''
            )
        )
        print(logmsg)
        if not options['dry_run']:
            # Single bulk UPDATE; model save() hooks are deliberately skipped.
            print('FIGURES: set average_progress to None for {} CourseDailyMetrics records'.format(
                num_backfills
            ))
            backfills.update(average_progress=None)
/MNN-0.0.7-cp27-cp27mu-manylinux2010_x86_64.whl/MNNTools/MNN_FB/Convolution2D.py |
# namespace: MNN
import flatbuffers
class Convolution2D(object):
    """FlatBuffers read accessor for the MNN ``Convolution2D`` table.

    NOTE: auto-generated flatbuffers code; do not edit by hand.  The vtable
    offsets used below map to the table's fields:
    4 = common, 6 = weight, 8 = bias, 10 = quanParameter, 12 = symmetricQuan.
    """
    __slots__ = ['_tab']
    @classmethod
    def GetRootAsConvolution2D(cls, buf, offset):
        # Read the root table position stored at `offset` and wrap it.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Convolution2D()
        x.Init(buf, n + offset)
        return x
    # Convolution2D
    def Init(self, buf, pos):
        # Attach this accessor to the underlying buffer at table position pos.
        self._tab = flatbuffers.table.Table(buf, pos)
    # Convolution2D
    def Common(self):
        # Nested Convolution2DCommon sub-table, or None if absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            x = self._tab.Indirect(o + self._tab.Pos)
            from .Convolution2DCommon import Convolution2DCommon
            obj = Convolution2DCommon()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None
    # Convolution2D
    def Weight(self, j):
        # j-th float32 weight value (4 bytes per element); 0 if field absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0
    # Convolution2D
    def WeightAsNumpy(self):
        # Whole weight vector as a numpy float32 array; 0 if field absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
        return 0
    # Convolution2D
    def WeightLength(self):
        # Number of weight elements; 0 if field absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
    # Convolution2D
    def Bias(self, j):
        # j-th float32 bias value; 0 if field absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0
    # Convolution2D
    def BiasAsNumpy(self):
        # Whole bias vector as a numpy float32 array; 0 if field absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
        return 0
    # Convolution2D
    def BiasLength(self):
        # Number of bias elements; 0 if field absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
    # Convolution2D
    def QuanParameter(self):
        # Nested IDSTQuan sub-table, or None if absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            x = self._tab.Indirect(o + self._tab.Pos)
            from .IDSTQuan import IDSTQuan
            obj = IDSTQuan()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None
    # Convolution2D
    def SymmetricQuan(self):
        # Nested QuantizedFloatParam sub-table, or None if absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            x = self._tab.Indirect(o + self._tab.Pos)
            from .QuantizedFloatParam import QuantizedFloatParam
            obj = QuantizedFloatParam()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None
def Convolution2DStart(builder):
    """Begin writing a Convolution2D table (5 declared fields)."""
    builder.StartObject(5)

def Convolution2DAddCommon(builder, common):
    """Store the `common` table offset in vtable slot 0."""
    builder.PrependUOffsetTRelativeSlot(
        0, flatbuffers.number_types.UOffsetTFlags.py_type(common), 0)

def Convolution2DAddWeight(builder, weight):
    """Store the weight vector offset in vtable slot 1."""
    builder.PrependUOffsetTRelativeSlot(
        1, flatbuffers.number_types.UOffsetTFlags.py_type(weight), 0)

def Convolution2DStartWeightVector(builder, numElems):
    """Begin the float32 weight vector (4-byte elements, 4-byte aligned)."""
    return builder.StartVector(4, numElems, 4)

def Convolution2DAddBias(builder, bias):
    """Store the bias vector offset in vtable slot 2."""
    return builder.PrependUOffsetTRelativeSlot(
        2, flatbuffers.number_types.UOffsetTFlags.py_type(bias), 0)

def Convolution2DStartBiasVector(builder, numElems):
    """Begin the float32 bias vector (4-byte elements, 4-byte aligned)."""
    return builder.StartVector(4, numElems, 4)

def Convolution2DAddQuanParameter(builder, quanParameter):
    """Store the quanParameter table offset in vtable slot 3."""
    builder.PrependUOffsetTRelativeSlot(
        3, flatbuffers.number_types.UOffsetTFlags.py_type(quanParameter), 0)

def Convolution2DAddSymmetricQuan(builder, symmetricQuan):
    """Store the symmetricQuan table offset in vtable slot 4."""
    builder.PrependUOffsetTRelativeSlot(
        4, flatbuffers.number_types.UOffsetTFlags.py_type(symmetricQuan), 0)

def Convolution2DEnd(builder):
    """Finish the Convolution2D table and return its offset."""
    return builder.EndObject()
/FlexGet-3.9.6-py3-none-any.whl/flexget/ui/v1/src/plugins/movies/components/movie-list/movie-list.component.spec.js | describe('Plugin: Movie-list.Component', function () {
var component, deferred;
var movieList = mockMovieListData.getMovieListById();
var movies = mockMovieListData.getMovieListMovies();
var movie = mockMovieListData.getMovieListMovieById();
beforeEach(function () {
bard.appModule('plugins.movies');
/* global $componentController, $mdDialog, $q, moviesService, $rootScope */
bard.inject('$componentController', '$mdDialog', '$q', 'moviesService', '$rootScope');
sinon.stub(moviesService, 'getListMovies').returns($q.when(movies));
});
beforeEach(function () {
component = $componentController('movieList', null,
{
list: movieList,
deleteMovieList: sinon.stub()
});
});
it('should exist', function () {
expect(component).to.exist;
});
describe('activation', function () {
it('should have called the movies service when tabIndex is 0', function () {
component.tabIndex = 0;
component.$onInit();
expect(moviesService.getListMovies).to.have.been.calledOnce;
});
it('should not call the movies service when tabIndex is not 0', function () {
component.tabIndex = 3;
component.$onInit();
expect(moviesService.getListMovies).not.to.have.been.called;
});
});
describe('loadMovies()', function () {
it('should exist', function () {
expect(component.loadMovies).to.exist;
expect(component.loadMovies).to.be.a('function');
});
it('should set variables', function () {
component.loadMovies();
$rootScope.$digest();
expect(component.movies).not.to.be.empty;
expect(component.currentPage).to.exist;
expect(component.totalMovies).to.exist;
expect(component.pageSize).to.exist;
});
});
describe('deleteMovie()', function () {
beforeEach(function () {
deferred = $q.defer();
sinon.stub(moviesService, 'deleteMovie').returns(deferred.promise);
});
it('should exist', function () {
expect(component.deleteMovie).to.exist;
expect(component.deleteMovie).to.be.a('function');
});
it('should call the dialog show function', function () {
sinon.spy($mdDialog, 'show');
component.deleteMovie(movieList, movie);
expect($mdDialog.show).to.have.been.calledOnce;
});
describe('confirmation', function () {
it('should call the movies service', function () {
sinon.stub($mdDialog, 'show').returns($q.resolve());
component.deleteMovie(movieList, movie);
$rootScope.$digest();
expect(moviesService.deleteMovie).to.have.been.calledOnce;
});
it('should remove the movie from the list', function () {
sinon.stub($mdDialog, 'show').returns($q.resolve());
deferred.resolve();
/* global angular */
component.movies = angular.copy(movies.movies);
component.deleteMovie(movieList, movie);
$rootScope.$digest();
expect(component.movies.length).to.equal(movies.movies.length - 1);
});
});
});
}); | PypiClean |
/Chiplotle3-0.4.3-py3-none-any.whl/chiplotle3/geometry/shapes/spiral_archimedean.py | from builtins import range
from chiplotle3.geometry.core.path import Path
import math
def spiral_archimedean(radius, num_turns = 5, wrapping_constant = 1, direction = "cw", segments = 500):
    '''Construct an Archimedean (arithmetic) spiral ``Path``.

    The general Archimedean spiral equation is::

        r = a * theta^(1/n)

    where ``r`` is the radius, ``a`` a scale factor and ``n`` the
    ``wrapping_constant``, which controls how tightly the spiral is wound.
    Several classic spirals correspond to particular wrapping constants:

    * lituus: -2
    * hyperbolic spiral: -1
    * Archimedes' spiral: 1
    * Fermat's spiral: 2

    :param radius: radius reached at the end of the last turn.
    :param num_turns: number of full revolutions.
    :param wrapping_constant: exponent ``n``; must be non-zero.
    :param direction: winding direction, ``"cw"`` or ``"ccw"``.
    :param segments: number of sample points along the spiral.
    :raises ValueError: if ``wrapping_constant`` is zero.

    More info: http://mathworld.wolfram.com/ArchimedeanSpiral.html
    '''
    if wrapping_constant == 0:
        # Previously this fell through to a confusing ZeroDivisionError.
        raise ValueError('wrapping_constant must be non-zero')

    total_rads = 2.0 * math.pi * num_turns
    theta = 0.0
    theta_incr = total_rads / float(segments - 1)
    exponent = 1.0 / wrapping_constant
    scaler = float(radius) / math.pow(total_rads, exponent)

    # Spirals with a negative wrapping_constant technically begin at
    # infinity, which obviously isn't practical.  Nudge theta by one step
    # to get a reasonable starting point, and rescale so the first sampled
    # point lies at the requested radius.
    # (Local renamed from `pow`, which shadowed the builtin.)
    if wrapping_constant < 0.0:
        theta += theta_incr
        start_pow = math.pow(theta_incr, abs(exponent))
        scaler = float(radius) / (1.0 / start_pow)

    spiral_points = []
    for _ in range(segments):
        if exponent > 0:
            r = scaler * math.pow(theta, exponent)
        else:
            # Negative exponent: r = a / theta^|1/n|
            r = scaler * 1.0 / math.pow(theta, abs(exponent))
        x = math.cos(theta) * r
        y = math.sin(theta) * r
        if direction == "ccw":
            y *= -1.0
        spiral_points.append((x, y))
        theta += theta_incr

    return Path(spiral_points)
## RUN DEMO CODE
## RUN DEMO CODE
# Draws one spiral of each main type and displays them as a single group.
if __name__ == '__main__':
    from chiplotle3.geometry.core.group import Group
    from chiplotle3.geometry.transforms.offset import offset
    from chiplotle3.geometry.transforms.rotate import rotate
    from chiplotle3.tools import io
    #one of each main spiral type
    s1 = spiral_archimedean(500, wrapping_constant = 1)
    s2 = spiral_archimedean(500, wrapping_constant = 2, direction = "ccw")
    offset(s2, (0, -1000))
    #these two are long, so we'll rotate them and move them to the side
    #of the others
    s3 = spiral_archimedean(1800, wrapping_constant = -1, direction = "ccw")
    rotate(s3, math.pi * 1.5)
    offset(s3, (650, 400))
    s4 = spiral_archimedean(1500, wrapping_constant = -2, direction = "ccw")
    rotate(s4, math.pi * .6)
    offset(s4, (1000, -1100))
    g = Group([s1, s2, s3, s4])
    # Open the grouped spirals in the viewer.
    io.view(g)
/CslBot-0.21-py3-none-any.whl/cslbot/commands/filter.py |
from ..helpers import arguments, textutils
from ..helpers.command import Command
def get_filters(handler, target):
    """Return a human-readable list of the output filters active on target.

    Filter callables are named ``gen_<filter>``; the prefix is stripped for
    display.  An empty filter chain is reported as ``passthrough``.
    """
    active = [fn.__name__[4:] for fn in handler.outputfilter[target]]
    return "Current filter(s): %s" % ", ".join(active or ['passthrough'])
@Command('filter', ['config', 'target', 'handler', 'is_admin', 'nick', 'type'])
def cmd(send, msg, args):
    """Changes the output filter.
    Syntax: {command} [--channel channel] <filter|--show|--list|--reset|--chain filter,[filter2,...]>
    """
    # Filters are per-channel state, so refuse to operate over PM.
    if args['type'] == 'privmsg':
        send('Filters must be set in channels, not via private message.')
        return
    isadmin = args['is_admin'](args['nick'])
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('--channel', nargs='?', default=args['target'])
    # Exactly one action may be requested per invocation.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('filter', nargs='?')
    group.add_argument('--show', action='store_true')
    group.add_argument('--list', action='store_true')
    group.add_argument('--reset', '--clear', action='store_true')
    group.add_argument('--chain')
    # With no arguments at all, just report the current filter chain.
    if not msg:
        send(get_filters(args['handler'], args['target']))
        return
    try:
        cmdargs = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    if cmdargs.list:
        send("Available filters are %s" % ", ".join(textutils.output_filters.keys()))
    elif cmdargs.reset and isadmin:
        # Drop every filter for the channel.
        args['handler'].outputfilter[cmdargs.channel].clear()
        send("Okay!")
    elif cmdargs.chain and isadmin:
        # --chain appends to an existing chain; it never starts one.
        if not args['handler'].outputfilter[cmdargs.channel]:
            send("Must have a filter set in order to chain.")
            return
        filter_list, output = textutils.append_filters(cmdargs.chain)
        if filter_list is not None:
            args['handler'].outputfilter[cmdargs.channel].extend(filter_list)
        send(output)
    elif cmdargs.show:
        send(get_filters(args['handler'], cmdargs.channel))
    elif isadmin:
        # If we're just adding a filter without chain, blow away any existing filters.
        filter_list, output = textutils.append_filters(cmdargs.filter)
        if filter_list is not None:
            args['handler'].outputfilter[cmdargs.channel].clear()
            args['handler'].outputfilter[cmdargs.channel].extend(filter_list)
        send(output)
    else:
        # Mutating actions (set/reset/chain) all require admin rights.
        send('This command requires admin privileges.')
/FeinCMS-23.8.0.tar.gz/FeinCMS-23.8.0/feincms/admin/tree_editor.py |
import json
import logging
from functools import reduce
from django.contrib.admin.actions import delete_selected
from django.contrib.admin.views import main
from django.contrib.auth import get_permission_codename
from django.db.models import Q
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseNotFound,
HttpResponseServerError,
)
from django.templatetags.static import static
from django.utils.encoding import force_str
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import gettext, gettext_lazy as _
from mptt.exceptions import InvalidMove
from mptt.forms import MPTTAdminForm
from feincms import settings
from feincms.extensions import ExtensionModelAdmin
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------
def django_boolean_icon(field_val, alt_text=None, title=None):
    """Render an admin-style yes/no/unknown icon as an HTML ``<img>`` tag.

    Adapted from django.contrib.admin.templatetags.admin_list.
    """
    icon_name = {True: "yes", False: "no", None: "unknown"}[field_val]
    # Truthiness on purpose: an empty alt_text also falls back to the icon name.
    alt_text = alt_text or icon_name
    title = 'title="%s" ' % title if title is not None else ""
    icon_url = static("feincms/img/icon-%s.gif" % icon_name)
    return mark_safe(f'<img src="{icon_url}" alt="{alt_text}" {title}/>')
def _build_tree_structure(queryset):
    """
    Build an in-memory representation of the item tree, trying to keep
    database accesses down to a minimum. The returned dictionary looks like
    this (as json dump)::

        {"6": [7, 8, 10]
         "7": [12],
         "8": [],
         ...
         }

    Root nodes are collected under the key ``0``.
    """
    mptt_opts = queryset.model._mptt_meta
    ordered = queryset.order_by(mptt_opts.tree_id_attr, mptt_opts.left_attr)
    tree = {}
    for pk, parent_pk in ordered.values_list(
        "pk", "%s_id" % mptt_opts.parent_attr
    ):
        key = str(parent_pk) if parent_pk else 0
        tree.setdefault(key, []).append(pk)
    return tree
# ------------------------------------------------------------------------
def ajax_editable_boolean_cell(item, attr, text="", override=None):
    """
    Generate a html snippet for showing a boolean value on the admin page.

    Item is an object, attr is the attribute name we should display. Text
    is an optional explanatory text to be included in the output.

    This function will emit code to produce a checkbox input with its state
    corresponding to the item.attr attribute if no override value is passed.
    This input is wired to run a JS ajax updater to toggle the value.

    If override is passed in, ignores the attr attribute and returns a
    static image for the override boolean with no user interaction possible
    (useful for "disabled and you can't change it" situations).
    """
    if text:
        text = " (%s)" % text
    if override is not None:
        inner = [django_boolean_icon(override, text), text]
    else:
        checked = 'checked="checked"' if getattr(item, attr) else ""
        inner = [
            '<input type="checkbox" data-inplace data-inplace-id="%s"'
            ' data-inplace-attribute="%s" %s>' % (item.pk, attr, checked)
        ]
    # Wrap in a div whose id the JS updater targets for replacement.
    parts = ['<div id="wrap_%s_%d">' % (attr, item.pk)] + inner + ["</div>"]
    return mark_safe("".join(parts))
# ------------------------------------------------------------------------
def ajax_editable_boolean(attr, short_description):
    """
    Convenience function: Assign the return value of this method to a variable
    of your ModelAdmin class and put the variable name into list_display.

    Example::

        class MyTreeEditor(TreeEditor):
            list_display = ('__str__', 'active_toggle')

            active_toggle = ajax_editable_boolean('active', _('is active'))
    """
    def toggle_column(self, item):
        return ajax_editable_boolean_cell(item, attr)

    # Attributes consumed by the admin and by _collect_editable_booleans.
    toggle_column.short_description = short_description
    toggle_column.editable_boolean_field = attr
    return toggle_column
# ------------------------------------------------------------------------
class ChangeList(main.ChangeList):
    """
    Custom ``ChangeList`` class which ensures that the tree entries are always
    ordered in depth-first order (order by ``tree_id``, ``lft``).
    """
    def __init__(self, request, *args, **kwargs):
        # Keep the requesting user around; the stock ChangeList does not.
        self.user = request.user
        super().__init__(request, *args, **kwargs)
    def get_queryset(self, *args, **kwargs):
        # Depth-first ordering is what the tree editor JS expects.
        mptt_opts = self.model._mptt_meta
        qs = (
            super()
            .get_queryset(*args, **kwargs)
            .order_by(mptt_opts.tree_id_attr, mptt_opts.left_attr)
        )
        # Force has_filters, so that the expand/collapse in sidebar is visible
        self.has_filters = True
        return qs
    def get_results(self, request):
        mptt_opts = self.model._mptt_meta
        if settings.FEINCMS_TREE_EDITOR_INCLUDE_ANCESTORS:
            # Extend the result set with every ancestor of each matched node,
            # so filtered results still render as a coherent tree.
            clauses = [
                Q(
                    **{
                        mptt_opts.tree_id_attr: tree_id,
                        mptt_opts.left_attr + "__lte": lft,
                        mptt_opts.right_attr + "__gte": rght,
                    }
                )
                for lft, rght, tree_id in self.queryset.values_list(
                    mptt_opts.left_attr, mptt_opts.right_attr, mptt_opts.tree_id_attr
                )
            ]
            # We could optimise a bit here by explicitely filtering out
            # any clauses that are for parents of nodes included in the
            # queryset anyway. (ie: drop all clauses that refer to a node
            # that is a parent to another node)
            if clauses:
                # Note: Django ORM is smart enough to drop additional
                # clauses if the initial query set is unfiltered. This
                # is good.
                self.queryset |= self.model._default_manager.filter(
                    reduce(lambda p, q: p | q, clauses)
                )
        super().get_results(request)
        # Pre-process permissions because we still have the request here,
        # which is not passed in later stages in the tree editor
        for item in self.result_list:
            item.feincms_changeable = self.model_admin.has_change_permission(
                request, item
            )
            item.feincms_addable = (
                item.feincms_changeable
                and self.model_admin.has_add_permission(request, item)
            )
# ------------------------------------------------------------------------
class TreeEditor(ExtensionModelAdmin):
    """
    The ``TreeEditor`` modifies the standard Django administration change list
    to a drag-drop enabled interface for django-mptt_-managed Django models.

    .. _django-mptt: https://github.com/django-mptt/django-mptt/
    """
    form = MPTTAdminForm
    if settings.FEINCMS_TREE_EDITOR_INCLUDE_ANCESTORS:
        # Make sure that no pagination is displayed. Slicing is disabled
        # anyway, therefore this value does not have an influence on the
        # queryset
        list_per_page = 999999999
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.list_display = list(self.list_display)
        # Ensure the indented tree-title column is present and is the only
        # link to the change form.
        if "indented_short_title" not in self.list_display:
            if self.list_display[0] == "action_checkbox":
                self.list_display[1] = "indented_short_title"
            else:
                self.list_display[0] = "indented_short_title"
        self.list_display_links = ("indented_short_title",)
        opts = self.model._meta
        # Template override chain, most specific first.
        self.change_list_template = [
            "admin/feincms/%s/%s/tree_editor.html"
            % (opts.app_label, opts.object_name.lower()),
            "admin/feincms/%s/tree_editor.html" % opts.app_label,
            "admin/feincms/tree_editor.html",
        ]
        # Pre-computed "<app_label>.<codename>" permission strings.
        self.object_change_permission = (
            opts.app_label + "." + get_permission_codename("change", opts)
        )
        self.object_add_permission = (
            opts.app_label + "." + get_permission_codename("add", opts)
        )
        self.object_delete_permission = (
            opts.app_label + "." + get_permission_codename("delete", opts)
        )
    def changeable(self, item):
        # Flag pre-computed in ChangeList.get_results; defaults to True when
        # the item did not pass through the change list.
        return getattr(item, "feincms_changeable", True)
    def indented_short_title(self, item):
        """
        Generate a short title for an object, indent it depending on
        the object's depth in the hierarchy.
        """
        mptt_opts = item._mptt_meta
        r = ""
        try:
            url = item.get_absolute_url()
        except (AttributeError,):
            url = None
        if url:
            # Hidden field consumed by the media library integration.
            r = (
                '<input type="hidden" class="medialibrary_file_path"'
                ' value="%s" id="_refkey_%d" />'
            ) % (url, item.pk)
        changeable_class = ""
        if not self.changeable(item):
            changeable_class = " tree-item-not-editable"
        tree_root_class = ""
        if not item.parent_id:
            tree_root_class = " tree-root"
        # Indentation marker: 18px per MPTT level.
        r += (
            '<span id="page_marker-%d" class="page_marker%s%s"'
            ' style="width: %dpx;"> </span> '
        ) % (
            item.pk,
            changeable_class,
            tree_root_class,
            14 + getattr(item, mptt_opts.level_attr) * 18,
        )
        # r += '<span tabindex="0">'
        if hasattr(item, "short_title") and callable(item.short_title):
            r += escape(item.short_title())
        else:
            r += escape("%s" % item)
        # r += '</span>'
        return mark_safe(r)
    indented_short_title.short_description = _("title")
    def _collect_editable_booleans(self):
        """
        Collect all fields marked as editable booleans. We do not
        want the user to be able to edit arbitrary fields by crafting
        an AJAX request by hand.
        """
        # Memoized: computed at most once per admin instance.
        if hasattr(self, "_ajax_editable_booleans"):
            return
        self._ajax_editable_booleans = {}
        for field in self.list_display:
            # The ajax_editable_boolean return value has to be assigned
            # to the ModelAdmin class
            try:
                item = getattr(self.__class__, field)
            except (AttributeError, TypeError):
                continue
            attr = getattr(item, "editable_boolean_field", None)
            if attr:
                if hasattr(item, "editable_boolean_result"):
                    result_func = item.editable_boolean_result
                else:
                    # Default: re-render just this attribute's cell.
                    def _fn(attr):
                        return lambda self, instance: [
                            ajax_editable_boolean_cell(instance, attr)
                        ]
                    result_func = _fn(attr)
                self._ajax_editable_booleans[attr] = result_func
    def _toggle_boolean(self, request):
        """
        Handle an AJAX toggle_boolean request
        """
        try:
            item_id = int(request.POST.get("item_id", None))
            attr = str(request.POST.get("attr", None))
        except Exception:
            return HttpResponseBadRequest("Malformed request")
        if not request.user.is_staff:
            logger.warning(
                'Denied AJAX request by non-staff "%s" to toggle boolean'
                " %s for object #%s",
                request.user,
                attr,
                item_id,
            )
            return HttpResponseForbidden(
                _("You do not have permission to modify this object")
            )
        self._collect_editable_booleans()
        # Only attributes explicitly registered as editable booleans may be
        # toggled; everything else is rejected.
        if attr not in self._ajax_editable_booleans:
            return HttpResponseBadRequest("not a valid attribute %s" % attr)
        try:
            obj = self.model._default_manager.get(pk=item_id)
        except self.model.DoesNotExist:
            return HttpResponseNotFound("Object does not exist")
        if not self.has_change_permission(request, obj=obj):
            logger.warning(
                'Denied AJAX request by "%s" to toggle boolean %s for' " object %s",
                request.user,
                attr,
                item_id,
            )
            return HttpResponseForbidden(
                _("You do not have permission to modify this object")
            )
        new_state = not getattr(obj, attr)
        logger.info(
            'Toggle %s on #%d %s to %s by "%s"',
            attr,
            obj.pk,
            obj,
            "on" if new_state else "off",
            request.user,
        )
        try:
            before_data = self._ajax_editable_booleans[attr](self, obj)
            setattr(obj, attr, new_state)
            obj.save()
            # Construct html snippets to send back to client for status update
            data = self._ajax_editable_booleans[attr](self, obj)
        except Exception:
            logger.exception("Unhandled exception while toggling %s on %s", attr, obj)
            return HttpResponseServerError(f"Unable to toggle {attr} on {obj}")
        # Weed out unchanged cells to keep the updates small. This assumes
        # that the order a possible get_descendents() returns does not change
        # before and after toggling this attribute. Unlikely, but still...
        return HttpResponse(
            json.dumps([b for a, b in zip(before_data, data) if a != b]),
            content_type="application/json",
        )
    def get_changelist(self, request, **kwargs):
        # Use the depth-first-ordered ChangeList defined above.
        return ChangeList
    def changelist_view(self, request, extra_context=None, *args, **kwargs):
        """
        Handle the changelist view, the django view for the model instances
        change list/actions page.
        """
        if "actions_column" not in self.list_display:
            self.list_display.append("actions_column")
        # handle common AJAX requests
        if "__cmd" in request.POST:
            cmd = request.POST.get("__cmd")
            if cmd == "toggle_boolean":
                return self._toggle_boolean(request)
            elif cmd == "move_node":
                return self._move_node(request)
            return HttpResponseBadRequest("Oops. AJAX request not understood.")
        extra_context = extra_context or {}
        # Serialized parent->children map and node levels, consumed by the
        # tree editor JavaScript.
        extra_context["tree_structure"] = mark_safe(
            json.dumps(_build_tree_structure(self.get_queryset(request)))
        )
        extra_context["node_levels"] = mark_safe(
            json.dumps(
                dict(
                    self.get_queryset(request)
                    .order_by()
                    .values_list("pk", self.model._mptt_meta.level_attr)
                )
            )
        )
        return super().changelist_view(request, extra_context, *args, **kwargs)
    def has_add_permission(self, request, obj=None):
        """
        Implement a lookup for object level permissions. Basically the same as
        ModelAdmin.has_add_permission, but also passes the obj parameter in.
        """
        perm = self.object_add_permission
        if settings.FEINCMS_TREE_EDITOR_OBJECT_PERMISSIONS:
            r = request.user.has_perm(perm, obj)
        else:
            r = request.user.has_perm(perm)
        return r and super().has_add_permission(request)
    def has_change_permission(self, request, obj=None):
        """
        Implement a lookup for object level permissions. Basically the same as
        ModelAdmin.has_change_permission, but also passes the obj parameter in.
        """
        perm = self.object_change_permission
        if settings.FEINCMS_TREE_EDITOR_OBJECT_PERMISSIONS:
            r = request.user.has_perm(perm, obj)
        else:
            r = request.user.has_perm(perm)
        return r and super().has_change_permission(request, obj)
    def has_delete_permission(self, request, obj=None):
        """
        Implement a lookup for object level permissions. Basically the same as
        ModelAdmin.has_delete_permission, but also passes the obj parameter in.
        """
        perm = self.object_delete_permission
        if settings.FEINCMS_TREE_EDITOR_OBJECT_PERMISSIONS:
            r = request.user.has_perm(perm, obj)
        else:
            r = request.user.has_perm(perm)
        return r and super().has_delete_permission(request, obj)
    def _move_node(self, request):
        # Handle an AJAX drag-and-drop move request; responds with a plain
        # "OK"/"FAIL" body the JS side checks.
        if hasattr(self.model.objects, "move_node"):
            tree_manager = self.model.objects
        else:
            tree_manager = self.model._tree_manager
        queryset = self.get_queryset(request)
        cut_item = queryset.get(pk=request.POST.get("cut_item"))
        pasted_on = queryset.get(pk=request.POST.get("pasted_on"))
        position = request.POST.get("position")
        if not self.has_change_permission(request, cut_item):
            self.message_user(request, _("No permission"))
            return HttpResponse("FAIL")
        if position in ("last-child", "left", "right"):
            try:
                tree_manager.move_node(cut_item, pasted_on, position)
            except InvalidMove as e:
                self.message_user(request, "%s" % e)
                return HttpResponse("FAIL")
            # Ensure that model save methods have been run (required to
            # update Page._cached_url values, might also be helpful for other
            # models inheriting MPTTModel)
            for item in queryset.filter(id__in=(cut_item.pk, pasted_on.pk)):
                item.save()
            self.message_user(
                request, gettext("%s has been moved to a new position.") % cut_item
            )
            return HttpResponse("OK")
        self.message_user(request, _("Did not understand moving instruction."))
        return HttpResponse("FAIL")
    def _actions_column(self, instance):
        # Drag handle only for items the current user may change.
        if self.changeable(instance):
            return ['<div class="drag_handle"></div>']
        return []
    def actions_column(self, instance):
        # list_display column: joined HTML snippets from _actions_column.
        return mark_safe(" ".join(self._actions_column(instance)))
    actions_column.short_description = _("actions")
    def delete_selected_tree(self, modeladmin, request, queryset):
        """
        Deletes multiple instances and makes sure the MPTT fields get
        recalculated properly. (Because merely doing a bulk delete doesn't
        trigger the post_delete hooks.)
        """
        # If this is True, the confirmation page has been displayed
        if request.POST.get("post"):
            n = 0
            # TODO: The disable_mptt_updates / rebuild is a work around
            # for what seems to be a mptt problem when deleting items
            # in a loop. Revisit this, there should be a better solution.
            with queryset.model.objects.disable_mptt_updates():
                for obj in queryset:
                    if self.has_delete_permission(request, obj):
                        obj.delete()
                        n += 1
                        obj_display = force_str(obj)
                        self.log_deletion(request, obj, obj_display)
                    else:
                        logger.warning(
                            'Denied delete request by "%s" for object #%s',
                            request.user,
                            obj.id,
                        )
            if n > 0:
                queryset.model.objects.rebuild()
            self.message_user(
                request, _("Successfully deleted %(count)d items.") % {"count": n}
            )
            # Return None to display the change list page again
            return None
        else:
            # (ab)using the built-in action to display the confirmation page
            return delete_selected(self, request, queryset)
    def get_actions(self, request):
        # Swap the stock bulk-delete action for the MPTT-aware variant.
        actions = super().get_actions(request)
        if "delete_selected" in actions:
            actions["delete_selected"] = (
                self.delete_selected_tree,
                "delete_selected",
                _("Delete selected %(verbose_name_plural)s"),
            )
        return actions
/GeophPy-0.32.2.tar.gz/GeophPy-0.32.2/geophpy/processing/magnetism.py | import numpy as np
#from scipy.linalg import lu_solve, lu_factor
#from geophpy.operation.general import *
import geophpy.operation.general as genop
#------------------------------------------------------------------------------#
# User defined parameters #
#------------------------------------------------------------------------------#
# list of available magnetic survey configurations
#prosptechlist = ["Magnetic field", "Magnetic field gradient", "Vertical component gradient"]
sensorconfiglist = ["TotalField", "TotalFieldGradient", "Fluxgate"]
# available structural indices, kept as strings — presumably consumed by an
# Euler-deconvolution routine; confirm against the callers.
structuralindexlist = ['-1','1','2','3']
def logtransform(dataset, multfactor=5, setnan=True, valfilt=False):
    ''' Apply a logarithmic transformation to the dataset.

    cf. :meth:`~geophpy.dataset.DataSet.logtransform`

    The z-image values are scaled by ``multfactor`` and then mapped through a
    signed base-10 log: ``v -> log10(v)`` for ``v > 1``, ``v -> -log10(-v)``
    for ``v < -1``.  Scaled values in the open interval (-1, 1) have no
    meaningful log magnitude and are replaced by NaN (``setnan=True``) or 0
    (``setnan=False``); values exactly equal to +/-1 are left unchanged.

    Returns the (mutated) dataset.
    '''
    # Replacement value for scaled data inside (-1, 1) #########################
    if setnan:
        replace_val = np.nan
    else:
        replace_val = 0
    # Filter values ...TBD... ##################################################
    if valfilt:
        pass
    # Filter zimage ############################################################
    else:
        val = dataset.data.z_image * multfactor
        # BUGFIX: compute all three masks on the scaled values *before*
        # mutating the array.  Previously the (-1, 1) mask was evaluated after
        # the log transforms, so any input with 1 < |scaled value| < 10 — whose
        # transform lands inside (-1, 1) — was wrongly wiped to NaN/0.
        neg_mask = val < -1
        pos_mask = val > 1
        small_mask = np.logical_and(val > -1, val < 1)
        val[neg_mask] = -np.log10(-val[neg_mask])
        val[pos_mask] = np.log10(val[pos_mask])
        val[small_mask] = replace_val
        dataset.data.z_image = val
    return dataset
def polereduction(dataset, apod=0, inclination=65, declination=0, azimuth=0,
                  magazimuth=None, incl_magn=None, decl_magn=None):
    ''' Dataset Reduction to the magnetic Pole.

    cf. :meth:`~geophpy.dataset.DataSet.polereduction`

    Parameters
    ----------
    dataset : DataSet
        Dataset whose ``data.z_image`` grid is reduced to the pole in place.
    apod : float
        Apodization (%) applied before the Fourier transform.
    inclination, declination : float
        Inclination and declination of the ambient magnetic field (degrees).
    azimuth : float
        Azimuth of the survey profiles (degrees).
    magazimuth : float or None
        If given, the angle between the survey direction and the ambient
        field; it overrides ``declination`` and ``azimuth`` (see below).
    incl_magn, decl_magn : float or None
        Inclination/declination of the source total magnetization. If either
        is None, remanent magnetization is neglected (induced only).
    '''
    val = dataset.data.z_image

    # Transformation to spectral domain ########################################
    # Apodization before FT (in-place)
    if apod > 0:
        genop.apodisation2d(val, apod)

    # Filling NaNs
    valnan = np.copy(val)
    nan_idx = np.isnan(valnan)  # index of NaNs in the original dataset
    valfill = genop.fillnanvalues(valnan, indexout=False)  # Filled dataset

    # De-meaning
    valmean = np.nanmean(val)
    valfill = valfill - valmean

    # Fourier Transform computation
    valTF = np.fft.fft2(valfill)  # Frequency domain

    # Wavenumbers computation
    ny, nx = val.shape
    dx = dataset.info.x_gridding_delta
    dy = dataset.info.y_gridding_delta
    kx, ky = genop.wavenumber(nx, ny, dx, dy)  # x, y-directed wavenumber matrices
    k = np.sqrt(kx**2 + ky**2)  # radial wavenumber matrix
    indk = k != 0  # index k valid for the RTP

    # Magnetic azimuth #########################################################
    ## The angle between the survey profile direction and the ambient magnetic
    ## field is phi = declination - azimuth.
    ## If the given angle is directly the angle between the ambient magnetic
    ## field and the survey direction (phi), we assume
    ## declination = 0 so azimuth = -phi.
    if magazimuth is not None:
        # ambient field
        phi = magazimuth  # given azimuth is magnetic azimuth
        declination = 0   # declination is set to 0 for the _dircos routine
        azimuth = -phi

    # Reduction to the Pole Operator ###########################################
    # np.zeros (NOT np.empty) so the DC coefficient (k == 0) is a well-defined
    # zero: the field has been de-meaned and the mean is restored after the
    # inverse FFT. An uninitialized DC coefficient (previous behavior) made
    # the result non-deterministic.
    F = np.zeros([ny, nx], dtype=complex)

    # Neglecting source remanent magnetization
    if incl_magn is None or decl_magn is None:
        # Ambient field unit-vector components
        fx, fy, fz = _dircos(inclination, declination, azimuth)

        # RTP operator
        # Simplified form of Blakely 1996, eq12.31, p331, for mx=fx, my=fy and mz=fz
        F[indk] = k[indk]**2 / ( k[indk]*fz + 1j*(kx[indk]*fx + ky[indk]*fy) )**2

    # With source remanent magnetization
    else:
        # Ambient field and total magnetization unit-vector components
        fx, fy, fz = _dircos(inclination, declination, azimuth)
        mx, my, mz = _dircos(incl_magn, decl_magn, azimuth)

        # RTP operator [Blakely 1996, eq12.31, p331]
        a1 = mz*fz - mx*fx
        a2 = mz*fz - my*fy
        a3 = -my*fx - mx*fy
        b1 = mx*fz + mz*fx
        b2 = my*fz + mz*fy
        F[indk] = k[indk]**2 / (a1*kx[indk]**2
                                + a2*ky[indk]**2
                                + a3*kx[indk]*ky[indk]
                                + 1j*k[indk]*(b1*kx[indk] + b2*ky[indk]) )

    # Applying filter ##########################################################
    valTF_filt = valTF*F

    # Transformation back to spatial domain ####################################
    valfilt = np.fft.ifft2(valTF_filt)    # Spatial domain
    valfilt = np.real(valfilt) + valmean  # Re-meaning
    valfilt[nan_idx] = np.nan             # unfilled dataset

    dataset.data.z_image = valfilt
    return dataset
def continuation(dataset, apod=0, distance=2, totalfieldconversionflag=False, separation=0.7):
    ''' Dataset continuation (upward/downward).

    cf. :meth:`~geophpy.dataset.DataSet.continuation`
    '''
    grid = dataset.data.z_image

    # Spectral-domain transformation ###########################################
    # Optional apodization (in place) before the Fourier transform
    if apod > 0:
        genop.apodisation2d(grid, apod)

    # NaN filling (the FFT cannot handle NaNs)
    grid_copy = np.copy(grid)
    idx_nan = np.isnan(grid_copy)  # NaN positions in the original grid
    grid_filled = genop.fillnanvalues(grid_copy, indexout=False)

    # Forward FFT
    spectrum = np.fft.fft2(grid_filled)  # Frequency domain

    # Wavenumber grids #########################################################
    nrows, ncols = grid.shape
    step_x = dataset.info.x_gridding_delta
    step_y = dataset.info.y_gridding_delta
    kx, ky = genop.wavenumber(ncols, nrows, step_x, step_y)
    k = np.sqrt(kx**2 + ky**2)  # radial wavenumber
    knz = k != 0                # non-zero wavenumbers

    # Continuation #############################################################
    # Upward/downward continuation operator [Blakely 1996, eq12.8, p317]
    # (equivalent to spectrum * _continu(distance, k, direction='auto'))
    spectrum_filt = spectrum * np.exp(-distance * k)

    # Magnetic total-field gradient survey #####################################
    ###
    # ...TBD... should we keep the conversion?
    # Habit in wumap is to convert to total-field, but continuation applies
    # to gradient data too. Converting to total-field is recommended for
    # downward continuation though [Tabbagh 1999].
    ###
    if totalfieldconversionflag:
        # Conversion from vertical gradient to total-field
        # (equivalent to spectrum * _grad_to_field(separation, k))
        spectrum_filt[knz] = spectrum_filt[knz] / (1. - np.exp(-separation * k[knz]))

    # Back to the spatial domain ###############################################
    result = np.real(np.fft.ifft2(spectrum_filt))
    result[idx_nan] = np.nan  # restore the original NaNs

    dataset.data.z_image = result
    return dataset
def eulerdeconvolution(dataset, apod=0, structind=None, windows=None, xstep=None, ystep=None):
    ''' Classic Euler deconvolution.

    cf. :meth:`~geophpy.dataset.DataSet.eulerdeconvolution`

    Parameters
    ----------
    dataset : DataSet
        Dataset whose ``data.z_image`` grid is deconvolved.
    apod : float
        Apodization (%) applied before the Fourier transform.
    structind : int or None
        Structural index. If None (or not in [0, 1, 2, 3]) the structural
        index is estimated along with the source position.
    windows : sequence or None
        A single window ``[xmin, xmax, ymin, ymax]`` or a sequence of such
        windows. If None, a sliding window of ``xstep`` x ``ystep`` samples
        is built over the whole grid.
    xstep, ystep : int or None
        Size (in samples) of the sliding window (only used when ``windows``
        is None). A missing value is derived from the other one and the grid
        steps; if both are missing, ``xstep`` defaults to 7.

    Returns
    -------
    list of [x0, y0, z0, N, residuals, xmin, xmax, ymin, ymax], one per window.
    '''
    val = dataset.data.z_image

    # Transformation to spectral domain ########################################
    # Apodization before FT (in-place)
    if apod > 0:
        genop.apodisation2d(val, apod)

    # Filling NaNs
    valnan = np.copy(val)
    nan_idx = np.isnan(valnan)  # index of NaNs in the original dataset
    valfill = genop.fillnanvalues(valnan, indexout=False)  # Filled dataset

    # Fourier Transform computation
    valTF = np.fft.fft2(valfill)  # Frequency domain

    # Wavenumbers computation
    ny, nx = val.shape
    dx = dataset.info.x_gridding_delta
    dy = dataset.info.y_gridding_delta
    kx, ky = genop.wavenumber(nx, ny, dx, dy)  # x, y-directed wavenumber matrices
    k = np.sqrt(kx**2 + ky**2)  # radial wavenumber matrix

    # Spatial derivatives in the frequency domain ##############################
    # ...TBD... use spatial computation of the horizontal derivatives
    # to limit noise as suggested by [Cooper 2002]?
    dval_dxTF = valTF*1j*kx  # x-directed gradient [Blakely 1996, eq12.15, p324]
    dval_dyTF = valTF*1j*ky  # y-directed gradient [Blakely 1996, eq12.16, p324]
    dval_dzTF = valTF*k      # vertical gradient [Blakely 1996, eq12.13, p323]

    # Potential field derivatives in the spatial domain ########################
    dval_dx = np.real(np.fft.ifft2(dval_dxTF))
    dval_dy = np.real(np.fft.ifft2(dval_dyTF))
    dval_dz = np.real(np.fft.ifft2(dval_dzTF))

    # Data sub-set creation ####################################################
    # Grid coordinates
    xvect = dataset.get_xvect().ravel()  # x-coordinates vector
    yvect = dataset.get_yvect().ravel()  # y-coordinates vector
    xgrid = dataset.get_xgrid()  # x-grid coordinates
    ygrid = dataset.get_ygrid()  # y-grid coordinates

    # Creating a 2-D sliding window over the dataset grid
    if windows is None:
        # Deriving the missing step from the other one and the grid steps;
        # max(1, ...) prevents a zero slicing step on strongly anisotropic grids.
        if xstep is None and ystep is None:
            xstep = 7
            ystep = max(1, round(xstep*dx/dy))
        elif xstep is None:
            xstep = max(1, round(ystep*dy/dx))
        elif ystep is None:
            ystep = max(1, round(xstep*dx/dy))

        # x-windows extent
        xbounds = xvect[::xstep]
        xmins = xbounds[:-1]
        xmaxs = xbounds[1:]

        # y-windows extent
        ybounds = yvect[::ystep]
        ymins = ybounds[:-1]
        ymaxs = ybounds[1:]

        # Extending windows to the dataset max spatial extent
        ## results in a smaller window extent at the dataset border
        if xmaxs[-1] != xvect.max():
            xmins = np.append(xmins, xmaxs[-1])
            xmaxs = np.append(xmaxs, xvect.max())
        if ymaxs[-1] != yvect.max():
            ymins = np.append(ymins, ymaxs[-1])
            ymaxs = np.append(ymaxs, yvect.max())

        # Windows extents
        windows = []
        for idx, x in enumerate(xmins):
            for idy, y in enumerate(ymins):
                windows.append([xmins[idx], xmaxs[idx], ymins[idy], ymaxs[idy]])

    # Euler's deconvolution ####################################################
    if np.asarray(windows).ndim == 1:  # unique window provided
        windows = [windows]

    results = []
    for extent in windows:
        # Current spatial sub-window
        xmin, xmax, ymin, ymax = extent
        indx = np.logical_and(xgrid <= xmax, xgrid >= xmin)
        indy = np.logical_and(ygrid <= ymax, ygrid >= ymin)
        indval = np.logical_and(indx, indy)
        xi = xgrid[indval].reshape((-1, 1))
        yi = ygrid[indval].reshape((-1, 1))
        zi = 0*xi.reshape((-1, 1))  # observation plane taken at z = 0
        vali = valfill[indval].reshape((-1, 1))
        dvdxi = dval_dx[indval].reshape((-1, 1))
        dvdyi = dval_dy[indval].reshape((-1, 1))
        dvdzi = dval_dz[indval].reshape((-1, 1))

        # Estimated Structural Index
        # NOTE(review): the valid fixed indexes tested here ([0, 1, 2, 3]) do
        # not match ``structuralindexlist`` (['-1','1','2','3']) — confirm
        # which list is authoritative.
        if structind is None or structind not in [0, 1, 2, 3]:
            # Solving Euler's equation for the source position AND the index
            A = np.hstack((dvdxi, dvdyi, dvdzi, -vali))
            b = xi*dvdxi + yi*dvdyi + zi*dvdzi
            X, residuals, rank, s = np.linalg.lstsq(A, b, rcond=None)
            # lstsq returns an *empty* residuals array when A is rank
            # deficient or under-determined: guard before converting.
            residuals = float(residuals[0]) if residuals.size else np.nan
            x0, y0, z0, N = [float(i) for i in X]

        # Fixed Structural Index
        else:
            N = structind
            A = np.hstack((dvdxi, dvdyi, dvdzi))
            b = xi*dvdxi + yi*dvdyi + zi*dvdzi + N*vali
            X, residuals, rank, s = np.linalg.lstsq(A, b, rcond=None)
            residuals = float(residuals[0]) if residuals.size else np.nan
            x0, y0, z0 = [float(i) for i in X]

        # Storing results
        results.append([x0, y0, z0, N, residuals,
                        xi.min(), xi.max(), yi.min(), yi.max()])

    return results
def _searchnearest(array, value):
''' Return the index of the nearest value of val in array the value itself. '''
# 1-D arrays
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
# 2-D arrays
if len(array.shape)>1:
idx = np.unravel_index(idx, array.shape)
return idx, array[idx]
##def _searchnearest(array, val):
## '''
##    Search the nearest value of val in the array
##
## Parameters :
##
## :array: 1D array ordered to find the nearest value of val
##
## :val: value to find
##
## Returns :
##
## :index: index in the array of the nearest value found
##
## :valnearest: value of the nearest value found
##
## '''
## n = len(array)
## for i in range(n):
## gap = abs(val-array[i])
## if ((i==0) or (gap<mingap)):
## mingap = gap
## index = i
## return index, array[index]
##def _gaussresolution(A, b):
## ''' Solve Ax=b equation using Gauss pivot and LU factorisation. '''
##
## n=len(A)
## # gauss pivot calculation
## for i1 in range(n-1):
## # find the partial pivot
## l=i1
## xam = np.absolute(A[i1][i1])
## for j in range(i1+1,n):
## x = np.absolute(A[j][i1])
## if (x>xam):
## xam = x
## l=j
##
## # set pivot at its place, swapping pivot and current lines
## if (l>i1):
## aux = b[l]
## b[l] = b[i1]
## b[i1] = aux
## for j in range(n):
## aux = A[l][j]
## A[l][j] = A[i1][j]
## A[i1][j] = aux
##
## # solves Ax=b using LU factorisation
## x = lu_solve(lu_factor(A), b)
##
## return x
def analyticsignal(dataset, apod=0):
    ''' Dataset Analytic Signal amplitude.

    cf. :meth:`~geophpy.dataset.DataSet.analyticsignal`

    Replaces ``dataset.data.z_image`` by the amplitude of its analytic
    signal, sqrt(dT/dx**2 + dT/dy**2 + dT/dz**2), the derivatives being
    computed in the frequency domain.

    Parameters
    ----------
    dataset : DataSet
        Dataset whose ``data.z_image`` grid is processed.
    apod : float
        Apodization (%) applied before the Fourier transform.
    '''
    val = dataset.data.z_image

    # Transformation to spectral domain ########################################
    # Apodization before FT (in-place)
    if apod > 0:
        genop.apodisation2d(val, apod)

    # Filling NaNs
    valnan = np.copy(val)
    nan_idx = np.isnan(valnan)  # index of NaNs in the original dataset
    valfill = genop.fillnanvalues(valnan, indexout=False)  # Filled dataset

    # De-meaning
    valmean = np.nanmean(val)
    valfill = valfill - valmean

    # Fourier Transform computation
    valTF = np.fft.fft2(valfill)  # Frequency domain

    # Wavenumbers computation
    ny, nx = val.shape
    dx = dataset.info.x_gridding_delta
    dy = dataset.info.y_gridding_delta
    kx, ky = genop.wavenumber(nx, ny, dx, dy)  # x, y-directed wavenumber matrices
    k = np.sqrt(kx**2 + ky**2)  # radial wavenumber matrix

    # Spatial derivatives in the frequency domain ##############################
    # ...TBD... make functions for deriv_x, _y, _z of order n of a potential field?
    dval_dxTF = valTF*1j*kx  # x-directed gradient [Blakely 1996, eq12.15, p324]
    dval_dyTF = valTF*1j*ky  # y-directed gradient [Blakely 1996, eq12.16, p324]
    dval_dzTF = valTF*k      # vertical gradient [Blakely 1996, eq12.13, p323]

    # Potential field derivatives in the spatial domain ########################
    dval_dx = np.real(np.fft.ifft2(dval_dxTF))
    dval_dy = np.real(np.fft.ifft2(dval_dyTF))
    dval_dz = np.real(np.fft.ifft2(dval_dzTF))

    # Analytic signal amplitude in the spatial domain ##########################
    A = np.sqrt(dval_dx**2 + dval_dy**2 + dval_dz**2)

    # Unfilling dataset ########################################################
    A[nan_idx] = np.nan

    dataset.data.z_image = A
    return dataset
def magconfigconversion(dataset, fromconfig, toconfig, apod=0, FromBottomSensorAlt=0.3, FromTopSensorAlt=1.0, ToBottomSensorAlt=0.3, ToTopSensorAlt=1.0, inclination=65, declination=0, azimuth=0, magazimuth=None):
    ''' Conversion between the different sensors configurations.

    cf. :meth:`~geophpy.dataset.DataSet.magconfigconversion`

    Parameters
    ----------
    dataset : DataSet
        Dataset whose ``data.z_image`` grid is converted in place.
    fromconfig, toconfig : str
        Input and output survey configurations (see ``sensorconfiglist``:
        'TotalField', 'TotalFieldGradient' or 'Fluxgate').
    apod : float
        Apodization (%) applied before the Fourier transform.
    FromBottomSensorAlt, FromTopSensorAlt : float
        Bottom/top sensor altitudes of the input configuration.
    ToBottomSensorAlt, ToTopSensorAlt : float
        Bottom/top sensor altitudes of the output configuration.
    inclination, declination : float
        Inclination and declination of the ambient magnetic field (degrees).
    azimuth : float
        Azimuth of the survey profiles (degrees).
    magazimuth : float or None
        If given, the angle between the survey direction and the ambient
        field; it overrides ``declination`` and ``azimuth``.
    '''
    val = dataset.data.z_image

    # Transformation to spectral domain ########################################
    # Apodization before FT (in-place)
    if apod > 0:
        genop.apodisation2d(val, apod)

    # Filling NaNs
    valnan = np.copy(val)
    nan_idx = np.isnan(valnan)  # index of NaNs in the original dataset
    valfill = genop.fillnanvalues(valnan, indexout=False)  # Filled dataset

    # Fourier Transform computation
    valTF = np.fft.fft2(valfill)  # Frequency domain

    # Magnetic azimuth #########################################################
    ## The angle between the survey profile direction and the ambient magnetic
    ## field is phi = declination - azimuth.
    ## If the given angle is directly the angle between the ambient magnetic
    ## field and the survey direction (phi), we assume
    ## declination = 0 so azimuth = -phi.
    if magazimuth is not None:
        # ambient field
        phi = magazimuth  # given azimuth is magnetic azimuth
        declination = 0
        azimuth = -phi

    # Wavenumbers computation ##################################################
    ny, nx = val.shape
    dx = dataset.info.x_gridding_delta
    dy = dataset.info.y_gridding_delta
    kx, ky = genop.wavenumber(nx, ny, dx, dy)  # x, y-directed wavenumber matrices
    k = np.sqrt(kx**2 + ky**2)  # radial wavenumber matrix

    # Configuration transformation #############################################
    ###
    # ...TBD... use a dictionary to choose the conversion method?
    #           _gradmagfield_conversion_chooser(config_in, config_out)
    ###
    # Total-field to total-field vertical gradient
    if fromconfig == 'TotalField' and toconfig == 'TotalFieldGradient':
        z0 = FromBottomSensorAlt
        znew = ToBottomSensorAlt
        separation = ToTopSensorAlt - ToBottomSensorAlt
        F = _field_to_grad(separation, k, z0=z0, znew=znew)

    # Total-field vertical gradient to total-field
    elif fromconfig == 'TotalFieldGradient' and toconfig == 'TotalField':
        z0 = FromBottomSensorAlt
        znew = ToBottomSensorAlt
        separation = FromTopSensorAlt - FromBottomSensorAlt
        F = _grad_to_field(separation, k, z0=z0, znew=znew)

    # Total-field vertical gradient to fluxgate
    elif fromconfig == 'TotalFieldGradient' and toconfig == 'Fluxgate':
        F = _grad_to_fluxgate(inclination, declination, azimuth, kx, ky)

    # Fluxgate to total-field vertical gradient
    elif fromconfig == 'Fluxgate' and toconfig == 'TotalFieldGradient':
        F = _fluxgate_to_grad(inclination, declination, azimuth, kx, ky)

    # Total-field to fluxgate
    elif fromconfig == 'TotalField' and toconfig == 'Fluxgate':
        z0 = FromBottomSensorAlt
        znew = ToBottomSensorAlt
        separation = ToTopSensorAlt - ToBottomSensorAlt
        F = _field_to_fluxgate(inclination, declination, azimuth, kx, ky, separation, z0=z0, znew=znew)

    # Fluxgate to total-field
    elif fromconfig == 'Fluxgate' and toconfig == 'TotalField':
        z0 = FromBottomSensorAlt
        znew = ToBottomSensorAlt
        separation = FromTopSensorAlt - FromBottomSensorAlt
        F = _fluxgate_to_field(inclination, declination, azimuth, kx, ky, separation, z0=z0, znew=znew)

    # Unknown configuration conversion
    else:
        # NOTE(review): unknown conversions silently leave the data unchanged
        # (identity filter); consider raising a ValueError instead.
        F = 1

    # Applying filter ##########################################################
    valTF_filt = valTF*F

    # Transformation back to spatial domain ####################################
    valfilt = np.fft.ifft2(valTF_filt)  # Spatial domain
    valfilt = np.real(valfilt)          # (data were not de-meaned)
    valfilt[nan_idx] = np.nan           # unfilled dataset

    dataset.data.z_image = valfilt
    return dataset
##def _definecaseandsense( prosptechused, prosptechsim):
## '''
## define case and sense of conversion
## case 1 = "Magnetic field" <-> "Magnetic field gradient"
## case 2 = "Magnetic field" <-> "Vertical component gradient"
## case 3 = "Magnetic field gradient" <-> "Vertical component gradient"
## '''
##
## case = None
## sense = None
## if ((prosptechused == prosptechlist[0]) and (prosptechsim == prosptechlist[1])) :
## case = 1
## sense = 0
## elif ((prosptechused == prosptechlist[0]) and (prosptechsim == prosptechlist[2])) :
## case = 2
## sense = 0
## elif ((prosptechused == prosptechlist[1]) and (prosptechsim == prosptechlist[0])) :
## case = 1
## sense = 1
## elif ((prosptechused == prosptechlist[1]) and (prosptechsim == prosptechlist[2])) :
## case = 3
## sense = 0
## elif ((prosptechused == prosptechlist[2]) and (prosptechsim == prosptechlist[0])) :
## case = 2
## sense = 1
## elif ((prosptechused == prosptechlist[2]) and (prosptechsim == prosptechlist[1])) :
## case = 3
## sense = 1
##
## return case, sense
##def _gradmagfield_conversion_chooser(config_in, config_out):
## '''
## Return the appropriate configuration converter.
##
## Parameters
## ----------
##
## config_in, config_out : str, {"TotalField", "FieldGradient", "VerticalComponentGradient"} (list from getsensorconfiglist())
## Input and ouput magnetic survey configuration.
##
## '''
##
##    # Checking for invalid survey configuration
## valid_config = ', '.join(getsensorconfiglist())
## for config in [config_in, config_out]:
## if config not in getsensorconfiglist():
## raise ValueError(('Invalid survey configuration encountered. '
## 'Valid survey configurations are %s, '
## 'but %s encountered') %(valid_config, config)
## )
##
## # Converter chooser definition
## config_chooser = {
## 'TotalField' : 'Field',
## 'TotalFieldGradient' : 'Grad',
## 'Fluxgate' : 'Fluxgate',
## }
##
## converter_chooser ={
## 'FieldToGrad' : _field_to_grad,
## 'GradToField' : _grad_to_field,
## 'FieldToFluxgate' : _field_to_fluxgate,
## 'FluxgateToField' : _vertical_to_field,
## 'GradToFluxgate' : _grad_to_fluxgate,
## 'FluxgateToGrad' : _fluxgate_to_grad,
## }
##
## # Choosing input configuration
## FromConfig = config_chooser[config_in]
## ToConfig = config_chooser[config_out]
## conversion_type = ''.join([FromConfig,'To',ToConfig])
##
## return converter_chooser[conversion_type]
def susceptibility(dataset, prosptech, apod=0, downsensoraltitude = 0.3, upsensoraltitude = 1.0, calculationdepth=.0, stratumthickness=1.0, inclineangle = 65, alphaangle = 0):
    '''
    Dataset magnetic susceptibility of the equivalent stratum.
    cf. :meth:`~geophpy.dataset.DataSet.susceptibility`

    BUGFIX: the first half of the body had been commented out, leaving the
    live tail referencing undefined names (``teta``, ``nx``, ``ny``,
    ``deltax``, ``deltay``, ``dz``, ``zp``, ``cosu``, ``cosv``, ``cosz``,
    ``nan_indexes``) and raising NameError. The setup code is restored below.
    '''
    val = dataset.data.z_image

    # Grid dimensions (number of lines and columns).
    ny = len(val)
    nx = len(val[0])

    # Remember where the NaN holes are so they can be restored at the end,
    # then fill them so the FFT operates on a complete grid.
    nan_indexes = np.isnan(val)
    if np.isnan(val).any():
        x = np.linspace(dataset.info.x_min, dataset.info.x_max, nx, endpoint=True)
        y = np.linspace(dataset.info.y_min, dataset.info.y_max, ny, endpoint=True)
        _fillnanvalues(x, y, val.T)  # fills the NaN values with interpolated values

    if apod > 0:
        genop.apodisation2d(val, apod)

    # Complex conversion, val[][] -> cval[][]
    coef = 1./(400.*np.pi)
    cval = np.array(val*coef, dtype=complex)

    # Fast Fourier transform of the (scaled) field map.
    cvalfft = np.fft.fft2(cval)

    # Filter application: deg -> rad angle conversions.
    rinc = (inclineangle*np.pi)/180     # inclination in radians
    ralpha = (alphaangle*np.pi)/180     # alpha angle in radians

    cosu = np.absolute(np.cos(rinc) * np.cos(ralpha))
    cosv = np.absolute(np.cos(rinc) * np.sin(ralpha))
    cosz = np.sin(rinc)

    dz = downsensoraltitude - upsensoraltitude
    zp = calculationdepth + downsensoraltitude

    deltax = dataset.info.x_gridding_delta
    deltay = dataset.info.y_gridding_delta

    if (inclineangle == 0):
        teta = np.pi/2
    elif (inclineangle == 90):          # pi/2
        teta = np.pi
    else:
        teta = np.pi + np.arctan(-2/np.tan(rinc))

    # Field Module, medium field along I inclination calculation if bipolar field
    FM = 14.722 * (4*(np.square(np.cos(teta)) + np.square(np.sin(teta))))
    cvalfft[0][0] = 0.
    for ix in range(nx):                                # for each column
        for iy in range(ny):                            # for each line
            if ((ix != 0) or (iy != 0)):                # if not first point of array
                cu, cv = _freq(ix, iy, deltax, deltay, nx, ny)   # complex u and v calculation
                # continuation
                cz = np.sqrt(np.square(cu) + np.square(cv))
                cvalfft[iy][ix] = cvalfft[iy][ix] * complex(np.exp(2*np.pi*cz*zp), 0.)
                # pole reduction with potential calculation (and not with field as standard pole reduction)
                cred = complex(cz*cosz, cu*cosu + cv*cosv)
                cvalfft[iy][ix] = (cvalfft[iy][ix] * complex(cz,0))/(2*np.pi*np.square(cred))
                if (prosptech == prosptechlist[1]):     # if prospection technic is magnetic field gradient
                    cvalfft[iy][ix] = cvalfft[iy][ix] / (1-np.exp(dz*cz))
    # FFT inversion
    icvalfft = np.fft.ifft2(cvalfft)
    # Equivalent stratum thickness
    val = (icvalfft.real*2*100000)/(FM*stratumthickness)  # 100000 because susceptibility in 10^-5 SI
    val[nan_indexes] = np.nan   # restore the initial NaN values, as in the initial array
    dataset.data.z_image = val  # to get the 'z_image' array in (x,y) format.
def getsensorconfiglist():
    ''' Returns the list of available magnetic sensor configurations. '''
    # Returns the module-level list itself (not a copy), so callers share
    # the same object.
    return sensorconfiglist
def getstructuralindexlist():
    ''' Returns the list of available structural index for Euler deconvolution. '''
    # Returns the module-level list itself (not a copy), so callers share
    # the same object.
    return structuralindexlist
##def getprosptechlist():
## '''
## Get list of prospection technicals availables
##
## Returns : list of prospection technicals
##
## '''
##
## return prosptechlist
def _continu(z, k, direction='auto'):
    r''' Upward/Downward continuation operator in the spectral domain.

    Parameters
    ----------
    z : float
        The continuation distance.
    k : array
        The radial wavenumber (:math:`k = \sqrt{k_x^2 + k_y^2 }`).
    direction : str {'auto', 'up', 'down'}, optional
        Direction of the continuation. If 'auto', the direction is derived
        from the sign of the continuation distance (z >= 0 means 'up',
        z < 0 means 'down').
    '''
    if direction == 'auto':
        direction = 'up' if z >= 0 else 'down'
    operators = {'up': _contiup, 'down': _contidwn}
    return operators[direction](z, k)
def _contiup(z, k):
r''' Upward continuation operator in the spectral domain.
Parameters
----------
z : float
The continuation distance.
k : array
The radial wavenumber (:math:`k = \sqrt{k_x^2 + k_y^2 }`.
'''
dz = np.abs(z)
return np.exp(-dz*k) # [Blakely 1996, eq12.8, p317]
def _contidwn(z, k):
r''' Downward continuation operator in the spectral domain.
Parameters
----------
z : float
The continuation distance.
k : array
The radial wavenumber (:math:`k = \sqrt{k_x^2 + k_y^2 }`.
'''
dz = np.abs(z)
return np.exp(+dz*k) # [Blakely 1996, p320]
def _field_to_grad(separation, k, z0=None, znew=None):
    r''' Total-field to gradient operator.

    Total-field magnitude to vertical gradient of the total-field magnitude
    operator in the spectral domain.

    The vertical gradient of the total-field (magnitude) is a simple
    subtraction of two total-field (magnitude) measures at two different
    heights.

    Parameters
    ----------
    separation : float
        Top-Bottom sensors separation.
        If **separation > 0**, the classic **bottom-top** gradient operator is returned.
        If **separation < 0**, the **top-bottom** gradient operator is returned.
    k : array
        The radial wavenumber (:math:`k = \sqrt{k_x^2 + k_y^2 }`).
    z0 : float
        Bottom sensor altitude.
    znew : float
        Bottom sensor new wanted altitude for the gradient.

    Returns
    -------
    F : array
        The total-field to gradient operator in the spectral domain.

    Notes
    -----
    If current and new survey altitudes are given, the vertical gradient of
    the total-field is computed using three simple steps:
    1. simulation of the bottom sensor total-field to the new altitude
       by continuation;
    2. simulation of the top sensor total-field to the new altitude + separation
       by continuation;
    3. subtraction of the simulated top and bottom sensor total-field.

    See also
    --------
    _grad_to_field, _grad_to_fluxgate, _fluxgate_to_grad, _field_to_fluxgate, _fluxgate_to_field
    '''
    # top-bottom sensor separation
    ds = separation

    # Changes in altitude ######################################################
    if z0 is not None and znew is not None:
        # Bottom sensor distance to new altitude
        dh = znew - z0
        # bottom sensor at new altitude - top sensor (with new altitude)
        F = _continu(dh, k) - _continu(dh + ds, k)
    # No changes in altitude ###################################################
    else:
        # bottom sensor - top sensor
        # BUGFIX: the original referenced an undefined name ``dz`` here;
        # the sensor-separation local defined above is ``ds``.
        F = 1 - _continu(ds, k)
    return F
def _grad_to_field(separation, k, z0=None, znew=None):
    r''' Gradient to total-field operator.

    Vertical gradient of the total-field magnitude to magnitude of the
    total-field operator in the spectral domain (inverse of the
    total-field-to-gradient operator).

    Parameters
    ----------
    separation : float
        Sensor separation.
        If **separation > 0**, the classic **bottom-top** gradient operator is returned.
        If **separation < 0**, the **top-bottom** gradient operator is returned.
    k : array
        The radial wavenumber (:math:`k = \sqrt{k_x^2 + k_y^2 }`).
    z0 : float
        Bottom sensor altitude.
    znew : float
        Bottom sensor new wanted altitude for the gradient.

    Returns
    -------
    F : array
        The gradient to total-field operator in the spectral domain.
        NOTE(review): entries where k == 0 are left uninitialized
        (``np.empty``) — presumably masked out by the caller; confirm.

    See also
    --------
    _field_to_grad, _grad_to_fluxgate, _fluxgate_to_grad, _field_to_fluxgate, _fluxgate_to_field
    '''
    valid = k != 0  # wavenumbers valid for the inversion (avoid k == 0)
    F = np.empty(k.shape, dtype=complex)
    forward = _field_to_grad(separation, k, z0=z0, znew=znew)
    F[valid] = forward[valid]**-1
    return F
def _grad_to_fluxgate(inclination, declination, azimuth, kx, ky):
    ''' Gradient to fluxgate operator.

    Vertical gradient of the total-field magnitude to fluxgate
    (gradient of the vertical component of the field) operator
    in the spectral domain.

    Parameters
    ----------
    inclination : float,
        Inclination in degrees positive below horizontal.
    declination : float,
        Declination in degrees positive east of geographic north.
    azimuth : float,
        Azimuth of x axis in degrees positive east of north.
    kx, ky : array_like
        The wavenumbers coordinates in the x and y-directions.

    Returns
    -------
    F : array
        The gradient to fluxgate operator in the spectral domain.
        Entries where k == 0 are left uninitialized (``np.empty``).

    See also
    --------
    _fluxgate_to_grad, _field_to_grad, _grad_to_field, _field_to_fluxgate, _fluxgate_to_field
    '''
    # Unit-vector components of the ambient field
    fx, fy, fz = _dircos(inclination, declination, azimuth)

    # Radial wavenumber matrix
    k = np.sqrt(kx**2 + ky**2)
    valid = k != 0  # wavenumbers valid for the RTP

    # RTP operator for the anomaly of the vertical component
    # from [Tabbagh et al 1997]
    F = np.empty(k.shape, dtype=complex)
    numerator = k[valid]
    denominator = k[valid]*fz + 1j*(kx[valid]*fx + ky[valid]*fy)
    F[valid] = numerator / denominator
    return F
def _fluxgate_to_grad(inclination, declination, azimuth, kx, ky):
    ''' Fluxgate to gradient operator.

    Fluxgate (gradient of the vertical component of the field) to
    vertical gradient of the total-field magnitude operator
    in the spectral domain (inverse of the gradient-to-fluxgate operator).

    Parameters
    ----------
    inclination : float,
        Inclination in degrees positive below horizontal.
    declination : float,
        Declination in degrees positive east of geographic north.
    azimuth : float,
        Azimuth of x axis in degrees positive east of north.
    kx, ky : array_like
        The wavenumbers coordinates in the x and y-directions.

    Returns
    -------
    F : array
        The fluxgate to gradient operator in the spectral domain.

    See also
    --------
    _grad_to_fluxgate, _field_to_grad, _grad_to_field, _field_to_fluxgate, _fluxgate_to_field
    '''
    # Radial wavenumber matrix.
    # BUGFIX: ``k`` was previously referenced without ever being defined
    # (NameError); it is computed from kx, ky as in the sibling operators.
    k = np.sqrt(kx**2 + ky**2)
    indk = k != 0  # wavenumbers valid for the RTP (avoid k == 0)
    F = np.empty(k.shape, dtype=complex)
    F[indk] = ( _grad_to_fluxgate(inclination, declination, azimuth, kx, ky)[indk] )**-1
    return F
def _field_to_fluxgate(inclination, declination, azimuth, kx, ky, separation, z0=None, znew=None):
    r''' Total-field to fluxgate operator.

    Total-field magnitude to fluxgate (gradient of the vertical
    component of the field) operator in the spectral domain, built as the
    composition of the field-to-gradient and gradient-to-fluxgate operators.

    Parameters
    ----------
    inclination : float,
        Inclination in degrees positive below horizontal.
    declination : float,
        Declination in degrees positive east of geographic north.
    azimuth : float,
        Azimuth of x axis in degrees positive east of north.
    kx, ky : array_like
        The wavenumbers coordinates in the x and y-directions.
    separation : float
        Sensor separation.
        If **separation > 0**, the classic **bottom-top** gradient operator is returned.
        If **separation < 0**, the **top-bottom** gradient operator is returned.
    z0 : float
        Bottom sensor altitude.
    znew : float
        Bottom sensor new wanted altitude for the gradient.

    Returns
    -------
    F : array
        The field to fluxgate operator in the spectral domain.

    See also
    --------
    _fluxgate_to_field, _field_to_grad, _grad_to_field, _grad_to_fluxgate, _fluxgate_to_grad
    '''
    # Radial wavenumber matrix
    radial_k = np.sqrt(kx**2 + ky**2)
    # Compose the two conversions: Field -> Grad, then Grad -> Fluxgate.
    to_grad = _field_to_grad(separation, radial_k, z0=z0, znew=znew)
    to_flux = _grad_to_fluxgate(inclination, declination, azimuth, kx, ky)
    return to_grad * to_flux
def _fluxgate_to_field(inclination, declination, azimuth, kx, ky, separation, z0=None, znew=None):
    r''' Fluxgate to total-field operator.

    Fluxgate (gradient of the vertical component of the field) to
    total-field operator in the spectral domain (inverse of the
    field-to-fluxgate operator).

    Parameters
    ----------
    inclination : float,
        Inclination in degrees positive below horizontal.
    declination : float,
        Declination in degrees positive east of geographic north.
    azimuth : float,
        Azimuth of x axis in degrees positive east of north.
    kx, ky : array_like
        The wavenumbers coordinates in the x and y-directions.
    separation : float
        Sensor separation.
        If **separation > 0**, the classic **bottom-top** gradient operator is returned.
        If **separation < 0**, the **top-bottom** gradient operator is returned.
    z0 : float
        Bottom sensor altitude.
    znew : float
        Bottom sensor new wanted altitude for the gradient.

    Returns
    -------
    F : array
        The fluxgate to field operator in the spectral domain.

    See also
    --------
    _field_to_fluxgate, _field_to_grad, _grad_to_field, _grad_to_fluxgate, _fluxgate_to_grad
    '''
    # Radial wavenumber matrix.
    # BUGFIX: ``k`` was previously referenced without ever being defined
    # (NameError); it is computed from kx, ky as in the sibling operators.
    k = np.sqrt(kx**2 + ky**2)
    indk = k != 0  # wavenumbers valid for the RTP (avoid division by zero at k == 0)
    F = np.empty(k.shape, dtype=complex)
    # BUGFIX: a dead full-array inversion of the grad-to-fluxgate operator
    # was removed here — it was immediately overwritten and would have
    # divided by zero at k == 0.
    F[indk] = ( _field_to_fluxgate(inclination, declination, azimuth, kx, ky, separation, z0=z0, znew=znew)[indk] )**-1
    return F
def _dircos(inclination, declination, azimuth):
'''
Computes the direction cosines (unit-vector components) from
inclination, declination and azimuth.
Parameters
----------
inclination : float,
Inclination in degrees positive below horizontal.
declination : float,
Declination in degrees positive east of geographic north.
azimuth : float,
Azimuth of x axis in degrees positive east of north.
Returns
-------
vx, vy, vz : array_like,
The three unit-vector components.
Notes
-----
This function is a direct adaptation from the Subroutine B.9.
"Subroutine to calculate the three direction cosines of a vector from
its inclination and declination" in (Blakely, 96)[#]_.
References
----------
.. [#] Blakely R. J. 1996.
Potential Theory in Gravity and Magnetic Applications.
Appendix B, p381.
Cambridge University Press.
'''
# Conversion of angles to radians
incl, decl, azim = np.deg2rad([inclination, declination, azimuth])
# Unit-vector components calculation
vx = 1*np.cos(incl)*np.cos(decl-azim) # x-direction cosine
vy = 1*np.cos(incl)*np.sin(decl-azim) # y-direction cosine
vz = 1*np.sin(incl) # z-direction cosine
return np.array([vx, vy, vz])
def _freq(ix, iy, deltax, deltay, nc, nl):
''' Calculation of spatial frequencies u and v. '''
nyd = 1 + nl/2
nxd = 1 + nc/2
if (iy < nyd):
cv = (float(iy))/((nl-1)*deltay)
else:
cv = float(iy-nl)/((nl-1)*deltay)
if (ix < nxd):
cu = (float(ix))/((nc-1)*deltax)
else:
cu = float(ix-nc)/((nc-1)*deltax)
return cu, cv
def _fillnanvalues(xi, yi, val):
'''
Fills the 'nan' values by interpolated values using simple spline interpolation method !
'''
for profile in val: # for each profile,
if (np.isnan(profile).any()): # if one 'nan' at least in the profile, does the completion
nan_indexes = np.isnan(profile)
data_indexes = np.logical_not(nan_indexes)
valid_data = profile[data_indexes]
interpolated = np.interp(nan_indexes.nonzero()[0], data_indexes.nonzero()[0], valid_data)
profile[nan_indexes] = interpolated | PypiClean |
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/nodes/AttributeNodes.py | from .AttributeLookupNodes import ExpressionAttributeLookup
from .ExpressionBases import (
ExpressionChildHavingBase,
ExpressionChildrenHavingBase,
)
from .NodeBases import StatementChildHavingBase, StatementChildrenHavingBase
from .NodeMakingHelpers import (
makeCompileTimeConstantReplacementNode,
wrapExpressionWithNodeSideEffects,
)
class StatementAssignmentAttribute(StatementChildrenHavingBase):
"""Assignment to an attribute.
Typically from code like: source.attribute_name = expression
Both source and expression may be complex expressions, the source
is evaluated first. Assigning to an attribute has its on slot on
the source, which gets to decide if it knows it will work or not,
and what value it will be.
"""
__slots__ = ("attribute_name",)
kind = "STATEMENT_ASSIGNMENT_ATTRIBUTE"
named_children = ("source", "expression")
def __init__(self, expression, attribute_name, source, source_ref):
StatementChildrenHavingBase.__init__(
self,
values={"expression": expression, "source": source},
source_ref=source_ref,
)
self.attribute_name = attribute_name
def getDetails(self):
return {"attribute_name": self.attribute_name}
def getAttributeName(self):
return self.attribute_name
def computeStatement(self, trace_collection):
result, change_tags, change_desc = self.computeStatementSubExpressions(
trace_collection=trace_collection
)
if result is not self:
return result, change_tags, change_desc
return self.subnode_expression.computeExpressionSetAttribute(
set_node=self,
attribute_name=self.attribute_name,
value_node=self.subnode_source,
trace_collection=trace_collection,
)
@staticmethod
def getStatementNiceName():
return "attribute assignment statement"
class StatementDelAttribute(StatementChildHavingBase):
"""Deletion of an attribute.
Typically from code like: del source.attribute_name
The source may be complex expression. Deleting an attribute has its on
slot on the source, which gets to decide if it knows it will work or
not, and what value it will be.
"""
kind = "STATEMENT_DEL_ATTRIBUTE"
named_child = "expression"
__slots__ = ("attribute_name",)
def __init__(self, expression, attribute_name, source_ref):
StatementChildHavingBase.__init__(self, value=expression, source_ref=source_ref)
self.attribute_name = attribute_name
def getDetails(self):
return {"attribute_name": self.attribute_name}
def getAttributeName(self):
return self.attribute_name
def computeStatement(self, trace_collection):
result, change_tags, change_desc = self.computeStatementSubExpressions(
trace_collection=trace_collection
)
if result is not self:
return result, change_tags, change_desc
return self.subnode_expression.computeExpressionDelAttribute(
set_node=self,
attribute_name=self.attribute_name,
trace_collection=trace_collection,
)
@staticmethod
def getStatementNiceName():
return "attribute del statement"
def makeExpressionAttributeLookup(expression, attribute_name, source_ref):
from .AttributeNodesGenerated import attribute_classes
attribute_class = attribute_classes.get(attribute_name)
if attribute_class is not None:
assert attribute_class.attribute_name == attribute_name
return attribute_class(expression=expression, source_ref=source_ref)
else:
return ExpressionAttributeLookup(
expression=expression, attribute_name=attribute_name, source_ref=source_ref
)
class ExpressionAttributeLookupSpecial(ExpressionAttributeLookup):
"""Special lookup up an attribute of an object.
Typically from code like this: with source: pass
These directly go to slots, and are performed for with statements
of Python2.7 or higher.
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_SPECIAL"
def computeExpression(self, trace_collection):
return self.subnode_expression.computeExpressionAttributeSpecial(
lookup_node=self,
attribute_name=self.attribute_name,
trace_collection=trace_collection,
)
class ExpressionBuiltinGetattr(ExpressionChildrenHavingBase):
"""Built-in "getattr".
Typical code like this: getattr(object_arg, name, default)
The default is optional, but computed before the lookup is done.
"""
kind = "EXPRESSION_BUILTIN_GETATTR"
named_children = ("expression", "name", "default")
def __init__(self, expression, name, default, source_ref):
ExpressionChildrenHavingBase.__init__(
self,
values={"expression": expression, "name": name, "default": default},
source_ref=source_ref,
)
def computeExpression(self, trace_collection):
trace_collection.onExceptionRaiseExit(BaseException)
default = self.subnode_default
if default is None or not default.mayHaveSideEffects():
attribute = self.subnode_name
attribute_name = attribute.getStringValue()
if attribute_name is not None:
source = self.subnode_expression
if source.isKnownToHaveAttribute(attribute_name):
# If source has side effects, they must be evaluated, before
# the lookup, meaning, a temporary variable should be assigned.
# For now, we give up in this case.
side_effects = source.extractSideEffects()
if not side_effects:
result = makeExpressionAttributeLookup(
expression=source,
attribute_name=attribute_name,
source_ref=self.source_ref,
)
result = wrapExpressionWithNodeSideEffects(
new_node=result, old_node=attribute
)
return (
result,
"new_expression",
"""Replaced call to built-in 'getattr' with constant \
attribute '%s' to mere attribute lookup"""
% attribute_name,
)
return self, None, None
class ExpressionBuiltinSetattr(ExpressionChildrenHavingBase):
"""Built-in "setattr".
Typical code like this: setattr(source, attribute, value)
"""
kind = "EXPRESSION_BUILTIN_SETATTR"
named_children = ("expression", "attribute", "value")
def __init__(self, expression, name, value, source_ref):
ExpressionChildrenHavingBase.__init__(
self,
values={"expression": expression, "attribute": name, "value": value},
source_ref=source_ref,
)
def computeExpression(self, trace_collection):
trace_collection.onExceptionRaiseExit(BaseException)
# Note: Might be possible to predict or downgrade to mere attribute set.
return self, None, None
class ExpressionBuiltinHasattr(ExpressionChildrenHavingBase):
kind = "EXPRESSION_BUILTIN_HASATTR"
named_children = ("expression", "attribute")
def __init__(self, expression, name, source_ref):
ExpressionChildrenHavingBase.__init__(
self,
values={"expression": expression, "attribute": name},
source_ref=source_ref,
)
def computeExpression(self, trace_collection):
# We do at least for compile time constants optimization here, but more
# could be done, were we to know shapes.
source = self.subnode_expression
if source.isCompileTimeConstant():
attribute = self.subnode_attribute
attribute_name = attribute.getStringValue()
# TODO: Something needs to be done if it has no string value.
if attribute_name is not None:
# If source or attribute have side effects, they must be
# evaluated, before the lookup.
(
result,
tags,
change_desc,
) = trace_collection.getCompileTimeComputationResult(
node=self,
computation=lambda: hasattr(
source.getCompileTimeConstant(), attribute_name
),
description="Call to 'hasattr' pre-computed.",
)
result = wrapExpressionWithNodeSideEffects(
new_node=result, old_node=attribute
)
result = wrapExpressionWithNodeSideEffects(
new_node=result, old_node=source
)
return result, tags, change_desc
trace_collection.onExceptionRaiseExit(BaseException)
return self, None, None
class ExpressionAttributeCheck(ExpressionChildHavingBase):
kind = "EXPRESSION_ATTRIBUTE_CHECK"
named_child = "expression"
__slots__ = ("attribute_name",)
def __init__(self, expression, attribute_name, source_ref):
ExpressionChildHavingBase.__init__(
self, value=expression, source_ref=source_ref
)
self.attribute_name = attribute_name
def getDetails(self):
return {"attribute_name": self.attribute_name}
def computeExpression(self, trace_collection):
source = self.subnode_expression
# For things that know their attributes, we can statically optimize this
# into true or false, preserving side effects of course.
has_attribute = source.isKnownToHaveAttribute(self.attribute_name)
if has_attribute is not None:
result = makeCompileTimeConstantReplacementNode(
value=has_attribute, node=self, user_provided=False
)
# If source has side effects, they must be evaluated.
result = wrapExpressionWithNodeSideEffects(new_node=result, old_node=source)
return result, "new_constant", "Attribute check has been pre-computed."
# Attribute check is implemented by getting an attribute.
if source.mayRaiseExceptionAttributeLookup(BaseException, self.attribute_name):
trace_collection.onExceptionRaiseExit(BaseException)
return self, None, None
@staticmethod
def mayRaiseException(exception_type):
return False
def getAttributeName(self):
return self.attribute_name | PypiClean |
/Altair%20Smartworks%20SDK-0.0.1.tar.gz/Altair Smartworks SDK-0.0.1/openapi_client/model/thing_update_request.py | import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from openapi_client.model.thing_request_actions import ThingRequestActions
from openapi_client.model.thing_request_events import ThingRequestEvents
from openapi_client.model.thing_request_properties import ThingRequestProperties
globals()['ThingRequestActions'] = ThingRequestActions
globals()['ThingRequestEvents'] = ThingRequestEvents
globals()['ThingRequestProperties'] = ThingRequestProperties
class ThingUpdateRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'title': (str,), # noqa: E501
'description': (str,), # noqa: E501
'properties': (ThingRequestProperties,), # noqa: E501
'actions': (ThingRequestActions,), # noqa: E501
'events': (ThingRequestEvents,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'title': 'title', # noqa: E501
'description': 'description', # noqa: E501
'properties': 'properties', # noqa: E501
'actions': 'actions', # noqa: E501
'events': 'events', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ThingUpdateRequest - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
title (str): [optional] # noqa: E501
description (str): [optional] # noqa: E501
properties (ThingRequestProperties): [optional] # noqa: E501
actions (ThingRequestActions): [optional] # noqa: E501
events (ThingRequestEvents): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value) | PypiClean |
/Electrum-Zcash-Random-Fork-3.1.3b5.tar.gz/Electrum-Zcash-Random-Fork-3.1.3b5/plugins/keepkey/plugin.py | from binascii import hexlify, unhexlify
from electrum_zcash.util import bfh, bh2u
from electrum_zcash.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT)
from electrum_zcash import constants
from electrum_zcash.i18n import _
from electrum_zcash.plugins import BasePlugin
from electrum_zcash.transaction import deserialize, Transaction
from electrum_zcash.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_zcash.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKeyCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None:
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
    """Open a transport to *device* and return a sanity-checked client.

    :return: a client instance, or None when no transport can be opened,
        the device does not answer a ping, or its firmware is older than
        ``minimum_firmware``.
    """
    # disable bridge because it seems to never returns if KeepKey is plugged
    #transport = self._try_bridge(device) or self._try_hid(device)
    transport = self._try_hid(device)
    if not transport:
        self.print_error("cannot connect to device")
        return
    self.print_error("connected to device at", device.path)
    client = self.client_class(transport, handler, self)
    # Try a ping for device sanity
    try:
        client.ping('t')
    except BaseException as e:
        self.print_error("ping failed", str(e))
        return None
    if not client.atleast_version(*self.minimum_firmware):
        msg = (_('Outdated {} firmware for device labelled {}. Please '
                 'download the updated firmware from {}')
               .format(self.device, client.label(), self.firmware_URL))
        self.print_error(msg)
        handler.show_error(msg)
        return None
    return client
def get_client(self, keystore, force_pair=True):
    """Return the (possibly newly paired) client for *keystore*.

    The device manager's HID lock serialises concurrent hardware access.
    """
    devmgr = self.device_manager()
    handler = keystore.handler
    with devmgr.hid_lock:
        client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
    # returns the client for a given keystore. can use xpub
    if client:
        client.used()
    return client
def get_coin_name(self):
    """Return the network name string the device firmware expects."""
    if constants.net.TESTNET:
        return "ZcashTestnet"
    return "Zcash"
def initialize_device(self, device_id, wizard, handler):
    """Ask the user how to initialise a factory-fresh device and run it.

    Presents the four initialisation strategies and runs
    ``_initialize_device`` on a background daemon thread so the wizard UI
    stays responsive; the wizard event loop blocks until the worker calls
    ``wizard.loop.exit``.
    """
    # Initialization method
    msg = _("Choose how you want to initialize your {}.\n\n"
            "The first two methods are secure as no secret information "
            "is entered into your computer.\n\n"
            "For the last two methods you input secrets on your keyboard "
            "and upload them to your {}, and so you should "
            "only do those on a computer you know to be trustworthy "
            "and free of malware."
            ).format(self.device, self.device)
    choices = [
        # Must be short as QT doesn't word-wrap radio button text
        (TIM_NEW, _("Let the device generate a completely new seed randomly")),
        (TIM_RECOVER, _("Recover from a seed you have previously written down")),
        (TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
        (TIM_PRIVKEY, _("Upload a master private key"))
    ]
    def f(method):
        import threading
        settings = self.request_trezor_init_settings(wizard, method, self.device)
        t = threading.Thread(target=self._initialize_device,
                             args=(settings, method, device_id, wizard, handler))
        # Daemon thread so it never blocks interpreter shutdown.
        # Thread.setDaemon() is a deprecated alias; assign the attribute.
        t.daemon = True
        t.start()
        wizard.loop.exec_()
    wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
    """Worker: perform the chosen initialisation method on the device."""
    item, label, pin_protection, passphrase_protection = settings
    language = 'english'
    devmgr = self.device_manager()
    client = devmgr.client_by_id(device_id)
    if method == TIM_NEW:
        strength = 64 * (item + 2)  # 128, 192 or 256
        client.reset_device(True, strength, passphrase_protection,
                            pin_protection, label, language)
    elif method == TIM_RECOVER:
        word_count = 6 * (item + 2)  # 12, 18 or 24
        client.step = 0
        client.recovery_device(word_count, passphrase_protection,
                               pin_protection, label, language)
    elif method == TIM_MNEMONIC:
        pin = pin_protection  # It's the pin, not a boolean
        client.load_device_by_mnemonic(str(item), pin,
                                       passphrase_protection,
                                       label, language)
    else:
        pin = pin_protection  # It's the pin, not a boolean
        client.load_device_by_xprv(item, pin, passphrase_protection,
                                   label, language)
    # Unblock the wizard event loop started by initialize_device().
    wizard.loop.exit(0)
def setup_device(self, device_info, wizard, purpose):
    """Pair a device inside a wizard flow, initialising it if necessary."""
    devmgr = self.device_manager()
    device_id = device_info.device.id_
    client = devmgr.client_by_id(device_id)
    # fixme: we should use: client.handler = wizard
    client.handler = self.create_handler(wizard)
    if not device_info.initialized:
        self.initialize_device(device_id, wizard, client.handler)
    # NOTE(review): presumably fetching the root xpub forces the
    # PIN/passphrase prompt so the pairing is exercised end to end — confirm.
    client.get_xpub('m', 'standard')
    client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
    """Return the xpub at *derivation*; only 'standard' scripts are supported."""
    if xtype not in ('standard',):
        raise ScriptTypeNotSupported(_('This type of script is not supported with KeepKey.'))
    devmgr = self.device_manager()
    client = devmgr.client_by_id(device_id)
    client.handler = wizard
    xpub = client.get_xpub(derivation, xtype)
    client.used()
    return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
    """Sign *tx* on the device and merge the signatures back into it."""
    # Stashed so get_tx() can serve previous transactions to the device
    # library, and tx_inputs() can resolve xpub derivation paths.
    self.prev_tx = prev_tx
    self.xpub_path = xpub_path
    client = self.get_client(keystore)
    inputs = self.tx_inputs(tx, True)
    outputs = self.tx_outputs(keystore.get_derivation(), tx)
    signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
    raw = bh2u(signed_tx)
    tx.update_signatures(raw)
def show_address(self, wallet, address):
    """Display *address* on the device screen for visual verification."""
    client = self.get_client(wallet.keystore)
    if not client.atleast_version(1, 3):
        wallet.keystore.handler.show_error(_("Your device firmware is too old"))
        return
    change, index = wallet.get_address_index(address)
    derivation = wallet.keystore.derivation
    address_path = "%s/%d/%d"%(derivation, change, index)
    address_n = client.expand_path(address_path)
    script_type = self.types.SPENDADDRESS
    client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
    """Convert the inputs of *tx* into device protobuf TxInputType objects.

    :param tx: Electrum transaction
    :param for_sig: when True, also attach derivation paths / multisig
        redeem-script info needed for signing (not needed when merely
        serialising a previous transaction for the device).
    """
    inputs = []
    for txin in tx.inputs():
        txinputtype = self.types.TxInputType()
        if txin['type'] == 'coinbase':
            prev_hash = "\0"*32
            prev_index = 0xffffffff  # signed int -1
        else:
            if for_sig:
                x_pubkeys = txin['x_pubkeys']
                if len(x_pubkeys) == 1:
                    # Single-sig: attach the full derivation path.
                    x_pubkey = x_pubkeys[0]
                    xpub, s = parse_xpubkey(x_pubkey)
                    xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                    txinputtype.address_n.extend(xpub_n + s)
                    txinputtype.script_type = self.types.SPENDADDRESS
                else:
                    # Multisig: build the redeem-script description.
                    def f(x_pubkey):
                        if is_xpubkey(x_pubkey):
                            xpub, s = parse_xpubkey(x_pubkey)
                        else:
                            xpub = xpub_from_pubkey(0, bfh(x_pubkey))
                            s = []
                        node = self.ckd_public.deserialize(xpub)
                        return self.types.HDNodePathType(node=node, address_n=s)
                    pubkeys = map(f, x_pubkeys)
                    multisig = self.types.MultisigRedeemScriptType(
                        pubkeys=pubkeys,
                        # Strip the sighash byte from each existing signature.
                        signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
                        m=txin.get('num_sig'),
                    )
                    script_type = self.types.SPENDMULTISIG
                    txinputtype = self.types.TxInputType(
                        script_type=script_type,
                        multisig=multisig
                    )
                    # find which key is mine
                    for x_pubkey in x_pubkeys:
                        if is_xpubkey(x_pubkey):
                            xpub, s = parse_xpubkey(x_pubkey)
                            if xpub in self.xpub_path:
                                xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                                txinputtype.address_n.extend(xpub_n + s)
                                break
            prev_hash = unhexlify(txin['prevout_hash'])
            prev_index = txin['prevout_n']
        if 'value' in txin:
            txinputtype.amount = txin['value']
        txinputtype.prev_hash = prev_hash
        txinputtype.prev_index = prev_index
        if 'scriptSig' in txin:
            script_sig = bfh(txin['scriptSig'])
            txinputtype.script_sig = script_sig
        # Default sequence enables nLockTime (0xffffffff would disable it).
        txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
        inputs.append(txinputtype)
    return inputs
def tx_outputs(self, derivation, tx):
    """Convert the outputs of *tx* into device protobuf TxOutputType objects.

    Outputs recognised as our own change are sent by derivation path (so the
    device hides them from the confirmation screen); everything else is sent
    by literal address/script.  The closures below read ``amount``, ``_type``
    and ``address`` from the enclosing loop.
    """
    def create_output_by_derivation(info):
        # Output owned by this wallet: describe it by BIP32 path.
        index, xpubs, m = info
        if len(xpubs) == 1:
            script_type = self.types.PAYTOADDRESS
            address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
            txoutputtype = self.types.TxOutputType(
                amount=amount,
                script_type=script_type,
                address_n=address_n,
            )
        else:
            script_type = self.types.PAYTOMULTISIG
            address_n = self.client_class.expand_path("/%d/%d" % index)
            nodes = map(self.ckd_public.deserialize, xpubs)
            pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
            multisig = self.types.MultisigRedeemScriptType(
                pubkeys=pubkeys,
                signatures=[b''] * len(pubkeys),
                m=m)
            txoutputtype = self.types.TxOutputType(
                multisig=multisig,
                amount=amount,
                address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
                script_type=script_type)
        return txoutputtype

    def create_output_by_address():
        # External output: describe it by literal address / OP_RETURN data.
        txoutputtype = self.types.TxOutputType()
        txoutputtype.amount = amount
        if _type == TYPE_SCRIPT:
            txoutputtype.script_type = self.types.PAYTOOPRETURN
            txoutputtype.op_return_data = address[2:]
        elif _type == TYPE_ADDRESS:
            addrtype, hash_160 = b58_address_to_hash160(address)
            if addrtype == constants.net.ADDRTYPE_P2PKH:
                txoutputtype.script_type = self.types.PAYTOADDRESS
            elif addrtype == constants.net.ADDRTYPE_P2SH:
                txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
            else:
                raise Exception('addrtype: ' + str(addrtype))
            txoutputtype.address = address
        return txoutputtype

    def is_any_output_on_change_branch():
        # index[0] == 1 means the address sits on the BIP32 change chain.
        for _type, address, amount in tx.outputs():
            info = tx.output_info.get(address)
            if info is not None:
                index, xpubs, m = info
                if index[0] == 1:
                    return True
        return False

    outputs = []
    has_change = False
    any_output_on_change_branch = is_any_output_on_change_branch()

    for _type, address, amount in tx.outputs():
        use_create_by_derivation = False
        info = tx.output_info.get(address)
        if info is not None and not has_change:
            index, xpubs, m = info
            on_change_branch = index[0] == 1
            # prioritise hiding outputs on the 'change' branch from user
            # because no more than one change address allowed
            if on_change_branch == any_output_on_change_branch:
                use_create_by_derivation = True
                has_change = True
        if use_create_by_derivation:
            txoutputtype = create_output_by_derivation(info)
        else:
            txoutputtype = create_output_by_address()
        outputs.append(txoutputtype)

    return outputs
def electrum_tx_to_txtype(self, tx):
    """Convert an Electrum transaction into the device protobuf TransactionType."""
    t = self.types.TransactionType()
    d = deserialize(tx.raw)
    t.version = d['version']
    t.lock_time = d['lockTime']
    inputs = self.tx_inputs(tx)
    t.inputs.extend(inputs)
    # Previous-transaction outputs only need amount + scriptPubKey bytes.
    for vout in d['outputs']:
        o = t.bin_outputs.add()
        o.amount = vout['value']
        o.script_pubkey = bfh(vout['scriptPubKey'])
    return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
    """Serve a previously stashed transaction, converted to protobuf form."""
    return self.electrum_tx_to_txtype(self.prev_tx[tx_hash])
/Editra-0.7.20.tar.gz/Editra-0.7.20/src/autocomp/csscomp.py | __author__ = "Cody Precord <cprecord@editra.org>"
__cvsid__ = "$Id: csscomp.py 70229 2012-01-01 01:27:10Z CJP $"
__revision__ = "$Revision: 70229 $"
#--------------------------------------------------------------------------#
# Imports
import re
import wx
import wx.stc
# Local Imports
import completer
#--------------------------------------------------------------------------#
# Regular Expressions
# Anchor selector with an optional link pseudo-class suffix (e.g. "a:hover").
RE_LINK_PSEUDO = re.compile(r"a:(link|visited|active|hover|focus)*")
# A complete CSS block comment: /* ... */
# Raw strings: the previous plain strings relied on invalid escape
# sequences (\/, \*, \{) that raise SyntaxWarning on modern Python.
RE_CSS_COMMENT = re.compile(r"\/\*[^*]*\*+([^/][^*]*\*+)*\/")
# A braced CSS declaration block: { ... }
RE_CSS_BLOCK = re.compile(r"\{[^}]*\}")
# Completion symbols offered after an anchor pseudo-class colon ("a:").
PSUEDO_SYMBOLS = completer.CreateSymbols([ u'active', u'focus', u'hover',
                                           u'link', u'visited' ],
                                         )
#--------------------------------------------------------------------------#
class Completer(completer.BaseCompleter):
    """CSS code completion provider for an editor buffer."""

    def __init__(self, stc_buffer):
        """Configure the trigger keys and stop characters for CSS."""
        super(Completer, self).__init__(stc_buffer)

        # Setup: ':' and '.' open completion; '(' opens a calltip.
        self.SetAutoCompKeys([ord(':'), ord('.') ])
        self.SetAutoCompStops(' {}#')
        self.SetAutoCompFillups('')
        self.SetCallTipKeys([ord('('), ])
        self.SetCallTipCancel([ord(')'), wx.WXK_RETURN])

    def GetAutoCompList(self, command):
        """Returns the list of possible completions for a command string.
        @param command: command lookup is done on
        """
        buff = self.GetBuffer()
        keywords = buff.GetKeywords()
        if command in [None, u'']:
            return completer.CreateSymbols(keywords, completer.TYPE_UNKNOWN)

        # Text on the current line up to the caret, used for context checks.
        cpos = buff.GetCurrentPos()
        cline = buff.GetCurrentLine()
        lstart = buff.PositionFromLine(cline)
        tmp = buff.GetTextRange(lstart, cpos).rstrip()

        # Check for the case of a pseudo class
        if IsPsuedoClass(command, tmp):
            return PSUEDO_SYMBOLS

        # Give some help on some common properties
        if tmp.endswith(u':'):
            word = GetWordLeft(tmp.rstrip().rstrip(u':'))
            comps = PROP_OPTS.get(word, list())
            comps = list(set(comps))
            comps.sort()
            return completer.CreateSymbols(comps, completer.TYPE_PROPERTY)

        # Look for if we are completing a tag class
        if tmp.endswith(u'.'):
            # Harvest class names used elsewhere in the document, ignoring
            # comments and the contents of declaration blocks.
            classes = list()
            if not buff.IsString(cpos):
                txt = buff.GetText()
                txt = RE_CSS_COMMENT.sub(u'', txt)
                txt = RE_CSS_BLOCK.sub(u' ', txt)
                for token in txt.split():
                    if u'.' in token:
                        classes.append(token.split(u'.', 1)[-1])
            classes = list(set(classes))
            classes.sort()
            return completer.CreateSymbols(classes, completer.TYPE_CLASS)

        return completer.CreateSymbols(keywords, completer.TYPE_UNKNOWN)

    def GetCallTip(self, command):
        """Returns the formated calltip string for the command."""
        if command == u'url':
            return u'url(\'../path\')'
        else:
            return u''

    def ShouldCheck(self, cpos):
        """Should completions be attempted
        @param cpos: current buffer position
        @return: bool
        """
        # Never auto-complete inside a comment.
        buff = self.GetBuffer()
        rval = True
        if buff is not None:
            if buff.IsComment(cpos):
                rval = False
        return rval
#--------------------------------------------------------------------------#
def IsPsuedoClass(cmd, line):
    """Check the line to see if it is an anchor link pseudo class ("a:...").
    @param cmd: current command (text being completed)
    @param line: line text up to the caret
    @return: bool
    """
    if cmd.endswith(u':'):
        tokens = line.split()
        if not tokens:
            # Guard: an empty/whitespace-only line used to raise IndexError.
            return False
        pieces = tokens[-1].split(u":")
        # Matches bare anchors ("a") and classed anchors ("a.nav").
        if pieces[0] == 'a' or pieces[0].startswith('a.'):
            return True
    return False
def GetWordLeft(line):
    """Get the word immediately left of the end of the line, i.e. the text
    after the last whitespace, '{' or ';' delimiter.
    @param line: Line text
    @return: string (empty when no delimiter is present)
    """
    for idx in range(1, len(line) + 1):
        ch = line[-idx]
        if ch.isspace() or ch in u'{;':
            # Return only the text after the delimiter.  Previously a '{'
            # or ';' delimiter was kept in the result (only whitespace was
            # stripped), which broke PROP_OPTS lookups on the bare word.
            return line[len(line) - idx + 1:].strip()
    return u''
#--------------------------------------------------------------------------#
# Properties to provide some input help on: maps a CSS property name to the
# value keywords offered after typing "property:".
PROP_OPTS = { u'border-style' : [u'none', u'hidden', u'dotted', u'dashed',
                                 u'solid', u'double', u'groove', u'ridge',
                                 u'inset', u'outset'],
              u'float' : [u'left', u'right', u'none'],
              u'font-style' : [u'normal', u'italic', u'oblique'],
              u'font-weight' : [u'normal', u'bold', u'lighter', u'bolder'],
              u'list-style-type' : [u'none', u'disc', u'circle', u'square',
                                    u'decimal', u'decimal-leading-zero',
                                    u'lower-roman', u'upper-roman',
                                    u'lower-alpha', u'upper-alpha',
                                    u'lower-greek', u'lower-latin', u'hebrew',
                                    u'armenian', u'georgian', u'cjk-ideographic',
                                    u'hiragana', u'katakana',
                                    u'hiragana-iroha', u'katakana-iroha'],
              u'text-decoration' : [u'none', u'underline', u'line-through',
                                    u'overline', u'blink'],
              u'text-align' : [u'left', u'right', u'center', u'justify'],
              u'vertical-align' : [u'baseline', u'sub', u'super', u'top',
                                   u'text-top', u'middle', u'bottom',
                                   u'text-bottom', ]
              }
/Mah_lib-0.1.tar.gz/Mah_lib-0.1/Mah_lib/Gaussiandistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction (n - 1) when the data is a sample.
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)

        mean = self.calculate_mean()

        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)

        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        mu = self.mean
        sigma = self.stdev

        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []
        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)

        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')

        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Bug fix: this previously re-labelled axes[0], leaving the pdf
        # subplot without a y-axis label.
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances (not standard deviations) of independent Gaussians add.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
/Foundations-2.1.0.tar.gz/Foundations-2.1.0/foundations/core.py | #**********************************************************************************************************************
#*** Future imports.
#**********************************************************************************************************************
from __future__ import unicode_literals
#**********************************************************************************************************************
#*** External imports.
#**********************************************************************************************************************
import sys
import time
#**********************************************************************************************************************
#*** Internal imports.
#**********************************************************************************************************************
import foundations.verbose
#**********************************************************************************************************************
#*** Module attributes.
#**********************************************************************************************************************
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2014 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "thomas.mansencal@gmail.com"
__status__ = "Production"
# Public API of this module.
__all__ = ["LOGGER",
           "exit",
           "wait"]

# Module level logger shared by the definitions below.
LOGGER = foundations.verbose.installLogger()
#**********************************************************************************************************************
#*** Module classes and definitions.
#**********************************************************************************************************************
def exit(exitCode=0):
    """
    Shuts down current process logging, associated handlers and then exits to system.

    :param exitCode: System exit code.
    :type exitCode: Integer or String or Object

    :note: **exitCode** argument is passed to Python :func:`sys.exit` definition.
    """

    LOGGER.debug("> {0} | Exiting current process!".format(__name__))

    LOGGER.debug("> Stopping logging handlers and logger!")
    # Iterate over a snapshot: removing a handler mutates "LOGGER.handlers",
    # which would otherwise skip every other handler during iteration.
    for handler in list(LOGGER.handlers):
        foundations.verbose.removeLoggingHandler(handler)

    sys.exit(exitCode)
def wait(waitTime):
    """
    Halts the current process execution for a user defined duration.

    :param waitTime: Sleep duration in seconds.
    :type waitTime: float
    :return: Definition success.
    :rtype: bool
    """

    LOGGER.debug("> Waiting '{0}' seconds!".format(waitTime))

    time.sleep(waitTime)

    return True
/DJModels-0.0.6-py3-none-any.whl/djmodels/contrib/gis/gdal/raster/const.py | from ctypes import (
c_double, c_float, c_int16, c_int32, c_ubyte, c_uint16, c_uint32,
)
# GDAL pixel data types, indexed by the integer codes GDAL reports.
# See http://www.gdal.org/gdal_8h.html#a22e22ce0a55036a96f652765793fb7a4
GDAL_PIXEL_TYPES = {
    0: 'GDT_Unknown',  # Unknown or unspecified type
    1: 'GDT_Byte',  # Eight bit unsigned integer
    2: 'GDT_UInt16',  # Sixteen bit unsigned integer
    3: 'GDT_Int16',  # Sixteen bit signed integer
    4: 'GDT_UInt32',  # Thirty-two bit unsigned integer
    5: 'GDT_Int32',  # Thirty-two bit signed integer
    6: 'GDT_Float32',  # Thirty-two bit floating point
    7: 'GDT_Float64',  # Sixty-four bit floating point
    8: 'GDT_CInt16',  # Complex Int16
    9: 'GDT_CInt32',  # Complex Int32
    10: 'GDT_CFloat32',  # Complex Float32
    11: 'GDT_CFloat64',  # Complex Float64
}

# A list of gdal datatypes that are integers.
GDAL_INTEGER_TYPES = [1, 2, 3, 4, 5]

# Lookup values to convert GDAL pixel type indices into ctypes objects.
# The GDAL band-io works with ctypes arrays to hold data to be written
# or to hold the space for data to be read into. The lookup below helps
# selecting the right ctypes object for a given gdal pixel type.
GDAL_TO_CTYPES = [
    None, c_ubyte, c_uint16, c_int16, c_uint32, c_int32,
    c_float, c_double, None, None, None, None
]

# List of resampling algorithms that can be used to warp a GDALRaster.
GDAL_RESAMPLE_ALGORITHMS = {
    'NearestNeighbour': 0,
    'Bilinear': 1,
    'Cubic': 2,
    'CubicSpline': 3,
    'Lanczos': 4,
    'Average': 5,
    'Mode': 6,
}

# GDAL color interpretations for raster bands.
# See http://www.gdal.org/gdal_8h.html#ace76452d94514561fffa8ea1d2a5968c
GDAL_COLOR_TYPES = {
    0: 'GCI_Undefined',  # Undefined, default value, i.e. not known
    1: 'GCI_GrayIndex',  # Greyscale
    2: 'GCI_PaletteIndex',  # Paletted
    3: 'GCI_RedBand',  # Red band of RGBA image
    4: 'GCI_GreenBand',  # Green band of RGBA image
    5: 'GCI_BlueBand',  # Blue band of RGBA image
    6: 'GCI_AlphaBand',  # Alpha (0=transparent, 255=opaque)
    7: 'GCI_HueBand',  # Hue band of HLS image
    8: 'GCI_SaturationBand',  # Saturation band of HLS image
    9: 'GCI_LightnessBand',  # Lightness band of HLS image
    10: 'GCI_CyanBand',  # Cyan band of CMYK image
    11: 'GCI_MagentaBand',  # Magenta band of CMYK image
    12: 'GCI_YellowBand',  # Yellow band of CMYK image
    13: 'GCI_BlackBand',  # Black band of CMYK image (typo "CMLY" fixed)
    14: 'GCI_YCbCr_YBand',  # Y Luminance
    15: 'GCI_YCbCr_CbBand',  # Cb Chroma
    16: 'GCI_YCbCr_CrBand',  # Cr Chroma, also GCI_Max
}

# Fixed base path for buffer-based GDAL in-memory files.
VSI_FILESYSTEM_BASE_PATH = '/vsimem/'

# Should the memory file system take ownership of the buffer, freeing it when
# the file is deleted? (No, GDALRaster.__del__() will delete the buffer.)
VSI_TAKE_BUFFER_OWNERSHIP = False

# Should a VSI file be removed when retrieving its buffer?
VSI_DELETE_BUFFER_ON_READ = False
/GENDIS-1.0.14.tar.gz/GENDIS-1.0.14/gendis/docs/_build/html/searchindex.js | Search.setIndex({docnames:["ccc","gendis","index","install","start"],envversion:53,filenames:["ccc.rst","gendis.rst","index.rst","install.rst","start.rst"],objects:{"gendis.genetic":{GeneticExtractor:[1,0,1,""]}},objnames:{"0":["py","class","Python class"]},objtypes:{"0":"py:class"},terms:{"boolean":1,"class":[1,2],"float":1,"import":[1,4],"int":1,For:[0,3,4],The:[1,2],abl:2,accuraci:4,accuracy_scor:4,achiev:2,add:3,add_noise_prob:[1,4],add_shapelet_prob:[1,4],added:1,addition:4,after:1,algorithm:[1,2],all:4,altern:3,ani:0,arrai:1,art:2,axi:[2,4],been:[1,2],befor:1,better:2,both:1,bug:0,calcul:1,can:[0,2,3,4],certain:2,chanc:1,cite:2,classif:2,classifi:2,clone:3,code:4,com:3,construct:2,contact:2,contain:2,contribut:2,correspond:2,creat:[0,2],cross:1,crossov:2,crossover_prob:[1,4],current:3,data:4,data_fil:4,datafil:4,dataset:[1,2],delet:1,descript:4,dict:1,dictionari:1,discrimin:2,dist:3,distanc:[1,2],distances_test:4,distances_train:4,document:4,domain:2,dot:0,drop:4,each:2,end:4,equal:1,evalu:4,everi:1,everywher:3,evolut:1,exampl:1,experienc:0,extract:[1,2],extractor:1,fals:[1,4],fashion:2,featur:[1,4],feel:0,find:1,first:[1,4],fit:[1,2],fit_transform:1,fittest:1,format:4,found:1,free:0,from:[1,3,4],futur:0,gaussian:1,gen:1,gendi:[0,3,4],gener:1,genet:[1,2,4],genetic_extractor:4,geneticextractor:[1,2],get:2,gill:0,git:3,github:3,good:1,has:[1,2],have:[0,1],host:3,http:3,ibcnservic:3,implement:[0,2],improv:1,increas:1,index:[],individu:1,insensit:2,instal:2,issu:0,iter:[1,4],its:2,just:3,label:[1,4],label_map:1,least:4,like:[0,1],linear_model:[1,4],list:4,load:2,logisticregress:[1,4],mai:1,map:1,matric:4,matrix:2,maximum:1,method:1,metric:4,modul:[],mutat:2,n_job:[1,4],n_ts_per_blob:1,nearbi:0,need:4,nois:1,noise_level:1,none:1,norm:[1,4],normal:1,notebook:4,now:0,number:1,numpi:1,object:[1,2],order:4,origin:2,other:2,our:1,over:1,packag:3,page:[],panda:4,paper:0,paramet:[1,2,4],per:1,per
form:2,pip:3,pipelin:4,pleas:[0,4],plethora:2,plot:[1,4],popul:[1,2],population_s:[1,4],possibl:4,predict:[2,4],preprocess:2,print:[1,4],probabl:[1,2],project:2,publish:0,pull:0,pypi:3,python3:3,python:3,question:0,quickli:2,random:1,random_walk_blob:1,rang:1,read:4,read_csv:4,refer:[0,4],remove_shapelet_prob:[1,4],repositori:[0,2,3],request:0,requir:3,reshap:1,result:2,run:[1,3],runtim:1,score:1,search:2,seed:1,select:1,seri:2,set:[1,2],shape:1,shapelet:[1,2,4],show:1,shown:2,similar:2,simpl:1,size:[1,2],sklearn:[1,4],small:2,smaller:1,solut:1,some:1,sourc:1,space:2,split:4,start:2,state:2,statist:1,step:4,stop:1,subseri:2,support:3,take:0,target:4,techniqu:2,test:4,test_df:4,than:1,thi:[0,1,2],thread:1,time:2,timeseri:4,train_df:4,transform:4,tslearn:1,two:[1,3],txt:3,ugent:0,use:[1,3],vandewiel:0,vector:4,verbos:[1,4],wait:[1,4],well:4,where:2,whether:1,which:0,would:0,x_test:4,x_train:4,y_test:4,y_train:4,you:[0,3],your:3},titles:["Contributing, Citing and Contact","GENDIS","Welcome to GENDIS\u2019s documentation!","Installation","Getting Started"],titleterms:{cite:0,classifi:4,construct:4,contact:0,contribut:0,creat:4,dataset:4,distanc:4,document:2,fit:4,gendi:[1,2],geneticextractor:4,get:4,indic:[],instal:3,load:4,matrix:4,object:4,preprocess:4,start:4,tabl:[],welcom:2}}) | PypiClean |
/MGP_SDK-1.1.1.tar.gz/MGP_SDK-1.1.1/src/MGP_SDK/monitor_service/interface.py | from MGP_SDK.monitor_service.monitoring import Monitoring
class Interface:
    """
    The primary interface for interacting with the Monitoring class.

    Args:
        auth = An authenticated MGP_SDK auth object (built from your username,
            password and client id); all monitoring requests are authorized
            through it.
    """

    def __init__(self, auth):
        self.auth = auth
        self.monitoring = Monitoring(self.auth)

    def new_monitor(self, source: str, validate=False, **kwargs):
        """
        Creates a new monitor
        Args:
            source (string) = the ID of the event source to listen to
            validate (bool) = Binary whether to validate tasking request. Defaults to False
        Kwargs:
            start_datetime (string) = ISO-8601-formatted datetime string indicating when the monitor should start
            end_datetime (string) = ISO-8601-formatted datetime string indicating when the monitor should end
            description (string) = A human-friendly description of the monitor
            intersects (dict) = A GeoJSON geometry indicating the area of interest for the monitor
            match_criteria (dict) = The fields and values to match against; criteria are specified using a JSON object
            monitor_notifications (list) = Destination(s) where notifications should be sent
            order_templates (list) = Orders to be placed automatically when an event matches the monitor's criteria
        Returns:
            JSON response
        """
        return self.monitoring.create_new_monitor(source, validate, **kwargs)

    def toggle_monitor_status(self, monitor_id: str, status: str):
        """
        Toggles the 'enabled' status of a monitor
        Args:
            monitor_id (string) = the ID of the monitor
            status (string) = enable or disable
        Returns:
            Whether the status change has been accepted
        Throws:
            ValueError: If status is neither 'enable' nor 'disable'
            Exception: If the status provided is already applied to the monitor
        """
        # Reject unknown actions up front instead of silently forwarding them
        # to the service.
        if status not in ('enable', 'disable'):
            raise ValueError(f"status must be 'enable' or 'disable', got {status!r}")
        current_status = self.get_monitor(monitor_id)['data']['enabled']
        if (status == 'enable' and current_status is True) or (status == 'disable' and current_status is False):
            raise Exception(f'Monitor {monitor_id} is already {status}d.')
        return self.monitoring.toggle_monitor(monitor_id, status)

    def get_monitor(self, monitor_id: str):
        """
        Retrieves a monitor configuration
        Args:
            monitor_id (string) = the ID of the monitor
        Returns:
            JSON response
        """
        return self.monitoring.get_monitor_by_id(monitor_id)

    def get_monitor_list(self, **kwargs):
        """
        Retrieves a list of monitor configurations
        Kwargs:
            limit (int) = number of monitors to return, defaults to 10
            filter (string) | (string(list)) = filter results that match values contained in the given key separated by
            a colon
            sort (string) = asc (default) or desc
        Returns:
            JSON response
        Throws:
            ValueError: If limit is not an int and greater than 0.
            Exception: If filter and sort are not formatted properly.
        """
        return self.monitoring.monitor_list(**kwargs)

    def get_monitor_events(self, monitor_id: str, **kwargs):
        """
        Retrieves a list of events for a monitor
        Args:
            monitor_id (string) = the ID of the monitor
        Kwargs:
            filter (string) | (string(list)) = filter results that match values contained in the given key separated by
            a colon. If multiple filters are needed, provide as a list of filters
            sort (string) = asc (default) or desc
        Returns:
            JSON Response
        Throws:
            Exception: If filter and sort are not formatted properly.
        """
        return self.monitoring.monitor_events_list(monitor_id, **kwargs)
/Bluebook-0.0.1.tar.gz/Bluebook-0.0.1/pylot/component/static/pylot/vendor/mdeditor/bower_components/codemirror/mode/vbscript/vbscript.js | CodeMirror.defineMode("vbscript", function(conf, parserConf) {
var ERRORCLASS = 'error';
function wordRegexp(words) {
return new RegExp("^((" + words.join(")|(") + "))\\b", "i");
}
var singleOperators = new RegExp("^[\\+\\-\\*/&\\\\\\^<>=]");
var doubleOperators = new RegExp("^((<>)|(<=)|(>=))");
var singleDelimiters = new RegExp('^[\\.,]');
var brakets = new RegExp('^[\\(\\)]');
var identifiers = new RegExp("^[A-Za-z][_A-Za-z0-9]*");
var openingKeywords = ['class','sub','select','while','if','function', 'property', 'with', 'for'];
var middleKeywords = ['else','elseif','case'];
var endKeywords = ['next','loop','wend'];
var wordOperators = wordRegexp(['and', 'or', 'not', 'xor', 'is', 'mod', 'eqv', 'imp']);
var commonkeywords = ['dim', 'redim', 'then', 'until', 'randomize',
'byval','byref','new','property', 'exit', 'in',
'const','private', 'public',
'get','set','let', 'stop', 'on error resume next', 'on error goto 0', 'option explicit', 'call', 'me'];
//This list was from: http://msdn.microsoft.com/en-us/library/f8tbc79x(v=vs.84).aspx
var atomWords = ['true', 'false', 'nothing', 'empty', 'null'];
//This list was from: http://msdn.microsoft.com/en-us/library/3ca8tfek(v=vs.84).aspx
var builtinFuncsWords = ['abs', 'array', 'asc', 'atn', 'cbool', 'cbyte', 'ccur', 'cdate', 'cdbl', 'chr', 'cint', 'clng', 'cos', 'csng', 'cstr', 'date', 'dateadd', 'datediff', 'datepart',
'dateserial', 'datevalue', 'day', 'escape', 'eval', 'execute', 'exp', 'filter', 'formatcurrency', 'formatdatetime', 'formatnumber', 'formatpercent', 'getlocale', 'getobject',
'getref', 'hex', 'hour', 'inputbox', 'instr', 'instrrev', 'int', 'fix', 'isarray', 'isdate', 'isempty', 'isnull', 'isnumeric', 'isobject', 'join', 'lbound', 'lcase', 'left',
'len', 'loadpicture', 'log', 'ltrim', 'rtrim', 'trim', 'maths', 'mid', 'minute', 'month', 'monthname', 'msgbox', 'now', 'oct', 'replace', 'rgb', 'right', 'rnd', 'round',
'scriptengine', 'scriptenginebuildversion', 'scriptenginemajorversion', 'scriptengineminorversion', 'second', 'setlocale', 'sgn', 'sin', 'space', 'split', 'sqr', 'strcomp',
'string', 'strreverse', 'tan', 'time', 'timer', 'timeserial', 'timevalue', 'typename', 'ubound', 'ucase', 'unescape', 'vartype', 'weekday', 'weekdayname', 'year'];
//This list was from: http://msdn.microsoft.com/en-us/library/ydz4cfk3(v=vs.84).aspx
// Built-in VBScript constants (colors, comparison modes, weekdays, date
// formats, MsgBox flags, string constants, VarType codes).
var builtinConsts = ['vbBlack', 'vbRed', 'vbGreen', 'vbYellow', 'vbBlue', 'vbMagenta', 'vbCyan', 'vbWhite', 'vbBinaryCompare', 'vbTextCompare',
'vbSunday', 'vbMonday', 'vbTuesday', 'vbWednesday', 'vbThursday', 'vbFriday', 'vbSaturday', 'vbUseSystemDayOfWeek', 'vbFirstJan1', 'vbFirstFourDays', 'vbFirstFullWeek',
'vbGeneralDate', 'vbLongDate', 'vbShortDate', 'vbLongTime', 'vbShortTime', 'vbObjectError',
'vbOKOnly', 'vbOKCancel', 'vbAbortRetryIgnore', 'vbYesNoCancel', 'vbYesNo', 'vbRetryCancel', 'vbCritical', 'vbQuestion', 'vbExclamation', 'vbInformation', 'vbDefaultButton1', 'vbDefaultButton2',
'vbDefaultButton3', 'vbDefaultButton4', 'vbApplicationModal', 'vbSystemModal', 'vbOK', 'vbCancel', 'vbAbort', 'vbRetry', 'vbIgnore', 'vbYes', 'vbNo',
// NOTE(review): 'VbCrLf' has a capitalized V unlike its siblings — presumably
// harmless if wordRegexp builds a case-insensitive pattern; confirm.
'vbCr', 'VbCrLf', 'vbFormFeed', 'vbLf', 'vbNewLine', 'vbNullChar', 'vbNullString', 'vbTab', 'vbVerticalTab', 'vbUseDefault', 'vbTrue', 'vbFalse',
'vbEmpty', 'vbNull', 'vbInteger', 'vbLong', 'vbSingle', 'vbDouble', 'vbCurrency', 'vbDate', 'vbString', 'vbObject', 'vbError', 'vbBoolean', 'vbVariant', 'vbDataObject', 'vbDecimal', 'vbByte', 'vbArray'];
//This list was from: http://msdn.microsoft.com/en-us/library/hkc375ea(v=vs.84).aspx
// Built-in host objects plus well-known properties/methods of RegExp, Err, etc.
var builtinObjsWords = ['WScript', 'err', 'debug', 'RegExp'];
var knownProperties = ['description', 'firstindex', 'global', 'helpcontext', 'helpfile', 'ignorecase', 'length', 'number', 'pattern', 'source', 'value', 'count'];
var knownMethods = ['clear', 'execute', 'raise', 'replace', 'test', 'write', 'writeline', 'close', 'open', 'state', 'eof', 'update', 'addnew', 'end', 'createobject', 'quit'];
// Classic ASP intrinsic objects and their members (only merged in when the
// mode is configured with conf.isASP below).
var aspBuiltinObjsWords = ['server', 'response', 'request', 'session', 'application'];
var aspKnownProperties = ['buffer', 'cachecontrol', 'charset', 'contenttype', 'expires', 'expiresabsolute', 'isclientconnected', 'pics', 'status', //response
'clientcertificate', 'cookies', 'form', 'querystring', 'servervariables', 'totalbytes', //request
'contents', 'staticobjects', //application
'codepage', 'lcid', 'sessionid', 'timeout', //session
'scripttimeout']; //server
var aspKnownMethods = ['addheader', 'appendtolog', 'binarywrite', 'end', 'flush', 'redirect', //response
'binaryread', //request
'remove', 'removeall', 'lock', 'unlock', //application
'abandon', //session
'getlasterror', 'htmlencode', 'mappath', 'transfer', 'urlencode']; //server
var knownWords = knownMethods.concat(knownProperties);
builtinObjsWords = builtinObjsWords.concat(builtinConsts);
if (conf.isASP){
builtinObjsWords = builtinObjsWords.concat(aspBuiltinObjsWords);
knownWords = knownWords.concat(aspKnownMethods, aspKnownProperties);
}; // (stray ';' after the block is harmless)
// Compile the word lists into regexes used by tokenBase below.
var keywords = wordRegexp(commonkeywords);
var atoms = wordRegexp(atomWords);
var builtinFuncs = wordRegexp(builtinFuncsWords);
var builtinObjs = wordRegexp(builtinObjsWords);
var known = wordRegexp(knownWords);
var stringPrefixes = '"';
// Indentation-driving keyword groups: opening (sub/if/...), middle (else/...),
// closing (end xxx/...), the bare 'end' double-closer, and 'do'.
var opening = wordRegexp(openingKeywords);
var middle = wordRegexp(middleKeywords);
var closing = wordRegexp(endKeywords);
var doubleClosing = wordRegexp(['end']);
var doOpening = wordRegexp(['do']);
var noIndentWords = wordRegexp(['on error resume next', 'exit']);
var comment = wordRegexp(['rem']);
// Increase the mode's tracked indentation depth by one level.
function indent(_stream, state) {
    state.currentIndent += 1;
}
// Decrease the mode's tracked indentation depth by one level.
function dedent(_stream, state) {
    state.currentIndent -= 1;
}
// tokenizers
// Core tokenizer: classifies the next token on the stream and updates the
// indentation counters for block keywords. Match order matters: comments,
// numbers, strings, operators/delimiters, indentation keywords, then the
// word classes (keywords, atoms, known members, builtins, identifiers).
function tokenBase(stream, state) {
    if (stream.eatSpace()) {
        // 'space' is mapped to null by the token() wrapper further below
        return 'space';
        //return null;
    }
    var ch = stream.peek();
    // Handle Comments
    if (ch === "'") {
        stream.skipToEnd();
        return 'comment';
    }
    if (stream.match(comment)){
        // legacy REM comments
        stream.skipToEnd();
        return 'comment';
    }
    // Handle Number Literals
    // First probe (without consuming) that a number starts here and is not
    // immediately followed by an identifier character.
    if (stream.match(/^((&H)|(&O))?[0-9\.]/i, false) && !stream.match(/^((&H)|(&O))?[0-9\.]+[a-z_]/i, false)) {
        var floatLiteral = false;
        // Floats
        if (stream.match(/^\d*\.\d+/i)) { floatLiteral = true; }
        else if (stream.match(/^\d+\.\d*/)) { floatLiteral = true; }
        else if (stream.match(/^\.\d+/)) { floatLiteral = true; }
        if (floatLiteral) {
            // Float literals may be "imaginary"
            stream.eat(/J/i);
            return 'number';
        }
        // Integers
        var intLiteral = false;
        // Hex
        if (stream.match(/^&H[0-9a-f]+/i)) { intLiteral = true; }
        // Octal
        else if (stream.match(/^&O[0-7]+/i)) { intLiteral = true; }
        // Decimal
        else if (stream.match(/^[1-9]\d*F?/)) {
            // Decimal literals may be "imaginary"
            stream.eat(/J/i);
            // TODO - Can you have imaginary longs?
            intLiteral = true;
        }
        // Zero by itself with no other piece of number.
        else if (stream.match(/^0(?![\dx])/i)) { intLiteral = true; }
        if (intLiteral) {
            // Integer literals may be "long"
            stream.eat(/L/i);
            return 'number';
        }
    }
    // Handle Strings
    if (stream.match(stringPrefixes)) {
        // switch to the string tokenizer until the closing quote
        state.tokenize = tokenStringFactory(stream.current());
        return state.tokenize(stream, state);
    }
    // Handle operators and Delimiters
    if (stream.match(doubleOperators)
        || stream.match(singleOperators)
        || stream.match(wordOperators)) {
        return 'operator';
    }
    if (stream.match(singleDelimiters)) {
        return null;
    }
    if (stream.match(brakets)) {
        return "bracket";
    }
    if (stream.match(noIndentWords)) {
        // 'on error resume next' / 'exit': keyword with no indent change
        state.doInCurrentLine = true;
        return 'keyword';
    }
    if (stream.match(doOpening)) {
        indent(stream,state);
        state.doInCurrentLine = true;
        return 'keyword';
    }
    if (stream.match(opening)) {
        if (! state.doInCurrentLine)
            indent(stream,state);
        else
            state.doInCurrentLine = false;
        return 'keyword';
    }
    if (stream.match(middle)) {
        return 'keyword';
    }
    if (stream.match(doubleClosing)) {
        // bare 'end' dedents two levels
        dedent(stream,state);
        dedent(stream,state);
        return 'keyword';
    }
    if (stream.match(closing)) {
        if (! state.doInCurrentLine)
            dedent(stream,state);
        else
            state.doInCurrentLine = false;
        return 'keyword';
    }
    if (stream.match(keywords)) {
        return 'keyword';
    }
    if (stream.match(atoms)) {
        return 'atom';
    }
    if (stream.match(known)) {
        return 'variable-2';
    }
    if (stream.match(builtinFuncs)) {
        return 'builtin';
    }
    if (stream.match(builtinObjs)){
        return 'variable-2';
    }
    if (stream.match(identifiers)) {
        return 'variable';
    }
    // Handle non-detected items
    stream.next();
    return ERRORCLASS;
}
// Build a tokenizer for the body of a string opened with the given quote.
// The returned function consumes up to (and including) the closing quote,
// then restores tokenBase as the active tokenizer.
function tokenStringFactory(quote) {
    var isSingleLine = quote.length == 1;
    var STRING_CLASS = 'string';
    return function(stream, state) {
        for (;;) {
            if (stream.eol()) break;
            stream.eatWhile(/[^'"]/);
            if (stream.match(quote)) {
                state.tokenize = tokenBase;
                return STRING_CLASS;
            }
            // a quote character that is not the closing delimiter
            stream.eat(/['"]/);
        }
        // reached end of line without a closing quote
        if (isSingleLine) {
            if (parserConf.singleLineStringErrors) {
                return ERRORCLASS;
            }
            state.tokenize = tokenBase;
        }
        return STRING_CLASS;
    };
}
// Wrapper around the active tokenizer that re-classifies identifiers that
// follow a '.' (member access), mapping known member names to 'variable-2'.
function tokenLexer(stream, state) {
    var style = state.tokenize(stream, state);
    var current = stream.current();
    // Handle '.' connected identifiers
    if (current === '.') {
        // tokenize the word after the dot
        style = state.tokenize(stream, state);
        current = stream.current();
        if (style.substr(0, 8) === 'variable' || style==='builtin' || style==='keyword'){//|| knownWords.indexOf(current.substring(1)) > -1) {
            if (style === 'builtin' || style === 'keyword') style='variable';
            // NOTE(review): substr(1) assumes current still includes the
            // leading '.' — confirm against StringStream.current semantics
            if (knownWords.indexOf(current.substr(1)) > -1) style='variable-2';
            return style;
        } else {
            return ERRORCLASS;
        }
    }
    return style;
}
// The mode object handed back to CodeMirror.
var external = {
    electricChars:"dDpPtTfFeE ",
    startState: function() {
        return {
            tokenize: tokenBase,
            lastToken: null,
            currentIndent: 0,
            nextLineIndent: 0,
            doInCurrentLine: false,
            ignoreKeyword: false
        };
    },
    token: function(stream, state) {
        if (stream.sol()) {
            // carry over indentation accumulated for the new line
            state.currentIndent += state.nextLineIndent;
            state.nextLineIndent = 0;
            // NOTE(review): 0 rather than false — falsy either way
            state.doInCurrentLine = 0;
        }
        var style = tokenLexer(stream, state);
        state.lastToken = {style:style, content: stream.current()};
        // whitespace is reported as 'space' internally but styled as null
        if (style==='space') style=null;
        return style;
    },
    indent: function(state, textAfter) {
        var trueText = textAfter.replace(/^\s+|\s+$/g, '') ;
        // closing/middle keywords on the new line sit one level shallower
        if (trueText.match(closing) || trueText.match(doubleClosing) || trueText.match(middle)) return conf.indentUnit*(state.currentIndent-1);
        if(state.currentIndent < 0) return 0;
        return state.currentIndent * conf.indentUnit;
    }
};
return external;
});
CodeMirror.defineMIME("text/vbscript", "vbscript");
=====================================
PyCrust - The Flakiest Python Shell
=====================================
Half-baked by Patrick K. O'Brien (pobrien@orbtech.com)
Orbtech - "Your source for Python programming expertise."
Sample all our half-baked Python goods at www.orbtech.com.
What is PyCrust?
----------------
PyCrust is an interactive Python environment written in Python.
PyCrust components can run standalone or be integrated into other
development environments and/or other Python applications.
PyCrust comes with an interactive Python shell (PyShell), an
interactive namespace/object tree control (PyFilling) and an
integrated, split-window combination of the two (PyCrust).
What is PyCrust good for?
-------------------------
Have you ever tried to bake a pie without one? Well, you shouldn't
build a Python program without a PyCrust either.
What else do I need to use PyCrust?
-----------------------------------
PyCrust requires Python 2.2 or later, and wxPython 2.4 or later.
PyCrust uses wxPython and the Scintilla wrapper (wxStyledTextCtrl).
Python is available at http://www.python.org/. wxPython is available
at http://www.wxpython.org/.
Where can I get the latest version of PyCrust?
----------------------------------------------
The latest production version ships with wxPython. The latest
developer version is available in the wxWindows CVS at:
http://cvs.wxwindows.org/viewcvs.cgi/
Where is the PyCrust project hosted?
------------------------------------
The old answer was "At SourceForge, of course." The SourceForge
summary page is still available at:
http://sourceforge.net/projects/pycrust/
The new answer is that there is no longer a need for a separate
project. Simply install wxPython and you'll have everything you need.
I found a bug in PyCrust, what do I do with it?
-----------------------------------------------
You can send it to me at pobrien@orbtech.com.
I want a new feature added to PyCrust. Will you do it?
------------------------------------------------------
Flattery and money will get you anything. Short of that, you can send
me a request and I'll see what I can do.
Does PyCrust have a mailing list full of wonderful people?
----------------------------------------------------------
As a matter of fact, we do. Join the PyCrust mailing lists at:
http://sourceforge.net/mail/?group_id=31263
import os
import re
import enum
from collections import (
namedtuple,
)
from ..common import (
CONFIG_CACHE_FORCE_DEFAULT_ARGUMENTS,
debug,
)
from . import Util
from . import Config
from . import Registry
from . import WinSDK
from .Exceptions import (
MSVCInternalError,
MSVCSDKVersionNotFound,
MSVCToolsetVersionNotFound,
MSVCSpectreLibsNotFound,
MSVCArgumentError,
)
from . import Dispatcher
Dispatcher.register_modulename(__name__)
# Script argument: boolean True
# Values accepted as "enabled" for MSVC_UWP_APP; the string '1' is the legacy
# spelling and is still honored.
_ARGUMENT_BOOLEAN_TRUE_LEGACY = (True, '1') # MSVC_UWP_APP
# Values accepted as "enabled" for the newer boolean-only arguments.
_ARGUMENT_BOOLEAN_TRUE = (True,)
# TODO: verify SDK 10 version folder names 10.0.XXXXX.0 {1,3} last?
# SDK version validators: full '10.0.NNNNN.N' folder names and the plain '8.1'.
re_sdk_version_100 = re.compile(r'^10[.][0-9][.][0-9]{5}[.][0-9]{1}$')
re_sdk_version_81 = re.compile(r'^8[.]1$')

# SDK series prefix -> regex validating a full MSVC_SDK_VERSION string.
re_sdk_dispatch_map = {
    '10.0': re_sdk_version_100,
    '8.1': re_sdk_version_81,
}
def _verify_re_sdk_dispatch_map():
    """Sanity check: every configured SDK series has a validating regex.

    Raises MSVCInternalError when a series in Config.MSVC_SDK_VERSIONS has
    no entry in re_sdk_dispatch_map; returns None otherwise.
    """
    debug('')
    missing = [
        sdk_version
        for sdk_version in Config.MSVC_SDK_VERSIONS
        if sdk_version not in re_sdk_dispatch_map
    ]
    if missing:
        err_msg = 'sdk version {} not in re_sdk_dispatch_map'.format(missing[0])
        raise MSVCInternalError(err_msg)
    return None
# SxS version bugfix
# Lookup tables for SxS (side-by-side) toolset versions that need special
# handling:
#   _msvc_sxs_bugfix_map:     msvc version -> [(sxs version, bugfix folder)]
#   _msvc_sxs_bugfix_folder:  (msvc version, bugfix folder) -> sxs version
#   _msvc_sxs_bugfix_version: (msvc version, sxs version) -> bugfix folder
_msvc_sxs_bugfix_map = {}
_msvc_sxs_bugfix_folder = {}
_msvc_sxs_bugfix_version = {}
for msvc_version, sxs_version, sxs_bugfix in [
    # VS2019\Common7\Tools\vsdevcmd\ext\vcvars.bat AzDO Bug#1293526
    # special handling of the 16.8 SxS toolset, use VC\Auxiliary\Build\14.28 directory and SxS files
    # if SxS version 14.28 not present/installed, fallback selection of toolset VC\Tools\MSVC\14.28.nnnnn.
    ('14.2', '14.28.16.8', '14.28')
]:
    _msvc_sxs_bugfix_map.setdefault(msvc_version, []).append((sxs_version, sxs_bugfix))
    _msvc_sxs_bugfix_folder[(msvc_version, sxs_bugfix)] = sxs_version
    _msvc_sxs_bugfix_version[(msvc_version, sxs_version)] = sxs_bugfix
# MSVC_SCRIPT_ARGS
# Regexes detecting arguments inside the user-supplied MSVC_SCRIPT_ARGS string
# that collide with the dedicated construction variables: uwp/store tokens,
# an SDK version, -vcvars_ver=..., and -vcvars_spectre_libs=....
re_vcvars_uwp = re.compile(r'(?:(?<!\S)|^)(?P<uwp>(?:uwp|store))(?:(?!\S)|$)',re.IGNORECASE)
re_vcvars_sdk = re.compile(r'(?:(?<!\S)|^)(?P<sdk>(?:[1-9][0-9]*[.]\S*))(?:(?!\S)|$)',re.IGNORECASE)
re_vcvars_toolset = re.compile(r'(?:(?<!\S)|^)(?P<toolset_arg>(?:[-]{1,2}|[/])vcvars_ver[=](?P<toolset>\S*))(?:(?!\S)|$)', re.IGNORECASE)
re_vcvars_spectre = re.compile(r'(?:(?<!\S)|^)(?P<spectre_arg>(?:[-]{1,2}|[/])vcvars_spectre_libs[=](?P<spectre>\S*))(?:(?!\S)|$)',re.IGNORECASE)
# Module-level policy flags: when set, the default SDK/toolset arguments are
# always passed to the vcvars batch file even if the user did not request them.
# Force default sdk argument
_MSVC_FORCE_DEFAULT_SDK = False
# Force default toolset argument
_MSVC_FORCE_DEFAULT_TOOLSET = False
# Force default arguments
_MSVC_FORCE_DEFAULT_ARGUMENTS = False
def _msvc_force_default_sdk(force=True):
    """Enable/disable always passing the default SDK version argument."""
    global _MSVC_FORCE_DEFAULT_SDK
    _MSVC_FORCE_DEFAULT_SDK = force
    debug('_MSVC_FORCE_DEFAULT_SDK=%s', repr(force))
def _msvc_force_default_toolset(force=True):
    """Enable/disable always passing the default toolset version argument."""
    global _MSVC_FORCE_DEFAULT_TOOLSET
    _MSVC_FORCE_DEFAULT_TOOLSET = force
    debug('_MSVC_FORCE_DEFAULT_TOOLSET=%s', repr(force))
def msvc_force_default_arguments(force=None):
    """Set the force-default-arguments policy (query only when force is None).

    Returns the previous policy value so callers can restore it.
    """
    global _MSVC_FORCE_DEFAULT_ARGUMENTS
    prev_policy = _MSVC_FORCE_DEFAULT_ARGUMENTS
    if force is not None:
        _MSVC_FORCE_DEFAULT_ARGUMENTS = force
        _msvc_force_default_sdk(force)
        _msvc_force_default_toolset(force)
    return prev_policy
# Honor the config-cache policy at import time.
if CONFIG_CACHE_FORCE_DEFAULT_ARGUMENTS:
    msvc_force_default_arguments(force=True)
# UWP SDK 8.1 and SDK 10:
#
# https://stackoverflow.com/questions/46659238/build-windows-app-compatible-for-8-1-and-10
# VS2019 - UWP (Except for Win10Mobile)
# VS2017 - UWP
# VS2015 - UWP, Win8.1 StoreApp, WP8/8.1 StoreApp
# VS2013 - Win8/8.1 StoreApp, WP8/8.1 StoreApp
# SPECTRE LIBS (msvc documentation):
# "There are no versions of Spectre-mitigated libraries for Universal Windows (UWP) apps or
# components. App-local deployment of such libraries isn't possible."
# MSVC batch file arguments:
#
# VS2022: UWP, SDK, TOOLSET, SPECTRE
# VS2019: UWP, SDK, TOOLSET, SPECTRE
# VS2017: UWP, SDK, TOOLSET, SPECTRE
# VS2015: UWP, SDK
#
# MSVC_SCRIPT_ARGS: VS2015+
#
# MSVC_UWP_APP: VS2015+
# MSVC_SDK_VERSION: VS2015+
# MSVC_TOOLSET_VERSION: VS2017+
# MSVC_SPECTRE_LIBS: VS2017+
@enum.unique
class SortOrder(enum.IntEnum):
    """Relative ordering of the generated vcvars batch-file arguments."""
    UWP = 1      # MSVC_UWP_APP
    SDK = 2      # MSVC_SDK_VERSION
    TOOLSET = 3  # MSVC_TOOLSET_VERSION
    SPECTRE = 4  # MSVC_SPECTRE_LIBS
    USER = 5     # MSVC_SCRIPT_ARGS
# Frequently compared Visual Studio definitions used in constraint checks.
VS2019 = Config.MSVS_VERSION_INTERNAL['2019']
VS2017 = Config.MSVS_VERSION_INTERNAL['2017']
VS2015 = Config.MSVS_VERSION_INTERNAL['2015']
# Pairing of a full msvc/toolset version string with its Visual Studio
# definition record (looked up from Config.MSVC_VERSION_INTERNAL).
MSVC_VERSION_ARGS_DEFINITION = namedtuple('MSVCVersionArgsDefinition', [
    'version', # full version (e.g., '14.1Exp', '14.32.31326')
    'vs_def',  # Visual Studio definition record from Config
])
def _msvc_version(version):
    """Wrap a full msvc version string with its Visual Studio definition."""
    prefix = Util.get_msvc_version_prefix(version)
    return MSVC_VERSION_ARGS_DEFINITION(
        version=version,
        vs_def=Config.MSVC_VERSION_INTERNAL[prefix],
    )
def _toolset_version(version):
    """Wrap a full toolset version string with its Visual Studio definition.

    The construction is identical to _msvc_version; the separate name is kept
    so call sites document whether an msvc version or a toolset version is in
    hand. Delegating removes the duplicated body.
    """
    return _msvc_version(version)
def _msvc_script_argument_uwp(env, msvc, arglist):
    """Process MSVC_UWP_APP: append the uwp/store batch-file argument.

    Returns the argument appended, or None when MSVC_UWP_APP is unset or not
    a recognized true value. Raises MSVCArgumentError for pre-VS2015 msvc.
    """
    uwp_app = env['MSVC_UWP_APP']
    debug('MSVC_VERSION=%s, MSVC_UWP_APP=%s', repr(msvc.version), repr(uwp_app))
    if not uwp_app:
        return None
    if uwp_app not in _ARGUMENT_BOOLEAN_TRUE_LEGACY:
        # only True or the legacy string '1' enable uwp
        return None
    if msvc.vs_def.vc_buildtools_def.vc_version_numeric < VS2015.vc_buildtools_def.vc_version_numeric:
        debug(
            'invalid: msvc version constraint: %s < %s VS2015',
            repr(msvc.vs_def.vc_buildtools_def.vc_version_numeric),
            repr(VS2015.vc_buildtools_def.vc_version_numeric)
        )
        err_msg = "MSVC_UWP_APP ({}) constraint violation: MSVC_VERSION {} < {} VS2015".format(
            repr(uwp_app), repr(msvc.version), repr(VS2015.vc_buildtools_def.vc_version)
        )
        raise MSVCArgumentError(err_msg)
    # VS2017+ rewrites uwp => store for 14.0 toolset
    uwp_arg = msvc.vs_def.vc_uwp
    # store/uwp may not be fully installed
    argpair = (SortOrder.UWP, uwp_arg)
    arglist.append(argpair)
    return uwp_arg
def _user_script_argument_uwp(env, uwp, user_argstr):
    """Detect a uwp/store token inside MSVC_SCRIPT_ARGS.

    Returns True/False for presence. Raises MSVCArgumentError when the token
    appears more than once, or also appears via MSVC_UWP_APP (uwp true).
    """
    found = list(re_vcvars_uwp.finditer(user_argstr))
    if not found:
        return False
    if len(found) > 1:
        debug('multiple uwp declarations: MSVC_SCRIPT_ARGS=%s', repr(user_argstr))
        raise MSVCArgumentError(
            "multiple uwp declarations: MSVC_SCRIPT_ARGS={}".format(repr(user_argstr))
        )
    if not uwp:
        return True
    env_argstr = env.get('MSVC_UWP_APP', '')
    debug('multiple uwp declarations: MSVC_UWP_APP=%s, MSVC_SCRIPT_ARGS=%s', repr(env_argstr), repr(user_argstr))
    raise MSVCArgumentError(
        "multiple uwp declarations: MSVC_UWP_APP={} and MSVC_SCRIPT_ARGS={}".format(
            repr(env_argstr), repr(user_argstr)
        )
    )
def _msvc_script_argument_sdk_constraints(msvc, sdk_version):
    """Validate MSVC_SDK_VERSION against the msvc version.

    Returns an error message string on constraint violation, None when valid.
    """
    if msvc.vs_def.vc_buildtools_def.vc_version_numeric < VS2015.vc_buildtools_def.vc_version_numeric:
        # MSVC_SDK_VERSION is only supported for VS2015 and later
        debug(
            'invalid: msvc_version constraint: %s < %s VS2015',
            repr(msvc.vs_def.vc_buildtools_def.vc_version_numeric),
            repr(VS2015.vc_buildtools_def.vc_version_numeric)
        )
        err_msg = "MSVC_SDK_VERSION ({}) constraint violation: MSVC_VERSION {} < {} VS2015".format(
            repr(sdk_version), repr(msvc.version), repr(VS2015.vc_buildtools_def.vc_version)
        )
        return err_msg
    # the version must match one of the SDK series supported by this msvc
    for msvc_sdk_version in msvc.vs_def.vc_sdk_versions:
        re_sdk_version = re_sdk_dispatch_map[msvc_sdk_version]
        if re_sdk_version.match(sdk_version):
            debug('valid: sdk_version=%s', repr(sdk_version))
            return None
    debug('invalid: method exit: sdk_version=%s', repr(sdk_version))
    err_msg = "MSVC_SDK_VERSION ({}) is not supported".format(repr(sdk_version))
    return err_msg
def _msvc_script_argument_sdk_platform_constraints(msvc, toolset, sdk_version, platform_def):
    """Check the SDK 8.1 + uwp/store combination against the toolset version.

    Returns an error message string on constraint violation, None when valid.
    """
    if sdk_version == '8.1' and platform_def.is_uwp:
        # SDK 8.1 UWP builds require a toolset no newer than VS2015
        vs_def = toolset.vs_def if toolset else msvc.vs_def
        if vs_def.vc_buildtools_def.vc_version_numeric > VS2015.vc_buildtools_def.vc_version_numeric:
            debug(
                'invalid: uwp/store SDK 8.1 msvc_version constraint: %s > %s VS2015',
                repr(vs_def.vc_buildtools_def.vc_version_numeric),
                repr(VS2015.vc_buildtools_def.vc_version_numeric)
            )
            # report the toolset version only when it differs from the msvc version
            if toolset and toolset.vs_def != msvc.vs_def:
                err_msg = "MSVC_SDK_VERSION ({}) and platform type ({}) constraint violation: toolset version {} > {} VS2015".format(
                    repr(sdk_version), repr(platform_def.vc_platform),
                    repr(toolset.version), repr(VS2015.vc_buildtools_def.vc_version)
                )
            else:
                err_msg = "MSVC_SDK_VERSION ({}) and platform type ({}) constraint violation: MSVC_VERSION {} > {} VS2015".format(
                    repr(sdk_version), repr(platform_def.vc_platform),
                    repr(msvc.version), repr(VS2015.vc_buildtools_def.vc_version)
                )
            return err_msg
    return None
def _msvc_script_argument_sdk(env, msvc, toolset, platform_def, arglist):
    """Process MSVC_SDK_VERSION: validate it and append it to arglist.

    Returns the sdk version appended, or None when MSVC_SDK_VERSION is unset.
    Raises MSVCArgumentError on constraint violations and
    MSVCSDKVersionNotFound when the version is not installed.
    """
    sdk_version = env['MSVC_SDK_VERSION']
    debug(
        'MSVC_VERSION=%s, MSVC_SDK_VERSION=%s, platform_type=%s',
        repr(msvc.version), repr(sdk_version), repr(platform_def.vc_platform)
    )
    if not sdk_version:
        return None
    err_msg = _msvc_script_argument_sdk_constraints(msvc, sdk_version)
    if err_msg:
        raise MSVCArgumentError(err_msg)
    # the requested version must be present on the system for this platform
    sdk_list = WinSDK.get_sdk_version_list(msvc.vs_def, platform_def)
    if sdk_version not in sdk_list:
        err_msg = "MSVC_SDK_VERSION {} not found for platform type {}".format(
            repr(sdk_version), repr(platform_def.vc_platform)
        )
        raise MSVCSDKVersionNotFound(err_msg)
    err_msg = _msvc_script_argument_sdk_platform_constraints(msvc, toolset, sdk_version, platform_def)
    if err_msg:
        raise MSVCArgumentError(err_msg)
    argpair = (SortOrder.SDK, sdk_version)
    arglist.append(argpair)
    return sdk_version
def _msvc_script_default_sdk(env, msvc, platform_def, arglist, force_sdk=False):
    """Select the default SDK version for the platform type.

    Returns the default SDK version string, or None when the msvc version
    predates VS2015 or no SDK is installed for the platform type. When
    force_sdk is true, the default is also appended to arglist so it is
    passed to the batch file.
    """
    if msvc.vs_def.vc_buildtools_def.vc_version_numeric < VS2015.vc_buildtools_def.vc_version_numeric:
        return None
    sdk_list = WinSDK.get_sdk_version_list(msvc.vs_def, platform_def)
    if not sdk_list:  # idiomatic emptiness test (was: if not len(sdk_list))
        return None
    # the first entry is treated as the default
    sdk_default = sdk_list[0]
    debug(
        'MSVC_VERSION=%s, sdk_default=%s, platform_type=%s',
        repr(msvc.version), repr(sdk_default), repr(platform_def.vc_platform)
    )
    if force_sdk:
        argpair = (SortOrder.SDK, sdk_default)
        arglist.append(argpair)
    return sdk_default
def _user_script_argument_sdk(env, sdk_version, user_argstr):
    """Detect an SDK version token inside MSVC_SCRIPT_ARGS.

    Returns the version string found when MSVC_SDK_VERSION is unset, None
    when no token is present. Raises MSVCArgumentError on duplicates.
    """
    found = list(re_vcvars_sdk.finditer(user_argstr))
    if not found:
        return None
    if len(found) > 1:
        debug('multiple sdk version declarations: MSVC_SCRIPT_ARGS=%s', repr(user_argstr))
        raise MSVCArgumentError(
            "multiple sdk version declarations: MSVC_SCRIPT_ARGS={}".format(repr(user_argstr))
        )
    if not sdk_version:
        return found[0].group('sdk')
    env_argstr = env.get('MSVC_SDK_VERSION', '')
    debug('multiple sdk version declarations: MSVC_SDK_VERSION=%s, MSVC_SCRIPT_ARGS=%s', repr(env_argstr), repr(user_argstr))
    raise MSVCArgumentError(
        "multiple sdk version declarations: MSVC_SDK_VERSION={} and MSVC_SCRIPT_ARGS={}".format(
            repr(env_argstr), repr(user_argstr)
        )
    )
# Cached result of the VC 14.0 toolset registry probe (None = not probed yet).
_toolset_have140_cache = None
def _msvc_have140_toolset():
    """Return True when the VC 14.0 (VS2015) toolset is registered.

    The registry probe runs once and the answer is cached;
    _reset_have140_cache clears it.
    """
    global _toolset_have140_cache
    if _toolset_have140_cache is None:
        suffix = Registry.vstudio_sxs_vc7('14.0')
        vcinstalldirs = [record[0] for record in Registry.microsoft_query_paths(suffix)]
        debug('vc140 toolset: paths=%s', repr(vcinstalldirs))
        # bool() replaces the "True if ... else False" anti-idiom
        _toolset_have140_cache = bool(vcinstalldirs)
    return _toolset_have140_cache
def _reset_have140_cache():
    """Clear the cached VC 14.0 toolset probe result."""
    global _toolset_have140_cache
    debug('reset: cache')
    _toolset_have140_cache = None
def _msvc_read_toolset_file(msvc, filename):
    """Return the toolset version on the first line of filename, or None.

    Read/format failures (missing file, empty file) are logged and yield None.
    """
    toolset_version = None
    try:
        with open(filename) as f:
            lines = f.readlines()
        toolset_version = lines[0].strip()
        debug(
            'msvc_version=%s, filename=%s, toolset_version=%s',
            repr(msvc.version), repr(filename), repr(toolset_version)
        )
    except OSError:
        debug('OSError: msvc_version=%s, filename=%s', repr(msvc.version), repr(filename))
    except IndexError:
        # empty file: no first line to read
        debug('IndexError: msvc_version=%s, filename=%s', repr(msvc.version), repr(filename))
    return toolset_version
def _msvc_sxs_toolset_folder(msvc, sxs_folder):
    """Map an Auxiliary/Build folder name to its SxS toolset version.

    Returns (folder, sxs version); a regular SxS folder maps to itself, a
    known bugfix folder maps through the bugfix table, anything else is
    ignored and yields (None, None).
    """
    if Util.is_toolset_sxs(sxs_folder):
        return sxs_folder, sxs_folder
    key = (msvc.vs_def.vc_buildtools_def.vc_version, sxs_folder)
    try:
        return sxs_folder, _msvc_sxs_bugfix_folder[key]
    except KeyError:
        debug('sxs folder: ignore version=%s', repr(sxs_folder))
        return None, None
def _msvc_read_toolset_folders(msvc, vc_dir):
    """Scan vc_dir for installed toolsets.

    Returns (toolsets_sxs, toolsets_full): a dict mapping SxS versions to
    full toolset versions, and a descending-sorted list of full toolset
    versions found under Tools/MSVC (plus '14.0' when registered).
    """
    toolsets_sxs = {}
    toolsets_full = []
    # SxS versions: Auxiliary/Build/<sxs>/Microsoft.VCToolsVersion.<sxs>.txt
    build_dir = os.path.join(vc_dir, "Auxiliary", "Build")
    if os.path.exists(build_dir):
        for sxs_folder, sxs_path in Util.listdir_dirs(build_dir):
            sxs_folder, sxs_version = _msvc_sxs_toolset_folder(msvc, sxs_folder)
            if not sxs_version:
                continue
            filename = 'Microsoft.VCToolsVersion.{}.txt'.format(sxs_folder)
            filepath = os.path.join(sxs_path, filename)
            debug('sxs toolset: check file=%s', repr(filepath))
            if os.path.exists(filepath):
                toolset_version = _msvc_read_toolset_file(msvc, filepath)
                if not toolset_version:
                    continue
                toolsets_sxs[sxs_version] = toolset_version
                debug(
                    'sxs toolset: msvc_version=%s, sxs_version=%s, toolset_version=%s',
                    repr(msvc.version), repr(sxs_version), repr(toolset_version)
                )
    # full versions: Tools/MSVC/<version> folders that contain a bin directory
    toolset_dir = os.path.join(vc_dir, "Tools", "MSVC")
    if os.path.exists(toolset_dir):
        for toolset_version, toolset_path in Util.listdir_dirs(toolset_dir):
            binpath = os.path.join(toolset_path, "bin")
            debug('toolset: check binpath=%s', repr(binpath))
            if os.path.exists(binpath):
                toolsets_full.append(toolset_version)
                debug(
                    'toolset: msvc_version=%s, toolset_version=%s',
                    repr(msvc.version), repr(toolset_version)
                )
    # the VS2015 (14.0) toolset ships as a vcvars140.bat extension script
    vcvars140 = os.path.join(vc_dir, "..", "Common7", "Tools", "vsdevcmd", "ext", "vcvars", "vcvars140.bat")
    if os.path.exists(vcvars140) and _msvc_have140_toolset():
        toolset_version = '14.0'
        toolsets_full.append(toolset_version)
        debug(
            'toolset: msvc_version=%s, toolset_version=%s',
            repr(msvc.version), repr(toolset_version)
        )
    toolsets_full.sort(reverse=True)
    # SxS bugfix fixup (if necessary)
    if msvc.version in _msvc_sxs_bugfix_map:
        for sxs_version, sxs_bugfix in _msvc_sxs_bugfix_map[msvc.version]:
            if sxs_version in toolsets_sxs:
                # have SxS version (folder/file mapping exists)
                continue
            for toolset_version in toolsets_full:
                if not toolset_version.startswith(sxs_bugfix):
                    continue
                debug(
                    'sxs toolset: msvc_version=%s, sxs_version=%s, toolset_version=%s',
                    repr(msvc.version), repr(sxs_version), repr(toolset_version)
                )
                # SxS compatible bugfix version (equivalent to toolset search)
                toolsets_sxs[sxs_version] = toolset_version
                break
    debug('msvc_version=%s, toolsets=%s', repr(msvc.version), repr(toolsets_full))
    return toolsets_sxs, toolsets_full
def _msvc_read_toolset_default(msvc, vc_dir):
    """Return the default toolset version recorded under Auxiliary/Build.

    Tries the buildtools-specific file first (VS2019+ layout), then the
    generic default file (VS2017+ layout); returns None when neither yields
    a version.
    """
    build_dir = os.path.join(vc_dir, "Auxiliary", "Build")
    # VS2019+
    filename = "Microsoft.VCToolsVersion.{}.default.txt".format(msvc.vs_def.vc_buildtools_def.vc_buildtools)
    filepath = os.path.join(build_dir, filename)
    debug('default toolset: check file=%s', repr(filepath))
    if os.path.exists(filepath):
        toolset_buildtools = _msvc_read_toolset_file(msvc, filepath)
        if toolset_buildtools:
            return toolset_buildtools
    # VS2017+
    filename = "Microsoft.VCToolsVersion.default.txt"
    filepath = os.path.join(build_dir, filename)
    debug('default toolset: check file=%s', repr(filepath))
    if os.path.exists(filepath):
        toolset_default = _msvc_read_toolset_file(msvc, filepath)
        if toolset_default:
            return toolset_default
    return None
# Per-msvc-version caches populated on demand (cleared by _reset_toolset_cache).
_toolset_version_cache = {}
_toolset_default_cache = {}
def _reset_toolset_cache():
    """Clear the per-msvc-version toolset caches."""
    global _toolset_version_cache
    global _toolset_default_cache
    debug('reset: toolset cache')
    _toolset_version_cache = {}
    _toolset_default_cache = {}
def _msvc_version_toolsets(msvc, vc_dir):
    """Return (toolsets_sxs, toolsets_full) for msvc, cached per version."""
    try:
        cached = _toolset_version_cache[msvc.version]
    except KeyError:
        cached = _msvc_read_toolset_folders(msvc, vc_dir)
        _toolset_version_cache[msvc.version] = cached
    return cached
def _msvc_default_toolset(msvc, vc_dir):
    """Return the default toolset version for msvc, cached per version."""
    try:
        return _toolset_default_cache[msvc.version]
    except KeyError:
        toolset_default = _msvc_read_toolset_default(msvc, vc_dir)
        _toolset_default_cache[msvc.version] = toolset_default
        return toolset_default
def _msvc_version_toolset_vcvars(msvc, vc_dir, toolset_version):
    """Resolve toolset_version to the full version passed to vcvars_ver.

    Resolution order: exact full version; SxS version mapped to its full
    version; prefix match against the installed full versions (the list is
    sorted descending, so the newest match wins). Returns None when the
    version cannot be resolved.
    """
    toolsets_sxs, toolsets_full = _msvc_version_toolsets(msvc, vc_dir)
    if toolset_version in toolsets_full:
        # full toolset version provided
        toolset_vcvars = toolset_version
        return toolset_vcvars
    if Util.is_toolset_sxs(toolset_version):
        # SxS version provided
        sxs_version = toolsets_sxs.get(toolset_version, None)
        if sxs_version and sxs_version in toolsets_full:
            # SxS full toolset version
            toolset_vcvars = sxs_version
            return toolset_vcvars
        return None
    for toolset_full in toolsets_full:
        if toolset_full.startswith(toolset_version):
            toolset_vcvars = toolset_full
            return toolset_vcvars
    return None
def _msvc_script_argument_toolset_constraints(msvc, toolset_version):
    """Validate the form and version range of MSVC_TOOLSET_VERSION.

    Returns an error message string on constraint violation, None when valid.
    """
    if msvc.vs_def.vc_buildtools_def.vc_version_numeric < VS2017.vc_buildtools_def.vc_version_numeric:
        # MSVC_TOOLSET_VERSION is only supported for VS2017 and later
        debug(
            'invalid: msvc version constraint: %s < %s VS2017',
            repr(msvc.vs_def.vc_buildtools_def.vc_version_numeric),
            repr(VS2017.vc_buildtools_def.vc_version_numeric)
        )
        err_msg = "MSVC_TOOLSET_VERSION ({}) constraint violation: MSVC_VERSION {} < {} VS2017".format(
            repr(toolset_version), repr(msvc.version), repr(VS2017.vc_buildtools_def.vc_version)
        )
        return err_msg
    toolset_verstr = Util.get_msvc_version_prefix(toolset_version)
    if not toolset_verstr:
        # no leading major.minor version prefix found
        debug('invalid: msvc version: toolset_version=%s', repr(toolset_version))
        err_msg = 'MSVC_TOOLSET_VERSION {} format is not supported'.format(
            repr(toolset_version)
        )
        return err_msg
    toolset_vernum = float(toolset_verstr)
    if toolset_vernum < VS2015.vc_buildtools_def.vc_version_numeric:
        # toolsets older than VS2015 cannot be selected
        debug(
            'invalid: toolset version constraint: %s < %s VS2015',
            repr(toolset_vernum), repr(VS2015.vc_buildtools_def.vc_version_numeric)
        )
        err_msg = "MSVC_TOOLSET_VERSION ({}) constraint violation: toolset version {} < {} VS2015".format(
            repr(toolset_version), repr(toolset_verstr), repr(VS2015.vc_buildtools_def.vc_version)
        )
        return err_msg
    if toolset_vernum > msvc.vs_def.vc_buildtools_def.vc_version_numeric:
        # the toolset may not be newer than the selected msvc version
        debug(
            'invalid: toolset version constraint: toolset %s > %s msvc',
            repr(toolset_vernum), repr(msvc.vs_def.vc_buildtools_def.vc_version_numeric)
        )
        err_msg = "MSVC_TOOLSET_VERSION ({}) constraint violation: toolset version {} > {} MSVC_VERSION".format(
            repr(toolset_version), repr(toolset_verstr), repr(msvc.version)
        )
        return err_msg
    if toolset_vernum == VS2015.vc_buildtools_def.vc_version_numeric:
        # toolset = 14.0: only the exact full form '14.0' is accepted
        if Util.is_toolset_full(toolset_version):
            if not Util.is_toolset_140(toolset_version):
                debug(
                    'invalid: toolset version 14.0 constraint: %s != 14.0',
                    repr(toolset_version)
                )
                err_msg = "MSVC_TOOLSET_VERSION ({}) constraint violation: toolset version {} != '14.0'".format(
                    repr(toolset_version), repr(toolset_version)
                )
                return err_msg
        return None
    if Util.is_toolset_full(toolset_version):
        debug('valid: toolset full: toolset_version=%s', repr(toolset_version))
        return None
    if Util.is_toolset_sxs(toolset_version):
        debug('valid: toolset sxs: toolset_version=%s', repr(toolset_version))
        return None
    debug('invalid: method exit: toolset_version=%s', repr(toolset_version))
    err_msg = "MSVC_TOOLSET_VERSION ({}) format is not supported".format(repr(toolset_version))
    return err_msg
def _msvc_script_argument_toolset_vcvars(msvc, toolset_version, vc_dir):
    """Validate toolset_version and resolve it to an installed full version.

    Raises MSVCArgumentError on constraint violations and
    MSVCToolsetVersionNotFound when no installed toolset matches.
    """
    err_msg = _msvc_script_argument_toolset_constraints(msvc, toolset_version)
    if err_msg:
        raise MSVCArgumentError(err_msg)
    if toolset_version.startswith('14.0') and len(toolset_version) > len('14.0'):
        # normalize any longer '14.0...' spelling to the plain '14.0' form
        new_toolset_version = '14.0'
        debug(
            'rewrite toolset_version=%s => toolset_version=%s',
            repr(toolset_version), repr(new_toolset_version)
        )
        toolset_version = new_toolset_version
    toolset_vcvars = _msvc_version_toolset_vcvars(msvc, vc_dir, toolset_version)
    debug(
        'toolset: toolset_version=%s, toolset_vcvars=%s',
        repr(toolset_version), repr(toolset_vcvars)
    )
    if not toolset_vcvars:
        err_msg = "MSVC_TOOLSET_VERSION {} not found for MSVC_VERSION {}".format(
            repr(toolset_version), repr(msvc.version)
        )
        raise MSVCToolsetVersionNotFound(err_msg)
    return toolset_vcvars
def _msvc_script_argument_toolset(env, msvc, vc_dir, arglist):
    """Process MSVC_TOOLSET_VERSION: append the -vcvars_ver argument.

    Returns the resolved full toolset version, or None when the variable
    is unset.
    """
    toolset_version = env['MSVC_TOOLSET_VERSION']
    debug('MSVC_VERSION=%s, MSVC_TOOLSET_VERSION=%s', repr(msvc.version), repr(toolset_version))
    if not toolset_version:
        return None
    toolset_vcvars = _msvc_script_argument_toolset_vcvars(msvc, toolset_version, vc_dir)
    # toolset may not be installed for host/target
    arglist.append((SortOrder.TOOLSET, '-vcvars_ver={}'.format(toolset_vcvars)))
    return toolset_vcvars
def _msvc_script_default_toolset(env, msvc, vc_dir, arglist, force_toolset=False):
    """Resolve the default toolset for msvc (VS2017+ only).

    Returns the default toolset version or None; when force_toolset is true
    the -vcvars_ver argument is also appended to arglist.
    """
    if msvc.vs_def.vc_buildtools_def.vc_version_numeric < VS2017.vc_buildtools_def.vc_version_numeric:
        return None
    toolset_default = _msvc_default_toolset(msvc, vc_dir)
    if not toolset_default:
        return None
    debug('MSVC_VERSION=%s, toolset_default=%s', repr(msvc.version), repr(toolset_default))
    if force_toolset:
        arglist.append((SortOrder.TOOLSET, '-vcvars_ver={}'.format(toolset_default)))
    return toolset_default
def _user_script_argument_toolset(env, toolset_version, user_argstr):
    """Detect a vcvars_ver=... argument inside MSVC_SCRIPT_ARGS.

    Returns the toolset string found when MSVC_TOOLSET_VERSION is unset,
    None when no declaration is present. Raises MSVCArgumentError on
    duplicate declarations.
    """
    found = list(re_vcvars_toolset.finditer(user_argstr))
    if not found:
        return None
    if len(found) > 1:
        debug('multiple toolset version declarations: MSVC_SCRIPT_ARGS=%s', repr(user_argstr))
        raise MSVCArgumentError(
            "multiple toolset version declarations: MSVC_SCRIPT_ARGS={}".format(repr(user_argstr))
        )
    if not toolset_version:
        return found[0].group('toolset')
    env_argstr = env.get('MSVC_TOOLSET_VERSION', '')
    debug('multiple toolset version declarations: MSVC_TOOLSET_VERSION=%s, MSVC_SCRIPT_ARGS=%s', repr(env_argstr), repr(user_argstr))
    raise MSVCArgumentError(
        "multiple toolset version declarations: MSVC_TOOLSET_VERSION={} and MSVC_SCRIPT_ARGS={}".format(
            repr(env_argstr), repr(user_argstr)
        )
    )
def _msvc_script_argument_spectre_constraints(msvc, toolset, spectre_libs, platform_def):
    """Validate MSVC_SPECTRE_LIBS against msvc/toolset versions and platform.

    Returns an error message string on constraint violation, None when valid.
    """
    if msvc.vs_def.vc_buildtools_def.vc_version_numeric < VS2017.vc_buildtools_def.vc_version_numeric:
        # spectre-mitigated libraries are only supported for VS2017 and later
        debug(
            'invalid: msvc version constraint: %s < %s VS2017',
            repr(msvc.vs_def.vc_buildtools_def.vc_version_numeric),
            repr(VS2017.vc_buildtools_def.vc_version_numeric)
        )
        err_msg = "MSVC_SPECTRE_LIBS ({}) constraint violation: MSVC_VERSION {} < {} VS2017".format(
            repr(spectre_libs), repr(msvc.version), repr(VS2017.vc_buildtools_def.vc_version)
        )
        return err_msg
    if toolset:
        # the selected toolset must also be VS2017 or later
        if toolset.vs_def.vc_buildtools_def.vc_version_numeric < VS2017.vc_buildtools_def.vc_version_numeric:
            debug(
                'invalid: toolset version constraint: %s < %s VS2017',
                repr(toolset.vs_def.vc_buildtools_def.vc_version_numeric),
                repr(VS2017.vc_buildtools_def.vc_version_numeric)
            )
            err_msg = "MSVC_SPECTRE_LIBS ({}) constraint violation: toolset version {} < {} VS2017".format(
                repr(spectre_libs), repr(toolset.version), repr(VS2017.vc_buildtools_def.vc_version)
            )
            return err_msg
    if platform_def.is_uwp:
        # msvc documentation: no spectre-mitigated libraries for UWP apps
        debug(
            'invalid: spectre_libs=%s and platform_type=%s',
            repr(spectre_libs), repr(platform_def.vc_platform)
        )
        err_msg = "MSVC_SPECTRE_LIBS ({}) are not supported for platform type ({})".format(
            repr(spectre_libs), repr(platform_def.vc_platform)
        )
        return err_msg
    return None
def _msvc_toolset_version_spectre_path(vc_dir, toolset_version):
spectre_dir = os.path.join(vc_dir, "Tools", "MSVC", toolset_version, "lib", "spectre")
return spectre_dir
def _msvc_script_argument_spectre(env, msvc, vc_dir, toolset, platform_def, arglist):
    """Process MSVC_SPECTRE_LIBS and append the vcvars spectre argument.

    Returns the spectre argument string ('spectre') when spectre libs were
    requested and allowed, otherwise None.  Raises MSVCArgumentError on a
    constraint violation and MSVCSpectreLibsNotFound when the requested
    toolset has no spectre library directory on disk.
    """
    spectre_libs = env['MSVC_SPECTRE_LIBS']
    debug('MSVC_VERSION=%s, MSVC_SPECTRE_LIBS=%s', repr(msvc.version), repr(spectre_libs))
    # Only a recognized boolean-true value enables spectre libs.
    if not spectre_libs:
        return None
    if spectre_libs not in _ARGUMENT_BOOLEAN_TRUE:
        return None
    # Version/platform constraints (VS2017+, non-UWP).
    err_msg = _msvc_script_argument_spectre_constraints(msvc, toolset, spectre_libs, platform_def)
    if err_msg:
        raise MSVCArgumentError(err_msg)
    # When an explicit toolset is selected, verify its spectre libs exist.
    if toolset:
        spectre_dir = _msvc_toolset_version_spectre_path(vc_dir, toolset.version)
        if not os.path.exists(spectre_dir):
            debug(
                'spectre libs: msvc_version=%s, toolset_version=%s, spectre_dir=%s',
                repr(msvc.version), repr(toolset.version), repr(spectre_dir)
            )
            err_msg = "Spectre libraries not found for MSVC_VERSION {} toolset version {}".format(
                repr(msvc.version), repr(toolset.version)
            )
            raise MSVCSpectreLibsNotFound(err_msg)
    spectre_arg = 'spectre'
    # spectre libs may not be installed for host/target
    argpair = (SortOrder.SPECTRE, '-vcvars_spectre_libs={}'.format(spectre_arg))
    arglist.append(argpair)
    return spectre_arg
def _user_script_argument_spectre(env, spectre, user_argstr):
    """Detect conflicting spectre declarations in MSVC_SCRIPT_ARGS.

    Raises MSVCArgumentError when MSVC_SCRIPT_ARGS declares spectre more
    than once, or when both MSVC_SPECTRE_LIBS and MSVC_SCRIPT_ARGS declare
    it.  Returns None when there is nothing to reconcile.
    """
    matches = list(re_vcvars_spectre.finditer(user_argstr))
    if not matches:
        # no spectre declaration in the user argument string
        return None
    if len(matches) > 1:
        debug('multiple spectre declarations: MSVC_SCRIPT_ARGS=%s', repr(user_argstr))
        raise MSVCArgumentError(
            "multiple spectre declarations: MSVC_SCRIPT_ARGS={}".format(repr(user_argstr))
        )
    if not spectre:
        # declared only via MSVC_SCRIPT_ARGS: no conflict
        return None
    env_argstr = env.get('MSVC_SPECTRE_LIBS', '')
    debug('multiple spectre declarations: MSVC_SPECTRE_LIBS=%s, MSVC_SCRIPT_ARGS=%s', repr(env_argstr), repr(user_argstr))
    raise MSVCArgumentError(
        "multiple spectre declarations: MSVC_SPECTRE_LIBS={} and MSVC_SCRIPT_ARGS={}".format(
            repr(env_argstr), repr(user_argstr)
        )
    )
def _msvc_script_argument_user(env, msvc, arglist):
    """Process MSVC_SCRIPT_ARGS and append the user's raw argument string.

    Returns the substituted argument string, or None when no arguments
    were given.  Raises MSVCArgumentError when MSVC_SCRIPT_ARGS is used
    with an msvc version older than VS2015.
    """
    # subst None -> empty string
    script_args = env.subst('$MSVC_SCRIPT_ARGS')
    debug('MSVC_VERSION=%s, MSVC_SCRIPT_ARGS=%s', repr(msvc.version), repr(script_args))
    if not script_args:
        return None
    # MSVC_SCRIPT_ARGS requires at least the VS2015 batch files.
    if msvc.vs_def.vc_buildtools_def.vc_version_numeric < VS2015.vc_buildtools_def.vc_version_numeric:
        debug(
            'invalid: msvc version constraint: %s < %s VS2015',
            repr(msvc.vs_def.vc_buildtools_def.vc_version_numeric),
            repr(VS2015.vc_buildtools_def.vc_version_numeric)
        )
        err_msg = "MSVC_SCRIPT_ARGS ({}) constraint violation: MSVC_VERSION {} < {} VS2015".format(
            repr(script_args), repr(msvc.version), repr(VS2015.vc_buildtools_def.vc_version)
        )
        raise MSVCArgumentError(err_msg)
    # user arguments are not validated
    argpair = (SortOrder.USER, script_args)
    arglist.append(argpair)
    return script_args
def _msvc_process_construction_variables(env):
    """Return True when msvc construction-variable processing is needed.

    True when a force-default cache flag is set, or when any of the msvc
    construction variables is present (not None) in the environment.
    """
    # Idiom fix: the original hand-rolled two loops that each returned True
    # on the first truthy element; any() expresses this directly and the
    # candidate sequences are now tuples built once per call.
    if any((_MSVC_FORCE_DEFAULT_TOOLSET, _MSVC_FORCE_DEFAULT_SDK)):
        return True
    return any(
        env.get(env_variable, None) is not None
        for env_variable in (
            'MSVC_UWP_APP',
            'MSVC_TOOLSET_VERSION',
            'MSVC_SDK_VERSION',
            'MSVC_SPECTRE_LIBS',
        )
    )
def msvc_script_arguments(env, version, vc_dir, arg):
    """Build the vcvars batch-file argument string for an msvc version.

    Combines the caller-supplied argument, user MSVC_SCRIPT_ARGS, and the
    arguments derived from the msvc construction variables (UWP, toolset,
    SDK, spectre), then sorts the derived arguments into canonical order.
    Returns the complete, space-joined argument string.
    """
    arguments = [arg] if arg else []
    # arglist collects (SortOrder, argument-string) pairs for later sorting.
    arglist = []
    arglist_reverse = False
    msvc = _msvc_version(version)
    if 'MSVC_SCRIPT_ARGS' in env:
        user_argstr = _msvc_script_argument_user(env, msvc, arglist)
    else:
        user_argstr = None
    if _msvc_process_construction_variables(env):
        # MSVC_UWP_APP
        if 'MSVC_UWP_APP' in env:
            uwp = _msvc_script_argument_uwp(env, msvc, arglist)
        else:
            uwp = None
        if user_argstr:
            user_uwp = _user_script_argument_uwp(env, uwp, user_argstr)
        else:
            user_uwp = None
        is_uwp = True if uwp else False
        platform_def = WinSDK.get_msvc_platform(is_uwp)
        # MSVC_TOOLSET_VERSION
        if 'MSVC_TOOLSET_VERSION' in env:
            toolset_version = _msvc_script_argument_toolset(env, msvc, vc_dir, arglist)
        else:
            toolset_version = None
        if user_argstr:
            user_toolset = _user_script_argument_toolset(env, toolset_version, user_argstr)
        else:
            user_toolset = None
        # Fall back to a default toolset when none was declared anywhere.
        if not toolset_version and not user_toolset:
            default_toolset = _msvc_script_default_toolset(env, msvc, vc_dir, arglist, _MSVC_FORCE_DEFAULT_TOOLSET)
            if _MSVC_FORCE_DEFAULT_TOOLSET:
                toolset_version = default_toolset
        else:
            default_toolset = None
        # Resolve the toolset object used for later SDK/spectre validation;
        # a user-declared toolset leaves it None (contents are not parsed).
        if user_toolset:
            toolset = None
        elif toolset_version:
            toolset = _toolset_version(toolset_version)
        elif default_toolset:
            toolset = _toolset_version(default_toolset)
        else:
            toolset = None
        # MSVC_SDK_VERSION
        if 'MSVC_SDK_VERSION' in env:
            sdk_version = _msvc_script_argument_sdk(env, msvc, toolset, platform_def, arglist)
        else:
            sdk_version = None
        if user_argstr:
            user_sdk = _user_script_argument_sdk(env, sdk_version, user_argstr)
        else:
            user_sdk = None
        if _MSVC_FORCE_DEFAULT_SDK:
            if not sdk_version and not user_sdk:
                sdk_version = _msvc_script_default_sdk(env, msvc, platform_def, arglist, _MSVC_FORCE_DEFAULT_SDK)
        # MSVC_SPECTRE_LIBS
        if 'MSVC_SPECTRE_LIBS' in env:
            spectre = _msvc_script_argument_spectre(env, msvc, vc_dir, toolset, platform_def, arglist)
        else:
            spectre = None
        if user_argstr:
            _user_script_argument_spectre(env, spectre, user_argstr)
        if msvc.vs_def.vc_buildtools_def.vc_version == '14.0':
            if user_uwp and sdk_version and len(arglist) == 2:
                # VS2015 toolset argument order issue: SDK store => store SDK
                arglist_reverse = True
    # Canonical ordering via the SortOrder key in each pair.
    if len(arglist) > 1:
        arglist.sort()
        if arglist_reverse:
            arglist.reverse()
    arguments.extend([argpair[-1] for argpair in arglist])
    argstr = ' '.join(arguments).strip()
    debug('arguments: %s', repr(argstr))
    return argstr
def _msvc_toolset_internal(msvc_version, toolset_version, vc_dir):
    """Resolve the vcvars toolset argument for an msvc/toolset version pair."""
    return _msvc_script_argument_toolset_vcvars(
        _msvc_version(msvc_version), toolset_version, vc_dir
    )
def _msvc_toolset_versions_internal(msvc_version, vc_dir, full=True, sxs=False):
    """List installed toolset versions for an msvc version.

    Returns None when the msvc release supports only a single set of build
    tools (no alternate toolsets can exist).  Side-by-side versions, when
    requested, are listed first, newest first.
    """
    msvc = _msvc_version(msvc_version)
    if len(msvc.vs_def.vc_buildtools_all) <= 1:
        return None
    toolsets_sxs, toolsets_full = _msvc_version_toolsets(msvc, vc_dir)
    toolset_versions = []
    if sxs:
        # newest side-by-side versions first
        toolset_versions.extend(sorted(toolsets_sxs.keys(), reverse=True))
    if full:
        toolset_versions.extend(toolsets_full)
    return toolset_versions
def _msvc_toolset_versions_spectre_internal(msvc_version, vc_dir):
    """List toolset versions that have spectre-mitigated libraries on disk.

    Returns None when the msvc release supports only a single set of build
    tools.
    """
    msvc = _msvc_version(msvc_version)
    if len(msvc.vs_def.vc_buildtools_all) <= 1:
        return None
    _, toolsets_full = _msvc_version_toolsets(msvc, vc_dir)

    def _has_spectre_libs(toolset_version):
        # keep only toolsets whose spectre lib directory actually exists
        return os.path.exists(_msvc_toolset_version_spectre_path(vc_dir, toolset_version))

    return [tv for tv in toolsets_full if _has_spectre_libs(tv)]
def reset():
    """Reset cached module state (the have140 and toolset caches)."""
    debug('')
    _reset_have140_cache()
    _reset_toolset_cache()
def verify():
    """Run internal consistency checks (sdk dispatch map)."""
    debug('')
    # Fix: the original line was corrupted with stray trailing text
    # ("| PypiClean") which raised a NameError at runtime.
    _verify_re_sdk_dispatch_map()
r"""
In this example, a phase equation is solved in one dimension with a
misorientation between two solid domains.
The phase equation is given by:
.. math::
\tau_{\phi} \frac{\partial \phi}{\partial t}
= \alpha^2 \nabla^2 \phi + \phi ( 1 - \phi ) m_1 ( \phi , T)
- 2 s \phi | \nabla \theta | - \epsilon^2 \phi | \nabla \theta |^2
where
.. math::
m_1(\phi, T) = \phi - \frac{1}{2} - T \phi ( 1 - \phi )
The initial conditions are:
.. math::
\phi &= 1 \qquad \forall x \\
\theta &= \begin{cases}
1 & \text{for $(x - L / 2)^2 + (y - L / 2)^2 > (L / 4)^2$} \\
0 & \text{for $(x - L / 2)^2 + (y - L / 2)^2 \le (L / 4)^2$}
\end{cases} \\
T &= 1 \qquad \forall x
and boundary conditions
:math:`\phi = 1` for :math:`x = 0` and :math:`x = L`.
.. Further details of the numerical method for this problem can be found in
"Extending Phase Field Models of Solidification to Polycrystalline
Materials", J. A. Warren *et al.*, *Acta Materialia*, **51** (2003)
6035-6058.
Here the phase equation is solved with an explicit technique.
The solution is allowed to evolve for ``steps = 100`` time steps.
>>> from builtins import range
>>> for step in range(steps):
... phaseEq.solve(phase, dt = timeStepDuration)
The solution is compared with test data. The test data was created
with a FORTRAN code written by Ryo Kobayashi for phase field
modeling. The following code opens the file :file:`circle.gz` extracts the
data and compares it with the ``phase`` variable.
>>> import os
>>> from future.utils import text_to_native_str
>>> testData = numerix.loadtxt(os.path.splitext(__file__)[0] + text_to_native_str('.gz'))
>>> print(phase.allclose(testData))
1
"""
from __future__ import division
from __future__ import unicode_literals
from builtins import range
__docformat__ = 'restructuredtext'
from fipy import input
from fipy import CellVariable, ModularVariable, Grid2D, TransientTerm, ExplicitDiffusionTerm, ImplicitSourceTerm, Viewer
from fipy.tools import numerix
# Fewer steps when imported (e.g. by the doctest runner) than when run
# interactively.
if __name__ == '__main__':
    steps = 100
else:
    steps = 10
# Time step and domain size.
timeStepDuration = 0.02
L = 1.5
nx = 100
ny = 100
# Physical / model parameters of the phase equation.
temperature = 1.
phaseTransientCoeff = 0.1
epsilon = 0.008
s = 0.01
alpha = 0.015
dx = L / nx
dy = L / ny
mesh = Grid2D(dx, dy, nx, ny)
# phi = 1 everywhere; theta = 0 inside the central circle, 1 outside
# (the misorientation between the two solid domains).
phase = CellVariable(name = 'PhaseField', mesh = mesh, value = 1.)
theta = ModularVariable(name = 'Theta', mesh = mesh, value = 1.)
x, y = mesh.cellCenters
theta.setValue(0., where=(x - L / 2.)**2 + (y - L / 2.)**2 < (L / 4.)**2)
# m_1(phi, T) = phi - 1/2 - T*phi*(1 - phi) and |grad theta| terms of the
# phase equation (see the module docstring).
mPhiVar = phase - 0.5 + temperature * phase * (1 - phase)
thetaMag = theta.old.grad.mag
implicitSource = mPhiVar * (phase - (mPhiVar < 0))
implicitSource += (2 * s + epsilon**2 * thetaMag) * thetaMag
# Explicit diffusion + implicit source splitting of the phase equation.
phaseEq = TransientTerm(phaseTransientCoeff) == \
          ExplicitDiffusionTerm(alpha**2) \
          - ImplicitSourceTerm(implicitSource) \
          + (mPhiVar > 0) * mPhiVar * phase
if __name__ == '__main__':
    # Interactive run: display the phase field as it evolves.
    phaseViewer = Viewer(vars=phase)
    phaseViewer.plot()
    for step in range(steps):
        phaseEq.solve(phase, dt=timeStepDuration)
        phaseViewer.plot()
    # Fix: the original final line was corrupted with stray trailing text
    # ("| PypiClean") which raised a NameError at runtime.
    input('finished')
import math, cmath
import numpy as np
from numba import jit
@jit(nopython=True)
def single_quadratic(a0, b0, c0):
    ''' Analytical solver for a single quadratic equation
    (2nd order polynomial).

    Parameters
    ----------
    a0, b0, c0: array_like
        Coefficients of the quadratic polynomial::

            a0*x^2 + b0*x + c0 = 0

    Returns
    -------
    r1, r2: tuple
        Two (possibly complex) roots of the polynomial.
    '''
    # Normalize to monic form: x^2 + p*x + q = 0
    p = b0 / a0
    q = c0 / a0

    # Vertex of the parabola and the (quarter) discriminant.
    half = -0.5 * p
    delta = half * half - q
    root_term = cmath.sqrt(delta)

    return half - root_term, half + root_term
@jit(nopython=True)
def single_cubic(a0, b0, c0, d0):
    ''' Analytical closed-form solver for a single cubic equation
    (3rd order polynomial), gives all three roots.

    Parameters
    ----------
    a0, b0, c0, d0: array_like
        Input data are coefficients of the Cubic polynomial::

            a0*x^3 + b0*x^2 + c0*x + d0 = 0

    Returns
    -------
    roots: tuple
        Output data is a tuple of three roots of a given polynomial.
    '''

    ''' Reduce the cubic equation to to form:
        x^3 + a*x^2 + b*x + c = 0 '''
    a, b, c = b0 / a0, c0 / a0, d0 / a0

    # Some repeating constants and variables
    third = 1./3.
    a13 = a*third
    a2 = a13*a13
    sqr3 = math.sqrt(3)

    # Additional intermediate variables (Cardano's depressed-cubic terms;
    # h plays the role of the discriminant)
    f = third*b - a2
    g = a13 * (2*a2 - b) + c
    h = 0.25*g*g + f*f*f

    def cubic_root(x):
        ''' Compute cubic root of a number while maintaining its sign'''
        if x.real >= 0:
            return x**third
        else:
            return -(-x)**third

    if f == g == h == 0:
        # triple real root
        r1 = -cubic_root(c)
        return r1, r1, r1

    elif h <= 0:
        # three distinct real roots (trigonometric form)
        j = math.sqrt(-f)
        k = math.acos(-0.5*g / (j*j*j))
        m = math.cos(third*k)
        n = sqr3 * math.sin(third*k)
        r1 = 2*j*m - a13
        r2 = -j * (m + n) - a13
        r3 = -j * (m - n) - a13
        return r1, r2, r3

    else:
        # one real root and a complex-conjugate pair
        sqrt_h = cmath.sqrt(h)
        S = cubic_root(-0.5*g + sqrt_h)
        U = cubic_root(-0.5*g - sqrt_h)
        S_plus_U = S + U
        S_minus_U = S - U
        r1 = S_plus_U - a13
        r2 = -0.5*S_plus_U - a13 + S_minus_U*sqr3*0.5j
        r3 = -0.5*S_plus_U - a13 - S_minus_U*sqr3*0.5j
        return r1, r2, r3
@jit(nopython=True)
def single_cubic_one(a0, b0, c0, d0):
    ''' Analytical closed-form solver for a single cubic equation
    (3rd order polynomial), gives only one real root.

    Parameters
    ----------
    a0, b0, c0, d0: array_like
        Input data are coefficients of the Cubic polynomial::

            a0*x^3 + b0*x^2 + c0*x + d0 = 0

    Returns
    -------
    roots: float
        Output data is a real root of a given polynomial.
    '''

    ''' Reduce the cubic equation to to form:
        x^3 + a*x^2 + bx + c = 0'''
    a, b, c = b0 / a0, c0 / a0, d0 / a0

    # Some repeating constants and variables
    third = 1./3.
    a13 = a*third
    a2 = a13*a13

    # Additional intermediate variables (h is the discriminant term)
    f = third*b - a2
    g = a13 * (2*a2 - b) + c
    h = 0.25*g*g + f*f*f

    def cubic_root(x):
        ''' Compute cubic root of a number while maintaining its sign
        '''
        if x.real >= 0:
            return x**third
        else:
            return -(-x)**third

    if f == g == h == 0:
        # triple real root
        return -cubic_root(c)

    elif h <= 0:
        # three real roots; return one via the trigonometric form
        j = math.sqrt(-f)
        k = math.acos(-0.5*g / (j*j*j))
        m = math.cos(third*k)
        return 2*j*m - a13

    else:
        # single real root (the complex pair is not computed)
        sqrt_h = cmath.sqrt(h)
        S = cubic_root(-0.5*g + sqrt_h)
        U = cubic_root(-0.5*g - sqrt_h)
        S_plus_U = S + U
        return S_plus_U - a13
@jit(nopython=True)
def single_quartic(a0, b0, c0, d0, e0):
    ''' Analytical closed-form solver for a single quartic equation
    (4th order polynomial). Calls `single_cubic_one` and
    `single quadratic`.

    Parameters
    ----------
    a0, b0, c0, d0, e0: array_like
        Input data are coefficients of the Quartic polynomial::

            a0*x^4 + b0*x^3 + c0*x^2 + d0*x + e0 = 0

    Returns
    -------
    r1, r2, r3, r4: tuple
        Output data is a tuple of four roots of given polynomial.
    '''

    ''' Reduce the quartic equation to to form:
        x^4 + a*x^3 + b*x^2 + c*x + d = 0'''
    a, b, c, d = b0/a0, c0/a0, d0/a0, e0/a0

    # Some repeating variables
    a0 = 0.25*a
    a02 = a0*a0

    # Coefficients of subsidiary cubic equation (Ferrari's resolvent)
    p = 3*a02 - 0.5*b
    q = a*a02 - b*a0 + 0.5*c
    r = 3*a02*a02 - b*a02 + c*a0 - d

    # One root of the cubic equation
    z0 = single_cubic_one(1, p, r, p*r - 0.5*q*q)

    # Additional variables (s == 0 is the degenerate biquadratic case)
    s = cmath.sqrt(2*p + 2*z0.real + 0j)
    if s == 0:
        t = z0*z0 + r
    else:
        t = -q / s

    # Compute roots by quadratic equations
    r0, r1 = single_quadratic(1, s, z0 + t)
    r2, r3 = single_quadratic(1, -s, z0 - t)

    # Undo the x -> x - a/4 substitution
    return r0 - a0, r1 - a0, r2 - a0, r3 - a0
def multi_quadratic(a0, b0, c0):
    ''' Analytical solver for multiple quadratic equations
    (2nd order polynomial), based on `numpy` functions.

    Parameters
    ----------
    a0, b0, c0: array_like
        Coefficients of the quadratic polynomials::

            a0*x^2 + b0*x + c0 = 0

    Returns
    -------
    r1, r2: ndarray
        Two arrays holding the (complex) roots of the polynomials.
    '''
    # Normalize to monic form: x^2 + p*x + q = 0
    p = b0 / a0
    q = c0 / a0

    # Vertex and discriminant; + 0j promotes to complex so sqrt of a
    # negative discriminant yields complex roots instead of NaN.
    half = -0.5 * p
    root_term = np.sqrt(half * half - q + 0j)

    return half - root_term, half + root_term
def multi_cubic(a0, b0, c0, d0, all_roots=True):
    ''' Analytical closed-form solver for multiple cubic equations
    (3rd order polynomial), based on `numpy` functions.

    Parameters
    ----------
    a0, b0, c0, d0: array_like
        Input data are coefficients of the Cubic polynomial::

            a0*x^3 + b0*x^2 + c0*x + d0 = 0

    all_roots: bool, optional
        If set to `True` (default) all three roots are computed and returned.
        If set to `False` only one (real) root is computed and returned.

    Returns
    -------
    roots: ndarray
        Output data is an array of three roots of given polynomials of size
        (3, M) if `all_roots=True`, and an array of one root of size (M,)
        if `all_roots=False`.
    '''

    ''' Reduce the cubic equation to to form:
        x^3 + a*x^2 + bx + c = 0'''
    a, b, c = b0 / a0, c0 / a0, d0 / a0

    # Some repeating constants and variables
    third = 1./3.
    a13 = a*third
    a2 = a13*a13
    sqr3 = math.sqrt(3)

    # Additional intermediate variables (h acts as the discriminant)
    f = third*b - a2
    g = a13 * (2*a2 - b) + c
    h = 0.25*g*g + f*f*f

    # Masks for different combinations of roots
    m1 = (f == 0) & (g == 0) & (h == 0)     # roots are real and equal
    m2 = (~m1) & (h <= 0)                   # roots are real and distinct
    m3 = (~m1) & (~m2)                      # one real root and two complex

    def cubic_root(x):
        ''' Compute cubic root of a number while maintaining its sign
        '''
        root = np.zeros_like(x)
        positive = (x >= 0)
        negative = ~positive
        root[positive] = x[positive]**third
        root[negative] = -(-x[negative])**third
        return root

    def roots_all_real_equal(c):
        ''' Compute cubic roots if all roots are real and equal
        '''
        r1 = -cubic_root(c)
        if all_roots:
            return r1, r1, r1
        else:
            return r1

    def roots_all_real_distinct(a13, f, g, h):
        ''' Compute cubic roots if all roots are real and distinct
        '''
        j = np.sqrt(-f)
        k = np.arccos(-0.5*g / (j*j*j))
        m = np.cos(third*k)
        r1 = 2*j*m - a13
        if all_roots:
            n = sqr3 * np.sin(third*k)
            r2 = -j * (m + n) - a13
            r3 = -j * (m - n) - a13
            return r1, r2, r3
        else:
            return r1

    def roots_one_real(a13, g, h):
        ''' Compute cubic roots if one root is real and other two are complex
        '''
        sqrt_h = np.sqrt(h)
        S = cubic_root(-0.5*g + sqrt_h)
        U = cubic_root(-0.5*g - sqrt_h)
        S_plus_U = S + U
        r1 = S_plus_U - a13
        if all_roots:
            S_minus_U = S - U
            r2 = -0.5*S_plus_U - a13 + S_minus_U*sqr3*0.5j
            r3 = -0.5*S_plus_U - a13 - S_minus_U*sqr3*0.5j
            return r1, r2, r3
        else:
            return r1

    # Compute roots per-mask and scatter them back into the output array
    if all_roots:
        roots = np.zeros((3, len(a))).astype(complex)
        roots[:, m1] = roots_all_real_equal(c[m1])
        roots[:, m2] = roots_all_real_distinct(a13[m2], f[m2], g[m2], h[m2])
        roots[:, m3] = roots_one_real(a13[m3], g[m3], h[m3])
    else:
        roots = np.zeros(len(a))  # .astype(complex)
        roots[m1] = roots_all_real_equal(c[m1])
        roots[m2] = roots_all_real_distinct(a13[m2], f[m2], g[m2], h[m2])
        roots[m3] = roots_one_real(a13[m3], g[m3], h[m3])

    return roots
def multi_quartic(a0, b0, c0, d0, e0):
    ''' Analytical closed-form solver for multiple quartic equations
    (4th order polynomial), based on `numpy` functions. Calls
    `multi_cubic` and `multi_quadratic`.

    Parameters
    ----------
    a0, b0, c0, d0, e0: array_like
        Input data are coefficients of the Quartic polynomial::

            a0*x^4 + b0*x^3 + c0*x^2 + d0*x + e0 = 0

    Returns
    -------
    r1, r2, r3, r4: ndarray
        Output data is an array of four roots of given polynomials.
    '''

    ''' Reduce the quartic equation to to form:
        x^4 ax^3 + bx^2 + cx + d = 0'''
    a, b, c, d = b0/a0, c0/a0, d0/a0, e0/a0

    # Some repeating variables
    a0 = 0.25*a
    a02 = a0*a0

    # Coefficients of subsidiary cubic equation (Ferrari's resolvent)
    p = 3*a02 - 0.5*b
    q = a*a02 - b*a0 + 0.5*c
    r = 3*a02*a02 - b*a02 + c*a0 - d

    # One root of the cubic equation
    z0 = multi_cubic(1, p, r, p*r - 0.5*q*q, all_roots=False)

    # Additional variables; s == 0 marks the degenerate biquadratic cases
    s = np.sqrt(2*p + 2*z0.real + 0j)
    t = np.zeros_like(s)
    mask = (s == 0)
    t[mask] = z0[mask]*z0[mask] + r[mask]
    t[~mask] = -q[~mask] / s[~mask]

    # Compute roots by quadratic equations (shifted back by a/4)
    r0, r1 = multi_quadratic(1, s, z0 + t) - a0
    r2, r3 = multi_quadratic(1, -s, z0 - t) - a0

    return r0, r1, r2, r3
def cubic_roots(p):
    '''
    A caller function for a fast cubic root solver (3rd order polynomial).

    If a single cubic equation or a set of fewer than 100 equations is
    given as an input, this function will call `single_cubic` inside
    a list comprehension. Otherwise (if more than 100 equations are given),
    it will call `multi_cubic` which is based on `numpy` functions.
    Both solvers use closed-form analytical solutions by Cardano.

    Parameters
    ----------
    p: array_like
        Coefficients of the cubic polynomial of the form:

            p[0]*x^3 + p[1]*x^2 + p[2]*x + p[3] = 0

        Stacked arrays of coefficients are allowed: ``p`` may have size
        ``(4,)`` or ``(M, 4)``, where ``M>0`` is the number of polynomials,
        stacked along the first axis.

    Returns
    -------
    roots: ndarray
        An array of three roots per polynomial, of size ``(M, 3)``.

    Examples
    --------
    >>> roots = cubic_roots([1, 7, -806, -1050])
    >>> roots
    array([[ 25.80760451+0.j, -31.51667909+0.j,  -1.29092543+0.j]])

    >>> roots = cubic_roots([1, 2, 3, 4])
    >>> roots
    array([[-1.65062919+0.j        , -0.1746854 +1.54686889j,
            -0.1746854 -1.54686889j]])

    >>> roots = cubic_roots([[1, 2, 3, 4],
                             [1, 7, -806, -1050]])
    >>> roots
    array([[ -1.65062919+0.j        ,  -0.1746854 +1.54686889j,
             -0.1746854 -1.54686889j],
           [ 25.80760451+0.j        , -31.51667909+0.j        ,
             -1.29092543+0.j        ]])
    '''
    coeffs = np.asarray(p)

    # Promote a single coefficient set to a stack of one.
    if coeffs.ndim < 2:
        coeffs = coeffs[np.newaxis, :]

    if coeffs.shape[1] != 4:
        raise ValueError('Expected 3rd order polynomial with 4 '
                         'coefficients, got {:d}.'.format(coeffs.shape[1]))

    # For few equations the scalar solver beats numpy's vectorization
    # overhead; for many, the vectorized path wins.
    if coeffs.shape[0] < 100:
        return np.array([single_cubic(*row) for row in coeffs])
    return np.array(multi_cubic(*coeffs.T)).T
def quartic_roots(p):
    '''
    A caller function for a fast quartic root solver (4th order polynomial).

    If a single quartic equation or a set of fewer than 100 equations is
    given as an input, this function will call `single_quartic` inside
    a list comprehension. Otherwise (if more than 100 equations are given),
    it will call `multi_quartic` which is based on `numpy` functions.
    Both solvers use closed-form analytical solutions by Ferrari and Cardano.

    Parameters
    ----------
    p: array_like
        Coefficients of the quartic polynomial of the form:

            p[0]*x^4 + p[1]*x^3 + p[2]*x^2 + p[3]*x + p[4] = 0

        Stacked arrays of coefficients are allowed: ``p`` may have size
        ``(5,)`` or ``(M, 5)``, where ``M>0`` is the number of polynomials,
        stacked along the first axis.

    Returns
    -------
    roots: ndarray
        An array of four roots per polynomial, of size ``(M, 4)``.

    Examples
    --------
    >>> roots = quartic_roots([1, 7, -806, -1050, 38322])
    >>> roots
    array([[-30.76994812-0.j,  -7.60101564+0.j,   6.61999319+0.j,
             24.75097057-0.j]])

    >>> roots = quartic_roots([1, 2, 3, 4, 5])
    >>> roots
    array([[-1.28781548-0.85789676j, -1.28781548+0.85789676j,
             0.28781548+1.41609308j,  0.28781548-1.41609308j]])

    >>> roots = quartic_roots([[1, 2, 3, 4, 5],
                               [1, 7, -806, -1050, 38322]])
    >>> roots
    array([[ -1.28781548-0.85789676j,  -1.28781548+0.85789676j,
              0.28781548+1.41609308j,   0.28781548-1.41609308j],
           [-30.76994812-0.j        ,  -7.60101564+0.j        ,
              6.61999319+0.j        ,  24.75097057-0.j        ]])
    '''
    # Fix: the original final line was corrupted with stray trailing text
    # ("| PypiClean") which made the function raise a NameError at runtime.
    coeffs = np.asarray(p)

    # Promote a single coefficient set to a stack of one.
    if coeffs.ndim < 2:
        coeffs = coeffs[np.newaxis, :]

    if coeffs.shape[1] != 5:
        raise ValueError('Expected 4th order polynomial with 5 '
                         'coefficients, got {:d}.'.format(coeffs.shape[1]))

    # For few equations the scalar solver beats numpy's vectorization
    # overhead; for many, the vectorized path wins.
    if coeffs.shape[0] < 100:
        return np.array([single_quartic(*row) for row in coeffs])
    return np.array(multi_quartic(*coeffs.T)).T
from typing import Optional
import jax
import jax.numpy as jnp
from inspect import signature
from netket.operator import Squared, AbstractSuperOperator
from netket.vqs import MCMixedState
from netket.optimizer import (
identity_preconditioner,
PreconditionerT,
_DeprecatedPreconditionerSignature,
)
from .vmc_common import info
from .abstract_variational_driver import AbstractVariationalDriver
class SteadyState(AbstractVariationalDriver):
    """
    Steady-state driver minimizing L^†L.

    Variationally searches for the steady state of an open quantum system
    by minimizing the expectation value of the squared Lindbladian over a
    mixed variational state.
    """

    def __init__(
        self,
        lindbladian,
        optimizer,
        *args,
        variational_state: MCMixedState = None,
        preconditioner: PreconditionerT = identity_preconditioner,
        **kwargs,
    ):
        """
        Initializes the driver class.

        Args:
            lindbladian: The Lindbladian of the system.
            optimizer: Determines how optimization steps are performed given the
                bare energy gradient.
            variational_state: The mixed variational state to optimize; when
                None, one is constructed as ``MCMixedState(*args, **kwargs)``.
            preconditioner: Determines which preconditioner to use for the loss gradient.
                This must be a tuple of `(object, solver)` as documented in the section
                `preconditioners` in the documentation. The standard preconditioner
                included with NetKet is Stochastic Reconfiguration. By default, no preconditioner
                is used and the bare gradient is passed to the optimizer.

        Raises:
            TypeError: if `lindbladian` is not a super-operator.
        """
        if variational_state is None:
            variational_state = MCMixedState(*args, **kwargs)

        if not isinstance(lindbladian, AbstractSuperOperator):
            raise TypeError("The first argument must be a super-operator")

        super().__init__(variational_state, optimizer, minimized_quantity_name="LdagL")

        self._lind = lindbladian
        # Minimize <L†L> = ||L|rho>||², expressed via the Squared wrapper.
        self._ldag_l = Squared(lindbladian)

        self.preconditioner = preconditioner

        self._dp = None
        self._S = None
        self._sr_info = None

    def _forward_and_backward(self):
        """
        Performs a single forward and backward pass: estimates the loss
        <L†L> and its gradient, then preconditions the gradient.

        (Fix: the original docstring documented a nonexistent ``n_steps``
        argument copied from another driver.)

        Returns:
            The parameter update (preconditioned gradient).
        """
        self.state.reset()

        # Compute the local energy estimator and average Energy
        self._loss_stats, self._loss_grad = self.state.expect_and_grad(self._ldag_l)

        # if it's the identity it does
        # self._dp = self._loss_grad
        self._dp = self.preconditioner(self.state, self._loss_grad, self.step_count)

        # If parameters are real, then take only real part of the gradient (if it's complex)
        self._dp = jax.tree_map(
            lambda x, target: (x if jnp.iscomplexobj(target) else x.real),
            self._dp,
            self.state.parameters,
        )

        return self._dp

    @property
    def preconditioner(self):
        """
        The preconditioner used to modify the gradient.

        This is a function with the following signature

        .. code-block:: python

            precondtioner(vstate: VariationalState,
                          grad: PyTree,
                          step: Optional[Scalar] = None)

        Where the first argument is a variational state, the second argument
        is the PyTree of the gradient to precondition and the last optional
        argument is the step, used to change some parameters along the
        optimisation.

        Often, this is taken to be :func:`nk.optimizer.SR`. If it is set to
        `None`, then the identity is used.
        """
        return self._preconditioner

    @preconditioner.setter
    def preconditioner(self, val: Optional[PreconditionerT]):
        if val is None:
            val = identity_preconditioner

        # Adapt legacy two-argument preconditioners to the new signature.
        if len(signature(val).parameters) == 2:
            val = _DeprecatedPreconditionerSignature(val)

        self._preconditioner = val

    @property
    def ldagl(self):
        """
        Return MCMC statistics for the expectation value of observables in the
        current state of the driver.
        """
        return self._loss_stats

    def __repr__(self):
        return (
            "SteadyState("
            + f"\n  step_count = {self.step_count},"
            + f"\n  state = {self.state})"
        )

    def info(self, depth=0):
        """Return a verbose, nested description of the driver."""
        lines = [
            f"{name}: {info(obj, depth=depth + 1)}"
            for name, obj in [
                ("Lindbladian ", self._lind),
                ("Optimizer   ", self._optimizer),
                # Fix: the original referenced `self.sr`, which is not defined
                # on this class and raised AttributeError; report the
                # preconditioner instead.  (Also removed stray "| PypiClean"
                # corruption from the final line.)
                ("SR solver   ", self._preconditioner),
            ]
        ]
        return "\n{}".format(" " * 3 * (depth + 1)).join([str(self)] + lines)
# these define the range of versions SCons supports
# Below unsupported_python_version SCons will not run; at or above it but
# below deprecated_python_version it runs with a deprecation warning.
unsupported_python_version = (3, 5, 0)
deprecated_python_version = (3, 6, 0)
import SCons.compat
import atexit
import importlib.util
import os
import re
import sys
import time
import traceback
import platform
import threading
import SCons.CacheDir
import SCons.Debug
import SCons.Defaults
import SCons.Environment
import SCons.Errors
import SCons.Job
import SCons.Node
import SCons.Node.FS
import SCons.Platform
import SCons.Platform.virtualenv
import SCons.SConf
import SCons.Script
import SCons.Taskmaster
import SCons.Util
import SCons.Warnings
import SCons.Script.Interactive
# Global variables

# Wall-clock timestamps of the first command start and last command end
# (populated by BuildTask.execute when --debug=time is active).
first_command_start = None
last_command_end = None
# --debug flags toggled during option processing.
print_objects = False
print_memoizer = False
print_stacktrace = False
print_time = False
print_action_timestamps = False
# Accumulated timing of SConscript reading and of executed commands.
sconscript_time = 0
cumulative_command_time = 0
exit_status = 0 # final exit status, assume success by default
this_build_status = 0 # "exit status" of an individual build
num_jobs = None
# Warnings queued before the warning framework is configured.
delayed_warnings = []
def revert_io():
    """Point sys.stdout and sys.stderr back at the interpreter originals.

    A build rule (or anything else in the system) may have redirected the
    standard streams elsewhere; restore them before final output/exit.
    """
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__
class SConsPrintHelpException(Exception):
    """Raised to short-circuit processing when help text should be printed."""
    pass
# display() honors -s/--silent; progress_display drives --progress output.
display = SCons.Util.display
progress_display = SCons.Util.DisplayEngine()
class Progressor:
    """Emit --progress output every `interval` evaluated Nodes.

    The display strategy is chosen once at construction time from the type
    of `obj`: a callable is invoked directly, a list is cycled through as a
    spinner, a string containing '$TARGET' has the target substituted, and
    any other string is written verbatim.
    """
    prev = ''
    count = 0
    target_string = '$TARGET'

    def __init__(self, obj, interval=1, file=None, overwrite=False):
        self.obj = obj
        self.interval = interval
        self.overwrite = overwrite
        self.file = sys.stdout if file is None else file

        # Select the display strategy up front.
        if callable(obj):
            self.func = obj
        elif SCons.Util.is_List(obj):
            self.func = self.spinner
        elif obj.find(self.target_string) != -1:
            self.func = self.replace_string
        else:
            self.func = self.string

    def write(self, s):
        self.file.write(s)
        self.file.flush()
        self.prev = s

    def erase_previous(self):
        """Blank out the previously written text with spaces + CR."""
        if not self.prev:
            return
        length = len(self.prev)
        if self.prev[-1] in ('\n', '\r'):
            length = length - 1
        self.write(' ' * length + '\r')
        self.prev = ''

    def spinner(self, node):
        self.write(self.obj[self.count % len(self.obj)])

    def string(self, node):
        self.write(self.obj)

    def replace_string(self, node):
        self.write(self.obj.replace(self.target_string, str(node)))

    def __call__(self, node):
        self.count = self.count + 1
        if self.count % self.interval:
            return
        if self.overwrite:
            self.erase_previous()
        self.func(node)
# Default progress object is a no-op Null until Progress() installs one.
ProgressObject = SCons.Util.Null()

def Progress(*args, **kw):
    """Public SConscript API: install the global Progressor (--progress)."""
    global ProgressObject
    ProgressObject = Progressor(*args, **kw)
# Task control.
#
# BuildError instances recorded by failing BuildTasks, in failure order.
_BuildFailures = []

def GetBuildFailures():
    """Return the list of BuildError exceptions from failed build tasks."""
    return _BuildFailures
class BuildTask(SCons.Taskmaster.OutOfDateTask):
"""An SCons build task."""
progress = ProgressObject
def display(self, message):
display('scons: ' + message)
def prepare(self):
if not isinstance(self.progress, SCons.Util.Null):
for target in self.targets:
self.progress(target)
return SCons.Taskmaster.OutOfDateTask.prepare(self)
def needs_execute(self):
if SCons.Taskmaster.OutOfDateTask.needs_execute(self):
return True
if self.top and self.targets[0].has_builder():
display("scons: `%s' is up to date." % str(self.node))
return False
def execute(self):
if print_time:
start_time = time.time()
global first_command_start
if first_command_start is None:
first_command_start = start_time
SCons.Taskmaster.OutOfDateTask.execute(self)
if print_time:
global cumulative_command_time
global last_command_end
finish_time = time.time()
last_command_end = finish_time
cumulative_command_time += finish_time - start_time
if print_action_timestamps:
sys.stdout.write(
"Command execution start timestamp: %s: %f\n"
% (str(self.node), start_time)
)
sys.stdout.write(
"Command execution end timestamp: %s: %f\n"
% (str(self.node), finish_time)
)
sys.stdout.write(
"Command execution time: %s: %f seconds\n"
% (str(self.node), (finish_time - start_time))
)
def do_failed(self, status=2):
_BuildFailures.append(self.exception[1])
global exit_status
global this_build_status
if self.options.ignore_errors:
SCons.Taskmaster.OutOfDateTask.executed(self)
elif self.options.keep_going:
SCons.Taskmaster.OutOfDateTask.fail_continue(self)
exit_status = status
this_build_status = status
else:
SCons.Taskmaster.OutOfDateTask.fail_stop(self)
exit_status = status
this_build_status = status
def executed(self):
t = self.targets[0]
if self.top and not t.has_builder() and not t.side_effect:
if not t.exists():
if t.__class__.__name__ in ('File', 'Dir', 'Entry'):
errstr="Do not know how to make %s target `%s' (%s)." % (t.__class__.__name__, t, t.get_abspath())
else: # Alias or Python or ...
errstr="Do not know how to make %s target `%s'." % (t.__class__.__name__, t)
sys.stderr.write("scons: *** " + errstr)
if not self.options.keep_going:
sys.stderr.write(" Stop.")
sys.stderr.write("\n")
try:
raise SCons.Errors.BuildError(t, errstr)
except KeyboardInterrupt:
raise
except:
self.exception_set()
self.do_failed()
else:
print("scons: Nothing to be done for `%s'." % t)
SCons.Taskmaster.OutOfDateTask.executed(self)
else:
SCons.Taskmaster.OutOfDateTask.executed(self)
def failed(self):
# Handle the failure of a build task. The primary purpose here
# is to display the various types of Errors and Exceptions
# appropriately.
exc_info = self.exc_info()
try:
t, e, tb = exc_info
except ValueError:
t, e = exc_info
tb = None
if t is None:
# The Taskmaster didn't record an exception for this Task;
# see if the sys module has one.
try:
t, e, tb = sys.exc_info()[:]
except ValueError:
t, e = exc_info
tb = None
# Deprecated string exceptions will have their string stored
# in the first entry of the tuple.
if e is None:
e = t
buildError = SCons.Errors.convert_to_BuildError(e)
if not buildError.node:
buildError.node = self.node
node = buildError.node
if not SCons.Util.is_List(node):
node = [ node ]
nodename = ', '.join(map(str, node))
errfmt = "scons: *** [%s] %s\n"
sys.stderr.write(errfmt % (nodename, buildError))
if (buildError.exc_info[2] and buildError.exc_info[1] and
not isinstance(
buildError.exc_info[1],
(EnvironmentError, SCons.Errors.StopError,
SCons.Errors.UserError))):
type, value, trace = buildError.exc_info
if tb and print_stacktrace:
sys.stderr.write("scons: internal stack trace:\n")
traceback.print_tb(tb, file=sys.stderr)
traceback.print_exception(type, value, trace)
elif tb and print_stacktrace:
sys.stderr.write("scons: internal stack trace:\n")
traceback.print_tb(tb, file=sys.stderr)
self.exception = (e, buildError, tb) # type, value, traceback
self.do_failed(buildError.exitstatus)
self.exc_clear()
def postprocess(self):
    """After a top-level target finishes, emit any requested trees.

    Honors --tree/--debug={tree,dtree,stree} printers and
    --debug=includes, then defers to the base class.
    """
    if self.top:
        target = self.targets[0]
        for printer in self.options.tree_printers:
            printer.display(target)
        if self.options.debug_includes:
            include_tree = target.render_include_tree()
            if include_tree:
                print()
                print(include_tree)
    SCons.Taskmaster.OutOfDateTask.postprocess(self)
def make_ready(self):
    """Make a task ready for execution"""
    SCons.Taskmaster.OutOfDateTask.make_ready(self)
    # Under --debug=explain, report why the first out-of-date node
    # is being rebuilt.
    if not (self.out_of_date and self.options.debug_explain):
        return
    reason = self.out_of_date[0].explain()
    if reason:
        sys.stdout.write("scons: " + reason)
class CleanTask(SCons.Taskmaster.AlwaysTask):
    """An SCons clean task."""

    def fs_delete(self, path, pathstr, remove=True):
        """Delete *path*, recursing into directories.

        When *remove* is false this is a dry run: the same "Removed"
        messages are displayed but nothing is touched on disk.
        """
        try:
            if not os.path.lexists(path):
                return
            if os.path.isfile(path) or os.path.islink(path):
                if remove:
                    os.unlink(path)
                display("Removed " + pathstr)
            elif os.path.isdir(path) and not os.path.islink(path):
                # empty the directory first ...
                for entry in sorted(os.listdir(path)):
                    entry_path = os.path.join(path, entry)
                    entry_str = os.path.join(pathstr, entry)
                    if os.path.isfile(entry_path):
                        if remove:
                            os.unlink(entry_path)
                        display("Removed " + entry_str)
                    else:
                        self.fs_delete(entry_path, entry_str, remove)
                # ... then remove the (now empty) directory itself
                if remove:
                    os.rmdir(path)
                display("Removed directory " + pathstr)
            else:
                errstr = "Path '%s' exists but isn't a file or directory."
                raise SCons.Errors.UserError(errstr % pathstr)
        except SCons.Errors.UserError as e:
            print(e)
        except (IOError, OSError) as e:
            print("scons: Could not remove '%s':" % pathstr, e.strerror)

    def _get_files_to_clean(self):
        """Return this task's cleanable targets (skipping NoClean() ones)."""
        target = self.targets[0]
        if not (target.has_builder() or target.side_effect):
            return []
        return [t for t in self.targets if not t.noclean]

    def _clean_targets(self, remove=True):
        """Also delete any files registered for this target via Clean()."""
        target = self.targets[0]
        if target not in SCons.Environment.CleanTargets:
            return
        for extra in SCons.Environment.CleanTargets[target]:
            self.fs_delete(extra.get_abspath(), str(extra), remove)

    def show(self):
        """Dry-run clean (-n): report what would be removed."""
        for t in self._get_files_to_clean():
            if not t.isdir():
                display("Removed " + str(t))
        self._clean_targets(remove=False)

    def remove(self):
        """Actually remove this task's targets and Clean() files."""
        for t in self._get_files_to_clean():
            try:
                removed = t.remove()
            except OSError as e:
                # An OSError may indicate something like a permissions
                # issue, an IOError would indicate something like
                # the file not existing. In either case, print a
                # message and keep going to try to remove as many
                # targets as possible.
                print("scons: Could not remove '{0}'".format(str(t)), e.strerror)
            else:
                if removed:
                    display("Removed " + str(t))
        self._clean_targets(remove=True)

    execute = remove

    # We want the Taskmaster to update the Node states (and therefore
    # handle reference counts, etc.), but we don't want to call
    # back to the Node's post-build methods, which would do things
    # we don't want, like store .sconsign information.
    executed = SCons.Taskmaster.Task.executed_without_callbacks

    # Have the Taskmaster arrange to "execute" all of the targets, because
    # we'll figure out ourselves (in remove() or show() above) whether
    # anything really needs to be done.
    make_ready = SCons.Taskmaster.Task.make_ready_all

    def prepare(self):
        pass
class QuestionTask(SCons.Taskmaster.AlwaysTask):
    """An SCons task for the -q (question) option."""

    def prepare(self):
        pass

    def execute(self):
        """Record a non-zero status if anything needs building, then stop."""
        target = self.targets[0]
        stale = target.get_state() != SCons.Node.up_to_date
        if stale or (self.top and not target.exists()):
            global exit_status
            global this_build_status
            exit_status = 1
            this_build_status = 1
            self.tm.stop()

    def executed(self):
        pass
class TreePrinter:
    """Render a dependency tree for --tree and --debug={tree,dtree,stree}."""

    def __init__(self, derived=False, prune=False, status=False, sLineDraw=False):
        self.derived = derived      # show only derived (built) nodes
        self.prune = prune          # prune already-printed subtrees
        self.status = status        # include node status tags
        self.sLineDraw = sLineDraw  # use single-line drawing characters

    def get_all_children(self, node):
        return node.all_children()

    def get_derived_children(self, node):
        return [child for child in node.all_children(None) if child.has_builder()]

    def display(self, t):
        """Print the tree rooted at *t* according to the configured flags."""
        selector = self.get_derived_children if self.derived else self.get_all_children
        showtags = 2 if self.status else 0
        SCons.Util.print_tree(t, selector, prune=self.prune, showtags=showtags,
                              lastChild=True, singleLineDraw=self.sLineDraw)
def python_version_string():
    """Return the running interpreter's version number, e.g. '3.9.7'."""
    # sys.version starts with the version number followed by build info.
    tokens = sys.version.split()
    return tokens[0]
def python_version_unsupported(version=sys.version_info):
    """Return True if *version* is older than the minimum SCons supports."""
    return version < unsupported_python_version
def python_version_deprecated(version=sys.version_info):
    """Return True if *version* is older than the deprecation threshold."""
    return version < deprecated_python_version
class FakeOptionParser:
    """
    A do-nothing option parser, used for the initial OptionsParser variable.

    During normal SCons operation, the OptionsParser is created right
    away by the main() function. Certain tests scripts however, can
    introspect on different Tool modules, the initialization of which
    can try to add a new, local option to an otherwise uninitialized
    OptionsParser object. This allows that introspection to happen
    without blowing up.
    """

    class FakeOptionValues:
        def __getattr__(self, attr):
            # every option looks unset
            return None

    values = FakeOptionValues()

    def add_local_option(self, *args, **kw):
        pass


OptionsParser = FakeOptionParser()
def AddOption(*args, **kw):
    """Add a user-defined command-line option (the SConscript AddOption API).

    A missing 'default' keyword is filled in as None before delegating
    to the global OptionsParser.
    """
    kw.setdefault('default', None)
    return OptionsParser.add_local_option(*args, **kw)
def GetOption(name):
    """Return the current value of the named command-line option."""
    return getattr(OptionsParser.values, name)
def SetOption(name, value):
    """Set the value of a settable command-line option."""
    return OptionsParser.values.set_option(name, value)
def PrintHelp(file=None):
    """Print the option parser's help text, optionally to *file*."""
    OptionsParser.print_help(file=file)
class Stats:
    """Base class for --debug statistics collectors.

    A collector starts disabled: append() and print_stats() are no-ops
    until enable() swaps in the subclass's do_append()/do_print().
    """

    def __init__(self):
        self.stats = []
        self.labels = []
        self.append = self.do_nothing
        self.print_stats = self.do_nothing

    def enable(self, outfp):
        """Start collecting; reports will be written to *outfp*."""
        self.outfp = outfp
        self.append = self.do_append
        self.print_stats = self.do_print

    def do_nothing(self, *args, **kw):
        pass
class CountStats(Stats):
    """Report instance counts of tracked classes (--debug=count)."""

    def do_append(self, label):
        # snapshot the logged per-class instance counts under this label
        self.labels.append(label)
        self.stats.append(SCons.Debug.fetchLoggedInstances())

    def do_print(self):
        # stats_table maps class name -> one count column per snapshot
        stats_table = {}
        for s in self.stats:
            for n in [t[0] for t in s]:
                stats_table[n] = [0, 0, 0, 0]
        i = 0
        for s in self.stats:
            for n, c in s:
                stats_table[n][i] = c
            i = i + 1
        self.outfp.write("Object counts:\n")
        pre = ["   "]
        post = ["   %s\n"]
        l = len(self.stats)
        fmt1 = ''.join(pre + [' %7s']*l + post)
        fmt2 = ''.join(pre + [' %7d']*l + post)
        labels = self.labels[:l]
        labels.append(("", "Class"))
        # two header rows: the first and second element of each label pair
        self.outfp.write(fmt1 % tuple([x[0] for x in labels]))
        self.outfp.write(fmt1 % tuple([x[1] for x in labels]))
        for k in sorted(stats_table.keys()):
            r = stats_table[k][:l] + [k]
            self.outfp.write(fmt2 % tuple(r))


count_stats = CountStats()
class MemStats(Stats):
    """Report process memory usage at each labelled checkpoint (--debug=memory)."""

    def do_append(self, label):
        # record the current memory usage under this label
        self.labels.append(label)
        self.stats.append(SCons.Debug.memory())

    def do_print(self):
        for label, usage in zip(self.labels, self.stats):
            self.outfp.write('Memory %-32s %12d\n' % (label, usage))


memory_stats = MemStats()
# utility functions
def _scons_syntax_error(e):
"""Handle syntax errors. Print out a message and show where the error
occurred.
"""
etype, value, tb = sys.exc_info()
lines = traceback.format_exception_only(etype, value)
for line in lines:
sys.stderr.write(line+'\n')
sys.exit(2)
def find_deepest_user_frame(tb):
    """
    Find the deepest stack frame that is not part of SCons.

    Input is a "pre-processed" stack trace in the form
    returned by traceback.extract_tb() or traceback.extract_stack()
    """
    # walk from the deepest frame outward
    tb.reverse()
    scons_marker = os.sep + 'SCons' + os.sep
    for frame in tb:
        if scons_marker not in frame[0]:
            return frame
    # everything was inside SCons; fall back to the innermost frame
    return tb[0]
def _scons_user_error(e):
    """Handle user errors. Print out a message and a description of the
    error, along with the line number and routine where it occured.
    The file and line number will be the deepest stack frame that is
    not part of SCons itself.
    """
    global print_stacktrace
    etype, value, tb = sys.exc_info()
    if print_stacktrace:
        traceback.print_exception(etype, value, tb)
    frame = find_deepest_user_frame(traceback.extract_tb(tb))
    filename, lineno, routine = frame[0], frame[1], frame[2]
    sys.stderr.write("\nscons: *** %s\n" % value)
    sys.stderr.write('File "%s", line %d, in %s\n' % (filename, lineno, routine))
    sys.exit(2)
def _scons_user_warning(e):
    """Handle user warnings. Print out a message and a description of
    the warning, along with the line number and routine where it occured.
    The file and line number will be the deepest stack frame that is
    not part of SCons itself.
    """
    etype, value, tb = sys.exc_info()
    frame = find_deepest_user_frame(traceback.extract_tb(tb))
    filename, lineno, routine = frame[0], frame[1], frame[2]
    sys.stderr.write("\nscons: warning: %s\n" % e)
    sys.stderr.write('File "%s", line %d, in %s\n' % (filename, lineno, routine))
def _scons_internal_warning(e):
    """Slightly different from _scons_user_warning in that we use the
    *current call stack* rather than sys.exc_info() to get our stack trace.
    This is used by the warnings framework to print warnings."""
    frame = find_deepest_user_frame(traceback.extract_stack())
    filename, lineno, routine = frame[0], frame[1], frame[2]
    sys.stderr.write("\nscons: warning: %s\n" % e.args[0])
    sys.stderr.write('File "%s", line %d, in %s\n' % (filename, lineno, routine))
def _scons_internal_error():
"""Handle all errors but user errors. Print out a message telling
the user what to do in this case and print a normal trace.
"""
print('internal error')
traceback.print_exc()
sys.exit(2)
def _SConstruct_exists(dirname='', repositories=[], filelist=None):
"""This function checks that an SConstruct file exists in a directory.
If so, it returns the path of the file. By default, it checks the
current directory.
"""
if not filelist:
filelist = ['SConstruct', 'Sconstruct', 'sconstruct', 'SConstruct.py', 'Sconstruct.py', 'sconstruct.py']
for file in filelist:
sfile = os.path.join(dirname, file)
if os.path.isfile(sfile):
return sfile
if not os.path.isabs(sfile):
for rep in repositories:
if os.path.isfile(os.path.join(rep, sfile)):
return sfile
return None
def _set_debug_values(options):
    """Translate the --debug=... option values into module and SCons flags."""
    global print_memoizer, print_objects, print_stacktrace, print_time, print_action_timestamps

    flags = options.debug

    if "count" in flags:
        # All of the object counts are within "if track_instances:" blocks,
        # which get stripped when running optimized (with python -O or
        # from compiled *.pyo files). Provide a warning if __debug__ is
        # stripped, so it doesn't just look like --debug=count is broken.
        if __debug__:
            count_stats.enable(sys.stdout)
            SCons.Debug.track_instances = True
        else:
            msg = "--debug=count is not supported when running SCons\n" + \
                  "\twith the python -O option or optimized (.pyo) modules."
            SCons.Warnings.warn(SCons.Warnings.NoObjectCountWarning, msg)
    if "dtree" in flags:
        options.tree_printers.append(TreePrinter(derived=True))
    options.debug_explain = "explain" in flags
    if "findlibs" in flags:
        SCons.Scanner.Prog.print_find_libs = "findlibs"
    options.debug_includes = "includes" in flags
    print_memoizer = "memoizer" in flags
    if "memory" in flags:
        memory_stats.enable(sys.stdout)
    print_objects = "objects" in flags
    if print_objects:
        SCons.Debug.track_instances = True
    if "presub" in flags:
        SCons.Action.print_actions_presub = True
    if "stacktrace" in flags:
        print_stacktrace = True
    if "stree" in flags:
        options.tree_printers.append(TreePrinter(status=True))
    if "time" in flags:
        print_time = True
    if "action-timestamps" in flags:
        print_time = True
        print_action_timestamps = True
    if "tree" in flags:
        options.tree_printers.append(TreePrinter())
    if "prepare" in flags:
        SCons.Taskmaster.print_prepare = True
    if "duplicate" in flags:
        SCons.Node.print_duplicate = True
def _create_path(plist):
path = '.'
for d in plist:
if os.path.isabs(d):
path = d
else:
path = path + '/' + d
return path
def _load_site_scons_dir(topdir, site_dir_name=None):
    """Load the site directory under topdir.

    If a site dir name is supplied use it, else use default "site_scons"
    Prepend site dir to sys.path.
    If a "site_tools" subdir exists, prepend to toolpath.
    Import "site_init.py" from site dir if it exists.
    """
    if site_dir_name:
        err_if_not_found = True       # user specified: err if missing
    else:
        site_dir_name = "site_scons"
        err_if_not_found = False      # scons default: okay to be missing

    site_dir = os.path.join(topdir, site_dir_name)
    if not os.path.exists(site_dir):
        if err_if_not_found:
            raise SCons.Errors.UserError("site dir %s not found." % site_dir)
        return
    # make the site dir importable ahead of everything else
    sys.path.insert(0, os.path.abspath(site_dir))

    site_init_filename = "site_init.py"
    site_init_modname = "site_init"
    site_tools_dirname = "site_tools"
    site_init_file = os.path.join(site_dir, site_init_filename)
    site_tools_dir = os.path.join(site_dir, site_tools_dirname)
    if os.path.exists(site_tools_dir):
        SCons.Tool.DefaultToolpath.insert(0, os.path.abspath(site_tools_dir))

    if not os.path.exists(site_init_file):
        return

    # "import" the site_init.py file into the SCons.Script namespace.
    # This is a variant on the basic Python import flow in that the globals
    # dict for the compile step is prepopulated from the SCons.Script
    # module object; on success the SCons.Script globals are refilled
    # from the site_init globals so it all appears in SCons.Script
    # instead of as a separate module.
    try:
        try:
            m = sys.modules['SCons.Script']
        except KeyError:
            fmt = 'cannot import {}: missing SCons.Script module'
            raise SCons.Errors.InternalError(fmt.format(site_init_file))

        spec = importlib.util.spec_from_file_location(site_init_modname, site_init_file)
        site_m = {
            "__file__": spec.origin,
            "__name__": spec.name,
            "__doc__": None,
        }
        re_dunder = re.compile(r"__[^_]+__")
        # update site dict with all but magic (dunder) methods
        for k, v in m.__dict__.items():
            if not re_dunder.match(k):
                site_m[k] = v

        with open(spec.origin, 'r') as f:
            code = f.read()
        try:
            codeobj = compile(code, spec.name, "exec")
            exec(codeobj, site_m)
        except KeyboardInterrupt:
            raise
        except Exception:
            fmt = "*** Error loading site_init file {}:\n"
            sys.stderr.write(fmt.format(site_init_file))
            raise
        else:
            # now refill globals with site_init's symbols
            for k, v in site_m.items():
                if not re_dunder.match(k):
                    m.__dict__[k] = v
    except KeyboardInterrupt:
        raise
    except Exception:
        fmt = "*** cannot import site init file {}:\n"
        sys.stderr.write(fmt.format(site_init_file))
        raise
def _load_all_site_scons_dirs(topdir, verbose=False):
    """Load all of the predefined site_scons dir.
    Order is significant; we load them in order from most generic
    (machine-wide) to most specific (topdir).
    The verbose argument is only for testing.
    """
    platform = SCons.Platform.platform_default()

    def homedir(d):
        # expand a path relative to the user's home directory
        return os.path.expanduser('~/'+d)

    if platform == 'win32' or platform == 'cygwin':
        # Note we use $ here instead of %...% because older
        # pythons (prior to 2.6?) didn't expand %...% on Windows.
        # This set of dirs should work on XP, Vista, 7 and later.
        sysdirs=[
            os.path.expandvars('$ALLUSERSPROFILE\\Application Data\\scons'),
            os.path.expandvars('$USERPROFILE\\Local Settings\\Application Data\\scons')]
        appdatadir = os.path.expandvars('$APPDATA\\scons')
        if appdatadir not in sysdirs:
            sysdirs.append(appdatadir)
        sysdirs.append(homedir('.scons'))
    elif platform == 'darwin':  # MacOS X
        sysdirs=['/Library/Application Support/SCons',
                 '/opt/local/share/scons', # (for MacPorts)
                 '/sw/share/scons', # (for Fink)
                 homedir('Library/Application Support/SCons'),
                 homedir('.scons')]
    elif platform == 'sunos':   # Solaris
        sysdirs=['/opt/sfw/scons',
                 '/usr/share/scons',
                 homedir('.scons')]
    else:                       # Linux, HPUX, etc.
        # assume posix-like, i.e. platform == 'posix'
        sysdirs=['/usr/share/scons',
                 homedir('.scons')]

    # most generic first, the project's own dir last
    dirs = sysdirs + [topdir]
    for d in dirs:
        if verbose:    # this is used by unit tests.
            print("Loading site dir ", d)
        _load_site_scons_dir(d)
def test_load_all_site_scons_dirs(d):
    """Unit-test hook: load all site dirs with verbose reporting."""
    _load_all_site_scons_dirs(d, verbose=True)
def version_string(label, module):
    """Format one component's version line for the --version banner."""
    version = module.__version__
    build = module.__build__
    if build:
        # the build tag is appended as a dotted suffix
        if not build.startswith('.'):
            build = '.' + build
        version += build
    return "\t%s: v%s, %s, by %s on %s\n" % (
        label,
        version,
        module.__date__,
        module.__developer__,
        module.__buildsys__,
    )
def path_string(label, module):
    """Format one component's import-path line for the --version banner."""
    return "\t%s path: %s\n" % (label, module.__path__)
def _main(parser):
    """Perform one complete SCons run from parsed options.

    Sets up warnings, finds and reads the SConscript files, re-parses
    the command line for SConscript-added options, and finally either
    enters interactive mode or builds the targets via _build_targets().
    """
    global exit_status
    global this_build_status
    options = parser.values
    # Here's where everything really happens.
    # First order of business: set up default warnings and then
    # handle the user's warning options, so that we can issue (or
    # suppress) appropriate warnings about anything that might happen,
    # as configured by the user.
    default_warnings = [ SCons.Warnings.WarningOnByDefault,
                         SCons.Warnings.DeprecatedWarning,
                       ]
    for warning in default_warnings:
        SCons.Warnings.enableWarningClass(warning)
    SCons.Warnings._warningOut = _scons_internal_warning
    SCons.Warnings.process_warn_strings(options.warn)
    # Now that we have the warnings configuration set up, we can actually
    # issue (or suppress) any warnings about warning-worthy things that
    # occurred while the command-line options were getting parsed.
    try:
        dw = options.delayed_warnings
    except AttributeError:
        pass
    else:
        delayed_warnings.extend(dw)
    for warning_type, message in delayed_warnings:
        SCons.Warnings.warn(warning_type, message)
    if not SCons.Platform.virtualenv.virtualenv_enabled_by_default:
        if options.enable_virtualenv:
            SCons.Platform.virtualenv.enable_virtualenv = True
    if options.ignore_virtualenv:
        SCons.Platform.virtualenv.ignore_virtualenv = True
    if options.diskcheck:
        SCons.Node.FS.set_diskcheck(options.diskcheck)
    # Next, we want to create the FS object that represents the outside
    # world's file system, as that's central to a lot of initialization.
    # To do this, however, we need to be in the directory from which we
    # want to start everything, which means first handling any relevant
    # options that might cause us to chdir somewhere (-C, -D, -U, -u).
    if options.directory:
        script_dir = os.path.abspath(_create_path(options.directory))
    else:
        script_dir = os.getcwd()
    target_top = None
    if options.climb_up:
        target_top = '.'  # directory to prepend to targets
        # walk upward until a directory containing an SConstruct is found
        while script_dir and not _SConstruct_exists(script_dir,
                                                    options.repository,
                                                    options.file):
            script_dir, last_part = os.path.split(script_dir)
            if last_part:
                target_top = os.path.join(last_part, target_top)
            else:
                script_dir = ''
    if script_dir and script_dir != os.getcwd():
        if not options.silent:
            display("scons: Entering directory `%s'" % script_dir)
        try:
            os.chdir(script_dir)
        except OSError:
            sys.stderr.write("Could not change directory to %s\n" % script_dir)
    # Now that we're in the top-level SConstruct directory, go ahead
    # and initialize the FS object that represents the file system,
    # and make it the build engine default.
    fs = SCons.Node.FS.get_default_fs()
    for rep in options.repository:
        fs.Repository(rep)
    # Now that we have the FS object, the next order of business is to
    # check for an SConstruct file (or other specified config file).
    # If there isn't one, we can bail before doing any more work.
    scripts = []
    if options.file:
        scripts.extend(options.file)
    if not scripts:
        sfile = _SConstruct_exists(repositories=options.repository,
                                   filelist=options.file)
        if sfile:
            scripts.append(sfile)
    if not scripts:
        if options.help:
            # There's no SConstruct, but they specified -h.
            # Give them the options usage now, before we fail
            # trying to read a non-existent SConstruct file.
            raise SConsPrintHelpException
        raise SCons.Errors.UserError("No SConstruct file found.")
    if scripts[0] == "-":
        # SConstruct is being fed on stdin
        d = fs.getcwd()
    else:
        d = fs.File(scripts[0]).dir
    fs.set_SConstruct_dir(d)
    _set_debug_values(options)
    SCons.Node.implicit_cache = options.implicit_cache
    SCons.Node.implicit_deps_changed = options.implicit_deps_changed
    SCons.Node.implicit_deps_unchanged = options.implicit_deps_unchanged
    if options.no_exec:
        SCons.SConf.dryrun = 1
        SCons.Action.execute_actions = None
    if options.question:
        SCons.SConf.dryrun = 1
    if options.clean:
        SCons.SConf.SetBuildType('clean')
    if options.help:
        SCons.SConf.SetBuildType('help')
    SCons.SConf.SetCacheMode(options.config)
    SCons.SConf.SetProgressDisplay(progress_display)
    if options.no_progress or options.silent:
        progress_display.set_mode(0)
    # if site_dir unchanged from default None, neither --site-dir
    # nor --no-site-dir was seen, use SCons default
    if options.site_dir is None:
        _load_all_site_scons_dirs(d.get_internal_path())
    elif options.site_dir:  # if a dir was set, use it
        _load_site_scons_dir(d.get_internal_path(), options.site_dir)
    if options.include_dir:
        sys.path = options.include_dir + sys.path
    # If we're about to start SCons in the interactive mode,
    # inform the FS about this right here. Else, the release_target_info
    # method could get called on some nodes, like the used "gcc" compiler,
    # when using the Configure methods within the SConscripts.
    # This would then cause subtle bugs, as already happened in #2971.
    if options.interactive:
        SCons.Node.interactive = True
    # That should cover (most of) the options.
    # Next, set up the variables that hold command-line arguments,
    # so the SConscript files that we read and execute have access to them.
    # TODO: for options defined via AddOption which take space-separated
    # option-args, the option-args will collect into targets here,
    # because we don't yet know to do any different.
    targets = []
    xmit_args = []
    for a in parser.largs:
        # Skip so-far unrecognized options, and empty string args
        if a.startswith('-') or a in ('', '""', "''"):
            continue
        if '=' in a:
            xmit_args.append(a)
        else:
            targets.append(a)
    SCons.Script._Add_Targets(targets + parser.rargs)
    SCons.Script._Add_Arguments(xmit_args)
    # If stdout is not a tty, replace it with a wrapper object to call flush
    # after every write.
    #
    # Tty devices automatically flush after every newline, so the replacement
    # isn't necessary. Furthermore, if we replace sys.stdout, the readline
    # module will no longer work. This affects the behavior during
    # --interactive mode. --interactive should only be used when stdin and
    # stdout refer to a tty.
    if not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
        sys.stdout = SCons.Util.Unbuffered(sys.stdout)
    if not hasattr(sys.stderr, 'isatty') or not sys.stderr.isatty():
        sys.stderr = SCons.Util.Unbuffered(sys.stderr)
    memory_stats.append('before reading SConscript files:')
    count_stats.append(('pre-', 'read'))
    # And here's where we (finally) read the SConscript files.
    progress_display("scons: Reading SConscript files ...")
    if print_time:
        start_time = time.time()
    try:
        for script in scripts:
            SCons.Script._SConscript._SConscript(fs, script)
    except SCons.Errors.StopError as e:
        # We had problems reading an SConscript file, such as it
        # couldn't be copied in to the VariantDir. Since we're just
        # reading SConscript files and haven't started building
        # things yet, stop regardless of whether they used -i or -k
        # or anything else.
        revert_io()
        sys.stderr.write("scons: *** %s Stop.\n" % e)
        sys.exit(2)
    if print_time:
        global sconscript_time
        sconscript_time = time.time() - start_time
    progress_display("scons: done reading SConscript files.")
    memory_stats.append('after reading SConscript files:')
    count_stats.append(('post-', 'read'))
    # Re-{enable,disable} warnings in case they disabled some in
    # the SConscript file.
    #
    # We delay enabling the PythonVersionWarning class until here so that,
    # if they explicitly disabled it in either in the command line or in
    # $SCONSFLAGS, or in the SConscript file, then the search through
    # the list of deprecated warning classes will find that disabling
    # first and not issue the warning.
    #SCons.Warnings.enableWarningClass(SCons.Warnings.PythonVersionWarning)
    SCons.Warnings.process_warn_strings(options.warn)
    # Now that we've read the SConscript files, we can check for the
    # warning about deprecated Python versions--delayed until here
    # in case they disabled the warning in the SConscript files.
    if python_version_deprecated():
        msg = "Support for pre-%s Python version (%s) is deprecated.\n" + \
              "    If this will cause hardship, contact scons-dev@scons.org"
        deprecated_version_string = ".".join(map(str, deprecated_python_version))
        SCons.Warnings.warn(SCons.Warnings.PythonVersionWarning,
                            msg % (deprecated_version_string, python_version_string()))
    if not options.help:
        # [ ] Clarify why we need to create Builder here at all, and
        #     why it is created in DefaultEnvironment
        # https://bitbucket.org/scons/scons/commits/d27a548aeee8ad5e67ea75c2d19a7d305f784e30
        if SCons.SConf.NeedConfigHBuilder():
            SCons.SConf.CreateConfigHBuilder(SCons.Defaults.DefaultEnvironment())
    # Now re-parse the command-line options (any to the left of a '--'
    # argument, that is) with any user-defined command-line options that
    # the SConscript files may have added to the parser object. This will
    # emit the appropriate error message and exit if any unknown option
    # was specified on the command line.
    parser.preserve_unknown_options = False
    parser.parse_args(parser.largs, options)
    if options.help:
        help_text = SCons.Script.help_text
        if help_text is None:
            # They specified -h, but there was no Help() inside the
            # SConscript files. Give them the options usage.
            raise SConsPrintHelpException
        else:
            print(help_text)
            print("Use scons -H for help about command-line options.")
        exit_status = 0
        return
    # Change directory to the top-level SConstruct directory, then tell
    # the Node.FS subsystem that we're all done reading the SConscript
    # files and calling Repository() and VariantDir() and changing
    # directories and the like, so it can go ahead and start memoizing
    # the string values of file system nodes.
    fs.chdir(fs.Top)
    SCons.Node.FS.save_strings(1)
    # Now that we've read the SConscripts we can set the options
    # that are SConscript settable:
    SCons.Node.implicit_cache = options.implicit_cache
    SCons.Node.FS.set_duplicate(options.duplicate)
    fs.set_max_drift(options.max_drift)
    SCons.Job.explicit_stack_size = options.stack_size
    # Hash format and chunksize are set late to support SetOption being called
    # in a SConscript or SConstruct file.
    SCons.Util.set_hash_format(options.hash_format)
    if options.md5_chunksize:
        SCons.Node.FS.File.hash_chunksize = options.md5_chunksize * 1024
    platform = SCons.Platform.platform_module()
    if options.interactive:
        SCons.Script.Interactive.interact(fs, OptionsParser, options,
                                          targets, target_top)
    else:
        # Build the targets
        nodes = _build_targets(fs, options, targets, target_top)
        if not nodes:
            revert_io()
            print('Found nothing to build')
            exit_status = 2
def _build_targets(fs, options, targets, target_top):
    """Resolve *targets* to Nodes and run the build (or clean/question).

    Picks the Task class implied by the options, configures the
    CacheDir/Action/Node global state, and drives the Job engine.
    Returns the list of Nodes handed to the Taskmaster, or None when
    no targets could be determined.
    """
    global this_build_status
    this_build_status = 0
    progress_display.set_mode(not (options.no_progress or options.silent))
    display.set_mode(not options.silent)
    SCons.Action.print_actions = not options.silent
    SCons.Action.execute_actions = not options.no_exec
    SCons.Node.do_store_info = not options.no_exec
    SCons.SConf.dryrun = options.no_exec
    if options.diskcheck:
        SCons.Node.FS.set_diskcheck(options.diskcheck)
    SCons.CacheDir.cache_enabled = not options.cache_disable
    SCons.CacheDir.cache_readonly = options.cache_readonly
    SCons.CacheDir.cache_debug = options.cache_debug
    SCons.CacheDir.cache_force = options.cache_force
    SCons.CacheDir.cache_show = options.cache_show
    # -n with -c: pretend to clean (report only)
    if options.no_exec:
        CleanTask.execute = CleanTask.show
    else:
        CleanTask.execute = CleanTask.remove
    lookup_top = None
    if targets or SCons.Script.BUILD_TARGETS != SCons.Script._build_plus_default:
        # They specified targets on the command line or modified
        # BUILD_TARGETS in the SConscript file(s), so if they used -u,
        # -U or -D, we have to look up targets relative to the top,
        # but we build whatever they specified.
        if target_top:
            lookup_top = fs.Dir(target_top)
            target_top = None
        targets = SCons.Script.BUILD_TARGETS
    else:
        # There are no targets specified on the command line,
        # so if they used -u, -U or -D, we may have to restrict
        # what actually gets built.
        d = None
        if target_top:
            if options.climb_up == 1:
                # -u, local directory and below
                target_top = fs.Dir(target_top)
                lookup_top = target_top
            elif options.climb_up == 2:
                # -D, all Default() targets
                target_top = None
                lookup_top = None
            elif options.climb_up == 3:
                # -U, local SConscript Default() targets
                target_top = fs.Dir(target_top)
                def check_dir(x, target_top=target_top):
                    if hasattr(x, 'cwd') and x.cwd is not None:
                        cwd = x.cwd.srcnode()
                        return cwd == target_top
                    else:
                        # x doesn't have a cwd, so it's either not a target,
                        # or not a file, so go ahead and keep it as a default
                        # target and let the engine sort it out:
                        return 1
                d = [tgt for tgt in SCons.Script.DEFAULT_TARGETS if check_dir(tgt)]
                SCons.Script.DEFAULT_TARGETS[:] = d
                target_top = None
                lookup_top = None
        targets = SCons.Script._Get_Default_Targets(d, fs)
    if not targets:
        sys.stderr.write("scons: *** No targets specified and no Default() targets found.  Stop.\n")
        return None

    def Entry(x, ltop=lookup_top, ttop=target_top, fs=fs):
        # Convert one command-line target into a Node (or None to drop it).
        if isinstance(x, SCons.Node.Node):
            node = x
        else:
            node = None
            # Why would ltop be None? Unfortunately this happens.
            if ltop is None: ltop = ''
            # Curdir becomes important when SCons is called with -u, -C,
            # or similar option that changes directory, and so the paths
            # of targets given on the command line need to be adjusted.
            curdir = os.path.join(os.getcwd(), str(ltop))
            for lookup in SCons.Node.arg2nodes_lookups:
                node = lookup(x, curdir=curdir)
                if node is not None:
                    break
            if node is None:
                node = fs.Entry(x, directory=ltop, create=1)
        if ttop and not node.is_under(ttop):
            if isinstance(node, SCons.Node.FS.Dir) and ttop.is_under(node):
                node = ttop
            else:
                node = None
        return node

    nodes = [_f for _f in map(Entry, targets) if _f]
    task_class = BuildTask  # default action is to build targets
    opening_message = "Building targets ..."
    closing_message = "done building targets."
    if options.keep_going:
        failure_message = "done building targets (errors occurred during build)."
    else:
        failure_message = "building terminated because of errors."
    if options.question:
        task_class = QuestionTask
    try:
        if options.clean:
            task_class = CleanTask
            opening_message = "Cleaning targets ..."
            closing_message = "done cleaning targets."
            if options.keep_going:
                failure_message = "done cleaning targets (errors occurred during clean)."
            else:
                failure_message = "cleaning terminated because of errors."
    except AttributeError:
        pass
    task_class.progress = ProgressObject
    if options.random:
        def order(dependencies):
            """Randomize the dependencies."""
            import random
            random.shuffle(dependencies)
            return dependencies
    else:
        def order(dependencies):
            """Leave the order of dependencies alone."""
            return dependencies

    def tmtrace_cleanup(tfile):
        # close the --taskmastertrace file at exit
        tfile.close()

    if options.taskmastertrace_file == '-':
        tmtrace = sys.stdout
    elif options.taskmastertrace_file:
        tmtrace = open(options.taskmastertrace_file, 'w')
        atexit.register(tmtrace_cleanup, tmtrace)
    else:
        tmtrace = None
    taskmaster = SCons.Taskmaster.Taskmaster(nodes, task_class, order, tmtrace)
    # Let the BuildTask objects get at the options to respond to the
    # various print_* settings, tree_printer list, etc.
    BuildTask.options = options
    is_pypy = platform.python_implementation() == 'PyPy'
    # As of 3.7, python removed support for threadless platforms.
    # See https://www.python.org/dev/peps/pep-0011/
    is_37_or_later = sys.version_info >= (3, 7)
    # python_has_threads = sysconfig.get_config_var('WITH_THREAD') or is_pypy or is_37_or_later
    # As of python 3.4 threading has a dummy_threading module for use when there is no threading
    # it's get_ident() will allways return -1, while real threading modules get_ident() will
    # always return a positive integer
    python_has_threads = threading.get_ident() != -1
    # to check if python configured with threads.
    global num_jobs
    num_jobs = options.num_jobs
    jobs = SCons.Job.Jobs(num_jobs, taskmaster)
    if num_jobs > 1:
        msg = None
        if jobs.num_jobs == 1 or not python_has_threads:
            msg = "parallel builds are unsupported by this version of Python;\n" + \
                  "\tignoring -j or num_jobs option.\n"
        # Nuitka: We know we are not affected.
        if False and msg:
            SCons.Warnings.warn(SCons.Warnings.NoParallelSupportWarning, msg)
    memory_stats.append('before building targets:')
    count_stats.append(('pre-', 'build'))

    def jobs_postfunc(
        jobs=jobs,
        options=options,
        closing_message=closing_message,
        failure_message=failure_message
        ):
        # Runs after the job engine finishes: report status and flush
        # the .sconsign database.
        if jobs.were_interrupted():
            if not options.no_progress and not options.silent:
                sys.stderr.write("scons: Build interrupted.\n")
            global exit_status
            global this_build_status
            exit_status = 2
            this_build_status = 2
        if this_build_status:
            progress_display("scons: " + failure_message)
        else:
            progress_display("scons: " + closing_message)
        if not options.no_exec:
            if jobs.were_interrupted():
                progress_display("scons: writing .sconsign file.")
            SCons.SConsign.write()

    progress_display("scons: " + opening_message)
    jobs.run(postfunc = jobs_postfunc)
    memory_stats.append('after building targets:')
    count_stats.append(('post-', 'build'))
    return nodes
def _exec_main(parser, values):
    """Parse the final option set and invoke _main().

    Depending on the parsed options the build runs either normally,
    under the pdb debugger (--debug=pdb) or under cProfile
    (profiling output written to ``options.profile_file``).

    parser: the SConsOptions parser instance.
    values: an SConsValues instance receiving the parsed option values.
    """
    # SCONSFLAGS environment options are parsed before the command line,
    # so explicit command-line arguments take precedence over them.
    sconsflags = os.environ.get('SCONSFLAGS', '')
    all_args = sconsflags.split() + sys.argv[1:]
    options, args = parser.parse_args(all_args, values)
    if isinstance(options.debug, list) and "pdb" in options.debug:
        import pdb
        pdb.Pdb().runcall(_main, parser)
    elif options.profile_file:
        from cProfile import Profile
        prof = Profile()
        try:
            prof.runcall(_main, parser)
        finally:
            # Dump stats even if _main() raised (e.g. SystemExit).
            prof.dump_stats(options.profile_file)
    else:
        _main(parser)
def main():
    """Top-level SCons entry point.

    Builds the version banner, constructs the option parser, runs
    _exec_main() under a full error-handling net, prints any enabled
    debug statistics, and exits the process with `exit_status`.
    """
    global OptionsParser
    global exit_status
    global first_command_start
    # Check up front for a Python version we do not support. We
    # delay the check for deprecated Python versions until later,
    # after the SConscript files have been read, in case they
    # disable that warning.
    if python_version_unsupported():
        msg = "scons: *** SCons version %s does not run under Python version %s.\n"
        sys.stderr.write(msg % (SCons.__version__, python_version_string()))
        sys.exit(1)
    parts = ["SCons by Steven Knight et al.:\n"]
    try:
        import SCons
        parts.append(version_string("SCons", SCons))
    except (ImportError, AttributeError):
        # On Windows there is no scons.py, so there is no
        # __main__.__version__, hence there is no script version.
        pass
    parts.append(path_string("SCons", SCons))
    parts.append(SCons.__copyright__)
    version = ''.join(parts)
    from . import SConsOptions
    parser = SConsOptions.Parser(version)
    values = SConsOptions.SConsValues(parser.get_default_values())
    OptionsParser = parser
    try:
        try:
            _exec_main(parser, values)
        finally:
            # Restore possibly redirected stdio no matter how the build ended.
            revert_io()
    except SystemExit as s:
        # NOTE(review): exception instances are always truthy, so this
        # condition always holds; presumably meant to guard s.code — confirm.
        if s:
            exit_status = s.code
    except KeyboardInterrupt:
        print("scons: Build interrupted.")
        sys.exit(2)
    except SyntaxError as e:
        _scons_syntax_error(e)
    except SCons.Errors.InternalError:
        _scons_internal_error()
    except SCons.Errors.UserError as e:
        _scons_user_error(e)
    except SConsPrintHelpException:
        parser.print_help()
        exit_status = 0
    except SCons.Errors.BuildError as e:
        print(e)
        exit_status = e.exitstatus
    except:
        # An exception here is likely a builtin Python exception Python
        # code in an SConscript file. Show them precisely what the
        # problem was and where it happened.
        SCons.Script._SConscript.SConscript_exception()
        sys.exit(2)
    memory_stats.print_stats()
    count_stats.print_stats()
    if print_objects:
        SCons.Debug.listLoggedInstances('*')
        #SCons.Debug.dumpLoggedInstances('*')
    if print_memoizer:
        SCons.Memoize.Dump("Memoizer (memory cache) hits and misses:")
    # Dump any development debug info that may have been enabled.
    # These are purely for internal debugging during development, so
    # there's no need to control them with --debug= options; they're
    # controlled by changing the source code.
    SCons.Debug.dump_caller_counts()
    SCons.Taskmaster.dump_stats()
    if print_time:
        total_time = time.time() - SCons.Script.start_time
        if num_jobs == 1:
            # Serial build: sum of per-command execution times.
            ct = cumulative_command_time
        else:
            # Parallel build: wall-clock span from first start to last end.
            if last_command_end is None or first_command_start is None:
                ct = 0.0
            else:
                ct = last_command_end - first_command_start
        scons_time = total_time - sconscript_time - ct
        print("Total build time: %f seconds"%total_time)
        print("Total SConscript file execution time: %f seconds"%sconscript_time)
        print("Total SCons execution time: %f seconds"%scons_time)
        print("Total command execution time: %f seconds"%ct)
    sys.exit(exit_status)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
/Demomgr-1.10.1-py3-none-any.whl/demomgr/demo_info.py | import os
import re
from demomgr.helpers import getstreakpeaks
# Matches one line of a demo event log, e.g.:
#   [2020/01/01 12:00] Killstreak 3 ("demoname" at 1234)
# Capture groups: date, event type, value, demo name, tick (see GROUP).
RE_LINE = re.compile(
    r'\[(\d{4}/\d\d/\d\d \d\d:\d\d)\] (Killstreak|Bookmark)'
    r' (.*) \("(.*)" at (\d+)\)'
)
class GROUP:
    """Indices of the capture groups produced by RE_LINE."""
    DATE = 1
    TYPE = 2
    VALUE = 3
    DEMO = 4
    TICK = 5
class DemoEvent():
    """A single demo event (killstreak or bookmark).

    The three fields are accessible both as attributes and via integer
    indices 0..2 mapping to `value`, `tick`, `time`.
    Instances are ordered by `tick`.
    """

    __slots__ = ("value", "tick", "time")

    def __init__(self, value, tick, time):
        """
        value: Value of the event.
        tick <Int>: Tick the event occurred at.
        time <str/None>: Time the event was recorded at, in the format
            `YYYY/mm/dd HH:MM`, or None when it could not be determined.
            Realistically it is only present for events read from
            `_events.txt`.
        """
        self.value = value
        self.tick = tick
        self.time = time

    def __setitem__(self, idx, new_value):
        # __slots__ doubles as the index -> attribute name mapping.
        setattr(self, self.__slots__[idx], new_value)

    def __getitem__(self, idx):
        return getattr(self, self.__slots__[idx])

    def __iter__(self):
        return iter((self.value, self.tick, self.time))

    def __gt__(self, other):
        if not isinstance(other, DemoEvent):
            return NotImplemented
        return self.tick > other.tick

    def __lt__(self, other):
        if not isinstance(other, DemoEvent):
            return NotImplemented
        return self.tick < other.tick
class DemoInfo():
    """Container class to hold event information regarding a demo."""

    __slots__ = ("demo_name", "killstreaks", "killstreak_peaks", "bookmarks")

    def __init__(self, demo_name, killstreaks, bookmarks):
        """
        demo_name: The name of the demo that is described in the
            logchunk. (str)
        killstreaks <List>: Killstreaks as a list of DemoEvents.
        bookmarks <List>: Bookmarks as a list of DemoEvents.

        NOTE: `killstreak_peaks` will be invalidated if direct changes
        to `killstreaks` are made. Use `set_killstreaks` instead.
        """
        self.demo_name = demo_name
        self.killstreaks = killstreaks
        # Cached per-run streak peaks; kept in sync via set_killstreaks().
        self.killstreak_peaks = getstreakpeaks(killstreaks)
        self.bookmarks = bookmarks

    def is_empty(self):
        """
        Determines whether no events are stored in the DemoInfo.
        """
        return not (self.killstreaks or self.bookmarks)

    @classmethod
    def from_json(cls, json_data, demo_name):
        """
        Parses the given data which should be retrieved from a standard format
        JSON file and turns it into a DemoInfo instance.

        json_data: Dict converted from a JSON file as the source engine writes it.
        demo_name: Name of associated demo file; i.e. "foo.dem". (str)

        May raise: KeyError, ValueError, TypeError.
        """
        cur_bm = []
        cur_ks = []
        for k in json_data["events"]:
            if k["name"] == "Killstreak":
                cur_ks.append(DemoEvent(int(k["value"]), int(k["tick"]), None))
            elif k["name"] == "Bookmark":
                # Bookmark values stay strings; only ticks are numeric.
                cur_bm.append(DemoEvent(k["value"], int(k["tick"]), None))
        return cls(demo_name, cur_ks, cur_bm)

    def to_json(self):
        """
        Converts the demo info to a JSON-compliant dict that can be
        written to a file and be reconstructed to an equal DemoInfo
        object by using `DemoInfo.from_json`.

        The event dicts will be ordered ascendingly by tick, just like
        they are produced by Team Fortress 2.

        May raise: ValueError if data has been screwed up.
        """
        events = [{"name": "Killstreak", "value": v, "tick": t} for v, t, _ in self.killstreaks]
        events += [{"name": "Bookmark", "value": v, "tick": t} for v, t, _ in self.bookmarks]
        return {"events": sorted(events, key = lambda e: int(e["tick"]))}

    @classmethod
    def from_raw_logchunk(cls, in_chk):
        """
        Takes a handle_events.RawLogchunk and converts it into DemoInfo.

        in_chk : RawLogchunk to process, as returned by an EventReader.

        May raise: ValueError on bad logchunks.
        """
        loglines = in_chk.content.split("\n")
        if not loglines:
            raise ValueError("Logchunks may not be empty.")
        # The demo name is taken from the first line; all lines must match.
        regres = RE_LINE.search(loglines[0])
        if not regres:
            raise ValueError("Regex match failed, Logchunk malformed.")
        demo = regres[GROUP.DEMO] + ".dem"
        killstreaks = []
        bookmarks = []
        for line in loglines:
            regres = RE_LINE.search(line)
            if regres is None:
                raise ValueError("Regex match failed, Logchunk malformed.")
            line_type = regres[GROUP.TYPE]
            value = regres[GROUP.VALUE]
            tick = int(regres[GROUP.TICK])
            if line_type == "Killstreak":
                killstreaks.append(DemoEvent(int(value), tick, regres[GROUP.DATE]))
            elif line_type == "Bookmark":
                bookmarks.append(DemoEvent(value, tick, regres[GROUP.DATE]))
        return cls(demo, killstreaks, bookmarks)

    def to_logchunk(self):
        """
        Returns a string that can be written to an `_events.txt` file
        and will be interpretable as a valid logchunk by the events
        reader afterwards.

        All info is sorted by tick.

        May raise: ValueError if information has been messed up.
        """
        demo_name = os.path.splitext(self.demo_name)[0]
        to_write = [("Killstreak", value, tick, date) for value, tick, date in self.killstreaks]
        to_write.extend(("Bookmark", value, tick, date) for value, tick, date in self.bookmarks)
        to_write.sort(key = lambda t: t[2])
        return "\n".join(
            f'[{date}] {type_} {value} ("{demo_name}" at {tick})'
            for type_, value, tick, date in to_write
        )

    def set_killstreaks(self, killstreaks):
        """Set `killstreaks` and recompute the cached `killstreak_peaks`."""
        # Bugfix: removed a stray "| PypiClean" artifact that had been
        # fused onto the final statement of this method.
        self.killstreaks = killstreaks
        self.killstreak_peaks = getstreakpeaks(killstreaks)
/CartiMorph_nnUNet-1.7.14.tar.gz/CartiMorph_nnUNet-1.7.14/CartiMorph_nnUNet/experiment_planning/utils.py |
import json
import os
import pickle
import shutil
from collections import OrderedDict
from multiprocessing import Pool
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import join, isdir, maybe_mkdir_p, subfiles, subdirs, isfile
from CartiMorph_nnUNet.configuration import default_num_threads
from CartiMorph_nnUNet.experiment_planning.DatasetAnalyzer import DatasetAnalyzer
from CartiMorph_nnUNet.experiment_planning.common_utils import split_4d_nifti
from CartiMorph_nnUNet.paths import nnUNet_raw_data, nnUNet_cropped_data, preprocessing_output_dir
from CartiMorph_nnUNet.preprocessing.cropping import ImageCropper
def split_4d(input_folder, num_processes=default_num_threads, overwrite_task_output_id=None):
    """Split an MSD task's 4D niftis into per-modality 3D files under nnUNet_raw_data.

    input_folder: path to a "TaskXX_name" Medical Segmentation Decathlon
        folder containing imagesTr, labelsTr and dataset.json.
    num_processes: number of worker processes used for the splitting.
    overwrite_task_output_id: task id used for the output folder name;
        defaults to the input task's id.
    """
    assert isdir(join(input_folder, "imagesTr")) and isdir(join(input_folder, "labelsTr")) and \
           isfile(join(input_folder, "dataset.json")), \
        "The input folder must be a valid Task folder from the Medical Segmentation Decathlon with at least the " \
        "imagesTr and labelsTr subfolders and the dataset.json file"
    while input_folder.endswith("/"):
        input_folder = input_folder[:-1]
    full_task_name = input_folder.split("/")[-1]
    assert full_task_name.startswith("Task"), "The input folder must point to a folder that starts with TaskXX_"
    first_underscore = full_task_name.find("_")
    assert first_underscore == 6, "Input folder start with TaskXX with XX being a 3-digit id: 00, 01, 02 etc"
    input_task_id = int(full_task_name[4:6])
    if overwrite_task_output_id is None:
        overwrite_task_output_id = input_task_id
    task_name = full_task_name[7:]
    output_folder = join(nnUNet_raw_data, "Task%03.0d_" % overwrite_task_output_id + task_name)
    # Always start from a clean output folder.
    if isdir(output_folder):
        shutil.rmtree(output_folder)
    files = []
    output_dirs = []
    maybe_mkdir_p(output_folder)
    for subdir in ["imagesTr", "imagesTs"]:
        curr_out_dir = join(output_folder, subdir)
        if not isdir(curr_out_dir):
            os.mkdir(curr_out_dir)
        curr_dir = join(input_folder, subdir)
        nii_files = [join(curr_dir, i) for i in os.listdir(curr_dir) if i.endswith(".nii.gz")]
        nii_files.sort()
        for n in nii_files:
            files.append(n)
            output_dirs.append(curr_out_dir)
    shutil.copytree(join(input_folder, "labelsTr"), join(output_folder, "labelsTr"))
    # Split all 4D niftis in parallel; files/output_dirs pair up 1:1.
    p = Pool(num_processes)
    p.starmap(split_4d_nifti, zip(files, output_dirs))
    p.close()
    p.join()
    shutil.copy(join(input_folder, "dataset.json"), output_folder)
def create_lists_from_splitted_dataset(base_folder_splitted):
    """Build per-case file lists (modalities + label) from dataset.json.

    Returns (lists, modality_dict): each entry of `lists` is
    [modality_0, ..., modality_n, label] file paths for one training
    case, and modality_dict maps the int modality index to its name.
    """
    lists = []
    json_file = join(base_folder_splitted, "dataset.json")
    with open(json_file) as jsn:
        d = json.load(jsn)
        training_files = d['training']
    num_modalities = len(d['modality'].keys())
    for tr in training_files:
        cur_pat = []
        for mod in range(num_modalities):
            # [:-7] strips ".nii.gz"; modality index is appended as _XXXX.
            cur_pat.append(join(base_folder_splitted, "imagesTr", tr['image'].split("/")[-1][:-7] +
                                "_%04.0d.nii.gz" % mod))
        cur_pat.append(join(base_folder_splitted, "labelsTr", tr['label'].split("/")[-1]))
        lists.append(cur_pat)
    return lists, {int(i): d['modality'][str(i)] for i in d['modality'].keys()}
def create_lists_from_splitted_dataset_folder(folder):
    """
    Builds one list of modality files per case found in `folder`.

    does not rely on dataset.json
    :param folder: folder containing `<case>_<modality>.nii.gz` files
    :return: list of per-case file path lists
    """
    caseIDs = get_caseIDs_from_splitted_dataset_folder(folder)
    list_of_lists = []
    for f in caseIDs:
        list_of_lists.append(subfiles(folder, prefix=f, suffix=".nii.gz", join=True, sort=True))
    return list_of_lists
def get_caseIDs_from_splitted_dataset_folder(folder):
    """Return the unique case identifiers found in *folder*.

    All files must be .nii.gz with a 4-digit modality index, i.e.
    `<caseID>_XXXX.nii.gz`; the trailing 12 characters are stripped
    and duplicates collapsed.
    """
    nii_files = subfiles(folder, suffix=".nii.gz", join=False)
    # "_XXXX.nii.gz" is exactly 12 characters long.
    return np.unique([name[:-12] for name in nii_files])
def crop(task_string, override=False, num_threads=default_num_threads):
    """Crop all cases of a task to their nonzero region into nnUNet_cropped_data.

    override: if True, delete any previously cropped output first.
    """
    cropped_out_dir = join(nnUNet_cropped_data, task_string)
    maybe_mkdir_p(cropped_out_dir)
    if override and isdir(cropped_out_dir):
        # Wipe and recreate for a clean re-run.
        shutil.rmtree(cropped_out_dir)
        maybe_mkdir_p(cropped_out_dir)
    splitted_4d_output_dir_task = join(nnUNet_raw_data, task_string)
    lists, _ = create_lists_from_splitted_dataset(splitted_4d_output_dir_task)
    imgcrop = ImageCropper(num_threads, cropped_out_dir)
    imgcrop.run_cropping(lists, overwrite_existing=override)
    shutil.copy(join(nnUNet_raw_data, task_string, "dataset.json"), cropped_out_dir)
def analyze_dataset(task_string, override=False, collect_intensityproperties=True, num_processes=default_num_threads):
    """Run the DatasetAnalyzer on a task's cropped data (results cached on disk by the analyzer)."""
    cropped_out_dir = join(nnUNet_cropped_data, task_string)
    dataset_analyzer = DatasetAnalyzer(cropped_out_dir, overwrite=override, num_processes=num_processes)
    _ = dataset_analyzer.analyze_dataset(collect_intensityproperties)
def plan_and_preprocess(task_string, processes_lowres=default_num_threads, processes_fullres=3, no_preprocessing=False):
    """Run the 3D and 2D experiment planners (and optionally preprocessing) for a task.

    task_string: the "TaskXXX_name" identifier.
    processes_lowres / processes_fullres: worker counts passed to the
        3D planner's preprocessing stages.
    no_preprocessing: if True, only plan; skip preprocessing and the
        per-slice class info generation.
    """
    # Consistency fix: import the planners from this package
    # (CartiMorph_nnUNet) rather than the upstream `nnunet` distribution,
    # matching the module-level imports of this file.
    from CartiMorph_nnUNet.experiment_planning.experiment_planner_baseline_2DUNet import ExperimentPlanner2D
    from CartiMorph_nnUNet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
    preprocessing_output_dir_this_task_train = join(preprocessing_output_dir, task_string)
    cropped_out_dir = join(nnUNet_cropped_data, task_string)
    maybe_mkdir_p(preprocessing_output_dir_this_task_train)
    shutil.copy(join(cropped_out_dir, "dataset_properties.pkl"), preprocessing_output_dir_this_task_train)
    shutil.copy(join(nnUNet_raw_data, task_string, "dataset.json"), preprocessing_output_dir_this_task_train)
    exp_planner = ExperimentPlanner(cropped_out_dir, preprocessing_output_dir_this_task_train)
    exp_planner.plan_experiment()
    if not no_preprocessing:
        exp_planner.run_preprocessing((processes_lowres, processes_fullres))
    exp_planner = ExperimentPlanner2D(cropped_out_dir, preprocessing_output_dir_this_task_train)
    exp_planner.plan_experiment()
    if not no_preprocessing:
        exp_planner.run_preprocessing(processes_fullres)
    # write which class is in which slice to all training cases (required to speed up 2D Dataloader)
    # This is done for all data so that if we wanted to use them with 2D we could do so
    if not no_preprocessing:
        p = Pool(default_num_threads)
        # if there is more than one my_data_identifier (different brnaches) then this code will run for all of them if
        # they start with the same string. not problematic, but not pretty
        stages = [i for i in subdirs(preprocessing_output_dir_this_task_train, join=True, sort=True)
                  if i.split("/")[-1].find("stage") != -1]
        for s in stages:
            print(s.split("/")[-1])
            list_of_npz_files = subfiles(s, True, None, ".npz", True)
            list_of_pkl_files = [i[:-4]+".pkl" for i in list_of_npz_files]
            all_classes = []
            for pk in list_of_pkl_files:
                with open(pk, 'rb') as f:
                    props = pickle.load(f)
                all_classes_tmp = np.array(props['classes'])
                # Ignore the "outside nonzero region" marker class (-1).
                all_classes.append(all_classes_tmp[all_classes_tmp >= 0])
            p.map(add_classes_in_slice_info, zip(list_of_npz_files, list_of_pkl_files, all_classes))
        p.close()
        p.join()
def add_classes_in_slice_info(args):
    """
    We need this for 2D dataloader with oversampling. As of now it will detect slices that contain specific classes
    at run time, meaning it needs to iterate over an entire patient just to extract one slice. That is obviously bad,
    so we are doing this once beforehand and just give the dataloader the info it needs in the patients pkl file.

    args: tuple of (npz_file, pkl_file, all_classes). The pkl file is
    updated in place with the keys 'classes_in_slice_per_axis' and
    'number_of_voxels_per_class'.
    """
    # Bugfix: removed a stray "| PypiClean" artifact that had been fused
    # onto the final pickle.dump() statement.
    npz_file, pkl_file, all_classes = args
    # The segmentation is stored as the last channel of the 'data' array.
    seg_map = np.load(npz_file)['data'][-1]
    with open(pkl_file, 'rb') as f:
        props = pickle.load(f)
    #if props.get('classes_in_slice_per_axis') is not None:
    print(pkl_file)
    # this will be a dict of dict where the first dict encodes the axis along which a slice is extracted in its keys.
    # The second dict (value of first dict) will have all classes as key and as values a list of all slice ids that
    # contain this class
    classes_in_slice = OrderedDict()
    for axis in range(3):
        other_axes = tuple([i for i in range(3) if i != axis])
        classes_in_slice[axis] = OrderedDict()
        for c in all_classes:
            valid_slices = np.where(np.sum(seg_map == c, axis=other_axes) > 0)[0]
            classes_in_slice[axis][c] = valid_slices
    number_of_voxels_per_class = OrderedDict()
    for c in all_classes:
        number_of_voxels_per_class[c] = np.sum(seg_map == c)
    props['classes_in_slice_per_axis'] = classes_in_slice
    props['number_of_voxels_per_class'] = number_of_voxels_per_class
    with open(pkl_file, 'wb') as f:
        pickle.dump(props, f)
/Electrum-VTC-2.9.3.3.tar.gz/Electrum-VTC-2.9.3.3/gui/vtc/history_list.py |
import webbrowser
from util import *
from electrum_vtc.i18n import _
from electrum_vtc.util import block_explorer_URL, format_satoshis, format_time
from electrum_vtc.plugins import run_hook
from electrum_vtc.util import timestamp_to_datetime, profiler
# Status icon file names, indexed by the integer status returned by
# wallet.get_tx_status() (used in HistoryList.on_update / update_item).
TX_ICONS = [
    "warning.png",
    "warning.png",
    "warning.png",
    "unconfirmed.png",
    "unconfirmed.png",
    "clock1.png",
    "clock2.png",
    "clock3.png",
    "clock4.png",
    "clock5.png",
    "confirmed.png",
]
class HistoryList(MyTreeWidget):
    """Tree widget listing the wallet's transaction history.

    Columns: icon, txid (hidden), date/status, description, amount,
    balance, plus two fiat columns when fiat history display is enabled.
    """
    filter_columns = [2, 3, 4]  # Date, Description, Amount

    def __init__(self, parent=None):
        MyTreeWidget.__init__(self, parent, self.create_menu, [], 3)
        self.refresh_headers()
        # Column 1 holds the raw txid; it is only used for lookups.
        self.setColumnHidden(1, True)

    def refresh_headers(self):
        """Rebuild the column headers; adds fiat columns when enabled."""
        headers = ['', '', _('Date'), _('Description') , _('Amount'), _('Balance')]
        fx = self.parent.fx
        if fx and fx.show_history():
            headers.extend(['%s '%fx.ccy + _('Amount'), '%s '%fx.ccy + _('Balance')])
        self.update_headers(headers)

    def get_domain(self):
        '''Replaced in address_dialog.py'''
        return self.wallet.get_addresses()

    @profiler
    def on_update(self):
        """Rebuild the whole list from wallet history, preserving selection."""
        self.wallet = self.parent.wallet
        h = self.wallet.get_history(self.get_domain())
        item = self.currentItem()
        current_tx = item.data(0, Qt.UserRole).toString() if item else None
        self.clear()
        fx = self.parent.fx
        if fx: fx.history_used_spot = False
        for h_item in h:
            tx_hash, height, conf, timestamp, value, balance = h_item
            status, status_str = self.wallet.get_tx_status(tx_hash, height, conf, timestamp)
            has_invoice = self.wallet.invoices.paid.get(tx_hash)
            icon = QIcon(":icons/" + TX_ICONS[status])
            v_str = self.parent.format_amount(value, True, whitespaces=True)
            balance_str = self.parent.format_amount(balance, whitespaces=True)
            label = self.wallet.get_label(tx_hash)
            entry = ['', tx_hash, status_str, label, v_str, balance_str]
            if fx and fx.show_history():
                # Unconfirmed txs are valued at the current spot price.
                date = timestamp_to_datetime(time.time() if conf <= 0 else timestamp)
                for amount in [value, balance]:
                    text = fx.historical_value_str(amount, date)
                    entry.append(text)
            item = QTreeWidgetItem(entry)
            item.setIcon(0, icon)
            item.setToolTip(0, str(conf) + " confirmation" + ("s" if conf != 1 else ""))
            if has_invoice:
                item.setIcon(3, QIcon(":icons/seal"))
            for i in range(len(entry)):
                if i>3:
                    item.setTextAlignment(i, Qt.AlignRight)
                if i!=2:
                    item.setFont(i, QFont(MONOSPACE_FONT))
                item.setTextAlignment(i, Qt.AlignVCenter)
            if value < 0:
                # Outgoing transactions are shown in red.
                item.setForeground(3, QBrush(QColor("#BC1E1E")))
                item.setForeground(4, QBrush(QColor("#BC1E1E")))
            if tx_hash:
                item.setData(0, Qt.UserRole, tx_hash)
            self.insertTopLevelItem(0, item)
            if current_tx == tx_hash:
                self.setCurrentItem(item)

    def on_doubleclick(self, item, column):
        """Edit the label when permitted, else open the tx details dialog."""
        if self.permit_edit(item, column):
            super(HistoryList, self).on_doubleclick(item, column)
        else:
            tx_hash = str(item.data(0, Qt.UserRole).toString())
            tx = self.wallet.transactions.get(tx_hash)
            self.parent.show_transaction(tx)

    def update_labels(self):
        """Refresh the description column from the wallet's labels."""
        root = self.invisibleRootItem()
        child_count = root.childCount()
        for i in range(child_count):
            item = root.child(i)
            txid = str(item.data(0, Qt.UserRole).toString())
            label = self.wallet.get_label(txid)
            item.setText(3, label)

    def update_item(self, tx_hash, height, conf, timestamp):
        """Update the status icon and text of a single transaction row."""
        status, status_str = self.wallet.get_tx_status(tx_hash, height, conf, timestamp)
        icon = QIcon(":icons/" + TX_ICONS[status])
        items = self.findItems(tx_hash, Qt.UserRole|Qt.MatchContains|Qt.MatchRecursive, column=1)
        if items:
            item = items[0]
            item.setIcon(0, icon)
            item.setText(2, status_str)

    def create_menu(self, position):
        """Build and show the context menu for the row under *position*."""
        self.selectedIndexes()
        item = self.currentItem()
        if not item:
            return
        column = self.currentColumn()
        tx_hash = str(item.data(0, Qt.UserRole).toString())
        if not tx_hash:
            return
        # Bugfix: compare ints with ==, not identity ("is").
        if column == 0:
            column_title = "ID"
            column_data = tx_hash
        else:
            column_title = self.headerItem().text(column)
            column_data = item.text(column)
        tx_URL = block_explorer_URL(self.config, 'tx', tx_hash)
        height, conf, timestamp = self.wallet.get_tx_height(tx_hash)
        tx = self.wallet.transactions.get(tx_hash)
        is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
        is_unconfirmed = height <= 0
        pr_key = self.wallet.invoices.paid.get(tx_hash)
        menu = QMenu()
        menu.addAction(_("Copy %s")%column_title, lambda: self.parent.app.clipboard().setText(column_data))
        if column in self.editable_columns:
            menu.addAction(_("Edit %s")%column_title, lambda: self.editItem(item, column))
        menu.addAction(_("Details"), lambda: self.parent.show_transaction(tx))
        if is_unconfirmed and tx:
            # Replace-by-fee is only offered for our own non-final txs;
            # otherwise try child-pays-for-parent.
            rbf = is_mine and not tx.is_final()
            if rbf:
                menu.addAction(_("Increase fee"), lambda: self.parent.bump_fee_dialog(tx))
            else:
                child_tx = self.wallet.cpfp(tx, 0)
                if child_tx:
                    menu.addAction(_("Child pays for parent"), lambda: self.parent.cpfp(tx, child_tx))
        if pr_key:
            menu.addAction(QIcon(":icons/seal"), _("View invoice"), lambda: self.parent.show_invoice(pr_key))
        if tx_URL:
            menu.addAction(_("View on block explorer"), lambda: webbrowser.open(tx_URL))
        menu.exec_(self.viewport().mapToGlobal(position))
/Kr0nOs-3.4.1.tar.gz/Kr0nOs-3.4.1/kronbot/core/i18n.py | import contextlib
import functools
import io
import os
from pathlib import Path
from typing import Callable, Dict, Optional, Union
import babel.localedata
from babel.core import Locale
# This import to be down here to avoid circular import issues.
# This will be cleaned up at a later date
# noinspection PyPep8
from . import commands
# Public API of this module. Consistency fix: the regional-format
# accessors are public counterparts of the locale accessors and were
# missing from this list.
__all__ = [
    "get_locale",
    "set_locale",
    "get_regional_format",
    "set_regional_format",
    "reload_locales",
    "cog_i18n",
    "Translator",
    "get_babel_locale",
    "get_babel_regional_format",
]
# Currently active locale / regional format (module-level state).
_current_locale = "en-US"
_current_regional_format = None
# States of the small .po parser state machine used by _parse().
WAITING_FOR_MSGID = 1
IN_MSGID = 2
WAITING_FOR_MSGSTR = 3
IN_MSGSTR = 4
# Line prefixes of gettext msgid/msgstr entries (up to the opening quote).
MSGID = 'msgid "'
MSGSTR = 'msgstr "'
# Every Translator instance ever created; reload_locales() refreshes each.
_translators = []
def get_locale() -> str:
    """Return the currently active locale string (e.g. "en-US")."""
    return _current_locale


def set_locale(locale: str) -> None:
    """Set the active locale and reload all registered translators."""
    global _current_locale
    _current_locale = locale
    reload_locales()


def get_regional_format() -> str:
    """Return the regional format, falling back to the locale when unset."""
    if _current_regional_format is None:
        return _current_locale
    return _current_regional_format


def set_regional_format(regional_format: Optional[str]) -> None:
    """Set the regional format; None means "follow the locale"."""
    global _current_regional_format
    _current_regional_format = regional_format


def reload_locales() -> None:
    """Re-read translation files for every registered Translator."""
    for translator in _translators:
        translator.load_translations()
def _parse(translation_file: io.TextIOWrapper) -> Dict[str, str]:
    """
    Custom gettext parsing of translation files.

    Parameters
    ----------
    translation_file : io.TextIOWrapper
        An open text file containing translations.

    Returns
    -------
    Dict[str, str]
        A dict mapping the original strings to their translations. Empty
        translated strings are omitted.
    """
    # Idiom fix: state constants are plain ints, so they are compared
    # with == instead of the identity operator "is".
    step = None
    untranslated = ""
    translated = ""
    translations = {}
    for line in translation_file:
        line = line.strip()
        if line.startswith(MSGID):
            # New msgid
            if step == IN_MSGSTR and translated:
                # Store the last translation
                translations[_unescape(untranslated)] = _unescape(translated)
            step = IN_MSGID
            untranslated = line[len(MSGID) : -1]
        elif line.startswith('"') and line.endswith('"'):
            if step == IN_MSGID:
                # Line continuing on from msgid
                untranslated += line[1:-1]
            elif step == IN_MSGSTR:
                # Line continuing on from msgstr
                translated += line[1:-1]
        elif line.startswith(MSGSTR):
            # New msgstr
            step = IN_MSGSTR
            translated = line[len(MSGSTR) : -1]
    if step == IN_MSGSTR and translated:
        # Store the final translation
        translations[_unescape(untranslated)] = _unescape(translated)
    return translations
def _unescape(string):
string = string.replace(r"\\", "\\")
string = string.replace(r"\t", "\t")
string = string.replace(r"\r", "\r")
string = string.replace(r"\n", "\n")
string = string.replace(r"\"", '"')
return string
def get_locale_path(cog_folder: Path, extension: str) -> Path:
    """Return the path of a cog's localization file for the current locale.

    The returned file is not guaranteed to exist.

    :param Path cog_folder: The cog folder that we want localizations for.
    :param str extension: Extension of localization files.
    """
    filename = f"{get_locale()}.{extension}"
    return cog_folder / "locales" / filename
class Translator(Callable[[str], str]):
    """Function to get translated strings at runtime."""
    # NOTE(review): subclassing the parameterized Callable alias works but
    # mainly documents the call signature — confirm it is intentional.

    def __init__(self, name: str, file_location: Union[str, Path, os.PathLike]):
        """
        Initializes an internationalization object.

        Parameters
        ----------
        name : str
            Your cog name.
        file_location : `str` or `pathlib.Path`
            This should always be ``__file__`` otherwise your localizations
            will not load.
        """
        self.cog_folder = Path(file_location).resolve().parent
        self.cog_name = name
        self.translations = {}
        # Register globally so reload_locales() can refresh this instance.
        _translators.append(self)
        self.load_translations()

    def __call__(self, untranslated: str) -> str:
        """Translate the given string.

        This will look for the string in the translator's :code:`.pot` file,
        with respect to the current locale.
        """
        try:
            return self.translations[untranslated]
        except KeyError:
            # No translation available: fall back to the input string.
            return untranslated

    def load_translations(self):
        """
        Loads the current translations.
        """
        self.translations = {}
        locale_path = get_locale_path(self.cog_folder, "po")
        # Missing translation files are not an error; just keep an empty map.
        with contextlib.suppress(IOError, FileNotFoundError):
            with locale_path.open(encoding="utf-8") as file:
                self._parse(file)

    def _parse(self, translation_file):
        # Merge the entries from the file into the in-memory mapping.
        self.translations.update(_parse(translation_file))

    def _add_translation(self, untranslated, translated):
        # Empty translations are dropped, matching _parse() behaviour.
        untranslated = _unescape(untranslated)
        translated = _unescape(translated)
        if translated:
            self.translations[untranslated] = translated
@functools.lru_cache()
def _get_babel_locale(red_locale: str) -> babel.core.Locale:
    """Best-effort conversion of a bot locale string to a babel Locale.

    Fallback chain: exact parse ("_" separator) -> parse with "-"
    separator -> babel locale negotiation -> "en_US".
    """
    supported_locales = babel.localedata.locale_identifiers()
    try:  # Handles cases where kron_locale is already Babel supported
        babel_locale = Locale(*babel.parse_locale(red_locale))
    except (ValueError, babel.core.UnknownLocaleError):
        try:
            babel_locale = Locale(*babel.parse_locale(red_locale, sep="-"))
        except (ValueError, babel.core.UnknownLocaleError):
            # ValueError is Raised by `parse_locale` when an invalid Locale is given to it
            # Lets handle it silently and default to "en_US"
            try:
                # Try to find a babel locale that's close to the one used by red
                babel_locale = Locale(Locale.negotiate([red_locale], supported_locales, sep="-"))
            except (ValueError, TypeError, babel.core.UnknownLocaleError):
                # If we fail to get a close match we will then default to "en_US"
                babel_locale = Locale("en", "US")
    return babel_locale
def get_babel_locale(locale: Optional[str] = None) -> babel.core.Locale:
    """Function to convert a locale to a `babel.core.Locale`.

    Parameters
    ----------
    locale : Optional[str]
        The locale to convert, if not specified it defaults to the bot's locale.

    Returns
    -------
    babel.core.Locale
        The babel locale object.
    """
    if locale is None:
        locale = get_locale()
    return _get_babel_locale(locale)


def get_babel_regional_format(regional_format: Optional[str] = None) -> babel.core.Locale:
    """Function to convert a regional format to a `babel.core.Locale`.

    If ``regional_format`` parameter is passed, this behaves the same as `get_babel_locale`.

    Parameters
    ----------
    regional_format : Optional[str]
        The regional format to convert, if not specified it defaults to the bot's regional format.

    Returns
    -------
    babel.core.Locale
        The babel locale object.
    """
    if regional_format is None:
        regional_format = get_regional_format()
    return _get_babel_locale(regional_format)
def cog_i18n(translator: Translator):
    """Get a class decorator to link the translator to this cog."""
    # Bugfix: removed a stray "| PypiClean" artifact fused onto the
    # final return statement.

    def decorator(cog_class: type):
        cog_class.__translator__ = translator
        # Attach the translator to every command/group defined on the cog
        # so their help strings can be translated.
        for name, attr in cog_class.__dict__.items():
            if isinstance(attr, (commands.Group, commands.Command)):
                attr.translator = translator
                setattr(cog_class, name, attr)
        return cog_class

    return decorator
/Demomgr-1.10.1-py3-none-any.whl/demomgr/context_menus.py | import tkinter as tk
def _generate_readonly_cmd(w):
cmd = (
("command", {"label": "Copy", "command": lambda: w.event_generate("<<Copy>>")}, 2),
("command", {"label": "Select all", "command": lambda: w.event_generate("<<SelectAll>>")}),
)
return cmd
def _generate_normal_cmd(w):
cmd = (
("command", {"label": "Cut", "command": lambda: w.event_generate("<<Cut>>")}),
("command", {"label": "Clear", "command": lambda: w.event_generate("<<Clear>>")}),
("command", {"label": "Paste", "command": lambda: w.event_generate("<<Paste>>")}),
)
return cmd
_existing_menu = None
def _remove_existing_menu():
    """Unpost and destroy the currently posted menu, if any."""
    global _existing_menu
    if _existing_menu is not None:
        # It's possible for the existing menu to be destroyed when its
        # root widgets are, e.g. when it was created in a dialog and the dialog
        # is closed. Just eat the error up then, seems fine.
        try:
            _existing_menu.unpost()
            _existing_menu.destroy()
        except tk.TclError as e:
            pass
        _existing_menu = None
class PopupMenu(tk.Menu):
    """Base popup menu. Will immediatedly focus itself when posted."""

    def __init__(self, parent, elems, *args, **kw):
        """
        Passes all arguments to a tk.Menu, except for:
        parent <tkinter.Widget>: Parent widget of the menu.
        elems: Elements that will be added to the menu, in the form of
            ((<str>, <dict>, [<int>]), ...) where the element type is [0] and
            the related options the dict in [1]. If [2] is present, the entry
            will be inserted into the menu at the given position, if not, it
            will be appended.
        """
        super().__init__(parent, *args, tearoff = 0, **kw)
        for elem in elems:
            if len(elem) > 2:
                self.insert(elem[2], elem[0], **elem[1])
            else:
                self.add(elem[0], **elem[1])
        # Allow closing the menu with Escape.
        def f(e, self=self):
            self.unpost()
        self.bind("<Escape>", f)

    def post(self, x, y):
        """Post the menu at screen position (x, y), replacing any
        previously posted menu and registering itself as the active one."""
        global _existing_menu
        _remove_existing_menu()
        super().post(x, y)
        _existing_menu = self
def multiframelist_cb(event, mfl, demo_ops):
    """
    Very specific and hardcoded callback that opens a popup menu on a
    MultiframeList.

    Arguments:
    event: The tkinter event that triggered the callback.
    mfl: The MultiframeList that got clicked.
    demo_ops: The demo operations just like in `MainApp.__init__`.
    """
    # Only offer operations applicable to the current selection size.
    add_elems = [
        ("command", {"label": s, "command": cmd})
        for s, cmd, _, fit_for_sel in demo_ops
        if fit_for_sel(len(mfl.selection))
    ]
    men = PopupMenu(mfl, add_elems)
    men.post(*mfl.get_last_click())
    men.focus()
def entry_cb(event):
    """
    Callback that is designed to open a menu over widgets
    such as Entries, Comboboxes and Spinboxes and contains options
    for copying, pasting etc.
    """
    w = event.widget
    x = w.winfo_rootx() + event.x
    y = w.winfo_rooty() + event.y
    add_elems = []
    # The widget's state decides which entries apply: editable widgets
    # get cut/clear/paste plus the read-only copy/select-all entries.
    w_state = str(w.configure("state")[-1])
    if w_state != "disabled":
        if w_state != "readonly":
            add_elems.extend(_generate_normal_cmd(w))
        add_elems.extend(_generate_readonly_cmd(w))
    # Nothing to show for disabled widgets.
    if not add_elems:
        return
    men = PopupMenu(w, add_elems)
    men.post(x, y)
    men.focus()
def _remove_existing_menu_swallow_event(e):
    # Event-handler wrapper: discards the tkinter event argument.
    _remove_existing_menu()
def decorate_root_window(win):
    """
    On linux (in some environments anyways), menus remain and
    apparently have to be unposted manually, because why make things
    easy when you can add more pain?

    This function registers a bunch of event callbacks to top-level
    windows that will fire on clicks in appropriate regions and then
    close possibly opened context menus.
    """
    # Cleanup: these event sequences contain no placeholders, so the
    # f-string prefixes were dropped; also removed a trailing
    # "| PypiClean" artifact fused onto the last bind call.
    win.bind("<Button>", _remove_existing_menu_swallow_event)
    win.bind("<KeyPress>", _remove_existing_menu_swallow_event)
    win.bind("<Unmap>", _remove_existing_menu_swallow_event)
/DataPreprocessing4Analytics-2023.0.3-py3-none-any.whl/DataPreprocessingAnalytics4all4ever/__init__.py | import re
import os
import csv
import sys
import nltk
import spacy
import random
import string
import unicodedata
import math as m
import numpy as np
import pandas as pd
import dateutil as du
import datetime as dt
import seaborn as sns
import calendar as cal
from textblob import Word
import statistics as stats
from bs4 import BeautifulSoup
from textblob import TextBlob
import matplotlib.pyplot as plt
from nltk.stem import PorterStemmer
from datetime import timedelta as td
from pandas.tseries.offsets import *
from spacy.lang.en.stop_words import STOP_WORDS
from nltk.stem.snowball import SnowballStemmer
# Display pandas floats with five decimal places throughout the package.
pd.options.display.float_format = '{:.5f}'.format
# Removed the import-time print(os.getcwd()): importing a library module
# should not write to stdout.
import warnings
warnings.filterwarnings('ignore')
# import data_processing as DP
# import datetime
# from datetime import timedelta as TD
#
# start_time = datetime.datetime.now()
# print("start_time: ", start_time)
#
#
# import os
# import pandas as pd
# import numpy as np
# import sys
#
# #from google.colab import drive
# #drive.mount('/content/drive')
# #import sys
# #sys.path.append('/content/drive/My Drive/Colab Notebooks/')
#
# try:
# #Google Drive libraries
# from google.colab import drive
# drive.mount('/content/drive')
#
# #Folders
# Curret_Folder = os.getcwd()
# Dataset_Folder = '/content/drive/My Drive/Datasets'
# ML_Folder = '/content/drive/My Drive/Colab Notebooks'
# #print('Dataset_Folder Path:',Dataset_Folder)
# #print('ML_Folder Path:',ML_Folder)
#
# #Data Loading
# os.chdir(Dataset_Folder)
# print("Data is importing from Google Drive.......")
# df = pd.read_csv('titanic.csv')
# print("df.shape:",df.shape)
#
# #Importing Custom_Module_Data_PreProcessing_ML
# os.chdir(ML_Folder)
# #import Custom_Methods_Final as DPP
# import Custom_Module_Data_PreProcessing_ML as DPP
# except:
# #Folders
# Curret_Folder = os.getcwd()
# Dataset_Folder = 'C:\\Users\\laxma\Python Work\\Datasets'
# ML_Folder = 'C:\\Users\\laxma\\Python Work\\Data Analysis\\Machine Learning'
#
# #Data Loading
# os.chdir(Dataset_Folder)
# print("Data is importing from Local Computer.......")
# df = pd.read_csv('titanic.csv')
# print("df.shape:",df.shape, "\n")
#
# #Importing Custom_Module_Data_PreProcessing_ML
# os.listdir(ML_Folder)
# import Custom_Module_Data_PreProcessing_ML as DPP
#
# Data_pre_pro = DPP.data_preprocessing()
# stats = DPP.simple_statistics()
#
# df.columns = Data_pre_pro.rename_columns(df)
# numeric_cols,categoric_cols,id_date_zip_cols,only_num_cals,only_cat_cals,list_all_cols = Data_pre_pro.columns_seperation(df)
#
# print("numeric_cols:\n", numeric_cols,
# "\n\ncategoric_cols:\n", categoric_cols,
# "\n\nid_date_zip_cols:\n", id_date_zip_cols,
# "\n\nonly_num_cals:\n", only_num_cals,
# "\n\nonly_cat_cals:\n", only_cat_cals,
# "\n\nlist_all_cols:\n", list_all_cols
# )
# print("Impoeted custom library & applied methods on the data !!!!!!!!!!!!!!!! ")
#
# import datetime
# from datetime import timedelta as TD
# end_time = datetime.datetime.now()
# print("end_time: ", end_time)
# TD = pd.to_datetime(end_time) - pd.to_datetime(start_time)
#
# print("\n\nTime Consumption: "+ str(TD.components[0]) + "Days " + str(TD.components[1]) + "H:"\
# + str(TD.components[2]) + "M:" + str(TD.components[3]) + "S")
#
# # titnic.csv
# # Sample Data for Classification - Titanic
# # Sample Data for Regression - Housing Price
# df = pd.read_csv('USA_Housing.csv')
# df.head()
# In[2]:
class data_preprocessing:
import numpy as np
import pandas as pd
import re
import math as m
# -------------------- Data Processing --------------------------
def rename_columns(self, df) :
updated_columns_names = df.columns.str.title().str.replace(" " , "_").str.replace("'" , "")\
.str.replace("-" , "").str.replace("." , "")\
.str.replace("(" , "").str.replace(")" , "")\
.str.replace("(" , "").str.replace(")" , "")
return updated_columns_names
def update_column_names(self,df):
df1 = pd.DataFrame(list(df.columns))
df1.columns = ['Column_Name']
df1['Columns_Updated'] =df1['Column_Name'].apply(lambda X : X.title())
df1['Columns_Updated'] =df1['Columns_Updated'].apply(lambda X : re.sub("\s","_",X))
df1['Columns_Updated'] =df1['Columns_Updated'].apply(lambda X : re.sub("\.|\'|\-\(|\)", "" ,X.title()))
df.columns = list(df1['Columns_Updated'])
return df.columns
def columns_seperation(self,df):
numeric_cols = list(df.select_dtypes(include = 'number').columns)
categoric_cols = list(df.select_dtypes(exclude = 'number').columns)
list_all_cols = list(df.columns)
id_date_zip_cols = []
for i in list(df.columns):
if ('date' in i.lower()) or ('id' in i.lower())or ('zip' in i.lower())or ('pos' in i.lower()):
id_date_zip_cols.append(i)
#print(i)
only_num_cals = list(set(numeric_cols).difference(set(id_date_zip_cols)))
only_cat_cals = list(set(categoric_cols).difference(set(id_date_zip_cols)))
return (numeric_cols,categoric_cols,id_date_zip_cols,only_num_cals,only_cat_cals,list_all_cols)
def null_counts(self , df , perc_threshold_remove_nulls =0):
df = pd.DataFrame(df)
df_nulls = pd.DataFrame(df.isnull().sum()).reset_index().rename(columns = {"index" : 'Col_Name', 0: 'Count_Nulls'})
df_nulls['Perc_Nulls'] = (df_nulls['Count_Nulls']/df.shape[0])*100
df_nulls = df_nulls.sort_values(by = 'Perc_Nulls' , ascending = False).reset_index(drop = True)
col_gt_5pct_nulls = df_nulls.loc[df_nulls['Perc_Nulls' ]>perc_threshold_remove_nulls
,['Col_Name' , 'Perc_Nulls']]
col_gt_5pct_nulls = col_gt_5pct_nulls.sort_values(by = 'Perc_Nulls' , ascending = False)
req_cols = list(set(df.columns).difference(set(col_gt_5pct_nulls['Col_Name'].tolist())) )
rem_cols = list(col_gt_5pct_nulls.Col_Name)
#print("list of columns which are having nulls more than 5% : \n" ,
#df_nulls.loc[df_nulls['Perc_Nulls']>5 , :].columns )
return (df_nulls , req_cols , rem_cols)
def handling_null_values(self,df, remv_cols = [],
numer_null_replace = 'mean' ,
categoric_null_replace = 'mode',
data_req = 'full'):
if (len(remv_cols)>0) & (data_req == 'full'):
rem_data = df[remv_cols]
else:
rem_data = pd.DataFrame()
req_col = list(set(df.columns).difference(set(remv_cols)))
df = df[req_col]
num_col = df.select_dtypes(include ='number').columns
cat_col = df.select_dtypes(exclude ='number').columns
df_cat = df[cat_col]
df_num = df[num_col]
# -------------- Handling nulls - Numerical Data ------------
numer_null_replace = str(numer_null_replace).strip().lower()
for i in num_col:
if numer_null_replace =='mean':
df_num[i].fillna(df_num[i].mean(), inplace = True)
elif numer_null_replace =='median':
df_num[i].fillna(df_num[i].median(), inplace = True)
elif numer_null_replace =='mode':
df_num[i].fillna(df_num[i].mode().values[0], inplace = True )
# -------------- Handling nulls - Categorical Data ------------
categoric_null_replace = str(categoric_null_replace).strip().lower()
for j in cat_col:
if categoric_null_replace =='mode':
mode = df_cat[j].mode().values[0]
df_cat[j] = df_cat[j].fillna(mode)
df_final = pd.DataFrame()
if data_req == 'full':
df_final = pd.concat([df_num,df_cat,rem_data], axis = 1)
elif data_req == 'selected':
df_final = pd.concat([df_num,df_cat], axis = 1)
else:
df_final = pd.concat([df_num,df_cat], axis = 1)
return df_final
def df_col_val_perc(self, df,col = None, total_num_records = False):
df = pd.DataFrame(df)
try:
if len(col) >0:
tab = df[col].value_counts().reset_index().rename(columns = {"index" :col, col: "Value_Counts"})
tab['Value_Perc'] = np.round((tab['Value_Counts'] / tab['Value_Counts'].sum()) *100,2)
tab = tab.sort_values(by = 'Value_Counts', ascending = False)
tab['Cum_Perc'] = tab['Value_Perc'].cumsum()
tab['Ranks'] = range(1,tab.shape[0]+1,1)
if total_num_records ==True:
tab['Total_Num_Reocrds'] = tab['Value_Counts'].sum()
return tab
else:
return tab
except:
if col == None:
first_col = df.columns[0]
tab = df[first_col].value_counts().reset_index().rename(columns = {"index" :first_col, first_col: "Value_Counts"})
tab['Value_Perc'] = np.round((tab['Value_Counts'] / tab['Value_Counts'].sum()) *100,2)
tab = tab.sort_values(by = 'Value_Counts', ascending = False)
tab['Cum_Perc'] = tab['Value_Perc'].cumsum()
tab['Ranks'] = range(1,tab.shape[0]+1,1)
if total_num_records ==True:
tab['Total_Num_Reocrds'] = tab['Value_Counts'].sum()
return tab
else:
return tab
def replacing_nulls_in_numeric_cols(self,df, numeric_cols = [],replace_nulls_by = 'mean'):
numeric_cols = list(numeric_cols)
df = pd.DataFrame(df)
for i in numeric_cols:
df[i] = df[i].astype(np.float64)
#--------- Replacing nulls -----------
if replace_nulls_by.lower().strip() == 'mean':
df[i] = df[i].fillna(df[i].mean())
elif replace_nulls_by.lower().strip() == 'median':
df[i] = df[i].fillna(df[i].median())
def numer_scaling(self, df, remv_cols=[], num_data_scaling='minmax', data_req='full'):
    """Scale all numeric columns with a sklearn scaler chosen by keyword
    ('minmax'/'maxabs'/'standard'/'robust'); categorical columns pass
    through, remv_cols are excluded and re-attached when data_req='full'.

    Fix: the 'maxabs' test must run before the generic 'max'/'min' test —
    in the original the MaxAbsScaler branch was unreachable because
    'maxabs' contains 'max'.  The bare `except` was also narrowed.
    """
    try:
        remv_cols = list(remv_cols)
        data_rem = df[remv_cols].copy() if remv_cols else pd.DataFrame()
        req_col = list(set(df.columns).difference(set(remv_cols)))
        df = df[req_col]
        num_col = df.select_dtypes(include='number').columns
        cat_col = df.select_dtypes(exclude='number').columns
        df_cat = df[cat_col]
        df_num = df[num_col]
        df_num_updated = pd.DataFrame(df_num)
        key = str(num_data_scaling).strip().lower()
        if ('max' in key) and ('abs' in key):
            from sklearn.preprocessing import MaxAbsScaler
            scaler = MaxAbsScaler()
        elif ('max' in key) or ('min' in key):
            from sklearn.preprocessing import MinMaxScaler
            scaler = MinMaxScaler()
        elif ('norm' in key) or ('stand' in key):
            from sklearn.preprocessing import StandardScaler
            scaler = StandardScaler()
        elif 'rob' in key:
            from sklearn.preprocessing import RobustScaler
            scaler = RobustScaler()
        else:
            scaler = None  # unknown keyword: leave the numeric data as-is
        if scaler is not None:
            df_num_updated = pd.DataFrame(scaler.fit_transform(df_num), columns=num_col)
        data_req = str(data_req).strip().lower()
        if data_req == 'full':
            return pd.concat([df_num_updated, df_cat, data_rem], axis=1)
        elif data_req == 'selected':
            return pd.concat([df_num_updated, df_cat], axis=1)
        # any other data_req value returns None, as before
    except Exception:
        print("Please enter the right information !!!!!!!!")
def numerical_scaling(self, df, cols=[], num_data_scaling='minmax'):
    """Scale only the given columns with the sklearn scaler selected by
    keyword; all other columns are passed through unchanged.

    Fix: 'maxabs' must be tested before the generic 'max'/'min' test —
    the MaxAbsScaler branch was unreachable in the original.
    """
    try:
        req_cols = list(cols)
        other_cols = set(df.columns).difference(set(req_cols))
        df_cat = df[list(other_cols)]
        df_num = df[req_cols]
        df_num_updated = pd.DataFrame(df_num)
        key = str(num_data_scaling).strip().lower()
        if ('max' in key) and ('abs' in key):
            from sklearn.preprocessing import MaxAbsScaler
            scaler = MaxAbsScaler()
        elif ('max' in key) or ('min' in key):
            from sklearn.preprocessing import MinMaxScaler
            scaler = MinMaxScaler()
        elif ('norm' in key) or ('stand' in key):
            from sklearn.preprocessing import StandardScaler
            scaler = StandardScaler()
        elif 'rob' in key:
            from sklearn.preprocessing import RobustScaler
            scaler = RobustScaler()
        else:
            scaler = None  # unknown keyword: numeric data stays unscaled
        if scaler is not None:
            df_num_updated = pd.DataFrame(scaler.fit_transform(df_num), columns=req_cols)
        return pd.concat([df_num_updated, df_cat], axis=1)
    except Exception:
        print("Please enter the right information !!!!!!!!")
def numerical_values_bins(self, df, cols=[], no_bins_col_wise=[], default_no_bins=3):
    """Bin the listed numeric columns in place with pd.cut (labels are
    0..k-1) and return a dict of per-column bin tables.

    Fix: the tables were only printed before, never returned; the print is
    kept for backward compatibility.
    NOTE(review): relies on self.df_col_val_perc producing exactly five
    columns for the interval frequencies — confirm on the installed
    pandas version.
    """
    bins_ = {}
    cols = list(cols)
    no_bins_col_wise = list(no_bins_col_wise)
    # Fall back to default_no_bins when no (or mismatching) per-column
    # bin counts were supplied.
    if (len(no_bins_col_wise) == 0) | (len(cols) != len(no_bins_col_wise)):
        no_bins = list([default_no_bins] * len(cols))
    else:
        no_bins = no_bins_col_wise
    for i, v in enumerate(cols):
        bins_[v] = self.df_col_val_perc(pd.cut(df[v], no_bins[i]))
        bins_[v]['Ranks'] = list(range(len(bins_[v]['Ranks'])))
        bins_[v][v] = bins_[v][v].astype(str)
        bins_[v].columns = ['Range', 'Counts', 'Perc', 'Cum_Perc', 'Mapping_Value']
        bins_[v] = bins_[v][['Range', 'Mapping_Value', 'Counts', 'Perc', 'Cum_Perc']]
        df[v] = pd.cut(df[v], no_bins[i], labels=list(range(no_bins[i])))
    print(bins_)
    return bins_
def DF_Count_Uniq_Values_By_col(self, df):
    """Table of distinct-value counts, one row per column of df."""
    df = pd.DataFrame(df)
    uniques = {name: df[name].nunique() for name in df.columns}
    return (
        pd.Series(uniques)
        .reset_index()
        .rename(columns={"index": 'Column_Name', 0: "#Unique_Values"})
    )
# ---------------------- Decision Tree related ----------------
def entropy(self, df, col=None):
    """Shannon entropy (bits) of the value distribution of `col` in df.

    Prints a hint and returns None when no usable column name is given
    (the bare except mirrors the original contract).
    """
    try:
        if len(col) > 0:
            freq = self.df_col_val_perc(df, col)
            total = freq['Value_Counts'].sum()
            terms = []
            for count in freq['Value_Counts']:
                share = count / total
                terms.append(-(share * np.log2(share)))
            return np.sum(terms)
    except:
        print("Pass the column name to calculate the entropy")
def entropy_all_cols(self, df):
    """Entropy of every column of df, sorted ascending."""
    per_col = {name: self.entropy(df, name) for name in list(df.columns)}
    table = (
        pd.Series(per_col)
        .reset_index()
        .rename(columns={"index": 'Predictor_Name', 0: "Entropy"})
        .sort_values(by='Entropy', ascending=True)
    )
    return table
def descriptive_statistics(self, df, cols=[], full_info=False, num_decm_points=2):
    """Print per-column spread/outlier statistics to stdout.

    full_info=False prints only the IQR and outlier count; full_info=True
    prints a full descriptive-statistics dict.  Nothing is returned.
    NOTE(review): mutable default `cols=[]` — harmless here (never
    mutated) but worth changing to None eventually.
    """
    if (len(cols) == 0) | (type(cols) != list):
        print("Please supply the list of the columns, for which you want o see the Descriptive Statistics")
    else:
        if full_info == False:
            for i in cols:
                print(i, ":")
                # Tukey fences: 1.5 * IQR beyond the quartiles.
                q3_ = df[i].quantile(.75)
                q1_ = df[i].quantile(.25)
                iqr_ = q3_ - q1_
                low_iqr_ = q1_ - (1.5 * iqr_)
                upp_iqr_ = q3_ + (1.5 * iqr_)
                outliers_range = (df[i] > upp_iqr_) | (df[i] < low_iqr_)
                outliers = list(df.loc[outliers_range, i])
                print('iqr:', iqr_, ', #outliers:', len(outliers), "\n")
        elif full_info == True:
            for i in cols:
                #print(i,":")
                q3_ = df[i].quantile(.75)
                q1_ = df[i].quantile(.25)
                iqr_ = q3_ - q1_
                low_fence_iqr_ = q1_ - (1.5 * iqr_)
                upp_fence_iqr_ = q3_ + (1.5 * iqr_)
                outliers_range = (df[i] > upp_fence_iqr_) | (df[i] < low_fence_iqr_)
                outliers = list(df.loc[outliers_range, i])
                # NOTE(review): the _neg/_pos labels look swapped — values
                # ABOVE the upper fence land in outliers_neg; confirm intent.
                outliers_neg = list(df.loc[df[i] > upp_fence_iqr_, i])
                outliers_pos = list(df.loc[df[i] < low_fence_iqr_, i])
                # NOTE(review): 'Min' rounds with a hard-coded 2 instead of
                # num_decm_points, unlike every other entry.
                info_1 = {'Min': np.round(df[i].min(), 2),
                          'Q1': np.round(df[i].quantile(.25), num_decm_points),
                          'Median': np.round(df[i].quantile(.5), num_decm_points),
                          'Q3': np.round(df[i].quantile(.75), num_decm_points),
                          'Max': np.round(df[i].quantile(1), num_decm_points),
                          'Mean': np.round(df[i].mean(), num_decm_points),
                          'STD': np.round(df[i].std(), num_decm_points),
                          'Variance': np.round(df[i].var(), num_decm_points),
                          'Count': df[i].count(),
                          'IQR': np.round(iqr_, num_decm_points),
                          'IQR_Lower_Fence': np.round(low_fence_iqr_, num_decm_points),
                          'IQR_Upper_Fence': np.round(upp_fence_iqr_, num_decm_points),
                          'Skewness': np.round(df[i].skew(), num_decm_points),
                          'Kurtosis': np.round(df[i].kurt(), num_decm_points),
                          '#NonOutliers': df[i].shape[0] - len(outliers),
                          'NonOutliers_Perc': np.round(((df[i].shape[0] - len(outliers)) / df[i].shape[0]) * 100, num_decm_points),
                          '#Outliers': len(outliers),
                          'Outliers_Perc': np.round((len(outliers) / df[i].shape[0]) * 100, num_decm_points),
                          '#Outliers_neg': len(outliers_neg),
                          '#Outliers_pos': len(outliers_pos)
                          }
                print(info_1, "\n")
def Outlier_Detect_And_Show_Repl_Value(self,
                                       df,
                                       cols=[],
                                       detect_method='iqr',
                                       replace_outlier_by='mean',
                                       replace_nulls_by='mean'
                                       ):
    """For each column, build a per-row table flagging outliers and showing
    the replacement value.

    Returns {col: DataFrame} where the frame holds the (null-filled)
    values, their z-scores, an outlier flag and a 'Values_Updated' column
    with outliers replaced by the column mean or median.
    detect_method: 'iqr' (Tukey fences) or 'zscore' (|z| > 3).
    NOTE(review): the flag column is named 'Outlier_or_Not' in the iqr
    branch but 'Outliers_or_Not' in the zscore branch — downstream code
    must not rely on a single name.
    """
    data_info = {}
    cols = list(cols)
    df = pd.DataFrame(df)
    #df1 = pd.DataFrame(df[cols])
    #print("Using",detect_method.upper(), "to detect the Outliers and" ,
    # 'Nulls are replaced by', replace_nulls_by.upper(),
    # 'Outliers are replaced by', replace_outlier_by.upper()
    #)
    for i in cols:
        df1 = pd.DataFrame(df[i])
        df1[i] = df1[i].astype(np.float64)
        #print(i, "\n\n")
        #--------- Replacing nulls -----------
        if replace_nulls_by == 'mean':
            df1[i] = df1[i].fillna(df1[i].mean())
        elif replace_nulls_by == 'median':
            df1[i] = df1[i].fillna(df1[i].median())
        #--------- Descriptive Statistics -----------
        # NOTE(review): mean is taken from the ORIGINAL column (df), i.e.
        # before null replacement; std is np.std (population, ddof=0).
        means_ = np.mean(df[i])
        std_ = np.std(df1[i])
        medians_ = np.median(df1[i])
        q3_ = df1[i].quantile(.75)
        q1_ = df1[i].quantile(.25)
        iqr_ = q3_ - q1_
        low_iqr_ = q1_ - (1.5 * iqr_)
        upp_iqr_ = q3_ + (1.5 * iqr_)
        df1['ZScores'] = df1[i].apply(lambda X: ((X - means_) / std_))
        values_after_replacing_Outliers = 'Values_Updated'
        if detect_method == 'iqr':
            df1['Outlier_or_Not'] = np.where(((df1[i] > upp_iqr_) | (df1[i] < low_iqr_)), 'Y', 'N')
            if replace_outlier_by == 'mean':
                df1[values_after_replacing_Outliers] = np.where(df1['Outlier_or_Not'] == 'Y', means_, df1[i])
            elif replace_outlier_by == 'median':
                df1[values_after_replacing_Outliers] = np.where(df1['Outlier_or_Not'] == 'Y', medians_, df1[i])
        elif detect_method == 'zscore':
            df1['Outliers_or_Not'] = np.where(((df1['ZScores'] > 3) | (df1['ZScores'] < -3)), 'Y', 'N')
            if replace_outlier_by == 'mean':
                df1[values_after_replacing_Outliers] = np.where(df1['Outliers_or_Not'] == 'Y', means_, df1[i])
            elif replace_outlier_by == 'median':
                df1[values_after_replacing_Outliers] = np.where(df1['Outliers_or_Not'] == 'Y', medians_, df1[i])
        data_info[i] = df1
    return (data_info)
def outlier_replace_in_Num_Cols(self, df , cols = [],
detect_method = 'iqr',
replace_outlier_by = 'mean',
replace_nulls_by = 'mean'
):
for i in cols:
j = self.Outlier_Detect_And_Show_Repl_Value(df, cols = [i],
detect_method = detect_method,
replace_outlier_by = replace_outlier_by,
replace_nulls_by = replace_nulls_by)
#print(pd.DataFrame(j[i])['Values_Updated'])
df[i] = pd.DataFrame(j[i])['Values_Updated'].values
# In[ ]:
# In[3]:
class simple_statistics:
import numpy as np
import pandas as pd
import re
import math as m
def Data_Sum_Count(self, X):
    """Return (sum, count) of the values in X."""
    values = list(X)
    return (sum(values), len(values))
def Data_Unique_Values(self, X):
    """Distinct values of X in order of first occurrence."""
    seen = []
    for value in list(X):
        if value not in seen:
            seen.append(value)
    return seen
def Data_Max_Min(self, X):
    """Return (max, min) of X via a single sort."""
    ordered = sorted(list(X))
    return (ordered[-1], ordered[0])
def Data_Arith_Mean(self ,X):
self.sum_, self.count_ = self.sum_count(X)
return(self.sum_/self.count_)
def Data_Harmonic_Mean(self, X):
    """Harmonic mean of X: n / sum(1/x).

    Fixes two defects: self.sum_count never existed (so the method always
    raised AttributeError), and the old formula count/sum is not the
    harmonic mean.
    """
    values = list(X)
    return len(values) / sum(1 / x for x in values)
def Data_Geometric_Mean(self ,X):
sum_1, counts_1= self.Data_Sum_Count(X)
prod_all_mem = 1
for i in range(counts_1):
prod_all_mem = prod_all_mem * X[i]
gem_mean = prod_all_mem**(1/counts_1)
return (counts_1,prod_all_mem,gem_mean)
def Data_Median(self, X):
    """Median of X.

    Fixes: the original even-length branch referenced an undefined name
    (lowercase `x`) and indexed with a float, and both branches were off
    by one (1-based indexing into a 0-based list).
    """
    ordered = sorted(list(X))
    n = len(ordered)
    mid = n // 2
    if n % 2 == 0:
        return (ordered[mid - 1] + ordered[mid]) / 2
    return ordered[mid]
def Data_Sort(self, X):
    """Ascending copy of X as a list."""
    return sorted(X)
def Data_Mode(self, X):
X = self.Data_Sort(list(X))
set_x = set(X)
counts = {}
for i in range(len(set_x)):
counts[set_x[i]] = X.count(set_x[i])
return counts
def Data_Freq_Values(self, X):
    """Frequency table (Value / Frequency / Frequency_Perc / Ranks),
    most frequent first."""
    values = list(X)
    counts = {value: values.count(value) for value in set(values)}
    freq_tab = (
        pd.Series(counts)
        .reset_index()
        .rename(columns={"index": 'Value', 0: "Frequency"})
        .sort_values(by='Frequency', ascending=False)
    )
    freq_tab['Frequency_Perc'] = round((freq_tab['Frequency'] / freq_tab['Frequency'].sum()) * 100, 2)
    freq_tab['Ranks'] = range(1, freq_tab.shape[0] + 1)
    return freq_tab
def Data_Mode_Values(self , X):
freq_tab = self.Data_Freq_Values(X)
max_,min_ = self.Data_Max_Min(X = freq_tab['Frequency'])
mode_values = list(freq_tab.loc[freq_tab['Frequency'] == max_ , 'Value' ])
return mode_values
def Data_Range(self, X):
    """max(X) - min(X), computed from a sorted copy."""
    ordered = sorted(list(X))
    return ordered[-1] - ordered[0]
def Data_Percentile(X, perc = 50):
cnt = stats.Data_Sum_Count(X)[1]
X = stats.Data_Sort(X)
mem = (cnt *(perc/100))
if (mem - int(mem)) ==0:
return X[int(mem)-1]
elif (mem - int(mem)) !=0:
return (X[int(mem)] + X[int(mem)+1])/2
def Data_Var_Std(self, X):
    """Population variance and standard deviation of X: (var, std).

    Fixes: the original called the nonexistent self.Arithermatic_Mean
    (AttributeError) and never divided the squared-deviation sum by n, so
    the "variance" was just the sum of squared deviations.
    """
    values = list(X)
    mean = sum(values) / len(values)
    squared_dev = sum((v - mean) ** 2 for v in values)
    variance = squared_dev / len(values)
    return (variance, variance ** 0.5)
def Data_Sample_Sim_Rand(self, X, perc=15):
    """Draw ~perc% random index numbers and keep those that also occur as
    *values* in X.

    NOTE(review): this looks buggy — it tests `if i in X` against the data
    values instead of sampling X[i]; confirm the intent before relying on
    it.  Output is nondeterministic and the drawn indices are printed as a
    side effect.
    """
    X = list(X)
    # perc becomes an absolute sample size here.
    perc = int(perc/100 * ((len(X)+1)))
    random =list(np.random.randint(0 , len(X)+1 , perc))
    print(random)
    req = []
    for i in random:
        if i in X:
            req.append(i)
    return req
def Desciptive_Stats(self, df, cols=['Sales', 'Profit', 'Discount', 'Quantity']):
    """Return {column: descriptive-stats dict} for the given columns.

    Prints each column name as it is processed.
    NOTE(review): the method name is misspelled ("Desciptive") and the
    default column list is dataset-specific; both kept for compatibility.
    """
    data_info = {}
    for i in cols:
        print(i, ":")
        # Tukey fences: 1.5 * IQR beyond the quartiles.
        q3_ = df[i].quantile(.75)
        q1_ = df[i].quantile(.25)
        iqr_ = q3_ - q1_
        low_fence_iqr_ = q1_ - (1.5 * iqr_)
        upp_fence_iqr_ = q3_ + (1.5 * iqr_)
        outliers_range = (df[i] > upp_fence_iqr_) | (df[i] < low_fence_iqr_)
        outliers = list(df.loc[outliers_range, i])
        # NOTE(review): the _neg/_pos labels look swapped — values ABOVE
        # the upper fence land in outliers_neg; confirm intent.
        outliers_neg = list(df.loc[df[i] > upp_fence_iqr_, i])
        outliers_pos = list(df.loc[df[i] < low_fence_iqr_, i])
        info_1 = {
            'Count': df[i].count(),
            'Q0(Min)': np.round(df[i].quantile(0), 2),
            'Q1': np.round(df[i].quantile(.25), 2),
            'Q2(Median)': np.round(df[i].quantile(.5), 2),
            'Q3': np.round(df[i].quantile(.75), 2),
            'Q4(Max)': np.round(df[i].quantile(1), 2),
            'Mean': np.round(df[i].mean(), 2),
            'STD': np.round(df[i].std(), 2),
            'Variance': np.round(df[i].var(), 2),
            #'MAD': np.round(df[i].var(),2),
            'IQR': np.round(iqr_, 2),
            'IQR_Lower_Fence': np.round(low_fence_iqr_, 2),
            'IQR_Upper_Fence': np.round(upp_fence_iqr_, 2),
            'Skewness': np.round(df[i].skew(), 2),
            'Curtosis': np.round(df[i].kurt(), 2),
            '#NonOutliers': df[i].shape[0] - len(outliers),
            'NonOutliers_Perc': np.round(((df[i].shape[0] - len(outliers)) / df[i].shape[0]) * 100, 2),
            '#Outliers': len(outliers),
            'Outliers_Perc': np.round((len(outliers) / df[i].shape[0]) * 100, 2),
            '#Outliers_neg': len(outliers_neg),
            '#Outliers_pos': len(outliers_pos)
        }
        data_info[i] = info_1
    return (data_info)
def factorial(self , X):
if X<0:
print('Please enter a positive Value.')
elif X== 1:
return 1
else:
return X * self.factorial(X-1)
def Binomial_Distribution(self , n=1,p=.1,r = 1):
import math as m
ncr = (self.factorial(n))/(self.factorial(n-r) * self.factorial(r))
pq = m.pow(p, r)* m.pow( (1-p), (n-r))
return (ncr * pq)
class Data_Pre_Processing:
import numpy as np
import pandas as pd
import re
import math as m
# -------------- Basic indormation about the Data - Central Tendency ---------------
def Data_Sum_Count(self, X):
X = list(X)
count_ = 0
sum_ = 0
for i in range(len(X)):
sum_ = sum_+X[i]
count_ = count_+1
return(sum_,count_)
def Data_Unique_Values(self, X):
X = list(X)
unique_values = []
for i in X:
if i not in unique_values:
unique_values.append(i)
return unique_values
def Data_Max_Min(self, X):
X = sorted(list(X))
max_ = X[len(X)-1]
min_ = X[0]
return (max_ , min_)
def Data_Arith_Mean(self ,X):
self.sum_, self.count_ = self.sum_count(X)
return(self.sum_/self.count_)
def Data_Harmonic_Mean(self ,X):
self.sum_, self.count_ = self.sum_count(X)
return(self.count_/self.sum_)
def Data_Geometric_Mean(self ,X):
sum_1, counts_1= self.Data_Sum_Count(X)
prod_all_mem = 1
for i in range(counts_1):
prod_all_mem = prod_all_mem * X[i]
gem_mean = prod_all_mem**(1/counts_1)
return (counts_1,prod_all_mem,gem_mean)
def Data_Median(self, X):
X = sorted(list(X))
if len(X)%2==0:
return (x[len(X)/2] + x[(len(X)/2)+1])/2
elif len(X)%2==1:
return X[int((len(X) +1)/2)]
def Data_Sort(self, X):
X = sorted(list(X))
return X
def Data_Mode(self, X):
X = self.Data_Sort(list(X))
set_x = set(X)
counts = {}
for i in range(len(set_x)):
counts[set_x[i]] = X.count(set_x[i])
return counts
def Data_Freq_Values(self , X):
X = list(X)
counts= {}
for i in set(X):
counts[i] = X.count(i)
freq_tab = pd.Series(counts).reset_index().rename(columns = {"index": 'Value' , 0:"Frequency"})
freq_tab = freq_tab.sort_values(by = 'Frequency' , ascending = False)
freq_tab['Frequency_Perc'] = round((freq_tab['Frequency']/freq_tab['Frequency'].sum())*100,2)
freq_tab['Ranks'] = range(1, freq_tab.shape[0]+1)
return freq_tab
def Data_Mode_Values(self , X):
freq_tab = self.Data_Freq_Values(X)
max_,min_ = self.Data_Max_Min(X = freq_tab['Frequency'])
mode_values = list(freq_tab.loc[freq_tab['Frequency'] == max_ , 'Value' ])
return mode_values
def Data_Range(self, X):
X = sorted(list(X))
max_ = X[len(X)-1]
min_ = X[0]
return (max_ - min_)
def Univariate_Basic_Statistics(self,df ,cols = ['Sales' , 'Profit' , 'Discount' , 'Quantity'] ):
data_info = {}
for i in cols:
#print(i,":")
q3_ = df[i].quantile(.75)
q1_ = df[i].quantile(.25)
iqr_ = q3_ - q1_
low_fence_iqr_ = q1_ - (1.5*iqr_)
upp_fence_iqr_ = q3_ + (1.5*iqr_)
outliers_range = (df[i]> upp_fence_iqr_) | (df[i]< low_fence_iqr_)
outliers = list(df.loc[outliers_range , i])
outliers_neg = list(df.loc[df[i]> upp_fence_iqr_ , i])
outliers_pos = list(df.loc[df[i]< low_fence_iqr_ , i])
mode_ = self.Data_Mode_Values(df[i])[0]
info_1 = {
'Count' : df[i].count(),
'Q0(Min)' : np.round(df[i].quantile(0),2),
'Q1' : np.round(df[i].quantile(.25),2),
'Q2(Median)' : np.round(df[i].quantile(.5),2),
'Q3' : np.round(df[i].quantile(.75),2),
'Q4(Max)' : np.round(df[i].quantile(1),2),
'Mean' :np.round( df[i].mean(),2),
'Mode' : mode_,
'STD' : np.round(df[i].std(),2),
'Variance' : np.round(df[i].var(),2),
#'MAD': np.round(df[i].var(),2),
'IQR' : np.round(iqr_,2),
'IQR_Lower_Fence' : np.round(low_fence_iqr_,2),
'IQR_Upper_Fence' : np.round(upp_fence_iqr_,2),
'Skewness' : np.round(df[i].skew(),2),
'Curtosis' : np.round(df[i].kurt(),2),
'#NonOutliers' : df[i].shape[0]-len(outliers),
'NonOutliers_Perc' : np.round(((df[i].shape[0]-len(outliers)) / df[i].shape[0])*100,2),
'#Outliers' : len(outliers),
'Outliers_Perc' : np.round((len(outliers) / df[i].shape[0])*100,2),
'#Outliers_neg' : len(outliers_neg),
'#Outliers_pos' : len(outliers_pos)
}
data_info[i] = info_1
return (data_info)
def factorial(self , X):
if X<0:
print('Please enter a positive Value.')
elif X== 1:
return 1
else:
return X * self.factorial(X-1)
# -------------------- Data Processing --------------------------
def rename_columns(self, df) :
updated_columns_names = df.columns.str.title().str.replace(" " , "_").str.replace("'" , "")\
.str.replace("-" , "").str.replace("." , "")\
.str.replace("(" , "").str.replace(")" , "")
return updated_columns_names
def update_column_names(self,df):
import re
df1 = pd.DataFrame(list(df.columns))
df1.columns = ['Column_Name']
df1['Columns_Updated'] =df1['Column_Name'].apply(lambda X : X.title())
df1['Columns_Updated'] =df1['Columns_Updated'].apply(lambda X : re.sub("\s","_",X))
df1['Columns_Updated'] =df1['Columns_Updated'].apply(lambda X : re.sub("\.|\'|\-\(|\)", "" ,X.title()))
df.columns = list(df1['Columns_Updated'])
return df.columns
def columns_seperation(self,df):
numeric_cols = list(df.select_dtypes(include = 'number').columns)
categoric_cols = list(df.select_dtypes(exclude = 'number').columns)
list_all_cols = list(df.columns)
id_date_zip_cols = []
for i in list(df.columns):
if ('date' in i.lower()) or ('id' in i.lower())or ('zip' in i.lower())or ('pos' in i.lower()):
id_date_zip_cols.append(i)
#print(i)
num_cols_model = list(set(numeric_cols).difference(set(id_date_zip_cols)))
cat_cols_model = list(set(categoric_cols).difference(set(id_date_zip_cols)))
return (numeric_cols,categoric_cols,id_date_zip_cols,num_cols_model,cat_cols_model,list_all_cols)
def null_counts(self , df , perc_threshold_remove_nulls =0):
df = pd.DataFrame(df)
df_nulls = pd.DataFrame(df.isnull().sum()).reset_index().rename(columns = {"index" : 'Col_Name', 0: 'Count_Nulls'})
df_nulls['Perc_Nulls'] = (df_nulls['Count_Nulls']/df.shape[0])*100
df_nulls = df_nulls.sort_values(by = 'Perc_Nulls' , ascending = False).reset_index(drop = True)
col_gt_5pct_nulls = df_nulls.loc[df_nulls['Perc_Nulls' ]>perc_threshold_remove_nulls
,['Col_Name' , 'Perc_Nulls']]
col_gt_5pct_nulls = col_gt_5pct_nulls.sort_values(by = 'Perc_Nulls' , ascending = False)
req_cols = list(set(df.columns).difference(set(col_gt_5pct_nulls['Col_Name'].tolist())) )
rem_cols = list(col_gt_5pct_nulls.Col_Name)
#print("list of columns which are having nulls more than 5% : \n" ,
#df_nulls.loc[df_nulls['Perc_Nulls']>5 , :].columns )
return (df_nulls , req_cols , rem_cols)
def handling_null_values(self,df, remv_cols = [],
numer_null_replace = 'mean' ,
categoric_null_replace = 'mode',
data_req = 'full'):
if (len(remv_cols)>0) & (data_req == 'full'):
rem_data = df[remv_cols]
else:
rem_data = pd.DataFrame()
req_col = list(set(df.columns).difference(set(remv_cols)))
df = df[req_col]
num_col = df.select_dtypes(include ='number').columns
cat_col = df.select_dtypes(exclude ='number').columns
df_cat = df[cat_col]
df_num = df[num_col]
# -------------- Handling nulls - Numerical Data ------------
numer_null_replace = str(numer_null_replace).strip().lower()
for i in num_col:
if numer_null_replace =='mean':
df_num[i].fillna(df_num[i].mean(), inplace = True)
elif numer_null_replace =='median':
df_num[i].fillna(df_num[i].median(), inplace = True)
elif numer_null_replace =='mode':
df_num[i].fillna(df_num[i].mode().values[0], inplace = True )
# -------------- Handling nulls - Categorical Data ------------
categoric_null_replace = str(categoric_null_replace).strip().lower()
for j in cat_col:
if categoric_null_replace =='mode':
mode = df_cat[j].mode().values[0]
df_cat[j] = df_cat[j].fillna(mode)
df_final = pd.DataFrame()
if data_req == 'full':
df_final = pd.concat([df_num,df_cat,rem_data], axis = 1)
elif data_req == 'selected':
df_final = pd.concat([df_num,df_cat], axis = 1)
else:
df_final = pd.concat([df_num,df_cat], axis = 1)
return df_final
def df_col_val_perc(self, df,col = None, total_num_records = False):
df = pd.DataFrame(df)
try:
if len(col) >0:
tab = df[col].value_counts().reset_index().rename(columns = {"index" :col, col: "Value_Counts"})
tab['Value_Perc'] = np.round((tab['Value_Counts'] / tab['Value_Counts'].sum()) *100,2)
tab = tab.sort_values(by = 'Value_Counts', ascending = False)
tab['Cum_Perc'] = tab['Value_Perc'].cumsum()
tab['Ranks'] = range(1,tab.shape[0]+1,1)
if total_num_records ==True:
tab['Total_Num_Reocrds'] = tab['Value_Counts'].sum()
return tab
else:
return tab
except:
if col == None:
first_col = df.columns[0]
tab = df[first_col].value_counts().reset_index().rename(columns = {"index" :first_col, first_col: "Value_Counts"})
tab['Value_Perc'] = np.round((tab['Value_Counts'] / tab['Value_Counts'].sum()) *100,2)
tab = tab.sort_values(by = 'Value_Counts', ascending = False)
tab['Cum_Perc'] = tab['Value_Perc'].cumsum()
tab['Ranks'] = range(1,tab.shape[0]+1,1)
if total_num_records ==True:
tab['Total_Num_Reocrds'] = tab['Value_Counts'].sum()
return tab
else:
return tab
def replacing_nulls_in_numeric_cols(self,df, numeric_cols = [],replace_nulls_by = 'mean'):
numeric_cols = list(numeric_cols)
df = pd.DataFrame(df)
for i in numeric_cols:
mode_ = self.Data_Mode_Values(df[i])[0]
df[i] = df[i].astype(np.float64)
#--------- Replacing nulls -----------
if replace_nulls_by.lower().strip() == 'mean':
df[i] = df[i].fillna(df[i].mean())
elif replace_nulls_by.lower().strip() == 'median':
df[i] = df[i].fillna(df[i].median())
elif replace_nulls_by.lower().strip() == 'mode':
df[i] = df[i].fillna(mode_)
def numer_scaling(self, df, remv_cols = [], num_data_scaling = 'minmax' ,data_req = 'full'):
try:
remv_cols = list(remv_cols)
if len(remv_cols) !=0:
data_rem = df.copy()
data_rem = data_rem[remv_cols]
else:
data_rem = pd.DataFrame()
req_col = list(set(df.columns).difference(set(remv_cols)))
df = df[req_col]
num_col = df.select_dtypes(include ='number').columns
cat_col = df.select_dtypes(exclude ='number').columns
df_cat = df[cat_col]
df_num = df[num_col]
df_num_updated = pd.DataFrame(df_num)
# -------------- Handling nulls - Numerical Data ------------
num_data_scaling = str(num_data_scaling).strip().lower()
if ('max' in num_data_scaling) | ('min' in num_data_scaling):
from sklearn.preprocessing import MinMaxScaler
scaling = MinMaxScaler()
df_num_updated = pd.DataFrame(scaling.fit_transform(df_num) , columns = num_col)
elif ('norm' in num_data_scaling ) | ('stand' in num_data_scaling ):
from sklearn.preprocessing import StandardScaler
scaling = StandardScaler()
df_num_updated = pd.DataFrame(scaling.fit_transform(df_num) , columns = num_col)
elif ('rob' in num_data_scaling ):
from sklearn.preprocessing import RobustScaler
scaling = RobustScaler()
df_num_updated = pd.DataFrame(scaling.fit_transform(df_num) , columns = num_col)
elif('max' in num_data_scaling ) & ('abs' in num_data_scaling ) :
from sklearn.preprocessing import MaxAbsScaler
scaling = MaxAbsScaler()
df_num_updated = pd.DataFrame(scaling.fit_transform(df_num) , columns = num_col)
data_req = str(data_req).strip().lower()
if data_req == 'full':
df_final = pd.concat([df_num_updated,df_cat,data_rem ], axis = 1)
return df_final
elif data_req == 'selected':
df_final = pd.concat([df_num_updated,df_cat], axis = 1)
return df_final
except:
print("Please enter the right information !!!!!!!!")
def numerical_scaling(self, df, cols = [], num_data_scaling = 'minmax'):
try:
df_cols = df.columns
req_cols = list(cols)
not_req_cols = set(df_cols).difference(set(req_cols))
df_cat = df[list(not_req_cols)]
df_num = df[req_cols]
df_num_updated = pd.DataFrame(df_num)
# -------------- Handling nulls - Numerical Data ------------
num_data_scaling = str(num_data_scaling).strip().lower()
if ('max' in num_data_scaling) | ('min' in num_data_scaling):
from sklearn.preprocessing import MinMaxScaler
scaling = MinMaxScaler()
df_num_updated = pd.DataFrame(scaling.fit_transform(df_num) , columns = req_cols)
elif ('norm' in num_data_scaling ) | ('stand' in num_data_scaling ):
from sklearn.preprocessing import StandardScaler
scaling = StandardScaler()
df_num_updated = pd.DataFrame(scaling.fit_transform(df_num) , columns = req_cols)
elif ('rob' in num_data_scaling ):
from sklearn.preprocessing import RobustScaler
scaling = RobustScaler()
df_num_updated = pd.DataFrame(scaling.fit_transform(df_num) , columns = req_cols)
elif('max' in num_data_scaling ) & ('abs' in num_data_scaling ) :
from sklearn.preprocessing import MaxAbsScaler
scaling = MaxAbsScaler()
df_num_updated = pd.DataFrame(scaling.fit_transform(df_num) , columns = req_cols)
df_final = pd.concat([df_num_updated,df_cat], axis = 1)
return df_final
except:
print("Please enter the right information !!!!!!!!")
def numerical_values_bins(self,df, cols =[] , no_bins_col_wise = [], default_no_bins = 3):
bins_ = {}
cols = list(cols)
no_bins_col_wise = list(no_bins_col_wise)
#print(type(no_bins_col_wise) , len( no_bins_col_wise))
if (len(no_bins_col_wise)==0) | (no_bins_col_wise==None) | (len(cols) != len(no_bins_col_wise)):
no_bins = list([default_no_bins]*len(cols))
else:
no_bins = no_bins_col_wise
#print(no_bins)
for i, v in enumerate(cols):
bins_[v] = self.df_col_val_perc(pd.cut(df[v],no_bins[i]))
bins_[v]['Ranks']=list(range(len(bins_[v]['Ranks'])))
bins_[v][v]= bins_[v][v].astype(str)
bins_[v].columns = ['Range', 'Counts', 'Perc', 'Cum_Perc', 'Mapping_Value']
bins_[v] = bins_[v][['Range','Mapping_Value','Counts', 'Perc', 'Cum_Perc']]
df[v] = pd.cut(df[v],no_bins[i], labels = list(range(no_bins[i])))
print(bins_)
def DF_Count_Uniq_Values_By_col(self, df):
df = pd.DataFrame(df)
cols = df.columns
df_n_unique = {}
for i in cols:
df_n_unique[i] = df[i].nunique()
DF_Count_Uniq_Values_By_col = pd.Series(df_n_unique).reset_index().rename(columns = {"index": 'Column_Name' ,
0:"#Unique_Values"})
return DF_Count_Uniq_Values_By_col
# ---------------------- Decision Tree related ----------------
def entropy(self, df,col = None ):
try:
if len(col)>0:
labeled_data = self.df_col_val_perc(df, col)
entropy = []
for i in range(labeled_data.shape[0]):
total_counts = labeled_data['Value_Counts'].sum()
#print(labeled_data['Value_Counts'][i])
pro_ratio = labeled_data['Value_Counts'][i]/total_counts
entropy.append(-(pro_ratio*np.log2(pro_ratio)))
return np.sum(entropy)
except:
print("Pass the column name to calculate the entropy")
def entropy_all_cols(self, df):
cols = list(df.columns)
entropy_cols = {}
for i in cols:
entropy_cols[i] = (self.entropy(df,i))
entropy_cols = pd.Series(entropy_cols).reset_index().rename(columns = {"index": 'Predictor_Name' , 0:"Entropy"})
entropy_cols = entropy_cols.sort_values(by = 'Entropy' , ascending = True)
return entropy_cols
def descriptive_statistics(self,df, cols = [] , full_info = False, num_decm_points = 2):
if (len(cols)==0) | (type(cols) !=list) :
print("Please supply the list of the columns, for which you want o see the Descriptive Statistics")
else:
if full_info == False:
for i in cols:
print(i ,":")
q3_ = df[i].quantile(.75)
q1_ = df[i].quantile(.25)
iqr_ = q3_ - q1_
low_iqr_ = q1_ - (1.5*iqr_)
upp_iqr_ = q3_ + (1.5*iqr_)
outliers_range = (df[i]> upp_iqr_) | (df[i]< low_iqr_)
outliers = list(df.loc[outliers_range , i])
print('iqr:', iqr_ , ', #outliers:', len(outliers) , "\n")
elif full_info == True:
for i in cols:
#print(i,":")
q3_ = df[i].quantile(.75)
q1_ = df[i].quantile(.25)
iqr_ = q3_ - q1_
low_fence_iqr_ = q1_ - (1.5*iqr_)
upp_fence_iqr_ = q3_ + (1.5*iqr_)
outliers_range = (df[i]> upp_fence_iqr_) | (df[i]< low_fence_iqr_)
outliers = list(df.loc[outliers_range , i])
outliers_neg = list(df.loc[df[i]> upp_fence_iqr_ , i])
outliers_pos = list(df.loc[df[i]< low_fence_iqr_ , i])
info_1 = {'Min' : np.round(df[i].min(),2),
'Q1' : np.round(df[i].quantile(.25),num_decm_points),
'Median' : np.round(df[i].quantile(.5),num_decm_points),
'Q3' : np.round(df[i].quantile(.75),num_decm_points),
'Max' : np.round(df[i].quantile(1),num_decm_points),
'Mean' :np.round( df[i].mean(),num_decm_points),
'STD' : np.round(df[i].std(),num_decm_points),
'Variance' : np.round(df[i].var(),num_decm_points),
'Count' : df[i].count(),
'IQR' : np.round(iqr_,num_decm_points),
'IQR_Lower_Fence' : np.round(low_fence_iqr_,num_decm_points),
'IQR_Upper_Fence' : np.round(upp_fence_iqr_,num_decm_points),
'Skewness' : np.round(df[i].skew(),num_decm_points),
'Kurtosis' : np.round(df[i].kurt(),num_decm_points),
'#NonOutliers' : df[i].shape[0]-len(outliers),
'NonOutliers_Perc' : np.round(((df[i].shape[0]-len(outliers)) / df[i].shape[0])*100,num_decm_points),
'#Outliers' : len(outliers),
'Outliers_Perc' : np.round((len(outliers) / df[i].shape[0])*100,num_decm_points),
'#Outliers_neg' : len(outliers_neg),
'#Outliers_pos' : len(outliers_pos)
}
print(info_1 , "\n")
def Outlier_Detect_And_Show_Repl_Value(self,
df,
cols = [],
detect_method = 'iqr',
replace_outlier_by = 'mean',
replace_nulls_by = 'mean'
):
data_info = {}
cols = list(cols)
df = pd.DataFrame(df)
#df1 = pd.DataFrame(df[cols])
#print("Using",detect_method.upper(), "to detect the Outliers and" ,
# 'Nulls are replaced by', replace_nulls_by.upper(),
# 'Outliers are replaced by', replace_outlier_by.upper()
#)
for i in cols:
df1 = pd.DataFrame(df[i])
df1[i] = df1[i].astype(np.float64)
#print(i, "\n\n")
#--------- Replacing nulls -----------
if replace_nulls_by == 'mean':
df1[i] = df1[i].fillna(df1[i].mean())
elif replace_nulls_by == 'median':
df1[i] = df1[i].fillna(df1[i].median())
#--------- Descriptive Statistics -----------
means_ = np.mean(df[i])
std_ = np.std(df1[i])
medians_ = np.median(df1[i])
q3_ = df1[i].quantile(.75)
q1_ = df1[i].quantile(.25)
iqr_ = q3_ - q1_
low_iqr_ = q1_ - (1.5*iqr_)
upp_iqr_ = q3_ + (1.5*iqr_)
df1['ZScores'] = df1[i].apply(lambda X : ((X - means_)/std_))
mode_2 = self.Data_Mode_Values(df1[i])[0]
values_after_replacing_Outliers = 'Values_Updated'
if detect_method == 'iqr':
df1['Outlier_or_Not'] = np.where(((df1[i]> upp_iqr_) | (df1[i]< low_iqr_)),'Y','N')
if replace_outlier_by =='mean':
df1[values_after_replacing_Outliers] = np.where(df1['Outlier_or_Not']=='Y',means_,df1[i])
elif replace_outlier_by =='median':
df1[values_after_replacing_Outliers] = np.where(df1['Outlier_or_Not']=='Y',medians_,df1[i])
elif replace_outlier_by =='mode':
df1[values_after_replacing_Outliers] = np.where(df1['Outlier_or_Not']=='Y',mode_2,df1[i])
elif detect_method == 'zscore':
df1['Outliers_or_Not'] = np.where(((df1['ZScores']> 3) | (df1['ZScores']< -3)),'Y','N')
if replace_outlier_by =='mean':
df1[values_after_replacing_Outliers] = np.where(df1['Outliers_or_Not']=='Y', means_,df1[i])
elif replace_outlier_by =='median':
df1[values_after_replacing_Outliers] = np.where(df1['Outliers_or_Not']=='Y', medians_,df1[i])
elif replace_outlier_by =='mode':
df1[values_after_replacing_Outliers] = np.where(df1['Outlier_or_Not']=='Y',mode_2,df1[i])
data_info[i] = df1
return (data_info)
def outlier_replace_in_Num_Cols(self, df , cols = [],
detect_method = 'iqr',
replace_outlier_by = 'mean',
replace_nulls_by = 'mean'
):
for i in cols:
j = self.Outlier_Detect_And_Show_Repl_Value(df, cols = [i],
detect_method = detect_method,
replace_outlier_by = replace_outlier_by,
replace_nulls_by = replace_nulls_by)
df[i] = pd.DataFrame(j[i])['Values_Updated'].values
# def numerical_values_bins(df , cols =[] , no_bins_col_wise = [], default_no_bins = 3):
# bins_ = {}
# cols = list(cols)
# no_bins_col_wise = list(no_bins_col_wise)
# #print(type(no_bins_col_wise) , len( no_bins_col_wise))
# if (len(no_bins_col_wise)==0) | (no_bins_col_wise==None) | (len(cols) != len(no_bins_col_wise)):
# no_bins = list([default_no_bins]*len(cols))
# else:
# no_bins = no_bins_col_wise
# #print(no_bins)
# for i, v in enumerate(cols):
# bins_[v] = Data_pre_pro.df_col_val_perc(pd.cut(df[v],no_bins[i]))
# bins_[v]['Ranks']=list(range(len(bins_[v]['Ranks'])))
# bins_[v][v]= bins_[v][v].astype(str)
# bins_[v].columns = ['Range', 'Counts', 'Perc', 'Cum_Perc', 'Mapping_Value']
# bins_[v] = bins_[v][['Range','Mapping_Value','Counts', 'Perc', 'Cum_Perc']]
# df[v] = pd.cut(df[v],no_bins[i], labels = list(range(no_bins[i])))
# print(bins_)
# def replacing_nulls_in_numeric_cols(self,
# df,
# numeric_cols = [],
# replace_nulls_by = 'mean'
# ):
# numeric_cols = list(numeric_cols)
# df = pd.DataFrame(df)
# for i in numeric_cols:
# df[i] = df[i].astype(np.float64)
# #--------- Replacing nulls -----------
# if replace_nulls_by == 'mean':
# df[i] = df[i].fillna(df[i].mean())
# elif replace_nulls_by == 'median':
# df[i] = df[i].fillna(df[i].median())
# In[ ]:
class Text_Mining_Variables:
    """Shared constants for the text-mining helpers: regex patterns,
    stop-word lists (NLTK + spaCy), an emoji regex, and lookup tables for
    chat abbreviations and English contractions.

    NOTE: everything below runs once, at class-definition time, and
    requires nltk/spacy (with 'en_core_web_sm') to be installed.
    """
    import re
    import os
    import csv
    import sys
    import nltk
    import spacy
    import random
    import string
    import unicodedata
    import math as m
    import numpy as np
    import pandas as pd
    from textblob import Word
    from textblob import TextBlob
    from bs4 import BeautifulSoup
    from nltk.stem import PorterStemmer
    from spacy.lang.en.stop_words import STOP_WORDS
    from nltk.stem.snowball import SnowballStemmer
    remove_punctuation = string.punctuation
    import re
    # ---- Regex patterns for URLs, e-mail addresses and numbers ----
    re_syntax_extract_urls = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    re_syntax_email_ids = r"([a-zA-Z0-9+._-]+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9_-]+)"
    re_syntax_get_urls = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    # re_syntax_get_urls =r'(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?'
    re_extract_numbers = r"[\d]+"
    re_extract_numbers_including_decimals = r"[-+]?(\d+\.?\d*|\.\d+)"
    wh_words = ['who', 'what', 'when', 'why', 'how', 'which', 'where', 'whom']
    # removing stop words
    import nltk
    from nltk.corpus import stopwords
    stop_words_1 = list(set(stopwords.words('english')))
    import spacy
    en = spacy.load('en_core_web_sm')
    stop_words_2 = list(set(en.Defaults.stop_words))
    # Union of the NLTK and spaCy stop-word lists.
    stop_words_total = list(set(stop_words_2 + stop_words_1))
    # # print( "len(stop_words):" , len(stop_words_total),
    # #       "\nlen(stop_words_2)-SPACY:" , len(stop_words_2),
    # #       "\nlen(stop_words_1)-NLTK:" , len(stop_words_1)
    # #      )
    # removing stop words
    #https://gist.github.com/sebleier/554280
    # import nltk
    # from nltk.corpus import stopwords
    # stop_words_1 = list(set(stopwords.words('english')))
    # import spacy
    # en = spacy.load('en_core_web_sm')
    # stop_words_2 = list(set(en.Defaults.stop_words))
    # stop_words_total = list(set(stop_words_2 + stop_words_1))
    # print( "len(stop_words):" , len(stop_words_total),
    #       "\nlen(stop_words_2)-SPACY:" , len(stop_words_2),
    #       "\nlen(stop_words_1)-NLTK:" , len(stop_words_1)
    #      )
    # import requests
    # stopwords_list = requests.get("https://gist.githubusercontent.com/rg089/35e00abf8941d72d419224cfd5b5925d/raw/12d899b70156fd0041fa9778d657330b024b959c/stopwords.txt").content
    # stopwords = set(stopwords_list.decode().splitlines())
    # stopwords
    # Compiled character-class covering common emoji / pictograph ranges.
    emoji_pattern = re.compile("["
                               u"\U0001F600-\U0001F64F"
                               u"\U0001F300-\U0001F5FF"
                               u"\U0001F680-\U0001F6FF"
                               u"\U0001F1E0-\U0001F1FF"
                               u"\U00002500-\U00002BEF"
                               u"\U00002702-\U000027B0"
                               u"\U00002702-\U000027B0"
                               u"\U000024C2-\U0001F251"
                               u"\U0001f926-\U0001f937"
                               u"\U00010000-\U0010ffff"
                               u"\u2640-\u2642"
                               u"\u2600-\u2B55"
                               u"\u200d"
                               u"\u23cf"
                               u"\u23e9"
                               u"\u231a"
                               u"\ufe0f"
                               u"\u3030"
                               "]+", flags=re.UNICODE)
    # Chat shorthand -> expansion; keys are space-padded so replacement only
    # hits whole tokens.
    chat_abbrevations = {' 4u ': ' i have a question for you. ',
                         ' ^^ ': ' read line above ',
                         ' 121 ': ' one to one ',
                         ' <3 ': ' love ',
                         ' 2 ': ' to ',
                         ' 2mrw ': ' tomorrow ',
                         ' 4 ': ' for ',
                         ' afk ': ' away from keyboard ',
                         ' aka ': ' also known as ',
                         ' asap ': ' as soon as possible ',
                         ' a/s/l ': ' age sex location ',
                         ' ayt ': ' are you there ',
                         ' b2w ': ' back to work ',
                         ' b4 ': ' before ',
                         ' bbl ': ' be back later ',
                         ' bbs ': ' be back soon ',
                         ' bf ': ' boyfriend ',
                         ' bff ': ' best friend(s) forever ',
                         ' brb ': ' be right ',
                         ' btw ': ' by the way ',
                         ' cmb ': ' call me back ',
                         ' cmiiw ': " correct me if i am wrong ",
                         ' cu ': ' see you ',
                         ' cu l8r ': ' see you later ',
                         ' cuz ': ' because ',
                         ' cos ': ' because ',
                         ' cwyl ': ' chat with you later ',
                         ' dc ': ' disconnected ',
                         ' diy ': ' do it yourself ',
                         ' dm ': ' direct message ',
                         ' f2f ': ' face to face ',
                         ' faq ': ' frequently asked questions ',
                         ' fb ': ' facebook ',
                         ' fyi ': ' for your information ',
                         ' fyeo ': ' for your eyes only ',
                         ' gb ': ' goodbye ',
                         ' gf ': ' girlfriend ',
                         ' gg ': ' gotta go ',
                         ' gl ': ' good luck ',
                         ' gr8 ': ' great! ',
                         ' hbd ': ' happy birthday ',
                         ' hhhhhh ': ' very funny ',
                         ' how r u ': ' how are you ',
                         ' ic ': ' i see ',
                         ' idk ': " i do not know ",
                         ' imho ': ' in my humble opinion ',
                         ' ik ': ' i know ',
                         ' im ': ' instant message ',
                         ' iow ': ' in other words ',
                         ' j k ': ' just kidding ',
                         ' k ': ' ok ',
                         ' l8r ': ' later or goodbye ',
                         ' lol ': ' laugh out loud ',
                         ' m/f ': ' male or female ',
                         ' mins ': ' minutes ',
                         ' msg ': ' message ',
                         ' nv ': ' nevermind ',
                         ' oic ': ' oh, i see ',
                         ' otw ': ' on the way ',
                         ' p2p ': ' person to person ',
                         ' plz ': ' please ',
                         #' plz': ' please ',
                         ' pm ': ' private message ',
                         ' rofl ': ' rolling on the floor laughing ',
                         ' ruok ': ' are you okay ',
                         ' sup ': " what is up ",
                         ' zup ': " what is up ",
                         ' syl ': ' see you later ',
                         ' tgif ': " thank goodness it is friday ",
                         ' thx ': ' thanks ',
                         ' thnx ': ' thanks ',
                         ' ttfn ': ' ta ta for now ',
                         ' ttyl ': ' talk to you later ',
                         ' tyt ': ' take your time ',
                         ' u ': ' you ',
                         ' u2 ': ' you too ',
                         ' ur ': " your you are ",
                         ' w ': ' with ',
                         ' w/o ': ' without ',
                         ' wru ': ' where are you ',
                         ' xo ': ' hugs and kisses love ',
                         ' zzz ': ' tired or bored ',
                         ' gm ': ' good morning ',
                         ' ga ': ' good afternoon ',
                         ' ge ': ' good evening ',
                         ' gn ': ' good night ',
                         ' gm, ': ' good morning ',
                         ' ga, ': ' good afternoon, ',
                         ' ge, ': ' good evening, ',
                         ' gn, ': ' good night, ',
                         ' gn.': ' good night. ',
                         ' gm.': ' good morning. ',
                         ' ge.': ' good evening. ',
                         ' ga.': ' good afternoon. ' ,
                         ' nlp ' : ' natural language processing ',
                         ' pls': ' please',
                         ' pls,': ' please,',
                         ' pls.': ' please.',
                         ' r ' : ' you ',
                         ' hru ' : ' how are you ',
                         ' n ' : ' and ',
                         "B'day" : "Birthday"
                         }
    # English contraction -> expanded form, plus a few space-padded slang
    # tokens at the end.
    contractions_1 = {
        "ain't": "am not",
        "aren't": "are not",
        "that's": "that is",
        "there'd": "there would",
        "there'd've": "there would have",
        "there's": "there is",
        "can't": "cannot",
        "can't've": "cannot have",
        "'cause": "because",
        "could've": "could have",
        "couldn't": "could not",
        "couldn't've": "could not have",
        "didn't": "did not",
        "doesn't": "does not",
        "don't": "do not",
        "hadn't": "had not",
        "i've": "i have",
        "isn't": "is not",
        "it'd": "it would",
        "it'd've": "it would have",
        "it'll": "it will",
        "it'll've": "it will have",
        "it's": "it is",
        "let's": "let us",
        "ma'am": "madam",
        "mayn't": "may not",
        "might've": "might have",
        "mightn't": "might not",
        "mightn't've": "might not have",
        "how's": "how does",
        "i'd": "i would",
        "i'd've": "i would have",
        "i'll": "i will",
        "i'll've": "i will have",
        "i'm": "i am",
        "must've": "must have",
        "mustn't": "must not",
        "mustn't've": "must not have",
        "needn't": "need not",
        "needn't've": "need not have",
        "o'clock": "of the clock",
        "hadn't've": "had not have",
        "hasn't": "has not",
        "haven't": "have not",
        "he'd": "he would",
        "he'd've": "he would have",
        "he'll": "he will",
        "he'll've": "he will have",
        "he's": "he is",
        "how'd": "how did",
        "how'd'y": "how do you",
        "how'll": "how will",
        "oughtn't": "ought not",
        "oughtn't've": "ought not have",
        "shan't": "shall not",
        "sha'n't": "shall not",
        "shan't've": "shall not have",
        "she'd": "she would",
        "she'd've": "she would have",
        "she'll": "she will",
        "she'll've": "she will have",
        "she's": "she is",
        "should've": "should have",
        "shouldn't": "should not",
        "shouldn't've": "should not have",
        "so've": "so have",
        "so's": "so is",
        "that'd": "that would",
        "that'd've": "that would have",
        "they'd": "they would",
        "they'd've": "they would have",
        "they'll": "they will",
        "they'll've": "they will have",
        "they're": "they are",
        "they've": "they have",
        "to've": "to have",
        "wasn't": "was not",
        " u ": " you ",
        " ur ": " your ",
        " n ": " and ",
        " ok " : " okay ",
        " nope " : " no ",
        " asap ": " as soon as possible ",
        " eod ": " end of the day ",
        " fyi ": " for your information ",
        " omg ": " oh my god !!! ",
        " gn ": " good night ",
        " ge ": " good evening " ,
        " gm ": " good morning " ,
        }
# ----------------------------------------------------
class dummy1:
    """Empty placeholder class (kept for backward compatibility)."""
    pass
# ----------------------------------------------------
class Data_Pre_Processing_Text_Mining(Text_Mining_Variables):
    """Text cleaning / feature-extraction helpers built on top of the
    constants defined in Text_Mining_Variables.

    NOTE: the class-level statements below (imports, NLTK downloads,
    spaCy model load) run once, at class-definition time.
    """
    import re
    import os
    import csv
    import sys
    import nltk
    import spacy
    import random
    import string
    import unicodedata
    import math as m
    import numpy as np
    import pandas as pd
    from textblob import Word
    from textblob import TextBlob
    from bs4 import BeautifulSoup
    from nltk.stem import PorterStemmer
    from spacy.lang.en.stop_words import STOP_WORDS
    from nltk.stem.snowball import SnowballStemmer
    remove_punctuation = string.punctuation
    # One-off NLTK resource downloads needed by the tokenising/lemmatising
    # helpers below.
    nltk.download('wordnet')
    nltk.download('omw-1.4')
    nltk.download('stopwords')
    nltk.download('punkt')
    nlp = spacy.load("en_core_web_sm")
    # Re-export the shared regexes / lookup tables as attributes of this class.
    chat_abbrevations = Text_Mining_Variables.chat_abbrevations
    re_syntax_email_ids = Text_Mining_Variables.re_syntax_email_ids
    re_syntax_extract_urls = Text_Mining_Variables.re_syntax_extract_urls
    contractions_1 = Text_Mining_Variables.contractions_1
    emoji_pattern = Text_Mining_Variables.emoji_pattern
    stop_words_total = list(Text_Mining_Variables.stop_words_total)
    re_extract_numbers_including_decimals = Text_Mining_Variables.re_extract_numbers_including_decimals
def cosine_similarity(self, V1,V2):
import numpy as np
#V1, V2 should be Numpy arrays and contain numerical values to perform VECTOR DOT products
V1 = np.array(V1)
V2 = np.array(V2)
result = np.dot(1, V2) / (np.sVqrt(np.sum(V1**2)) * np.sqrt(np.sum(V2**2)))
return result
def spelling_correction(self,text):
from textblob import TextBlob
text = str(TextBlob(text).correct()).strip()
return text
def remove_html_tags(self,text):
from bs4 import BeautifulSoup
text = BeautifulSoup(text, 'lxml').get_text()
return text.strip()
def remove_accented_chars(self,text):
import unicodedata
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8', 'ignore')
return text
def remove_spaces_repetition(self,text):
text = " ".join(text.strip().split())
return text
def remove_punctuations(self,text):
import string,re
remove_punctuation = string.punctuation
for i in remove_punctuation:
text = text.replace(i, "").replace(" ", "").strip()
text = re.sub(r"[\s]{1,}", " " , str(text)).strip()
return text
def get_email_ids(self,text):
import string,re
emails_ids = re.findall(Text_Mining_Variables.re_syntax_email_ids, text.lower())
return emails_ids
def count_email_ids(self,text):
import string,re
emails_ids = re.findall(Text_Mining_Variables.re_syntax_email_ids,text.lower())
return len(emails_ids)
def remove_email_ids(self,text):
import string,re
text_formatted = re.sub(Text_Mining_Variables.re_syntax_email_ids, "",text.lower())
return text_formatted
def get_urls(self,text):
import string,re
text = text.lower()
urls = re.findall(Text_Mining_Variables.re_syntax_extract_urls, text)
return urls
def count_urls(self,text):
import string,re
text = text.lower()
urls = re.findall(Text_Mining_Variables.re_syntax_extract_urls, text)
return len(urls)
def remove_urls(self,text):
import string,re
text = text.lower()
urls = re.sub(Text_Mining_Variables.re_syntax_extract_urls, "", text)
return urls
def count_words(self,text):
import string,re
words = str(text).split()
return len(words)
def count_characters(self,text):
import string,re
#words = self.remove_spaces_repetition(str(text))
raw_text = re.sub(r"[^\w\s\_]*", "" , str(text)).strip()
raw_text = re.sub(r"[\s]{1,}", " " , str(raw_text)).strip()
return len(raw_text)
def count_characters2(self,text):
import string,re
text1 = self.remove_punctuations(text)
words = str(text1).split()
final_text = ["".join(i) for i in words]
return len(final_text)
def count_stop_words(self,text):
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
counts = len([i for i in text.split() if i in self.stop_words_total])
return counts
def count_Non_stop_words(self,text):
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
counts = len([i for i in text.split() if i not in self.stop_words_total])
return counts
def count_punctuations(self,text):
import re
k = len(text) - len(re.sub(r"[^\w\s]+" , "", re.sub("[\s]+" , " " , text)))
return k
def remove_stop_words(self,text):
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
#Text_Mining_Variables.stop_words_total
text = " ".join([i for i in text.split() if i not in self.stop_words_total])
return text.strip()
def count_upper_case_words(self,text, min_num_chars = 2):
k =len([i for i in text.split() if i.isupper() and len(i)>min_num_chars])
#k1 =([i for i in text.split() if i.isupper() and len(i)>2])
return k
def count_sentences(self,text):
import spacy
nlp = spacy.load('en_core_web_sm')
doc = nlp(text)
k =len([i for i in doc.sents])
return k
def count_sentences_nltk(self,text):
from nltk.tokenize import sent_tokenize
k =len(sent_tokenize(text))
return k
def get_upper_case_words(self,text, min_num_chars = 2):
k =([i for i in text.split() if i.isupper() and len(i)>=min_num_chars])
return k
def custom_contractions(self,text):
if type(text) is str:
text = text.lower()
for key in Text_Mining_Variables.contractions_1:
value = Text_Mining_Variables.contractions_1[key]
text = text.replace(key,value)
return text
else: return text
def remove_retweets(self,text):
import string,re
text = re.sub("RT" , "", text)
return text
def count_hash_tags(self,text):
k = len([i for i in text.split() if i.startswith("#")])
return k
def count_mentions(self,text):
k = len([i for i in text.split() if i.startswith("@")])
return k
def get_numericals(self,text):
import re
numericals = list(re.findall(self.re_extract_numbers_including_decimals,text.lower()) )
return (numericals)
def get_numericals_2(self,text):
import re
numericals = [i for i in text.split() if i.isdigit()]
return (numericals)
def count_numericals(self,text):
import re
numericals = re.findall(self.re_extract_numbers_including_decimals,text.lower())
return len(numericals)
def count_numericals_2(self,text):
import re
numericals = [i for i in text.split() if i.isdigit()]
return len(numericals)
def remove_numericals(self,text):
import re
text = re.sub(self.re_extract_numbers_including_decimals,"", text.lower())
return text
def count_verbs(self,text):
from textblob import TextBlob
k = len([i for i in text.split() if TextBlob(i).tags[0][1] in ['VB','VBD','VBG','VBN','VBP' ,'VBZ']])
return k
def count_digits(self,text):
from textblob import TextBlob
k = len([i for i in text.split() if TextBlob(i).tags[0][1] in ['CC','CD']])
return k
def count_adjectives(self,text):
from textblob import TextBlob
k = len([i for i in text.split() if TextBlob(i).tags[0][1] in ['JJ','JJR' , 'JJR']])
return k
def count_pronouns(self,text):
from textblob import TextBlob
k = len([i for i in text.split() if TextBlob(i).tags[0][1] in ['PRP','PRP$' , 'POS']])
return k
def get_root_word(self, text_):
import spacy
nlp = spacy.load('en_core_web_sm')
# try:
# nlp = spacy.load('en_core_web_lg')
# print('large is imported')
# except (ModuleNotFoundError, ValueError):
# print('large is not imported, trying to import medium')
# nlp = spacy.load('en_core_web_md')
# print('medium')
# except:
# print('large, medium are not imported, and trying to import small')
# nlp = spacy.load('en_core_web_sm')
# print('small')
text_list = []
doc = nlp(text_)
for token in doc:
lemma = str(token.lemma_)
if lemma == '-PRON-' or lemma == 'be':
lemma = token.text
text_list.append(lemma)
return (" ".join(text_list))
def nltk_snowball_stemmer(self, text):
from nltk.stem.snowball import SnowballStemmer
snowball_stemmer = SnowballStemmer(language='english')
text = " ".join([snowball_stemmer.stem(token) for token in text.split()])
return text
def textblob_lemma(self,text):
from nltk.stem.snowball import SnowballStemmer
from textblob import Word
snowball_stemmer = SnowballStemmer(language='english')
text = str(" ".join([Word(token).lemmatize() for token in text.split()])).strip()
return text
def spacy_lemma(self,text):
import spacy
#!python -m spacy download en_core_web_sm
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
text = str(" ".join([token.lemma_ for token in doc])).strip()
return text.strip()
def remove_most_repeated_rare_words(self, df, col = None, number_of_words_remov = 5):
A = " ".join(df[col])
t = list(pd.Series(A.split()).value_counts()[:number_of_words_remov].index)
b = list(pd.Series(A.split()).value_counts()[-number_of_words_remov:].index)
b = b+t
#len(b)
f = list(df[col].apply(lambda X : " ".join([i for i in X.split() if i not in b])))
df[col] = f
return df
import re
def get_actual_text_chat_abbrevations(self,text):
for i in Text_Mining_Variables.chat_abbrevations:
text = text.lower()
key = i.lower()
value = Text_Mining_Variables.chat_abbrevations[i].lower()
text = text.replace(key,value)
return text
def remove_punctuations_2(self,text):
import re
text = re.sub(r'[^\w\s]', '', text)
#text = re.sub(r'[\s\s]', '', text)
return text
def count_punctuations_2(self,text):
import re
extract_pncts = re.findall(r'[^\w\s]',text)
new_text = re.sub(r'[^\w\s]', "", text)
return len(extract_pncts)
# def remove_emojis_1(self,text):
# import csv,os,sys,re
# try:
# from cleantext import clean
# except:
# ! pip install cleantext
# from cleantext import clean
# import re
# text = clean(text, no_emoji=True)
# return text
def remove_emojis_2(self,text):
import csv,os,sys,re
text = self.emoji_pattern.sub(r'', text)
return text
#def get_emojis(self,text):
#import csv,os,sys,re
#emojis = list(self.emoji_pattern.findall(text))
#return emojis
def count_nouns_spacy(self,text):
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
nouns = [i for i in doc if i.pos_ in ['NOUN']]
return len(nouns)
def count_pronouns_spacy(self,text):
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
k = [i for i in doc if i.pos_ in ['PRON' ]]
return len(k)
def count_propernouns_spacy(self,text):
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
k = [i for i in doc if i.pos_ in ['PROPN' ]]
return len(k)
def count_adjectives_spacy(self,text):
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
ADJ = [i for i in doc if i.pos_ in ['ADJ']]
return len(ADJ)
def count_auxliary_spacy(self,text):
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
k = [i for i in doc if i.pos_ in ['AUX']]
return len(k)
def count_verbs_spacy(self,text):
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
k = [i for i in doc if i.pos_ in ['VERB']]
return len(k)
def count_adverbs_spacy(self,text):
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
k = [i for i in doc if i.pos_ in ['ADV']]
return len(k)
def count_numericals_spacy(self,text):
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
k = [i for i in doc if i.pos_ in ['NUM']]
return len(k)
def count_verbs_nltk(self,text):
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
k = len([i[0] for i in nltk.pos_tag(text.split()) if i[1] in ['VBN', 'VBP', 'VBG', "VB"]])
return k
def count_nouns_nltk(self,text):
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
k = len([i[0] for i in nltk.pos_tag(text.split()) if i[1] in ['NN', 'NNS', 'NNP', "NNPS"]])
return k
def count_pronouns_nltk(self,text):
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
k = len([i[0] for i in nltk.pos_tag(text.split()) if i[1] in ['PRP', 'PRP$', 'WP']])
return k
def count_adverbs_nltk(self,text):
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
k = len([i[0] for i in nltk.pos_tag(text.split()) if i[1] in ['RBS', 'RB','RBR','WRB']])
return k
def count_adjectives_nltk(self,text):
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
k = len([i[0] for i in nltk.pos_tag(text.split()) if i[1] in ['JJ', 'JJS','JJR']])
return k
def count_conjuctions_nltk(self,text):
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
k = len([i[0] for i in nltk.pos_tag(text.split()) if i[1] in ['CC']])
return k
def count_interjections_nltk(self,text):
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
k = len([i[0] for i in nltk.pos_tag(text.split()) if i[1] in ['UH']])
return k
def remove_text_garbage(self,text):
import re
import os
import csv
import sys
import nltk
import spacy
import random
import string
import unicodedata
import math as m
import numpy as np
import pandas as pd
from textblob import Word
from textblob import TextBlob
from bs4 import BeautifulSoup
from nltk.stem import PorterStemmer
from spacy.lang.en.stop_words import STOP_WORDS
from nltk.stem.snowball import SnowballStemmer
remove_punctuation = string.punctuation
text = " ".join(text.lower().strip().split())
#print("converting into lowercase: Done")
text = self.spelling_correction(text)
#print("spelling_correction: Done")
text = BeautifulSoup(text, 'lxml').get_text().strip()
#print("remove_html_tags: Done")
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8', 'ignore')
#print("remove_accented_chars: Done")
text = " ".join(text.strip().split())
for i in remove_punctuation:
text = text.replace(i, "").replace(" ", "").strip()
#print("remove_punctuation: Done")
text = re.sub(r"[\s]{1,}", " " , str(text)).strip()
#print("remove_spaces_repetition: Done")
text = re.sub(Text_Mining_Variables.re_syntax_extract_urls, "", text)
#print("remove_urls: Done")
text = " ".join([i for i in text.split() if i not in self.stop_words_total])
#print("remove_stop_words_total: Done")
text = text.strip()
text = re.sub("RT" , "", text)
#print("remove_sretweets: Done")
text = re.sub(r'[^\w\s]', '', text)
#print("remove_punctuations: Done")
text = self.emoji_pattern.sub(r'', text).strip()
#print("remove_emoji_pattern: Done")
text = re.sub(Text_Mining_Variables.re_syntax_email_ids, "",text).strip()
#print("remove_email_ids: Done")
text = " ".join(text.lower().strip().split())
#text = self.spelling_correction(text).strip()
return text
def simple_clean(self, text):
    """Tokenize and lemmatize ``text`` with spaCy, dropping stop words.

    Every token is lemmatized, lower-cased and stripped; tokens that are in
    ``self.stop_words_total`` or that match punctuation are discarded.

    :param text: raw input string.
    :return: list of cleaned token strings.
    """
    import string
    import spacy

    # Loading a spaCy model is expensive (hundreds of ms); do it once per
    # instance and reuse the cached pipeline on subsequent calls.
    if getattr(self, "_simple_clean_nlp", None) is None:
        self._simple_clean_nlp = spacy.load("en_core_web_sm")
    punctuation = string.punctuation
    doc = self._simple_clean_nlp(text)
    tokens = [token.lemma_.lower().strip() for token in doc]
    # NOTE: ``tok not in punctuation`` is a substring test on the punctuation
    # string, so it also drops empty tokens ("" is a substring of any string)
    # -- deliberately the same behaviour as the original membership check.
    return [
        tok
        for tok in tokens
        if tok not in self.stop_words_total and tok not in punctuation
    ]
#--------------------------------------------------------------------------------------------
def Text_Basic_Pre_Processing(self,df, text_column_name = None,
                count_stop_words = False,count_Non_stop_words= False,
                count_sentences = False, count_numericals= False,
                count_punctuations= False,
                count_nouns = False,count_pronouns= False,
                count_verbs= False,count_adverbs =False,
                count_adjectives= False,count_auxliary = False,
                spelling_correction = False,
                twitter_messages = False,count_hash_tags= False,count_mentions= False,
                get_email_ids = False, get_urls = False,
                process_chat_abbrevations = False,
                remove_most_repeated_rare_words= False ,
                number_of_words_remov = 10):
    """Run a battery of counting and cleaning steps on ``df[text_column_name]``.

    Mutates ``df`` in place (and also returns it): first adds statistic
    columns computed from the raw text (word/char/sentence counts, POS
    counts, email/url counts, ...), then strips emojis/emails/urls/html from
    the raw column, and finally builds a cleaned ``Processed_Message``
    column via contraction expansion, accent/punctuation/stop-word removal,
    stemming and lemmatization.  The order of the steps below is
    significant: counts are taken before the corresponding removal steps.

    Each boolean flag enables the correspondingly named optional step.
    ``count_auxliary`` is currently unused -- its only use is commented out
    below.

    :param df: pandas DataFrame holding the corpus.
    :param text_column_name: name of the column with the raw text.
    :param number_of_words_remov: how many most- and least-frequent words
        to drop when ``remove_most_repeated_rare_words`` is enabled.
    :return: the same DataFrame, with added columns.
    """
    print("Text Data Preprocessing for Text Analytics is started ................ :)\n")
    df[text_column_name] = df[text_column_name].astype(str)
    # --- statistics computed on the raw (uncleaned) text ---
    df['len_Acual_Message'] = df[text_column_name].apply(lambda X : len(str(X)))
    df['Words_Count'] = df[text_column_name].apply(lambda X : self.count_words(str(X)))
    print("Words_Count: Done")
    df['Literal_Count'] = df[text_column_name].apply(lambda X : self.count_characters(str(X)))
    print("Literal_Count: Done")
    if count_sentences == True:
        df['count_sentences'] = df[text_column_name].apply(lambda X : self.count_sentences_nltk(str(X)))
        print("count_sentences: Done")
    if count_punctuations == True:
        #import spacy
        #nlp = spacy.load("en_core_web_sm")
        df['count_punctuations'] = df[text_column_name].apply(lambda X : \
            (self.count_punctuations_2(str(X))))
        print("count_punctuations: Done")
    # NOTE(review): this division yields inf when Words_Count is 0, and the
    # astype(np.int64) below would then raise -- confirm that empty messages
    # cannot reach this point.
    df['AVG_Chars_Count'] = df['Literal_Count'] / df['Words_Count']
    df['AVG_Chars_Count'] = df['AVG_Chars_Count'].astype(np.int64)
    print("AVG_Chars_Count: Done")
    # --- destructive cleaning of the raw column starts here ---
    df[text_column_name] = df[text_column_name].apply(lambda X : self.remove_emojis_2(text = str(X)))
    print("remove_emojis: Done")
    df['Upper_case_Words_Count'] = df[text_column_name].apply(lambda X : self.count_upper_case_words(str(X)))
    print("Upper_case_Words_Count: Done")
    df['Get_Upper_case_Words'] = df[text_column_name].apply(lambda X : self.get_upper_case_words(str(X)))
    print("get_upper_case_words: Done")
    if get_email_ids == True:
        df['email_ids'] = df[text_column_name].apply(lambda X : (self.get_email_ids(str(X))))
        print("get_email_ids: Done")
    if get_urls == True:
        df['urls'] = df[text_column_name].apply(lambda X : (self.get_urls(str(X))))
        print("get_urls: Done")
    # counts are unconditional; emails/urls are removed right afterwards
    df['count_email_ids'] = df[text_column_name].apply(lambda X : len(self.get_email_ids(str(X))))
    print("count_email_ids: Done")
    df['count_urls'] = df[text_column_name].apply(lambda X : len(self.get_urls(str(X))))
    print("count_urls: Done")
    df[text_column_name] = df[text_column_name].apply(lambda X : (self.remove_email_ids(str(X))))
    print("remove_email_ids: Done")
    df[text_column_name] = df[text_column_name].apply(lambda X : (self.remove_urls(str(X))))
    print("remove_urls: Done")
    df[text_column_name] = df[text_column_name].apply(lambda X : (self.remove_html_tags(str(X))))
    print("remove_html_tags: Done")
    df[text_column_name] = df[text_column_name].apply(lambda X : self.remove_retweets(str(X)))
    print("remove_retweets: Done")
    # Twitter-specific counts (only when both flags are set)
    if (twitter_messages ==True) and (count_hash_tags==True) :
        df['count_hash_tags'] = df[text_column_name].apply(lambda X : self.count_hash_tags(str(X)))
        print("count_hash_tags: Done")
    if (twitter_messages ==True) and (count_mentions==True) :
        df['count_mentions'] = df[text_column_name].apply(lambda X : self.count_mentions(str(X)))
        print("count_mentions: Done")
    # --- build the cleaned Processed_Message column ---
    df['Processed_Message'] = df[text_column_name].apply(lambda X : (str(X).lower()))
    df['Processed_Message'] = df['Processed_Message'].apply(lambda X : \
        self.custom_contractions(str(X)))
    print("custom_contractions: Done")
    df['Processed_Message'] = df['Processed_Message'].apply(lambda X : (self.remove_spaces_repetition(str(X))))
    print("remove_spaces_repetition: Done")
    df['Processed_Message'] = df['Processed_Message'].apply(lambda X : (self.remove_accented_chars(str(X))))
    print("remove_accented_chars: Done")
    if process_chat_abbrevations ==True:
        df['Processed_Message'] = df['Processed_Message'].apply(lambda X : \
            (self.get_actual_text_chat_abbrevations(text = str(X))))
        print("get_actual_text_chat_abbrevations: Done")
    df['Processed_Message'] = df['Processed_Message'].apply(lambda X : self.remove_punctuations(str(X)))
    df['Processed_Message'] = df['Processed_Message'].apply(lambda X : self.remove_punctuations_2(text = str(X)))
    print("remove_punctuations: Done")
    if spelling_correction == True:
        # NOTE: TextBlob.correct() is slow; that is why it is opt-in.
        from textblob import TextBlob
        df['Processed_Message'] = df['Processed_Message'].apply(lambda X: str(TextBlob(X).correct()))
        print("textblob_Spelling_correction: Done")
    if count_stop_words == True:
        df['count_stop_words'] = df['Processed_Message'].apply(lambda X : \
            self.count_stop_words(text = str(X)))
        print("count_stop_words: Done")
    if count_Non_stop_words == True:
        df['count_Non_stop_words'] = df['Processed_Message'].apply(lambda X : \
            (self.count_Non_stop_words(str(X))))
        print("count_Non_stop_words: Done")
    # --- optional POS counts (NLTK-based) ---
    if count_nouns ==True:
        #import spacy
        #nlp = spacy.load("en_core_web_sm")
        df['count_nouns'] = df['Processed_Message'].apply(lambda X:\
            self.count_nouns_nltk(text = X))
        print("count_nouns: Done")
    if count_pronouns ==True:
        df['count_pronouns'] = df['Processed_Message'].apply(lambda X : \
            self.count_pronouns_nltk(text = X))
        print("count_pronouns: Done")
    if count_verbs ==True:
        df['count_verbs'] = df['Processed_Message'].apply(lambda X : \
            self.count_verbs_nltk(text = X))
        print("count_verbs: Done")
    if count_adverbs ==True:
        df['count_adverbs'] = df['Processed_Message'].apply(lambda X : \
            self.count_adverbs_nltk(text = X))
        print("count_adverbs: Done")
    if count_adjectives ==True:
        df['count_adjectives'] = df['Processed_Message'].apply(lambda X : \
            self.count_adjectives_nltk(text = X))
        print("count_adjectives: Done")
    #if count_auxliary == True:
    #df['count_auxliary'] = df['Processed_Message'].apply(lambda X : (self.count_auxliary_spacy(str(X))))
    #print("count_auxliary: Done")
    if count_numericals == True:
        df['count_numericals'] = df['Processed_Message'].apply(self.count_numericals)
        print("count_numericals: Done")
    # NOTE(review): this repeats the same flag check as above; both columns
    # are gated by count_numericals.
    if count_numericals == True:
        df['get_numericals'] = df['Processed_Message'].apply(self.get_numericals)
        print("get_numericals: Done")
    #from textblob import TextBlob
    #df['Processed_Message'] = df['Processed_Message'].apply(lambda X : self.remove_text_garbage(text = str(X)))
    #print("remove_text_garbage: Done")
    df['Processed_Message'] = df['Processed_Message'].apply(lambda X : self.remove_stop_words(str(X)))
    print("remove_stop_words: Done")
    df['Processed_Message'] = df['Processed_Message'].apply(lambda X : self.nltk_snowball_stemmer(str(X)))
    print("nltk_snowball_stemmer : Done")
    df['Processed_Message'] = df['Processed_Message'].apply(lambda X : self.textblob_lemma(text = str(X)))
    print("textblob_lemma: Done")
    #df['Processed_Message'] = df['Processed_Message'].apply(lambda X : self.get_root_word(str(X)))
    #print("get_root_word i.e. lemmatization: Done")
    if remove_most_repeated_rare_words == True:
        df = self.remove_most_repeated_rare_words(df, col='Processed_Message' , \
            number_of_words_remov = number_of_words_remov)
        print("remove_most_repeated_rare_words: Done, number of Top and Bottom words remove is: ",
              number_of_words_remov)
    df['len_Processed_Message'] = df['Processed_Message'].apply(lambda X : len(X))
    print("length of Processed_Message is added to the main DataFrame : Done")
    print("\nText Data Preprocessing for Text Analytics is Completed!!!!!!!!!!!!!! :)")
    return df
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/diofant/abc.py | import string
from .core import Symbol, symbols
from .core.alphabets import greeks

# ##### Symbol definitions #####
# Pre-made Symbol instances for every Latin letter and the common Greek
# names, so users can simply ``from diofant.abc import x, alpha``.
a, b, c, d, e, f, g, h, i, j, k, l, m = symbols('a:m')
n, o, p, q, r, s, t, u, v, w, x, y, z = symbols('n:z')
A, B, C, D, E, F, G, H, I, J, K, L, M = symbols('A:M')
N, O, P, Q, R, S, T, U, V, W, X, Y, Z = symbols('N:Z')

alpha, beta, gamma, delta = symbols('alpha, beta, gamma, delta')
epsilon, zeta, eta, theta = symbols('epsilon, zeta, eta, theta')
iota, kappa, lamda, mu = symbols('iota, kappa, lamda, mu')
nu, xi, omicron, pi = symbols('nu, xi, omicron, pi')
rho, sigma, tau, upsilon = symbols('rho, sigma, tau, upsilon')
phi, chi, psi, omega = symbols('phi, chi, psi, omega')

# ##### Clashing-symbols diagnostics #####

# We want to know which names in Diofant collide with those in here.
# This is mostly for diagnosing Diofant's namespace during Diofant development.

_latin = list(string.ascii_letters)
# OSINEQ should not be imported as they clash; gamma, pi and zeta clash, too
_greek = list(greeks)  # make a copy, so we can mutate it
# Note: We import lamda since lambda is a reserved keyword in Python
_greek.remove('lambda')
_greek.append('lamda')
def clashing():
    """Return the clashing-symbols dictionaries.

    ``clash1`` defines all the single letter variables that clash with
    Diofant objects; ``clash2`` defines the multi-letter clashing symbols;
    and ``clash`` is the union of both. These can be passed for ``locals``
    during sympification if one desires Symbols rather than the non-Symbol
    objects for those names.

    Examples
    ========

    >>> from diofant.abc import _clash, _clash1, _clash2
    >>> sympify('Q & C', locals=_clash1)
    And(C, Q)
    >>> sympify('pi(x)', locals=_clash2)
    pi(x)
    >>> sympify('pi(C, Q)', locals=_clash)
    pi(C, Q)

    Note: if changes are made to the docstring examples they can only
    be tested after removing "clashing" from the list of deleted items
    at the bottom of this file which removes this function from the
    namespace.
    """
    namespace = {}
    exec('from diofant import *', namespace)  # pylint: disable=exec-used
    clash1 = {}
    clash2 = {}
    # Every Diofant top-level name that is also a letter (or Greek-letter
    # name) defined above is a clash; record it and drop it from the
    # module-level bookkeeping lists.
    for name in list(namespace):
        if name in _greek:
            clash2[name] = Symbol(name)
            _greek.remove(name)
        elif name in _latin:
            clash1[name] = Symbol(name)
            _latin.remove(name)
    clash = {**clash1, **clash2}
    return clash1, clash2, clash
_clash1, _clash2, _clash = clashing()

# Keep only the symbol names in the module namespace: drop the helpers and
# imports used above so ``from diofant.abc import *`` stays clean.
del _latin, _greek, clashing, greeks, symbols, Symbol
/Js2Py-0.74.tar.gz/Js2Py-0.74/js2py/constructors/jsobject.py | from ..base import *
import six
#todo Double check everything is OK
@Js
def Object():
    """JS ``Object(value)`` as a plain call: box the value, or create an
    empty object when the argument is null/undefined (or absent)."""
    val = arguments.get('0')
    boxable = not (val.is_null() or val.is_undefined())
    if boxable:
        return val.to_object()
    return PyJsObject(prototype=ObjectPrototype)
@Js
def object_constructor():
    # ``new Object(value)`` path (installed as Object.create below).
    if len(arguments):
        val = arguments.get('0')
        if val.TYPE == 'Object':
            #Implementation dependent, but my will simply return :)
            return val
        elif val.TYPE in ('Number', 'String', 'Boolean'):
            # primitives get boxed into their wrapper objects
            return val.to_object()
    # no (usable) argument: fresh empty object
    return PyJsObject(prototype=ObjectPrototype)


# ``create`` is this implementation's hook for the ``new`` operator
# (not the JS Object.create method); Object.length is 1 per spec.
Object.create = object_constructor
Object.own['length']['value'] = Js(1)
class ObjectMethods:
    """Implementations of the ES5 ``Object.*`` static methods.

    The methods deliberately take the target object as their first
    positional argument (no ``self``): they are attached to the ``Object``
    constructor below via ``fill_prototype`` and invoked as JS functions.
    """

    def getPrototypeOf(obj):
        if not obj.is_object():
            raise MakeError('TypeError',
                            'Object.getPrototypeOf called on non-object')
        return null if obj.prototype is None else obj.prototype

    def getOwnPropertyDescriptor(obj, prop):
        if not obj.is_object():
            raise MakeError(
                'TypeError',
                'Object.getOwnPropertyDescriptor called on non-object')
        return obj.own.get(
            prop.to_string().
            value)  # will return undefined if we dont have this prop

    def getOwnPropertyNames(obj):
        if not obj.is_object():
            # NOTE(review): message says getOwnPropertyDescriptor -- looks
            # copy-pasted from the method above.
            raise MakeError(
                'TypeError',
                'Object.getOwnPropertyDescriptor called on non-object')
        return obj.own.keys()

    def create(obj):
        # Object.create(proto[, properties]) -- proto must be object or null.
        if not (obj.is_object() or obj.is_null()):
            raise MakeError('TypeError',
                            'Object prototype may only be an Object or null')
        temp = PyJsObject(prototype=(None if obj.is_null() else obj))
        if len(arguments) > 1 and not arguments[1].is_undefined():
            if six.PY2:
                # Py2: go through __func__ to call the unbound method.
                ObjectMethods.defineProperties.__func__(temp, arguments[1])
            else:
                ObjectMethods.defineProperties(temp, arguments[1])
        return temp

    def defineProperty(obj, prop, attrs):
        if not obj.is_object():
            raise MakeError('TypeError',
                            'Object.defineProperty called on non-object')
        name = prop.to_string().value
        if not obj.define_own_property(name, ToPropertyDescriptor(attrs)):
            raise MakeError('TypeError', 'Cannot redefine property: %s' % name)
        return obj

    def defineProperties(obj, properties):
        if not obj.is_object():
            raise MakeError('TypeError',
                            'Object.defineProperties called on non-object')
        props = properties.to_object()
        for name in props:
            desc = ToPropertyDescriptor(props.get(name.value))
            if not obj.define_own_property(name.value, desc):
                raise MakeError(
                    'TypeError',
                    'Failed to define own property: %s' % name.value)
        return obj

    def seal(obj):
        # seal: make all properties non-configurable and the object
        # non-extensible (values stay writable).
        if not obj.is_object():
            raise MakeError('TypeError', 'Object.seal called on non-object')
        for desc in obj.own.values():
            desc['configurable'] = False
        obj.extensible = False
        return obj

    def freeze(obj):
        # freeze = seal + make data properties non-writable.
        if not obj.is_object():
            raise MakeError('TypeError', 'Object.freeze called on non-object')
        for desc in obj.own.values():
            desc['configurable'] = False
            if is_data_descriptor(desc):
                desc['writable'] = False
        obj.extensible = False
        return obj

    def preventExtensions(obj):
        if not obj.is_object():
            raise MakeError('TypeError',
                            'Object.preventExtensions on non-object')
        obj.extensible = False
        return obj

    def isSealed(obj):
        if not obj.is_object():
            raise MakeError('TypeError',
                            'Object.isSealed called on non-object')
        if obj.extensible:
            return False
        for desc in obj.own.values():
            if desc['configurable']:
                return False
        return True

    def isFrozen(obj):
        if not obj.is_object():
            raise MakeError('TypeError',
                            'Object.isFrozen called on non-object')
        if obj.extensible:
            return False
        for desc in obj.own.values():
            if desc['configurable']:
                return False
            if is_data_descriptor(desc) and desc['writable']:
                return False
        return True

    def isExtensible(obj):
        if not obj.is_object():
            raise MakeError('TypeError',
                            'Object.isExtensible called on non-object')
        return obj.extensible

    def keys(obj):
        # only own, enumerable property names
        if not obj.is_object():
            raise MakeError('TypeError', 'Object.keys called on non-object')
        return [e for e, d in six.iteritems(obj.own) if d.get('enumerable')]
# add methods attached to Object constructor
fill_prototype(Object, ObjectMethods, default_attrs)

# add constructor to prototype
fill_in_props(ObjectPrototype, {'constructor': Object}, default_attrs)

# add prototype property to the constructor.
# Per spec Object.prototype is non-writable/non-enumerable/non-configurable.
Object.define_own_property(
    'prototype', {
        'value': ObjectPrototype,
        'enumerable': False,
        'writable': False,
        'configurable': False
    })
# some utility functions:
def ToPropertyDescriptor(obj): # page 38 (50 absolute)
if obj.TYPE != 'Object':
raise MakeError('TypeError',
'Can\'t convert non-object to property descriptor')
desc = {}
if obj.has_property('enumerable'):
desc['enumerable'] = obj.get('enumerable').to_boolean().value
if obj.has_property('configurable'):
desc['configurable'] = obj.get('configurable').to_boolean().value
if obj.has_property('value'):
desc['value'] = obj.get('value')
if obj.has_property('writable'):
desc['writable'] = obj.get('writable').to_boolean().value
if obj.has_property('get'):
cand = obj.get('get')
if not (cand.is_undefined() or cand.is_callable()):
raise MakeError(
'TypeError',
'Invalid getter (it has to be a function or undefined)')
desc['get'] = cand
if obj.has_property('set'):
cand = obj.get('set')
if not (cand.is_undefined() or cand.is_callable()):
raise MakeError(
'TypeError',
'Invalid setter (it has to be a function or undefined)')
desc['set'] = cand
if ('get' in desc or 'set' in desc) and ('value' in desc
or 'writable' in desc):
raise MakeError(
'TypeError',
'Invalid property. A property cannot both have accessors and be writable or have a value.'
)
return desc | PypiClean |
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/extensions/TeX/color.js | MathJax.Extension["TeX/color"]={version:"2.7.9",config:MathJax.Hub.CombineConfig("TeX.color",{padding:"5px",border:"2px"}),colors:{Apricot:"#FBB982",Aquamarine:"#00B5BE",Bittersweet:"#C04F17",Black:"#221E1F",Blue:"#2D2F92",BlueGreen:"#00B3B8",BlueViolet:"#473992",BrickRed:"#B6321C",Brown:"#792500",BurntOrange:"#F7921D",CadetBlue:"#74729A",CarnationPink:"#F282B4",Cerulean:"#00A2E3",CornflowerBlue:"#41B0E4",Cyan:"#00AEEF",Dandelion:"#FDBC42",DarkOrchid:"#A4538A",Emerald:"#00A99D",ForestGreen:"#009B55",Fuchsia:"#8C368C",Goldenrod:"#FFDF42",Gray:"#949698",Green:"#00A64F",GreenYellow:"#DFE674",JungleGreen:"#00A99A",Lavender:"#F49EC4",LimeGreen:"#8DC73E",Magenta:"#EC008C",Mahogany:"#A9341F",Maroon:"#AF3235",Melon:"#F89E7B",MidnightBlue:"#006795",Mulberry:"#A93C93",NavyBlue:"#006EB8",OliveGreen:"#3C8031",Orange:"#F58137",OrangeRed:"#ED135A",Orchid:"#AF72B0",Peach:"#F7965A",Periwinkle:"#7977B8",PineGreen:"#008B72",Plum:"#92268F",ProcessBlue:"#00B0F0",Purple:"#99479B",RawSienna:"#974006",Red:"#ED1B23",RedOrange:"#F26035",RedViolet:"#A1246B",Rhodamine:"#EF559F",RoyalBlue:"#0071BC",RoyalPurple:"#613F99",RubineRed:"#ED017D",Salmon:"#F69289",SeaGreen:"#3FBC9D",Sepia:"#671800",SkyBlue:"#46C5DD",SpringGreen:"#C6DC67",Tan:"#DA9D76",TealBlue:"#00AEB3",Thistle:"#D883B7",Turquoise:"#00B4CE",Violet:"#58429B",VioletRed:"#EF58A0",White:"#FFFFFF",WildStrawberry:"#EE2967",Yellow:"#FFF200",YellowGreen:"#98CC70",YellowOrange:"#FAA21A"},getColor:function(a,c){if(!a){a="named"}var b=this["get_"+a];if(!b){this.TEX.Error(["UndefinedColorModel","Color model '%1' not defined",a])}return b.call(this,c)},get_rgb:function(b){b=b.replace(/^\s+/,"").replace(/\s+$/,"").split(/\s*,\s*/);var a="#";if(b.length!==3){this.TEX.Error(["ModelArg1","Color values for the %1 model require 3 numbers","rgb"])}for(var 
c=0;c<3;c++){if(!b[c].match(/^(\d+(\.\d*)?|\.\d+)$/)){this.TEX.Error(["InvalidDecimalNumber","Invalid decimal number"])}var d=parseFloat(b[c]);if(d<0||d>1){this.TEX.Error(["ModelArg2","Color values for the %1 model must be between %2 and %3","rgb",0,1])}d=Math.floor(d*255).toString(16);if(d.length<2){d="0"+d}a+=d}return a},get_RGB:function(b){b=b.replace(/^\s+/,"").replace(/\s+$/,"").split(/\s*,\s*/);var a="#";if(b.length!==3){this.TEX.Error(["ModelArg1","Color values for the %1 model require 3 numbers","RGB"])}for(var c=0;c<3;c++){if(!b[c].match(/^\d+$/)){this.TEX.Error(["InvalidNumber","Invalid number"])}var d=parseInt(b[c]);if(d>255){this.TEX.Error(["ModelArg2","Color values for the %1 model must be between %2 and %3","RGB",0,255])}d=d.toString(16);if(d.length<2){d="0"+d}a+=d}return a},get_gray:function(a){if(!a.match(/^\s*(\d+(\.\d*)?|\.\d+)\s*$/)){this.TEX.Error(["InvalidDecimalNumber","Invalid decimal number"])}var b=parseFloat(a);if(b<0||b>1){this.TEX.Error(["ModelArg2","Color values for the %1 model must be between %2 and %3","gray",0,1])}b=Math.floor(b*255).toString(16);if(b.length<2){b="0"+b}return"#"+b+b+b},get_named:function(a){if(this.colors.hasOwnProperty(a)){return this.colors[a]}return a},padding:function(){var c="+"+this.config.padding;var a=this.config.padding.replace(/^.*?([a-z]*)$/,"$1");var b="+"+(2*parseFloat(c))+a;return{width:b,height:c,depth:c,lspace:this.config.padding}}};MathJax.Hub.Register.StartupHook("TeX Jax Ready",function(){var d=MathJax.InputJax.TeX,a=MathJax.ElementJax.mml;var c=d.Stack.Item;var b=MathJax.Extension["TeX/color"];b.TEX=d;d.Definitions.Add({macros:{color:"Color",textcolor:"TextColor",definecolor:"DefineColor",colorbox:"ColorBox",fcolorbox:"fColorBox"}},null,true);d.Parse.Augment({Color:function(h){var g=this.GetBrackets(h),e=this.GetArgument(h);e=b.getColor(g,e);var f=c.style().With({styles:{mathcolor:e}});this.stack.env.color=e;this.Push(f)},TextColor:function(h){var 
g=this.GetBrackets(h),f=this.GetArgument(h);f=b.getColor(g,f);var e=this.stack.env.color;this.stack.env.color=f;var i=this.ParseArg(h);if(e){this.stack.env.color}else{delete this.stack.env.color}this.Push(a.mstyle(i).With({mathcolor:f}))},DefineColor:function(g){var f=this.GetArgument(g),e=this.GetArgument(g),h=this.GetArgument(g);b.colors[f]=b.getColor(e,h)},ColorBox:function(g){var f=this.GetArgument(g),e=this.InternalMath(this.GetArgument(g));this.Push(a.mpadded.apply(a,e).With({mathbackground:b.getColor("named",f)}).With(b.padding()))},fColorBox:function(g){var h=this.GetArgument(g),f=this.GetArgument(g),e=this.InternalMath(this.GetArgument(g));this.Push(a.mpadded.apply(a,e).With({mathbackground:b.getColor("named",f),style:"border: "+b.config.border+" solid "+b.getColor("named",h)}).With(b.padding()))}});MathJax.Hub.Startup.signal.Post("TeX color Ready")});MathJax.Ajax.loadComplete("[MathJax]/extensions/TeX/color.js"); | PypiClean |
/Heterogeneous_Highway_Env-0.0.3-py3-none-any.whl/Heterogeneous_Highway_Env/vehicle/kinematics.py | from typing import Union, Optional, Tuple, List
import numpy as np
import copy
from collections import deque
from highway_env import utils
from highway_env.road.road import Road, LaneIndex
from highway_env.vehicle.objects import RoadObject, Obstacle, Landmark
from highway_env.utils import Vector
class Vehicle(RoadObject):

    """
    A moving vehicle on a road, and its kinematics.

    The vehicle is represented by a dynamical system: a modified bicycle model.
    It's state is propagated depending on its steering and acceleration actions.
    """

    LENGTH = 5.0
    """ Vehicle length [m] """
    WIDTH = 2.0
    """ Vehicle width [m] """
    DEFAULT_INITIAL_SPEEDS = [23, 25]
    """ Range for random initial speeds [m/s] """
    MAX_SPEED = 40.
    """ Maximum reachable speed [m/s] """
    MIN_SPEED = -40.
    """ Minimum reachable speed [m/s] """
    HISTORY_SIZE = 30
    """ Length of the vehicle state history, for trajectory display"""

    # NOTE(review): "predition_type" is misspelled upstream; kept for API
    # compatibility with existing callers.
    def __init__(self,
                 road: Road,
                 position: Vector,
                 heading: float = 0,
                 speed: float = 0,
                 predition_type: str = 'constant_steering'):
        super().__init__(road, position, heading, speed)
        self.prediction_type = predition_type
        # last commanded control inputs; applied at each step()
        self.action = {'steering': 0, 'acceleration': 0}
        self.crashed = False
        # pending collision displacement set externally; consumed in step()
        self.impact = None
        self.log = []
        # ring buffer of past states (copies), newest first
        self.history = deque(maxlen=self.HISTORY_SIZE)
        self.vehicle_id = None

    @classmethod
    def create_random(cls, road: Road,
                      speed: float = None,
                      lane_from: Optional[str] = None,
                      lane_to: Optional[str] = None,
                      lane_id: Optional[int] = None,
                      spacing: float = 1) \
            -> "Vehicle":
        """
        Create a random vehicle on the road.

        The lane and /or speed are chosen randomly, while longitudinal position is chosen behind the last
        vehicle in the road with density based on the number of lanes.

        :param road: the road where the vehicle is driving
        :param speed: initial speed in [m/s]. If None, will be chosen randomly
        :param lane_from: start node of the lane to spawn in
        :param lane_to: end node of the lane to spawn in
        :param lane_id: id of the lane to spawn in
        :param spacing: ratio of spacing to the front vehicle, 1 being the default
        :return: A vehicle with random position and/or speed
        """
        _from = lane_from or road.np_random.choice(list(road.network.graph.keys()))
        _to = lane_to or road.np_random.choice(list(road.network.graph[_from].keys()))
        _id = lane_id if lane_id is not None else road.np_random.choice(len(road.network.graph[_from][_to]))
        lane = road.network.get_lane((_from, _to, _id))
        if speed is None:
            if lane.speed_limit is not None:
                # sample at 70-80% of the lane's speed limit
                speed = road.np_random.uniform(0.7*lane.speed_limit, 0.8*lane.speed_limit)
            else:
                speed = road.np_random.uniform(Vehicle.DEFAULT_INITIAL_SPEEDS[0], Vehicle.DEFAULT_INITIAL_SPEEDS[1])
        # nominal gap grows with speed; denser spawning when more lanes exist
        default_spacing = 12+1.0*speed
        offset = spacing * default_spacing * np.exp(-5 / 40 * len(road.network.graph[_from][_to]))
        # spawn behind the longitudinally furthest existing vehicle
        x0 = np.max([lane.local_coordinates(v.position)[0] for v in road.vehicles]) \
            if len(road.vehicles) else 3*offset
        x0 += offset * road.np_random.uniform(0.9, 1.1)
        v = cls(road, lane.position(x0, 0), lane.heading_at(x0), speed)
        return v

    @classmethod
    def create_from(cls, vehicle: "Vehicle") -> "Vehicle":
        """
        Create a new vehicle from an existing one.

        Only the vehicle dynamics are copied, other properties are default.

        :param vehicle: a vehicle
        :return: a new vehicle at the same dynamical state
        """
        v = cls(vehicle.road, vehicle.position, vehicle.heading, vehicle.speed)
        return v

    def act(self, action: Union[dict, str] = None) -> None:
        """
        Store an action to be repeated.

        :param action: the input action
        """
        if action:
            self.action = action

    def step(self, dt: float) -> None:
        """
        Propagate the vehicle state given its actions.

        Integrate a modified bicycle model with a 1st-order response on the steering wheel dynamics.
        If the vehicle is crashed, the actions are overridden with erratic steering and braking until complete stop.
        The vehicle's current lane is updated.

        :param dt: timestep of integration of the model [s]
        """
        self.clip_actions()
        delta_f = self.action['steering']
        # slip angle at the center of gravity (bicycle model)
        beta = np.arctan(1 / 2 * np.tan(delta_f))
        v = self.speed * np.array([np.cos(self.heading + beta),
                                   np.sin(self.heading + beta)])
        self.position += v * dt
        # apply any externally-set collision displacement exactly once
        if self.impact is not None:
            self.position += self.impact
            self.crashed = True
            self.impact = None
        self.heading += self.speed * np.sin(beta) / (self.LENGTH / 2) * dt
        self.speed += self.action['acceleration'] * dt
        self.on_state_update()

    def clip_actions(self) -> None:
        # After a crash: no steering, brake proportionally until stopped.
        if self.crashed:
            self.action['steering'] = 0
            self.action['acceleration'] = -1.0*self.speed
        self.action['steering'] = float(self.action['steering'])
        self.action['acceleration'] = float(self.action['acceleration'])
        # Soft speed clamp: limit acceleration so speed converges to bounds.
        if self.speed > self.MAX_SPEED:
            self.action['acceleration'] = min(self.action['acceleration'], 1.0 * (self.MAX_SPEED - self.speed))
        elif self.speed < self.MIN_SPEED:
            self.action['acceleration'] = max(self.action['acceleration'], 1.0 * (self.MIN_SPEED - self.speed))

    def on_state_update(self) -> None:
        """Refresh the cached lane and (optionally) record state history."""
        if self.road:
            self.lane_index = self.road.network.get_closest_lane_index(self.position, self.heading)
            self.lane = self.road.network.get_lane(self.lane_index)
            if self.road.record_history:
                self.history.appendleft(self.create_from(self))

    def predict_trajectory_constant_speed(self, times: np.ndarray) -> Tuple[List[np.ndarray], List[float]]:
        """Predict future positions/headings at the given absolute times,
        assuming zero acceleration and the configured steering policy."""
        if self.prediction_type == 'zero_steering':
            action = {'acceleration': 0.0, 'steering': 0.0}
        elif self.prediction_type == 'constant_steering':
            action = {'acceleration': 0.0, 'steering': self.action['steering']}
        else:
            raise ValueError("Unknown predition type")

        # convert absolute times to successive integration intervals
        dt = np.diff(np.concatenate(([0.0], times)))

        positions = []
        headings = []
        # simulate on a deep copy so the real vehicle is untouched
        v = copy.deepcopy(self)
        v.act(action)
        for t in dt:
            v.step(t)
            positions.append(v.position.copy())
            headings.append(v.heading)
        return (positions, headings)

    @property
    def velocity(self) -> np.ndarray:
        """Velocity vector [m/s] along the current heading."""
        return self.speed * self.direction  # TODO: slip angle beta should be used here

    @property
    def destination(self) -> np.ndarray:
        """End position of the planned route, or own position if no route."""
        if getattr(self, "route", None):
            last_lane_index = self.route[-1]
            # a route entry with lane id None defaults to lane 0
            last_lane_index = last_lane_index if last_lane_index[-1] is not None else (*last_lane_index[:-1], 0)
            last_lane = self.road.network.get_lane(last_lane_index)
            return last_lane.position(last_lane.length, 0)
        else:
            return self.position

    @property
    def destination_direction(self) -> np.ndarray:
        """Unit vector pointing towards the destination (zero if reached)."""
        if (self.destination != self.position).any():
            return (self.destination - self.position) / np.linalg.norm(self.destination - self.position)
        else:
            return np.zeros((2,))

    @property
    def lane_offset(self) -> np.ndarray:
        """Array [longitudinal, lateral, angular] offset w.r.t. current lane."""
        if self.lane is not None:
            long, lat = self.lane.local_coordinates(self.position)
            ang = self.lane.local_angle(self.heading, long)
            return np.array([long, lat, ang])
        else:
            return np.zeros((3,))

    def to_dict(self, origin_vehicle: "Vehicle" = None, observe_intentions: bool = True) -> dict:
        """Serialize the state as a flat feature dict; kinematic fields become
        relative to ``origin_vehicle`` when one is given."""
        d = {
            "id": self.vehicle_id,
            'presence': 1,
            'x': self.position[0],
            'y': self.position[1],
            'vx': self.velocity[0],
            'vy': self.velocity[1],
            'heading': self.heading,
            'cos_h': self.direction[0],
            'sin_h': self.direction[1],
            'cos_d': self.destination_direction[0],
            'sin_d': self.destination_direction[1],
            'long_off': self.lane_offset[0],
            'lat_off': self.lane_offset[1],
            'ang_off': self.lane_offset[2],
        }
        if not observe_intentions:
            # hide the destination direction from observers
            d["cos_d"] = d["sin_d"] = 0
        if origin_vehicle:
            origin_dict = origin_vehicle.to_dict()
            for key in ['x', 'y', 'vx', 'vy']:
                d[key] -= origin_dict[key]
        return d

    def __str__(self):
        return "{} #{}: {}".format(self.__class__.__name__, id(self) % 1000, self.position)

    def __repr__(self):
        return self.__str__()
/BenchExec-3.17.tar.gz/BenchExec-3.17/benchexec/filehierarchylimit.py |
import logging
import os
import threading
import time
from benchexec import container
from benchexec import util
_CHECK_INTERVAL_SECONDS = 60
_DURATION_WARNING_THRESHOLD = 1
class FileHierarchyLimitThread(threading.Thread):
    """
    Thread that periodically checks whether a given file hierarchy exceeds some limits.
    After this happens, the process is terminated.
    """

    def __init__(
        self,
        path,
        files_count_limit,
        files_size_limit,
        pid_to_kill,
        callbackFn=lambda reason: None,
    ):
        """
        @param path: the directory whose file hierarchy is watched
        @param files_count_limit: maximum number of files (falsy value disables)
        @param files_size_limit: maximum total size in bytes (falsy value disables)
        @param pid_to_kill: PID of the process to terminate on a violation
        @param callbackFn: called with the violation reason before killing
        """
        super().__init__()
        self.name = "FileHierarchyLimitThread-" + self.name
        assert os.path.isdir(path)
        self._path = path
        self._files_count_limit = files_count_limit
        self._files_size_limit = files_size_limit

        self._pid_to_kill = pid_to_kill
        self._callback = callbackFn
        self._finished = threading.Event()

    def _check_limit(self, files_count, files_size):
        """Kill the watched process if the given totals exceed a limit.

        @return: the violated limit ("files-count" or "files-size"),
            or None if no limit was exceeded.
        """
        if self._files_count_limit and files_count > self._files_count_limit:
            reason = "files-count"
        elif self._files_size_limit and files_size > self._files_size_limit:
            reason = "files-size"
        else:
            return None
        self._callback(reason)
        logging.debug(
            "Killing process %d due to %s limit (%d files with %d bytes).",
            self._pid_to_kill,
            reason,
            files_count,
            files_size,
        )
        util.kill_process(self._pid_to_kill)
        return reason

    def run(self):
        while not self._finished.is_set():
            self._finished.wait(_CHECK_INTERVAL_SECONDS)
            files_count = 0
            files_size = 0
            start_time = time.monotonic()
            for current_dir, _dirs, files in os.walk(self._path):
                for file in files:
                    abs_file = os.path.join(current_dir, file)
                    # Compute the path as visible for the tool (rooted at the
                    # watched hierarchy). BUG FIX: previously this passed the
                    # bare file name to relpath instead of abs_file, which
                    # produced nonsense paths (relative to the CWD) so the
                    # container system-config exclusion never matched.
                    file = "/" + os.path.relpath(abs_file, self._path)
                    if (
                        not container.is_container_system_config_file(file)
                        and os.path.isfile(abs_file)
                        and not os.path.islink(abs_file)
                    ):
                        files_count += 1
                        if self._files_size_limit:
                            try:
                                files_size += os.path.getsize(abs_file)
                            except OSError:
                                # possibly just deleted
                                pass
            if self._check_limit(files_count, files_size):
                return

            duration = time.monotonic() - start_time
            logging.debug(
                "FileHierarchyLimitThread for process %d: "
                "files count: %d, files size: %d, scan duration %fs",
                self._pid_to_kill,
                files_count,
                files_size,
                duration,
            )
            if duration > _DURATION_WARNING_THRESHOLD:
                logging.warning(
                    "Scanning file hierarchy for enforcement of limits took %ds.",
                    duration,
                )

    def cancel(self):
        """Signal the thread to stop after the current wait/scan."""
        self._finished.set()
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/lib/scons-4.4.0/SCons/Action.py | import os
import pickle
import re
import sys
import subprocess
from subprocess import DEVNULL
import inspect
from collections import OrderedDict
import SCons.Debug
from SCons.Debug import logInstanceCreation
import SCons.Errors
import SCons.Util
import SCons.Subst
# we use these a lot, so try to optimize them
from SCons.Util import is_String, is_List
class _null:
    # Sentinel class: apparently used as a default value that is
    # distinguishable from any legitimate argument (including None).
    pass


# Module-level flags; names suggest they toggle printing/execution of
# actions elsewhere in this file -- confirm against the callers.
print_actions = True
execute_actions = True
print_actions_presub = False

# Use pickle protocol 1 when pickling functions for signature
# otherwise python3 and python2 will yield different pickles
# for the same object.
# This is due to default being 1 for python 2.7, and 3 for 3.x
# TODO: We can roll this forward to 2 (if it has value), but not
# before a deprecation cycle as the sconsigns will change
ACTION_SIGNATURE_PICKLE_PROTOCOL = 1
def rfile(n):
    """Return ``n.rfile()`` for objects that provide it, else ``n`` unchanged.

    The except clause also covers an AttributeError raised *inside* the
    ``rfile()`` call itself, not just a missing attribute.
    """
    try:
        return n.rfile()
    except AttributeError:
        return n
def default_exitstatfunc(s):
    """Default exit-status translator: pass the status through unchanged."""
    return s


# Matches a string fully wrapped in single or double quotes;
# group 1 captures the unquoted contents.
strip_quotes = re.compile(r'^[\'"](.*)[\'"]$')
def _callable_contents(obj):
    """Return the signature contents of a callable Python object.

    Tries each representation in turn; each fallback is driven by the
    AttributeError raised either by the attribute access or by the helper
    call itself, so the nesting order is significant.
    """
    try:
        # Test if obj is a method.
        return _function_contents(obj.__func__)

    except AttributeError:
        try:
            # Test if obj is a callable object.
            return _function_contents(obj.__call__.__func__)

        except AttributeError:
            try:
                # Test if obj is a code object.
                return _code_contents(obj)

            except AttributeError:
                # Test if obj is a function object.
                return _function_contents(obj)
def _object_contents(obj):
    """Return the signature contents of any Python object.

    We have to handle the case where object contains a code object
    since it can be pickled directly.

    Probes, in order: bound method, callable instance, code object,
    plain function; anything else falls through to
    _object_instance_content(), with a final repr() fallback for
    objects that cannot be summarized any other way.
    """
    try:
        # Test if obj is a method.
        return _function_contents(obj.__func__)
    except AttributeError:
        try:
            # Test if obj is a callable object.
            return _function_contents(obj.__call__.__func__)
        except AttributeError:
            try:
                # Test if obj is a code object.
                return _code_contents(obj)
            except AttributeError:
                try:
                    # Test if obj is a function object.
                    return _function_contents(obj)
                except AttributeError as ae:
                    # Should be a pickle-able Python object.
                    try:
                        return _object_instance_content(obj)
                    # pickling an Action instance or object doesn't yield a stable
                    # content as instance property may be dumped in different orders
                    # return pickle.dumps(obj, ACTION_SIGNATURE_PICKLE_PROTOCOL)
                    except (pickle.PicklingError, TypeError, AttributeError) as ex:
                        # This is weird, but it seems that nested classes
                        # are unpickable. The Python docs say it should
                        # always be a PicklingError, but some Python
                        # versions seem to return TypeError. Just do
                        # the best we can.
                        return bytearray(repr(obj), 'utf-8')
def _code_contents(code, docstring=None):
r"""Return the signature contents of a code object.
By providing direct access to the code object of the
function, Python makes this extremely easy. Hooray!
Unfortunately, older versions of Python include line
number indications in the compiled byte code. Boo!
So we remove the line number byte codes to prevent
recompilations from moving a Python function.
See:
- https://docs.python.org/2/library/inspect.html
- http://python-reference.readthedocs.io/en/latest/docs/code/index.html
For info on what each co\_ variable provides
The signature is as follows (should be byte/chars):
co_argcount, len(co_varnames), len(co_cellvars), len(co_freevars),
( comma separated signature for each object in co_consts ),
( comma separated signature for each object in co_names ),
( The bytecode with line number bytecodes removed from co_code )
co_argcount - Returns the number of positional arguments (including arguments with default values).
co_varnames - Returns a tuple containing the names of the local variables (starting with the argument names).
co_cellvars - Returns a tuple containing the names of local variables that are referenced by nested functions.
co_freevars - Returns a tuple containing the names of free variables. (?)
co_consts - Returns a tuple containing the literals used by the bytecode.
co_names - Returns a tuple containing the names used by the bytecode.
co_code - Returns a string representing the sequence of bytecode instructions.
"""
# contents = []
# The code contents depends on the number of local variables
# but not their actual names.
contents = bytearray("{}, {}".format(code.co_argcount, len(code.co_varnames)), 'utf-8')
contents.extend(b", ")
contents.extend(bytearray(str(len(code.co_cellvars)), 'utf-8'))
contents.extend(b", ")
contents.extend(bytearray(str(len(code.co_freevars)), 'utf-8'))
# The code contents depends on any constants accessed by the
# function. Note that we have to call _object_contents on each
# constants because the code object of nested functions can
# show-up among the constants.
z = [_object_contents(cc) for cc in code.co_consts if cc != docstring]
contents.extend(b',(')
contents.extend(bytearray(',', 'utf-8').join(z))
contents.extend(b')')
# The code contents depends on the variable names used to
# accessed global variable, as changing the variable name changes
# the variable actually accessed and therefore changes the
# function result.
z= [bytearray(_object_contents(cc)) for cc in code.co_names]
contents.extend(b',(')
contents.extend(bytearray(',','utf-8').join(z))
contents.extend(b')')
# The code contents depends on its actual code!!!
contents.extend(b',(')
contents.extend(code.co_code)
contents.extend(b')')
return contents
def _function_contents(func):
    """Return the signature contents of a function, in bytes.

    Layout::

        < _code_contents of func.__code__ >
        ,( comma separated _object_contents of each default argument )
        ,( comma separated _object_contents of each closure cell value )

    See https://docs.python.org/3/reference/datamodel.html for the
    __code__ / __defaults__ / __closure__ attributes consumed here.
    """
    pieces = [_code_contents(func.__code__, func.__doc__)]
    # Default argument values participate in the signature.
    if func.__defaults__:
        default_sigs = [_object_contents(d) for d in func.__defaults__]
        blob = bytearray(b',(')
        blob.extend(bytearray(b',').join(default_sigs))
        blob.extend(b')')
        pieces.append(blob)
    else:
        pieces.append(b',()')
    # Captured closure cell values participate as well.
    cells = func.__closure__ or []
    try:
        cell_sigs = [_object_contents(cell.cell_contents) for cell in cells]
    except AttributeError:
        cell_sigs = []
    pieces.append(b',(')
    pieces.append(bytearray(b',').join(cell_sigs))
    pieces.append(b')')
    return bytearray(b'').join(pieces)
def _object_instance_content(obj):
"""
Returns consistant content for a action class or an instance thereof
:Parameters:
- `obj` Should be either and action class or an instance thereof
:Returns:
bytearray or bytes representing the obj suitable for generating a signature from.
"""
retval = bytearray()
if obj is None:
return b'N.'
if isinstance(obj, SCons.Util.BaseStringTypes):
return SCons.Util.to_bytes(obj)
inst_class = obj.__class__
inst_class_name = bytearray(obj.__class__.__name__,'utf-8')
inst_class_module = bytearray(obj.__class__.__module__,'utf-8')
inst_class_hierarchy = bytearray(repr(inspect.getclasstree([obj.__class__,])),'utf-8')
# print("ICH:%s : %s"%(inst_class_hierarchy, repr(obj)))
properties = [(p, getattr(obj, p, "None")) for p in dir(obj) if not (p[:2] == '__' or inspect.ismethod(getattr(obj, p)) or inspect.isbuiltin(getattr(obj,p))) ]
properties.sort()
properties_str = ','.join(["%s=%s"%(p[0],p[1]) for p in properties])
properties_bytes = bytearray(properties_str,'utf-8')
methods = [p for p in dir(obj) if inspect.ismethod(getattr(obj, p))]
methods.sort()
method_contents = []
for m in methods:
# print("Method:%s"%m)
v = _function_contents(getattr(obj, m))
# print("[%s->]V:%s [%s]"%(m,v,type(v)))
method_contents.append(v)
retval = bytearray(b'{')
retval.extend(inst_class_name)
retval.extend(b":")
retval.extend(inst_class_module)
retval.extend(b'}[[')
retval.extend(inst_class_hierarchy)
retval.extend(b']]{{')
retval.extend(bytearray(b",").join(method_contents))
retval.extend(b"}}{{{")
retval.extend(properties_bytes)
retval.extend(b'}}}')
return retval
# print("class :%s"%inst_class)
# print("class_name :%s"%inst_class_name)
# print("class_module :%s"%inst_class_module)
# print("Class hier :\n%s"%pp.pformat(inst_class_hierarchy))
# print("Inst Properties:\n%s"%pp.pformat(properties))
# print("Inst Methods :\n%s"%pp.pformat(methods))
def _actionAppend(act1, act2):
    """Combine two actions into one.

    Both arguments are run through the Action() factory first; a None
    result on either side yields the other action unchanged.  ListActions
    are flattened so the result is always a single flat ListAction rather
    than nested lists.
    """
    first = Action(act1)
    second = Action(act2)
    if first is None:
        return second
    if second is None:
        return first
    first_items = first.list if isinstance(first, ListAction) else [first]
    second_items = second.list if isinstance(second, ListAction) else [second]
    return ListAction(first_items + second_items)
def _do_create_keywords(args, kw):
    """Fold positional arguments that follow Action's *act* argument into
    their keyword equivalents, updating *kw* in place.

    The first positional argument is the command display string or
    callback; any further positional arguments extend the varlist.
    Raises UserError on a bad display argument or on supplying both
    strfunction and cmdstr.
    """
    varlist = kw.get('varlist', ())
    # A bare string must become a 1-tuple, not be iterated char-by-char
    # (varlist="FOO" would otherwise turn into ['F', 'O', 'O']).
    if is_String(varlist):
        varlist = (varlist,)
    kw['varlist'] = tuple(varlist)
    if args:
        # First positional arg describes how the command is displayed.
        cmdstrfunc = args[0]
        if cmdstrfunc is None or is_String(cmdstrfunc):
            kw['cmdstr'] = cmdstrfunc
        elif callable(cmdstrfunc):
            kw['strfunction'] = cmdstrfunc
        else:
            raise SCons.Errors.UserError(
                'Invalid command display variable type. '
                'You must either pass a string or a callback which '
                'accepts (target, source, env) as parameters.')
        if len(args) > 1:
            # Remaining positional args are additional varlist entries.
            kw['varlist'] = tuple(SCons.Util.flatten(args[1:])) + kw['varlist']
    if kw.get('strfunction', _null) is not _null \
       and kw.get('cmdstr', _null) is not _null:
        raise SCons.Errors.UserError(
            'Cannot have both strfunction and cmdstr args to Action()')
def _do_create_action(act, kw):
    """This is the actual "implementation" for the
    Action factory method, below.  This handles the
    fact that passing lists to Action() itself has
    different semantics than passing lists as elements
    of lists.

    The former will create a ListAction, the latter
    will create a CommandAction by converting the inner
    list elements to strings.

    Returns an ActionBase subclass instance, or None when *act* is of a
    type no action can be built from (except int/float, which raise).
    """
    if isinstance(act, ActionBase):
        # Already an action object: pass through unchanged.
        return act
    if is_String(act):
        var=SCons.Util.get_environment_var(act)
        if var:
            # This looks like a string that is purely an Environment
            # variable reference, like "$FOO" or "${FOO}". We do
            # something special here...we lazily evaluate the contents
            # of that Environment variable, so a user could put something
            # like a function or a CommandGenerator in that variable
            # instead of a string.
            return LazyAction(var, kw)
        commands = str(act).split('\n')
        if len(commands) == 1:
            return CommandAction(commands[0], **kw)
        # The list of string commands may include a LazyAction, so we
        # reprocess them via _do_create_list_action.
        return _do_create_list_action(commands, kw)
    if is_List(act):
        return CommandAction(act, **kw)
    if callable(act):
        # A callable becomes a FunctionAction, unless the caller asked
        # for a generator via the 'generator' keyword (consumed here).
        try:
            gen = kw['generator']
            del kw['generator']
        except KeyError:
            gen = 0
        if gen:
            action_type = CommandGeneratorAction
        else:
            action_type = FunctionAction
        return action_type(act, kw)
    # Catch a common error case with a nice message:
    if isinstance(act, int) or isinstance(act, float):
        raise TypeError("Don't know how to create an Action from a number (%s)"%act)
    # Else fail silently (???)
    return None
def _do_create_list_action(act, kw):
    """Factory for list actions.

    Convert each element of *act* into an Action (dropping elements that
    produce None) and wrap the results in a ListAction; a single surviving
    action is returned unwrapped.
    """
    actions = [a for a in (_do_create_action(element, kw) for element in act)
               if a is not None]
    if len(actions) == 1:
        return actions[0]
    return ListAction(actions)
def Action(act, *args, **kw):
    """A factory for action objects.

    Positional arguments after *act* are converted to their keyword
    equivalents (see _do_create_keywords).  A list *act* produces a
    ListAction; anything else is dispatched by _do_create_action.
    """
    # Really simple: the _do_create_* routines do the heavy lifting.
    _do_create_keywords(args, kw)
    if is_List(act):
        return _do_create_list_action(act, kw)
    return _do_create_action(act, kw)
class ActionBase:
    """Base class for all types of action objects that can be held by
    other objects (Builders, Executors, etc.) This provides the
    common methods for manipulating and combining those actions."""
    def __eq__(self, other):
        # NOTE(review): compares this instance's attribute dict directly
        # against *other* (not other.__dict__) -- callers appear to rely
        # on comparing an action against a plain dict; confirm before
        # "fixing".
        return self.__dict__ == other
    def no_batch_key(self, env, target, source):
        # Default: this action does not batch targets together.
        return None
    batch_key = no_batch_key
    def genstring(self, target, source, env):
        """Return the string used to describe this action to the user."""
        return str(self)
    def get_contents(self, target, source, env):
        """Return this action's signature contents as a bytearray.

        Combines the subclass-provided presignature with the substituted
        value of every construction variable in the action's varlist.
        """
        result = self.get_presig(target, source, env)
        if not isinstance(result,(bytes, bytearray)):
            result = bytearray(result, 'utf-8')
        else:
            # Make a copy and put in bytearray, without this the contents returned by get_presig
            # can be changed by the logic below, appending with each call and causing very
            # hard to track down issues...
            result = bytearray(result)
        # At this point everything should be a bytearray
        # This should never happen, as the Action() factory should wrap
        # the varlist, but just in case an action is created directly,
        # we duplicate this check here.
        vl = self.get_varlist(target, source, env)
        if is_String(vl): vl = (vl,)
        for v in vl:
            # do the subst this way to ignore $(...$) parts:
            if isinstance(result, bytearray):
                result.extend(SCons.Util.to_bytes(env.subst_target_source('${'+v+'}', SCons.Subst.SUBST_SIG, target, source)))
            else:
                raise Exception("WE SHOULD NEVER GET HERE result should be bytearray not:%s"%type(result))
            # result.append(SCons.Util.to_bytes(env.subst_target_source('${'+v+'}', SCons.Subst.SUBST_SIG, target, source)))
        if isinstance(result, (bytes,bytearray)):
            return result
        else:
            raise Exception("WE SHOULD NEVER GET HERE - #2 result should be bytearray not:%s" % type(result))
        # return b''.join(result)
    def __add__(self, other):
        # action + other combines into a (possibly flattened) ListAction.
        return _actionAppend(self, other)
    def __radd__(self, other):
        return _actionAppend(other, self)
    def presub_lines(self, env):
        # CommandGeneratorAction needs a real environment
        # in order to return the proper string here, since
        # it may call LazyAction, which looks up a key
        # in that env. So we temporarily remember the env here,
        # and CommandGeneratorAction will use this env
        # when it calls its _generate method.
        self.presub_env = env
        lines = str(self).split('\n')
        self.presub_env = None # don't need this any more
        return lines
    def get_varlist(self, target, source, env, executor=None):
        """Return the construction variables that feed this action's signature."""
        return self.varlist
    def get_targets(self, env, executor):
        """
        Returns the type of targets ($TARGETS, $CHANGED_TARGETS) used
        by this action.
        """
        return self.targets
class _ActionAction(ActionBase):
    """Base class for actions that create output objects.

    Holds the shared presentation/execution knobs: cmdstr/strfunction
    (how the action is displayed), varlist (extra signature variables),
    presub, chdir, exitstatfunc, batching, and the target selector.
    """
    def __init__(self, cmdstr=_null, strfunction=_null, varlist=(),
                 presub=_null, chdir=None, exitstatfunc=None,
                 batch_key=None, targets='$TARGETS',
                 **kw):
        self.cmdstr = cmdstr
        if strfunction is not _null:
            # An explicit strfunction=None means "display nothing".
            if strfunction is None:
                self.cmdstr = None
            else:
                self.strfunction = strfunction
        self.varlist = varlist
        self.presub = presub
        self.chdir = chdir
        if not exitstatfunc:
            exitstatfunc = default_exitstatfunc
        self.exitstatfunc = exitstatfunc
        self.targets = targets
        if batch_key:
            if not callable(batch_key):
                # They have set batch_key, but not to their own
                # callable. The default behavior here will batch
                # *all* targets+sources using this action, separated
                # for each construction environment.
                def default_batch_key(self, env, target, source):
                    return (id(self), id(env))
                batch_key = default_batch_key
            SCons.Util.AddMethod(self, batch_key, 'batch_key')
    def print_cmd_line(self, s, target, source, env):
        """
        In python 3, and in some of our tests, sys.stdout is
        a String io object, and it takes unicode strings only
        This code assumes s is a regular string.
        """
        sys.stdout.write(s + "\n")
    def __call__(self, target, source, env,
                 exitstatfunc=_null,
                 presub=_null,
                 show=_null,
                 execute=_null,
                 chdir=_null,
                 executor=None):
        # Execute (and/or display) this action for target/source.
        # Each _null keyword falls back first to the instance setting,
        # then to the module-level default.
        if not is_List(target):
            target = [target]
        if not is_List(source):
            source = [source]
        if presub is _null:
            presub = self.presub
        if presub is _null:
            presub = print_actions_presub
        if exitstatfunc is _null:
            exitstatfunc = self.exitstatfunc
        if show is _null:
            show = print_actions
        if execute is _null:
            execute = execute_actions
        if chdir is _null:
            chdir = self.chdir
        save_cwd = None
        if chdir:
            save_cwd = os.getcwd()
            try:
                # chdir may be a Node; resolve it to its absolute path.
                chdir = str(chdir.get_abspath())
            except AttributeError:
                if not is_String(chdir):
                    # A true-but-non-string chdir means "the target's dir".
                    if executor:
                        chdir = str(executor.batches[0].targets[0].dir)
                    else:
                        chdir = str(target[0].dir)
        if presub:
            # Show the pre-substitution action string(s).
            if executor:
                target = executor.get_all_targets()
                source = executor.get_all_sources()
            t = ' and '.join(map(str, target))
            l = '\n '.join(self.presub_lines(env))
            out = "Building %s with action:\n %s\n" % (t, l)
            sys.stdout.write(out)
        cmd = None
        if show and self.strfunction:
            if executor:
                target = executor.get_all_targets()
                source = executor.get_all_sources()
            try:
                cmd = self.strfunction(target, source, env, executor)
            except TypeError:
                # Older strfunctions take only (target, source, env).
                cmd = self.strfunction(target, source, env)
            if cmd:
                if chdir:
                    cmd = ('os.chdir(%s)\n' % repr(chdir)) + cmd
                try:
                    get = env.get
                except AttributeError:
                    print_func = self.print_cmd_line
                else:
                    # Honor a user-supplied PRINT_CMD_LINE_FUNC if set.
                    print_func = get('PRINT_CMD_LINE_FUNC')
                    if not print_func:
                        print_func = self.print_cmd_line
                print_func(cmd, target, source, env)
        stat = 0
        if execute:
            if chdir:
                os.chdir(chdir)
            try:
                stat = self.execute(target, source, env, executor=executor)
                if isinstance(stat, SCons.Errors.BuildError):
                    # Let exitstatfunc rewrite (or clear) the status.
                    s = exitstatfunc(stat.status)
                    if s:
                        stat.status = s
                    else:
                        stat = s
                else:
                    stat = exitstatfunc(stat)
            finally:
                # Always restore the original working directory.
                if save_cwd:
                    os.chdir(save_cwd)
        if cmd and save_cwd:
            print_func('os.chdir(%s)' % repr(save_cwd), target, source, env)
        return stat
def _string_from_cmd_list(cmd_list):
"""Takes a list of command line arguments and returns a pretty
representation for printing."""
cl = []
for arg in map(str, cmd_list):
if ' ' in arg or '\t' in arg:
arg = '"' + arg + '"'
cl.append(arg)
return ' '.join(cl)
# Shared fallback shell environment, created lazily by get_default_ENV()
# the first time an Environment without an 'ENV' is encountered.
default_ENV = None
def get_default_ENV(env):
    """Return env['ENV'] when present, else a lazily-created shared
    default shell environment.

    The 'import SCons.Environment' lives inside this function (not at
    module level) because hoisting it would create an import loop; the
    local name 'SCons' it creates would also shadow the global.
    """
    global default_ENV
    try:
        return env['ENV']
    except KeyError:
        pass
    if not default_ENV:
        import SCons.Environment
        # Hideously expensive fallback: build a whole default Environment
        # just to borrow its shell environment. Fortunately it is rare
        # for an Environment to lack one, so this almost never runs.
        default_ENV = SCons.Environment.Environment()['ENV']
    return default_ENV
def _resolve_shell_env(env, target, source):
    """Build the execution environment for spawning a command.

    Starts from the default shell environment; when SHELL_ENV_GENERATORS
    is set it must be an iterable of callables, each of which receives
    (env, target, source, ENV) and returns a replacement dict.
    """
    ENV = get_default_ENV(env)
    generators = env.get('SHELL_ENV_GENERATORS')
    if not generators:
        return ENV
    try:
        generator_iter = iter(generators)
    except TypeError:
        raise SCons.Errors.UserError("SHELL_ENV_GENERATORS must be iteratable.")
    # Copy before handing to generators so the shared default is never mutated.
    ENV = ENV.copy()
    for gen in generator_iter:
        ENV = gen(env, target, source, ENV)
        if not isinstance(ENV, dict):
            raise SCons.Errors.UserError(f"SHELL_ENV_GENERATORS function: {gen} must return a dict.")
    return ENV
def _subproc(scons_env, cmd, error='ignore', **kw):
    """Wrapper for subprocess which pulls from construction env.

    Use for calls to subprocess which need to interpolate values from
    an SCons construction environment into the environment passed to
    subprocess.  Adds an error-handling argument.  Adds ability
    to specify std{in,out,err} with "'devnull'" tag.

    With error='ignore' (the default), a failed spawn returns a dummy
    Popen stand-in whose wait() reports the OS error instead of raising.
    """
    # TODO: just uses subprocess.DEVNULL now, we can drop the "devnull"
    # string now - it is a holdover from Py2, which didn't have DEVNULL.
    for stream in 'stdin', 'stdout', 'stderr':
        io = kw.get(stream)
        if is_String(io) and io == 'devnull':
            kw[stream] = DEVNULL
    # Figure out what shell environment to use
    ENV = kw.get('env', None)
    if ENV is None: ENV = get_default_ENV(scons_env)
    # Ensure that the ENV values are all strings:
    new_env = {}
    for key, value in ENV.items():
        if is_List(value):
            # If the value is a list, then we assume it is a path list,
            # because that's a pretty common list-like value to stick
            # in an environment variable:
            value = SCons.Util.flatten_sequence(value)
            new_env[key] = os.pathsep.join(map(str, value))
        else:
            # It's either a string or something else. If it's a string,
            # we still want to call str() because it might be a *Unicode*
            # string, which makes subprocess.Popen() gag. If it isn't a
            # string or a list, then we just coerce it to a string, which
            # is the proper way to handle Dir and File instances and will
            # produce something reasonable for just about everything else:
            new_env[key] = str(value)
    kw['env'] = new_env
    try:
        pobj = subprocess.Popen(cmd, **kw)
    except EnvironmentError as e:
        if error == 'raise': raise
        # return a dummy Popen instance that only returns error
        class dummyPopen:
            """Minimal Popen stand-in: records the spawn exception and
            mimics the small slice of the Popen API callers use."""
            def __init__(self, e):
                self.exception = e
            # Add the following two to enable using the return value as a context manager
            # for example
            #    with Action._subproc(...) as po:
            #        logic here which uses po
            def __enter__(self):
                return self
            def __exit__(self, *args):
                pass
            def communicate(self, input=None):
                return ('', '')
            def wait(self):
                # Negative errno mirrors Popen's "killed by signal" style.
                return -self.exception.errno
            stdin = None
            class f:
                # Empty file-like object for stdout/stderr reads.
                def read(self): return ''
                def readline(self): return ''
                def __iter__(self): return iter(())
            stdout = stderr = f()
        pobj = dummyPopen(e)
    finally:
        # clean up open file handles stored in parent's kw
        # NOTE(review): inspect.ismethod is False for built-in methods such
        # as a file object's close -- verify this actually closes the
        # handles it intends to.
        for k, v in kw.items():
            if inspect.ismethod(getattr(v, 'close', None)):
                v.close()
    return pobj
class CommandAction(_ActionAction):
    """Class for command-execution actions."""
    def __init__(self, cmd, **kw):
        # Cmd can actually be a list or a single item; if it's a
        # single item it should be the command string to execute; if a
        # list then it should be the words of the command string to
        # execute. Only a single command should be executed by this
        # object; lists of commands should be handled by embedding
        # these objects in a ListAction object (which the Action()
        # factory above does). cmd will be passed to
        # Environment.subst_list() for substituting environment
        # variables.
        if SCons.Debug.track_instances: logInstanceCreation(self, 'Action.CommandAction')
        super().__init__(**kw)
        if is_List(cmd):
            if [c for c in cmd if is_List(c)]:
                raise TypeError("CommandAction should be given only "
                                "a single command")
        self.cmd_list = cmd
    def __str__(self):
        """Return the unsubstituted command line as a single string."""
        if is_List(self.cmd_list):
            return ' '.join(map(str, self.cmd_list))
        return str(self.cmd_list)
    def process(self, target, source, env, executor=None):
        """Substitute the command line and strip leading modifier chars.

        Returns (result, ignore, silent): '@' prefixes set silent
        (suppress echo), '-' prefixes set ignore (ignore exit status);
        both are stripped from the first word of the first command.
        """
        if executor:
            result = env.subst_list(self.cmd_list, 0, executor=executor)
        else:
            result = env.subst_list(self.cmd_list, 0, target, source)
        silent = None
        ignore = None
        while True:
            try: c = result[0][0][0]
            except IndexError: c = None
            if c == '@': silent = 1
            elif c == '-': ignore = 1
            else: break
            result[0][0] = result[0][0][1:]
        try:
            # Drop the first word entirely if the modifiers consumed it.
            if not result[0][0]:
                result[0] = result[0][1:]
        except IndexError:
            pass
        return result, ignore, silent
    def strfunction(self, target, source, env, executor=None):
        """Return the display string for this command, or '' when silent."""
        if self.cmdstr is None:
            return None
        if self.cmdstr is not _null:
            from SCons.Subst import SUBST_RAW
            if executor:
                c = env.subst(self.cmdstr, SUBST_RAW, executor=executor)
            else:
                c = env.subst(self.cmdstr, SUBST_RAW, target, source)
            if c:
                return c
        cmd_list, ignore, silent = self.process(target, source, env, executor)
        if silent:
            return ''
        return _string_from_cmd_list(cmd_list[0])
    def execute(self, target, source, env, executor=None):
        """Execute a command action.

        This will handle lists of commands as well as individual commands,
        because construction variable substitution may turn a single
        "command" into a list. This means that this class can actually
        handle lists of commands, even though that's not how we use it
        externally.
        """
        escape_list = SCons.Subst.escape_list
        flatten_sequence = SCons.Util.flatten_sequence
        try:
            shell = env['SHELL']
        except KeyError:
            raise SCons.Errors.UserError('Missing SHELL construction variable.')
        try:
            spawn = env['SPAWN']
        except KeyError:
            raise SCons.Errors.UserError('Missing SPAWN construction variable.')
        else:
            if is_String(spawn):
                spawn = env.subst(spawn, raw=1, conv=lambda x: x)
        escape = env.get('ESCAPE', lambda x: x)
        ENV = _resolve_shell_env(env, target, source)
        # Ensure that the ENV values are all strings:
        for key, value in ENV.items():
            if not is_String(value):
                if is_List(value):
                    # If the value is a list, then we assume it is a
                    # path list, because that's a pretty common list-like
                    # value to stick in an environment variable:
                    value = flatten_sequence(value)
                    ENV[key] = os.pathsep.join(map(str, value))
                else:
                    # If it isn't a string or a list, then we just coerce
                    # it to a string, which is the proper way to handle
                    # Dir and File instances and will produce something
                    # reasonable for just about everything else:
                    ENV[key] = str(value)
        if executor:
            target = executor.get_all_targets()
            source = executor.get_all_sources()
        cmd_list, ignore, silent = self.process(target, list(map(rfile, source)), env, executor)
        # Use len() to filter out any "command" that's zero-length.
        for cmd_line in filter(len, cmd_list):
            # Escape the command line for the interpreter we are using.
            cmd_line = escape_list(cmd_line, escape)
            result = spawn(shell, escape, cmd_line[0], cmd_line, ENV)
            if not ignore and result:
                msg = "Error %s" % result
                return SCons.Errors.BuildError(errstr=msg,
                                               status=result,
                                               action=self,
                                               command=cmd_line)
        return 0
    def get_presig(self, target, source, env, executor=None):
        """Return the signature contents of this action's command line.

        This strips $(-$) and everything in between the string,
        since those parts don't affect signatures.
        """
        from SCons.Subst import SUBST_SIG
        cmd = self.cmd_list
        if is_List(cmd):
            cmd = ' '.join(map(str, cmd))
        else:
            cmd = str(cmd)
        if executor:
            return env.subst_target_source(cmd, SUBST_SIG, executor=executor)
        else:
            return env.subst_target_source(cmd, SUBST_SIG, target, source)
    def get_implicit_deps(self, target, source, env, executor=None):
        """Return the implicit dependencies of this action's command line.

        IMPLICIT_COMMAND_DEPENDENCIES selects the strategy: falsy values
        disable scanning, 'all' or an int > 1 trigger the heavyweight
        scan, anything else (typically 1/True) the lightweight scan.
        """
        icd = env.get('IMPLICIT_COMMAND_DEPENDENCIES', True)
        if is_String(icd) and icd[:1] == '$':
            icd = env.subst(icd)
        if not icd or str(icd).lower() in ('0', 'none', 'false', 'no', 'off'):
            return []
        try:
            icd_int = int(icd)
        except ValueError:
            icd_int = None
        if (icd_int and icd_int > 1) or str(icd).lower() == 'all':
            # An integer value greater than 1 specifies the number of entries
            # to scan. "all" means to scan all.
            return self._get_implicit_deps_heavyweight(target, source, env, executor, icd_int)
        else:
            # Everything else (usually 1 or True) means that we want
            # lightweight dependency scanning.
            return self._get_implicit_deps_lightweight(target, source, env, executor)
    def _get_implicit_deps_lightweight(self, target, source, env, executor):
        """
        Lightweight dependency scanning involves only scanning the first entry
        in an action string, even if it contains &&.
        """
        from SCons.Subst import SUBST_SIG
        if executor:
            cmd_list = env.subst_list(self.cmd_list, SUBST_SIG, executor=executor)
        else:
            cmd_list = env.subst_list(self.cmd_list, SUBST_SIG, target, source)
        res = []
        for cmd_line in cmd_list:
            if cmd_line:
                d = str(cmd_line[0])
                m = strip_quotes.match(d)
                if m:
                    d = m.group(1)
                d = env.WhereIs(d)
                if d:
                    res.append(env.fs.File(d))
        return res
    def _get_implicit_deps_heavyweight(self, target, source, env, executor,
                                       icd_int):
        """
        Heavyweight dependency scanning involves scanning more than just the
        first entry in an action string. The exact behavior depends on the
        value of icd_int. Only files are taken as implicit dependencies;
        directories are ignored.

        If icd_int is an integer value, it specifies the number of entries to
        scan for implicit dependencies. Action strings are also scanned after
        a &&. So for example, if icd_int=2 and the action string is
        "cd <some_dir> && $PYTHON $SCRIPT_PATH <another_path>", the implicit
        dependencies would be the path to the python binary and the path to the
        script.

        If icd_int is None, all entries are scanned for implicit dependencies.
        """
        # Avoid circular and duplicate dependencies by not providing source,
        # target, or executor to subst_list. This causes references to
        # $SOURCES, $TARGETS, and all related variables to disappear.
        from SCons.Subst import SUBST_SIG
        cmd_list = env.subst_list(self.cmd_list, SUBST_SIG, conv=lambda x: x)
        res = []
        for cmd_line in cmd_list:
            if cmd_line:
                entry_count = 0
                for entry in cmd_line:
                    d = str(entry)
                    # Skip option-like words ('-...', '&...', and '/...'
                    # switches on Windows) and stop counting past icd_int.
                    if ((icd_int is None or entry_count < icd_int) and
                        not d.startswith(('&', '-', '/') if os.name == 'nt'
                                         else ('&', '-'))):
                        m = strip_quotes.match(d)
                        if m:
                            d = m.group(1)
                        if d:
                            # Resolve the first entry in the command string using
                            # PATH, which env.WhereIs() looks in.
                            # For now, only match files, not directories.
                            p = os.path.abspath(d) if os.path.isfile(d) else None
                            if not p and entry_count == 0:
                                p = env.WhereIs(d)
                            if p:
                                res.append(env.fs.File(p))
                        entry_count = entry_count + 1
                    else:
                        # '&&' starts a new sub-command; reset the counter.
                        entry_count = 0 if d == '&&' else entry_count + 1
        # Despite not providing source and target to env.subst() above, we
        # can still end up with sources in this list. For example, files in
        # LIBS will still resolve in env.subst(). This won't result in
        # circular dependencies, but it causes problems with cache signatures
        # changing between full and incremental builds.
        return [r for r in res if r not in target and r not in source]
class CommandGeneratorAction(ActionBase):
    """Class for command-generator actions.

    Wraps a user callable that, given (target, source, env,
    for_signature), returns something the Action() factory can turn
    into a concrete action; most methods delegate to that generated
    action.
    """
    def __init__(self, generator, kw):
        if SCons.Debug.track_instances: logInstanceCreation(self, 'Action.CommandGeneratorAction')
        self.generator = generator
        self.gen_kw = kw
        self.varlist = kw.get('varlist', ())
        self.targets = kw.get('targets', '$TARGETS')
    def _generate(self, target, source, env, for_signature, executor=None):
        """Invoke the generator and wrap its result in a real Action."""
        # ensure that target is a list, to make it easier to write
        # generator functions:
        if not is_List(target):
            target = [target]
        if executor:
            target = executor.get_all_targets()
            source = executor.get_all_sources()
        ret = self.generator(target=target,
                             source=source,
                             env=env,
                             for_signature=for_signature)
        gen_cmd = Action(ret, **self.gen_kw)
        if not gen_cmd:
            raise SCons.Errors.UserError("Object returned from command generator: %s cannot be used to create an Action." % repr(ret))
        return gen_cmd
    def __str__(self):
        try:
            # presub_lines() stashes a real env here before calling str().
            env = self.presub_env
        except AttributeError:
            env = None
        if env is None:
            env = SCons.Defaults.DefaultEnvironment()
        act = self._generate([], [], env, 1)
        return str(act)
    def batch_key(self, env, target, source):
        return self._generate(target, source, env, 1).batch_key(env, target, source)
    def genstring(self, target, source, env, executor=None):
        return self._generate(target, source, env, 1, executor).genstring(target, source, env)
    def __call__(self, target, source, env, exitstatfunc=_null, presub=_null,
                 show=_null, execute=_null, chdir=_null, executor=None):
        act = self._generate(target, source, env, 0, executor)
        if act is None:
            raise SCons.Errors.UserError(
                "While building `%s': "
                "Cannot deduce file extension from source files: %s"
                % (repr(list(map(str, target))), repr(list(map(str, source))))
            )
        return act(
            target, source, env, exitstatfunc, presub, show, execute, chdir, executor
        )
    def get_presig(self, target, source, env, executor=None):
        """Return the signature contents of this action's command line.

        This strips $(-$) and everything in between the string,
        since those parts don't affect signatures.
        """
        return self._generate(target, source, env, 1, executor).get_presig(target, source, env)
    def get_implicit_deps(self, target, source, env, executor=None):
        return self._generate(target, source, env, 1, executor).get_implicit_deps(target, source, env)
    def get_varlist(self, target, source, env, executor=None):
        return self._generate(target, source, env, 1, executor).get_varlist(target, source, env, executor)
    def get_targets(self, env, executor):
        return self._generate(None, None, env, 1, executor).get_targets(env, executor)
class LazyAction(CommandGeneratorAction, CommandAction):
    """
    A LazyAction is a kind of hybrid generator and command action for
    strings of the form "$VAR". These strings normally expand to other
    strings (think "$CCCOM" to "$CC -c -o $TARGET $SOURCE"), but we also
    want to be able to replace them with functions in the construction
    environment. Consequently, we want lazy evaluation and creation of
    an Action in the case of the function, but that's overkill in the more
    normal case of expansion to other strings.

    So we do this with a subclass that's both a generator *and*
    a command action. The overridden methods all do a quick check
    of the construction variable, and if it's a string we just call
    the corresponding CommandAction method to do the heavy lifting.
    If not, then we call the same-named CommandGeneratorAction method.
    The CommandGeneratorAction methods work by using the overridden
    _generate() method, that is, our own way of handling "generation" of
    an action based on what's in the construction variable.
    """
    def __init__(self, var, kw):
        if SCons.Debug.track_instances: logInstanceCreation(self, 'Action.LazyAction')
        CommandAction.__init__(self, '${'+var+'}', **kw)
        self.var = SCons.Util.to_String(var)
        self.gen_kw = kw
    def get_parent_class(self, env):
        """Pick the delegate class based on the variable's current value:
        a simple one-line string is a CommandAction, anything else goes
        through the generator machinery."""
        c = env.get(self.var)
        if is_String(c) and '\n' not in c:
            return CommandAction
        return CommandGeneratorAction
    def _generate_cache(self, env):
        # Build a concrete Action from the variable's current value
        # ('' when there is no env or the variable is unset).
        if env:
            c = env.get(self.var, '')
        else:
            c = ''
        gen_cmd = Action(c, **self.gen_kw)
        if not gen_cmd:
            raise SCons.Errors.UserError("$%s value %s cannot be used to create an Action." % (self.var, repr(c)))
        return gen_cmd
    def _generate(self, target, source, env, for_signature, executor=None):
        return self._generate_cache(env)
    def __call__(self, target, source, env, *args, **kw):
        # Explicit unbound dispatch: route to whichever parent class
        # matches the variable's current value.
        c = self.get_parent_class(env)
        return c.__call__(self, target, source, env, *args, **kw)
    def get_presig(self, target, source, env):
        c = self.get_parent_class(env)
        return c.get_presig(self, target, source, env)
    def get_varlist(self, target, source, env, executor=None):
        c = self.get_parent_class(env)
        return c.get_varlist(self, target, source, env, executor)
class FunctionAction(_ActionAction):
    """Class for Python function actions.

    Wraps an arbitrary Python callable (or callable object) so it can be
    executed as a build action with (target, source, env) semantics.
    """
    def __init__(self, execfunction, kw):
        if SCons.Debug.track_instances: logInstanceCreation(self, 'Action.FunctionAction')
        self.execfunction = execfunction
        # Capture the function's contents up front for signature purposes.
        try:
            self.funccontents = _callable_contents(execfunction)
        except AttributeError:
            try:
                # See if execfunction will do the heavy lifting for us.
                self.gc = execfunction.get_contents
            except AttributeError:
                # This is weird, just do the best we can.
                self.funccontents = _object_contents(execfunction)
        super().__init__(**kw)

    def function_name(self):
        # Best-effort display name: function name, else class name,
        # else a fixed placeholder.
        try:
            return self.execfunction.__name__
        except AttributeError:
            try:
                return self.execfunction.__class__.__name__
            except AttributeError:
                return "unknown_python_function"

    def strfunction(self, target, source, env, executor=None):
        """Return the string to display when this action runs (or None
        to suppress display).

        Precedence: an explicit cmdstr, then the wrapped callable's own
        strfunction, then a synthesized "name(targets, sources)" form.
        """
        if self.cmdstr is None:
            return None
        if self.cmdstr is not _null:
            from SCons.Subst import SUBST_RAW
            if executor:
                c = env.subst(self.cmdstr, SUBST_RAW, executor=executor)
            else:
                c = env.subst(self.cmdstr, SUBST_RAW, target, source)
            if c:
                return c
        # Render a list of nodes as "[a, b, ...]", preferring each node's
        # str_for_display() over repr() when available.
        def array(a):
            def quote(s):
                try:
                    str_for_display = s.str_for_display
                except AttributeError:
                    s = repr(s)
                else:
                    s = str_for_display()
                return s
            return '[' + ", ".join(map(quote, a)) + ']'
        try:
            strfunc = self.execfunction.strfunction
        except AttributeError:
            pass
        else:
            if strfunc is None:
                return None
            if callable(strfunc):
                return strfunc(target, source, env)
        name = self.function_name()
        tstr = array(target)
        sstr = array(source)
        return "%s(%s, %s)" % (name, tstr, sstr)

    def __str__(self):
        name = self.function_name()
        if name == 'ActionCaller':
            return str(self.execfunction)
        return "%s(target, source, env)" % name

    def execute(self, target, source, env, executor=None):
        """Call the wrapped function and convert failures to BuildErrors.

        KeyboardInterrupt/SystemExit propagate; other exceptions are
        captured and converted via convert_to_BuildError.
        """
        exc_info = (None,None,None)
        try:
            if executor:
                target = executor.get_all_targets()
                source = executor.get_all_sources()
            # Resolve repository files for sources before the call.
            rsources = list(map(rfile, source))
            try:
                result = self.execfunction(target=target, source=rsources, env=env)
            except KeyboardInterrupt as e:
                raise
            except SystemExit as e:
                raise
            except Exception as e:
                result = e
                exc_info = sys.exc_info()
            if result:
                result = SCons.Errors.convert_to_BuildError(result, exc_info)
                result.node=target
                result.action=self
                # Older strfunction implementations may not accept the
                # executor argument; retry without it on TypeError.
                try:
                    result.command=self.strfunction(target, source, env, executor)
                except TypeError:
                    result.command=self.strfunction(target, source, env)
                # FIXME: This maintains backward compatibility with respect to
                # which type of exceptions were returned by raising an
                # exception and which ones were returned by value. It would
                # probably be best to always return them by value here, but
                # some codes do not check the return value of Actions and I do
                # not have the time to modify them at this point.
                if (exc_info[1] and
                    not isinstance(exc_info[1],EnvironmentError)):
                    raise result
            return result
        finally:
            # Break the cycle between the traceback object and this
            # function stack frame. See the sys.exc_info() doc info for
            # more information about this issue.
            del exc_info

    def get_presig(self, target, source, env):
        """Return the signature contents of this callable action."""
        # self.gc only exists when execfunction supplied get_contents
        # in __init__; otherwise fall back to the captured contents.
        try:
            return self.gc(target, source, env)
        except AttributeError:
            return self.funccontents

    def get_implicit_deps(self, target, source, env):
        # Python functions cannot be scanned for implicit dependencies.
        return []
class ListAction(ActionBase):
    """An Action wrapping a sequence of other actions, executed in order."""
    def __init__(self, actionlist):
        if SCons.Debug.track_instances: logInstanceCreation(self, 'Action.ListAction')
        # Coerce every element into an ActionBase instance.
        def as_action(element):
            return element if isinstance(element, ActionBase) else Action(element)
        self.list = [as_action(element) for element in actionlist]
        # Our children will have had any varlist applied already;
        # we don't need to do it again.
        self.varlist = ()
        self.targets = '$TARGETS'

    def genstring(self, target, source, env):
        return '\n'.join(act.genstring(target, source, env) for act in self.list)

    def __str__(self):
        return '\n'.join(str(act) for act in self.list)

    def presub_lines(self, env):
        per_action = [act.presub_lines(env) for act in self.list]
        return SCons.Util.flatten_sequence(per_action)

    def get_presig(self, target, source, env):
        """Return the signature contents of this action list.

        Simple concatenation of the signatures of the elements.
        """
        parts = [bytes(act.get_contents(target, source, env)) for act in self.list]
        return b"".join(parts)

    def __call__(self, target, source, env, exitstatfunc=_null, presub=_null,
                 show=_null, execute=_null, chdir=_null, executor=None):
        if executor:
            target = executor.get_all_targets()
            source = executor.get_all_sources()
        # Run each child action in turn, stopping at the first failure.
        for act in self.list:
            status = act(target, source, env, exitstatfunc, presub,
                         show, execute, chdir, executor)
            if status:
                return status
        return 0

    def get_implicit_deps(self, target, source, env):
        deps = []
        for act in self.list:
            deps.extend(act.get_implicit_deps(target, source, env))
        return deps

    def get_varlist(self, target, source, env, executor=None):
        # OrderedDict doubles as an ordered set: deduplicate while
        # preserving first-seen order.
        seen = OrderedDict()
        for act in self.list:
            for var in act.get_varlist(target, source, env, executor):
                seen[var] = True
        return list(seen.keys())
class ActionCaller:
    """Delay calling an Action function with specific (positional and
    keyword) arguments until the Action is actually executed.

    To the rest of the world this looks like a normal Action object,
    but it really just hangs on to its arguments until a target,
    source and env are available for the expansion.
    """
    def __init__(self, parent, args, kw):
        self.parent = parent
        self.args = args
        self.kw = kw

    def get_contents(self, target, source, env):
        actfunc = self.parent.actfunc
        try:
            # A plain function: sign its byte code.
            return actfunc.__code__.co_code
        except AttributeError:
            pass
        try:
            # A callable object: sign the byte code of its __call__().
            return actfunc.__call__.__func__.__code__.co_code
        except AttributeError:
            # No __call__() method, so it might be a builtin
            # or something like that. Do the best we can.
            return repr(actfunc)

    def subst(self, s, target, source, env):
        # A list is substituted element-wise, then converted.
        if is_List(s):
            substituted = [self.subst(elem, target, source, env) for elem in s]
            return self.parent.convert(substituted)
        # Special-case hack: Let a custom function wrapped in an
        # ActionCaller get at the environment through which the action
        # was called by using this hard-coded value as a special return.
        if s == '$__env__':
            return env
        if is_String(s):
            return env.subst(s, 1, target, source)
        return self.parent.convert(s)

    def subst_args(self, target, source, env):
        return [self.subst(arg, target, source, env) for arg in self.args]

    def subst_kw(self, target, source, env):
        return {key: self.subst(value, target, source, env)
                for key, value in self.kw.items()}

    def __call__(self, target, source, env, executor=None):
        expanded_args = self.subst_args(target, source, env)
        expanded_kw = self.subst_kw(target, source, env)
        return self.parent.actfunc(*expanded_args, **expanded_kw)

    def strfunction(self, target, source, env):
        expanded_args = self.subst_args(target, source, env)
        expanded_kw = self.subst_kw(target, source, env)
        return self.parent.strfunc(*expanded_args, **expanded_kw)

    def __str__(self):
        return self.parent.strfunc(*self.args, **self.kw)
class ActionFactory:
    """Wrap an arbitrary function up as an SCons-executable Action object.

    The real heavy lifting is done by the ActionCaller class; this
    factory merely records the positional and keyword arguments it is
    called with and hands them to the ActionCaller it creates, which
    hangs onto them until they are needed.
    """
    def __init__(self, actfunc, strfunc, convert=lambda x: x):
        self.actfunc = actfunc
        self.strfunc = strfunc
        self.convert = convert

    def __call__(self, *args, **kw):
        caller = ActionCaller(self, args, kw)
        return Action(caller, strfunction=caller.strfunction)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/image/Augmentor.py | import imgaug.augmenters as iaa
import tensorflow as tf
from typing import *
class Augmentor:
    """
    This class is used to define the image augmentation pipeline. Augmentor class instance
    can be passed to DataSplit "aug" argument to apply it to the train dataset.
    # Arguments
        h_flip: probability of applying horizontal flip to a batch image.
        v_flip: probability of applying vertical flip to a batch image.
        brightness: (probability, alpha) - tuple defines probability and the amount of brightness (+- alpha%) to apply to an image.
        contrast: (probability, alpha) - tuple defines probability and the amount of contrast (+- alpha%) to apply to an image.
        blur: probability of applying blur and the amount of blur to have in a tuple (from sigma, to sigma), e.g. 50% images get
            blurred between 0 and 3 sigma, blur = (0.5, (0., 3.))
        hue: probability of applying hue shift (tf implementation) and alpha (amount of the shift).
        sharpness: (probability, alpha) - probability of sharpening and the upper bound of the sharpen alpha range (0, alpha).
        warp: the amount of random warp to apply
        zoom: (probability, alpha) - probability of zooming and the scale range 1 +- alpha (imgaug Affine, edge mode).
        rotate: (probability, alpha) - probability of rotating and the rotation range -alpha..+alpha (imgaug Affine, edge mode).
        dropout: (probability, p) - probability of applying coarse dropout and the per-pixel dropout fraction p.
        pipeline: custom pipeline (currently not supported)
    """
    def __init__(self,
                 h_flip: float = 0.,
                 v_flip: float = 0.,
                 brightness: Tuple[float, float] = (0., 0.),
                 contrast: Tuple[float, float] = (0., 0.),
                 hue: Tuple[float, float] = (0., 0.),
                 sharpness: Tuple[float, float] = (0., 0.),
                 blur: Tuple[float, Tuple[float, float]] = (0., 0.),
                 warp: float = 0.,
                 zoom: Tuple[float, float] = (0., 0.),
                 rotate: Tuple[float, float] = (0., 0.),
                 dropout: Tuple[float, float] = (0., 0.),
                 pipeline: Optional[List[Any]] = None):
        # Each parameter defaults to "disabled" (zero probability).
        self.h_flip = h_flip
        self.v_flip = v_flip
        self.brightness = brightness
        self.contrast = contrast
        self.hue = hue
        self.sharpness = sharpness
        self.blur = blur
        self.warp = warp
        self.zoom = zoom
        self.rotate = rotate
        self.dropout = dropout
        self.pipeline = pipeline

    def __call__(self,
                 image: tf.Tensor,
                 labels: Optional[Any] = None) -> Union[tf.Tensor, Tuple[tf.Tensor, Any]]:
        """
        Upon calling takes image and applies transformation. If labels are passed in the input tuple,
        those are appended in a tuple with the augmented image upon return.
        Note: Custom pipelines not yet implemented.

        NOTE(review): the output is clipped to [0, 1], which suggests float
        images in [0, 1] are expected — confirm with the data pipeline.
        """
        # Tensorflow augmentors: each is applied with its configured
        # probability via tf.cond on a fresh uniform draw.
        image = tf.cond(tf.random.uniform([], 0, 1) < self.h_flip,
                        lambda: tf.image.flip_left_right(image),
                        lambda: image)
        image = tf.cond(tf.random.uniform([], 0, 1) < self.v_flip,
                        lambda: tf.image.flip_up_down(image),
                        lambda: image)
        image = tf.cond(tf.random.uniform([], 0, 1) < self.brightness[0],
                        lambda: tf.image.random_brightness(image, max_delta=self.brightness[1]),
                        lambda: image)
        # 0.99999 keeps lower strictly below upper even when alpha is 0.
        image = tf.cond(tf.random.uniform([], 0, 1) < self.contrast[0],
                        lambda: tf.image.random_contrast(image, lower=0.99999-self.contrast[1], upper=1+self.contrast[1]),
                        lambda: image)
        # Hue
        image = tf.cond(tf.random.uniform([], 0, 1) < self.hue[0],
                        lambda: tf.image.random_hue(image, self.hue[1], seed=None),
                        lambda: image)
        # Third-party module augmentors. Applied using py_func, require to get_shape of the
        # input, set_shape before returning.
        input_shape = image.get_shape()
        # Blur
        if self.blur[0]:
            augmenter = iaa.Sometimes(self.blur[0], iaa.GaussianBlur(self.blur[1]))
            # imgaug operates on batches, so add/remove a batch dimension
            # around the py_function call.
            img_exp = tf.expand_dims(image, axis=0)
            image = tf.py_function(augmenter.augment_images, [img_exp], Tout=[tf.float32])
            image = tf.squeeze(image)
        # Sharpen
        if self.sharpness[0]:
            augmenter = iaa.Sometimes(self.sharpness[0], iaa.Sharpen(alpha=(0, self.sharpness[1]), lightness=(0.75, 1.5)))
            img_exp = tf.expand_dims(image, axis=0)
            image = tf.py_function(augmenter.augment_images, [img_exp], Tout=[tf.float32])
            image = tf.squeeze(image)
        # Perspective transform
        if self.warp:
            def augmenter(image):
                return iaa.PerspectiveTransform(scale=(0, self.warp), keep_size=True).augment_images(image)
            img_exp = tf.expand_dims(image, axis=0)
            image = tf.py_function(augmenter, [img_exp], Tout=[tf.float32])
            image = tf.squeeze(image)
        # Zoom
        if self.zoom[0]:
            augmenter = iaa.Sometimes(self.zoom[0],
                                      iaa.Affine(scale=(1-self.zoom[1],1+self.zoom[1]), mode="edge"))
            img_exp = tf.expand_dims(image, axis=0)
            image = tf.py_function(augmenter.augment_images, [img_exp], Tout=[tf.float32])
            image = tf.squeeze(image)
        # Rotate
        if self.rotate[0]:
            augmenter = iaa.Sometimes(self.rotate[0],
                                      iaa.Affine(rotate=(-self.rotate[1], self.rotate[1]),
                                                 mode="edge"))
            img_exp = tf.expand_dims(image, axis=0)
            image = tf.py_function(augmenter.augment_images, [img_exp], Tout=[tf.float32])
            image = tf.squeeze(image)
        # Dropout
        if self.dropout[0]:
            augmenter = iaa.Sometimes(self.dropout[0], iaa.CoarseDropout(p=self.dropout[1], size_percent=0.10))
            img_exp = tf.expand_dims(image, axis=0)
            image = tf.py_function(augmenter.augment_images, [img_exp], Tout=[tf.float32])
            image = tf.squeeze(image)
        # py_function loses static shape information; restore it.
        image.set_shape(input_shape)
        if self.pipeline:
            print("Not yet implemented")
        image = tf.clip_by_value(image, 0.0, 1.0)
        if labels is not None:
            return (image, labels)
        else:
            return image
/Firmant-0.2.3a1.tar.gz/Firmant-0.2.3a1/firmant/writers/j2.py | import jinja2
from firmant import decorators
from firmant.writers import staticrst
from firmant.writers import posts
from firmant.utils import paths
from firmant.utils import workarounds
from pysettings import settings
class Jinja2Base(object):
    '''Base class used for functionality common to all J2 writers.
    '''
    # pylint: disable-msg=R0903

    @staticmethod
    def render_to_file(environment, path, template, context):
        '''Render ``template`` with ``context`` and write the result to ``path``.
        '''
        # Per-run Jinja2 state lives in the environment dict, keyed by
        # this class so all writers share a single Jinja2 Environment.
        j2env = environment.setdefault(Jinja2Base, {})
        if 'env' not in j2env:
            loader = getattr(settings, 'TEMPLATE_LOADER', None)
            if loader is None:
                loader = jinja2.PackageLoader('firmant', 'templates')
            j2env['env'] = jinja2.Environment(loader=loader)
        # Overlay the call-specific context on top of any shared globals.
        render_vars = j2env.get('globals', dict())
        render_vars.update(context)
        rendered = j2env['env'].get_template(template).render(render_vars)
        out = paths.create_or_truncate(path)
        out.write(rendered.encode('utf-8'))
        out.flush()
        out.close()
class Jinja2StaticRst(Jinja2Base, staticrst.StaticRstWriter):
    '''Render staticrst objects using the :class:`StaticRstWriter` base.
    '''
    # Output file extension and Jinja2 template used by render().
    extension = 'html'
    template = 'flat.html'

    def render(self, environment, path, obj):
        '''Render the data in a Jinja2 template.
        .. doctest::
            >>> j2sr = Jinja2StaticRst(environment, objects)
            >>> obj = j2sr.obj_list(environment, objects)[0]
            >>> j2sr.render(environment, path, obj)
            >>> cat(path)
            About at about
        '''
        # Expose the page object and its path to the template.
        context = dict()
        context['path'] = obj.path
        context['page'] = obj
        self.render_to_file(environment, path, self.template, context)
class Jinja2PostWriter(Jinja2Base, posts.PostWriter):
    '''Render each post individually using Jinja2 templates.
    '''
    # Output file extension and Jinja2 template used by render().
    extension = 'html'
    template = 'posts/single.html'

    def render(self, environment, path, post):
        r'''Render the data in a Jinja2 template.
        .. doctest::
            >>> j2pw = Jinja2PostWriter(environment, objects)
            >>> obj = j2pw.obj_list(environment, objects)[0]
            >>> j2pw.render(environment, path, obj)
            >>> cat(path)
            2009-12-31 | party by John Doe
        '''
        # The template receives the post object under the name ``post``.
        context = dict()
        context['post'] = post
        self.render_to_file(environment, path, self.template, context)
class Jinja2PostArchiveBase(Jinja2Base):
    '''Common functionality for rendering Jinja2 archive views.

    ``obj`` is indexed as ``obj[0]`` (the posts) plus one or two
    pagination objects with ``prev``/``next`` attributes — inferred from
    the indexing below; confirm against the posts.PostArchive* writers.
    '''
    # It complains about not having the key attribute (provided by children)
    # pylint: disable-msg=E1101
    extension = 'html'

    @workarounds.abstractproperty
    def template(self):
        '''The template to use for rendering.
        '''

    @decorators.in_environment('urlmapper')
    def render(self, environment, path, obj):
        '''Render the archive view.
        '''
        # pylint: disable-msg=R0912
        key = self.key(obj)
        context = dict()
        context['posts'] = obj[0]
        urlmapper = environment['urlmapper']
        # With 3 elements, obj carries both a calendar pagination (index 1)
        # and a page pagination (index 2); with 2 elements, only the page
        # pagination (index 1).
        # NOTE(review): any other length leaves ``pages``/``calen`` unbound
        # and raises NameError below — confirm the callers guarantee 2 or 3.
        if len(obj) == 3:
            pages = 2
            calen = 1
        elif len(obj) == 2:
            pages = 1
            calen = False
        # Construct the urls for prev/next paginated pages
        if obj[pages].prev:
            d = key.copy()
            d['page'] = obj[pages].prev
            context['page_prev'] = urlmapper.url(self.extension, **d)
        else:
            context['page_prev'] = None
        if obj[pages].next:
            d = key.copy()
            d['page'] = obj[pages].next
            context['page_next'] = urlmapper.url(self.extension, **d)
        else:
            context['page_next'] = None
        # Construct the urls for prev/next archive pages
        # prev/next are (year[, month[, day]]) tuples; only the components
        # present are added to the url key, and paging restarts at 1.
        if calen and obj[calen].prev:
            d = key.copy()
            d['page'] = 1
            if len(obj[1].prev) >= 1:
                d['year'] = obj[calen].prev[0]
            if len(obj[1].prev) >= 2:
                d['month'] = obj[calen].prev[1]
            if len(obj[1].prev) >= 3:
                d['day'] = obj[calen].prev[2]
            context['cal_prev'] = urlmapper.url(self.extension, **d)
        else:
            context['cal_prev'] = None
        if calen and obj[calen].next:
            d = key.copy()
            d['page'] = 1
            if len(obj[1].next) >= 1:
                d['year'] = obj[calen].next[0]
            if len(obj[1].next) >= 2:
                d['month'] = obj[calen].next[1]
            if len(obj[1].next) >= 3:
                d['day'] = obj[calen].next[2]
            context['cal_next'] = urlmapper.url(self.extension, **d)
        else:
            context['cal_next'] = None
        self.render_to_file(environment, path, self.template, context)
class Jinja2PostArchiveAll(Jinja2PostArchiveBase, posts.PostArchiveAll):
    '''Render paginated post lists with Jinja2 templates.
    '''
    # Template consumed by Jinja2PostArchiveBase.render().
    template = 'posts/archive_all.html'
class Jinja2PostArchiveYearly(Jinja2PostArchiveBase, posts.PostArchiveYearly):
    '''Render paginated post lists (grouped by year) with Jinja2 templates.
    '''
    # Template consumed by Jinja2PostArchiveBase.render().
    template = 'posts/archive_yearly.html'
class Jinja2PostArchiveMonthly(Jinja2PostArchiveBase, posts.PostArchiveMonthly):
    '''Render paginated post lists (grouped by month) with Jinja2 templates.
    '''
    # Template consumed by Jinja2PostArchiveBase.render().
    template = 'posts/archive_monthly.html'
class Jinja2PostArchiveDaily(Jinja2PostArchiveBase, posts.PostArchiveDaily):
    '''Render paginated post lists (grouped by day) with Jinja2 templates.
    '''
    # Template consumed by Jinja2PostArchiveBase.render().
    template = 'posts/archive_daily.html'
def _setup(self):
    '''Set up the doctest globals for this module's examples.

    Configures the project settings with a temporary output directory and
    a filesystem template loader, then populates ``self.globs`` with the
    objects the doctests in this module reference.
    '''
    import tempfile
    import os
    from pysettings import Settings
    from firmant.routing import URLMapper
    from firmant.utils.paths import cat
    from testdata.chunks import c900
    settings.configure(Settings({'POSTS_PER_PAGE': 2
                                ,'OUTPUT_DIR': tempfile.mkdtemp()
                                ,'PERMALINK_ROOT': 'http://testurl'
                                ,'TEMPLATE_LOADER':
                                 jinja2.FileSystemLoader('testdata/pristine/templates')
                                }), override=True)
    urlmapper = URLMapper(settings.OUTPUT_DIR, settings.PERMALINK_ROOT)
    self.globs['cat'] = cat
    self.globs['environment'] = {'urlmapper': urlmapper
                                ,Jinja2Base: {'globals':
                                              {'urlfor': urlmapper.url}}
                                }
    self.globs['objects'] = {'post': c900.posts, 'staticrst': c900.staticrst}
    self.globs['os'] = os
    # Bug fix: the ``os`` module was previously stored under the
    # 'tempfile' key; expose the actual tempfile module instead.
    self.globs['tempfile'] = tempfile
    # Scratch file shared by the doctests; removed again by _teardown().
    self.globs['path'] = tempfile.NamedTemporaryFile(delete=False).name
    self.globs['_path'] = self.globs['path']
def _teardown(test):
    '''Remove the scratch file and output directory created by _setup().
    '''
    import os
    import shutil
    scratch_file = test.globs['_path']
    os.unlink(scratch_file)
    shutil.rmtree(settings.OUTPUT_DIR)
/HireFire-1.1.tar.gz/HireFire-1.1/docs/index.rst | .. include:: ../README.rst
Proc backends
-------------
Two base classes are included that you can use to implement custom
backends. All the other contributed backends use those base classes,
too.
``hirefire.procs.Proc``
^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: hirefire.procs.Proc
:members:
``hirefire.procs.ClientProc``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: hirefire.procs.ClientProc
:members:
:inherited-members:
Contributed backends
^^^^^^^^^^^^^^^^^^^^
See the following API overview of the other supported queuing backends.
.. toctree::
:maxdepth: 2
procs
Issues & Feedback
-----------------
For bug reports, feature requests and general feedback, please use the
`Github issue tracker`_.
Thanks
------
Many thanks to the folks at Hirefire_ for building a great tool for
the Heroku ecosystem.
Authors
^^^^^^^
.. include:: ../AUTHORS.rst
Changes
-------
.. include:: ../CHANGES.rst
.. _HireFire: http://hirefire.io/
.. _`Github issue tracker`: https://github.com/jezdez/hirefire/issues
| PypiClean |
/DataGun-0.1.2.tar.gz/DataGun-0.1.2/README.md | # Deserialization and converting data to the required type using Python tools, for subsequent insertion into the database

[](https://raw.githubusercontent.com/vintasoftware/datagun/master/LICENSE)
[](https://pepy.tech/project/datagun)
<a href="https://github.com/psf/black"><img alt="Code style: black" src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
## Installation
```
pip install datagun
```
## Documentation
## Authors
Pavel Maksimov -
[Telegram](https://t.me/pavel_maksimow),
[Facebook](https://www.facebook.com/pavel.maksimow)
Good luck friend! Put an asterisk;)
Удачи тебе, друг! Поставь звездочку ;)
Copyright (c) Pavel Maksimov.
## Change log | PypiClean |
/OccuPy-0.1.13.tar.gz/OccuPy-0.1.13/occupy_lib/map_tools.py | import mrcfile.mrcfile
import numpy as np
import mrcfile as mf
import wget
import gzip
import os
from pathlib import Path
import scipy.fft as spfft
from pkg_resources import get_distribution
__version__ = get_distribution("occupy").version
def create_radial_mask(
        size: int,
        dim: int,
        center: int = None,
        radius: float = None
):
    """
    Create a circular (2D) or spherical (3D) boolean mask.

    :param size: Output array side length
    :param dim: Output array dimension (2 or 3)
    :param center: Center coordinate of the radial region (defaults to the array middle)
    :param radius: Radius of the radial region (defaults to the center coordinate)
    :return: Boolean array of shape (size,)*dim, True inside the radius
    """
    assert dim == 2 or dim == 3
    # Default to the middle of the image unless otherwise specified.
    if center is None:
        center = (size - 1) / 2
    # Default to a radius reaching the array edges.
    if radius is None:
        radius = center
    # Work in single precision, matching the rest of the pipeline.
    center = np.float32(center)
    radius = np.float32(radius)
    # Open (broadcastable) coordinate axes; np.ix_ permits specifying dtype
    # (equivalent to np.ogrid, which does not).
    axes = np.ix_(*[np.arange(size, dtype=np.int32) for _ in range(dim)])
    squared_dist = sum((axis - center) ** 2 for axis in axes)
    if dim == 2:
        dist_from_center = np.sqrt(squared_dist)
    else:
        dist_from_center = np.sqrt(squared_dist.astype(np.float32))
    return dist_from_center <= radius
def new_mrc(
        data: np.ndarray,
        file_name: str,
        parent: str = None,
        vox_sz: int = None,
        verbose: bool = False,
        extra_header=None,
        log=None
):
    """
    Write data to a new mrc file, using either an existing (parent) file or
    an explicit voxel size to define data parameters.

    If the parent has different dimensions, the box size is assumed equal
    with unequal sampling.  The voxel-size and any offset are thus adjusted
    so that the maps coincide.

    :param data: Data to write
    :param file_name: Output file name (suffix is forced to .mrc)
    :param parent: Parent file name (optional)
    :param vox_sz: Output voxel size, applied when no parent is given (optional)
    :param verbose: Be verbose (optional)
    :param extra_header: String for the output header label (optional)
    :param log: Open file handle to print the verbose message to (optional)
    :return:
    """
    if parent is None and vox_sz is None:
        raise ValueError('No parent or pixel-value provided for new mrc file')

    # Make sure the suffix is .mrc
    file_name = Path(file_name).with_suffix('')
    file_name = f'{file_name}.mrc'

    # Open
    o_file = mf.new(file_name, overwrite=True)

    # set_data() will update header info and stats
    o_file.set_data(data.astype(np.float32))

    # Add labels to document what happened
    o_file.add_label(f'Created using OccuPy {__version__}')
    if extra_header is not None:
        o_file.add_label(f'{extra_header}')

    if parent is not None:
        # Make voxel size and start offsets coincide with the parent map.
        adjust_to_parent(parent, file_handle=o_file)
    else:
        # Bug fix: vox_sz was previously accepted but never written to the
        # header when no parent was given.
        o_file.voxel_size = vox_sz

    o_file.flush()
    o_file.validate()
    o_file.close()

    if verbose:
        if log is None:
            print(f'Wrote new file {file_name}')
        else:
            print(f'Wrote {file_name}', file=log)
def adjust_to_parent(
        parent: str =None,
        file_name: str = None,
        file_handle: mf.mrcfile.MrcFile = None
):
    """
    Adjust an mrc-file to coincide with the parent, i.e. overlap their boxes by adjusting voxel-size and offset

    The file to adjust is given either by name or as an already-open handle;
    a handle opened here is also closed here, a caller-provided handle is
    flushed but left open.  A None parent is a no-op.

    :param parent: Parent file name
    :param file_name: File name of the mrc-file to adjust
    :param file_handle: File-handle of the mrc-file to adjust
    :return:
    """
    if parent is None:
        return
    # Do not close file by default
    close = False
    # Open file if necessary
    if file_handle is None:
        assert file_name is not None
        # If open, then close
        close = True
        file_handle = mf.open(file_name,'r+')
    parent_handle = mf.open(parent)
    # Relative scaling: ratio of parent box size to this file's box size,
    # assuming equal physical extent with unequal sampling.
    factor = parent_handle.header['nx'] / file_handle.header['nx']
    # Map centering
    offset_p = parent_handle.header['nxstart']
    # Map scaling
    pix_size_p = parent_handle.voxel_size.x
    # Adjust: scale the voxel size and offset so both maps span the same
    # physical volume.
    file_handle.voxel_size = pix_size_p * factor
    file_handle.nstart = int(round(offset_p / factor))
    # Ensure axis ordering matches the parent's column/row/section mapping.
    file_handle.header['mapc'] = parent_handle.header['mapc']
    file_handle.header['mapr'] = parent_handle.header['mapr']
    file_handle.header['maps'] = parent_handle.header['maps']
    # Flush to be sure
    file_handle.flush()
    # Close if necessary
    parent_handle.close()
    if close:
        file_handle.close()
def change_voxel_size(
        file: str,
        sz: int = None,
        parent: str = None
):
    """
    Set the voxel size of an existing mrc-file, copying it from a parent
    file or from an explicit value.  A parent takes precedence over sz.

    :param file: mrc-file to modify in place
    :param sz: explicit voxel size (optional)
    :param parent: file to copy the voxel size from (optional)
    """
    if (parent is None) and (sz is None):
        raise ValueError('Change to pixel size to what? (No parent or value provided)')
    modified = mf.open(file, 'r+')
    if parent is not None:
        # A failure to open the parent is reported but leaves the file's
        # voxel size unchanged.
        try:
            reference = mf.open(parent)
            modified.voxel_size = reference.voxel_size
            reference.close()
        except ValueError:
            print('Could not open parent file for reading pixel size')
    elif sz is not None:
        modified.voxel_size = sz
    modified.flush()
    modified.close()
def clip_to_range(
        change: np.ndarray,
        reference: np.ndarray = None,
        range: np.ndarray = None
):
    """
    Clip an array to the value range of a reference array, or to an
    explicit [min, max] range.

    :param change: array to clip
    :param reference: array whose min/max define the clip range (optional)
    :param range: explicit two-element [min, max] clip range, used only
        when no reference is given (optional)
    :return: clipped array
    """
    if reference is None:
        if range is None:
            # Bug fix: this previously raised a (class, message) tuple,
            # which itself raises TypeError in Python 3.
            raise ValueError("No clip reference or range given")
        assert len(range) == 2
        assert range[0] < range[1]
        rp = np.copy(range)
    else:
        rp = np.array([np.min(reference), np.max(reference)])
    change = np.clip(change, rp[0], rp[1])
    return change
def uniscale_map(
        data: np.ndarray,
        move: bool = False
):
    """
    Rescale an array (in place) so that its value range is 1, and optionally
    shift it to lie on [0, 1].

    :param data: input array (modified in place)
    :param move: shift the minimum to 0, i.e. map onto [0, 1]?
    :return: rescaled array
    """
    lo, hi = np.min(data), np.max(data)
    data /= hi - lo
    if move:
        data -= np.min(data)
    return data
def lowpass(
        in_data: np.ndarray,
        resolution: float = None,
        voxel_size: float = None,
        output_size: int = None,
        square: bool = False,
        resample: bool = False
):
    """
    Low-pass a 2D- or 3D-array. Intended for cryo-EM reconstructions.

    Will place a radial or square window and omit high-frequency components
    outside this window.  If resampling, the output box will be changed to
    crop or pad the input.

    One must specify either
      1) The desired output size (implies rescale)
      2) The cutoff frequency (resolution) AND pixel size.

    :param in_data: input array to be low-passed
    :param resolution: spatial cutoff [Å]
    :param voxel_size: input voxel size [Å]
    :param output_size: output array size [pix]
    :param square: use a square (not radial) window
    :param resample: allow output to be cropped/padded
    :return: (low-passed array, output voxel size or None)
    """
    # Test square
    n = np.shape(in_data)
    # Bug fix: previously asserted on np.unique(np.shape(n)) — the shape of
    # the shape tuple — which is always a single value, so non-square input
    # was never rejected.
    assert len(np.unique(n)) == 1, "Input array to lowpass is not square"
    # Test dim
    ndim = len(n)
    assert ndim == 2 or ndim == 3, "Input array to lowpass is not 2 or 3 "
    # Test even
    n = n[0]
    assert n % 2 == 0, "Input array size is not even"
    # Test required input
    assert output_size is not None or voxel_size is not None, "Lowpass needs pixel size or number of pixels."
    assert output_size is not None or resolution is not None, "Lowpass needs a cutoff resolution or number of pixels"
    out_voxel_size = None
    # If the output size is specified, then we are resampling
    if output_size is not None:
        keep_shells = int(output_size / 2)
        resample = True
    # Otherwise the voxel size must have been specified
    else:
        keep_shells = int(np.floor((n * voxel_size) / resolution))  # Keep this many of the lower frequencies
        out_voxel_size = np.copy(voxel_size)
    # Normalization factor for unequal input/output
    factor = 1
    if resample:
        factor = output_size / n
    # More workers does not seem to have an effect on my system
    workers = 1
    with spfft.set_workers(workers):
        # FFT forward; real-input transform keeps only half the last axis.
        f_data = spfft.rfftn(in_data)  # *2*np.pi/n
        f_data = spfft.fftshift(f_data, axes=(0, 1))
    # If we are resampling, then we may be able to provide the output voxel size
    if resample and voxel_size is not None:
        out_voxel_size = voxel_size * n / (2 * keep_shells)
    # We are going to grab central information from the input and make it central in the output
    mid_in = int(n / 2)
    mid_out = None
    if 2 * keep_shells > n:
        # Pad instead
        if not resample:
            # Padding without resampling is not possible
            return in_data, out_voxel_size
        # NOTE(review): this padded array is unconditionally overwritten by
        # the "if resample" branch just below, so the pad assignment appears
        # dead — confirm intended behavior of the padding path.
        t = np.zeros((2 * keep_shells, 2 * keep_shells, keep_shells + 1), dtype=np.complex64)
        edge = int((2 * keep_shells - n) / 2)
        t[edge:edge + n, edge:edge + n, :-edge] = f_data
    if resample:
        mid_out = keep_shells
        t = np.zeros((2 * keep_shells * np.ones(ndim).astype(int)), dtype=np.complex64)
        t = t[..., keep_shells - 1:]
    else:
        mid_out = mid_in
        t = np.zeros(np.shape(f_data), dtype=np.complex64)
    keep_shells = int(np.min([keep_shells, n / 2]))
    # Copy the retained central (low-frequency) region into the output grid.
    if ndim == 3:
        t[mid_out - keep_shells:mid_out + keep_shells, mid_out - keep_shells:mid_out + keep_shells, :keep_shells + 1] = \
            f_data[mid_in - keep_shells:mid_in + keep_shells, mid_in - keep_shells:mid_in + keep_shells,
            :keep_shells + 1]
    elif ndim == 2:
        t[mid_out - keep_shells:mid_out + keep_shells, :keep_shells + 1] = \
            f_data[mid_in - keep_shells:mid_in + keep_shells, :keep_shells + 1]
    if not square:
        # Apply a radial window rather than the square implied by slicing.
        mask = create_radial_mask(2 * mid_out, radius=keep_shells + 1, dim=ndim)[..., mid_out - 1:]
        t = np.multiply(t, mask)
    with spfft.set_workers(workers):
        # FFT reverse
        t = spfft.ifftshift(t, axes=(0, 1))
        t = spfft.irfftn(t)
    # The FFT must be normalized when the grid size changed.
    if resample:
        t *= factor ** ndim
    return t, out_voxel_size
def lowpass_map(
        data: np.ndarray,
        cutoff: float = None,
        voxel_size: float = 1.0,
        resample: bool = False,
        keep_scale: bool = False
):
    """Low-pass filter a 3D map with a radial Fourier window.

    :param data: input 3D array
    :param cutoff: spatial cutoff [Å]; None returns the input unchanged
    :param voxel_size: voxel size [Å] used to convert cutoff to shells
    :param resample: crop the Fourier grid to the cutoff (marked TODO below)
    :param keep_scale: rescale the output so its mean/max match the input
    :return: the filtered array
    """
    if cutoff is None:
        return data
    n = np.shape(data)[0]
    ndim = len(np.shape(data))
    # Remember the input maximum for optional re-scaling at the end.
    ref_scale = np.max(data)
    assert ndim == 3  # TODO make work for 2D just in case
    # More workers does not seem to have an effect on my system
    workers = 1
    f_data = []
    with spfft.set_workers(workers):
        # FFT forward (real input: half-size last axis), then center the
        # zero frequency on the first two axes.
        f_data = spfft.rfftn(data)
        f_data = spfft.fftshift(f_data, axes=(0, 1))
    # Convert the cutoff from Å to Fourier shells.
    cutoff /= voxel_size
    cutoff_level = int(np.floor(2 * (n / cutoff)))  # Keep this many of the lower frequencies
    mid = int(n / 2)
    if resample:  # TODO test/fix
        mid_resample = cutoff_level // 2
        mask = create_radial_mask(cutoff_level, ndim)[:, :, mid_resample:]
        t = f_data[mid - cutoff_level:mid + cutoff_level, mid - cutoff_level:mid + cutoff_level, :cutoff_level + 1]
        # print(t.shape,mask.shape)
        t = np.multiply(t, mask)
    else:
        # Zero out everything outside a sphere of radius cutoff_level.
        mask = create_radial_mask(n, ndim, radius=cutoff_level)[:, :, mid - 1:]
        # print(f_data.shape,mask.shape,mask.sum(),mask.size,n,cutoff_level)
        t = np.multiply(f_data, mask)
    r_data = []
    with spfft.set_workers(workers):
        # FFT reverse
        t = spfft.ifftshift(t, axes=(0, 1))
        r_data = spfft.irfftn(t)
    if keep_scale:
        # Match the input's maximum while keeping the output mean fixed.
        m = np.mean(r_data)
        r_data = (r_data - m) * (ref_scale / np.max(r_data)) + m
    return r_data
def gunzip(source_filepath, dest_filepath, block_size=65536):
    """Decompress a gzip file to ``dest_filepath``, streaming in chunks.

    :param source_filepath: path of the gzip-compressed input file
    :param dest_filepath: path the decompressed output is written to
    :param block_size: chunk size in bytes for the streaming copy
    """
    with gzip.open(source_filepath, 'rb') as compressed, \
            open(dest_filepath, 'wb') as destination:
        # Read fixed-size chunks until read() returns b'' at EOF.
        for chunk in iter(lambda: compressed.read(block_size), b''):
            destination.write(chunk)
def fetch_EMDB(ID: str):
    """Download and decompress an EMDB map by entry number.

    Skips the download if the unzipped map is already present.  On an SSL
    failure, retries once with certificate verification disabled.

    :param ID: EMDB entry number, e.g. '3061'
    :return: Path to the unzipped .map file, or None on failure
    """
    # NOTE(review): file_name receives wget's output path but is never
    # used afterwards; the code relies on wget saving to fetch_name.
    file_name = ''
    fetch_name = f'emd_{ID}.map.gz'
    # The unzipped file name is the .gz name with the suffix stripped.
    map_name = Path(Path(fetch_name).stem)
    if map_name.is_file():
        print(f'Found already downloaded file {map_name}')
        return map_name
    url = f'https://ftp.ebi.ac.uk/pub/databases/emdb/structures/EMD-{ID}/map/emd_{ID}.map.gz'
    print(f'Fetching {fetch_name}')
    fail = False
    try:
        file_name = wget.download(url)
        print(f'\n Done fetching {fetch_name}')
    except Exception as e:
        # Retry once without SSL verification (common failure on systems
        # with incomplete certificate stores).
        try:
            print(f'Failed, trying without ssl verification...')
            import ssl
            ssl._create_default_https_context = ssl._create_unverified_context
            file_name = wget.download(url)
        except:
            print(f'Omitting ssl verification did not help.')
            print(f"EMDB entry {ID} could not be fetched through url: \n {url}")
            print(f'Reason: {e}')
            fail = True
    if not fail:
        print(f'Unzipping {fetch_name}')
        try:
            gunzip(fetch_name,map_name)
            # Remove the compressed archive once unzipped.
            os.remove(fetch_name)
            print(f'Done unzipping')
        except Exception as e:
            print(f'Error trying to gunzip {fetch_name}')
            print(f'Issue: {e}')
            fail =True
    if fail:
        return None
    else:
        return map_name
# --- dchars/languages/fro/dstring.py ---
# problem with Pylint :
# pylint: disable=E0611
# many errors like "No name 'extensions' in module 'dchars'"
import re
import unicodedata
from dchars.errors.errors import DCharsError
from dchars.dstring import DStringMotherClass
from dchars.languages.fro.dcharacter import DCharacterFRO
from dchars.languages.fro.symbols import SYMB_PUNCTUATION, \
SYMB_UPPER_CASE, \
SYMB_LOWER_CASE, \
SYMB_DIACRITICS, \
SORTING_ORDER, \
SYMB_DIACRITICS__STRESS1, \
SYMB_DIACRITICS__STRESS2, \
SYMB_DIACRITICS__STRESS12, \
SYMB_DIACRITICS__STRESS3, \
SYMB_DIACRITICS__CEDILLA
from dchars.utilities.lstringtools import number_of_occurences
from dchars.utilities.sortingvalue import SortingValue
# known transliterations :
import dchars.languages.fro.transliterations.basic.basic as basictrans
import dchars.languages.fro.transliterations.basic.ucombinations as basictrans_ucombinations
################################################################################
class DStringFRO(DStringMotherClass):
    """
    class DStringFRO

    A DString for the language stored under ISO 639-3 code 'fro'.

    DO NOT CREATE A DStringFRO object directly but use instead the
    dchars.py::new_dstring function.
    """

    # regex pattern used to slice a source string :
    #
    # NB : we use the default_symbols__pattern() function, NOT the normal
    #      default_symbols() function since some characters have to be
    #      treated apart to work with a regex.
    pattern_letters = "|".join( SYMB_LOWER_CASE.default_symbols__pattern() + \
                                SYMB_UPPER_CASE.default_symbols__pattern() + \
                                SYMB_PUNCTUATION.default_symbols__pattern() )
    pattern_diacritics = "|".join( SYMB_DIACRITICS.default_symbols__pattern() )
    # one match = one letter optionally followed by one or more diacritics
    pattern = re.compile("((?P<letter>{0})(?P<diacritics>({1})+)?)".format( pattern_letters,
                                                                            pattern_diacritics ))

    # transliterations' methods : available direction(s) :
    trans__directions = {
          "basic" : basictrans.AVAILABLE_DIRECTIONS,
          }

    # transliteration's functions :
    trans__init_from_transliteration = {
          "basic" : basictrans.dstring__init_from_translit_str,
          }
    # NOTE(review): this maps to dchar__init_from_translit_str, whose name
    # suggests an init function rather than a getter — confirm against
    # transliterations/basic/basic.py.
    trans__get_transliteration = {
          "basic" : basictrans.dchar__init_from_translit_str,
          }
    trans__get_transl_ucombinations = {
          "basic" : basictrans_ucombinations.get_usefull_combinations,
          }

    #///////////////////////////////////////////////////////////////////////////
    def __init__(self, str_src = None):
        """
        DStringFRO.__init__

        the three following attributes have been created by the call to
        dchars.py::new_dstring() :

        self.iso639_3_name : (str)
        self.transliteration_method : (str)
        self.options : (dict)
        """
        DStringMotherClass.__init__(self)

        if str_src is not None:
            self.init_from_str(str_src)

    #///////////////////////////////////////////////////////////////////////////
    def get_usefull_combinations(self):
        """
        DStringFRO.get_usefull_combinations

        Return a DString with all the usefull combinations of characters,
        i.e. only the 'interesting' characters (not punctuation if it's too simple
        by example). The DChars stored in the dstring will be unique, id est, two
        dchars will not have the same appearence (__str__())

        NB : this function has nothing to do with linguistic or a strict
        approach of the language. This function allows only to get the
        most common and/or usefull characters of the writing system.

        NB : function required by the dchars-fe project.
        """
        self.clear()

        dchar = DCharacterFRO(self)
        for dchar in dchar.get_usefull_combinations():
            # keep the character only if no already-stored dchar renders
            # to the same string :
            already_present = False
            for dchar2 in self:
                if str(dchar) == str(dchar2):
                    already_present = True
            if not already_present:
                self.append( dchar )

        return self

    #///////////////////////////////////////////////////////////////////////////
    def get_usefull_transl_combinations(self):
        """
        DStringFRO.get_usefull_transl_combinations

        Return a (str)string with all the usefull combinations of TRANSLITTERATED
        characters, i.e. only the 'interesting' characters (not punctuation if
        it's too simple by example).

        NB : this function has nothing to do with linguistic or a strict
        approach of the language. This function allows only to get the
        most common and/or usefull characters of the writing system.

        NB : function required by the dchars-fe project.
        """
        # Pylint can't know that <self> has a 'transliteration_method' member
        # created when <self> has been initialized by new_dstring() :
        # pylint: disable=E1101
        # -> "Instance of 'DStringFRO' has no 'transliteration_method' member"
        res = DStringFRO.trans__get_transl_ucombinations[self.transliteration_method]()
        return res

    #///////////////////////////////////////////////////////////////////////////
    def get_transliteration(self):
        """
        DStringFRO.get_transliteration

        We try to use the method defined in self.transliteration_method;
        if this attribute doesn't exist, the function use the default method.
        """
        # Pylint can't know that <self> has a 'transliteration_method' member
        # created when <self> has been initialized by new_dstring() :
        # pylint: disable=E1101
        # -> "Instance of 'DStringFRO' has no 'transliteration_method' member"
        res = []

        for dchar in self:
            res.append( dchar.get_transliteration(
                dstring_object = self,
                transliteration_method = self.transliteration_method) )

        return "".join( res )

    #///////////////////////////////////////////////////////////////////////////
    def init_from_str(self, str_src):
        """
        DStringFRO.init_from_str

        Function called by __init__(), initialize <self> and return
        <indexes_of_unrecognized_chars>.
        (NOTE(review): despite the historical wording above, this
        implementation has no return statement and therefore returns None.)

        str_src : str

        HOW IT WORKS :
        * (1) str_src -> (decomposition) unicodedata.normalize('NFD',) = normalized_src
        * (2) = normalized_src -> (default symbols required) :
        *     replace_by_the_default_symbols() -> normalized_src
        * (3) initialisation from the recognized characters.
        *     re.finditer(DStringFRO.pattern) give the symbols{letter+diacritics}
        *     (3.1) base_char
        *     (3.2) stress
        *     (3.3) cedilla
        *     (3.4) we add the new character
        """
        #.......................................................................
        # (1) str_src -> (decomposition) unicodedata.normalize('NFD',) = normalized_src
        #.......................................................................
        normalized_src = unicodedata.normalize('NFD', str_src)

        #.......................................................................
        # (2) = normalized_src -> (default symbols required) :
        #     replace_by_the_default_symbols() -> normalized_src
        #.......................................................................
        normalized_src = SYMB_PUNCTUATION.replace_by_the_default_symbols(normalized_src)
        normalized_src = SYMB_LOWER_CASE.replace_by_the_default_symbols(normalized_src)
        normalized_src = SYMB_UPPER_CASE.replace_by_the_default_symbols(normalized_src)
        normalized_src = SYMB_DIACRITICS.replace_by_the_default_symbols(normalized_src)

        #.......................................................................
        # (3) initialisation from the recognized characters.
        #     re.finditer(DStringFRO.pattern) give the symbols{letter+diacritics}
        #.......................................................................
        indexes = []    # indexes of the substring well analyzed : ( start, end )
        for element in re.finditer(DStringFRO.pattern,
                                   normalized_src):

            #. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
            # we add the unknown characters at the beginning and in the middle
            # of the string (see at the end of this function)
            #. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
            if indexes:
                # <indexes> isn't empty :
                # ... we add the unknown character(s) between the last character and
                # the current one :
                for index in range( max(indexes[-1])+1, element.start() ):
                    new_character = DCharacterFRO(dstring_object = self,
                                                  unknown_char = True,
                                                  base_char = normalized_src[index])
                    self.append( new_character )
            else:
                # <indexes> is empty :
                # ... we add the unknown character(s) before the first index in <indexes> :
                for index in range( 0, element.start() ):
                    new_character = DCharacterFRO(dstring_object = self,
                                                  unknown_char = True,
                                                  base_char = normalized_src[index])
                    self.append( new_character )

            indexes.append( (element.start(), element.end()-1 ) )

            data = element.groupdict()
            letter = data['letter']
            diacritics = data['diacritics']

            punctuation = letter in SYMB_PUNCTUATION.symbol2name
            capital_letter = letter in SYMB_UPPER_CASE.symbol2name

            #. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
            # (3.1) base_char
            #. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
            if punctuation:
                # punctuation symbol :
                base_char = SYMB_PUNCTUATION.get_the_name_for_this_symbol(letter)
            elif not capital_letter:
                # lower case :
                base_char = SYMB_LOWER_CASE.get_the_name_for_this_symbol(letter)
            else:
                # upper case :
                base_char = SYMB_UPPER_CASE.get_the_name_for_this_symbol(letter)

            stress = 0
            cedilla = False
            if diacritics is not None:
                #. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
                # (3.2) stress
                #. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
                stress1_nbr = number_of_occurences( source_string = diacritics,
                                                    symbols = SYMB_DIACRITICS__STRESS1)
                stress2_nbr = number_of_occurences( source_string = diacritics,
                                                    symbols = SYMB_DIACRITICS__STRESS2)
                stress12_nbr = number_of_occurences( source_string = diacritics,
                                                     symbols = SYMB_DIACRITICS__STRESS12)
                stress3_nbr = number_of_occurences( source_string = diacritics,
                                                    symbols = SYMB_DIACRITICS__STRESS3)

                if stress1_nbr > 1:
                    err_msg = "In '{0}' (start={1}, end={2}), stress1 defined several times."
                    raise DCharsError( context = "DStringFRO.init_from_str",
                                       message = err_msg.format(element.string,
                                                                element.start(),
                                                                element.end()),)

                if stress2_nbr > 1:
                    err_msg = "In '{0}' (start={1}, end={2}), stress2 defined several times."
                    raise DCharsError( context = "DStringFRO.init_from_str",
                                       message = err_msg.format(element.string,
                                                                element.start(),
                                                                element.end()),)

                if stress12_nbr > 1:
                    err_msg = "In '{0}' (start={1}, end={2}), stress12 defined several times."
                    raise DCharsError( context = "DStringFRO.init_from_str",
                                       message = err_msg.format(element.string,
                                                                element.start(),
                                                                element.end()),)

                if stress3_nbr > 1:
                    err_msg = "In '{0}' (start={1}, end={2}), stress3 defined several times."
                    raise DCharsError( context = "DStringFRO.init_from_str",
                                       message = err_msg.format(element.string,
                                                                element.start(),
                                                                element.end()),)

                # NOTE(review): the condition also counts stress3 even though
                # the message only mentions stress1/stress2/stress12.
                if stress1_nbr + stress2_nbr + stress12_nbr + stress3_nbr > 1:
                    err_msg = "In '{0}' (start={1}, end={2}), stress1, stress2 and stress12 " \
                              "simultaneously defined."
                    raise DCharsError( context = "DStringFRO.init_from_str",
                                       message = err_msg.format(element.string,
                                                                element.start(),
                                                                element.end()),)

                stress = 0   # (redundant: already initialized above)
                if SYMB_DIACRITICS.are_these_symbols_in_a_string('stress1', diacritics):
                    stress = 1
                elif SYMB_DIACRITICS.are_these_symbols_in_a_string('stress2', diacritics):
                    stress = 2
                elif SYMB_DIACRITICS.are_these_symbols_in_a_string('stress12', diacritics):
                    stress = 3
                elif SYMB_DIACRITICS.are_these_symbols_in_a_string('stress3', diacritics):
                    stress = 4

                #. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
                # (3.3) cedilla
                #. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
                cedilla_nbr = number_of_occurences( source_string = diacritics,
                                                    symbols = SYMB_DIACRITICS__CEDILLA)

                if cedilla_nbr > 1:
                    err_msg = "In '{0}' (start={1}, end={2}), cedilla defined several times."
                    raise DCharsError( context = "DStringFRO.init_from_str",
                                       message = err_msg.format(element.string,
                                                                element.start(),
                                                                element.end()),)

                if SYMB_DIACRITICS.are_these_symbols_in_a_string('cedilla', diacritics):
                    cedilla = True

            #. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
            # (3.4) we add the new character
            #. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
            new_character = DCharacterFRO(dstring_object = self,
                                          unknown_char = False,
                                          base_char = base_char,
                                          punctuation = punctuation,
                                          capital_letter = capital_letter,
                                          cedilla = cedilla,
                                          stress = stress)

            self.append( new_character )

        #. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
        # we add the final unknown characters (see at the beginning of this
        # function)
        #. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
        if indexes:
            # <element> is the last one and <indexes> isn't empty :
            for index in range( max(indexes[-1])+1, len(normalized_src) ):
                new_character = DCharacterFRO(dstring_object = self,
                                              unknown_char = True,
                                              base_char = normalized_src[index])
                self.append( new_character )
        else:
            # <indexes> is empty : nothing was recognized at all.
            for index in range( 0, len(normalized_src) ):
                new_character = DCharacterFRO(dstring_object = self,
                                              unknown_char = True,
                                              base_char = normalized_src[index])
                self.append( new_character )

    #///////////////////////////////////////////////////////////////////////////
    def init_from_transliteration(self, src):
        """
        DStringFRO.init_from_transliteration

        src : string

        Return <self>
        """
        # Pylint can't know that <self> has a 'transliteration_method' member
        # created when <self> has been initialized by new_dstring() :
        # pylint: disable=E1101
        # -> "Instance of 'DStringFRO' has no 'transliteration_method' member"
        DStringFRO.trans__init_from_transliteration[self.transliteration_method](
                dstring = self,
                dcharactertype = DCharacterFRO,
                src = src)
        return self

    #///////////////////////////////////////////////////////////////////////////
    def sortingvalue(self):
        """
        DStringFRO.sortingvalue

        Return a SortingValue object
        """
        res = SortingValue()

        # Pylint can't know that <self> has an 'options' member
        # created when <self> has been initialized by new_dstring() :
        # pylint: disable=E1101
        # -> "Instance of 'DStringFRO' has no 'options' member"
        if self.options["sorting method"] == "default":
            # base character :
            # unknown characters sort after known ones; characters missing
            # from SORTING_ORDER get sorting_order = -1.
            data = []
            for char in self:
                sorting_order = -1
                if char.base_char in SORTING_ORDER:
                    sorting_order = SORTING_ORDER[char.base_char]
                data.append( ({False:0,
                               True:1}[char.unknown_char],
                              sorting_order ))
            res.append( data )
        else:
            # Pylint can't know that <self> has an 'options' member
            # created when <self> has been initialized by new_dstring() :
            # pylint: disable=E1101
            # -> "Instance of 'DStringFRO' has no 'options' member"
            err_msg = "unknown sorting method '{0}'."
            raise DCharsError( context = "DStringFRO.sortingvalue",
                               message = err_msg.format(self.options["sorting method"]) )

        return res
# --- flask_sqlalchemy_booster/utils.py ---
from __future__ import absolute_import
from sqlalchemy.ext.associationproxy import (
_AssociationDict, _AssociationList, _AssociationSet)
from sqlalchemy.orm.collections import (
InstrumentedList, MappedCollection)
from sqlalchemy.orm import class_mapper
from toolspy import flatten, all_subclasses, remove_duplicates, boolify
from werkzeug.utils import secure_filename
from datetime import datetime
import uuid
import os
from sqlalchemy.sql import sqltypes
from sqlalchemy import func
import dateutil.parser
from decimal import Decimal
import six
from contextlib import contextmanager
@contextmanager
def session_scope(session_creator):
    """Provide a transactional scope around a series of operations.

    Commits on success, rolls back on any failure (re-raising it), and
    always closes the session.

    :param session_creator: zero-argument callable returning a session.
    """
    session = session_creator()
    try:
        yield session
        session.commit()
    except BaseException:
        # Explicit BaseException instead of a bare except (flake8 E722):
        # roll back on *any* failure, including KeyboardInterrupt, then
        # re-raise so the caller still sees the error.
        session.rollback()
        raise
    finally:
        session.close()
def is_list_like(rel_instance):
    """Return True when *rel_instance* is a list/set-style collection.

    Covers the builtin list/set types plus SQLAlchemy's list/set
    association proxies and InstrumentedList.
    """
    if isinstance(rel_instance, (list, set)):
        return True
    return (isinstance(rel_instance, _AssociationList)
            or isinstance(rel_instance, _AssociationSet)
            or isinstance(rel_instance, InstrumentedList))
def is_dict_like(rel_instance):
    """Return True when *rel_instance* is a mapping-style collection.

    Covers builtin dicts plus SQLAlchemy's dict association proxy and
    MappedCollection.
    """
    if isinstance(rel_instance, dict):
        return True
    return (isinstance(rel_instance, _AssociationDict)
            or isinstance(rel_instance, MappedCollection))
def all_cols_including_subclasses(model_cls):
    """Return de-duplicated (name, column) pairs for *model_cls* and every subclass."""
    pairs = list(class_mapper(model_cls).columns.items())
    for subclass in all_subclasses(model_cls):
        pairs.extend(class_mapper(subclass).columns.items())
    return remove_duplicates(pairs)
def all_rels_including_subclasses(model_cls):
    """Return de-duplicated (name, relationship) pairs for *model_cls* and every subclass."""
    pairs = list(class_mapper(model_cls).relationships.items())
    for subclass in all_subclasses(model_cls):
        pairs.extend(class_mapper(subclass).relationships.items())
    return remove_duplicates(pairs)
def nullify_empty_values_in_dict(d):
    """Replace empty-string values of *d* with None, in place; return *d*."""
    for key, value in d.items():
        if value == '':
            d[key] = None
    return d
def remove_empty_values_in_dict(d):
    """Delete keys of *d* whose value is the empty string, in place; return *d*.

    Iterates over a snapshot of the keys: deleting from a dict while
    iterating its live key view raises RuntimeError in Python 3 (which
    the previous implementation did).
    """
    for k in list(d.keys()):
        if d[k] == '':
            del d[k]
    return d
def save_file_from_request(_file, location=None):
    """Save an uploaded file under *location* with a collision-resistant name.

    The stored name is ``<utc timestamp>_<6 hex chars>_<sanitized original
    name>``; the full path is returned.
    """
    timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S%f")
    suffix = uuid.uuid4().hex[:6]
    safe_name = secure_filename(_file.filename)
    filename = "_".join([timestamp, suffix, safe_name])
    file_path = os.path.join(location, filename)
    _file.save(file_path)
    return file_path
def type_coerce_value(column_type, value):
    """Coerce *value* (often a raw string) to the Python type implied by
    the SQLAlchemy *column_type* class.

    'none'/'null' (case-insensitive) and blank strings become None; other
    values are converted per column type, or returned unchanged when the
    type is not handled.
    """
    if value is None:
        return None
    if isinstance(value, (six.text_type, str)):
        lowered = value.lower()
        if lowered == 'none' or lowered == 'null' or value.strip() == '':
            return None
    if column_type is sqltypes.Integer:
        return int(value)
    if column_type is sqltypes.Numeric:
        return Decimal(value)
    if column_type is sqltypes.Boolean:
        return boolify(value)
    if column_type is sqltypes.DateTime:
        return dateutil.parser.parse(value)
    if column_type is sqltypes.Date:
        return dateutil.parser.parse(value).date()
    return value
def convert_to_proper_types(data, model_class):
    """Coerce each value in *data* to the type of the matching column of
    *model_class*, in place; return *data*.

    Keys without a matching mapped column are left untouched.
    """
    columns = model_class.__mapper__.columns
    for attr_name, value in data.items():
        if attr_name not in columns:
            continue
        column_type = type(columns[attr_name].type)
        data[attr_name] = type_coerce_value(column_type, value)
    return data
def cast_as_column_type(value, col):
    """Coerce *value* to the Python type of SQLAlchemy column *col*."""
    return type_coerce_value(type(col.type), value)
def tz_str(mins):
    """Format a UTC offset given in minutes as '+HH:MM' / '-HH:MM'."""
    sign = "-" if mins < 0 else "+"
    hours, minutes = divmod(abs(mins), 60)
    return "%s%02d:%02d" % (sign, hours, minutes)
def tz_convert(datetime_col, timedelta_mins):
    """SQL expression converting *datetime_col* from GMT to the offset
    *timedelta_mins* (minutes), via MySQL's CONVERT_TZ."""
    gmt_offset = '+00:00'
    return func.convert_tz(datetime_col, gmt_offset, tz_str(timedelta_mins))
def tz_converted_date(datetime_col, timedelta_mins):
    """SQL expression for the local-time DATE of *datetime_col* at the
    offset *timedelta_mins* (minutes)."""
    shifted = tz_convert(datetime_col, timedelta_mins)
    return func.date(shifted)
def get_rel_from_key(parent_class, rel_key):
    """Return the relationship of *parent_class* whose key is *rel_key*.

    Raises StopIteration if no relationship has that key.
    """
    matches = (rel for rel in class_mapper(parent_class).relationships
               if rel.key == rel_key)
    return next(matches)
def get_rel_class_from_key(parent_class, rel_key):
    """Return the mapped class targeted by relationship *rel_key* of *parent_class*."""
    relationship = get_rel_from_key(parent_class, rel_key)
    return relationship.mapper.class_
def attr_is_a_property(klass, attr):
    """Return True if *klass* exposes *attr* and it is a property object."""
    if not hasattr(klass, attr):
        return False
    return isinstance(getattr(klass, attr), property)
# --- hftcryptoapi/bitmart/data/constants.py ---
from enum import Enum, EnumMeta, unique
class MetaEnum(EnumMeta):
    """Enum metaclass that makes ``value in EnumClass`` test raw values."""

    def __contains__(cls, item):
        # Constructing a member from a valid value succeeds; an invalid
        # value raises ValueError.
        try:
            cls(item)
        except ValueError:
            return False
        else:
            return True
""" Base urls for API endpoints """
# Domain constants
API_URL = 'https://api-cloud.bitmart.com'
WS_URL = 'wss://ws-manager-compress.bitmart.com/api?protocol=1.1'
WS_URL_USER = 'wss://ws-manager-compress.bitmart.com/user?protocol=1.1'
CONTRACT_WS_URL = 'wss://openapi-ws.bitmart.com/api?protocol=1.1'
CONTRACT_WS_URL_USER = 'wss://openapi-ws.bitmart.com/user?protocol=1.1'
# http headers
CONTENT_TYPE = 'Content-Type'
USER_AGENT = 'User-Agent'
X_BM_KEY = 'X-BM-KEY'
X_BM_SIGN = 'X-BM-SIGN'
X_BM_TIMESTAMP = 'X-BM-TIMESTAMP'
APPLICATION_JSON = 'application/json'
GET = "GET"
POST = "POST"
DELETE = "DELETE"
VERSION = "0.1"
# connection timeout, read timeout
TIMEOUT = (5, 10)
# 1 Spot Market API
# System Status
SYSTEM_TIME = "/system/time"
SERVICE_STATUS = "/system/service"
# Public Market Data
SPOT_CURRENCY_LIST = "/spot/v1/currencies"
SPOT_TRADING_PAIRS_LIST = "/spot/v1/symbols"
SPOT_TRADING_PAIRS_DETAILS = "/spot/v1/symbols/details"
SPOT_TICKER = "/spot/v2/ticker"
SPOT_TICKER_DETAILS = "/spot/v1/ticker_detail"
SPOT_K_LIE_STEP = "/spot/v1/steps"
SPOT_K_LINE = "/spot/v1/symbols/kline"
SPOT_BOOK_DEPTH = "/spot/v1/symbols/book"
SPOT_RECENT_TRADES = "/spot/v1/symbols/trades"
# Sub-Account Data
MAIN_ACCOUNT_SPOT_ASSET = "/account/sub-account/main/v1/sub-to-main"
SUB_ACCOUNT_SPOT_ASSET_TRANSFER = "/account/sub-account/sub/v1/sub-to-main"
MAIN_ACCOUNT_SPOT_ASSET_TRANSFER = "/account/sub-account/main/v1/main-to-sub"
SUB_ACCOUNT_SUB2SUB_SPOT_ASSET_TRANSFER = "/account/sub-account/sub/v1/sub-to-sub"
MAIN_ACCOUNT_SUB2SUB_SPOT_ASSET_TRANSFER = "/account/sub-account/main/v1/sub-to-sub"
MAIN_ACCOUNT_TRANSFER_LIST = "/account/sub-account/main/v1/transfer-list"
ACCOUNT_TRANSFER_HISTORY = "/account/sub-account/v1/transfer-history"
SUB_ACCOUNT_BALANCE = "/account/sub-account/main/v1/wallet"
ALL_SUB_ACCOUNTS_LIST = "/account/sub-account/main/v1/subaccount-list"
## Funding Account Data
ACCOUNT_BALANCE = "/account/v1/wallet"
ACOUNT_ALL_CURRENCIES = "/account/v1/currencies"
ACCOUNT_SPOT_BALANCE = "/spot/v1/wallet"
ACCOUNT_DEPOSIT_ADDRESS = "/account/v1/deposit/address"
ACCOUNT_WITHDRAW_QUOTA = "/account/v1/withdraw/charge"
ACCOUNT_WITHDRAW = "/account/v1/withdraw/apply"
ACCOUNT_WITHDRAW_DEPOSIT_HISTORY = "/account/v2/deposit-withdraw/history"
ACCOUNT_DEPOSIT_WITHDRAW_DETAILS = "/account/v1/deposit-withdraw/detail"
ACCOUNT_MARGIN_DETAILS = "/spot/v1/margin/isolated/account"
ACCOUNT_MARGIN_ASSET_TRANSFER = "/spot/v1/margin/isolated/transfer"
ACCOUNT_USER_FEE = "/spot/v1/user_fee"
ACCOUNT_TRADE_FEE = "/spot/v1/trade_fee"
## Spot /Margin Trading
SPOT_PLACE_ORDER = "/spot/v2/submit_order"
SPOT_MARGIN_PLACE_ORDER = "/spot/v1/margin/submit_order"
SPOT_BATCH_ORDER = "/spot/v2/batch_orders"
SPOT_CANCEL_ORDER = "/spot/v3/cancel_order"
SPOT_CANCEL_ALL_ORDERS = "/spot/v1/cancel_orders"
SPOT_GET_ORDER_DETAILS = "/spot/v2/order_detail"
SPOT_USER_ORDER_HISTORY = "/spot/v3/orders"
SPOT_USER_TRADE_HISTORY = "/spot/v2/trades"
## Margin Loan
MARGIN_BORROW = "/spot/v1/margin/isolated/borrow"
MARING_REPAY = "/spot/v1/margin/isolated/repay"
MARGIN_BORROW_RECORD = "/spot/v1/margin/isolated/borrow_record"
MARING_REPAYMENT_RECORD = "/spot/v1/margin/isolated/repay_record"
MARGIN_TRADING_PAIR_BORROW_RATE_AND_AMOUNT = "/spot/v1/margin/isolated/pairs"
# 2 USD-M Futures Market API
# Futures Market Data
FUTURES_CONTRACT_DETAILS = "/contract/public/details"
FUTURES_MARKET_DEPTH = "/contract/public/depth"
FUTURES_OPEN_INTEREST = "/contract/public/open-interest"
FUTURES_FUNDING_RATE = "/contract/public/funding-rate"
FUTURES_K_LINE = "/contract/public/kline"
## Futures Account Data
FUTURES_CONTRACT_ASSETS_DETAIL = "/contract/private/assets-detail"
## Futures Trading
FUTURES_ORDER_DETAIL = "/contract/private/order"
FUTURES_ORDER_HISTORY = "/contract/private/order-history"
FUTURES_CURRENT_POSITION = "/contract/private/position"
FUTURES_TRADE_DETAIL = "/contract/private/trades"
FUTURES_SUBMIT_ORDER = "/contract/private/submit-order"
FUTURES_CANCEL_ORDER = "/contract/private/cancel-order"
FUTURES_CANCEL_ALL_ORDERS = "/contract/private/cancel-orders"
@unique
class Auth(int, Enum):
    """Authentication level required by an endpoint."""
    NONE = 1
    KEYED = 2
    SIGNED = 3
class Sort(str, Enum):
    """Sort direction for list queries."""
    ASC = "asc"
    DESC = "desc"
@unique
class Exchange(int, Enum):
    """Supported exchange identifiers."""
    BITMART = 1
    BINANCE = 2
    HUOBI = 3
@unique
class ServiceStatus(int, Enum):
    """State reported by the /system/service endpoint."""
    WAITING = 0
    WORKING = 1
    COMPLETED = 2
@unique
class TimeFrame(int, Enum):
    """K-line intervals, expressed in minutes."""
    tf_1m = 1
    tf_5m = 5
    tf_15m = 15
    tf_30m = 30
    tf_1h = 60
    tf_2h = 120
    tf_4h = 240
    tf_1d = 60*24
    tf_1w = 60*24*7
@unique
class Market(str, Enum):
    """Market segments addressed by the client."""
    SPOT = "spot"
    FUTURES = "futures"
    SPOT_MARGIN = "margin"
@unique
class OrderMode(str, Enum):
    """Spot order mode: plain spot vs isolated-margin."""
    SPOT = "spot"
    ISOLATED_MARGIN = "iso_margin"
@unique
class OrderType(str, Enum):
    """Order execution types."""
    LIMIT = "limit"
    MARKET = "market"
    LIMIT_MAKER = "limit_maker"  # only for spot market
    IOC = "ioc"  # only for spot market
@unique
class SpotSide(str, Enum):
    """Side of a spot order."""
    BUY = "buy"
    SELL = "sell"
@unique
class Position(int, Enum):
    """Futures position direction."""
    LONG = 1
    SHORT = 2
@unique
class FuturesSide(int, Enum, metaclass=MetaEnum):
    """Futures order sides (open/close x long/short); supports
    ``value in FuturesSide`` via MetaEnum."""
    BUY_OPEN_LONG = 1
    BUY_CLOSE_SHORT = 2
    SELL_CLOSE_LONG = 3
    SELL_OPEN_SHORT = 4
@unique
class OrderOpenType(str, Enum):
    """Margin mode for futures orders."""
    ISOLATED = "isolated"
    CROSS = "cross"
@unique
class OrderState(int, Enum):
    """Order lifecycle states as returned by the API."""
    STATUS_CHECK = 2
    ORDER_SUCCESS = 4
    PARTIALLY_FILLED = 5
    FULLY_FILLED = 6
    CANCELLED = 8
    OUTSTANDING = 9
    MIX_6_8_11 = 10  # aggregate filter: states 6, 8 and 11 together
    PARTIALLY_FILLED_AND_CANCELED = 11
@unique
class ExecType(str, Enum):
    """Trade execution role: maker or taker."""
    MAKER = "M"
    TAKER = "T"
@unique
class TradeOrderType(int, Enum):
    """Time-in-force / placement policy of an order."""
    REGULAR = 0
    MAKER_ONLY = 1
    FILL_OR_KILL = 2
    IMMEDIATE_OR_CANCEL = 3
@unique
class WayType(int, Enum):
    """Order-book side: ask or bid."""
    ASK = 1
    BID = 2
@unique
class FuturesContractType(int, Enum):
    """Futures contract kind: perpetual or dated."""
    PERPETUAL = 1
    FUTURES = 2
@unique
class BtWebSocket(str, Enum):
    """Base urls for websocket endpoints"""
    PUBLIC = "wss://ws-manager-compress.bitmart.com/api?protocol=1.1"
    PRIVATE = "wss://ws-manager-compress.bitmart.com/user?protocol=1.1"
@unique
class BtSocketOperation(str, Enum, metaclass=MetaEnum):
    """Base operation data for Bitmart websockets"""
    SUBSCRIBE = "subscribe"
    UNSUBSCRIBE = "unsubscribe"
    LOGIN = "login"
@unique
class BtSpotSocketKlineChannels(str, Enum):
    """Base websocket channels for Bitmart Spot Klines"""
    K_LINE_CHANNEL_1MIN = "spot/kline1m"
    K_LINE_CHANNEL_3MIN = "spot/kline3m"
    K_LINE_CHANNEL_5MIN = "spot/kline5m"
    K_LINE_CHANNEL_15MIN = "spot/kline15m"
    K_LINE_CHANNEL_30MIN = "spot/kline30m"
    K_LINE_CHANNEL_1HOUR = "spot/kline1H"
    K_LINE_CHANNEL_2HOURS = "spot/kline2H"
    K_LINE_CHANNEL_4HOURS = "spot/kline4H"
    K_LINE_CHANNEL_1DAY = "spot/kline1D"
    K_LINE_CHANNEL_1WEEK = "spot/kline1W"
    K_LINE_CHANNEL_1MONTH = "spot/kline1M"
@unique
class BtFuturesSocketKlineChannels(str, Enum):
    """Websocket channels for Bitmart futures klines."""
    K_LINE_CHANNEL_1MIN = "futures/klineBin1m"
    K_LINE_CHANNEL_5MIN = "futures/klineBin5m"
    K_LINE_CHANNEL_15MIN = "futures/klineBin15m"
    K_LINE_CHANNEL_30MIN = "futures/klineBin30m"
    K_LINE_CHANNEL_1HOUR = "futures/klineBin1h"
    K_LINE_CHANNEL_2HOURS = "futures/klineBin2h"
    K_LINE_CHANNEL_4HOURS = "futures/klineBin4h"
    K_LINE_CHANNEL_1DAY = "futures/klineBin1d"
    K_LINE_CHANNEL_1WEEK = "futures/klineBin1w"
@unique
class BtSpotSocketDepthChannels(str, Enum):
    """Websocket channels for Bitmart spot order-book depth (5/20/50 levels)."""
    DEPTH_CHANNEL_5LEVEL = "spot/depth5"
    # was "spot/dept20" — typo fixed to match the depth5/depth50 pattern
    # and the documented channel name.
    DEPTH_CHANNEL_20LEVEL = "spot/depth20"
    DEPTH_CHANNEL_50LEVEL = "spot/depth50"
@unique
class BtFuturesSocketDepthChannels(str, Enum):
    """Websocket channels for Bitmart futures order-book depth (5/20/50 levels)."""
    DEPTH_CHANNEL_5LEVEL = "futures/depth5"
    # was "futures/dept20" — typo fixed to match the depth5/depth50 pattern
    # and the documented channel name.
    DEPTH_CHANNEL_20LEVEL = "futures/depth20"
    DEPTH_CHANNEL_50LEVEL = "futures/depth50"
# Single-string channel names (no per-depth / per-interval variants).
BtSpotTickerChannel = "spot/ticker"
BtSpotTradeChannel = "spot/trade"
BtSpotOrderChannel = "spot/user/order"  # private: user order updates
BtFuturesTickerChannel = "futures/ticker"
BtFuturesTPrivatePositionChannel = "futures/position"  # private: positions
BtFuturesTPrivateAssetChannel = "futures/asset"  # private: asset updates
def dwarf_feh(u, g, i, error):
    '''
    Estimate photometric metallicities ([Fe/H]) of dwarf stars from the CSST
    u, g, i bands, using metallicity-dependent stellar loci of (u-g) versus
    (g-i) and a maximum-likelihood grid search.

    Args:
        u: array-like, shape (n, )
            CSST u band
        g: array-like, shape (n, )
            CSST g band
        i: array-like, shape (n, )
            CSST i band
        error: float
            color error. An assumption that (u-g) and (g-i) are independent
            Gaussian variables is made.

    Output: a file named dwarf_feh_predicted.csv with two columns,
    feh (the maximum-likelihood metallicity) and feh_error (its random
    error; blank where the propagation formula has no real solution).

    Stars outside the applicability range 0.26 < (g-i) < 1.24, or below the
    relevant metal-poor locus, are dropped, so the output may contain fewer
    rows than the input.

    Fixes over the previous implementation:
      * the parameter ``i`` is no longer clobbered by loop indices (which
        previously corrupted every (g-i)/(u-g) computed after the first loop);
      * no intermediate CSV files are written in append mode (leftovers from
        a crashed run could previously contaminate the results);
      * the output is assembled in memory instead of via pd.read_csv on
        headerless files (which previously swallowed the first row).
    '''
    import numpy as np
    import pandas as pd

    u = np.asarray(u, dtype=float)
    g = np.asarray(g, dtype=float)
    i_band = np.asarray(i, dtype=float)  # keep the name "i" free of reuse

    # Coefficients of the stellar-locus polynomial (u-g) = f((g-i), [Fe/H]).
    a00, a01, a02, a03 = 1.04509737, 0.13847731, 0.02231436, -0.00305052
    a10, a11, a12 = -0.84510066, 0.18719969, -0.00678258
    a20, a21 = 2.57309411, -0.0508573
    a30 = -0.92393672

    def _locus(x, c):
        # Model (u-g) for colour x = (g-i) and metallicity c = [Fe/H].
        return (a00 + a01 * c + a02 * c ** 2 + a03 * c ** 3
                + a10 * x + a11 * x * c + a12 * x * c ** 2
                + a20 * x ** 2 + a21 * x ** 2 * c + a30 * x ** 3)

    gi = g - i_band
    ug = u - g

    feh_values = []
    feh_errors = []
    for x1, y1 in zip(gi, ug):
        # Pick the [Fe/H] search grid for this colour range; reject stars
        # outside the calibrated domain or below the metal-poor locus.
        if 0.66 < x1 < 1.24:
            grid = np.linspace(-4, 1, 101)
        elif 0.39 < x1 < 0.66:
            if y1 < _locus(x1, -1.5):
                continue  # below the [Fe/H] = -1.5 contour
            grid = np.linspace(-1.5, 1, 51)
        elif 0.26 < x1 < 0.39:
            if y1 < _locus(x1, -1.0):
                continue  # below the [Fe/H] = -1 contour
            grid = np.linspace(-1.0, 1, 41)
        else:
            continue  # outside the applicability range 0.26 < (g-i) < 1.24

        # Monte-Carlo perturbation of the observed colours (one independent
        # draw per grid point), as in the original calibration.
        x10 = x1 + error * np.random.randn(grid.size)
        y10 = y1 + error * np.random.randn(grid.size)
        model = _locus(x10, grid)

        # Colour-dependent random errors of (g-i) and (u-g).
        sigma1 = 0.013062754676023238 - 0.002093386575314095 * x1
        sigma2 = 0.02765716484703738 - 0.0019499350415479824 * y1

        likelihood = ((2 * np.pi) ** 0.5 * sigma2) ** (-1) * \
            np.e ** (-((y10 - model) ** 2) / (2 * sigma2 ** 2))
        best = grid[np.argmax(likelihood)]
        feh_values.append(best)

        # Error propagation; the square root can be undefined (NaN) when the
        # (g-i) error term dominates -- suppress numpy warnings as before.
        with np.errstate(divide='ignore', invalid='ignore'):
            d_dx = (a10 + a11 * best + a12 * best ** 2
                    + 2 * a20 * x1 + 2 * a21 * x1 * best + 3 * a30 * x1 ** 2)
            d_df = (a01 + 2 * a02 * best + 3 * a03 * best ** 2
                    + a11 * x1 + 2 * a12 * x1 * best + a21 * x1 ** 2)
            feh_errors.append(((sigma2 ** 2 - sigma1 ** 2 * d_dx ** 2) / d_df) ** 0.5)

    pd.DataFrame({"feh": feh_values, "feh_error": feh_errors}).to_csv(
        "dwarf_feh_predicted.csv", index=False)
/LalaTools-0.0.11-py3-none-any.whl/lalatools/manage_file_system/file_system.py | import os
def del_head(path):
    """
    Strip the directory part from a path, keeping only the final component.

    :param path: A path string, or a list of path strings.
    :return: The filename (str) for a string input, or a list of filenames
        for a list input.
    :raises TypeError: If the input is neither a string nor a list.
    """
    if isinstance(path, list):
        return [os.path.basename(entry) for entry in path]
    if isinstance(path, str):
        return os.path.basename(path)
    raise TypeError(f"Unexpected type {type(path)}. Expected 'list' or 'str'.")
def replace_split_mark(path, mark="/"):
    """
    Normalize the directory separators in a path to a chosen character.

    Backslash pairs are collapsed first, then all separators (``\\`` and
    ``/``) are replaced by *mark*.

    :param path: A path string, or a list of path strings.
    :param mark: The character to use as the new directory separator.
    :return: The converted path (str) for a string input, or a list of
        converted paths for a list input.
    :raises TypeError: If the input is neither a string nor a list.
    """
    def _convert(p):
        return p.replace("\\\\", "\\").replace("\\", "/").replace("/", mark)

    if isinstance(path, str):
        return _convert(path)
    if isinstance(path, list):
        return [_convert(p) for p in path]
    raise TypeError(f"Unexpected type {type(path)}. Expected 'list' or 'str'.")
def get_file_list(path, is_dir=True, is_file=True, abs_path=None, all_files=False, extension="", contain="", mark="/"):
    """
    Get a sorted list of files and/or directories under *path*.

    :param path: The directory to list.
    :param is_dir: Include directories in the result.
    :param is_file: Include files in the result.
    :param abs_path: If True, return full paths; if False, names only.
        Defaults to True for recursive listings and False otherwise.
    :param all_files: If True, walk *path* recursively.
    :param extension: If set, keep only files with this extension
        (directories are then excluded entirely).
    :param contain: If set, keep only entries containing this substring.
    :param mark: Directory separator used in the returned paths.
    :return: A sorted list of matching files and/or directories.
    """
    files_found = []
    dirs_found = []
    # Accept both ".txt" and "txt" spellings of the extension filter.
    if extension and not extension.startswith("."):
        extension = "." + extension
    if all_files:
        if abs_path is None:
            abs_path = True
        for cur_dir, dirs, files in os.walk(path):
            if is_file:
                files_found.extend(os.path.join(cur_dir, name) for name in files
                                   if name.endswith(extension) and contain in name)
            # A non-empty extension implies a file-only search.
            if is_dir and not extension:
                dirs_found.extend(os.path.join(cur_dir, name) for name in dirs
                                  if contain in name)
    else:
        if abs_path is None:
            abs_path = False
        if is_file:
            files_found = [os.path.join(path, name) for name in os.listdir(path)
                           if os.path.isfile(os.path.join(path, name))
                           and name.endswith(extension) and contain in name]
        if is_dir and not extension:
            dirs_found = [os.path.join(path, name) for name in os.listdir(path)
                          if os.path.isdir(os.path.join(path, name)) and contain in name]
    result = replace_split_mark(files_found + dirs_found, mark=mark)
    if not abs_path:
        result = [os.path.basename(entry) for entry in result]
    result.sort()
    return result
/GenRS-0.0.5.tar.gz/GenRS-0.0.5/README.md | # GenRS
A generative learning-based Framework for Recommendation Systems algorithms
# Software requirements:
- Python 3.6 or higher
- Tensorflow 1.15 GPU
- Numpy 1.17
- Scipy 1.3
- Pandas 0.25
- Bottleneck 1.3
# Algorithms list available:
- [CFGAN](https://dl.acm.org/doi/pdf/10.1145/3269206.3271743?casa_token=INiS3p2UrDAAAAAA:EQRrS7IBusVt_F8IYiAUtsIGGHHd17ki69QEcNkJwFkq5PuiBvX96OzC8CVcoWEkqhckTg8X7f4)
- [IRGAN](https://arxiv.org/pdf/1705.10513.pdf)
- [EASE](https://arxiv.org/pdf/1905.03375.pdf)
- [VAE](https://arxiv.org/pdf/1802.05814)
- [DAE](https://arxiv.org/pdf/1802.05814)
## TO DO:
- Check which dataset you want to use from [here](https://drive.google.com/drive/u/2/folders/1mX0QMJ8kHTlU-ziK95SWvZb0ehjl5FWb)
- Download and extract the preferred dataset
### Set the framework configuration
- Download cfg.JSON file from https://github.com/cedo1995/GenRS/tree/master/Cfg
- Check if **path** contains the path to your chosen dataset file
- Check separator (**sep**) used in selected dataset and update it if necessary
- Check **algos** you want to compute respecting the list of string *lowercase* format as predefined
- Define the number of *users* to use as *validation* and *test set* through **heldout_us_val** and **heldout_us_test** param
- Check **metrics** you want to compute from: ["precision@k", "recall@k", "ndcg@k", "ap@k", "auc"]
### Set the algorithms configuration
- Download {*alg*}_cfg.JSON, where {*alg*} corresponds to the name of each algorithm previously set in the **algos** parameter of the cfg.JSON file
### Import RecSys module by:
from GenRS.Main.rec_sys import RecSys
### Define the path to cfg.JSON file and {alg}_cfg.JSON files
### Execute
RecSys(path_frm_cfg, list_algs_cfg_path)
### Results will be written to the *console.log.txt* file
| PypiClean |
/HEBO-0.3.2-py3-none-any.whl/hebo/optimizers/hebo_embedding.py |
# This program is free software; you can redistribute it and/or modify it under
# the terms of the MIT license.
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the MIT License for more details.
import numpy as np
import pandas as pd
import torch
from torch.quasirandom import SobolEngine
from sklearn.preprocessing import power_transform
from hebo.design_space.design_space import DesignSpace
from hebo.design_space.numeric_param import NumericPara
from hebo.models.model_factory import get_model
from hebo.acquisitions.acq import Acquisition, MACE, Mean, Sigma
from hebo.acq_optimizers.evolution_optimizer import EvolutionOpt
from .abstract_optimizer import AbstractOptimizer
from .hebo import HEBO
# NOTE(review): min(1, n) evaluates to 1 for any n >= 1, so this pins torch to
# a single thread regardless of the machine. If "at least one thread" was the
# intent, max(1, ...) was probably meant — confirm before changing.
torch.set_num_threads(min(1, torch.get_num_threads()))
def gen_emb_space(eff_dim : int, scale : float) -> DesignSpace:
    """Build the low-dimensional embedded search space.

    :param eff_dim: Effective (embedding) dimensionality.
    :param scale: Half-width of each embedded coordinate's range; the sign is
        ignored (``abs`` replaces the previous hand-rolled negation).
    :return: A DesignSpace with parameters y0..y{eff_dim-1}, each bounded to
        [-scale, scale].
    """
    scale = abs(scale)
    space = DesignSpace().parse([{'name' : f'y{i}', 'type' : 'num', 'lb' : -1 * scale, 'ub' : scale} for i in range(eff_dim)])
    return space
def check_design_space(space : DesignSpace) -> bool:
    """
    Validate that *space* is usable for the embedding optimizer: every
    parameter must be numeric and bounded to exactly [-1, 1].
    """
    for param in space.paras.values():
        if not isinstance(param, NumericPara):
            return False
    ones = torch.ones(space.num_paras)
    # Lower bounds must all equal -1 and upper bounds +1 (within 1e-6 total).
    if not (space.opt_lb + ones).abs().sum() < 1e-6:
        return False
    if not (space.opt_ub - ones).abs().sum() < 1e-6:
        return False
    return True
def gen_proj_matrix(eff_dim : int, dim : int, strategy : str = 'alebo'):
    """Draw a random (eff_dim, dim) projection matrix.

    strategy == 'hesbo': sparse sign matrix — each column has exactly one
    nonzero entry, +/-1, at a random row.
    strategy == 'alebo': Gaussian matrix with unit-norm columns.
    any other value:     plain Gaussian matrix.
    """
    if strategy == 'hesbo':
        matrix = np.zeros((eff_dim, dim))
        for col in range(dim):
            sign = np.random.choice([-1, 1])
            row = np.random.choice(eff_dim)
            matrix[row, col] = sign * 1.0
        return matrix
    matrix = np.random.randn(eff_dim, dim)
    if strategy == 'alebo':
        matrix = matrix / np.sqrt((matrix ** 2).sum(axis = 0))
    return matrix
def gen_mace_cls(proj_matrix):
    """Build a MACE acquisition subclass that adds one constraint: the
    low-dimensional candidate, projected back to the original space via
    *proj_matrix*, must stay inside the [-1, 1] box.

    :param proj_matrix: (eff_dim, dim) embedding matrix; candidates map to
        the original space as ``x @ proj_matrix``.
    :return: The MACE_Embedding class (a closure over *proj_matrix*).
    """
    class MACE_Embedding(Acquisition):
        def __init__(self, model, best_y, **conf):
            super().__init__(model, **conf)
            # Delegate the multi-objective acquisition values to plain MACE.
            self.mace = MACE(model, best_y, **conf)
            self.proj_matrix = torch.FloatTensor(proj_matrix)

        @property
        def num_constr(self):
            # One constraint: projected point inside the [-1, 1] box.
            return 1

        @property
        def num_obj(self):
            # Same three objectives as MACE.
            return 3

        def eval(self, x : torch.FloatTensor, xe : torch.LongTensor) -> torch.FloatTensor:
            # Only continuous (embedded) parameters are expected here.
            assert xe is None or xe.shape[1] == 0
            mace_acq = self.mace(x, xe)
            # Project to the original space; the constraint value is the total
            # per-point overflow beyond [-1, 1] (0 when feasible).
            x_orig = torch.mm(x, self.proj_matrix)
            bound_vio = (x_orig.abs() - 1.0).clamp(min = 0.).sum(axis = 1).view(-1, 1)
            return torch.cat([mace_acq, bound_vio], dim = 1)
    return MACE_Embedding
class HEBO_Embedding(AbstractOptimizer):
    """HEBO running in a random low-dimensional embedding of the search space.

    The original *space* must be numeric with all bounds equal to [-1, 1]
    (checked via ``check_design_space``). Optimization happens in an
    ``eff_dim``-dimensional space; candidates are mapped back through a random
    projection matrix, either clipped with tanh (``clip=True``) or constrained
    via the MACE_Embedding acquisition (``clip=False``).
    """
    support_parallel_opt = True
    support_combinatorial = False
    support_contextual = False
    def __init__(self,
            space : DesignSpace,
            model_name = 'gpy',
            eff_dim : int = 1,
            scale : float = 1,
            strategy : str = 'alebo',
            clip : bool = False,
            rand_sample = None):
        # space: original high-dimensional design space (must be [-1, 1]^d).
        # eff_dim / scale: dimensionality and half-width of the embedded space.
        # strategy: projection-matrix flavour ('alebo', 'hesbo', or random).
        # clip: if True, tanh-squash projections instead of constraining them.
        super().__init__(space)
        assert check_design_space(space)
        self.space = space
        self.scale = scale
        self.eff_dim = eff_dim
        self.proj_matrix = gen_proj_matrix(eff_dim, space.num_paras, strategy)
        self.eff_space = gen_emb_space(eff_dim, scale)
        self.clip = clip
        # With clipping, plain MACE suffices; otherwise use the constrained
        # acquisition that penalizes out-of-box projections.
        self.acq_cls = MACE if self.clip else gen_mace_cls(self.proj_matrix)
        self.mace = HEBO(self.eff_space, model_name, rand_sample, acq_cls = self.acq_cls)
        # Monkey-patch the inner optimizer so its random/initial samples are
        # drawn with this class's embedding-aware sampler.
        self.mace.quasi_sample = self.quasi_sample

    def quasi_sample(self, n, fix_input = None, factor = 16):
        """Draw *n* low-dimensional samples whose projections stay in [-1, 1].

        Without clipping this is rejection sampling: high-dimensional samples
        are least-squares-mapped into the embedding (scaled by *factor*),
        projected back, and kept only if the projection lies in the box.
        *factor* adapts: it grows while every candidate is accepted and
        shrinks when none are.
        """
        assert fix_input is None
        if self.clip:
            return self.eff_space.sample(n)
        B = torch.FloatTensor(self.proj_matrix)
        # NOTE(review): torch.cholesky is deprecated in recent torch releases
        # in favour of torch.linalg.cholesky — confirm target torch version.
        L = torch.cholesky(B.mm(B.t()))
        samp = pd.DataFrame(columns = self.eff_space.numeric_names)
        while samp.shape[0] < n:
            samp_hd = self.space.sample(100)
            # Solve B B^T a = B x_hd, i.e. map x_hd to the embedding (scaled).
            alpha = B.mm(torch.FloatTensor(samp_hd.values.T))
            samp_ld = pd.DataFrame(factor * torch.cholesky_solve(alpha, L).t().numpy(), columns = samp.columns)
            samp_pj = self.project(samp_ld)
            # Keep only candidates whose projection stays inside [-1, 1].
            samp_ld = samp_ld[samp_pj.max(axis = 1) <= 1.0]
            samp_ld = samp_ld[samp_pj.min(axis = 1) >= -1.0]
            if samp_ld.shape[0] == samp_hd.shape[0]:
                # Every candidate fit: expand the scale and (deliberately, it
                # seems) discard this batch so coverage keeps growing.
                factor /= 0.8
                continue
            elif samp_ld.shape[0] == 0:
                # Nothing fit: shrink the scale.
                factor *= 0.8
            # NOTE(review): DataFrame.append was removed in pandas 2.0; this
            # requires pandas < 2 (pd.concat is the modern equivalent).
            samp = samp.append(samp_ld, ignore_index = True)
        return samp.head(n)

    def project(self, df_x_ld : pd.DataFrame) -> pd.DataFrame:
        """Map low-dimensional points back to the original space
        (tanh-squashed into [-1, 1] when clipping is enabled)."""
        x = df_x_ld[self.eff_space.numeric_names].values
        x_hd = np.matmul(x, self.proj_matrix)
        if self.clip:
            x_hd = np.tanh(x_hd)
        return pd.DataFrame(x_hd, columns = self.space.numeric_names)

    def suggest(self, n_suggestions : int = 1):
        """Delegate suggestion to the inner HEBO (low-dimensional points)."""
        df_suggest = self.mace.suggest(n_suggestions)
        return df_suggest

    def observe(self, X, y):
        """Feed observations (low-dimensional X) to the inner HEBO."""
        self.mace.observe(X, y)

    @property
    def best_x(self)->pd.DataFrame:
        # Best point in the embedded space (not projected back).
        return self.mace.best_x

    @property
    def best_y(self)->float:
        return self.mace.best_y
/Direct_Downloader-0.7-py3-none-any.whl/Direct_Downloader-0.7.data/scripts/direct_downloader.py |
# 3rd Party Modules
import requests
# Python Modules
import os
import re
from threading import Thread, active_count
from queue import Queue
import shutil
import logging
from time import sleep
""" Setting up logging """
# NOTE(review): basicConfig at import time configures the root logger for any
# program importing this module, and DEBUG level is noisy — confirm this is
# intentional for library use.
LOG_FORMAT = "[%(levelname)s] %(asctime)s - %(message)s"
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
class Download_Manager():
    """Coordinates a pool of Download_Worker threads over a shared URL queue."""

    def __init__(self, list_of_urls: list, threads: int, directory_path: str):
        # Shared work queue consumed by the workers.
        self.queue = Queue()
        self.number_of_threads = threads
        self.directory_path = directory_path
        # Enqueue every non-blank URL.
        for url in list_of_urls:
            if url in ('', ' '):
                continue
            self.queue.put(url)

    def start(self):
        """Spawn the worker threads and block until all downloads finish."""
        self.workers = [Download_Worker(self.queue, self.directory_path)
                        for _ in range(self.number_of_threads)]
        # Workers are daemon threads; wait until the queue is drained and only
        # the main thread remains (all workers have exited).
        while not self.queue.empty() or active_count() > 1:
            sleep(0.1)
class Download_Worker():
    """Daemon worker that drains a shared queue of URLs, downloading each one
    into *directory_path*. The worker thread starts immediately on
    construction and exits once the queue is empty."""

    def __init__(self, queue, directory_path):
        # Shared work queue of URL strings.
        self.queue = queue
        # Normalize the target directory so it ends with exactly one
        # separator. (The original `!= '/' or != '\\'` test was always true —
        # a character cannot equal both — so separator-terminated paths
        # gained a duplicate slash.)
        if not directory_path.endswith(('/', '\\')):
            self.directory_path = directory_path + '/'
        else:
            self.directory_path = directory_path
        # daemon=True so a stuck worker never blocks interpreter shutdown.
        self.thread = Thread(target=self.download, daemon=True, args=())
        self.thread.start()

    def delete_file(self, path: str):
        """Remove *path* if it exists (used to discard partial downloads)."""
        if os.path.exists(path):
            os.remove(path)

    def get_file_name(self, path: str):
        """Return the final component of a URL (text after the last '/')."""
        file_name_start_pos = path.rfind('/') + 1
        file_name = path[file_name_start_pos:]
        return file_name

    def download(self):
        """Drain the queue, streaming each URL to disk.

        Already-present files are skipped; on any failure the partially
        written file is deleted so a later run can retry cleanly.
        """
        while not self.queue.empty():
            try:
                url = self.queue.get()
                file_name = self.get_file_name(url)
                # Skip files that were already downloaded.
                if os.path.exists(self.directory_path + file_name):
                    logging.debug('Skipping: ' + url)
                    continue
                # NOTE(review): no timeout is set, so a stalled server can
                # hang this worker — consider requests.get(..., timeout=...).
                req = requests.get(url, stream=True)
                if req.status_code != 200:
                    logging.debug('Could not download:' + url)
                    continue
                logging.info('Downloading: ' + url)
                with open(self.directory_path + file_name, 'wb') as current_file:
                    req.raw.decode_content = True
                    shutil.copyfileobj(req.raw, current_file)
            except Exception as e:
                # Discard the incomplete file before moving on.
                logging.debug('ERROR DOWNLOADING: ' + str(e))
                self.delete_file(self.directory_path + file_name)
// Summernote rich-text editor — Mongolian (mn-MN) locale bundle.
// Auto-generated webpack UMD wrapper: it registers the translation table on
// $.summernote.lang['mn-MN']. Untranslated entries intentionally keep their
// English fallback text; prefer regenerating the bundle over hand-editing.
(function webpackUniversalModuleDefinition(root, factory) {
	// UMD: CommonJS, AMD, or attach exports onto the root/global object.
	if(typeof exports === 'object' && typeof module === 'object')
		module.exports = factory();
	else if(typeof define === 'function' && define.amd)
		define([], factory);
	else {
		var a = factory();
		for(var i in a) (typeof exports === 'object' ? exports : root)[i] = a[i];
	}
})(self, function() {
return /******/ (() => { // webpackBootstrap
var __webpack_exports__ = {};
// Starsoft Mongolia LLC Temuujin Ariunbold
(function ($) {
  $.extend($.summernote.lang, {
    'mn-MN': {
      font: {
        bold: 'Тод',
        italic: 'Налуу',
        underline: 'Доогуур зураас',
        clear: 'Цэвэрлэх',
        height: 'Өндөр',
        name: 'Фонт',
        superscript: 'Дээд илтгэгч',
        subscript: 'Доод илтгэгч',
        strikethrough: 'Дарах',
        size: 'Хэмжээ'
      },
      image: {
        image: 'Зураг',
        insert: 'Оруулах',
        resizeFull: 'Хэмжээ бүтэн',
        resizeHalf: 'Хэмжээ 1/2',
        resizeQuarter: 'Хэмжээ 1/4',
        floatLeft: 'Зүүн талд байрлуулах',
        floatRight: 'Баруун талд байрлуулах',
        floatNone: 'Анхдагч байрлалд аваачих',
        shapeRounded: 'Хүрээ: Дугуй',
        shapeCircle: 'Хүрээ: Тойрог',
        shapeThumbnail: 'Хүрээ: Хураангуй',
        shapeNone: 'Хүрээгүй',
        dragImageHere: 'Зургийг энд чирч авчирна уу',
        dropImage: 'Drop image or Text',
        selectFromFiles: 'Файлуудаас сонгоно уу',
        maximumFileSize: 'Файлын дээд хэмжээ',
        maximumFileSizeError: 'Файлын дээд хэмжээ хэтэрсэн',
        url: 'Зургийн URL',
        remove: 'Зургийг устгах',
        original: 'Original'
      },
      video: {
        video: 'Видео',
        videoLink: 'Видео холбоос',
        insert: 'Видео оруулах',
        url: 'Видео URL?',
        providers: '(YouTube, Vimeo, Vine, Instagram, DailyMotion болон Youku)'
      },
      link: {
        link: 'Холбоос',
        insert: 'Холбоос оруулах',
        unlink: 'Холбоос арилгах',
        edit: 'Засварлах',
        textToDisplay: 'Харуулах бичвэр',
        url: 'Энэ холбоос хаашаа очих вэ?',
        openInNewWindow: 'Шинэ цонхонд нээх'
      },
      table: {
        table: 'Хүснэгт',
        addRowAbove: 'Add row above',
        addRowBelow: 'Add row below',
        addColLeft: 'Add column left',
        addColRight: 'Add column right',
        delRow: 'Delete row',
        delCol: 'Delete column',
        delTable: 'Delete table'
      },
      hr: {
        insert: 'Хэвтээ шугам оруулах'
      },
      style: {
        style: 'Хэв маяг',
        p: 'p',
        blockquote: 'Иш татах',
        pre: 'Эх сурвалж',
        h1: 'Гарчиг 1',
        h2: 'Гарчиг 2',
        h3: 'Гарчиг 3',
        h4: 'Гарчиг 4',
        h5: 'Гарчиг 5',
        h6: 'Гарчиг 6'
      },
      lists: {
        unordered: 'Эрэмбэлэгдээгүй',
        ordered: 'Эрэмбэлэгдсэн'
      },
      options: {
        help: 'Тусламж',
        fullscreen: 'Дэлгэцийг дүүргэх',
        codeview: 'HTML-Code харуулах'
      },
      paragraph: {
        paragraph: 'Хэсэг',
        outdent: 'Догол мөр хасах',
        indent: 'Догол мөр нэмэх',
        left: 'Зүүн тийш эгнүүлэх',
        center: 'Төвд эгнүүлэх',
        right: 'Баруун тийш эгнүүлэх',
        justify: 'Мөрийг тэгшлэх'
      },
      color: {
        recent: 'Сүүлд хэрэглэсэн өнгө',
        more: 'Өөр өнгөнүүд',
        background: 'Дэвсгэр өнгө',
        foreground: 'Үсгийн өнгө',
        transparent: 'Тунгалаг',
        setTransparent: 'Тунгалаг болгох',
        reset: 'Анхдагч өнгөөр тохируулах',
        resetToDefault: 'Хэвд нь оруулах'
      },
      shortcut: {
        shortcuts: 'Богино холбоос',
        close: 'Хаалт',
        textFormatting: 'Бичвэрийг хэлбэржүүлэх',
        action: 'Үйлдэл',
        paragraphFormatting: 'Догол мөрийг хэлбэржүүлэх',
        documentStyle: 'Бичиг баримтын хэв загвар',
        extraKeys: 'Extra keys'
      },
      help: {
        'insertParagraph': 'Insert Paragraph',
        'undo': 'Undoes the last command',
        'redo': 'Redoes the last command',
        'tab': 'Tab',
        'untab': 'Untab',
        'bold': 'Set a bold style',
        'italic': 'Set a italic style',
        'underline': 'Set a underline style',
        'strikethrough': 'Set a strikethrough style',
        'removeFormat': 'Clean a style',
        'justifyLeft': 'Set left align',
        'justifyCenter': 'Set center align',
        'justifyRight': 'Set right align',
        'justifyFull': 'Set full align',
        'insertUnorderedList': 'Toggle unordered list',
        'insertOrderedList': 'Toggle ordered list',
        'outdent': 'Outdent on current paragraph',
        'indent': 'Indent on current paragraph',
        'formatPara': 'Change current block\'s format as a paragraph(P tag)',
        'formatH1': 'Change current block\'s format as H1',
        'formatH2': 'Change current block\'s format as H2',
        'formatH3': 'Change current block\'s format as H3',
        'formatH4': 'Change current block\'s format as H4',
        'formatH5': 'Change current block\'s format as H5',
        'formatH6': 'Change current block\'s format as H6',
        'insertHorizontalRule': 'Insert horizontal rule',
        'linkDialog.show': 'Show Link Dialog'
      },
      history: {
        undo: 'Буцаах',
        redo: 'Дахин хийх'
      },
      specialChar: {
        specialChar: 'Тусгай тэмдэгт',
        select: 'Тусгай тэмдэгт сонгох'
      }
    }
  });
})(jQuery);
/******/ 	return __webpack_exports__;
/******/ })()
;
});
//# sourceMappingURL=summernote-mn-MN.js.map
/EMPOL_GUI-2.2.8.tar.gz/EMPOL_GUI-2.2.8/EMPOL_GUI/single_obj_stack.py | import numpy as np
import math
import matplotlib.pyplot as plt
from astropy.io import fits
import os
import fnmatch
from astropy.io.fits import getheader
from astropy.stats import sigma_clipped_stats
from astropy.modeling import models, fitting
from scipy.optimize import curve_fit
from scipy import interpolate
import natsort
from astropy.stats import sigma_clipped_stats
from scipy import ndimage
import shutil
from photutils.centroids import centroid_com, centroid_quadratic
from photutils.centroids import centroid_1dg, centroid_2dg
#from trial_gui import *
def COM(image, x, y, r, s):
    """Locate the brightest pixel near an approximate position.

    Despite the historical name ("center of mass"), this finds the position
    of the maximum pixel inside a cut-out of *image* around (x, y) and maps
    it back to full-frame coordinates.

    :param image: 2-D array (full frame).
    :param x, y: Approximate centre (rounded to the nearest pixel).
    :param r, s: Cut-out extends r pixels before and s pixels after the
        centre along each axis.
    :return: (x, y) of the brightest pixel in full-frame coordinates.
    """
    cx = int(round(x))
    cy = int(round(y))
    # Cut-out around the approximate centre; (x0, y0) is its origin in the
    # full frame, added back to the sub-image peak position below.
    sub = image[cy - r:cy + s, cx - r:cx + s]
    x0 = cx - r
    y0 = cy - r
    row, col = ndimage.maximum_position(sub)
    return (col + x0, row + y0)
def COM_iter(image, a, b, p, q):
    """Iteratively re-centre with COM until the position stops moving.

    Repeats COM from the latest estimate until successive estimates differ
    by less than 0.1 px, then returns the converged (x, y).
    """
    while True:
        nx, ny = COM(image, a, b, p, q)
        if math.hypot(nx - a, ny - b) < 0.1:
            return nx, ny
        a, b = nx, ny
def cent_2g(Image, x, y, h, k):
    """Refine a centroid with a 2-D Gaussian fit (photutils.centroid_2dg).

    A (2k x 2h) cut-out around the rounded (x, y) is fitted; the sub-image
    centroid is shifted back to full-frame coordinates.
    """
    cx, cy = int(round(x)), int(round(y))
    cut = Image[cy - k:cy + k, cx - h:cx + h]
    dx, dy = centroid_2dg(cut)
    return (cx - h + dx, cy - k + dy)
def cent(Image, x, y, h, k):
    """Refine a centroid with a quadratic fit (photutils.centroid_quadratic).

    A (2k x 2h) cut-out around the rounded (x, y) is fitted; the sub-image
    centroid is shifted back to full-frame coordinates.
    """
    cx, cy = int(round(x)), int(round(y))
    cut = Image[cy - k:cy + k, cx - h:cx + h]
    dx, dy = centroid_quadratic(cut)
    return (cx - h + dx, cy - k + dy)
def single_click(event):
    """Matplotlib mouse handler: on right-click (button 3), record the click
    position in the module-level globals X1, Y1 and close figure 1 so the
    blocking plt.show() in single_stack returns."""
    global X1,Y1
    if event.button == 3:
        X1, Y1 = event.xdata, event.ydata
        plt.close(1)
def Gauss2D(xy, x0, y0, s, A):
    """Circular 2-D Gaussian for curve_fit.

    :param xy: Tuple (x, y) of coordinates (scalars or arrays).
    :param x0, y0: Peak position.
    :param s: Standard deviation (same in both axes).
    :param A: Peak amplitude.
    :return: A * exp(-((x-x0)^2 + (y-y0)^2) / (2 s^2)).
    """
    x, y = xy
    inv_two_s2 = 1 / (2 * s ** 2)
    return A * np.exp(-inv_two_s2 * ((x - x0) ** 2 + (y - y0) ** 2))
k=20  # default half-width (px) of centroiding cut-outs; shadowed by local k in the functions below
def center(subimg, r):
    """Fit a 2-D Gaussian to *subimg* and return the fit parameters.

    The initial guess places the peak at (r, r) with width 10% of the larger
    image dimension and amplitude equal to the image maximum.

    :return: Array [x0, y0, s, A] from scipy.optimize.curve_fit.
    """
    cols = np.arange(0, subimg.shape[1], 1)
    rows = np.arange(0, subimg.shape[0], 1)
    grid_x, grid_y = np.meshgrid(cols, rows)
    guess = [r, r, max(*subimg.shape) * 0.1, np.max(subimg)]
    popt, _ = curve_fit(Gauss2D, (grid_x.ravel(), grid_y.ravel()), subimg.ravel(), p0=guess)
    return popt
def single_stack(path, star, band, cycl_len ):
    """Bias-subtract, flat-field, register and median-stack EMPOL frames of a
    single object, per band and per set.

    For each band the user right-clicks the bright star once per set (GUI);
    that position seeds the per-frame centroiding used to shift-align frames
    before stacking. Stacked frames are written under
    ``<path>/<star>/final_frac_med_<band>_set_<q>/``.

    :param path: Root data directory containing <star>, Bias and Flat folders.
    :param star: Object (sub-directory and filename prefix) name.
    :param band: List of band/filter names, e.g. ['R'].
    :param cycl_len: Number of frames per polarimetric cycle; frame i of every
        cycle is stacked with frame i of all other cycles.
    :return: Nested list of stacked frames indexed [band][set][cycle position].
    """
    imgpath = path +'/'+star
    bias_path = path +'/Bias'
    Flat_path = path +'/Flat'
    # Natural sort so frame 10 follows frame 9, not frame 1.
    img_list = os.listdir(imgpath)
    img_list = natsort.natsorted(img_list)
    Simgs = []
    Sname = []
    for B in range(0,len(band)): # loop on bands
        flat_list = os.listdir(Flat_path)
        flat_list = natsort.natsorted(flat_list)
        # Pre-combined master flat for this band.
        flat = fits.getdata(os.path.join(Flat_path, band[B]+'mean.fits'))
        # Count sets by matching the first frame (_1.fits) of each set.
        sets=[]
        for files in img_list:
            if fnmatch.fnmatch(files, star+'_'+band[B]+'_*s_s*_1.fits'):
                sets.append(files)
        p = len(sets) # number of sets
        print('number of sets: ', p)
        imgs =[]
        # No set suffix in filenames means a single unnumbered set.
        if p==0:
            p = 1
        for k in range(1,p+1): # sets are numbered from s1, so loop 1..p
            setlist = []
            for files in img_list:
                if p ==1 :
                    if fnmatch.fnmatch(files, '*_'+band[B]+'_*s_*'):
                        setlist.append(files)
                else:
                    if fnmatch.fnmatch(files, '*_'+band[B]+'_*s_s'+str(k)+'_*'):
                        setlist.append(files)
            imgs.append(setlist)
        set_imgs = []
        set_name=[]
        for q in range(1,p+1): # loop on sets
            print('set number: ', q)
            # Calibrate the first frame of the set for interactive display.
            data = fits.getdata(os.path.join(imgpath,imgs[q-1][0]))
            data = data.astype('>f8')
            img1 = (data[0,:,:]-fits.getdata(os.path.join(bias_path, 'masterBias.fits')))/flat
            mean, med, std = sigma_clipped_stats(img1)
            plt.imshow(img1,cmap='gray', vmin = med-5*std, vmax = med+5*std)
            plt.colorbar()
            plt.title('click on center of bright star '+str(imgs[q-1][0]))
            # single_click stores the right-click position in globals X1, Y1
            # and closes the figure, unblocking plt.show().
            cmd=plt.connect('button_release_event', single_click)
            plt.show()
            k=20  # NOTE: shadows the set-loop variable k (that loop is done)
            a=int(np.round(X1))
            b=int(np.round(Y1))
            # Refine the clicked position; this is the reference centre that
            # every frame is aligned to.
            actual1_Cx,actual1_Cy = cent(img1,X1,Y1,30,30)
            ax = int(np.round(actual1_Cx))
            by = int(np.round(actual1_Cy))
            # Fresh output directory per band/set (removed if it exists).
            final_path=imgpath+'/final_frac_med_'+band[B]+'_set_'+str(q)
            if os.path.exists(final_path):
                shutil.rmtree(final_path)
            com = 'mkdir '+final_path
            os.system(com)
            n =int(len(imgs[q-1])/cycl_len)  # number of cycles in this set
            SSimgs = []
            SSname=[]
            for i in range(0,cycl_len): # loop on positions within a cycle
                m=0
                Cx=ax # start each stack from the reference centre
                Cy=by
                img = np.zeros((256,256, n))
                for j in range(n):  # loop on cycles; pick frame i of each
                    image = fits.getdata(os.path.join(imgpath, imgs[q-1][i+m]))
                    image = image.astype('>f8')
                    imge = (image[0,:,:]-fits.getdata(os.path.join(bias_path, 'masterBias.fits')))/flat
                    # Track the star from the previous frame's position.
                    actual_Cx,actual_Cy = cent(imge, Cx, Cy, 20, 20)
                    if(np.isnan(actual_Cx)==True):
                        # Quadratic fit failed: fall back to brightest-pixel
                        # iteration. NOTE(review): this uses the raw `image`,
                        # not the calibrated `imge` — confirm intentional.
                        print('COM === TRUE ')
                        actual_Cx, actual_Cy = COM_iter(image, Cx, Cy,20,21)
                    Cx = actual_Cx
                    Cy = actual_Cy
                    # Shift the frame so the star lands on the reference centre.
                    shiftX, shiftY = actual1_Cx-actual_Cx, actual1_Cy-actual_Cy
                    img[:,:,j]=ndimage.shift(imge, [shiftY, shiftX], output=None, order=3, mode='constant', cval=0.0, prefilter=False)
                    m=m+cycl_len  # jump to the same position in the next cycle
                # Keep the header of the first frame used for this position.
                header= getheader(os.path.join(imgpath, imgs[q-1][i]))
                Final = np.median(img, axis=2)
                SSimgs.append(Final)
                SSname.append(star+'_'+band[B]+'_'+str(i)+'.fits')
                F = os.path.join(final_path, star+'_'+band[B]+'_'+str(i)+'.fits')
                fits.writeto(F, Final, header)
            set_imgs.append(SSimgs)
            set_name.append(SSname)
        Simgs.append(set_imgs)
        Sname.append(set_name)
    return(Simgs)
#band = ['R'] #'i' # choose the filter
#path = '/home/ubu123/Desktop/PRL/EMPOL/empol_gui/EMPOL_V1/V1_Data'
#bias_path = '/home/ubu123/Desktop/PRL/EMPOL/empol_gui/EMPOL_V1/V1_Data/Bias'
#Flat_path = '/home/ubu123/Desktop/PRL/EMPOL/empol_gui/EMPOL_V1/V1_Data/Flat'
#Stack = single_stack(path, star, band, cycl_len )
#print(len(Stack), len(Stack[0]), len(Stack[0][0]))
#print(Stack.shape) | PypiClean |
/BlueWhale3_SingleCell-1.3.5-py3-none-any.whl/orangecontrib/single_cell/widgets/owloaddata.py | import os
import sys
from typing import List
from serverfiles import sizeformat
from AnyQt.QtCore import Qt, QFileInfo, QTimer, Signal
from AnyQt.QtGui import QStandardItemModel, QStandardItem
from AnyQt.QtWidgets import (
QSizePolicy, QGridLayout, QHBoxLayout, QFormLayout,
QLabel, QComboBox, QSpinBox, QCheckBox, QPushButton,
QStyle, QApplication, QFileDialog, QFileIconProvider,
QWidget
)
from AnyQt.QtCore import pyqtSlot as Slot
from Orange.data import Table
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils.filedialogs import RecentPath
from Orange.widgets.utils.buttons import VariableTextPushButton
from orangecontrib.single_cell.widgets.load_data import get_data_loader, Loader
from orangecontrib.single_cell.i18n_config import *
def __(key):
    """Shorthand translator: look up *key* in this widget's i18n namespace."""
    return i18n.t(f'single_cell.owloaddata.{key}')
# Translated file-dialog filter entries for data files; list order sets the
# order shown in the dialog.
Formats = [
    __("format.count"),
    __("format.tab"),
    __("format.csv"),
    __("format.matrix"),
    __("format.xls"),
    __("format.pickle"),
    __("format.all_file")
]

# Translated file-dialog filter entries for row/column annotation files.
AnnotationFormats = [
    __("format.meta"),
    __("format.tab"),
    __("format.csv"),
    __("format.all_file"),
]
def RecentPath_asqstandarditem(pathitem):
    # type: (RecentPath) -> QStandardItem
    """Wrap a RecentPath in a QStandardItem: file icon + basename as display
    text, full path as tooltip, and the RecentPath stored under UserRole."""
    provider = QFileIconProvider()
    # Basename of the normalized path (drops trailing separators).
    display_name = os.path.basename(os.path.normpath(pathitem.abspath))
    item = QStandardItem(provider.icon(QFileInfo(pathitem.abspath)), display_name)
    item.setToolTip(pathitem.abspath)
    item.setData(pathitem, Qt.UserRole)
    return item
def init_recent_paths_model(model, paths, relpaths=None):
    # type: (QStandardItemModel, List[RecentPath], Optional[List[str]]) -> None
    """Populate *model* with one QStandardItem per recent path.

    Paths whose file can no longer be found (searched relative to *relpaths*)
    are disabled, unselectable, and flagged in their tooltip.

    The previous default of a mutable ``[]`` for *relpaths* is replaced with
    the None-sentinel idiom; behaviour is unchanged.
    """
    if relpaths is None:
        relpaths = []
    for pathitem in paths:
        item = RecentPath_asqstandarditem(pathitem)
        abspath = pathitem.search(searchpaths=relpaths)
        if not (abspath and os.path.exists(abspath)):
            item.setEnabled(False)
            item.setSelectable(False)
            item.setToolTip(item.toolTip() + __("msg.miss_file"))
        model.appendRow(item)
def insert_recent_path(model, path, relpaths=None):
    # type: (QStandardItemModel, RecentPath, Optional[List[str]]) -> int
    """Move *path* to the top of *model*, inserting a new item if absent.

    Always returns 0 (the row the item ends up in). *relpaths* is kept for
    signature symmetry with init_recent_paths_model but is currently unused;
    its mutable ``[]`` default is replaced with the None-sentinel idiom.
    """
    if relpaths is None:
        relpaths = []
    index = -1
    # Find an existing row holding the same (normalized) path.
    for i in range(model.rowCount()):
        item = model.item(i, 0)
        pathitem = item.data(Qt.UserRole)
        if isinstance(pathitem, RecentPath) and \
                samepath(pathitem.abspath, path.abspath):
            index = i
            break
    if index != -1:
        # takeRow returns the row's item list, accepted by insertRow below.
        item = model.takeRow(index)
    else:
        item = RecentPath_asqstandarditem(path)
    model.insertRow(0, item)
    return 0
def samepath(p1, p2):
    """True if two paths are equal after case- and separator-normalization
    (case-insensitive only on platforms where the filesystem is)."""
    canon1 = os.path.normpath(os.path.normcase(p1))
    canon2 = os.path.normpath(os.path.normcase(p2))
    return canon1 == canon2
class RunaroundSettingsHandler(settings.SettingsHandler):
    """Settings handler that flushes the widget's current UI state into its
    settings attributes (via the widget's _saveState hook) just before the
    settings are packed for storage."""
    def pack_data(self, widget):
        # Persist live editor state first so pack_data sees current values.
        widget._saveState()
        return super().pack_data(widget)
class OWLoadData(widget.OWWidget):
    """Load single-cell expression data from a file.

    The widget offers a recent-files combo box, header row/column
    controls, data orientation (cells in rows vs. columns), optional
    row/column sub-sampling and optional cell/gene annotation files.
    Loading is explicit: changes mark the widget as modified and the
    data is (re)sent only when the "Load data" button is pressed.
    """
    name = ""
    icon = "icons/LoadData.svg"
    priority = 110

    class Outputs:
        data = widget.Output("Data", Table, label=i18n.t("single_cell.common.data"))

    class Information(widget.OWWidget.Information):
        modified = widget.Msg(__("msg.modified"))

    class Warning(widget.OWWidget.Warning):
        sampling_in_effect = widget.Msg(__("msg.sampling_in_effect"))

    class Error(widget.OWWidget.Error):
        row_annotation_mismatch = widget.Msg(__("msg.row_annotation_mismatch"))
        col_annotation_mismatch = widget.Msg(__("msg.col_annotation_mismatch"))
        inadequate_headers = widget.Msg(__("msg.inadequate_headers"))
        reading_error = widget.Msg(__("msg.reading_error"))

    # Persisted state; serialized through RunaroundSettingsHandler.
    _recent = settings.Setting([])  # type: List[RecentPath]
    _recent_row_annotations = settings.Setting([])  # type: List[RecentPath]
    _recent_col_annotations = settings.Setting([])  # type: List[RecentPath]
    _cells_in_rows = settings.Setting(False)
    _col_annotations_enabled = settings.Setting(False)
    _row_annotations_enabled = settings.Setting(False)
    _last_path = settings.Setting("")  # type: str
    _header_rows_count = settings.Setting(1)  # type: int
    _header_cols_count = settings.Setting(1)  # type: int
    _sample_rows_enabled = settings.Setting(False)  # type: bool
    _sample_cols_enabled = settings.Setting(False)  # type: bool
    _sample_cols_p = settings.Setting(10.0)  # type: float
    _sample_rows_p = settings.Setting(10.0)  # type: float

    settingsHandler = RunaroundSettingsHandler()

    want_main_area = False
    resizing_enabled = False

    cells_in_rows_changed = Signal()

    def __init__(self):
        super().__init__()
        self._current_path = ""
        self._data_loader = Loader()
        icon_open_dir = self.style().standardIcon(QStyle.SP_DirOpenIcon)
        # Top grid with file selection combo box
        self.file_layout = grid = QGridLayout()
        lb = QLabel(__("label.file"))
        lb.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.recent_combo = cb = QComboBox(
            sizeAdjustPolicy=QComboBox.AdjustToMinimumContentsLengthWithIcon,
            minimumContentsLength=20,
            toolTip=__("label.file_tip")
        )
        self.recent_model = cb.model()  # type: QStandardItemModel
        self.recent_combo.activated[int].connect(self._select_recent)
        browse = QPushButton("...", autoDefault=False, icon=icon_open_dir,
                             clicked=self.browse)
        # reload = QPushButton("Reload", autoDefault=False, icon=icon_reload)
        grid.addWidget(lb, 0, 0, Qt.AlignVCenter)
        grid.addWidget(cb, 0, 1)
        grid.addWidget(browse, 0, 2)
        # grid.addWidget(reload, 0, 3)
        self.summary_label = label = QLabel("", self)
        label.ensurePolished()
        # Slightly smaller font for the size/rows/columns summary line.
        f = label.font()
        if f.pointSizeF() != -1:
            f.setPointSizeF(f.pointSizeF() * 5 / 6)
        else:
            f.setPixelSize(f.pixelSize() * 5 / 6)
        label.setFont(f)
        grid.addWidget(label, 1, 1, 1, 3)
        self.controlArea.layout().addLayout(grid)
        # Header rows/columns controls.
        box = gui.widgetBox(
            self.controlArea, __("box.header_row_label"), spacing=-1
        )
        hl = QHBoxLayout()
        hl.setContentsMargins(0, 0, 0, 0)
        self.header_rows_spin = spin = QSpinBox(
            box, minimum=0, maximum=3, value=self._header_rows_count,
            keyboardTracking=False
        )
        spin.valueChanged.connect(self.set_header_rows_count)
        hl.addWidget(QLabel(__("row.data_start"), box))
        hl.addWidget(self.header_rows_spin)
        hl.addWidget(QLabel(__("row.header_row"), box))
        hl.addStretch(10)
        box.layout().addLayout(hl)
        hl = QHBoxLayout()
        hl.setContentsMargins(0, 0, 0, 0)
        self.header_cols_spin = spin = QSpinBox(
            box, minimum=0, maximum=3, value=self._header_cols_count,
            keyboardTracking=False
        )
        spin.valueChanged.connect(self.set_header_cols_count)
        hl.addWidget(QLabel(__("row.first"), box))
        hl.addWidget(self.header_cols_spin)
        hl.addWidget(QLabel(__("row.column_row_label"), box))
        hl.addStretch(10)
        box.layout().addLayout(hl)
        # Data orientation (genes-in-rows vs. cells-in-rows).
        self.data_struct_box = box = gui.widgetBox(
            self.controlArea, __("box.data_struct")
        )
        gui.radioButtons(
            box, self, "_cells_in_rows",
            [__("btn.gene_cell"),
             __("btn.cell_gene")],
            callback=self._cells_in_rows_changed
        )
        # Sub-sampling controls.
        box = gui.widgetBox(
            self.controlArea, __("box.sample_data"), spacing=-1)
        grid = QGridLayout()
        grid.setContentsMargins(0, 0, 0, 0)
        box.layout().addLayout(grid)
        self.sample_rows_cb = cb = QCheckBox(checked=self._sample_rows_enabled)
        self.sample_rows_p_spin = spin = QSpinBox(
            minimum=0, maximum=100, value=self._sample_rows_p
        )
        spin.valueChanged.connect(self.set_sample_rows_p)
        suffix = QLabel(__("row.cell"))
        cb.toggled.connect(self.set_sample_rows_enabled)
        grid.addWidget(cb, 0, 0)
        grid.addWidget(spin, 0, 1)
        grid.addWidget(suffix, 0, 2)
        self.sample_cols_cb = cb = QCheckBox(checked=self._sample_cols_enabled)
        self.sample_cols_p_spin = spin = QSpinBox(
            minimum=0, maximum=100, value=self._sample_cols_p
        )
        spin.valueChanged.connect(self.set_sample_cols_p)
        suffix = QLabel(__("row.gene"))
        cb.toggled.connect(self.set_sample_cols_enabled)
        grid.addWidget(cb, 1, 0)
        grid.addWidget(spin, 1, 1)
        grid.addWidget(suffix, 1, 2)
        grid.setColumnStretch(3, 10)
        # Optional row (cell) / column (gene) annotation files.
        self.annotation_files_box = box = gui.widgetBox(
            self.controlArea, __("box.annotation_files")
        )
        form = QFormLayout(
            formAlignment=Qt.AlignLeft,
            rowWrapPolicy=QFormLayout.WrapAllRows,
        )
        box.layout().addLayout(form)
        self.row_annotations_cb = cb = QCheckBox(
            __("btn.cell_annotation"), checked=self._row_annotations_enabled
        )
        self._row_annotations_w = w = QWidget(enabled=self._row_annotations_enabled)
        cb.toggled.connect(self.set_row_annotations_enabled)
        cb.toggled.connect(w.setEnabled)
        hl = QHBoxLayout()
        hl.setContentsMargins(0, 0, 0, 0)
        w.setLayout(hl)
        self.row_annotations_combo = QComboBox(
            sizeAdjustPolicy=QComboBox.AdjustToMinimumContentsLengthWithIcon,
            minimumContentsLength=18
        )
        self.row_annotations_combo.activated.connect(
            self._row_annotations_combo_changed
        )
        hl.addWidget(self.row_annotations_combo)
        hl.addWidget(QPushButton("...", box, autoDefault=False,
                                 icon=icon_open_dir,
                                 clicked=self.browse_row_annotations))
        # hl.addWidget(QPushButton("Reload", box, autoDefault=False,
        #                          icon=icon_reload))
        form.addRow(cb, w)
        self.col_annotations_cb = cb = QCheckBox(
            __("btn.gene_annotation"), checked=self._col_annotations_enabled
        )
        self._col_annotations_w = w = QWidget(enabled=self._col_annotations_enabled)
        cb.toggled.connect(self.set_col_annotations_enabled)
        cb.toggled.connect(w.setEnabled)
        hl = QHBoxLayout()
        hl.setContentsMargins(0, 0, 0, 0)
        w.setLayout(hl)
        self.col_annotations_combo = QComboBox(
            sizeAdjustPolicy=QComboBox.AdjustToMinimumContentsLengthWithIcon,
            minimumContentsLength=18
        )
        self.col_annotations_combo.activated.connect(
            self._col_annotations_combo_changed
        )
        hl.addWidget(self.col_annotations_combo)
        hl.addWidget(QPushButton("...", box, autoDefault=False,
                                 icon=icon_open_dir,
                                 clicked=self.browse_col_annotations))
        # hl.addWidget(QPushButton("Reload", box, autoDefault=False,
        #                          icon=icon_reload))
        form.addRow(cb, w)
        self.controlArea.layout().addStretch(10)
        self.load_data_button = button = VariableTextPushButton(
            __("btn.load_data"), autoDefault=True, textChoiceList=[__("btn.load_data"), __("btn.reload")]
        )
        self.load_data_button.setAutoDefault(True)
        button.clicked.connect(self.commit, Qt.QueuedConnection)
        self.controlArea.layout().addWidget(button, alignment=Qt.AlignRight)
        # Populate the recent-path models from the stored settings.
        init_recent_paths_model(
            self.recent_model,
            [self.resolve_path(p) for p in self._recent],
        )
        init_recent_paths_model(
            self.row_annotations_combo.model(),
            [self.resolve_path(p) for p in self._recent_row_annotations]
        )
        init_recent_paths_model(
            self.col_annotations_combo.model(),
            [self.resolve_path(p) for p in self._recent_col_annotations]
        )
        self.__update_summary()
        self._update_warning()
        # Restore the last loaded file; deferred via a 0 ms timer so it
        # runs after the widget is fully constructed.
        if self._last_path != "" and os.path.exists(self._last_path):
            QTimer.singleShot(
                0, lambda: self.set_current_path(self._last_path)
            )
        else:
            self.recent_combo.setCurrentIndex(-1)

    def resolve_path(self, path):
        """Return `path` resolved against the workflow's 'basedir', or
        `path` unchanged when no basedir is set or resolution fails."""
        basedir = self.workflowEnv().get("basedir", None)
        if not basedir or not path:
            return path
        return path.resolve([("basedir", basedir)]) or path

    def _cells_in_rows_changed(self):
        """React to a change of the data-orientation radio buttons."""
        self._data_loader.transposed = not self._cells_in_rows
        self._invalidate()
        self.cells_in_rows_changed.emit()

    def _row_annotations_combo_changed(self):
        """Push the selected row-annotation file into the loader."""
        path = self.row_annotations_combo.currentData(Qt.UserRole)
        if isinstance(path, RecentPath) and os.path.exists(path.abspath):
            self._data_loader.row_annotation_file = path  # type: RecentPath
        else:
            self._data_loader.row_annotation_file = None
        self._invalidate()

    def _col_annotations_combo_changed(self):
        """Push the selected column-annotation file into the loader."""
        path = self.col_annotations_combo.currentData(Qt.UserRole)
        if isinstance(path, RecentPath) and os.path.exists(path.abspath):
            self._data_loader.col_annotation_file = path  # type: RecentPath
        else:
            self._data_loader.col_annotation_file = None
        self._invalidate()

    def _update_warning(self):
        """Show the sampling warning iff any sub-sampling below 100 % is on."""
        if (self._sample_rows_enabled and self._sample_rows_p < 100) or \
                (self._sample_cols_enabled and self._sample_cols_p < 100):
            self.Warning.sampling_in_effect()
        else:
            self.Warning.sampling_in_effect.clear()

    def set_sample_rows_enabled(self, enabled, commit=True):
        """Enable/disable row sampling; `commit=False` skips invalidation."""
        if self._sample_rows_enabled != enabled:
            self._sample_rows_enabled = enabled
            self.sample_rows_cb.setChecked(enabled)
            self._update_warning()
            self._data_loader.sample_rows_enabled = enabled
            if commit:
                self._invalidate()

    def set_sample_cols_enabled(self, enabled, commit=True):
        """Enable/disable column sampling; `commit=False` skips invalidation."""
        if self._sample_cols_enabled != enabled:
            self._sample_cols_enabled = enabled
            self.sample_cols_cb.setChecked(enabled)
            self._update_warning()
            self._data_loader.sample_cols_enabled = enabled
            if commit:
                self._invalidate()

    def set_sample_rows_p(self, p, commit=True):
        """Set the row sampling percentage (0-100)."""
        if self._sample_rows_p != p:
            self._sample_rows_p = p
            self._update_warning()
            self.sample_rows_p_spin.setValue(p)
            self._data_loader.sample_rows_p = p
            if commit:
                self._invalidate()

    def set_sample_cols_p(self, p, commit=True):
        """Set the column sampling percentage (0-100)."""
        if self._sample_cols_p != p:
            self._sample_cols_p = p
            self._update_warning()
            self.sample_cols_p_spin.setValue(p)
            self._data_loader.sample_cols_p = p
            if commit:
                self._invalidate()

    def set_header_rows_count(self, n, commit=True):
        """Set the number of header rows in the input file."""
        if self._header_rows_count != n:
            self._header_rows_count = n
            self.header_rows_spin.setValue(n)
            self._data_loader.header_rows_count = n
            if commit:
                self._invalidate()

    def set_header_cols_count(self, n, commit=True):
        """Set the number of header columns in the input file."""
        if self._header_cols_count != n:
            self._header_cols_count = n
            self.header_cols_spin.setValue(n)
            self._data_loader.header_cols_count = n
            if commit:
                self._invalidate()

    def set_row_annotations_enabled(self, enabled, commit=True):
        """Enable/disable the row (cell) annotation file."""
        if self._row_annotations_enabled != enabled:
            self._row_annotations_enabled = enabled
            self.row_annotations_cb.setChecked(enabled)
            self._data_loader.row_annotations_enabled = enabled
            if commit:
                self._invalidate()

    def set_col_annotations_enabled(self, enabled, commit=True):
        """Enable/disable the column (gene) annotation file."""
        if self._col_annotations_enabled != enabled:
            self._col_annotations_enabled = enabled
            self.col_annotations_cb.setChecked(enabled)
            self._data_loader.col_annotations_enabled = enabled
            if commit:
                self._invalidate()

    def set_current_path(self, path):
        """Make `path` the current input file.

        Moves (or inserts) the path at the top of the recent list,
        creates a fresh loader for it and updates the GUI from the
        loader's predefined parameters.
        """
        if samepath(self._current_path, path):
            return
        model = self.recent_model
        index = -1
        pathitem = None
        for i in range(model.rowCount()):
            item = model.item(i)
            data = item.data(Qt.UserRole) if item is not None else None
            if isinstance(data, RecentPath) and samepath(path, data.abspath):
                index, pathitem = i, data
                break
        rpaths = []
        if pathitem is None:
            assert index == -1
            pathitem = RecentPath.create(path, rpaths)
        if index != -1:
            item = model.takeRow(index)
        else:
            item = RecentPath_asqstandarditem(pathitem)
        model.insertRow(0, item)
        self._current_path = path
        self.recent_combo.setCurrentIndex(0)
        self._data_loader = get_data_loader(path)
        self.__update_summary()
        self.setup_gui()
        self._invalidate()

    def setup_gui(self):
        """ Use loader predefined values. If the value is None, set
        loader's parameter to widget's setting value.
        """
        loader = self._data_loader
        if loader.header_rows_count is not None:
            self.set_header_rows_count(loader.header_rows_count, False)
        else:
            loader.header_rows_count = self._header_rows_count
        if loader.header_cols_count is not None:
            self.set_header_cols_count(loader.header_cols_count, False)
        else:
            loader.header_cols_count = self._header_cols_count
        if loader.transposed is not None:
            self._cells_in_rows = not loader.transposed
        else:
            loader.transposed = not self._cells_in_rows
        if loader.sample_rows_enabled is not None:
            self.set_sample_rows_enabled(loader.sample_rows_enabled, False)
        else:
            loader.sample_rows_enabled = self._sample_rows_enabled
        if loader.sample_cols_enabled is not None:
            self.set_sample_cols_enabled(loader.sample_cols_enabled, False)
        else:
            loader.sample_cols_enabled = self._sample_cols_enabled
        if loader.sample_rows_p is not None:
            self.set_sample_rows_p(loader.sample_rows_p, False)
        else:
            loader.sample_rows_p = self._sample_rows_p
        if loader.sample_cols_p is not None:
            self.set_sample_cols_p(loader.sample_cols_p, False)
        else:
            loader.sample_cols_p = self._sample_cols_p
        if loader.row_annotation_file is not None:
            index = insert_recent_path(
                self.row_annotations_combo.model(),
                self.resolve_path(loader.row_annotation_file)
            )
            self.row_annotations_combo.setCurrentIndex(index)
            self.set_row_annotations_enabled(
                loader.row_annotations_enabled, False
            )
        else:
            self.row_annotations_combo.setCurrentIndex(-1)
            self.set_row_annotations_enabled(False, False)
        if loader.col_annotation_file is not None:
            index = insert_recent_path(
                self.col_annotations_combo.model(),
                self.resolve_path(loader.col_annotation_file)
            )
            self.col_annotations_combo.setCurrentIndex(index)
            self.set_col_annotations_enabled(
                loader.col_annotations_enabled, False)
        else:
            self.col_annotations_combo.setCurrentIndex(-1)
            self.set_col_annotations_enabled(False, False)
        # Free-format loaders manage headers/orientation themselves.
        self.header_rows_spin.setEnabled(loader.FIXED_FORMAT)
        self.header_cols_spin.setEnabled(loader.FIXED_FORMAT)
        self.data_struct_box.setEnabled(loader.FIXED_FORMAT)
        self.annotation_files_box.setEnabled(loader.ENABLE_ANNOTATIONS)

    def __update_summary(self):
        """Refresh the summary label with file size / rows / columns."""
        size = self._data_loader.file_size
        ncols = self._data_loader.n_cols
        nrows = self._data_loader.n_rows
        text = []
        if size is not None:
            text += [sizeformat(size)]
        if nrows is not None:
            text += [__("text.rows").format(nrows)]
        # BUGFIX: previously tested `nrows` again, so the column count was
        # shown/hidden based on the row count being known.
        if ncols is not None:
            text += [__("text.columns").format(ncols)]
        self.summary_label.setText(", ".join(text))

    def current_path(self):
        """Return the path of the currently selected input file."""
        return self._current_path

    def _select_recent(self, index):
        # type: (int) -> None
        # select a file from the recent list (entered via combo box `activate`)
        assert 0 <= index < self.recent_model.rowCount()
        item = self.recent_model.item(index)
        pathitem = item.data(Qt.UserRole)
        assert isinstance(pathitem, RecentPath)
        self.set_current_path(pathitem.abspath)

    @Slot()
    def browse(self):
        """Open a file dialog and set the chosen file as current."""
        dlg = QFileDialog(self)
        dlg.setAcceptMode(QFileDialog.AcceptOpen)
        dlg.setFileMode(QFileDialog.ExistingFile)
        filters = Formats
        dlg.setNameFilters(filters)
        if filters:
            dlg.selectNameFilter(filters[0])
        if dlg.exec_() == QFileDialog.Accepted:
            filename = dlg.selectedFiles()[0]
            self.set_current_path(filename)

    @Slot()
    def browse_row_annotations(self):
        """Open a file dialog for a row (cell) annotation file."""
        dlg = QFileDialog(
            self, acceptMode=QFileDialog.AcceptOpen,
            fileMode=QFileDialog.ExistingFile
        )
        filters = AnnotationFormats
        dlg.setNameFilters(filters)
        if filters:
            dlg.selectNameFilter(filters[0])
        if dlg.exec_() == QFileDialog.Accepted:
            filename = dlg.selectedFiles()[0]
            m = self.row_annotations_combo.model()  # type: QStandardItemModel
            pathitem = RecentPath.create(filename, [])
            index = insert_recent_path(m, pathitem)
            self.row_annotations_combo.setCurrentIndex(index)
            self._invalidate()

    @Slot()
    def browse_col_annotations(self):
        """Open a file dialog for a column (gene) annotation file."""
        dlg = QFileDialog(
            self, acceptMode=QFileDialog.AcceptOpen,
            fileMode=QFileDialog.ExistingFile
        )
        filters = AnnotationFormats
        dlg.setNameFilters(filters)
        if filters:
            dlg.selectNameFilter(filters[0])
        if dlg.exec_() == QFileDialog.Accepted:
            filename = dlg.selectedFiles()[0]
            m = self.col_annotations_combo.model()  # type: QStandardItemModel
            pathitem = RecentPath.create(filename, [])
            index = insert_recent_path(m, pathitem)
            self.col_annotations_combo.setCurrentIndex(index)
            self._invalidate()

    def _invalidate(self):
        """Mark the current output as stale (user must press Load data)."""
        self.set_modified(True)

    def set_modified(self, modified):
        """Reflect the modified/up-to-date state on the load button."""
        if modified:
            text = __("btn.load_data")
        else:
            text = __("btn.reload")
        self.load_data_button.setText(text)
        self.load_data_button.setAutoDefault(modified)
        # Setting autoDefault once also sets 'default', which persists even
        # after setting autoDefault back to False??
        self.load_data_button.setDefault(modified)
        self.Information.modified(shown=modified)

    def commit(self):
        """Load the file with the current parameters and send the Table."""
        path = self._current_path
        if not path:
            return
        self.Outputs.data.send(self._data_loader())
        self.show_error_messages()
        self.set_modified(False)

    def show_error_messages(self):
        """Translate the loader's collected errors into widget Error messages."""
        self.Error.row_annotation_mismatch.clear()
        self.Error.col_annotation_mismatch.clear()
        self.Error.inadequate_headers.clear()
        # BUGFIX: reading_error was never cleared, so it stuck around
        # after a subsequent successful load.
        self.Error.reading_error.clear()
        errors = self._data_loader.errors
        if len(errors["row_annot_mismatch"]):
            self.Error.row_annotation_mismatch(*errors["row_annot_mismatch"])
        if len(errors["col_annot_mismatch"]):
            self.Error.col_annotation_mismatch(*errors["col_annot_mismatch"])
        if len(errors["inadequate_headers"]):
            self.Error.inadequate_headers(*errors["inadequate_headers"])
        if len(errors["reading_error"]):
            self.Error.reading_error()

    def onDeleteWidget(self):
        super().onDeleteWidget()

    def _saveState(self):
        """Copy the recent-path models and current path into the Settings."""
        maxitems = 15

        def dataiter(model, role=Qt.UserRole):
            return (model.data(model.index(i, 0), role)
                    for i in range(model.rowCount()))

        def recent_paths(model):
            return [self.relocate_path(el) for el in dataiter(model)
                    if isinstance(el, RecentPath)][:maxitems]

        self._recent = recent_paths(self.recent_model)
        self._recent_row_annotations = recent_paths(
            self.row_annotations_combo.model())
        self._recent_col_annotations = recent_paths(
            self.col_annotations_combo.model())
        self._last_path = self._current_path

    def relocate_path(self, path):
        """Re-create `path` relative to the workflow's 'basedir', if any."""
        basedir = self.workflowEnv().get("basedir", None)
        if not basedir or not path:
            return path
        return RecentPath.create(path.abspath, [("basedir", basedir)])

    def saveSettings(self):
        self._saveState()
        super().saveSettings()
def main(argv=None):
    """Run the OWLoadData widget standalone.

    Returns the Qt event-loop exit code so that the ``__main__`` guard's
    ``sys.exit(main(...))`` propagates a meaningful process status
    (previously the exec_() result was discarded and None was returned).
    """
    app = QApplication(argv or [])
    w = OWLoadData()
    w.show()
    w.raise_()
    rv = app.exec_()
    w.saveSettings()
    w.onDeleteWidget()
    return rv
# Allow running this module directly for manual testing of the widget.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
/DataAnalysis-0.0.6.tar.gz/DataAnalysis-0.0.6/README.md | # DataAnalysis
DataAnalysis é uma biblioteca que pode ser usada para o pré-processamento de um arquivo csv.
#### Parâmetros:
```diff
input_file: nome do arquivo com a extensão csv
api_small_talks: url da api de small talks
content_column: nome ou índice da coluna de conteúdo do arquivo csv
encoding: codificação do arquivo
sep: separador usado no arquivo
batch: número de batches para usar na api de small talks
```
## Installation
Use o gerenciador de pacotes [pip](https://pip.pypa.io/en/stable/) para instalar o DataAnalysis
```bash
pip install DataAnalysis
```
## Usage
```python
import DataAnalysis as da
p = da.PreProcessing(input_file, api_small_talks, content_column, encoding, sep, batch)
p.process(output_file, lower = True, punctuation = True, abbreviation = True, typo = True, small_talk = True,
emoji = True, wa_emoji = True, accentuation = True, number = True, relevant = False, cpf = True,
url = True, email = True, money = True, code = True, time = True, date = True, tagging = True)
```
## License
[MIT](https://choosealicense.com/licenses/mit/) | PypiClean |
/Hikka_TL_New-2.0.4-py3-none-any.whl/hikkatl/tl/types/help.py | from ...tl.tlobject import TLObject
from typing import Optional, List, Union, TYPE_CHECKING
import os
import struct
from datetime import datetime
if TYPE_CHECKING:
from ...tl.types import TypeAccessPointRule, TypeChat, TypeDataJSON, TypeDocument, TypeJSONValue, TypeMessageEntity, TypePeer, TypePremiumSubscriptionOption, TypeRecentMeUrl, TypeUser
from ...tl.types.help import TypeCountry, TypeCountryCode, TypeTermsOfService
class AppConfig(TLObject):
    """Generated TL type ``help.appConfig``: a client configuration blob
    (JSON) plus a hash used for cache invalidation."""
    CONSTRUCTOR_ID = 0xdd18782e
    SUBCLASS_OF_ID = 0x14381c9a
    # noinspection PyShadowingBuiltins
    def __init__(self, hash: int, config: 'TypeJSONValue'):
        """
        Constructor for help.AppConfig: Instance of either AppConfigNotModified, AppConfig.
        """
        self.hash = hash
        self.config = config
    def to_dict(self):
        return {
            '_': 'AppConfig',
            'hash': self.hash,
            'config': self.config.to_dict() if isinstance(self.config, TLObject) else self.config
        }
    def _bytes(self):
        # Wire format: 4-byte constructor id, int32 hash, serialized config.
        return b''.join((
            b'.x\x18\xdd',
            struct.pack('<i', self.hash),
            self.config._bytes(),
        ))
    @classmethod
    def from_reader(cls, reader):
        _hash = reader.read_int()
        _config = reader.tgread_object()
        return cls(hash=_hash, config=_config)
class AppConfigNotModified(TLObject):
    """Generated TL type ``help.appConfigNotModified``: server reply meaning
    the cached app config is still valid; carries no fields."""
    CONSTRUCTOR_ID = 0x7cde641d
    SUBCLASS_OF_ID = 0x14381c9a
    def to_dict(self):
        return {'_': 'AppConfigNotModified'}
    def _bytes(self):
        # Only the 4-byte constructor id is serialized.
        return b'\x1dd\xde|'
    @classmethod
    def from_reader(cls, reader):
        return cls()
class AppUpdate(TLObject):
    """Generated TL type ``help.appUpdate``: an available application update
    (id, version, release notes with entities; optional installer document,
    changelog URL and sticker)."""
    CONSTRUCTOR_ID = 0xccbbce30
    SUBCLASS_OF_ID = 0x5897069e
    # noinspection PyShadowingBuiltins
    def __init__(self, id: int, version: str, text: str, entities: List['TypeMessageEntity'], can_not_skip: Optional[bool]=None, document: Optional['TypeDocument']=None, url: Optional[str]=None, sticker: Optional['TypeDocument']=None):
        """
        Constructor for help.AppUpdate: Instance of either AppUpdate, NoAppUpdate.
        """
        self.id = id
        self.version = version
        self.text = text
        self.entities = entities
        self.can_not_skip = can_not_skip
        self.document = document
        self.url = url
        self.sticker = sticker
    def to_dict(self):
        return {
            '_': 'AppUpdate',
            'id': self.id,
            'version': self.version,
            'text': self.text,
            'entities': [] if self.entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.entities],
            'can_not_skip': self.can_not_skip,
            'document': self.document.to_dict() if isinstance(self.document, TLObject) else self.document,
            'url': self.url,
            'sticker': self.sticker.to_dict() if isinstance(self.sticker, TLObject) else self.sticker
        }
    def _bytes(self):
        # Flags bitmask records which optional fields follow:
        # 1=can_not_skip, 2=document, 4=url, 8=sticker.
        return b''.join((
            b'0\xce\xbb\xcc',
            struct.pack('<I', (0 if self.can_not_skip is None or self.can_not_skip is False else 1) | (0 if self.document is None or self.document is False else 2) | (0 if self.url is None or self.url is False else 4) | (0 if self.sticker is None or self.sticker is False else 8)),
            struct.pack('<i', self.id),
            self.serialize_bytes(self.version),
            self.serialize_bytes(self.text),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.entities)),b''.join(x._bytes() for x in self.entities),
            b'' if self.document is None or self.document is False else (self.document._bytes()),
            b'' if self.url is None or self.url is False else (self.serialize_bytes(self.url)),
            b'' if self.sticker is None or self.sticker is False else (self.sticker._bytes()),
        ))
    @classmethod
    def from_reader(cls, reader):
        flags = reader.read_int()
        _can_not_skip = bool(flags & 1)
        _id = reader.read_int()
        _version = reader.tgread_string()
        _text = reader.tgread_string()
        reader.read_int()  # skip the 0x1cb5c415 vector constructor id
        _entities = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _entities.append(_x)
        if flags & 2:
            _document = reader.tgread_object()
        else:
            _document = None
        if flags & 4:
            _url = reader.tgread_string()
        else:
            _url = None
        if flags & 8:
            _sticker = reader.tgread_object()
        else:
            _sticker = None
        return cls(id=_id, version=_version, text=_text, entities=_entities, can_not_skip=_can_not_skip, document=_document, url=_url, sticker=_sticker)
class ConfigSimple(TLObject):
    """Generated TL type ``help.configSimple``: a minimal datacenter config
    with a validity window and a list of access-point rules."""
    CONSTRUCTOR_ID = 0x5a592a6c
    SUBCLASS_OF_ID = 0x29183ac4
    def __init__(self, date: Optional[datetime], expires: Optional[datetime], rules: List['TypeAccessPointRule']):
        """
        Constructor for help.ConfigSimple: Instance of ConfigSimple.
        """
        self.date = date
        self.expires = expires
        self.rules = rules
    def to_dict(self):
        return {
            '_': 'ConfigSimple',
            'date': self.date,
            'expires': self.expires,
            'rules': [] if self.rules is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.rules]
        }
    def _bytes(self):
        # Note: the rules vector is serialized *without* a vector
        # constructor id (bare vector), just a length prefix.
        return b''.join((
            b'l*YZ',
            self.serialize_datetime(self.date),
            self.serialize_datetime(self.expires),
            struct.pack('<i', len(self.rules)),b''.join(x._bytes() for x in self.rules),
        ))
    @classmethod
    def from_reader(cls, reader):
        _date = reader.tgread_date()
        _expires = reader.tgread_date()
        _rules = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _rules.append(_x)
        return cls(date=_date, expires=_expires, rules=_rules)
class CountriesList(TLObject):
    """Generated TL type ``help.countriesList``: the list of known countries
    plus a hash for cache invalidation."""
    CONSTRUCTOR_ID = 0x87d0759e
    SUBCLASS_OF_ID = 0xea31fe88
    # noinspection PyShadowingBuiltins
    def __init__(self, countries: List['TypeCountry'], hash: int):
        """
        Constructor for help.CountriesList: Instance of either CountriesListNotModified, CountriesList.
        """
        self.countries = countries
        self.hash = hash
    def to_dict(self):
        return {
            '_': 'CountriesList',
            'countries': [] if self.countries is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.countries],
            'hash': self.hash
        }
    def _bytes(self):
        # b'\x15\xc4\xb5\x1c' is the 0x1cb5c415 vector constructor id.
        return b''.join((
            b'\x9eu\xd0\x87',
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.countries)),b''.join(x._bytes() for x in self.countries),
            struct.pack('<i', self.hash),
        ))
    @classmethod
    def from_reader(cls, reader):
        reader.read_int()  # skip the vector constructor id
        _countries = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _countries.append(_x)
        _hash = reader.read_int()
        return cls(countries=_countries, hash=_hash)
class CountriesListNotModified(TLObject):
    """Generated TL type ``help.countriesListNotModified``: the cached
    countries list is still up to date; carries no fields."""
    CONSTRUCTOR_ID = 0x93cc1f32
    SUBCLASS_OF_ID = 0xea31fe88
    def to_dict(self):
        return {'_': 'CountriesListNotModified'}
    def _bytes(self):
        # Only the 4-byte constructor id is serialized.
        return b'2\x1f\xcc\x93'
    @classmethod
    def from_reader(cls, reader):
        return cls()
class Country(TLObject):
    """Generated TL type ``help.country``: a country entry with its ISO code,
    names and phone country codes."""
    CONSTRUCTOR_ID = 0xc3878e23
    SUBCLASS_OF_ID = 0xa22e9e28
    def __init__(self, iso2: str, default_name: str, country_codes: List['TypeCountryCode'], hidden: Optional[bool]=None, name: Optional[str]=None):
        """
        Constructor for help.Country: Instance of Country.
        """
        self.iso2 = iso2
        self.default_name = default_name
        self.country_codes = country_codes
        self.hidden = hidden
        self.name = name
    def to_dict(self):
        return {
            '_': 'Country',
            'iso2': self.iso2,
            'default_name': self.default_name,
            'country_codes': [] if self.country_codes is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.country_codes],
            'hidden': self.hidden,
            'name': self.name
        }
    def _bytes(self):
        # Flags: 1=hidden, 2=name present.
        return b''.join((
            b'#\x8e\x87\xc3',
            struct.pack('<I', (0 if self.hidden is None or self.hidden is False else 1) | (0 if self.name is None or self.name is False else 2)),
            self.serialize_bytes(self.iso2),
            self.serialize_bytes(self.default_name),
            b'' if self.name is None or self.name is False else (self.serialize_bytes(self.name)),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.country_codes)),b''.join(x._bytes() for x in self.country_codes),
        ))
    @classmethod
    def from_reader(cls, reader):
        flags = reader.read_int()
        _hidden = bool(flags & 1)
        _iso2 = reader.tgread_string()
        _default_name = reader.tgread_string()
        if flags & 2:
            _name = reader.tgread_string()
        else:
            _name = None
        reader.read_int()  # skip the vector constructor id
        _country_codes = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _country_codes.append(_x)
        return cls(iso2=_iso2, default_name=_default_name, country_codes=_country_codes, hidden=_hidden, name=_name)
class CountryCode(TLObject):
    """Generated TL type ``help.countryCode``: a phone country code with
    optional number prefixes and input patterns."""
    CONSTRUCTOR_ID = 0x4203c5ef
    SUBCLASS_OF_ID = 0x76f34665
    def __init__(self, country_code: str, prefixes: Optional[List[str]]=None, patterns: Optional[List[str]]=None):
        """
        Constructor for help.CountryCode: Instance of CountryCode.
        """
        self.country_code = country_code
        self.prefixes = prefixes
        self.patterns = patterns
    def to_dict(self):
        return {
            '_': 'CountryCode',
            'country_code': self.country_code,
            'prefixes': [] if self.prefixes is None else self.prefixes[:],
            'patterns': [] if self.patterns is None else self.patterns[:]
        }
    def _bytes(self):
        # Flags: 1=prefixes vector present, 2=patterns vector present.
        return b''.join((
            b'\xef\xc5\x03B',
            struct.pack('<I', (0 if self.prefixes is None or self.prefixes is False else 1) | (0 if self.patterns is None or self.patterns is False else 2)),
            self.serialize_bytes(self.country_code),
            b'' if self.prefixes is None or self.prefixes is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.prefixes)),b''.join(self.serialize_bytes(x) for x in self.prefixes))),
            b'' if self.patterns is None or self.patterns is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.patterns)),b''.join(self.serialize_bytes(x) for x in self.patterns))),
        ))
    @classmethod
    def from_reader(cls, reader):
        flags = reader.read_int()
        _country_code = reader.tgread_string()
        if flags & 1:
            reader.read_int()  # skip the vector constructor id
            _prefixes = []
            for _ in range(reader.read_int()):
                _x = reader.tgread_string()
                _prefixes.append(_x)
        else:
            _prefixes = None
        if flags & 2:
            reader.read_int()  # skip the vector constructor id
            _patterns = []
            for _ in range(reader.read_int()):
                _x = reader.tgread_string()
                _patterns.append(_x)
        else:
            _patterns = None
        return cls(country_code=_country_code, prefixes=_prefixes, patterns=_patterns)
class DeepLinkInfo(TLObject):
    """Generated TL type ``help.deepLinkInfo``: message (with optional
    entities) to show for an unsupported deep link; may ask to update."""
    CONSTRUCTOR_ID = 0x6a4ee832
    SUBCLASS_OF_ID = 0x984aac38
    def __init__(self, message: str, update_app: Optional[bool]=None, entities: Optional[List['TypeMessageEntity']]=None):
        """
        Constructor for help.DeepLinkInfo: Instance of either DeepLinkInfoEmpty, DeepLinkInfo.
        """
        self.message = message
        self.update_app = update_app
        self.entities = entities
    def to_dict(self):
        return {
            '_': 'DeepLinkInfo',
            'message': self.message,
            'update_app': self.update_app,
            'entities': [] if self.entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.entities]
        }
    def _bytes(self):
        # Flags: 1=update_app, 2=entities vector present.
        return b''.join((
            b'2\xe8Nj',
            struct.pack('<I', (0 if self.update_app is None or self.update_app is False else 1) | (0 if self.entities is None or self.entities is False else 2)),
            self.serialize_bytes(self.message),
            b'' if self.entities is None or self.entities is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.entities)),b''.join(x._bytes() for x in self.entities))),
        ))
    @classmethod
    def from_reader(cls, reader):
        flags = reader.read_int()
        _update_app = bool(flags & 1)
        _message = reader.tgread_string()
        if flags & 2:
            reader.read_int()  # skip the vector constructor id
            _entities = []
            for _ in range(reader.read_int()):
                _x = reader.tgread_object()
                _entities.append(_x)
        else:
            _entities = None
        return cls(message=_message, update_app=_update_app, entities=_entities)
class DeepLinkInfoEmpty(TLObject):
    """Generated TL type ``help.deepLinkInfoEmpty``: no info is available
    for the requested deep link; carries no fields."""
    CONSTRUCTOR_ID = 0x66afa166
    SUBCLASS_OF_ID = 0x984aac38
    def to_dict(self):
        return {'_': 'DeepLinkInfoEmpty'}
    def _bytes(self):
        # Only the 4-byte constructor id is serialized.
        return b'f\xa1\xaff'
    @classmethod
    def from_reader(cls, reader):
        return cls()
class InviteText(TLObject):
    """Generated TL type ``help.inviteText``: the localized text used to
    invite contacts to the platform."""
    CONSTRUCTOR_ID = 0x18cb9f78
    SUBCLASS_OF_ID = 0xcf70aa35
    def __init__(self, message: str):
        """
        Constructor for help.InviteText: Instance of InviteText.
        """
        self.message = message
    def to_dict(self):
        return {'_': 'InviteText', 'message': self.message}
    def _bytes(self):
        # 4-byte constructor id followed by the TL-serialized string.
        return b'x\x9f\xcb\x18' + self.serialize_bytes(self.message)
    @classmethod
    def from_reader(cls, reader):
        return cls(message=reader.tgread_string())
class NoAppUpdate(TLObject):
    """Generated TL type ``help.noAppUpdate``: no application update is
    available; carries no fields."""
    CONSTRUCTOR_ID = 0xc45a6536
    SUBCLASS_OF_ID = 0x5897069e
    def to_dict(self):
        return {'_': 'NoAppUpdate'}
    def _bytes(self):
        # Only the 4-byte constructor id is serialized.
        return b'6eZ\xc4'
    @classmethod
    def from_reader(cls, reader):
        return cls()
class PassportConfig(TLObject):
    """Generated TL type ``help.passportConfig``: Telegram Passport
    per-country language mapping (JSON) plus its cache hash."""
    CONSTRUCTOR_ID = 0xa098d6af
    SUBCLASS_OF_ID = 0xc666c0ad
    # noinspection PyShadowingBuiltins
    def __init__(self, hash: int, countries_langs: 'TypeDataJSON'):
        """
        Constructor for help.PassportConfig: Instance of either PassportConfigNotModified, PassportConfig.
        """
        self.hash = hash
        self.countries_langs = countries_langs
    def to_dict(self):
        return {
            '_': 'PassportConfig',
            'hash': self.hash,
            'countries_langs': self.countries_langs.to_dict() if isinstance(self.countries_langs, TLObject) else self.countries_langs
        }
    def _bytes(self):
        # Wire format: constructor id, int32 hash, serialized DataJSON.
        return b''.join((
            b'\xaf\xd6\x98\xa0',
            struct.pack('<i', self.hash),
            self.countries_langs._bytes(),
        ))
    @classmethod
    def from_reader(cls, reader):
        _hash = reader.read_int()
        _countries_langs = reader.tgread_object()
        return cls(hash=_hash, countries_langs=_countries_langs)
class PassportConfigNotModified(TLObject):
    """Generated TL type ``help.passportConfigNotModified``: the cached
    passport config is still valid; carries no fields."""
    CONSTRUCTOR_ID = 0xbfb9f457
    SUBCLASS_OF_ID = 0xc666c0ad
    def to_dict(self):
        return {'_': 'PassportConfigNotModified'}
    def _bytes(self):
        # Only the 4-byte constructor id is serialized.
        return b'W\xf4\xb9\xbf'
    @classmethod
    def from_reader(cls, reader):
        return cls()
class PremiumPromo(TLObject):
    """Generated TL type ``help.premiumPromo``: Telegram Premium promotion
    info — status text/entities, promo videos per section, subscription
    period options and referenced users."""
    CONSTRUCTOR_ID = 0x5334759c
    SUBCLASS_OF_ID = 0xc987a338
    def __init__(self, status_text: str, status_entities: List['TypeMessageEntity'], video_sections: List[str], videos: List['TypeDocument'], period_options: List['TypePremiumSubscriptionOption'], users: List['TypeUser']):
        """
        Constructor for help.PremiumPromo: Instance of PremiumPromo.
        """
        self.status_text = status_text
        self.status_entities = status_entities
        self.video_sections = video_sections
        self.videos = videos
        self.period_options = period_options
        self.users = users
    def to_dict(self):
        return {
            '_': 'PremiumPromo',
            'status_text': self.status_text,
            'status_entities': [] if self.status_entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.status_entities],
            'video_sections': [] if self.video_sections is None else self.video_sections[:],
            'videos': [] if self.videos is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.videos],
            'period_options': [] if self.period_options is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.period_options],
            'users': [] if self.users is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.users]
        }
    def _bytes(self):
        # Each vector: 0x1cb5c415 constructor id, int32 length, items.
        return b''.join((
            b'\x9cu4S',
            self.serialize_bytes(self.status_text),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.status_entities)),b''.join(x._bytes() for x in self.status_entities),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.video_sections)),b''.join(self.serialize_bytes(x) for x in self.video_sections),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.videos)),b''.join(x._bytes() for x in self.videos),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.period_options)),b''.join(x._bytes() for x in self.period_options),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(x._bytes() for x in self.users),
        ))
    @classmethod
    def from_reader(cls, reader):
        _status_text = reader.tgread_string()
        reader.read_int()  # skip the vector constructor id
        _status_entities = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _status_entities.append(_x)
        reader.read_int()  # skip the vector constructor id
        _video_sections = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_string()
            _video_sections.append(_x)
        reader.read_int()  # skip the vector constructor id
        _videos = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _videos.append(_x)
        reader.read_int()  # skip the vector constructor id
        _period_options = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _period_options.append(_x)
        reader.read_int()  # skip the vector constructor id
        _users = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _users.append(_x)
        return cls(status_text=_status_text, status_entities=_status_entities, video_sections=_video_sections, videos=_videos, period_options=_period_options, users=_users)
class PromoData(TLObject):
    """Generated TL object for the ``help.promoData`` constructor."""
    CONSTRUCTOR_ID = 0x8c39793f
    SUBCLASS_OF_ID = 0x9d595542

    def __init__(self, expires: Optional[datetime], peer: 'TypePeer', chats: List['TypeChat'], users: List['TypeUser'], proxy: Optional[bool]=None, psa_type: Optional[str]=None, psa_message: Optional[str]=None):
        """
        Constructor for help.PromoData: Instance of either PromoDataEmpty, PromoData.
        """
        self.expires = expires
        self.peer = peer
        self.chats = chats
        self.users = users
        self.proxy = proxy
        self.psa_type = psa_type
        self.psa_message = psa_message

    def to_dict(self):
        # Dict representation; nested TLObjects converted recursively,
        # None lists rendered as [].
        return {
            '_': 'PromoData',
            'expires': self.expires,
            'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
            'chats': [] if self.chats is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.chats],
            'users': [] if self.users is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.users],
            'proxy': self.proxy,
            'psa_type': self.psa_type,
            'psa_message': self.psa_message
        }

    def _bytes(self):
        # Flags bitmask: bit 0 = proxy (flag only, no serialized
        # payload), bit 1 = psa_type present, bit 2 = psa_message
        # present.  b'\x15\xc4\xb5\x1c' is the Vector header written
        # before each list with its 32-bit length.
        return b''.join((
            b'?y9\x8c',
            struct.pack('<I', (0 if self.proxy is None or self.proxy is False else 1) | (0 if self.psa_type is None or self.psa_type is False else 2) | (0 if self.psa_message is None or self.psa_message is False else 4)),
            self.serialize_datetime(self.expires),
            self.peer._bytes(),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.chats)),b''.join(x._bytes() for x in self.chats),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(x._bytes() for x in self.users),
            b'' if self.psa_type is None or self.psa_type is False else (self.serialize_bytes(self.psa_type)),
            b'' if self.psa_message is None or self.psa_message is False else (self.serialize_bytes(self.psa_message)),
        ))

    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); conditional fields are read only when
        # their flag bit is set.
        flags = reader.read_int()
        _proxy = bool(flags & 1)  # bit 0 carries no payload of its own
        _expires = reader.tgread_date()
        _peer = reader.tgread_object()
        reader.read_int()  # discard Vector constructor ID
        _chats = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _chats.append(_x)
        reader.read_int()  # discard Vector constructor ID
        _users = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _users.append(_x)
        if flags & 2:
            _psa_type = reader.tgread_string()
        else:
            _psa_type = None
        if flags & 4:
            _psa_message = reader.tgread_string()
        else:
            _psa_message = None
        return cls(expires=_expires, peer=_peer, chats=_chats, users=_users, proxy=_proxy, psa_type=_psa_type, psa_message=_psa_message)
class PromoDataEmpty(TLObject):
    """Generated TL object: the empty variant of ``help.PromoData``."""
    CONSTRUCTOR_ID = 0x98f6ac75
    SUBCLASS_OF_ID = 0x9d595542

    def __init__(self, expires: Optional[datetime]):
        """
        Constructor for help.PromoData: Instance of either PromoDataEmpty, PromoData.
        """
        self.expires = expires

    def to_dict(self):
        return {'_': 'PromoDataEmpty', 'expires': self.expires}

    def _bytes(self):
        # Little-endian constructor ID followed by the expiry date.
        return b'u\xac\xf6\x98' + self.serialize_datetime(self.expires)

    @classmethod
    def from_reader(cls, reader):
        return cls(expires=reader.tgread_date())
class RecentMeUrls(TLObject):
    """Generated TL object for the ``help.recentMeUrls`` constructor."""
    CONSTRUCTOR_ID = 0xe0310d7
    SUBCLASS_OF_ID = 0xf269c477

    def __init__(self, urls: List['TypeRecentMeUrl'], chats: List['TypeChat'], users: List['TypeUser']):
        """
        Constructor for help.RecentMeUrls: Instance of RecentMeUrls.
        """
        self.urls = urls
        self.chats = chats
        self.users = users

    def to_dict(self):
        # Dict representation; nested TLObjects converted recursively,
        # None lists rendered as [].
        return {
            '_': 'RecentMeUrls',
            'urls': [] if self.urls is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.urls],
            'chats': [] if self.chats is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.chats],
            'users': [] if self.users is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.users]
        }

    def _bytes(self):
        # Little-endian constructor ID, then three lists, each prefixed
        # with the Vector header b'\x15\xc4\xb5\x1c' and a 32-bit length.
        return b''.join((
            b'\xd7\x10\x03\x0e',
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.urls)),b''.join(x._bytes() for x in self.urls),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.chats)),b''.join(x._bytes() for x in self.chats),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(x._bytes() for x in self.users),
        ))

    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); each bare read_int() discards a Vector
        # constructor ID preceding a list.
        reader.read_int()
        _urls = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _urls.append(_x)
        reader.read_int()
        _chats = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _chats.append(_x)
        reader.read_int()
        _users = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _users.append(_x)
        return cls(urls=_urls, chats=_chats, users=_users)
class Support(TLObject):
    """Generated TL object for the ``help.support`` constructor."""
    CONSTRUCTOR_ID = 0x17c6b5f6
    SUBCLASS_OF_ID = 0x7159bceb

    def __init__(self, phone_number: str, user: 'TypeUser'):
        """
        Constructor for help.Support: Instance of Support.
        """
        self.phone_number = phone_number
        self.user = user

    def to_dict(self):
        # Convert the nested user only when it is itself a TLObject.
        user = self.user
        if isinstance(user, TLObject):
            user = user.to_dict()
        return {
            '_': 'Support',
            'phone_number': self.phone_number,
            'user': user
        }

    def _bytes(self):
        # Little-endian constructor ID followed by both fields.
        parts = [b'\xf6\xb5\xc6\x17']
        parts.append(self.serialize_bytes(self.phone_number))
        parts.append(self.user._bytes())
        return b''.join(parts)

    @classmethod
    def from_reader(cls, reader):
        number = reader.tgread_string()
        support_user = reader.tgread_object()
        return cls(phone_number=number, user=support_user)
class SupportName(TLObject):
    """Generated TL object for the ``help.supportName`` constructor."""
    CONSTRUCTOR_ID = 0x8c05f1c9
    SUBCLASS_OF_ID = 0x7f50b7c2

    def __init__(self, name: str):
        """
        Constructor for help.SupportName: Instance of SupportName.
        """
        self.name = name

    def to_dict(self):
        return {'_': 'SupportName', 'name': self.name}

    def _bytes(self):
        # Little-endian constructor ID + the serialized name string.
        return b'\xc9\xf1\x05\x8c' + self.serialize_bytes(self.name)

    @classmethod
    def from_reader(cls, reader):
        return cls(name=reader.tgread_string())
class TermsOfService(TLObject):
    """Generated TL object for the ``help.termsOfService`` constructor."""
    CONSTRUCTOR_ID = 0x780a0310
    SUBCLASS_OF_ID = 0x20ee8312

    # noinspection PyShadowingBuiltins
    def __init__(self, id: 'TypeDataJSON', text: str, entities: List['TypeMessageEntity'], popup: Optional[bool]=None, min_age_confirm: Optional[int]=None):
        """
        Constructor for help.TermsOfService: Instance of TermsOfService.
        """
        self.id = id
        self.text = text
        self.entities = entities
        self.popup = popup
        self.min_age_confirm = min_age_confirm

    def to_dict(self):
        # Dict representation; nested TLObjects converted recursively,
        # a None entities list rendered as [].
        return {
            '_': 'TermsOfService',
            'id': self.id.to_dict() if isinstance(self.id, TLObject) else self.id,
            'text': self.text,
            'entities': [] if self.entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.entities],
            'popup': self.popup,
            'min_age_confirm': self.min_age_confirm
        }

    def _bytes(self):
        # Flags bitmask: bit 0 = popup (flag only, no payload),
        # bit 1 = min_age_confirm present (serialized as signed int32).
        # b'\x15\xc4\xb5\x1c' is the Vector header before the entities
        # list with its 32-bit length.
        return b''.join((
            b'\x10\x03\nx',
            struct.pack('<I', (0 if self.popup is None or self.popup is False else 1) | (0 if self.min_age_confirm is None or self.min_age_confirm is False else 2)),
            self.id._bytes(),
            self.serialize_bytes(self.text),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.entities)),b''.join(x._bytes() for x in self.entities),
            b'' if self.min_age_confirm is None or self.min_age_confirm is False else (struct.pack('<i', self.min_age_confirm)),
        ))

    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); min_age_confirm is read only when its
        # flag bit is set.
        flags = reader.read_int()
        _popup = bool(flags & 1)  # bit 0 carries no payload of its own
        _id = reader.tgread_object()
        _text = reader.tgread_string()
        reader.read_int()  # discard Vector constructor ID
        _entities = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _entities.append(_x)
        if flags & 2:
            _min_age_confirm = reader.read_int()
        else:
            _min_age_confirm = None
        return cls(id=_id, text=_text, entities=_entities, popup=_popup, min_age_confirm=_min_age_confirm)
class TermsOfServiceUpdate(TLObject):
    """Generated TL object for the ``help.termsOfServiceUpdate`` constructor."""
    CONSTRUCTOR_ID = 0x28ecf961
    SUBCLASS_OF_ID = 0x293c2977

    def __init__(self, expires: Optional[datetime], terms_of_service: 'TypeTermsOfService'):
        """
        Constructor for help.TermsOfServiceUpdate: Instance of either TermsOfServiceUpdateEmpty, TermsOfServiceUpdate.
        """
        self.expires = expires
        self.terms_of_service = terms_of_service

    def to_dict(self):
        # Convert the nested terms-of-service only when it is a TLObject.
        tos = self.terms_of_service
        if isinstance(tos, TLObject):
            tos = tos.to_dict()
        return {
            '_': 'TermsOfServiceUpdate',
            'expires': self.expires,
            'terms_of_service': tos
        }

    def _bytes(self):
        # Little-endian constructor ID, expiry date, then the nested
        # terms-of-service object.
        return (b'a\xf9\xec('
                + self.serialize_datetime(self.expires)
                + self.terms_of_service._bytes())

    @classmethod
    def from_reader(cls, reader):
        when = reader.tgread_date()
        tos = reader.tgread_object()
        return cls(expires=when, terms_of_service=tos)
class TermsOfServiceUpdateEmpty(TLObject):
    """Generated TL object: the empty variant of ``help.TermsOfServiceUpdate``."""
    CONSTRUCTOR_ID = 0xe3309f7f
    SUBCLASS_OF_ID = 0x293c2977

    def __init__(self, expires: Optional[datetime]):
        """
        Constructor for help.TermsOfServiceUpdate: Instance of either TermsOfServiceUpdateEmpty, TermsOfServiceUpdate.
        """
        self.expires = expires

    def to_dict(self):
        return {'_': 'TermsOfServiceUpdateEmpty', 'expires': self.expires}

    def _bytes(self):
        # Little-endian constructor ID followed by the expiry date.
        return b'\x7f\x9f0\xe3' + self.serialize_datetime(self.expires)

    @classmethod
    def from_reader(cls, reader):
        return cls(expires=reader.tgread_date())
class UserInfo(TLObject):
    """Generated TL object for the ``help.userInfo`` constructor."""
    CONSTRUCTOR_ID = 0x1eb3758
    SUBCLASS_OF_ID = 0x5c53d7d8

    def __init__(self, message: str, entities: List['TypeMessageEntity'], author: str, date: Optional[datetime]):
        """
        Constructor for help.UserInfo: Instance of either UserInfoEmpty, UserInfo.
        """
        self.message = message
        self.entities = entities
        self.author = author
        self.date = date

    def to_dict(self):
        # Dict representation; nested TLObjects converted recursively,
        # a None entities list rendered as [].
        return {
            '_': 'UserInfo',
            'message': self.message,
            'entities': [] if self.entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.entities],
            'author': self.author,
            'date': self.date
        }

    def _bytes(self):
        # Little-endian constructor ID, then each field in order.
        # b'\x15\xc4\xb5\x1c' (0x1cb5c415) is the Vector header that
        # precedes the length-prefixed entities list.
        return b''.join((
            b'X7\xeb\x01',
            self.serialize_bytes(self.message),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.entities)),b''.join(x._bytes() for x in self.entities),
            self.serialize_bytes(self.author),
            self.serialize_datetime(self.date),
        ))

    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes().
        _message = reader.tgread_string()
        reader.read_int()  # discard Vector constructor ID
        _entities = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _entities.append(_x)
        _author = reader.tgread_string()
        _date = reader.tgread_date()
        return cls(message=_message, entities=_entities, author=_author, date=_date)
class UserInfoEmpty(TLObject):
    """Generated TL object: the empty variant of ``help.UserInfo``."""
    CONSTRUCTOR_ID = 0xf3ae2eed
    SUBCLASS_OF_ID = 0x5c53d7d8

    def to_dict(self):
        return {
            '_': 'UserInfoEmpty'
        }

    def _bytes(self):
        # Little-endian constructor ID; this constructor has no fields.
        return b''.join((
            b'\xed.\xae\xf3',
        ))

    @classmethod
    def from_reader(cls, reader):
        # Fix: the return statement had stray non-Python text
        # ("| PypiClean", a scraping artifact) appended, which would
        # raise NameError when called.  No fields are read.
        return cls()
# (removed: non-source text accidentally appended to this generated module)