id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/crm/model/lead_endpoint_request.py | import re # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
def lazy_import():
    # Deferred import: resolving LeadRequest at call time (rather than at
    # module load) avoids circular imports between generated model modules.
    # The name is published into this module's globals so that
    # ``openapi_types`` can reference it as a bare name.
    import MergePythonSDK.crm.model.lead_request as _lead_request_module
    globals()['LeadRequest'] = _lead_request_module.LeadRequest
class LeadEndpointRequest(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    allowed_values = {
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        defined_types = {
            'model': (LeadRequest,),  # noqa: E501
        }
        return defined_types

    @cached_property
    def discriminator():
        return None

    attribute_map = {
        'model': 'model',  # noqa: E501
    }

    read_only_vars = {
    }

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, model, *args, **kwargs):  # noqa: E501
        """LeadEndpointRequest - a model defined in OpenAPI

        Args:
            model (LeadRequest):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be raised if the
                wrong type is input. Defaults to True.
            _path_to_item (tuple/list): a list of keys or indices to drill
                down to the model in received_data when deserializing a
                response.
            _spec_property_naming (bool): True if the variable names in the
                input data are serialized names, as specified in the OpenAPI
                document; False if they are pythonic names, e.g. snake case
                (default).
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter. If passed, type
                conversion is attempted; if omitted no type conversion is
                done.
            _visited_composed_classes (tuple): classes we have traveled
                through during discriminator resolution, so that a class
                seen again will not use its discriminator a second time.
        """
        _check_type = kwargs.pop('_check_type', True)
        # Data arriving from the wire uses serialized (spec) names by default,
        # hence the True default here (unlike __init__, which defaults False).
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.model = model
        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, model, *args, **kwargs):  # noqa: E501
        """LeadEndpointRequest - a model defined in OpenAPI

        Args:
            model (LeadRequest):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be raised if the
                wrong type is input. Defaults to True.
            _path_to_item (tuple/list): a list of keys or indices to drill
                down to the model in received_data when deserializing a
                response.
            _spec_property_naming (bool): True if the variable names in the
                input data are serialized names, as specified in the OpenAPI
                document; False if they are pythonic names, e.g. snake case
                (default).
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter. If passed, type
                conversion is attempted; if omitted no type conversion is
                done.
            _visited_composed_classes (tuple): classes we have traveled
                through during discriminator resolution, so that a class
                seen again will not use its discriminator a second time.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        # BUG FIX: the original line carried a trailing "| PypiClean" token
        # (dataset/extraction artifact) which would raise NameError at runtime.
        self.model: Union["LeadRequest"] = model
/DendroPy_calver-2023.330.2-py3-none-any.whl/dendropy/model/protractedspeciation.py |
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 Jeet Sukumaran and Mark T. Holder.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## Sukumaran, J. and M. T. Holder. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
Models, modeling and model-fitting of the protracted speciation, as described in::
Etienne, R.S., Morlon, H., and Lambert, A. 2014. Estimating the
duration of speciation from phylogenies. Evolution 2014: 2430-2440.
doi:10.1111/evo.12433
"""
import math
import heapq
import dendropy
import itertools
from dendropy.utility import GLOBAL_RNG
from dendropy.utility.error import ProcessFailedException
from dendropy.utility.error import TreeSimTotalExtinctionException
from dendropy.calculate import probability
def _D(speciation_initiation_rate,
speciation_completion_rate,
incipient_species_extinction_rate):
"""
Returns value of D, as given in eq. 5 in Etienne et al.
(2014).
Parameters
----------
speciation_initiation_rate : float
The birth rate, b (the incipient species birth
rate and the "good" species birth rate are assumed to be equal):
the rate at which new (incipient) species are produced from
either incipient or "good" species lineages.
speciation_completion_rate : float
The rate at which incipient species get converted to good
species, $lambda_1$.
incipient_species_extinction_rate : float
The incipient species exctinction rate, $\mu_1$: the rate at which
incipient species go extinct.
Returns
-------
t : float
The duration of speciation.
"""
D = math.sqrt(
pow(speciation_completion_rate + speciation_initiation_rate - incipient_species_extinction_rate, 2)
+ (4.0 * speciation_completion_rate * incipient_species_extinction_rate)
)
return D
def _phi(speciation_initiation_rate,
speciation_completion_rate,
incipient_species_extinction_rate):
"""
Returns value of $\varphi$, as given in eq. 6 in Etienne et al.
(2014).
Parameters
----------
speciation_initiation_rate : float
The birth rate, b (the incipient species birth
rate and the "good" species birth rate are assumed to be equal):
the rate at which new (incipient) species are produced from
either incipient or "good" species lineages.
speciation_completion_rate : float
The rate at which incipient species get converted to good
species, $lambda_1$.
incipient_species_extinction_rate : float
The incipient species exctinction rate, $\mu_1$: the rate at which
incipient species go extinct.
Returns
-------
t : float
The duration of speciation.
"""
phi = speciation_completion_rate - speciation_initiation_rate + incipient_species_extinction_rate
return phi
def expected_duration_of_speciation(
        speciation_initiation_rate,
        speciation_completion_rate,
        incipient_species_extinction_rate,
        D=None,
        ):
    r"""
    Return the mean duration of speciation, following Eq. 4 in Etienne
    et al. (2014).

    The duration of speciation differs from the speciation-completion
    time: the latter is the waiting time until a single incipient lineage
    completes speciation assuming zero extinction, whereas the former is
    the time needed for an incipient species or one of its descendants to
    complete speciation, conditional on speciation completing.

    Parameters
    ----------
    speciation_initiation_rate : float
        The birth rate b (assumed equal for incipient and "good"
        species): the rate at which new (incipient) species arise from
        either kind of lineage.
    speciation_completion_rate : float
        The rate $\lambda_1$ at which incipient species become good
        species.
    incipient_species_extinction_rate : float
        The incipient species extinction rate $\mu_1$.
    D : float, optional
        Value of ``D`` (Eq. 5 in Etienne et al. 2014); computed here when
        not supplied.

    Returns
    -------
    t : float
        The expected duration of speciation.
    """
    if D is None:
        D = _D(
            speciation_initiation_rate=speciation_initiation_rate,
            speciation_completion_rate=speciation_completion_rate,
            incipient_species_extinction_rate=incipient_species_extinction_rate)
    # phi_term == lambda_1 - b + mu_1, the quantity Etienne et al. call phi.
    phi_term = (speciation_completion_rate
            - speciation_initiation_rate
            + incipient_species_extinction_rate)
    scale = 2.0 / (D - phi_term)
    return scale * math.log(2.0 / (1.0 + (phi_term / D)))
def probability_of_duration_of_speciation(
        tau,
        speciation_initiation_rate,
        speciation_completion_rate,
        incipient_species_extinction_rate,
        D=None,
        phi=None,
        ):
    r"""
    Return the probability density of a speciation duration of ``tau``,
    following Eq. 6 in Etienne et al. (2014).

    Parameters
    ----------
    tau : float
        The duration of speciation.
    speciation_initiation_rate : float
        The birth rate b (assumed equal for incipient and "good"
        species): the rate at which new (incipient) species arise from
        either kind of lineage.
    speciation_completion_rate : float
        The rate $\lambda_1$ at which incipient species become good
        species.
    incipient_species_extinction_rate : float
        The incipient species extinction rate $\mu_1$.
    D : float, optional
        Value of ``D`` (Eq. 5 in Etienne et al. 2014); computed here when
        not supplied.
    phi : float, optional
        Value of ``phi`` (Eq. 7 in Etienne et al. 2014); computed here
        when not supplied.

    Returns
    -------
    p : float
        The probability of the duration of speciation, tau.
    """
    if D is None:
        D = _D(
            speciation_initiation_rate=speciation_initiation_rate,
            speciation_completion_rate=speciation_completion_rate,
            incipient_species_extinction_rate=incipient_species_extinction_rate)
    if phi is None:
        phi = _phi(
            speciation_initiation_rate=speciation_initiation_rate,
            speciation_completion_rate=speciation_completion_rate,
            incipient_species_extinction_rate=incipient_species_extinction_rate)
    decay = math.exp(-D * tau)
    numerator = 2.0 * D * D * decay * (D + phi)
    denominator = (D + phi + decay * (D - phi)) ** 2
    return numerator / denominator
def log_probability_of_duration_of_speciation(
        tau,
        speciation_initiation_rate,
        speciation_completion_rate,
        incipient_species_extinction_rate,
        D=None,
        phi=None,
        ):
    r"""
    Return the log of the probability density of a speciation duration of
    ``tau``, following Eq. 6 in Etienne et al. (2014).

    Parameters
    ----------
    tau : float
        The duration of speciation.
    speciation_initiation_rate : float
        The birth rate b (assumed equal for incipient and "good"
        species): the rate at which new (incipient) species arise from
        either kind of lineage.
    speciation_completion_rate : float
        The rate $\lambda_1$ at which incipient species become good
        species.
    incipient_species_extinction_rate : float
        The incipient species extinction rate $\mu_1$.
    D : float, optional
        Value of ``D`` (Eq. 5 in Etienne et al. 2014); computed here when
        not supplied.
    phi : float, optional
        Value of ``phi`` (Eq. 7 in Etienne et al. 2014); computed here
        when not supplied.

    Returns
    -------
    p : float
        The log probability of the duration of speciation, tau.
    """
    if D is None:
        D = _D(
            speciation_initiation_rate=speciation_initiation_rate,
            speciation_completion_rate=speciation_completion_rate,
            incipient_species_extinction_rate=incipient_species_extinction_rate)
    if phi is None:
        phi = _phi(
            speciation_initiation_rate=speciation_initiation_rate,
            speciation_completion_rate=speciation_completion_rate,
            incipient_species_extinction_rate=incipient_species_extinction_rate)
    decay = math.exp(-D * tau)
    # log of numerator and denominator of Eq. 6, assembled term by term.
    log_numerator = math.log(2.0) + 2.0 * math.log(D) - (D * tau) + math.log(D + phi)
    log_denominator = 2.0 * math.log(D + phi + decay * (D - phi))
    return log_numerator - log_denominator
def maximum_probability_duration_of_speciation(
        speciation_initiation_rate,
        speciation_completion_rate,
        incipient_species_extinction_rate,
        D=None,
        phi=None,
        ):
    r"""
    Return the duration of speciation that maximizes the probability
    density under the given process parameters, following Eq. 8 of
    Etienne et al. (2014).

    Parameters
    ----------
    speciation_initiation_rate : float
        The birth rate b (assumed equal for incipient and "good"
        species): the rate at which new (incipient) species arise from
        either kind of lineage.
    speciation_completion_rate : float
        The rate $\lambda_1$ at which incipient species become good
        species.
    incipient_species_extinction_rate : float
        The incipient species extinction rate $\mu_1$.
    D : float, optional
        Value of ``D`` (Eq. 5 in Etienne et al. 2014); computed here when
        not supplied.
    phi : float, optional
        Value of ``phi`` (Eq. 7 in Etienne et al. 2014); computed here
        when not supplied.

    Returns
    -------
    t : float
        The duration of speciation with the maximum probability under
        the given process parameters (clamped at zero, since a negative
        duration is meaningless).
    """
    if D is None:
        D = _D(
            speciation_initiation_rate=speciation_initiation_rate,
            speciation_completion_rate=speciation_completion_rate,
            incipient_species_extinction_rate=incipient_species_extinction_rate)
    if phi is None:
        phi = _phi(
            speciation_initiation_rate=speciation_initiation_rate,
            speciation_completion_rate=speciation_completion_rate,
            incipient_species_extinction_rate=incipient_species_extinction_rate)
    candidate = math.log((D - phi) / (D + phi)) / D
    return max(0, candidate)
class ProtractedSpeciationProcess(object):
class _Lineage(object):
@classmethod
def from_pbd_entry(cls, pbd_lineage_entry):
parent_lineage_id = int(pbd_lineage_entry[1])
if parent_lineage_id < 0:
parent_lineage_id = abs(parent_lineage_id)
is_parent_orthospecies = False
else:
is_parent_orthospecies = True
extinction_time = float(pbd_lineage_entry[4])
if extinction_time < 0:
extinction_time = None
lineage = cls(
lineage_id=int(pbd_lineage_entry[0]),
parent_lineage_id=parent_lineage_id,
is_parent_orthospecies=is_parent_orthospecies,
origin_time=float(pbd_lineage_entry[2]),
speciation_completion_time=float(pbd_lineage_entry[3]),
extinction_time=extinction_time,
species_id=int(pbd_lineage_entry[5]),
)
return lineage
def __init__(self, **kwargs):
self.lineage_id = kwargs.pop("lineage_id")
self.parent_lineage_id = kwargs.pop("parent_lineage_id")
self.is_parent_orthospecies = kwargs.pop("is_parent_orthospecies")
self._origin_time = kwargs.pop("origin_time")
self.speciation_completion_time = kwargs.pop("speciation_completion_time")
self.extinction_time = kwargs.pop("extinction_time")
self.species_id = kwargs.pop("species_id")
self.label = kwargs.pop("label", None)
self.time = kwargs.pop("time", None)
self.lineage_node = kwargs.pop("lineage_node", None)
self.species_node = kwargs.pop("species_node", None)
def __lt__(self, o):
# shouldn't matter, but we actually want younger lineages first,
# so we sort in *reverse* lineage_id
return self.lineage_id > o.lineage_id
def clone(self):
return self.__class__(
lineage_id=self.lineage_id,
parent_lineage_id=self.parent_lineage_id,
is_parent_orthospecies=self.is_parent_orthospecies,
origin_time=self._origin_time,
speciation_completion_time=self.speciation_completion_time,
extinction_time=self.extinction_time,
species_id=self.species_id,
label=self.label,
time=self.time,
lineage_node=self.lineage_node,
species_node=self.species_node,
)
def _get_is_extinct(self):
return not self.extinction_time is None
is_extinct = property(_get_is_extinct)
# Non-mutable as _LineageQueue uses this as a sort key in the heap Only
# way to modify is to pop, create new and reinsert
def _get_origin_time(self):
return self._origin_time
origin_time = property(_get_origin_time)
class _LineageQueue(object):
class LineageQueueEmptyException(Exception):
pass
def __init__(self):
self._lineages = []
heapq.heapify(self._lineages)
self._lineage_id_lineage_entry_map = {}
self._lineage_id_to_original_lineage_map = {}
def new_lineage(self, **kwargs):
lineage = ProtractedSpeciationProcess._Lineage(**kwargs)
self.push_lineage(lineage, is_copy=False)
return lineage
def push_lineage(self, lineage, is_copy=True):
# assert lineage.lineage_id not in self._lineage_id_lineage_entry_map
# assert lineage.lineage_id not in self._lineage_id_to_original_lineage_map
if is_copy:
stored_lineage = lineage.clone()
else:
stored_lineage = lineage
self._lineage_id_to_original_lineage_map[lineage.lineage_id] = lineage
self._lineage_id_lineage_entry_map[stored_lineage.lineage_id] = stored_lineage
heapq.heappush(self._lineages, (-stored_lineage.origin_time, stored_lineage))
def register_lineage_reference(self, lineage):
self._lineage_id_to_original_lineage_map[lineage.lineage_id] = lineage
def has_lineage(self, lineage):
return lineage.lineage_id in self._lineage_id_lineage_entry_map
def get_lineage(self, lineage_id):
return self._lineage_id_lineage_entry_map[lineage_id]
def get_lineage_by_index(self, idx):
return self._lineages[0][1]
def get_original_lineage(self, lineage_id):
return self._lineage_id_to_original_lineage_map[lineage_id]
def pop_youngest_lineage(self):
# try:
# key, lineage = heapq.heappop(self._lineages)
# except IndexError:
# raise ProtractedSpeciationProcess._LineageQueue.LineageQueueEmptyException from IndexError
if not self._lineages:
raise ProtractedSpeciationProcess._LineageQueue.LineageQueueEmptyException
key, lineage = heapq.heappop(self._lineages)
# del self._lineage_id_lineage_entry_map[lineage.lineage_id]
# del self._lineage_id_to_original_lineage_map[lineage.lineage_id]
return lineage
def __len__(self):
return len(self._lineages)
def get_initial_lineage(self):
return self._lineages[0][1]
def __init__(self,
speciation_initiation_from_orthospecies_rate,
speciation_initiation_from_incipient_species_rate,
speciation_completion_rate,
orthospecies_extinction_rate,
incipient_species_extinction_rate,
lineage_label_format_template=None,
species_label_format_template=None,
rng=None,
**kwargs
):
self.speciation_initiation_from_orthospecies_rate = speciation_initiation_from_orthospecies_rate
self.orthospecies_extinction_rate = orthospecies_extinction_rate
self.speciation_initiation_from_incipient_species_rate = speciation_initiation_from_incipient_species_rate
self.speciation_completion_rate = speciation_completion_rate
self.incipient_species_extinction_rate = incipient_species_extinction_rate
self.is_initial_lineage_orthospecies = kwargs.get("is_initial_lineage_orthospecies", False)
self.species_lineage_sampling_scheme = kwargs.get("species_lineage_sampling_scheme", "random") # 'random', 'oldest', 'youngest'
if lineage_label_format_template is None:
self.lineage_label_format_template = "S{species_id}.L{lineage_id}"
else:
self.lineage_label_format_template = lineage_label_format_template
if species_label_format_template is None:
self.species_label_format_template = "S{species_id}"
else:
self.species_label_format_template = species_label_format_template
if rng is None:
self.rng = GLOBAL_RNG
else:
self.rng = rng
self.lineage_tree_to_species_tree_node_attr = "species_tree_node"
self.species_tree_to_lineage_tree_node_attr = "lineage_tree_nodes"
def generate_sample(self, **kwargs):
"""
Samples from the Protracted Speciation Model process, returning a tuple:
- the lineage tree: this tree has all nodes/lineages, i.e. both
"good" species as well as incipient species.
- the (ortho- or confirmed- or "good"-)species tree: the tree
only has "good" species, i.e. with all incipient species
pruned out.
Each tip node on the lineage tree will have an attribute,
``species_tree_node``, which is a reference to the species tree tip to
which the lineage tip corresponds.
Each tip node on the species tree will have an attribute,
``lineage_tree_nodes``, which is a reference to an iterable of
tip nodes on the lineage tree that are associated with
this species tree node.
Parameters
----------
max_time : float or |None|
Terminate and return results if this time is exceeded. If |None|,
then do not terminate based on run time.
min_extant_orthospecies : int or |None|
Terminate and return results when at least this number of tips are
found in the confirmed-species tree (i.e., the pruned tree
consisting of only "good" species) and other specified conditions
are met.
max_extant_orthospecies : int or |None|
Terminate and return results when at least this number of tips are
found in the confirmed-species tree (i.e., the pruned tree
consisting of only "good" species).
num_extant_lineages : int or |None|
Terminate and return results when this number of tips are found in
the lineage tree (i.e. the tree with both incipient and good
species). If |None|, then do not terminate based on the
number of tipes on the incipient species tree.
is_retry_on_total_extinction : bool
If |False|, then a TreeSimTotalExtinctionException will be raised
if all lineages go extinct before the termination conditions are
met. Defaults to |True|: if all lineages go extinct before the
termination conditions are met, then the simulation is rerun, up to
a maximum of ``max_retries``.
max_retries : int
Maximum number of runs to execute in the event of
prematurely-terminated simulations due to all lineages going
extinct. Once this number or re-runs is exceed, then
TreeSimTotalExtinctionException is raised. Defaults to 1000. Set to
|None| to never quit trying.
Returns
-------
lineage_tree : |Tree| instance
A tree from the protracted speciation process, with all lineages
(good species as well as incipient species).
orthospecies_tree : |Tree| instance
A tree from the protracted speciation process with only "good" species.
"""
is_retry_on_total_extinction = kwargs.pop("is_retry_on_total_extinction", True)
max_retries = kwargs.pop("max_retries", 1000)
num_retries = 0
lineage_tree = None
orthospecies_tree = None
while True:
try:
lineage_tree, orthospecies_tree = self._generate_trees(**kwargs)
break
except ProcessFailedException:
if not is_retry_on_total_extinction:
raise
num_retries += 1
if max_retries is not None and num_retries > max_retries:
raise
assert lineage_tree is not None
return lineage_tree, orthospecies_tree
    def _generate_trees(self, **kwargs):
        # Core driver for one simulation attempt: runs the lineage-generation
        # process (possibly in two phases, one per side of the root) and then
        # compiles the accumulated lineage records into the lineage tree and
        # the orthospecies tree.
        max_time = kwargs.get("max_time", None)
        num_extant_lineages = kwargs.get("num_extant_lineages", None)
        min_extant_lineages = kwargs.get("min_extant_lineages", None)
        max_extant_lineages = kwargs.get("max_extant_lineages", None)
        num_extant_orthospecies = kwargs.get("num_extant_orthospecies", None)
        min_extant_orthospecies = kwargs.get("min_extant_orthospecies", None)
        max_extant_orthospecies = kwargs.get("max_extant_orthospecies", None)
        lineage_taxon_namespace = kwargs.get("lineage_taxon_namespace", None)
        species_taxon_namespace = kwargs.get("species_taxon_namespace", None)
        # One bookkeeping dict per phase (side of the root); counters start
        # at zero and lineage records accumulate in "lineage_collection".
        lineage_data = []
        for idx in range(2):
            lineage_data.append({
                "lineage_id": 0,
                "species_id": 0,
                "lineage_collection": []
            })
        phase_idx = 0
        while phase_idx < 2:
            # Run two passes if 'max_time' specified, with each pass building
            # up one side of the root and only accepted if there are at least
            # one surviving lineage; this way, condition crown age on
            # 'max_time'.
            self._generate_lineages(
                lineage_data=lineage_data,
                max_time=max_time,
                num_extant_lineages=num_extant_lineages,
                min_extant_lineages=min_extant_lineages,
                max_extant_lineages=max_extant_lineages,
                num_extant_orthospecies=num_extant_orthospecies,
                min_extant_orthospecies=min_extant_orthospecies,
                max_extant_orthospecies=max_extant_orthospecies,
                phase_idx=phase_idx,
                lineage_taxon_namespace=lineage_taxon_namespace,
                species_taxon_namespace=species_taxon_namespace,
            )
            if num_extant_orthospecies is not None or max_extant_orthospecies is not None or min_extant_orthospecies is not None:
                # Orthospecies-count termination: the trees are expected to
                # have been built during lineage generation; their absence
                # means the condition could not be met — fail this attempt.
                if "lineage_tree" in lineage_data[phase_idx]:
                    return lineage_data[phase_idx]["lineage_tree"], lineage_data[phase_idx]["orthospecies_tree"]
                else:
                    raise ProcessFailedException()
            elif num_extant_lineages is not None or max_extant_lineages is not None or min_extant_lineages is not None:
                # Lineage-count termination: single phase only.
                break
            elif phase_idx == 0 and (len(lineage_data[0]["orthospecies_lineages"]) + len(lineage_data[0]["incipient_species_lineages"]) > 0):
                # First side of the root survived; proceed to the second.
                phase_idx += 1
            elif phase_idx == 1 and self._check_good(
                    orthospecies_lineages=lineage_data[1]["orthospecies_lineages"],
                    incipient_species_lineages=lineage_data[1]["incipient_species_lineages"]):
                # Second side accepted; exit the phase loop.
                phase_idx += 1
        # lineage_collection = itertools.chain(
        #     lineage_data[0]["lineage_collection"],
        #     lineage_data[1]["lineage_collection"],
        #     )
        # Merge records from both phases before compiling the trees.
        lineage_collection = lineage_data[0]["lineage_collection"] + lineage_data[1]["lineage_collection"]
        lineage_tree = self._compile_lineage_tree(
            lineage_collection=lineage_collection,
            max_time=max_time if max_time is not None else lineage_data[phase_idx]["final_time"],
            is_drop_extinct=True,
        )
        orthospecies_tree = self._compile_species_tree(
            lineage_collection=lineage_collection,
            max_time=max_time if max_time is not None else lineage_data[phase_idx]["final_time"],
        )
        # Label/cross-link nodes and attach taxon namespaces before returning.
        return self._finalize_trees(
            lineage_tree=lineage_tree,
            lineage_taxon_namespace=lineage_taxon_namespace,
            orthospecies_tree=orthospecies_tree,
            species_taxon_namespace=species_taxon_namespace,
            lineage_collection=lineage_collection,
        )
def _generate_lineages(self, **kwargs):
current_time = 0.0
lineage_data = kwargs.get("lineage_data")
max_time = kwargs.get("max_time", None)
num_extant_lineages = kwargs.get("num_extant_lineages", None)
min_extant_lineages = kwargs.get("min_extant_lineages", None)
max_extant_lineages = kwargs.get("max_extant_lineages", None)
num_extant_orthospecies = kwargs.get("num_extant_orthospecies", None)
min_extant_orthospecies = kwargs.get("min_extant_orthospecies", None)
max_extant_orthospecies = kwargs.get("max_extant_orthospecies", None)
phase_idx = kwargs.get("phase_idx")
lineage_taxon_namespace = kwargs.get("lineage_taxon_namespace", None)
species_taxon_namespace = kwargs.get("species_taxon_namespace", None)
if phase_idx == 0:
lineage_data[phase_idx]["lineage_id"] = 1
lineage_data[phase_idx]["species_id"] = 1
initial_lineage = self._new_lineage(
lineage_id=lineage_data[phase_idx]["lineage_id"],
parent_lineage=None,
origin_time=-1e-10,
)
lineage_data[phase_idx]["orthospecies_lineages"] = [initial_lineage]
lineage_data[phase_idx]["incipient_species_lineages"] = []
else:
lineage_data[phase_idx]["lineage_id"] = lineage_data[0].get("lineage_id", 0) + 1
lineage_data[phase_idx]["species_id"] = lineage_data[0].get("species_id", 0)
initial_lineage = self._new_lineage(
lineage_id=lineage_data[phase_idx]["lineage_id"],
parent_lineage=lineage_data[0]["lineage_collection"][0],
origin_time=current_time,
)
lineage_data[phase_idx]["orthospecies_lineages"] = []
lineage_data[phase_idx]["incipient_species_lineages"] = [initial_lineage]
lineage_data[phase_idx]["lineage_collection"] = [initial_lineage]
lineage_collection = lineage_data[phase_idx]["lineage_collection"]
orthospecies_lineages = lineage_data[phase_idx]["orthospecies_lineages"]
incipient_species_lineages = lineage_data[phase_idx]["incipient_species_lineages"]
while True:
num_orthospecies = len(orthospecies_lineages)
num_incipient_species = len(incipient_species_lineages)
if num_incipient_species + num_orthospecies == 0:
raise TreeSimTotalExtinctionException()
## Draw time to next event
event_rates = []
# Event type 0
event_rates.append(self.speciation_initiation_from_orthospecies_rate * num_orthospecies)
# Event type 1
event_rates.append(self.orthospecies_extinction_rate * num_orthospecies)
# Event type 2
event_rates.append(self.speciation_initiation_from_incipient_species_rate * num_incipient_species)
# Event type 3
event_rates.append(self.speciation_completion_rate * num_incipient_species)
# Event type 4
event_rates.append(self.incipient_species_extinction_rate * num_incipient_species)
# All events
rate_of_any_event = sum(event_rates)
# Waiting time
waiting_time = self.rng.expovariate(rate_of_any_event)
# waiting_time = -math.log(self.rng.uniform(0, 1))/rate_of_any_event
if max_time and (current_time + waiting_time) > max_time:
current_time = max_time
break
# we do this here so that the (newest) tip lineages have the
# waiting time to the next event branch lengths
if (num_extant_lineages is not None
or min_extant_lineages is not None
or max_extant_lineages is not None):
has_lineage_count_requirements = True
if (
(num_extant_lineages is None or ((num_incipient_species + num_orthospecies) == num_extant_lineages))
and (min_extant_lineages is None or ((num_incipient_species + num_orthospecies) >= min_extant_lineages))
and (max_extant_lineages is None or ((num_incipient_species + num_orthospecies) == max_extant_lineages))
):
is_lineage_count_requirements_met = True
else:
is_lineage_count_requirements_met = False
else:
has_lineage_count_requirements = False
is_lineage_count_requirements_met = None
if max_extant_lineages is not None and (num_incipient_species + num_orthospecies) > max_extant_lineages:
raise ProcessFailedException()
if num_extant_orthospecies is not None or max_extant_orthospecies is not None or min_extant_orthospecies is not None:
## note: very expensive operation to count orthospecies leaves!
has_orthospecies_count_requirements = True
is_orthospecies_count_requirements_met = False
final_time = current_time + self.rng.uniform(0, waiting_time)
lineage_collection_snapshot = [lineage.clone() for lineage in itertools.chain(lineage_data[0]["lineage_collection"], lineage_data[1]["lineage_collection"])]
try:
orthospecies_tree = self._compile_species_tree(
lineage_collection=lineage_collection_snapshot,
max_time=final_time,
)
num_leaves = len(orthospecies_tree.leaf_nodes())
if (
(num_extant_orthospecies is None or num_leaves == num_extant_orthospecies)
and (min_extant_orthospecies is None or num_leaves >= min_extant_orthospecies)
and (max_extant_orthospecies is None or num_leaves <= max_extant_orthospecies)
):
lineage_tree = self._compile_lineage_tree(
lineage_collection=lineage_collection_snapshot,
max_time=final_time,
is_drop_extinct=True,
)
lineage_tree, orthospecies_tree = self._finalize_trees(
lineage_tree=lineage_tree,
lineage_taxon_namespace=lineage_taxon_namespace,
orthospecies_tree=orthospecies_tree,
species_taxon_namespace=species_taxon_namespace,
lineage_collection=lineage_collection_snapshot,
)
lineage_data[phase_idx]["lineage_tree"] = lineage_tree
lineage_data[phase_idx]["orthospecies_tree"] = orthospecies_tree
is_orthospecies_count_requirements_met = True
except ProcessFailedException:
pass
if max_extant_orthospecies is not None and num_leaves > max_extant_orthospecies:
raise ProcessFailedException
else:
has_orthospecies_count_requirements = False
is_orthospecies_count_requirements_met = None
if (
( (has_lineage_count_requirements and is_lineage_count_requirements_met) and (has_orthospecies_count_requirements and is_orthospecies_count_requirements_met) )
or ( (has_lineage_count_requirements and is_lineage_count_requirements_met) and (not has_orthospecies_count_requirements) )
or ( (not has_lineage_count_requirements) and (has_orthospecies_count_requirements and is_orthospecies_count_requirements_met) )
):
final_time = current_time + self.rng.uniform(0, waiting_time)
lineage_data[phase_idx]["final_time"] = final_time
break
else:
# add to current time
current_time += waiting_time
# Select event
event_type_idx = probability.weighted_index_choice(weights=event_rates, rng=self.rng)
assert (event_type_idx >= 0 and event_type_idx <= 4)
if event_type_idx == 0:
# Splitting of new incipient species lineage from orthospecies lineage
parent_lineage = self.rng.choice(orthospecies_lineages)
lineage_data[phase_idx]["lineage_id"] += 1
new_lineage = self._new_lineage(
lineage_id=lineage_data[phase_idx]["lineage_id"],
parent_lineage=parent_lineage,
origin_time=current_time,
)
lineage_collection.append(new_lineage)
incipient_species_lineages.append(new_lineage)
elif event_type_idx == 1:
# Extinction of an orthospecies lineage
lineage_idx = self.rng.randint(0, len(orthospecies_lineages)-1)
orthospecies_lineages[lineage_idx].extinction_time = current_time
del orthospecies_lineages[lineage_idx]
elif event_type_idx == 2:
# Splitting of new incipient species lineage from incipient lineage
parent_lineage = self.rng.choice(incipient_species_lineages)
lineage_data[phase_idx]["lineage_id"] += 1
new_lineage = self._new_lineage(
lineage_id=lineage_data[phase_idx]["lineage_id"],
parent_lineage=parent_lineage,
origin_time=current_time,
)
lineage_collection.append(new_lineage)
incipient_species_lineages.append(new_lineage)
elif event_type_idx == 3:
# Completion of speciation
lineage_idx = self.rng.randint(0, len(incipient_species_lineages)-1)
lineage = incipient_species_lineages[lineage_idx]
lineage.speciation_completion_time = current_time
lineage_data[phase_idx]["species_id"] += 1
lineage.species_id = lineage_data[phase_idx]["species_id"]
orthospecies_lineages.append(lineage)
del incipient_species_lineages[lineage_idx]
elif event_type_idx == 4:
# Extinction of an incipient_species lineage
lineage_idx = self.rng.randint(0, len(incipient_species_lineages)-1)
incipient_species_lineages[lineage_idx].extinction_time = current_time
del incipient_species_lineages[lineage_idx]
else:
raise Exception("Unexpected event type index: {}".format(event_type_idx))
def _new_lineage(self,
lineage_id,
parent_lineage,
origin_time=None,
):
if parent_lineage is None:
parent_lineage_id = 0
species_id = 1
is_parent_orthospecies = None
else:
parent_lineage_id = parent_lineage.lineage_id
species_id = parent_lineage.species_id
is_parent_orthospecies = parent_lineage.speciation_completion_time is not None
lineage = ProtractedSpeciationProcess._Lineage(
lineage_id=lineage_id,
parent_lineage_id=parent_lineage_id,
is_parent_orthospecies=is_parent_orthospecies,
origin_time=origin_time,
speciation_completion_time=None,
extinction_time=None,
species_id=species_id,
)
lineage._check_parent_lineage = parent_lineage # for _check_good
return lineage
def _correlate_lineage_and_species_trees(self, lineage_collection):
seen_lineage_nodes = set()
seen_species_nodes = set()
lineage_id_to_species_maps = {}
species_id_lineage_node_collection_map = {}
species_id_species_node_map = {}
for lineage in lineage_collection:
if not lineage.lineage_node:
assert not lineage.species_node
continue
if lineage.lineage_node.taxon is None:
continue
lineage_node = lineage.lineage_node
species_node = lineage.species_node
assert lineage_node not in seen_lineage_nodes
seen_lineage_nodes.add(lineage_node)
try:
species_id_lineage_node_collection_map[lineage_node._species_id].add(lineage_node)
except KeyError:
species_id_lineage_node_collection_map[lineage_node._species_id] = set([lineage_node])
if species_node is not None:
assert species_node not in seen_species_nodes
seen_species_nodes.add(lineage.species_node)
species_id_species_node_map[species_node._species_id] = species_node
setattr(species_node, self.species_tree_to_lineage_tree_node_attr, species_id_lineage_node_collection_map[species_node._species_id])
for species_id, lineage_nodes in species_id_lineage_node_collection_map.items():
for nd in lineage_nodes:
setattr(nd, self.lineage_tree_to_species_tree_node_attr, species_id_species_node_map[species_id])
    def _finalize_trees(self,
            lineage_tree,
            lineage_taxon_namespace,
            orthospecies_tree,
            species_taxon_namespace,
            lineage_collection,
            ):
        """Attach taxa to both trees and cross-link their nodes.

        Assigns taxa from the given namespaces (a fresh namespace is created
        when one is None, inside ``_build_taxa``), wires the lineage-tree <->
        species-tree node correspondences, and returns the
        ``(lineage_tree, orthospecies_tree)`` pair.
        """
        self._build_taxa(tree=lineage_tree, taxon_namespace=lineage_taxon_namespace)
        self._build_taxa(tree=orthospecies_tree, taxon_namespace=species_taxon_namespace)
        self._correlate_lineage_and_species_trees(lineage_collection=lineage_collection)
        return lineage_tree, orthospecies_tree
def _compile_species_tree(self,
lineage_collection,
max_time,
):
if self.species_lineage_sampling_scheme == "oldest":
lt = sorted(lineage_collection, key=lambda x: x.origin_time)
elif self.species_lineage_sampling_scheme == "youngest":
lt = sorted(lineage_collection, key=lambda x: x.origin_time, reverse=True)
elif self.species_lineage_sampling_scheme == "random":
lt = self.rng.sample(lineage_collection, len(lineage_collection))
else:
raise ValueError(sampling_scheme)
seen_species_ids = set()
to_restore_species_extinction_times = {}
for lineage_entry in lt:
if lineage_entry.extinction_time is None:
if lineage_entry.species_id not in seen_species_ids:
seen_species_ids.add(lineage_entry.species_id)
else:
to_restore_species_extinction_times[lineage_entry] = lineage_entry.extinction_time
lineage_entry.extinction_time = max_time # pseudo-extinction
t = self._compile_tree(
lineage_collection=lt,
max_time=max_time,
tree_type="species",
is_drop_extinct=True,
)
for k,v in to_restore_species_extinction_times.items():
k.extinction_time = v
return t
    def _compile_lineage_tree(self,
            lineage_collection,
            max_time,
            is_drop_extinct=True,
            ):
        """Convenience wrapper: build the lineage-level tree via ``_compile_tree``."""
        return self._compile_tree(
                lineage_collection=lineage_collection,
                max_time=max_time,
                tree_type="lineage",
                is_drop_extinct=is_drop_extinct,
                )
    def _compile_tree(self,
            lineage_collection,
            max_time,
            tree_type,
            is_drop_extinct=True,
            ):
        """Build a dendropy tree ("lineage" or "species" flavor) from lineages.

        Tip nodes are created by ``_build_lineage_queue``; internal nodes are
        then assembled bottom-up by repeatedly popping the youngest lineage
        and joining it with its parent's current node. Node labels are
        stashed on ``_taxon_label`` for later taxon assignment by
        ``_build_taxa``.

        Raises:
            ValueError: for an unknown ``tree_type``, or if the root
                (parent id 0) is reached while more than one lineage
                remains unmerged.
            ProcessFailedException: if the queue drains completely.
        """
        if tree_type == "lineage":
            label_template = self.lineage_label_format_template
            node_attr = "lineage_node"
        elif tree_type == "species":
            label_template = self.species_label_format_template
            node_attr = "species_node"
        else:
            raise ValueError(tree_type)
        if len(lineage_collection) == 1:
            # Degenerate case: a single lineage yields a single-node tree.
            tree = dendropy.Tree(is_rooted=True)
            label = label_template.format(species_id=1, lineage_id=0)
            tree.seed_node._taxon_label = label
            tree.seed_node._lineage_id = 0
            tree.seed_node._species_id = 1
            tree.seed_node.edge.length = max_time
            tree.seed_node._time = max_time
            setattr(lineage_collection[0], node_attr, tree.seed_node)
            return tree
        lineage_queue = self._build_lineage_queue(
                lineage_collection=lineage_collection,
                max_time=max_time,
                is_drop_extinct=is_drop_extinct,
                node_attr=node_attr,
                label_template=label_template,
                )
        while True:
            daughter_lineage = lineage_queue.pop_youngest_lineage()
            parent_lineage_id = daughter_lineage.parent_lineage_id
            try:
                # Parent is in the queue: join daughter and parent subtrees
                # under a new internal node dated at the daughter's origin.
                parent_lineage = lineage_queue.get_lineage(parent_lineage_id)
                start_time = daughter_lineage.origin_time
                # NOTE(review): ``sp_comp_time`` and ``parent_sp_comp_time``
                # are assigned but never used below — confirm intent.
                sp_comp_time = daughter_lineage.speciation_completion_time
                daughter_node = getattr(daughter_lineage, node_attr)
                end_time = daughter_node._time
                parent_sp_comp_time = parent_lineage.speciation_completion_time
                parent_node = getattr(parent_lineage, node_attr)
                parent_end_time = parent_node._time
                ch1 = parent_node
                ch1.edge.length = parent_end_time - start_time
                ch2 = daughter_node
                ch2.edge.length = end_time - start_time
                new_node = dendropy.Node()
                new_node.add_child(ch1)
                new_node.add_child(ch2)
                new_node._time = daughter_lineage.origin_time
                setattr(parent_lineage, node_attr, new_node)
            except KeyError:
                # Parent not (or no longer) in the queue: resurrect a clone of
                # the original parent and hand it the daughter's subtree.
                if parent_lineage_id != 0:
                    parent_lineage = lineage_queue.get_original_lineage(parent_lineage_id) #lineage_id_lineage_entry_idx_map[parent_lineage_id]
                    parent_lineage_clone = parent_lineage.clone()
                    setattr(parent_lineage_clone, node_attr, getattr(daughter_lineage, node_attr))
                    parent_sp_comp_time2 = parent_lineage.speciation_completion_time
                    # Propagate the daughter's speciation-completion time to the
                    # clone unless the parent completed before the daughter arose.
                    if not (parent_sp_comp_time2 is not None and (parent_sp_comp_time2 < daughter_lineage.origin_time)):
                        parent_lineage_clone.speciation_completion_time = daughter_lineage.speciation_completion_time
                    lineage_queue.push_lineage(parent_lineage_clone, is_copy=False)
            if len(lineage_queue) < 2:
                if len(lineage_queue) == 0:
                    raise ProcessFailedException
                elif len(lineage_queue) == 1:
                    initial_lineage = lineage_queue.get_initial_lineage()
                else:
                    # NOTE(review): unreachable given len < 2 above — appears to
                    # be defensive drain-the-queue fallback code.
                    initial_lineage = None
                    while True:
                        try:
                            initial_lineage = lineage_queue.pop_youngest_lineage()
                        except ProtractedSpeciationProcess._LineageQueue.LineageQueueEmptyException as e:
                            break
                    assert initial_lineage is not None
                # The surviving lineage's node becomes the seed (root) node.
                seed_node = getattr(initial_lineage, node_attr)
                seed_node.edge.length = initial_lineage.origin_time
                tree = dendropy.Tree(
                        seed_node=seed_node,
                        is_rooted=True,
                        )
                return tree
            if parent_lineage_id == 0:
                # Reached the root while multiple lineages remain: the input
                # lineage set is inconsistent.
                raise ValueError
    def _build_taxa(self, tree, taxon_namespace):
        """Assign taxa to the leaf nodes of ``tree`` and return the tree.

        Uses ``taxon_namespace`` (or a fresh one when None); leaf labels were
        stashed on ``_taxon_label`` during tree compilation and are consumed
        (deleted) here.
        """
        if taxon_namespace is None:
            taxon_namespace = dendropy.TaxonNamespace()
        # The tree must not already have taxa attached.
        assert len(tree.taxon_namespace) == 0
        tree.taxon_namespace = taxon_namespace
        for nd in tree.leaf_node_iter():
            nd.taxon = tree.taxon_namespace.require_taxon(label=nd._taxon_label)
            del nd._taxon_label
        return tree
    def _build_lineage_queue(self,
            lineage_collection,
            max_time,
            is_drop_extinct,
            node_attr,
            label_template,
            ):
        """Seed a ``_LineageQueue`` with a tip node for each retained lineage.

        Each retained lineage gets a fresh dendropy node holding its tip time
        (``max_time``, or the extinction time when extinct lineages are kept),
        a formatted ``_taxon_label``, and bookkeeping ids; the node is attached
        to the lineage under ``node_attr``. Extinct lineages dropped from the
        tree are still registered so ancestor lookups can find them.
        """
        lineageq = ProtractedSpeciationProcess._LineageQueue()
        for lineage in lineage_collection:
            if not is_drop_extinct or not lineage.is_extinct:
                node = dendropy.Node()
                if is_drop_extinct:
                    # Only extant lineages remain: every tip ends at max_time.
                    node._time = max_time
                else:
                    node._time = lineage.extinction_time if lineage.extinction_time is not None else max_time
                label = label_template.format(species_id=lineage.species_id, lineage_id=lineage.lineage_id)
                node._taxon_label = label
                node._lineage_id = lineage.lineage_id
                node._species_id = lineage.species_id
                setattr(lineage, node_attr, node)
                lineageq.push_lineage(lineage=lineage, is_copy=True)
            else:
                lineageq.register_lineage_reference(lineage=lineage)
        return lineageq
def _check_good(self, orthospecies_lineages, incipient_species_lineages):
if orthospecies_lineages:
return True
if not incipient_species_lineages:
return False
for lineage in incipient_species_lineages:
parent_lineage = lineage._check_parent_lineage
origin_time = lineage.origin_time
while parent_lineage is not None and parent_lineage.lineage_id > 1:
if parent_lineage.speciation_completion_time is not None and parent_lineage.speciation_completion_time < origin_time:
return True
origin_time = parent_lineage.origin_time
parent_lineage = parent_lineage._check_parent_lineage
return False | PypiClean |
/MindYourNeighbors-1.0.0.tar.gz/MindYourNeighbors-1.0.0/README.md | # MindYourNeighbors
Launching scripts depending on your direct neighbors
[](https://travis-ci.org/jaesivsm/MindYourNeighbors) [](https://coveralls.io/github/jaesivsm/MindYourNeighbors?branch=master) [](https://codeclimate.com/github/jaesivsm/MindYourNeighbors)
## How ?
*MindYourNeighbors* basically parses the ip neighbor table and, if results are found that are not excluded by the configuration, a configured command will be launched once.
## Why ?
The original use case :
A linux box used as a router as well as a home server. On this box runs several pieces of software which can be very bandwith consuming (ie: transmission).
I wanted to shut down this software when other users were using the network.
## Features
*MindYourNeighbors* behavior's can be controlled through configuration file placed either in `~/.config/mind_your_neighbords.cfg` or in `/etc/mind_your_neighbords.cfg`.
The configuration file is organized in sections. The **DEFAULT** section holds the default options which will be inherited by all the other sections (except for the logging level and the cache file path). All the other section will be read one by one ; each time the condition defined in `filter_on_regex` or `filter_on_machine` is matched and isn't excluded by those defined in neither `exclude`, `filter_out_regex` or `filter_out_machine` the cache will be filled with either a marker `neighbor` or a marker `no neighbor`.
Cache length can't exceed the `threshold`, as only the **REACHABLE** lines in the result of the `ip neigh` command are taken into account and as those lines vary quite a bit, the threshold parameter allows you to configure how quickly a change of state can occur.
When the cache is filled with only `neighbor` or `no neighbor` markers, the corresponding command is executed once.
##### Known Machines
You can fill a section with `known_machine` as title in which you'll write the name and mac address of machine you're aware of. You'll then be able to filter them out or in through `filter_out_machine` and `filter_on_machine`.
Please refer to the configuration example file for practical use cases.
## Options list
#### Default section options
* `loglevel`: allows you to choose the verbosity level in the syslog between `DEBUG`, `INFO`, `WARN`, `ERROR` and `FATAL`.
* `cache_file`: the path to the file where *MindYourNeighbors* will store its cache.
#### By sections, overridable options
* `threshold`: the number of consecutive matches (or un matches) it takes for *MindYourNeighbors* to execute the "match" or "no match" command.
* `filter_on_regex`: a regex to filters lines from the `ip neigh` command, lines will have to match to be counted
* `filter_out_regex`: a regex to filters lines from the `ip neigh` command, matching line will be excluded
* `exclude`: a comma separated list of string. If one of those string should be found in a `ip neigh` line, it should be excluded.
* `filter_on_machine`: a comma separated list of machine names to filter in (they must be registered in `known_machine`)
* `filter_out_machine`: a comma separated list of machine names to filter out (they must be registered in `known_machine`)
* `command_match`: A command to execute when the cache has been filed with `neighbor` marker.
* `command_no_match`: A command to execute when the cache has been filed with `no neighbor` marker.
* `device`: if none provide the `ip neigh` command will be parsed else `ip neigh show dev <device>`.
* `enable`: a boolean (`true` or `false`), enabling or disabling a section.
* `nslookup`: a boolean (`true` or `false`), making *MindYourNeighbors* looking up domain names for IP it'll print in the logs. Can be useful for debuging.
* `error_on_stderr`: a boolean (`true` or `false`); if `true` and the command print something on *stderr*, the command will be ignored and executed again.
* `cron`: a cron styled time description in which the section will be activated
| PypiClean |
/CaMo-0.0.5-py3-none-any.whl/camo/structure/linear_scm.py | from typing import Any, Dict, Iterable, Optional, Tuple
import numpy as np
import pandas as pd
from .causal_model import CausalModel
class LinearSCM(CausalModel):
    """Linear structural causal model (SCM).

    Endogenous variables ``V`` are related through the weighted adjacency
    matrix ``Beta`` (endogenous -> endogenous edges) and driven by exogenous
    variables through ``Gamma`` (exogenous -> endogenous edges). Exogenous
    variables are auto-named "$U_{...}$" after the endogenous variables they
    feed into.
    """
    _Beta: pd.DataFrame    # weighted adjacency among endogenous variables
    _Gamma: pd.DataFrame   # weights from exogenous to endogenous variables
    # Annotation corrected: ``_Do`` is initialized below as a single-row
    # DataFrame (NaN = "no intervention" on that variable), not a Series.
    _Do: pd.DataFrame
    def __init__(
        self,
        V: Optional[Iterable[str]] = None,
        Beta: Optional[np.array] = None,
        Gamma: Optional[np.array] = None
    ):
        # Build weighted adjacency matrix between endogenous variables
        self._Beta = pd.DataFrame(Beta, index=V, columns=V, copy=True)
        # Build weighted adjacency matrix between endogenous-exogenous variables
        # (defaults to one independent exogenous driver per endogenous variable).
        self._Gamma = np.identity(len(self._Beta)) if Gamma is None else Gamma
        self._Gamma = pd.DataFrame(self._Gamma, columns=self._Beta.columns, copy=True)
        # Rename each exogenous row "$U_{...}$" after the endogenous variables
        # it influences (the non-zero entries of its row).
        self._Gamma.index = [
            "$U_{" + ''.join(self._Gamma.columns[self._Gamma.loc[i] != 0]) + "}$"
            for i in self._Gamma.index
        ]
        # Initialize vector of interventions
        self._Do = pd.DataFrame([[np.nan] * len(self._Beta)], columns=self._Beta.columns)
        # Get edges from adjacency matrix
        E = self._Beta[self._Beta != 0].stack().index.tolist()
        E += self._Gamma[self._Gamma != 0].stack().index.tolist()
        super().__init__(self._Beta.index, self._Gamma.index, E)
    def add_vertex(self, v: str) -> None:
        # Structural mutation is not supported for linear SCMs yet.
        raise NotImplementedError() # FIXME:
    def del_vertex(self, v: str) -> None:
        raise NotImplementedError() # FIXME:
    def add_edge(self, u: str, v: str) -> None:
        raise NotImplementedError() # FIXME:
    def del_edge(self, u: str, v: str) -> None:
        raise NotImplementedError() # FIXME:
    def copy(self):
        # NOTE(review): the copy is rebuilt from Beta/Gamma only — pending
        # interventions stored in ``_Do`` are not carried over. Confirm intended.
        return type(self)(self._Beta.index, self._Beta, self._Gamma)
    @property
    def F(self) -> Dict[str, Any]:
        """Structural equations as a mapping: variable -> coefficient Series."""
        return self._Beta.T.to_dict("series")
    @property
    def Beta(self) -> pd.DataFrame:
        """Copy of the endogenous-to-endogenous weight matrix."""
        return self._Beta.copy()
    @property
    def Gamma(self) -> pd.DataFrame:
        """Copy of the exogenous-to-endogenous weight matrix."""
        return self._Gamma.copy()
    def do(self, **kwargs):
        """Return a copy of the model under interventions ``do(v=k)``.

        Each intervened variable is fixed to its constant and cut off from
        all its causes. Raises ValueError if no keyword names an endogenous
        variable.

        NOTE(review): ``del_edge`` currently raises NotImplementedError, so
        intervening on a variable that has parents will fail — confirm.
        """
        # Check if v is endogenous
        if not (kwargs.keys() & self._V):
            raise ValueError()
        # Copy model
        out = self.copy()
        # Set intervened variables
        for (v, k) in kwargs.items():
            # Fix v variable to constant k
            out._Beta[v], out._Gamma[v], out._Do[v] = 0, 0, k
            # Remove incoming edges
            for u in out.parents(v):
                out.del_edge(u, v)
        return out
    @classmethod
    def from_structure(
        cls,
        V: Iterable[str],
        E: Iterable[Tuple[str, str]]
    ):
        """Build a LinearSCM with unit weights from vertices and edges.

        Edge endpoints not listed in ``V`` are treated as exogenous variables.
        """
        V, U = list(V), set()
        # Check if both vertices are in a vertex set
        # else, add to exogenous variables
        for (u, v) in E:
            if u not in V:
                U.add(u)
            if v not in V:
                U.add(v)
        U = list(U)
        Beta = np.zeros((len(V), len(V)))
        Beta = pd.DataFrame(Beta, index=V, columns=V)
        for (u, v) in E:
            if u in V and v in V:
                Beta.loc[u, v] = 1
        Gamma = None
        if U:
            Gamma = np.zeros((len(U), len(V)))
            Gamma = pd.DataFrame(Gamma, index=U, columns=V)
            for (u, v) in E:
                if u in U and v in V:
                    Gamma.loc[u, v] = 1
        return cls(V, Beta, Gamma)
/Blend-0.1.5.tar.gz/Blend-0.1.5/README.rst | =====
Blend
=====
Merge, analyze, and optimize client-side assets for web applications and static web sites.
Example
=======
Given the following directory structure::
project
lib
jquery-1.7.2.min.js
src
app.js
components
menu.js
common
helpers.js
And the following ``app.js``::
/* app.js */
//= require jquery
//= require menu
var app = {};
And the following ``menu.js``::
/* menu.js */
//= require jquery
//= require helpers
var menu = {};
And the following ``helpers.js``::
/* helpers.js */
var helpers = {};
Running ``blend`` with no arguments from the ``project`` directory will produce this directory structure::
project
lib
jquery-1.7.2.min.js
output
app.js
app.min.js
menu.js
menu.min.js
src
app.js
components
menu.js
common
helpers.js
Where ``app.js`` has the following content::
/* app.js */
/* ... the minified JQuery code, included only once */
var helpers = {};
var menu = {}
var app = {};
Usage
=====
blend [options] [file1 [file2 [fileN]]]
Command Line Options
--------------------
Output
~~~~~~
``-o OUTPUT, --output=OUTPUT``
Where the file output will be written. The default is a directory at the root of the
project directory named ``output``
Path
~~~~~
``-p PATH, --path=PATH``
 A directory to be searched for required files. Multiple directories can be specified by
repeating the flag. If you do not
specify any directory with the PATH flag then only the working directory will be searched for required files.
Skip Working Directory
~~~~~~~~~~~~~~~~~~~~~~
``-s, --skipcwd``
Exclude the current working directory from the requirement search paths.
Specify A Configuration File
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``-c, --config``
 Specify a JSON configuration file that describes the analyzers and minifiers to be used.
Installation
============
From the Python Package Index
-----------------------------
::
pip install blend
From Source
-----------
::
git clone git://github.com/azavea/blend.git
cd blend
python setup.py install
Documentation
=============
http://azavea-blend.readthedocs.org
License
============
MIT
| PypiClean |
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_cy-gb.js | 'use strict';
// AngularJS i18n locale bundle for Welsh (United Kingdom) — "cy-gb".
// Auto-generated from Unicode CLDR data: localized day/month names,
// date/time formats, number/currency patterns, and the plural rules.
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
$provide.value("$locale", {
  "DATETIME_FORMATS": {
    "AMPMS": [
      "AM",
      "PM"
    ],
    "DAY": [
      "Dydd Sul",
      "Dydd Llun",
      "Dydd Mawrth",
      "Dydd Mercher",
      "Dydd Iau",
      "Dydd Gwener",
      "Dydd Sadwrn"
    ],
    "MONTH": [
      "Ionawr",
      "Chwefror",
      "Mawrth",
      "Ebrill",
      "Mai",
      "Mehefin",
      "Gorffennaf",
      "Awst",
      "Medi",
      "Hydref",
      "Tachwedd",
      "Rhagfyr"
    ],
    "SHORTDAY": [
      "Sul",
      "Llun",
      "Maw",
      "Mer",
      "Iau",
      "Gwen",
      "Sad"
    ],
    "SHORTMONTH": [
      "Ion",
      "Chwef",
      "Mawrth",
      "Ebrill",
      "Mai",
      "Meh",
      "Gorff",
      "Awst",
      "Medi",
      "Hyd",
      "Tach",
      "Rhag"
    ],
    "fullDate": "EEEE, d MMMM y",
    "longDate": "d MMMM y",
    "medium": "d MMM y HH:mm:ss",
    "mediumDate": "d MMM y",
    "mediumTime": "HH:mm:ss",
    "short": "dd/MM/yy HH:mm",
    "shortDate": "dd/MM/yy",
    "shortTime": "HH:mm"
  },
  "NUMBER_FORMATS": {
    "CURRENCY_SYM": "\u00a3",
    "DECIMAL_SEP": ".",
    "GROUP_SEP": ",",
    "PATTERNS": [
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 3,
        "minFrac": 0,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "",
        "posPre": "",
        "posSuf": ""
      },
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 2,
        "minFrac": 2,
        "minInt": 1,
        "negPre": "\u00a4-",
        "negSuf": "",
        "posPre": "\u00a4",
        "posSuf": ""
      }
    ]
  },
  "id": "cy-gb",
  // CLDR Welsh plural rules: distinct categories for n = 0, 1, 2, 3 and 6.
  "pluralCat": function(n, opt_precision) {  if (n == 0) {    return PLURAL_CATEGORY.ZERO;  }  if (n == 1) {    return PLURAL_CATEGORY.ONE;  }  if (n == 2) {    return PLURAL_CATEGORY.TWO;  }  if (n == 3) {    return PLURAL_CATEGORY.FEW;  }  if (n == 6) {    return PLURAL_CATEGORY.MANY;  }  return PLURAL_CATEGORY.OTHER;}
});
}]);
/HEBO-0.3.2-py3-none-any.whl/hebo/models/nn/eac/eac_ensemble.py |
# This program is free software; you can redistribute it and/or modify it under
# the terms of the MIT license.
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the MIT License for more details.
from copy import deepcopy
import pandas as pd
from multiprocessing import Pool
import torch
from torch import nn
from torch import FloatTensor, LongTensor
from torch.utils.data import DataLoader, TensorDataset
# from torch.utils.tensorboard import SummaryWriter
from hebo.design_space.design_space import DesignSpace
from hebo.models.base_model import BaseModel
from hebo.models.scalers import TorchMinMaxScaler, TorchStandardScaler
from hebo.models.nn.eac.eac_model import EACRNN, EACTransformerEncoder, EACMLP
from hebo.models.nn.deep_ensemble import DeepEnsemble
class EACEnsemble(DeepEnsemble):
    """Deep ensemble of stage-aware ("EAC") neural surrogates.

    Extends ``DeepEnsemble`` for multi-stage design spaces: stage parameters
    may take the value ``'null'``, and any ``'null'`` stage masks out all
    subsequent stages before the data is transformed (see ``mask_stage``).

    NOTE(review): the 'null'-stage convention is inferred from ``mask_stage``
    usage here — confirm against how the design space is constructed.
    """
    support_ts = True # support Thompson sampling
    support_multi_output = True
    support_warm_start = True
    def __init__(self,
                 num_cont: int,
                 num_enum: int,
                 num_out: int,
                 **conf):
        """Configure the ensemble.

        Recognized ``conf`` keys (beyond DeepEnsemble's): ``stages`` (ordered
        stage parameter names), ``space`` (the design space used for
        transform/inverse_transform), and ``model_type`` ('rnn'/'lstm',
        'transformer', or 'mlp').
        """
        super(EACEnsemble, self).__init__(num_cont, num_enum, num_out, **conf)
        self.stages = self.conf.get('stages', [])
        self.space = self.conf.get('space', None)
        self._check_stages()
        # Select the base network class from the configured model type.
        self.model_type = self.conf.setdefault('model_type', 'rnn')
        if self.model_type.lower() == 'rnn' or self.model_type.lower() == 'lstm':
            self.basenet_cls = EACRNN
        elif self.model_type.lower() == 'transformer':
            self.basenet_cls = EACTransformerEncoder
        elif self.model_type.lower() == 'mlp':
            self.basenet_cls = EACMLP
        else:
            raise NotImplementedError(f'{self.model_type} has not been supported.')
    def _check_stages(self):
        # Every "<param>#<stage>" suffix found in the space's parameter names
        # must match the configured stage list exactly.
        stages = [name.split('#')[-1] for name in self.space.para_names if '#' in name]
        assert set(stages) == set(self.stages), \
            'Stages should be consistent to that in space'
    def fit(self, Xc: FloatTensor, Xe: LongTensor, y: FloatTensor, **fitting_conf):
        """Fit all ensemble members on stage-masked, scaled data.

        Rows whose targets are entirely non-finite are dropped. After
        fitting, an in-sample prediction error provides a crude per-output
        noise estimate (``self.noise_est``).
        """
        # Keep rows that have at least one finite target value.
        valid = torch.isfinite(y).any(dim=1)
        X = self.space.inverse_transform(Xc[valid], Xe[valid])
        X = self.mask_stage(X)
        Xc_, Xe_ = self.space.transform(X)
        y_ = y[valid]
        if not self.fitted:
            # Fit input/output scalers only on the first call (warm start).
            self.fit_scaler(Xc=Xc_, Xe=Xe_, y=y_)
            self.yscaler.fit(y_)
        Xc_t, Xe_t, y_t = self.trans(Xc_, Xe_, y_)
        if self.num_process > 1:
            # Train ensemble members in parallel worker processes.
            with Pool(self.num_process) as p:
                self.models = p.starmap(
                    self.fit_one,
                    [(Xc_t.clone(), Xe_t.clone(), y_t.clone(), idx)
                     for idx in range(self.num_ensembles)])
        else:
            self.models = [self.fit_one(Xc_t.clone(), Xe_t.clone(), y_t.clone(), idx)
                           for idx in range(self.num_ensembles)]
        assert None not in self.models
        self.sample_idx = 0
        with torch.no_grad():
            # Mean squared in-sample error as a per-output noise estimate.
            py, _ = self.predict(Xc_, Xe_)
            err = py - y_
            self.noise_est = (err**2).mean(dim = 0).detach().clone()
        for i in range(self.num_ensembles):
            self.models[i].eval()
    def predict(self, Xc: FloatTensor, Xe: LongTensor) -> (FloatTensor, FloatTensor):
        """Return predictive mean and variance in the original output scale."""
        X = self.space.inverse_transform(Xc, Xe)
        X = self.mask_stage(X)
        Xc_, Xe_ = self.space.transform(X)
        Xc_t, Xe_t = self.trans(Xc_, Xe_)
        # Stack member predictions: shape (num_ensembles, n, out_dim).
        preds = torch.stack([self.models[i](Xc=Xc_t, Xe=Xe_t)
                             for i in range(self.num_ensembles)])
        if self.output_noise:
            # Members output (mean, noise variance) pairs: combine the
            # across-member variance with the mean predicted noise.
            mu = preds[:, :, :self.num_out]
            sigma2 = preds[:, :, self.num_out:]
            py = mu.mean(dim=0)
            ps2 = mu.var(dim=0, unbiased=False) + sigma2.mean(dim=0)
        else:
            py = preds.mean(dim=0)
            ps2 = 1e-8 + preds.var(dim=0, unbiased=False)
        return self.yscaler.inverse_transform(FloatTensor(py)), \
            ps2 * self.yscaler.std**2
    def sample_f(self):
        """Return one member's prediction function (round-robin Thompson sample)."""
        assert self.fitted
        idx = self.sample_idx
        self.sample_idx = (self.sample_idx + 1) % self.num_ensembles
        def f(Xc: FloatTensor, Xe: LongTensor) -> FloatTensor:
            # Evaluate the selected member on stage-masked inputs and
            # return the mean head in the original output scale.
            model = self.models[idx]
            X = self.space.inverse_transform(Xc, Xe)
            X = self.mask_stage(X)
            Xc_, Xe_ = self.space.transform(X)
            Xc_t, Xe_t = self.trans(Xc_, Xe_)
            pred = model(Xc_t, Xe_t)[:, :self.num_out]
            return self.yscaler.inverse_transform(pred)
        return f
    def mask_stage(self, X: pd.DataFrame) -> pd.DataFrame:
        """Propagate 'null' stages: once a stage is 'null', set all later stages to 'null'."""
        for i, stage in enumerate(self.stages):
            stage_null = X[stage] == 'null'
            rest_stages = self.stages[i+1:]
            X.loc[stage_null, rest_stages] = 'null'
        return X
/morelia-0.9.2-py3-none-any.whl/morelia/grammar.py | import copy
import itertools
import re
from abc import ABC, abstractmethod
from typing import Iterable, Type
from morelia.exceptions import MissingStepError
from morelia.i18n import TRANSLATIONS
# Matches "<name>" placeholders used in scenario-outline steps.
PLACEHOLDER_RE = re.compile(r"\<(\w+)\>")
class Visitor(ABC): # pragma: nocover
    """Visitor interface over the Gherkin parse tree.

    Concrete visitors implement one ``visit_*`` method per node type;
    ``visit_children`` is a concrete helper that dispatches each child back
    into the visitor via each node's ``accept`` method.
    """
    @abstractmethod
    def visit_feature(self, node: "Feature", children: Iterable["Node"]) -> None:
        pass
    @abstractmethod
    def visit_scenario(self, node: "Scenario", children: Iterable["Node"]) -> None:
        pass
    @abstractmethod
    def visit_step(self, node: "Step", children: Iterable["Node"]) -> None:
        pass
    @abstractmethod
    def visit_background(self, node: "Background") -> None:
        pass
    @abstractmethod
    def visit_row(self, node: "Row") -> None:
        pass
    @abstractmethod
    def visit_examples(self, node: "Examples", children: Iterable["Node"]) -> None:
        pass
    @abstractmethod
    def visit_comment(self, node: "Comment") -> None:
        pass
    def visit_children(self, children: Iterable["Node"]) -> None:
        # Concrete helper: dispatch each child node back into this visitor.
        for child in children:
            child.accept(self)
class Node(ABC):
    """Base class for every element of the parsed feature tree.

    A node records its raw source line(s), extracts the *predicate*
    (the text after the keyword), and links itself to the nearest
    allowed parent among the previously parsed nodes.
    """

    # Node classes that may serve as this node's parent; () means "root".
    allowed_parents = ()  # type: Iterable[Type[Node]]

    def __init__(
        self, source="", line_number=0, language="en", labels=None, predecessors=()
    ):
        """Create a node and hook it into the tree.

        :param source: raw text of the line that produced this node
        :param line_number: line number in the feature file
        :param language: language key used to translate keywords
        :param labels: labels attached directly to this node
        :param predecessors: already-parsed nodes, newest last; the
            nearest one whose type is in ``allowed_parents`` becomes
            this node's parent.  (Fixed: the default was a mutable
            ``[]``; it is only iterated, but a shared mutable default
            is an accident waiting to happen, so it is now a tuple.)
        """
        self.__source = source
        self.__line_number = line_number
        self.__language = language
        self.__labels = labels if labels is not None else []
        self.steps = []  # child nodes, in parse order
        self.parent = None
        self.__predicate = self.__extract_predicate()
        self.parent = self.__find_parent(predecessors)
        self.__connect_to_parent()
        self._validate_predicate()

    def __connect_to_parent(self):
        # Register this node as a child of its parent, if any.
        if self.parent:
            self.parent.add_child(self)

    def __find_parent(self, predecessors):
        """Return the nearest predecessor of an allowed parent type."""
        allowed_parents = self.allowed_parents
        if not allowed_parents and predecessors:
            # A parentless node type (Feature) must be the first node.
            self.enforce(False, "Only one Feature per file")
        for step in predecessors[::-1]:
            if isinstance(step, allowed_parents):
                return step
        return None

    def __extract_predicate(self):
        # Strip the (translated) keyword prefix from the source line.
        node_re = self.__get_compiled_pattern(self.__language)
        return node_re.sub("", self.source).strip()

    @classmethod
    def match(cls, line, language):
        """Return a regex match if *line* starts with this node's keyword."""
        node_re = cls.__get_compiled_pattern(language)
        return node_re.match(line)

    @classmethod
    def __get_compiled_pattern(cls, language, __memo={}):
        # Deliberate mutable default: memoizes the compiled pattern
        # per (class, language) pair for the life of the process.
        try:
            return __memo[cls, language]
        except KeyError:
            pattern = cls._get_pattern(language)
            node_re = __memo[cls, language] = re.compile(pattern)
            return node_re

    @classmethod
    def _get_pattern(cls, language):
        """Build the keyword-matching regex for this node type."""
        class_name = cls.__name__
        name = class_name.lower()
        name = TRANSLATIONS[language].get(name, class_name)
        return r"^\s*({name}):?(\s+|$)".format(name=name)

    @property
    def source(self):
        """Raw source text (may span several appended lines)."""
        return self.__source

    @property
    def line_number(self):
        """Line number of the first source line."""
        return self.__line_number

    @property
    def predicate(self):
        """Source text with the leading keyword removed."""
        return self.__predicate

    def append_line(self, line):
        """Attach a continuation line to this node's source and predicate."""
        self.__source += "\n" + line
        self.__predicate = (self.__predicate + "\n" + line.strip()).strip()
        self._validate_predicate()

    def _validate_predicate(self):
        """Hook for subclasses; the base node accepts any predicate."""
        return  # looks good! (-:

    def get_labels(self):
        """Return this node's labels plus all ancestors' labels."""
        labels = self.__labels[:]
        if self.parent:
            labels.extend(self.parent.get_labels())
        return labels

    def get_all_steps(self):
        """Recursively yield the steps of all children."""
        return itertools.chain.from_iterable(
            child.get_all_steps() for child in self.steps
        )

    def add_child(self, child):
        self.steps.append(child)

    def enforce(self, condition, diagnostic):
        """Raise a SyntaxError locating this node unless *condition* holds."""
        if not condition:
            text = ""
            offset = 1
            if self.parent:
                text = self.parent.source
                offset = 5
            text += self.source
            text = text.replace("\n\n", "\n").replace("\n", "\n\t")
            raise SyntaxError(
                diagnostic, (self.get_filename(), self.line_number, offset, text)
            )

    def get_filename(self):
        """Return the feature file name, walking up to the root if needed."""
        try:
            return self.parent.get_filename() if self.parent else self.filename
        except AttributeError:
            return None

    def interpolated_source(self):
        return self.source + "\n"

    def format_fault(self, diagnostic):
        """Format a traceback-style report locating *diagnostic* at this node."""
        parent_reconstruction = ""
        if self.parent:
            parent_reconstruction = self.parent.source.strip("\n")
        args = (
            self.get_filename(),
            self.line_number,
            parent_reconstruction,
            self.source,
            diagnostic,
        )
        return '\n File "%s", line %s, in %s\n %s\n%s' % args

    @abstractmethod
    def accept(self, visitor: Visitor) -> None:  # pragma: nocover
        pass
class Feature(Node):
    """Root node produced for the ``Feature:`` keyword."""

    def accept(self, visitor: Visitor) -> None:
        """Hand this feature and all of its children to the visitor."""
        visitor.visit_feature(self, self.steps)

    def prepend_steps(self, scenario):
        """Inject background steps into *scenario* if a Background leads."""
        first_child = self.steps[0]
        try:
            # Only a Background defines prepend_steps; any other first
            # child simply lacks the attribute and is ignored.
            first_child.prepend_steps(scenario)
        except AttributeError:
            pass
class Scenario(Node):
    """A ``Scenario:`` block: an ordered collection of steps.

    Supports scenario outlines: when steps carry data tables, the
    scenario is visited once per combination of table rows.
    """
    allowed_parents = (Feature,)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Current row selection, one index per tabled child;
        # [0] means "first (or only) row everywhere".
        self.row_indices = [0]
    def accept(self, visitor: Visitor) -> None:
        """Visit this scenario once per permutation of its table rows."""
        # Pull background steps in first so they are part of self.steps
        # before the emptiness check and the visits.
        self.parent.prepend_steps(self)
        self.enforce(
            0 < len(self.steps),
            "Scenario without step(s) - Step, Given, When, Then, And, or #",
        )
        schedule = self.permute_schedule()
        old_row_indices = self.row_indices
        try:
            for indices in schedule:
                # Steps read row_indices while being visited, so the
                # selection must be installed before each pass.
                self.row_indices = indices
                visitor.visit_scenario(self, self.steps)
        finally:
            self.row_indices = old_row_indices
    def permute_schedule(self):
        """Return every combination of row indices across tabled steps."""
        dims = self.count_Row_dimensions()
        return _permute_indices(dims)
    def count_Row_dimensions(self):
        """Data-row count for each RowParent child (0 = no table)."""
        return [step.rows_number for step in self.steps if isinstance(step, RowParent)]
    def get_all_steps(self):
        """Yield only the Step children (tables and comments excluded)."""
        return (step for step in self.steps if isinstance(step, Step))
class Background(Node):
    """A ``Background:`` block whose steps are shared by all scenarios."""
    allowed_parents = (Feature,)
    def accept(self, visitor: Visitor) -> None:
        visitor.visit_background(self)
    def prepend_steps(self, scenario):
        """Copy this background's steps to the front of *scenario*.

        The copies are cached on the scenario (``background_steps``) so
        repeated visits do not prepend them twice.
        """
        try:
            # Already done for this scenario?  The attribute is the cache.
            return scenario.background_steps
        except AttributeError:
            background_steps = []
            for step in self.steps:
                # Shallow-copy so the clone can be re-parented without
                # detaching the original from the background.
                new_step = copy.copy(step)
                new_step.parent = scenario
                background_steps.append(new_step)
            scenario.steps = background_steps + scenario.steps
            scenario.background_steps = background_steps
            return background_steps
    def count_Row_dimensions(self):
        # A background contributes no row permutations.
        return [0]
    def get_all_steps(self):
        """Yield only the Step children."""
        return (step for step in self.steps if isinstance(step, Step))
class RowParent(Node):
    """Base for nodes that can own a data table made of Row children."""

    @property
    def rows_number(self):
        """Number of data rows (header excluded), never negative."""
        data_rows = len(self.get_rows()) - 1  # first row is the header
        return data_rows if data_rows > 0 else 0

    def get_rows(self):
        """Return this node's Row children, in source order."""
        return [child for child in self.steps if isinstance(child, Row)]
class Step(RowParent):
    """A single executable step, matched against user step methods."""
    allowed_parents = (Scenario, Background)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.payload = ""  # multi-line payload text attached to the step
    def accept(self, visitor: Visitor) -> None:
        visitor.visit_step(self, self.steps)
    def find_method(self, matcher):
        """Find method matching step.

        :param IStepMatcher matcher: object matching methods by given predicate
        :returns: (method, args, kwargs) tuple
        :rtype: tuple
        :raises MissingStepError: if method matching step not found
        """
        predicate = self.predicate
        augmented_predicate = self.__get_interpolated_predicate()
        method, args, kwargs = matcher.find(predicate, augmented_predicate)
        if method:
            return method, args, kwargs
        # No match: ask the matcher for a suggested stub and report it.
        suggest, method_name, docstring = matcher.suggest(predicate)
        raise MissingStepError(predicate, suggest, method_name, docstring)
    def interpolated_source(self):
        """Source with <placeholders> replaced by current table values."""
        augmented_predicate = self.__get_interpolated_predicate()
        return self.source.replace(self.predicate, augmented_predicate)
    def __get_interpolated_predicate(self):
        # Interpolation only applies inside a parent that has tables
        # and when the predicate actually contains <placeholders>.
        if self.parent is None:
            return self.predicate
        if self.__parent_has_no_rows():
            return self.predicate
        placeholders = PLACEHOLDER_RE.findall(self.predicate)
        if not placeholders:
            return self.predicate
        return self.__replace_placeholders_in_predicate(placeholders)
    def __parent_has_no_rows(self):
        dims = self.parent.count_Row_dimensions()
        return not any(dims)
    def __replace_placeholders_in_predicate(self, placeholders):
        copy = self.predicate[:]
        # row_indices selects the current row for each tabled sibling,
        # as driven by Scenario.accept's permutation schedule.
        row_indices = self.parent.row_indices
        siblings = self.parent.steps
        for step_idx, row_idx in enumerate(row_indices):
            step = siblings[step_idx]
            table = step.get_rows()
            if len(table) > 1:
                header = table[0]
                body = table[1:]
                for column_idx, column_title in enumerate(header.values):
                    value = body[row_idx][column_idx]
                    copy = self.__replace_placeholders(
                        column_title, value, placeholders, copy
                    )
        return copy
    def __replace_placeholders(self, column_title, table_value, placeholders, copy):
        # Substitute only when the column title names a placeholder.
        for placeholder in placeholders:
            if column_title == placeholder:
                return self.__replace_placeholder(copy, placeholder, table_value)
        return copy
    def __replace_placeholder(self, copy, placeholder, table_value):
        # Escape newlines so the interpolated predicate stays one line.
        table_value = table_value.replace("\n", "\\n")
        return copy.replace(
            "<{placeholder}>".format(placeholder=placeholder), table_value
        )
class Given(Step):
    """Step introduced by the ``Given`` keyword (precondition)."""
    pass
class When(Step):
    """Step introduced by the ``When`` keyword (action)."""
    pass
class Then(Step):
    """Step introduced by the ``Then`` keyword (assertion)."""
    pass
class And(Step):
    """Step that continues the preceding step's kind."""
    pass
class But(And):
    """Synonym of ``And`` with contrastive wording."""
    pass
class Examples(RowParent):
    """``Examples:`` table attached to a Scenario (outline data)."""
    allowed_parents = (Scenario,)
    def accept(self, visitor: Visitor) -> None:
        visitor.visit_examples(self, self.steps)
class Row(Node):
    """One ``| ... |`` table row under a Step or Examples block."""

    allowed_parents = (Step, Examples)

    # NOTE: the explicit __init__ that called _validate_predicate again
    # was removed -- Node.__init__ already invokes _validate_predicate()
    # as its final action, so the row was being parsed twice.

    def _validate_predicate(self):
        """Split the row text into stripped cell values."""
        # TODO: validate that grandparent is not Background
        row = re.split(r" \|", re.sub(r"\|$", "", self.predicate))
        self.__values = [s.strip() for s in row]

    def __getitem__(self, column_idx):
        return self.__values[column_idx]

    @property
    def values(self):
        """Cell values of this row, stripped of padding."""
        return self.__values

    def accept(self, visitor: Visitor) -> None:
        visitor.visit_row(self)

    @classmethod
    def _get_pattern(cls, language):
        # Rows are keyword-less: they simply start with a pipe.
        return r"^\s*(\|):?\s+"
class Comment(Node):
    """A ``#`` comment line; may attach to any node type."""
    allowed_parents = (Node,)
    def accept(self, visitor: Visitor) -> None:
        visitor.visit_comment(self)
    @classmethod
    def _get_pattern(cls, language):
        # Comments are language-independent; match() anchors at the start.
        return r"\s*(\#)"
    def _validate_predicate(self):
        # A comment must stay on a single line.
        self.enforce("\n" not in self.predicate, "linefeed in comment")
def _permute_indices(arr):
product_args = list(_imap(arr))
result = list(itertools.product(*product_args))
return result
# tx to Chris Rebert, et al, on the Python newsgroup for curing my brainlock here!!
def _imap(*iterables):
iterables = [iter(i) for i in iterables]
while True:
try:
args = [next(i) for i in iterables]
yield _special_range(*args)
except StopIteration:
return
def _special_range(n):
return range(n) if n else [0] | PypiClean |
/CallFlow-1.3.0.tar.gz/CallFlow-1.3.0/callflow/utils/logger.py |
import logging
import colorlog
from logging import getLogger as get_logger # noqa
# ------------------------------------------------------------------------------
# Log line layout: timestamp - logger:function:line - level - message.
LOG_FMT = (
    "%(asctime)s - %(name)s:%(funcName)s:%(lineno)s - %(levelname)s - %(message)s"
)
# Per-level colors consumed by colorlog.ColoredFormatter.
LOG_COLORS = {
    "DEBUG": "cyan",
    "INFO": "green",
    "WARNING": "purple",
    "ERROR": "bold_red",
    "CRITICAL": "red",
}
# ------------------------------------------------------------------------------
def append_mem_usage(message):
    """Return *message* prefixed with the current process memory usage."""
    from .utils import get_memory_usage

    usage = get_memory_usage()
    return "[{}]: {}".format(usage, message)
# Drop-in replacements for the logging.Logger level methods that prefix
# every message with the process memory usage (installed by init_logger
# when mem_usage=True).
def _log_debug_with_memory(self, message, *args, **kws):
    # Mirrors Logger.debug; delegates to the internal _log machinery.
    self._log(logging.DEBUG, append_mem_usage(message), args, **kws)
def _log_info_with_memory(self, message, *args, **kws):
    # Mirrors Logger.info.
    self._log(logging.INFO, append_mem_usage(message), args, **kws)
def _log_warning_with_memory(self, message, *args, **kws):
    # Mirrors Logger.warning.
    self._log(logging.WARNING, append_mem_usage(message), args, **kws)
def _log_error_with_memory(self, message, *args, **kws):
    # Mirrors Logger.error.
    self._log(logging.ERROR, append_mem_usage(message), args, **kws)
def _log_critical_with_memory(self, message, *args, **kws):
    # Mirrors Logger.critical.
    self._log(logging.CRITICAL, append_mem_usage(message), args, **kws)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def init_logger(**kwargs):
    """Configure the root logger for CallFlow.

    Keyword Args:
        level (int): verbosity, 1 (debug) through 5 (critical). Default 2.
        color (bool): colorize console output. Default True.
        file (str): path of a log file; "" logs to the console. Default "".
        mem_usage (bool): prefix messages with memory usage. Default False.
    """
    # extract the logging parameters (defaults given)
    level = int(kwargs.get("level", 2))
    # BUG FIX: this used to be str(kwargs.get("color", True)), which
    # turned False into the truthy string "False" -- color could never
    # be disabled.  Coerce to bool instead.
    do_color = bool(kwargs.get("color", True))
    file = str(kwargs.get("file", ""))
    mem_usage = bool(kwargs.get("mem_usage", False))

    # --------------------------------------------------------------------------
    # map the 1..5 verbosity scale onto logging's levels
    assert 1 <= level <= 5
    level = {
        1: logging.DEBUG,
        2: logging.INFO,
        3: logging.WARN,
        4: logging.ERROR,
        5: logging.CRITICAL,
    }[level]

    # -------------------------------------------------------------------------
    # create the formatter; colored output only makes sense on a console
    if do_color and file == "":
        formatter = colorlog.ColoredFormatter(
            "%(log_color)s" + LOG_FMT, log_colors=LOG_COLORS
        )
    else:
        formatter = logging.Formatter(LOG_FMT)

    # create a handler: stream for console, file otherwise
    if file == "":
        sh = logging.StreamHandler()
    else:
        sh = logging.FileHandler(file)
    sh.setFormatter(formatter)

    # finally, configure the root logger
    logger = logging.getLogger()  # root logger
    logger.setLevel(level)
    logger.addHandler(sh)

    # --------------------------------------------------------------------------
    # optionally patch the Logger level methods to report memory usage;
    # the "Enabled" banner below is skipped in that case (original behavior)
    if mem_usage:
        logging.Logger.info = _log_info_with_memory
        logging.Logger.debug = _log_debug_with_memory
        logging.Logger.warning = _log_warning_with_memory
        logging.Logger.error = _log_error_with_memory
        logging.Logger.critical = _log_critical_with_memory
        return

    # --------------------------------------------------------------------------
    # Print the level of logging: everything at or above the level shows.
    logger.debug("Enabled")
    logger.info("Enabled")
    logger.warning("Enabled")
    logger.error("Enabled")
    logger.critical("Enabled")
# ------------------------------------------------------------------------------ | PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/bower_components/web-animations-js/docs/examples.md | #Examples of using Web Animations
Property indexed keyframes syntax
---------------------------------
- Each CSS property specifies its keyframe values as a list; different properties may have differently sized lists.
- The `easing` property applies its timing function to all keyframes.
[**Live demo**](http://jsbin.com/qiyeriruru/edit?js,output)
```javascript
element.animate({
transform: [
'scaleY(0.5)',
'scaleX(0.5)',
'scaleY(0.5)',
],
background: [
'red',
'blue',
'orange',
'red',
],
easing: 'ease-in-out',
}, {
duration: 2000,
iterations: Infinity,
});
```
Keyframe list syntax
--------------------
- Keyframes can be specified as a list of multiple CSS property values.
- Individual keyframes can be given specific offsets and easings.
- Not all properties need to be specified in every keyframe.
- Offsets are implicitly distributed if not specified.
[**Live demo**](http://jsbin.com/yajatoyere/edit?js,output)
```javascript
element.animate([
{
background: 'red',
transform: 'none',
easing: 'ease-out',
},
{
offset: 0.1,
transform: 'translateY(100px)',
easing: 'ease-in-out',
},
{
offset: 0.2,
transform: 'translate(100px, 100px)',
easing: 'ease-in-out',
},
{
offset: 0.4,
transform: 'translateX(100px)',
easing: 'ease-out',
},
{
background: 'blue',
transform: 'none',
},
], {
duration: 4000,
iterations: Infinity,
});
```
Timing parameters
-----------------
- Web Animations inherits many of its timing parameters from CSS Animations.
- See the [specification](http://w3c.github.io/web-animations/#animationeffecttimingreadonly) for details on each parameter.
[**Live demo**](http://jsbin.com/dabehipiyo/edit?js,output)
```javascript
element.animate({
transform: ['none', 'translateX(100px)'],
background: ['green', 'lime'],
}, {
// Apply effect during delay.
fill: 'backwards',
// Delay starting by 500ms.
delay: 500,
// Iterations last for 2000ms.
duration: 2000,
// Start at 25% through an iteration.
iterationStart: 0.25,
// Run for 2 iterations.
iterations: 2,
// Play every second iteration backwards.
direction: 'alternate',
// Stop animating 500ms earlier.
endDelay: -500,
// The timing function to use with each iteration.
easing: 'ease-in-out',
});
```
Playback controls
-----------------
- element.animate() returns an Animation object with basic playback controls.
- See the [specification](http://w3c.github.io/web-animations/#the-animation-interface) for details on each method.
[**Live demo**](http://jsbin.com/kutaqoxejo/edit?js,output)
```javascript
var animation = element.animate({
transform: ['none', 'translateX(200px)'],
background: ['red', 'orange'],
}, {
duration: 4000,
fill: 'both',
});
animation.play();
animation.reverse();
animation.pause();
animation.currentTime = 2000;
animation.playbackRate += 0.25;
animation.playbackRate -= 0.25;
animation.finish();
animation.cancel();
```
Transitioning states with element.animate()
-------------------------------------------
- This is an example of how to animate from one state to another using Web Animations.
[**Live demo**](http://jsbin.com/musufiwule/edit?js,output)
```javascript
var isOpen = false;
var openHeight = '100px';
var closedHeight = '0px';
var duration = 300;
button.addEventListener('click', function() {
// Prevent clicks while we transition states.
button.disabled = true;
button.textContent = '...';
// Determine where we're animation from/to.
var fromHeight = isOpen ? openHeight : closedHeight;
var toHeight = isOpen ? closedHeight : openHeight;
// Start an animation transitioning from our current state to the final state.
var animation = element.animate({ height: [fromHeight, toHeight] }, duration);
// Update the button once the animation finishes.
animation.onfinish = function() {
isOpen = !isOpen;
button.textContent = isOpen ? 'Close' : 'Open';
button.disabled = false;
};
// Put our element in the final state.
// Inline styles are overridden by active animations.
// When the above animation finishes it will stop applying and
// the element's style will fall back onto this inline style value.
element.style.setProperty('height', toHeight);
});
```
Generating animations
---------------------
- The Javascript API allows for procedurally generating a diverse range of interesting animations.
[**Live demo**](http://jsbin.com/xolacasiyu/edit?js,output)
```html
<!DOCTYPE html>
<script src="https://rawgit.com/web-animations/web-animations-js/master/web-animations.min.js"></script>
<style>
#perspective {
margin-left: 100px;
width: 300px;
height: 300px;
perspective: 600px;
}
#container {
width: 300px;
height: 300px;
line-height: 0;
transform-style: preserve-3d;
}
.box {
display: inline-block;
width: 20px;
height: 20px;
background: black;
}
</style>
<div id="perspective">
<div id="container"></div>
</div>
<script>
container.animate({
transform: [
'rotateX(70deg) rotateZ(0deg)',
'rotateX(70deg) rotateZ(360deg)',
],
}, {
duration: 20000,
iterations: Infinity,
});
for (var y = -7; y <= 7; y++) {
for (var x = -7; x <= 7; x++) {
var box = createBox();
box.animate({
transform: [
'translateZ(0px)',
'translateZ(20px)',
],
opacity: [1, 0],
}, {
delay: (x*x + y*y) * 20,
duration: 2000,
iterations: Infinity,
direction: 'alternate',
easing: 'ease-in',
});
}
}
function createBox() {
var box = document.createElement('div');
box.className = 'box';
container.appendChild(box);
return box;
}
</script>
```
| PypiClean |
/Hcl.py-0.8.2.tar.gz/Hcl.py-0.8.2/hcl/src/exceptions.py | class UnsupportedService(Exception):
"""
- **API Code** : 100
- **API Message** : Unsupported service. Your client may be out of date. Please update it to the latest version.
- **API String** : ``Unknown String``
"""
def __init__(*args, **kwargs):
Exception.__init__(*args, **kwargs)
class FileTooLarge(Exception):
"""
- **API Code** : 102
- **API Message** : ``Unknown Message``
- **API String** : API_STD_ERR_ENTITY_TOO_LARGE_RAW
"""
def __init__(*args, **kwargs):
Exception.__init__(*args, **kwargs)
class InvalidRequest(Exception):
"""
- **API Code** : 103, 104
- **API Message** : Invalid Request. Please update to the latest version. If the problem continues, please contact us.
- **API String** : ``Unknown String``
"""
def __init__(*args, **kwargs):
Exception.__init__(*args, **kwargs)
class InvalidSession(Exception):
"""
- **API Code** : 105
- **API Message** : ``Unknown Message``
- **API String** : ``Unknown String``
"""
def __init__(*args, **kwargs):
Exception.__init__(*args, **kwargs)
class AccessDenied(Exception):
"""
- **API Code** : 106
- **API Message** : Access denied.
- **API String** : ``Unknown String``
"""
def __init__(*args, **kwargs):
Exception.__init__(*args, **kwargs)
class UnexistentData(Exception):
"""
- **API Code** : 107
- **API Message** : The requested data does not exist.
- **API String** : ``Unknown String``
"""
def __init__(*args, **kwargs):
Exception.__init__(*args, **kwargs)
class ActionNotAllowed(Exception):
"""
- **API Code** : 110
- **API Message** : Action not allowed.
- **API String** : ``Unknown String``
"""
def __init__(*args, **kwargs):
Exception.__init__(*args, **kwargs)
class ServiceUnderMaintenance(Exception):
"""
- **API Code** : 111
- **API Message** : Sorry, this service is under maintenance. Please check back later.
- **API String** : ``Unknown String``
"""
def __init__(*args, **kwargs):
Exception.__init__(*args, **kwargs)
class MessageNeeded(Exception):
"""
- **API Code** : 113
- **API Message** : Be more specific, please.
- **API String** : ``Unknown String``
"""
def __init__(*args, **kwargs):
Exception.__init__(*args, **kwargs)
class InvalidAccountOrPassword(Exception):
"""
- **API Code** : 200
- **API Message** : ``Unknown Message``
- **API String** : ``Unknown String``
"""
def __init__(*args, **kwargs):
Exception.__init__(*args, **kwargs)
class AccountDisabled(Exception):
    """API code 210 (AUTH_DISABLED_ACCOUNT): "This account is disabled." """


class InvalidEmail(Exception):
    """API code 213 (API_ERR_EMAIL): "Invalid email address." """


class InvalidPassword(Exception):
    """API code 214 (API_ERR_PASSWORD): "Invalid password. Password must
    be 6 characters or more and contain no spaces."
    """


class EmailAlreadyTaken(Exception):
    """API code 215 (API_ERR_EMAIL_TAKEN): the email has been registered
    already; log in with it or edit the email.
    """


class UnsupportedEmail(Exception):
    """API code 215 (API_ERR_EMAIL_TAKEN): "This email address is not
    supported."
    """


class AccountDoesntExist(Exception):
    """API code 216 (AUTH_ACCOUNT_NOT_EXISTS)."""


class InvalidDevice(Exception):
    """API code 218: "Error! Your device is currently not supported, or
    the app is out of date. Please update to the latest version."
    """


class AccountLimitReached(Exception):
    """API code 219: "A maximum of 3 accounts can be created from this
    device. If you forget your password, please reset it."
    """


class TooManyRequests(Exception):
    """API code 219: "Too many requests. Try again later." """


class CantFollowYourself(Exception):
    """API code 221."""
class UserUnavailable(Exception):
    """API code 225: "This user is unavailable." """


class YouAreBanned(Exception):
    """API code 229: "You are banned." """


class UserNotMemberOfCommunity(Exception):
    """API code 230 (API_ERR_USER_NOT_IN_COMMUNITY): "You have to join
    this Community first."
    """


class RequestRejected(Exception):
    """API code 235: request rejected; the account is temporarily muted
    (read-only mode) because it received a strike.
    """


class ActivateAccount(Exception):
    """API code 238: "Please activate your account first. Check your
    email, including your spam folder."
    """


class CantLeaveCommunity(Exception):
    """API code 239: the Agent must transfer their status to another
    member before leaving.
    """


class ReachedTitleLength(Exception):
    """API code 240: "Sorry, the max length of member's title is limited
    to 20."
    """


class EmailFlaggedAsSpam(Exception):
    """API code 241: "This email provider has been flagged for use in
    spamming."
    """


class AccountDeleted(Exception):
    """API code 246 (AUTH_RECOVERABLE_DELETED_ACCOUNT)."""


class API_ERR_EMAIL_NO_PASSWORD(Exception):
    """API code 251 (API_ERR_EMAIL_NO_PASSWORD)."""
class API_ERR_COMMUNITY_USER_CREATED_COMMUNITIES_VERIFY(Exception):
    """API code 257 (API_ERR_COMMUNITY_USER_CREATED_COMMUNITIES_VERIFY)."""


class ReachedMaxTitles(Exception):
    """API code 262: "You can only add up to 20 Titles. Please choose
    the most relevant ones."
    """


class VerificationRequired(Exception):
    """API code 270 (API_ERR_NEED_TWO_FACTOR_AUTHENTICATION):
    "Verification Required."
    """


class API_ERR_INVALID_AUTH_NEW_DEVICE_LINK(Exception):
    """API code 271 (API_ERR_INVALID_AUTH_NEW_DEVICE_LINK)."""


class CommandCooldown(Exception):
    """API code 291: "Whoa there! You've done too much too quickly. Take
    a break and try again later."
    """


class UserBannedByTeamAmino(Exception):
    """API code 293: "Sorry, this user has been banned by Team Amino." """


class BadImage(Exception):
    """API code 300."""


class InvalidThemepack(Exception):
    """API code 313."""


class InvalidVoiceNote(Exception):
    """API code 314."""


class RequestedNoLongerExists(Exception):
    """API codes 500/700/1600: "Sorry, the requested data no longer
    exists. Try refreshing the view."
    """
class PageRepostedTooRecently(Exception):
    """API code 503: "Sorry, you have reported this page too recently." """


class InsufficientLevel(Exception):
    """API code 551: the post type is restricted to members at or above
    a given level ranking.
    """


class WallCommentingDisabled(Exception):
    """API code 702: "This member has disabled commenting on their wall." """


class CommunityNoLongerExists(Exception):
    """API code 801: "This Community no longer exists." """


class InvalidCodeOrLink(Exception):
    """API code 802: "Sorry, this code or link is invalid." """


class CommunityNameAlreadyTaken(Exception):
    """API code 805."""


class CommunityCreateLimitReached(Exception):
    """API code 806
    (API_ERR_COMMUNITY_USER_CREATED_COMMUNITIES_EXCEED_QUOTA).
    """


class CommunityDisabled(Exception):
    """API code 814: "This Community is disabled." """


class CommunityDeleted(Exception):
    """API code 833: "This Community has been deleted." """


class ReachedMaxCategories(Exception):
    """API code 1002: "Sorry, you can create up to 100 categories." """
class DuplicatePollOption(Exception):
    """API code 1501: "Sorry, you have duplicate poll options." """


class ReachedMaxPollOptions(Exception):
    """API code 1507: "Sorry, you can only join or add up to 5 of your
    items per poll."
    """


class TooManyChats(Exception):
    """API code 1602: "Sorry, you can only have up to 1000 chat
    sessions."
    """


class ChatFull(Exception):
    """API code 1605."""


class TooManyInviteUsers(Exception):
    """API code 1606: "Sorry, you can only invite up to 999 people." """


class ChatInvitesDisabled(Exception):
    """API code 1611: "This user has disabled chat invite requests." """


class RemovedFromChat(Exception):
    """API code 1612: "You've been removed from this chatroom." """


class UserNotJoined(Exception):
    """API code 1613: "Sorry, this user has not joined." """


class API_ERR_CHAT_VVCHAT_NO_MORE_REPUTATIONS(Exception):
    """API code 1627 (API_ERR_CHAT_VVCHAT_NO_MORE_REPUTATIONS)."""


class MemberKickedByOrganizer(Exception):
    """API code 1637: "This member was previously kicked by the
    organizer and cannot be reinvited."
    """
class LevelFiveRequiredToEnableProps(Exception):
"""
- **API Code** : 1661
- **API Message** : ``Unknown Message``
- **API String** : ``Unknown String``
"""
def __init__(*args, **kwargs):
Exception.__init__(*args, **kwargs)
class ChatViewOnly(Exception):
"""
- **API Code** : 1663
- **API Message** : ``Unknown Message``
- **API String** : ``Unknown String``
"""
def __init__(*args, **kwargs):
Exception.__init__(*args, **kwargs)
class ChatMessageTooBig(Exception):
"""
- **API Code** : 1664
- **API Message** : ``Unknown Message``
- **API String** : API_ERR_CHAT_MESSAGE_CONTENT_TOO_LONG
"""
def __init__(*args, **kwargs):
Exception.__init__(*args, **kwargs)
class InviteCodeNotFound(Exception):
"""
- **API Code** : 1900
- **API Message** : Sorry, the requested data no longer exists. Try refreshing the view.
- **API String** : ``Unknown String``
"""
def __init__(*args, **kwargs):
Exception.__init__(*args, **kwargs)
class AlreadyRequestedJoinCommunity(Exception):
"""
- **API Code** : 2001
- **API Message** : Sorry, you have already submitted a membership request.
- **API String** : ``Unknown String``
"""
def __init__(*args, **kwargs):
Exception.__init__(*args, **kwargs)
class API_ERR_PUSH_SERVER_LIMITATION_APART(Exception):
    """
    - **API Code** : 2501
    - **API Message** : ``Unknown Message``
    - **API String** : API_ERR_PUSH_SERVER_LIMITATION_APART
    """
    # Fixed: explicit ``self`` instead of relying on the instance being
    # captured implicitly in ``*args``.
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class API_ERR_PUSH_SERVER_LIMITATION_COUNT(Exception):
    """
    - **API Code** : 2502
    - **API Message** : ``Unknown Message``
    - **API String** : API_ERR_PUSH_SERVER_LIMITATION_COUNT
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class API_ERR_PUSH_SERVER_LINK_NOT_IN_COMMUNITY(Exception):
    """
    - **API Code** : 2503
    - **API Message** : ``Unknown Message``
    - **API String** : API_ERR_PUSH_SERVER_LINK_NOT_IN_COMMUNITY
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class API_ERR_PUSH_SERVER_LIMITATION_TIME(Exception):
    """
    - **API Code** : 2504
    - **API Message** : ``Unknown Message``
    - **API String** : API_ERR_PUSH_SERVER_LIMITATION_TIME
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class AlreadyCheckedIn(Exception):
    """
    - **API Code** : 2601
    - **API Message** : Sorry, you can't check in any more.
    - **API String** : ``Unknown String``
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class AlreadyUsedMonthlyRepair(Exception):
    """
    - **API Code** : 2611
    - **API Message** : ``Unknown Message``
    - **API String** : ``Unknown String``
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class AccountAlreadyRestored(Exception):
    """
    - **API Code** : 2800
    - **API Message** : Account already restored.
    - **API String** : ``Unknown String``
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)
class IncorrectVerificationCode(Exception):
    """
    - **API Code** : 3102
    - **API Message** : Incorrect verification code.
    - **API String** : ``Unknown String``
    """
    # Fixed: explicit ``self`` instead of relying on the instance being
    # captured implicitly in ``*args``.
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class NotOwnerOfChatBubble(Exception):
    """
    - **API Code** : 3905
    - **API Message** : You are not the owner of this chat bubble.
    - **API String** : ``Unknown String``
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class NotEnoughCoins(Exception):
    """
    - **API Code** : 4300
    - **API Message** : ``Unknown Message``
    - **API String** : ``Unknown String``
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class AlreadyPlayedLottery(Exception):
    """
    - **API Code** : 4400
    - **API Message** : You have played the maximum number of lucky draws.
    - **API String** : ``Unknown String``
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class CannotSendCoins(Exception):
    """
    - **API Code** : 4500, 4501
    - **API Message** : ``Unknown Message``
    - **API String** : ``Unknown String``
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class AminoIDAlreadyChanged(Exception):
    """
    - **API Code** : 6001
    - **API Message** : Amino ID cannot be changed after you set it.
    - **API String** : ``Unknown String``
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class InvalidAminoID(Exception):
    """
    - **API Code** : 6002
    - **API Message** : Invalid Amino ID
    - **API String** : ``Unknown String``
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class InvalidName(Exception):
    """
    - **API Code** : 99001
    - **API Message** : Sorry, the name is invalid.
    - **API String** : ``Unknown String``
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)
class SpecifyType(Exception):
    """
    Raised when you need to specify the output of the command.
    """
    # Fixed throughout this group: explicit ``self`` instead of relying on the
    # instance being captured implicitly in ``*args``.
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class CannotNotClaimReputation(Exception):
    """
    Reputation to be claimed should be higher than 1
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class WrongType(Exception):
    """
    Raised when you attribute the function the wrong type.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class UnknownResponse(Exception):
    """
    Raised when an error occurs but the reason is unknown.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class NotLoggedIn(Exception):
    """
    Raised when you try to make an action but you aren't logged in.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class NoCommunity(Exception):
    """
    Raised when you try to make an action but no community was selected.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class CommunityNotFound(Exception):
    """
    Raised when you search for a community but nothing is found.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class NoChatThread(Exception):
    """
    Raised when you try to make an action but no chat was selected.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class ChatRequestsBlocked(Exception):
    """
    Raised when you try to make an action but the end user has chat requests blocked.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class NoImageSource(Exception):
    """
    Raised when you try to make an action but no image source was selected.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class CannotFetchImage(Exception):
    """
    Raised when an image cannot be fetched.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class FailedLogin(Exception):
    """
    Raised when you try to login but it fails.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class AgeTooLow(Exception):
    """
    Raised when you try to configure an account but the age is too low. Minimum is 13.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class UnsupportedLanguage(Exception):
    """
    Raised when you try to use a language that isn't supported or exists.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class CommunityNeeded(Exception):
    """
    Raised when you try to execute an command but a Community needs to be specified.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class FlagTypeNeeded(Exception):
    """
    Raised when you try to flag a community, blog or user but a Flag Type needs to be specified.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class ReasonNeeded(Exception):
    """
    Raised when you try to execute an command but a Reason needs to be specified.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class TransferRequestNeeded(Exception):
    """
    Raised when you need to transfer host to complete the action.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


class LibraryUpdateAvailable(Exception):
    """
    Raised when a new library update is available.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)
def CheckException(data):
    """Raise the exception class registered for the API status code in *data*.

    :param data: decoded API response dict; must contain "api:statuscode"
    :raises: the specific exception mapped to the status code, or a plain
             ``Exception`` carrying ``data`` for unrecognised codes
    """
    # NOTE(review): codes 215 and 219 previously used ``raise A and B`` /
    # ``raise A or B``.  Those expressions evaluate to a single instance
    # (B and A respectively, since exception instances are truthy), so the
    # mapping below preserves the exception that was actually being raised.
    exception_map = {
        100: UnsupportedService,
        102: FileTooLarge,
        103: InvalidRequest,
        104: InvalidRequest,
        105: InvalidSession,
        106: AccessDenied,
        107: UnexistentData,
        110: ActionNotAllowed,
        111: ServiceUnderMaintenance,
        113: MessageNeeded,
        200: InvalidAccountOrPassword,
        201: AccountDisabled,
        210: AccountDisabled,
        213: InvalidEmail,
        214: InvalidPassword,
        215: UnsupportedEmail,
        216: AccountDoesntExist,
        218: InvalidDevice,
        219: AccountLimitReached,
        221: CantFollowYourself,
        225: UserUnavailable,
        229: YouAreBanned,
        230: UserNotMemberOfCommunity,
        235: RequestRejected,
        238: ActivateAccount,
        239: CantLeaveCommunity,
        240: ReachedTitleLength,
        241: EmailFlaggedAsSpam,
        246: AccountDeleted,
        251: API_ERR_EMAIL_NO_PASSWORD,
        257: API_ERR_COMMUNITY_USER_CREATED_COMMUNITIES_VERIFY,
        262: ReachedMaxTitles,
        270: VerificationRequired,
        271: API_ERR_INVALID_AUTH_NEW_DEVICE_LINK,
        291: CommandCooldown,
        293: UserBannedByTeamAmino,
        300: BadImage,
        313: InvalidThemepack,
        314: InvalidVoiceNote,
        500: RequestedNoLongerExists,
        503: PageRepostedTooRecently,
        551: InsufficientLevel,
        700: RequestedNoLongerExists,
        702: WallCommentingDisabled,
        801: CommunityNoLongerExists,
        802: InvalidCodeOrLink,
        805: CommunityNameAlreadyTaken,
        806: CommunityCreateLimitReached,
        814: CommunityDisabled,
        833: CommunityDeleted,
        1002: ReachedMaxCategories,
        1501: DuplicatePollOption,
        1507: ReachedMaxPollOptions,
        1600: RequestedNoLongerExists,
        1602: TooManyChats,
        1605: ChatFull,
        1606: TooManyInviteUsers,
        1611: ChatInvitesDisabled,
        1612: RemovedFromChat,
        1613: UserNotJoined,
        1627: API_ERR_CHAT_VVCHAT_NO_MORE_REPUTATIONS,
        1637: MemberKickedByOrganizer,
        1661: LevelFiveRequiredToEnableProps,
        1663: ChatViewOnly,
        1664: ChatMessageTooBig,
        1900: InviteCodeNotFound,
        2001: AlreadyRequestedJoinCommunity,
        2501: API_ERR_PUSH_SERVER_LIMITATION_APART,
        2502: API_ERR_PUSH_SERVER_LIMITATION_COUNT,
        2503: API_ERR_PUSH_SERVER_LINK_NOT_IN_COMMUNITY,
        2504: API_ERR_PUSH_SERVER_LIMITATION_TIME,
        2601: AlreadyCheckedIn,
        2611: AlreadyUsedMonthlyRepair,
        2800: AccountAlreadyRestored,
        3102: IncorrectVerificationCode,
        3905: NotOwnerOfChatBubble,
        4300: NotEnoughCoins,
        4400: AlreadyPlayedLottery,
        4500: CannotSendCoins,
        4501: CannotSendCoins,
        6001: AminoIDAlreadyChanged,
        6002: InvalidAminoID,
        # Fixed: was 9901, but InvalidName is documented as API code 99001.
        99001: InvalidName,
    }
    api_code = data["api:statuscode"]
    raise exception_map.get(api_code, Exception)(data)
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/data/GoogleSearchStore.js | if(!dojo._hasResource["dojox.data.GoogleSearchStore"]){
dojo._hasResource["dojox.data.GoogleSearchStore"]=true;
dojo.provide("dojox.data.GoogleSearchStore");
dojo.provide("dojox.data.GoogleWebSearchStore");
dojo.provide("dojox.data.GoogleBlogSearchStore");
dojo.provide("dojox.data.GoogleLocalSearchStore");
dojo.provide("dojox.data.GoogleVideoSearchStore");
dojo.provide("dojox.data.GoogleNewsSearchStore");
dojo.provide("dojox.data.GoogleBookSearchStore");
dojo.provide("dojox.data.GoogleImageSearchStore");
dojo.require("dojo.io.script");
dojo.experimental("dojox.data.GoogleSearchStore");
// Base dojo.data Read-API store that queries the Google AJAX Search API via
// JSONP (dojo.io.script).  Results arrive in pages of 8; fetch() issues one
// request per page, re-orders responses that arrive out of order, and feeds
// items to the dojo.data callbacks (onBegin/onItem/onComplete/onError).
// NOTE(review): the code below is ShrinkSafe build output; only comments were
// added — do not hand-edit the logic, edit the uncompressed source instead.
dojo.declare("dojox.data.GoogleSearchStore",null,{constructor:function(_1){
if(_1){
if(_1.label){
this.label=_1.label;
}
if(_1.key){
this._key=_1.key;
}
if(_1.lang){
this._lang=_1.lang;
}
if("urlPreventCache" in _1){
this.urlPreventCache=_1.urlPreventCache?true:false;
}
}
this._id=dojox.data.GoogleSearchStore.prototype._id++; // unique per-instance id used to name the JSONP callback
},_id:0,_requestCount:0,_googleUrl:"http://ajax.googleapis.com/ajax/services/search/",_storeRef:"_S",_attributes:["unescapedUrl","url","visibleUrl","cacheUrl","title","titleNoFormatting","content","estimatedResultCount"],_aggregatedAttributes:{estimatedResultCount:"cursor.estimatedResultCount"},label:"titleNoFormatting",_type:"web",urlPreventCache:true,_queryAttrs:{text:"q"},_assertIsItem:function(_2){ // throws unless the argument was produced by this store
if(!this.isItem(_2)){
throw new Error("dojox.data.GoogleSearchStore: a function was passed an item argument that was not an item");
}
},_assertIsAttribute:function(_3){ // attribute names must be plain strings
if(typeof _3!=="string"){
throw new Error("dojox.data.GoogleSearchStore: a function was passed an attribute argument that was not an attribute name string");
}
},getFeatures:function(){ // this store is read-only
return {"dojo.data.api.Read":true};
},getValue:function(_4,_5,_6){
var _7=this.getValues(_4,_5);
if(_7&&_7.length>0){
return _7[0];
}
return _6;
},getAttributes:function(_8){
return this._attributes;
},hasAttribute:function(_9,_a){
if(this.getValue(_9,_a)){
return true;
}
return false;
},isItemLoaded:function(_b){
return this.isItem(_b);
},loadItem:function(_c){ // items are always fully loaded; nothing to do
},getLabel:function(_d){
return this.getValue(_d,this.label);
},getLabelAttributes:function(_e){
return [this.label];
},containsValue:function(_f,_10,_11){
var _12=this.getValues(_f,_10);
for(var i=0;i<_12.length;i++){
if(_12[i]===_11){
return true;
}
}
return false;
},getValues:function(_13,_14){
this._assertIsItem(_13);
this._assertIsAttribute(_14);
var val=_13[_14];
if(dojo.isArray(val)){
return val;
}else{
if(val!==undefined){
return [val];
}else{
return [];
}
}
},isItem:function(_15){
if(_15&&_15[this._storeRef]===this){
return true;
}
return false;
},close:function(_16){
},_format:function(_17,_18){ // hook for subclasses; identity by default
return _17;
},fetch:function(_19){ // dojo.data.api.Read fetch: pages through the Google results
_19=_19||{};
var _1a=_19.scope||dojo.global;
if(!_19.query){
if(_19.onError){
_19.onError.call(_1a,new Error(this.declaredClass+": A query must be specified."));
return;
}
}
var _1b={};
for(var _1c in this._queryAttrs){
_1b[_1c]=_19.query[_1c];
}
_19={query:_1b,onComplete:_19.onComplete,onError:_19.onError,onItem:_19.onItem,onBegin:_19.onBegin,start:_19.start,count:_19.count};
var _1d=8; // Google returns results in fixed pages of 8
var _1e="GoogleSearchStoreCallback_"+this._id+"_"+(++this._requestCount);
var _1f=this._createContent(_1b,_1e,_19);
var _20;
if(typeof (_19.start)==="undefined"||_19.start===null){
_19.start=0;
}
if(!_19.count){
_19.count=_1d;
}
_20={start:_19.start-_19.start%_1d};
var _21=this;
var _22=this._googleUrl+this._type;
var _23={url:_22,preventCache:this.urlPreventCache,content:_1f};
var _24=[];
var _25=0;
var _26=false;
var _27=_19.start-1;
var _28=0;
var _29=[];
function _2a(req){ // issue the JSONP request for one page
_28++;
_23.content.context=_23.content.start=req.start;
var _2b=dojo.io.script.get(_23);
_29.push(_2b.ioArgs.id);
_2b.addErrback(function(_2c){
if(_19.onError){
_19.onError.call(_1a,_2c,_19);
}
});
};
var _2d=function(_2e,_2f){ // merge one page of results into the output array
if(_29.length>0){
dojo.query("#"+_29.splice(0,1)).forEach(dojo.destroy);
}
if(_26){
return;
}
var _30=_21._getItems(_2f);
var _31=_2f?_2f["cursor"]:null;
if(_30){
for(var i=0;i<_30.length&&i+_2e<_19.count+_19.start;i++){
_21._processItem(_30[i],_2f);
_24[i+_2e]=_30[i];
}
_25++;
if(_25==1){ // first page: report the estimated total and request the remaining pages
var _32=_31?_31.pages:null;
var _33=_32?Number(_32[_32.length-1].start):0;
if(_19.onBegin){
var est=_31?_31.estimatedResultCount:_30.length;
var _34=est?Math.min(est,_33+_30.length):_33+_30.length;
_19.onBegin.call(_1a,_34,_19);
}
var _35=(_19.start-_19.start%_1d)+_1d;
var _36=1;
while(_32){
if(!_32[_36]||Number(_32[_36].start)>=_19.start+_19.count){
break;
}
if(Number(_32[_36].start)>=_35){
_2a({start:_32[_36].start});
}
_36++;
}
}
if(_19.onItem&&_24[_27+1]){
do{
_27++;
_19.onItem.call(_1a,_24[_27],_19);
}while(_24[_27+1]&&_27<_19.start+_19.count);
}
if(_25==_28){ // every outstanding page has been merged
_26=true;
dojo.global[_1e]=null;
if(_19.onItem){
_19.onComplete.call(_1a,null,_19);
}else{
_24=_24.slice(_19.start,_19.start+_19.count);
_19.onComplete.call(_1a,_24,_19);
}
}
}
};
var _37=[];
var _38=_20.start-1;
dojo.global[_1e]=function(_39,_3a,_3b,_3c){ // global JSONP callback; buffers pages that arrive out of order
try{
if(_3b!=200){
if(_19.onError){
_19.onError.call(_1a,new Error("Response from Google was: "+_3b),_19);
}
dojo.global[_1e]=function(){
};
return;
}
if(_39==_38+1){
_2d(Number(_39),_3a);
_38+=_1d;
if(_37.length>0){
_37.sort(_21._getSort());
while(_37.length>0&&_37[0].start==_38+1){
_2d(Number(_37[0].start),_37[0].data);
_37.splice(0,1);
_38+=_1d;
}
}
}else{
_37.push({start:_39,data:_3a});
}
}
catch(e){
_19.onError.call(_1a,e,_19);
}
};
_2a(_20);
},_getSort:function(){ // comparator: ascending by page start index
return function(a,b){
if(a.start<b.start){
return -1;
}
if(b.start<a.start){
return 1;
}
return 0;
};
},_processItem:function(_3d,_3e){ // tag item with its owner store and copy aggregated values
_3d[this._storeRef]=this;
for(var _3f in this._aggregatedAttributes){
_3d[_3f]=dojo.getObject(this._aggregatedAttributes[_3f],false,_3e);
}
},_getItems:function(_40){
return _40["results"]||_40;
},_createContent:function(_41,_42,_43){ // query parameters for the Google endpoint
var _44={v:"1.0",rsz:"large",callback:_42,key:this._key,hl:this._lang};
for(var _45 in this._queryAttrs){
_44[this._queryAttrs[_45]]=_41[_45];
}
return _44;
}});
// Specialisations of GoogleSearchStore: each overrides the endpoint suffix
// (_type) and the attribute list returned by that Google search service.
dojo.declare("dojox.data.GoogleWebSearchStore",dojox.data.GoogleSearchStore,{});
dojo.declare("dojox.data.GoogleBlogSearchStore",dojox.data.GoogleSearchStore,{_type:"blogs",_attributes:["blogUrl","postUrl","title","titleNoFormatting","content","author","publishedDate"],_aggregatedAttributes:{}});
// Local search also accepts a centre point and search span in the query.
dojo.declare("dojox.data.GoogleLocalSearchStore",dojox.data.GoogleSearchStore,{_type:"local",_attributes:["title","titleNoFormatting","url","lat","lng","streetAddress","city","region","country","phoneNumbers","ddUrl","ddUrlToHere","ddUrlFromHere","staticMapUrl","viewport"],_aggregatedAttributes:{viewport:"viewport"},_queryAttrs:{text:"q",centerLatLong:"sll",searchSpan:"sspn"}});
dojo.declare("dojox.data.GoogleVideoSearchStore",dojox.data.GoogleSearchStore,{_type:"video",_attributes:["title","titleNoFormatting","content","url","published","publisher","duration","tbWidth","tbHeight","tbUrl","playUrl"],_aggregatedAttributes:{}});
dojo.declare("dojox.data.GoogleNewsSearchStore",dojox.data.GoogleSearchStore,{_type:"news",_attributes:["title","titleNoFormatting","content","url","unescapedUrl","publisher","clusterUrl","location","publishedDate","relatedStories"],_aggregatedAttributes:{}});
dojo.declare("dojox.data.GoogleBookSearchStore",dojox.data.GoogleSearchStore,{_type:"books",_attributes:["title","titleNoFormatting","authors","url","unescapedUrl","bookId","pageCount","publishedYear"],_aggregatedAttributes:{}});
dojo.declare("dojox.data.GoogleImageSearchStore",dojox.data.GoogleSearchStore,{_type:"images",_attributes:["title","titleNoFormatting","visibleUrl","url","unescapedUrl","originalContextUrl","width","height","tbWidth","tbHeight","tbUrl","content","contentNoFormatting"],_aggregatedAttributes:{}});
}
/NanoSV-1.2.4.tar.gz/NanoSV-1.2.4/nanosv/utils/phasing.py | from utils import parse_breakpoints as breakpoint
from utils import parse_reads as read
from utils import parse_bam as bam
from utils import create_vcf as c_vcf
import NanoSV
import random
# Module-level genotype matrix shared by make_matrix() and the clustering
# helpers; after the transpose in make_matrix() each row holds one read's
# base calls across the candidate SNV positions.
matrix = []
def make_matrix(sv_id, windows):
    """
    Create matrix of positions and reads with their ref/alt/- as variable. Parse phasing result and select
    best result
    :param sv_id: key into breakpoint.structural_variants
    :param windows: iterable of [start, end] windows, one per breakpoint side
    :return best result with the purity, phasing score and snps used:
    """
    global matrix
    # x selects the breakpoint side: 0 uses chr/ref_qname_clips[0], 1 uses chr2.
    x = 0
    scores = [0,0,0,0]
    bp = -1
    for window in windows:
        matrix = []
        if x == 0:
            chr = breakpoint.structural_variants[sv_id].chr
        else:
            chr = breakpoint.structural_variants[sv_id].chr2
        # Candidate SNV positions are stored in fixed-size bins; convert the
        # window limits to bin indices and clamp them to the contig length.
        bin_start = int(window[0] / NanoSV.opts_variant_bin_size)
        bin_end = int(window[1] / NanoSV.opts_variant_bin_size)
        if bin_start < 0:
            bin_start = 0
        if bin_end > c_vcf.vcf.contigs[chr][1]:
            bin_end = int(c_vcf.vcf.contigs[chr][1] / NanoSV.opts_variant_bin_size)
        sv_reads = get_sv_reads(sv_id, x)
        for bin in range(bin_start, bin_end+1):
            for variant_position in bam.variants[chr][bin]:
                if int(window[0]) <= variant_position <= int(window[1]):
                    # One matrix row per SNV position: reference reads first,
                    # then the SV-supporting reads.
                    matrix.append([])
                    matrix_fill_position(breakpoint.structural_variants[sv_id].ref_qname_clips[x], variant_position, sv_id, chr, bin, x)
                    matrix_fill_position(sv_reads, variant_position, sv_id, chr, bin, x)
        # Transpose: rows become reads, columns become SNV positions.
        matrix = list(map(list, zip(*matrix)))
        if len(matrix) == 0:
            x += 1
            continue
        # SV-supporting reads occupy the rows after the reference reads.
        phasing_result= clustering(matrix, list(range(len(breakpoint.structural_variants[sv_id].ref_qname_clips[x]),len(matrix))), x)
        # Keep the result of the breakpoint side with the highest purity score.
        if sum(phasing_result[:1]) > sum(scores[:1]):
            scores = phasing_result
            scores.append(len(matrix[0]))
            bp += 1
        x += 1
    # Drop rows (reads) that carry no information: uniformly "-" at every
    # retained position.
    deleted = 0
    for segment in range(len(matrix)):
        if len(set(matrix[segment-deleted])) <= 1 and matrix[segment-deleted][0] == "-":
            del matrix[segment-deleted]
            deleted += 1
    return scores
def get_sv_reads(sv_id, x):
    """
    Collect the segment ids of all reads supporting the SV. Used later to
    evaluate the clustering.
    :param sv_id: key into breakpoint.structural_variants
    :param x: breakpoint side (0 -> segment_1, 1 -> segment_2)
    :return list with sv_reads:
    """
    breakpoint_ids = breakpoint.structural_variants[sv_id].breakpoints
    if x == 0:
        return [read.breakpoints[bp_id].segment_1['id'] for bp_id in breakpoint_ids]
    return [read.breakpoints[bp_id].segment_2['id'] for bp_id in breakpoint_ids]
def matrix_fill_position(sv_or_ref, variant_position, sv_id, chr, bin, x):
    """
    fill position in matrix with correct value for either ref or alt base, or a - with unknown haplotype
    :param sv_or_ref: list of segment id tuples; segment_id[2] indexes bam structures
    :param variant_position: genomic position of the candidate SNV
    :param sv_id: key into breakpoint.structural_variants
    :param chr: chromosome name of the current breakpoint side
    :param bin: variant bin index containing variant_position
    :param x: breakpoint side (0 uses flag1, 1 uses flag2)
    """
    global matrix
    for segment_id in sv_or_ref:
        if segment_id[2] in bam.variants[chr][bin][variant_position].segments:
            # The segment has a base call at this position; store it.
            matrix[len(matrix) - 1].append(bam.variants[chr][bin][variant_position].segments[segment_id[2]][0])
        else:
            # No call: decide between '-' (segment presumably does not reach
            # the position, given the side's orientation flag) and '='
            # (segment spans the position but has no recorded call).
            # NOTE(review): only one boundary is checked per flag/side combination;
            # confirm the opposite boundary is guaranteed by the breakpoint clipping.
            if breakpoint.structural_variants[sv_id].flag1 == 0 and x == 0:
                if bam.segments[segment_id[0]][segment_id[1]][segment_id[2]].pos > variant_position:
                    matrix[len(matrix) - 1].append('-')
                else:
                    matrix[len(matrix) - 1].append('=')
            elif breakpoint.structural_variants[sv_id].flag1 == 16 and x == 0:
                if bam.segments[segment_id[0]][segment_id[1]][segment_id[2]].end < variant_position:
                    matrix[len(matrix) - 1].append('-')
                else:
                    matrix[len(matrix) - 1].append('=')
            elif breakpoint.structural_variants[sv_id].flag2 == 0 and x == 1:
                if bam.segments[segment_id[0]][segment_id[1]][segment_id[2]].end < variant_position:
                    matrix[len(matrix) - 1].append('-')
                else:
                    matrix[len(matrix) - 1].append('=')
            elif breakpoint.structural_variants[sv_id].flag2 == 16 and x == 1:
                if bam.segments[segment_id[0]][segment_id[1]][segment_id[2]].pos > variant_position:
                    matrix[len(matrix) - 1].append('-')
                else:
                    matrix[len(matrix) - 1].append('=')
def clustering(matrix, sv_reads, bp_id):
    """
    creates clustering matrix by calling make_clustering_matrix(). Clusters most similar reads and sends
    result to judge_clustering to be evaluated. Result comes back and is returned.
    :param matrix: read-by-position matrix (rows are reads)
    :param sv_reads: row indices of the SV-supporting reads
    :param bp_id: breakpoint side index, passed through to judge_clustering
    :return phasing result:
    """
    clustering_matrix = make_clustering_matrix(matrix)
    # Agglomerative merging: repeatedly join the two most similar clusters
    # until only two remain or the best score drops below the cutoff.
    while len(clustering_matrix) > 2:
        keys = []
        for x in clustering_matrix:
            keys.append(x)
        highest_score = 0
        readA = 0
        readB = 0
        # Find the highest-scoring pair in the (lower-triangular) matrix.
        for i in keys:
            for key, value in clustering_matrix[i].items():
                if value >= highest_score:
                    highest_score = value
                    readA = key
                    readB = i
        if highest_score < NanoSV.opts_clustering_cutoff:
            break
        merged_name = str(readA) + "," + str(readB)
        merged_dict = {}
        # Average-linkage: score of the merged cluster against every other
        # cluster is the mean of the two members' scores. The max(...) index
        # comparison decides which triangular half holds each pair's score.
        for j in keys:
            if j == readA or j == readB:
                continue
            sum_of_scores = 0
            for read in [readA, readB]:
                if max(map(int, read.split(","))) >= max(map(int, j.split(","))):
                    sum_of_scores += clustering_matrix[str(read)][str(j)]
                else:
                    sum_of_scores += clustering_matrix[str(j)][str(read)]
            merged_dict[str(j)] = sum_of_scores / 2
        del_list = []
        # Scores against "later" clusters are stored in their rows instead.
        for item, merged_value in merged_dict.items():
            if max(map(int, item.split(","))) <= max(map(int, readB.split(","))):
                continue
            clustering_matrix[str(item)][str(merged_name)] = merged_value
            del_list.append(item)
        # Remove the two merged clusters and every reference to them.
        del clustering_matrix[str(readA)]
        del clustering_matrix[str(readB)]
        for read in clustering_matrix:
            if readA in clustering_matrix[read]:
                del clustering_matrix[read][readA]
            if readB in clustering_matrix[read]:
                del clustering_matrix[read][readB]
        for item in del_list:
            del merged_dict[str(item)]
        clustering_matrix[merged_name] = merged_dict
    breakpoint_result = judge_clustering(clustering_matrix, sv_reads, len(matrix), bp_id)
    return breakpoint_result
def judge_clustering(clustering_matrix, sv_reads, total_reads, x):
    """
    Judge the clustering result: compute purity and phasing scores for the two
    largest clusters and estimate an empirical p-value by comparing the
    observed purity against 100000 random cluster assignments.

    :param clustering_matrix: dict of cluster keys ("i,j,...") from clustering()
    :param sv_reads: list of matrix row indices that support the SV
    :param total_reads: total number of rows (reads) in the matrix
    :param x: breakpoint side index (kept for the caller; not used here)
    :return list with purity score, phasing score and p-value:
    """
    clusters = [key.split(",") for key in clustering_matrix]
    # Select the two largest clusters. The loop bounds [N, N-1] match the
    # list length before and after the first removal.
    longest_clusters = []
    for length_cluster in [len(clusters), len(clusters) - 1]:
        long_cluster = []
        for j in range(length_cluster):
            if len(long_cluster) <= len(clusters[j]):
                long_cluster = clusters[j]
        del clusters[clusters.index(long_cluster)]
        longest_clusters.append(long_cluster)
    # Count SV-supporting ([0]) and reference ([1]) reads in each cluster.
    amounts_per_cluster = []
    for cluster in longest_clusters:
        amounts = [0, 0]
        for read in cluster:
            if int(read) in sv_reads:
                amounts[0] += 1
            else:
                amounts[1] += 1
        amounts_per_cluster.append(amounts)
    pur_percentage_per_cluster = []
    phasing_percentage_per_cluster = []
    for i in range(len(amounts_per_cluster)):
        cluster_size = amounts_per_cluster[i][0] + amounts_per_cluster[i][1]
        # Purity: composition of the cluster itself.
        pur_percentage_per_cluster.append([
            amounts_per_cluster[i][0] / cluster_size * 100,
            amounts_per_cluster[i][1] / cluster_size * 100,
        ])
        # Phasing: fraction of all SV / reference reads captured by the cluster.
        phasing_percentages = [amounts_per_cluster[i][0] / len(sv_reads) * 100]
        try:
            phasing_percentages.append(amounts_per_cluster[i][1] / (total_reads - len(sv_reads)) * 100)
        except ZeroDivisionError:
            # Every read supports the SV; there are no reference reads to phase.
            phasing_percentages.append(0)
        phasing_percentage_per_cluster.append(phasing_percentages)
    # Purity score: how differently the two clusters are composed.
    pur_sv_score = abs(pur_percentage_per_cluster[0][0] - pur_percentage_per_cluster[1][0])
    pur_ref_score = abs(pur_percentage_per_cluster[0][1] - pur_percentage_per_cluster[1][1])
    # Phasing score: how completely the two clusters capture each read type.
    phasing_sv_score = phasing_percentage_per_cluster[0][0] + phasing_percentage_per_cluster[1][0]
    phasing_ref_score = phasing_percentage_per_cluster[0][1] + phasing_percentage_per_cluster[1][1]
    purity_score = (pur_sv_score + pur_ref_score) / 2
    phasing_score = (phasing_sv_score + phasing_ref_score) / 2
    # Empirical p-value with a +1 pseudocount so it can never be exactly zero.
    random_purities = [randomise(longest_clusters, sv_reads) for _ in range(100000)]
    teller = 1
    for value in random_purities:
        if float(value) >= purity_score:
            teller += 1
    pvalue = teller / len(random_purities)
    return [int(purity_score), int(phasing_score), pvalue]
def randomise(longest_clusters, sv_reads):
    """
    Creates a random result and calculates scores to be used with a calculation of the p-value.
    :param longest_clusters: the two largest clusters chosen by judge_clustering
    :param sv_reads: row indices of SV-supporting reads in the global matrix
    :return purity score:
    """
    # Build a shuffled pool of labels: 1 = reference read, 2 = SV read,
    # matching the composition of the global matrix.
    random_options = []
    for ref in range(len(matrix) - len(sv_reads)):
        random_options.append(1)
    for alt in range(len(sv_reads)):
        random_options.append(2)
    random.shuffle(random_options)
    # Deal the labels into three bins of the original sizes: index 0 holds the
    # reads outside the two largest clusters, indices 1 and 2 mirror them.
    new_clusters = [[], [], []]
    for clusternr in range(0,3):
        if clusternr == 0:
            for zero in range(len(matrix) - (len(longest_clusters[0])+len(longest_clusters[1]))):
                new_clusters[clusternr].append(random_options[-1])
                del random_options[-1]
        else:
            for one_or_two in range(len(longest_clusters[clusternr-1])):
                new_clusters[clusternr].append(random_options[-1])
                del random_options[-1]
    # Same purity computation as judge_clustering, on the random assignment.
    pur_percentage_per_cluster = []
    amounts_per_cluster = [[new_clusters[1].count(2),new_clusters[1].count(1)], [new_clusters[2].count(2), new_clusters[2].count(1)]]
    for i in range(len(amounts_per_cluster)):
        purity_percentages = []
        purity_percentages.append(amounts_per_cluster[i][0] / (amounts_per_cluster[i][0] + amounts_per_cluster[i][1]) * 100)
        purity_percentages.append(amounts_per_cluster[i][1] / (amounts_per_cluster[i][0] + amounts_per_cluster[i][1]) * 100)
        pur_percentage_per_cluster.append(purity_percentages)
    pur_sv_score = (pur_percentage_per_cluster[0][0] - pur_percentage_per_cluster[1][0])
    pur_ref_score = (pur_percentage_per_cluster[0][1] - pur_percentage_per_cluster[1][1])
    if pur_sv_score < 0:
        pur_sv_score = pur_sv_score * -1
    if pur_ref_score < 0:
        pur_ref_score = pur_ref_score * -1
    purity_score = (pur_sv_score + pur_ref_score) / 2
    return purity_score
def make_clustering_matrix(matrix):
    """
    Build the lower-triangular similarity matrix between reads.

    The similarity of two reads is the fraction of positions at which they
    carry the same symbol, ignoring positions where both are '-'. The diagonal
    is fixed at 0 and keys are stringified row indices.
    :param matrix: read-by-position matrix (rows are reads)
    :return filled clustering matrix:
    """
    similarity = {}
    for row_idx, row in enumerate(matrix):
        row_scores = {}
        for col_idx in range(row_idx + 1):
            if col_idx == row_idx:
                # Self-similarity is never a merge candidate.
                row_scores[str(col_idx)] = 0
                continue
            other = matrix[col_idx]
            informative = 0
            shared = 0
            for base_a, base_b in zip(row, other):
                if base_a == '-' and base_b == '-':
                    # Neither read covers this position; not informative.
                    continue
                informative += 1
                if base_a == base_b:
                    shared += 1
            row_scores[str(col_idx)] = shared / informative if informative else 0
        similarity[str(row_idx)] = row_scores
    return similarity
/FitBenchmarking-1.0.0.tar.gz/FitBenchmarking-1.0.0/examples/benchmark_problems/scripts/synth_sas.py | import os
import time
import re
import argparse
import numpy as np
from numpy.random import default_rng
from scipy.optimize import curve_fit
import h5py
import matplotlib.pyplot as plt
# Command-line interface: the script is driven by the folder of sasmodel data
# and the classification of synthetic noise to apply.
parser = argparse.ArgumentParser()
parser.add_argument(
    "-f",
    "--folder",
    help="The folder containing the sasmodels"
    " from which to synthesise data")
parser.add_argument(
    "-s",
    "--synthtype",
    help="The classification of noise to" +
    " add to the data")
# NOTE(review): arguments are parsed at import time; importing this module
# consumes sys.argv — confirm the module is only ever run as a script.
args = parser.parse_args()
def file_names(pathname):
    """Return the names of all entries in the given directory.

    Inputs: pathname - string of the directory path
    Outputs: fnames - list of entry names (no paths), in arbitrary order
    """
    # os.listdir already returns a list of names; copying it element by
    # element in a loop was redundant.
    return os.listdir(pathname)
def read_xray_data(pfolder, fname):
    """Read experimental data from a tab delimited file.

    Rows with non-positive intensities are discarded.

    Inputs: pfolder - string of folder path
            fname - string of filename of sasmodel data
    Outputs: x_vals - 1d np array of x values
             y_vals - 1d np array of y values
             ydev_vals - 1d np array of y uncertainty values
    """
    # Fixed: os.path.join is portable; the previous hard-coded "\\" separator
    # only worked on Windows.
    data_array = np.loadtxt(os.path.join(pfolder, fname), delimiter="\t")
    y_ids = data_array[:, 1] > 0  # filter out negative (and zero) intensities
    x_vals = data_array[y_ids, 0]
    y_vals = data_array[y_ids, 1]
    ydev_vals = data_array[y_ids, 2]
    return x_vals, y_vals, ydev_vals
def read_neutron_data(pfolder, fname):
    """Read experimental neutron data from a NXcanSAS .h5 file.

    Inputs: pfolder - string of folder path
            fname - string of filename of sasmodel data
    Outputs: x_vals - 1d np array of x values
             y_vals - 1d np array of y values
             ydev_vals - 1d np array of y uncertainty values
    """
    # FIX: use os.path.join instead of a hard-coded "\\" separator so the
    # reader also works on POSIX systems.
    with h5py.File(os.path.join(pfolder, fname), 'r') as file:
        datasetnames = list(file.keys())
        itemnames = list(file[datasetnames[0]].keys())
        datanames = list(file[datasetnames[0]][itemnames[2]].keys())
        # data are on the third layer of these .h5 files; the key order is
        # assumed to be (I, Idev, Q) — TODO confirm against the NXcanSAS file
        x_vals = np.array(file[datasetnames[0]][itemnames[2]][datanames[2]])
        y_vals = np.array(file[datasetnames[0]][itemnames[2]][datanames[0]])
        ydev_vals = np.array(file[datasetnames[0]][itemnames[2]][datanames[1]])
    return x_vals, y_vals, ydev_vals
def read_sasmodel_data(pfolder, fname):
    """Read sasmodel data from a space delimited txt file; the default
    format outputted by SASView (one header row, then x/y columns).

    Inputs: pfolder - string of folder path
            fname - string of filename of sasmodel data
    Outputs: x_vals - 1d np array of x values
             y_vals - 1d np array of y values
    """
    # FIX: use os.path.join instead of a hard-coded "\\" separator so the
    # reader also works on POSIX systems.
    data_array = np.loadtxt(os.path.join(pfolder, fname), delimiter=" ",
                            skiprows=1)
    x_vals = data_array[:, 0]
    y_vals = data_array[:, 1]
    return x_vals, y_vals
def normalise_data(vals):
    """Min-max normalise a 1d np array.

    Inputs: vals - 1d np array to be normalised
    Outputs: norm_vals - 1d np array scaled to [0, 1]; exact zeros are
             replaced by 1e-7 so log-scale plotting never sees 0
             norm_pars - 2 element list [min, max] of the original values
    """
    lo = min(vals)
    hi = max(vals)
    scaled = (vals - lo) / (hi - lo)
    scaled[scaled == 0] = 1e-7
    return scaled, [lo, hi]
def denormalise_data(norm_vals, norm_pars):
    """Invert `normalise_data`: map normalised values back onto the original
    scale.

    FIX: the original docstring was a copy-paste of `normalise_data`'s and
    described the wrong inputs/outputs.

    Inputs: norm_vals - 1d np array of normalised values in [0, 1]
            norm_pars - 2 element list [min, max] returned by normalise_data
    Outputs: vals - 1d np array on the original scale
    """
    lo, hi = norm_pars
    return lo + norm_vals * (hi - lo)
def gaussian(y_vals, n_sigma):
    """Add Gaussian noise to each intensity datum.

    The standard deviation used for each datum is its intensity multiplied
    by the scaling factor `n_sigma`.

    Inputs: y_vals - 1D numpy array of intensities
            n_sigma - std scaling factor
    Outputs: y_noise - 1D numpy array of intensities with noise included
             noise - list of the sampled noise values
    """
    rng = default_rng()
    noise = [rng.normal(loc=0.0, scale=n_sigma * y_val) for y_val in y_vals]
    y_noise = np.array([y_val + eps for y_val, eps in zip(y_vals, noise)])
    return y_noise, noise
def poissonian(y_vals, **kwargs):
    """Sample synthetic intensities from a Poisson distribution.

    The Poisson parameter 'lambda' for each datum is its (scaled, shifted)
    intensity; non-positive scaled intensities fall back to lambda = 1.
    Unlike the Gaussian case, the Poisson samples themselves are the
    synthetic data.

    Inputs: y_vals - 1D numpy array of intensities
    Optional Keyword Inputs:
            count_scale - intensity scaling factor (default 1)
            count_shift - intensity shift constant (default 0)
    Outputs: y_vals - 1D numpy array of scaled/shifted input intensities
             y_noise - 1D numpy array of Poisson-sampled intensities
             noise - 1D numpy array of (sample - scaled input); not strictly
                     "noise" in the additive Poisson sense
    """
    count_scale = kwargs.get('count_scale', 1)
    count_shift = kwargs.get('count_shift', 0)
    rng = default_rng()
    samples = []
    for lam in y_vals:
        if lam * count_scale <= 0:
            # guard against invalid (non-positive) Poisson parameters
            lam = 1
        samples.append(rng.poisson(lam * count_scale + count_shift))
    y_noise = np.array(samples)
    y_scaled = y_vals * count_scale + count_shift
    return y_scaled, y_noise, y_noise - y_scaled
def powerlaw(x, a, b, c):
"""Powerlaw function used by fitting software to characterise uncertainty."""
return a * x**b + c
def errorbar_xy(x_vals, y_vals, ydev_vals, **kwargs):
    """Plot Y vs X on log-log axes with uncertainty errorbars.

    Inputs: x_vals - 1D np array of Q values
            y_vals - 1D np array of intensity values
            ydev_vals - 1D np array of uncertainty
    Optional Keyword Inputs:
            title - str defining figure title
            xunits - str defining x units
            yunits - str defining y units
    Outputs: plt - plot handle
    """
    # kwargs.get already returns "" only when the key is absent, so the
    # original "in kwargs" checks collapse to plain defaulted lookups
    tit = kwargs.get("title", "")
    xunits = kwargs.get("xunits", "")
    yunits = kwargs.get("yunits", "")
    plt.plot(x_vals, y_vals)
    plt.errorbar(x_vals, y_vals, yerr=ydev_vals,
                 fmt="None", color="orange")
    plt.legend(["Data", "Uncertainty"])
    plt.xscale("log", nonpositive='clip')
    plt.yscale("log", nonpositive='clip')
    plt.xlabel("X " + xunits)
    plt.ylabel("Y " + yunits)
    plt.title(tit)
    return plt
def power_fit(x_vals, y_vals):
    """Fit a power law to (x, y) using scipy's curve_fit and plot the result.

    Inputs: x_vals - 1d np array (e.g. normalised intensity)
            y_vals - 1d np array (e.g. normalised relative uncertainty)
    Outputs: plt - plot handle
             pars - fitted (a, b, c) of a * x**b + c
             conv - covariance matrix of the fit
    """
    pars, conv = curve_fit(f=powerlaw, xdata=x_vals, ydata=y_vals,
                           p0=[0, 0, 0], bounds=(-np.inf, np.inf))
    plt.plot(x_vals, y_vals, '+')
    plt.plot(np.sort(x_vals), powerlaw(np.sort(x_vals), *pars))
    # BUG FIX: a missing comma made Python concatenate the two labels into a
    # single legend entry ("Experimental UncertaintyPowerlaw")
    plt.legend(["Experimental Uncertainty", "Powerlaw"])
    plt.xscale("log", nonpositive='clip')
    plt.xlabel("Intensity")
    plt.ylabel("Relative Uncertainty")
    plt.title("Relative Uncertainty vs Intensity Relationship")
    return plt, pars, conv
def norm_ydep_pwr_synth(x_vals, y_vals, pwr_pars):
    """Generate synthetic data based on an empirical power law relationship
    between relative uncertainty and normalised intensity. This approach
    synthesises data in the fashion of an x-ray SAS experiment.

    Inputs: x_vals - 1d np array of sasmodel Q data
            y_vals - 1d np array of sasmodel intensity data
            pwr_pars - power law parameters from a fit of ydev/y against y
    Outputs: plt - plot handle
             y_syn - 1d array of synthesised intensity data
             ydev_syn - 1d np array of synthesised uncertainty data
    """
    y_norm, norm_pars = normalise_data(y_vals)
    y_syn = []
    ydev_syn = []
    rng = default_rng()
    for y_datum in y_norm:
        # power law gives the relative uncertainty for this normalised
        # intensity; multiply by the intensity to get absolute uncertainty
        ydev_rel_temp = powerlaw(y_datum, *pwr_pars)
        ydev_temp = ydev_rel_temp*y_datum  # include scalar multiple here to amplify noise
        ydev_syn.append(ydev_temp)
        # two Gaussian noise contributions: one from the modelled
        # uncertainty and a flat 5%-of-intensity term
        noise_temp = rng.normal(loc=0.0,
                                scale=ydev_temp) + rng.normal(loc=0.0,
                                                              scale=0.05 * y_datum)
        y_syn.append(y_datum + noise_temp)
    ydev_syn = np.array(ydev_syn)
    y_syn = np.array(y_syn)
    # map the synthetic values back onto the original intensity scale
    y_syn = denormalise_data(y_syn, norm_pars)
    ydev_syn = denormalise_data(ydev_syn, norm_pars)
    plt.plot(x_vals, y_vals, "--", color="red", zorder=3)
    plt.plot(x_vals, y_syn, zorder=2)
    plt.errorbar(x_vals, y_syn, yerr=ydev_syn,
                 fmt="None", color="orange", zorder=1)
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.xscale("log", nonpositive='clip')
    plt.yscale("log", nonpositive='clip')
    plt.legend(["sasmodel", "synthetic data", "synthetic uncertainty"])
    return plt, y_syn, ydev_syn
def norm_xdep_linterp_synth(x_mdl, y_mdl, x_xp, ydev_rel_xp):
    """Generate synthetic data based on a linear interpolation of the
    experimental relationship between relative uncertainty and normalised Q.

    FIX: removed the dead `x_syn` accumulator (built but never used) and
    corrected the docstring parameter name (was `ydev_xp`).

    Inputs: x_mdl - 1d np array of sasmodel Q data
            y_mdl - 1d np array of sasmodel I data
            x_xp - 1d np array of experimental Q data
            ydev_rel_xp - 1d np array of experimental relative uncertainty
    Outputs: plt - plot handle
             y_syn - 1d array of synthesised intensity data
             ydev_syn - 1d np array of synthesised uncertainty data
    """
    # normalise both Q axes so the experimental uncertainty profile can be
    # looked up at the model's (dimensionless) Q positions
    x_mdl_norm, _ = normalise_data(x_mdl)
    x_xp_norm, _ = normalise_data(x_xp)
    y_syn = []
    ydev_syn = []
    rng = default_rng()
    for x_datum, y_datum in zip(x_mdl_norm, y_mdl):
        # interpolated relative uncertainty, scaled to an absolute value
        ydev_temp = np.interp(x_datum, x_xp_norm, ydev_rel_xp) * y_datum
        ydev_syn.append(ydev_temp)
        y_syn.append(y_datum + rng.normal(loc=0.0, scale=ydev_temp))
    ydev_syn = np.array(ydev_syn)
    y_syn = np.array(y_syn)
    plt.plot(x_mdl, y_mdl, "--", color="red", zorder=3)
    plt.plot(x_mdl, y_syn, zorder=2)
    plt.errorbar(x_mdl, y_syn, yerr=ydev_syn,
                 fmt="None", color="orange", zorder=1)
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.xscale("log", nonpositive='clip')
    plt.yscale("log", nonpositive='clip')
    plt.legend(["sasmodel", "synthetic data", "synthetic uncertainty"])
    return plt, y_syn, ydev_syn
def example_xray_synth():
    """Run a graphical demonstration of the x-ray-style synthesis approach.

    Reads the first file in the ``experimental_xray_sas`` folder, plots the
    experimental data, fits a power law to the normalised relative
    uncertainty vs intensity relationship, and uses it to synthesise a noisy
    version of an example sasmodel dataset.

    NOTE(review): relies on relative folder names, so it assumes the current
    working directory is the scripts folder — confirm before running.
    """
    # find the folder and file containing the experimental x-ray data
    pfolder = "experimental_xray_sas"
    fnames = file_names(pfolder)
    fname = fnames[0]
    # read the x-ray experimental data (x, y and y-uncertainty)
    x_xp, y_xp, ydev_xp = read_xray_data(pfolder, fname)
    ydev_rel_xp = ydev_xp / y_xp
    fig = errorbar_xy(
        x_xp,
        y_xp,
        ydev_xp,
        title="Example X-Ray SAS Synthesis Process - " +
        "Experimental Data (" +
        fname +
        ")")
    fig.show()
    # plot what are considered the dependent and independent variables
    # for this particular case
    plt.plot(y_xp, ydev_rel_xp, '+')
    plt.xlabel("Y (Independent)")
    plt.ylabel("Relative Y-Uncertainty (Dependent)")
    plt.xscale("log", nonpositive='clip')
    plt.show()
    # normalise the dependent and independent variables then perform
    # power law fit to characterise their relationship
    ydev_rel_norm_xp, _ = normalise_data(ydev_rel_xp)
    y_norm_xp, _ = normalise_data(y_xp)
    fig, pwr_pars, _ = power_fit(y_norm_xp, ydev_rel_norm_xp)
    fig.legend(["Experimental Uncertainty", "Powerlaw Fit Used for Sampling"])
    fig.xscale("log", nonpositive='clip')
    fig.xlabel("Intensity")
    fig.ylabel("Relative Uncertainty")
    fig.title("Example X-Ray SAS Synthesis Process -"
              + " Relative Uncertainty vs Intensity for Experimental Data")
    fig.show()
    # reading sasmodel data for example synthesis
    modelf = "example_sasmodel"
    mdlname = "1D_core_shell_cylinder_20_20_400_nosmearing.txt"
    x_mdl, y_mdl = read_sasmodel_data(modelf, mdlname)
    # plotting example synthesis case
    fig, _, _ = norm_ydep_pwr_synth(x_mdl, y_mdl, pwr_pars)
    fig.title(
        "Example X-Ray SAS Synthesis Process - Synthetic Data from sasmodel (" +
        mdlname +
        ")")
    fig.show()
def example_neutron_synth():
    """Run a graphical demonstration of the neutron-style synthesis approach.

    Reads the first file in the ``experimental_neutron_sas`` folder, plots
    the experimental data, and uses a linear interpolation of its relative
    uncertainty vs Q profile to synthesise a noisy version of an example
    sasmodel dataset.

    NOTE(review): relies on relative folder names, so it assumes the current
    working directory is the scripts folder — confirm before running.
    """
    # find the folder and file containing the experimental neutron data
    pfolder = "experimental_neutron_sas"
    fnames = file_names(pfolder)
    fname = fnames[0]
    # read the neutron experimental data (x, y and y-uncertainty)
    x_xp, y_xp, ydev_xp = read_neutron_data(pfolder, fname)
    # determine relative uncertainty
    ydev_rel_xp = ydev_xp / y_xp
    fig = errorbar_xy(
        x_xp,
        y_xp,
        ydev_xp,
        title="Example Neutron SAS Synthesis Process - " +
        "Experimental Data (" +
        fname +
        ")")
    fig.show()
    # For neutron data, a curve fit was not appropriate for either x or
    # y dependent y-uncertainty so a linear interpolant of the x dependent
    # case was used.
    plt.plot(x_xp, ydev_rel_xp)
    plt.xlabel("X (Independent)")
    plt.ylabel("Relative Y_Uncertainty (Dependent)")
    plt.title("Example Neutron SAS Synthesis Process - " +
              "No Suitable Curve Fit")
    plt.show()
    # reading sasmodel data for example synthesis
    modelf = "example_sasmodel"
    mdlname = "1D_core_shell_cylinder_20_20_400_nosmearing.txt"
    x_mdl, y_mdl = read_sasmodel_data(modelf, mdlname)
    # plotting example synthesis case
    fig, _, _ = norm_xdep_linterp_synth(x_mdl, y_mdl, x_xp, ydev_rel_xp)
    fig.title(
        "Example Neutron SAS Synthesis Process - Synthetic Data from sasmodel (" +
        mdlname +
        ")")
    fig.show()
def xray_synth(pfolder):
    """Create synthetic x-ray data for all sasmodel data files in *pfolder*
    and write them to the folder with supporting figures.

    Noise and uncertainty are modelled on a reference experimental x-ray
    dataset via a power law fit of relative uncertainty against intensity.

    FIX: replaced hard-coded "\\" path separators with os.path.join (POSIX
    portability) and hoisted the repeated os.listdir out of the scan loop.
    """
    # experimental dataset on which to base noise and uncertainty
    xp_folder = "experimental_xray_sas"
    xp_fname = "100 and 200 nm Polystyrene NPs in Water.dat"
    # read the x-ray experimental data (y and y-uncertainty)
    _, y_xp, ydev_xp = read_xray_data(xp_folder, xp_fname)
    # normalising data
    y_xp_norm, _ = normalise_data(y_xp)
    ydev_rel_xp_norm, _ = normalise_data(ydev_xp / y_xp)
    # characterise normalised relative uncertainty as a power law of
    # normalised intensity
    fig, pwr_pars, _ = power_fit(y_xp_norm, ydev_rel_xp_norm)
    fig.close()
    # model data from which synthetic data will be generated; skip any file
    # that already has a synthesised counterpart
    existing = os.listdir(pfolder)
    ext = "_xray_synth.txt"
    mdl_fnames = [f for f in existing
                  if f.endswith(".txt") and not f.endswith("_synth.txt")
                  and f[:-4] + ext not in existing]
    if not mdl_fnames:
        print("No outstanding sasmodel datasets for xray"
              + " synthesis found in directory.")
    for mdl in mdl_fnames:
        syn_fname = mdl[:-4] + "_xray_synth"
        fig_name = os.path.join(pfolder, syn_fname + ".png")
        x_mdl, y_mdl = read_sasmodel_data(pfolder, mdl)
        fig, y_syn, ydev_syn = norm_ydep_pwr_synth(x_mdl, y_mdl, pwr_pars)
        fig.title(fig_name)
        fig_h = fig.gcf()
        fig_h.set_size_inches(24, 13.5)
        fig.savefig(fig_name, bbox_inches='tight')
        fig.close()
        # write the synthetic dataset to a tab delimited text file
        syn_dat = np.column_stack((x_mdl, y_syn, ydev_syn))
        np.savetxt(os.path.join(pfolder, syn_fname + ".txt"),
                   syn_dat,
                   header='<X>\t<Y>\t<devY>',
                   fmt='%.5f %.5f %.5f',
                   delimiter='\t')
def neutron_synth(pfolder):
    """Create synthetic neutron data for all sasmodel data files in *pfolder*
    and write them to the folder with supporting figures.

    Noise and uncertainty are modelled on a reference experimental neutron
    dataset via linear interpolation of its relative-uncertainty profile.

    FIX: replaced hard-coded "\\" path separators with os.path.join (POSIX
    portability) and hoisted the repeated os.listdir out of the scan loop.
    """
    # experimental dataset on which to base noise and uncertainty
    xp_folder = "experimental_neutron_sas"
    xp_fname = "33837rear_1D_1.75_16.5_NXcanSAS.h5"
    # read the neutron experimental data (x, y and y-uncertainty)
    x_xp, y_xp, ydev_xp = read_neutron_data(xp_folder, xp_fname)
    # determine relative uncertainty
    ydev_rel_xp = ydev_xp / y_xp
    # model data from which synthetic data will be generated; skip any file
    # that already has a synthesised counterpart
    existing = os.listdir(pfolder)
    ext = "_neutron_synth.txt"
    mdl_fnames = [f for f in existing
                  if f.endswith(".txt") and not f.endswith("_synth.txt")
                  and f[:-4] + ext not in existing]
    if not mdl_fnames:
        print("No outstanding sasmodel datasets for neutron"
              + " synthesis found in directory.")
    for mdl in mdl_fnames:
        syn_fname = mdl[:-4] + "_neutron_synth"
        fig_name = os.path.join(pfolder, syn_fname + ".png")
        x_mdl, y_mdl = read_sasmodel_data(pfolder, mdl)
        fig, y_syn, ydev_syn = norm_xdep_linterp_synth(
            x_mdl, y_mdl, x_xp, ydev_rel_xp)
        fig.title(fig_name)
        fig_h = fig.gcf()
        fig_h.set_size_inches(24, 13.5)
        fig.savefig(fig_name, bbox_inches='tight')
        fig.close()
        # write the synthetic dataset to a tab delimited text file
        syn_dat = np.column_stack((x_mdl, y_syn, ydev_syn))
        np.savetxt(os.path.join(pfolder, syn_fname + ".txt"),
                   syn_dat,
                   header='<X>\t<Y>\t<devY>',
                   fmt='%.5f %.5f %.5f',
                   delimiter='\t')
def problem_def_txt(rfolder, wfolder):
    """Generate the problem files and META file to accompany the synthetic
    datasets for use in fitbenchmarking.

    Inputs: rfolder - folder containing the *_synth.txt synthetic datasets
            wfolder - folder to write problem definition files and META.txt
    Existing problem files and an existing META.txt are left untouched.
    """
    titstr = "# FitBenchmark Problem"
    sftstr = "software = 'SASView'"
    # NOTE(review): these two strings are concatenated without a separator,
    # producing "...,...'background=0.0..." — presumably a hand-edited
    # placeholder template; confirm the intended function-string format.
    fncstr = ["function = 'name=FUNCTION_NAME,PAR1=0.0,PARn=0.0,...'" +
              "background=0.0,scale=1.0,sld=4.0,sld_solvent=1.0'"]
    mdl_fnames = []
    neutronext = "_neutron_synth.txt"
    xrayext = "_xray_synth.txt"
    for file in os.listdir(rfolder):
        if file.endswith(neutronext) or file.endswith(xrayext):
            mdl_fnames.append(file)
    # template pieces; list slots are overwritten per-file below
    prob_fnames = []
    base_prob_fname = ["MODEL_NAME", "EXPERIMENT_TYPE", "_def.txt"]
    prob_names = []
    base_prob_name = ["name = '", "MODEL_NAME",
                      "(synthetic ", "EXPERIMENT_TYPE", ")'"]
    descs = []
    base_desc = [
        "description = 'A first iteration synthetic dataset generated for the ",
        "MODEL_NAME",
        "SASView model in the fashion of ",
        "EXPERIMENT_TYPE",
        " small angle scattering experiments. Generated on ",
        time.asctime(),
        ".'"]
    input_files = []
    base_input_files = ["input_file = '", "INPUT_FILENAME", "'"]
    for fname in mdl_fnames:
        # the model name is everything before the second digit group in the
        # filename (the first group is the "1D"-style dimensionality prefix)
        digs = re.findall(r'\d+', fname)
        mdl_name = fname[0:fname.find(digs[1])]
        base_prob_fname[0] = mdl_name
        mdl_name = mdl_name.replace("_", " ")
        if fname.endswith(neutronext):
            base_prob_name[1] = mdl_name
            base_desc[1] = mdl_name
            base_prob_fname[1] = "neutron"
            base_prob_name[3] = "neutron"
            base_desc[3] = "neutron"
            prob_fnames.append("".join(base_prob_fname))
            base_input_files[1] = fname
            prob_names.append("".join(base_prob_name))
            descs.append("".join(base_desc))
            input_files.append("".join(base_input_files))
        elif fname.endswith(xrayext):
            base_prob_name[1] = mdl_name
            base_desc[1] = mdl_name
            base_prob_fname[1] = "x-ray"
            base_prob_name[3] = "x-ray"
            base_desc[3] = "x-ray"
            prob_fnames.append("".join(base_prob_fname))
            base_input_files[1] = fname
            prob_names.append("".join(base_prob_name))
            descs.append("".join(base_desc))
            input_files.append("".join(base_input_files))
    # write one problem definition file per dataset (skip existing files)
    for fname, input_file, prob, desc in zip(
            prob_fnames, input_files, prob_names, descs):
        text_body = "\n".join([titstr, sftstr, prob, desc, input_file, fncstr[0]])
        if fname not in os.listdir(wfolder):
            with open(wfolder + "//" + fname, "w") as prob_def_file:
                prob_def_file.write(text_body)
    # write a summary META.txt once, with a fixed-width table of datasets
    if "META.txt" not in os.listdir(wfolder):
        with open(wfolder + "//" + "META.txt", "w") as meta_file:
            meta_file.write(wfolder + "\n")
            meta_file.write("\n")
            meta_file.write("%s problems synthesised from the SASView models"
                            "on %s. See table below for details.\n"
                            % (len(prob_fnames), time.asctime()))
            meta_file.write("\n")
            col_width = 25
            header = [
                "SASmodel Name",
                "Dimension",
                "Data Format",
                "Synthesis Style"]
            meta_file.write("".join(item.ljust(col_width) for item in header))
            meta_file.write("\n\n")
            for fname in prob_fnames:
                n_split = fname.split("_")
                dtype = n_split[0]
                if dtype == "1D":
                    dfmt = "<Q> <I> <Idev>"
                syn_style = n_split[-2]
                mname = " ".join(n_split[1:-2])
                tab_line = [mname, dtype, dfmt, syn_style]
                meta_file.write("".join(item.ljust(col_width)
                                        for item in tab_line))
                meta_file.write("\n")
def main():
    """Run the requested synthesis style over <folder>/data_files and then
    generate the fitbenchmarking problem-definition files.

    FIX: uses os.path.join instead of a hard-coded "\\" separator so the
    script also works on POSIX systems.
    """
    pfolder = args.folder
    synthtype = args.synthtype
    dfolder = os.path.join(pfolder, "data_files")
    if synthtype == "all":
        xray_synth(dfolder)
        neutron_synth(dfolder)
    elif synthtype == "xray":
        xray_synth(dfolder)
    elif synthtype == "neutron":
        neutron_synth(dfolder)
    problem_def_txt(dfolder, pfolder)


if __name__ == "__main__":
    main()
/CsuPMTD-1.0.27.tar.gz/CsuPMTD-1.0.27/PMTD/maskrcnn_benchmark/apex/apex/contrib/multihead_attn/self_multihead_attn.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .self_multihead_attn_func import self_attn_func
from .fast_self_multihead_attn_func import fast_self_attn_func
from .fast_self_multihead_attn_norm_add_func import fast_self_attn_norm_add_func
from apex.normalization.fused_layer_norm import FusedLayerNorm
# Disable the TorchScript profiling executor/mode (when this private API is
# available) so the scripted dropout-add helper below is not re-specialised
# per input shape.
if hasattr(torch._C, '_jit_set_profiling_executor') :
    torch._C._jit_set_profiling_executor(False)
if hasattr(torch._C, '_jit_set_profiling_mode') :
    torch._C._jit_set_profiling_mode(False)
@torch.jit.script
def jit_dropout_add(x, residual, prob, is_training):
    # type: (Tensor, Tensor, float, bool) -> Tensor
    # Fused dropout(x) + residual add, scripted so the two ops can fuse.
    # BUG FIX: the original hard-coded training=True and ignored the
    # is_training argument; pass the flag through. Existing call sites only
    # invoke this under `if is_training:`, so their behaviour is unchanged.
    out = F.dropout(x, p=prob, training=is_training)
    out = residual + out
    return out
class SelfMultiheadAttn(nn.Module):
    """Multi-headed attention.
    See "Attention Is All You Need" for more details.
    """
    def __init__(self, embed_dim, num_heads, dropout=0., bias=False, include_norm_add=False, impl='fast', separate_qkv_params=False, mask_additive=False):
        """Build the projection parameters and select the attention kernel.

        Args:
            embed_dim: total embedding dimension (split evenly across heads).
            num_heads: number of attention heads; must divide embed_dim.
            dropout: attention dropout probability.
            bias: whether the input/output projections carry bias terms.
            include_norm_add: fuse a pre-layer-norm and residual add around
                the attention (not supported together with additive masks).
            impl: 'fast' (fused CUDA kernels) or 'default' (pure PyTorch).
            separate_qkv_params: keep Q/K/V weights as three separate
                parameters instead of one packed [3*embed_dim, embed_dim]
                matrix.
            mask_additive: interpret the key padding mask as an additive
                float mask rather than a boolean one.
        """
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.bias = bias
        self.include_norm_add = include_norm_add
        self.impl = impl
        self.scaling = self.head_dim**-0.5
        self.separate_qkv_params = separate_qkv_params
        self.mask_additive = mask_additive
        if mask_additive:
            assert self.include_norm_add == False, "additive mask not supported with layer norm"
            assert impl == 'default' or (impl == 'fast' and bias), "additive mask not supported for fast mode without bias"
        # Q/K/V projection weights: either three [embed_dim, embed_dim]
        # matrices, or one packed [3*embed_dim, embed_dim] matrix.
        if separate_qkv_params:
            self.q_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
            self.k_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
            self.v_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
        else:
            self.in_proj_weight = Parameter(torch.Tensor(3*embed_dim, embed_dim))
        self.out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
        # Optional bias terms, mirroring the weight layout above.
        if self.bias:
            if separate_qkv_params:
                self.q_bias = Parameter(torch.Tensor(embed_dim))
                self.k_bias = Parameter(torch.Tensor(embed_dim))
                self.v_bias = Parameter(torch.Tensor(embed_dim))
            else:
                self.in_proj_bias = Parameter(torch.Tensor(3*embed_dim))
            self.out_proj_bias = Parameter(torch.Tensor(embed_dim))
        else:
            if separate_qkv_params:
                self.register_parameter('q_bias', None)
                self.register_parameter('k_bias', None)
                self.register_parameter('v_bias', None)
                self.q_bias = None
                self.k_bias = None
                self.v_bias = None
            else:
                self.register_parameter('in_proj_bias', None)
                self.in_proj_bias = None
            self.register_parameter('out_proj_bias', None)
            self.out_proj_bias = None
        # Optional fused layer-norm parameters: raw gamma/beta tensors for
        # the 'fast' kernel, a FusedLayerNorm module for the default path.
        if self.include_norm_add:
            if impl == 'fast':
                self.lyr_nrm_gamma_weights = Parameter(torch.Tensor(embed_dim))
                self.lyr_nrm_beta_weights = Parameter(torch.Tensor(embed_dim))
                self.lyr_nrm = None
            else:
                self.register_parameter('lyr_norm_gamma_weights', None)
                self.register_parameter('lyr_norm_beta_weights', None)
                self.lyr_nrm_gamma_weights = None
                self.lyr_nrm_beta_weights = None
                self.lyr_nrm = FusedLayerNorm(embed_dim)
        self.reset_parameters()
        # Pick the attention function once at construction time.
        if self.include_norm_add:
            if impl == 'fast' : self.attn_func = fast_self_attn_norm_add_func
            elif impl == 'default' : self.attn_func = self_attn_func
            else : assert False, "Unsupported impl: {} !".format(impl)
        else:
            if impl == 'fast' : self.attn_func = fast_self_attn_func
            elif impl == 'default' : self.attn_func = self_attn_func
            else : assert False, "Unsupported impl: {} !".format(impl)
    def reset_parameters(self):
        """Re-initialise projection weights (Xavier), biases and norm params."""
        if self.separate_qkv_params:
            nn.init.xavier_uniform_(self.q_weight)
            nn.init.xavier_uniform_(self.k_weight)
            nn.init.xavier_uniform_(self.v_weight)
        else:
            # in_proj_weight has shape [3 * hidden, hidden] but it should be
            # initialized like a [hidden, hidden] matrix.
            # sqrt(6 / (hidden + hidden)) / sqrt(6 / (3 * hidden + hidden)) = sqrt(2)
            # therefore xavier_uniform gain should be set to sqrt(2).
            nn.init.xavier_uniform_(self.in_proj_weight, gain=math.sqrt(2))
        nn.init.xavier_uniform_(self.out_proj_weight)
        if self.bias:
            if self.separate_qkv_params:
                nn.init.constant_(self.q_bias, 0.)
                nn.init.constant_(self.k_bias, 0.)
                nn.init.constant_(self.v_bias, 0.)
            else:
                nn.init.constant_(self.in_proj_bias, 0.)
            nn.init.constant_(self.out_proj_bias, 0.)
        if self.include_norm_add:
            if self.impl == 'fast':
                nn.init.ones_(self.lyr_nrm_gamma_weights)
                nn.init.zeros_(self.lyr_nrm_beta_weights)
            else:
                self.lyr_nrm.reset_parameters()
    def forward(self, query, key, value, key_padding_mask=None, need_weights=False, attn_mask=None, is_training=True):
        """Input shape: Time x Batch x Channel
        Self-attention can be implemented by passing in the same arguments for
        query, key and value. Future timesteps can be masked with the
        `mask_future_timesteps` argument. Padding elements can be excluded from
        the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
        batch x src_len, where padding elements are indicated by 1s.
        """
        # Pack separate Q/K/V weights into the interleaved per-head
        # [3*embed_dim, embed_dim] layout that the attention kernels expect.
        if self.separate_qkv_params:
            input_weights = torch.cat([self.q_weight.view(self.num_heads,1,self.head_dim,self.embed_dim), self.k_weight.view(self.num_heads,1,self.head_dim,self.embed_dim), self.v_weight.view(self.num_heads,1,self.head_dim,self.embed_dim)], dim=1).reshape(3*self.embed_dim,self.embed_dim).contiguous()
        else:
            input_weights = self.in_proj_weight
        if self.bias:
            if self.separate_qkv_params:
                input_bias = torch.cat([self.q_bias.view(self.num_heads,1,self.head_dim), self.k_bias.view(self.num_heads,1,self.head_dim), self.v_bias.view(self.num_heads,1,self.head_dim)],dim=1).reshape(3*self.embed_dim).contiguous()
            else:
                input_bias = self.in_proj_bias
        else:
            input_bias=None
        # Exactly one of key_padding_mask / attn_mask may be supplied.
        if key_padding_mask is not None:
            assert (attn_mask is None), "ERROR attn_mask and key_padding_mask should not be both defined!"
            mask = key_padding_mask
        elif attn_mask is not None:
            assert self.mask_additive == False, "additive mask not supported for time mask"
            mask = attn_mask
        else:
            mask = None
        if self.include_norm_add:
            if self.impl == 'fast':
                # fused kernel performs layer norm + attention + residual add
                outputs = self.attn_func(attn_mask is not None, is_training, self.num_heads, query,
                                         self.lyr_nrm_gamma_weights, self.lyr_nrm_beta_weights,
                                         input_weights, self.out_proj_weight, mask, self.dropout)
            else:
                # default path: explicit layer norm, attention, then
                # dropout + residual add (dropout only while training)
                lyr_nrm_results = self.lyr_nrm(query)
                outputs = self.attn_func(attn_mask is not None, is_training, self.num_heads, self.scaling, lyr_nrm_results,
                                         input_weights, self.out_proj_weight,
                                         input_bias, self.out_proj_bias,
                                         mask, self.dropout)
                if is_training:
                    outputs = jit_dropout_add(outputs, query, self.dropout, is_training)
                else:
                    outputs = outputs + query
        else:
            if self.impl == 'fast':
                outputs = self.attn_func(attn_mask is not None, is_training, self.num_heads, query,
                                         input_weights, self.out_proj_weight, input_bias, self.out_proj_bias, mask, self.mask_additive, self.dropout)
            else:
                outputs = self.attn_func(attn_mask is not None, is_training, self.num_heads, self.scaling, query,
                                         input_weights, self.out_proj_weight,
                                         input_bias, self.out_proj_bias,
                                         mask, self.mask_additive, self.dropout)
        # Attention weights are never returned by this implementation.
        return outputs,None
/MetaGram-2.0.2.tar.gz/MetaGram-2.0.2/pyrogram/methods/utilities/run.py |
import asyncio
import inspect
import pyrogram
from pyrogram.methods.utilities.idle import idle
class Run:
    def run(
        self: "pyrogram.Client",
        coroutine=None
    ):
        """Start the client, idle the main script and finally stop the client.

        Called with no argument, this is a convenience wrapper that runs
        :meth:`~pyrogram.Client.start`, :meth:`~pyrogram.idle` and
        :meth:`~pyrogram.Client.stop` in sequence, so a single client can be
        run without boilerplate.

        Called with a coroutine, it runs that coroutine to completion on the
        current event loop and performs no client operations — essentially
        :py:obj:`asyncio.run` but reusing the existing loop.

        To run several clients at once, see :meth:`pyrogram.compose`.

        Parameters:
            coroutine (``Coroutine``, *optional*):
                Pass a coroutine to run it until it completes.

        Raises:
            ConnectionError: In case you try to run an already started client.

        Example:
            .. code-block:: python

                from pyrogram import Client

                app = Client("my_account")
                ...  # Set handlers up
                app.run()
        """
        event_loop = asyncio.get_event_loop()
        execute = event_loop.run_until_complete

        if coroutine is not None:
            execute(coroutine)
            return

        # start() may be sync or async depending on the client flavour
        if inspect.iscoroutinefunction(self.start):
            execute(self.start())
            execute(idle())
            execute(self.stop())
        else:
            self.start()
            execute(idle())
            self.stop()
/Flask-B3-0.0.7.tar.gz/Flask-B3-0.0.7/README.md | [](https://travis-ci.org/davidcarboni/Flask-B3)
# Flask B3
Implements B3 propagation for Python/Flask.
Does not implement communication with a Zipkin server.
## B3
B3 is used by [Zipkin](http://zipkin.io/) for building distributed trace trees.
It's the set of headers and values you need to use when doing distributed tracing.
Specifically, this implements: https://github.com/openzipkin/b3-propagation
## Purpose
The aim is to make it clean and simple to read and propagate B3 headers.
This code intentionally implements B3 only.
It does not send tracing information to a Zipkin server.
There are two use cases:
* You're interested in distributed log aggregation, but not interested in using Zipkin.
* You'd like a B3 implementation to base your own Zipkin instrumentation on.
## Motivation
I built this library to enable Python to "play nicely" in a distributed tracing environment
(specifically taking into account [Spring Cloud Sleuth](https://cloud.spring.io/spring-cloud-sleuth/)).
I want to be able to correlate logs across multiple services and
I don't need the full power of Zipkin at this stage.
This provides a relatively low-impact first-step on the distributed tracing journey.
Incoming B3 values are made available and B3 headers can be generated for onward requests.
## Usage
You'll get two things from this implementation:
* B3 values for the current span are made available via the `values()` function.
These can be included in [log lines sent to stdout](https://12factor.net/logs)
so that log handling can be externalised, keeping services small and focused.
* Sub-span headers can be created
for propagating trace IDs when making calls to downstream services.
Here are the three steps you'll need to use flask_b3.
### Collect B3 headers from an incoming request
This could be called from a Flask `before_request()` function,
optionally passing in, say, `request.headers`.
Alternatively, it can be directly passed to `before_request()`.
This will generate any needed identifiers
(e.g. a new `trace_id` for a root span):
start_span()
If you want the end of a span to be logged ("Server Send")
you can call the following (or pass it directly to `Flask.after_request)`:
end_span()
### Add headers to onward requests
If your service needs to call other services,
you'll need to add B3 headers to the outgoing request.
This is done by starting a new sub-span, optionally passing in headers to be updated.
Once this is done, you'll get subspan IDs returned from `values()`
(e.g. for logging) until you end the subspan.
This will set up the right B3 values for a sub-span in the trace
and return a dict containing the headers you'll need for your service call:
with SubSpan([headers]) as b3_headers:
... log.debug("Calling downstream service...")
... r = requests.get(<downstream service>, headers=b3_headers)
... log.debug("Downstream service responded...")
### Access B3 values
When you need to work with tracing information, for example to build log messages,
this gets you a dict with keys that match the B3 header names
(`X-B3-TraceId`, `X-B3-ParentSpanId`, `X-B3-SpanId`, `X-B3-Sampled` and `X-B3-Flags`) for the current span (or subspan if you've started one):
values()
## Other stuff?
Surely it's more complicated than that — doesn't it need configuration, or do something else?
No. That's all.
| PypiClean |
/BIA_OBS-1.0.3.tar.gz/BIA_OBS-1.0.3/BIA/static/dist/node_modules/tailwindcss/lib/lib/content.js | "use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
// (Babel/SWC-generated helper) Expose each entry of `all` on `target` as an
// enumerable lazy getter, so importers always observe the live binding.
function _export(target, all) {
    for(var name in all)Object.defineProperty(target, name, {
        enumerable: true,
        get: all[name]
    });
}
// Declare this module's public API; each export resolves lazily through the
// getters installed by _export above.
_export(exports, {
    parseCandidateFiles: ()=>parseCandidateFiles,
    resolvedChangedContent: ()=>resolvedChangedContent
});
const _fs = /*#__PURE__*/ _interopRequireDefault(require("fs"));
const _path = /*#__PURE__*/ _interopRequireDefault(require("path"));
const _isGlob = /*#__PURE__*/ _interopRequireDefault(require("is-glob"));
const _fastGlob = /*#__PURE__*/ _interopRequireDefault(require("fast-glob"));
const _normalizePath = /*#__PURE__*/ _interopRequireDefault(require("normalize-path"));
const _parseGlob = require("../util/parseGlob");
const _sharedState = require("./sharedState");
function _interopRequireDefault(obj) {
    // ES modules already expose their own `default`; wrap everything else
    // (CommonJS exports, primitives, null) so `.default` always exists.
    if (obj && obj.__esModule) {
        return obj;
    }
    return {
        default: obj
    };
}
/**
 * Resolve the user's `content.files` config into a normalized list of
 * ContentPath records: string globs are split into included/excluded sets,
 * resolved relative to the config file (or cwd), symlinks are followed, and
 * final match patterns are computed.
 *
 * @param {any} context
 * @param {any} tailwindConfig
 * @returns {ContentPath[]}
 */ function parseCandidateFiles(context, tailwindConfig) {
    let files = tailwindConfig.content.files;
    // Normalize the file globs (non-string entries are raw content, handled elsewhere)
    files = files.filter((filePath)=>typeof filePath === "string");
    files = files.map(_normalizePath.default);
    // Split into included and excluded globs
    let tasks = _fastGlob.default.generateTasks(files);
    /** @type {ContentPath[]} */ let included = [];
    /** @type {ContentPath[]} */ let excluded = [];
    for (const task of tasks){
        included.push(...task.positive.map((filePath)=>parseFilePath(filePath, false)));
        excluded.push(...task.negative.map((filePath)=>parseFilePath(filePath, true)));
    }
    let paths = [
        ...included,
        ...excluded
    ];
    // Resolve paths relative to the config file or cwd
    paths = resolveRelativePaths(context, paths);
    // Resolve symlinks if possible
    paths = paths.flatMap(resolvePathSymlinks);
    // Update cached patterns
    paths = paths.map(resolveGlobPattern);
    return paths;
}
/**
 * Build a ContentPath record for one user-supplied path or glob.
 *
 * @param {string} filePath
 * @param {boolean} ignore - true when the path came from a negated glob
 * @returns {ContentPath}
 */ function parseFilePath(filePath, ignore) {
    const contentPath = {
        original: filePath,
        base: filePath,
        ignore: ignore,
        pattern: filePath,
        glob: null
    };
    // Globs get their static base and dynamic glob part split out.
    if ((0, _isGlob.default)(filePath)) {
        Object.assign(contentPath, (0, _parseGlob.parseGlob)(filePath));
    }
    return contentPath;
}
/**
 * Compute the final match pattern for a ContentPath: base joined with its
 * glob part (if any), negated when it is an ignore entry, and normalized to
 * forward slashes.
 *
 * @param {ContentPath} contentPath
 * @returns {ContentPath}
 */ function resolveGlobPattern(contentPath) {
    let pattern = contentPath.glob ? `${contentPath.base}/${contentPath.glob}` : contentPath.base;
    if (contentPath.ignore) {
        pattern = `!${pattern}`;
    }
    // This is required for Windows support to properly pick up Glob paths.
    // Afaik, this technically shouldn't be needed but there's probably
    // some internal, direct path matching with a normalized path in
    // a package which can't handle mixed directory separators
    contentPath.pattern = (0, _normalizePath.default)(pattern);
    return contentPath;
}
/**
 * Resolve each path relative to the config file (when possible) if the experimental flag is enabled
 * Otherwise, resolve relative to the current working directory
 *
 * Note: the ContentPath objects are mutated in place (only `base` changes).
 *
 * @param {any} context
 * @param {ContentPath[]} contentPaths
 * @returns {ContentPath[]}
 */ function resolveRelativePaths(context, contentPaths) {
    let resolveFrom = [];
    // Resolve base paths relative to the config file (when possible) if the experimental flag is enabled
    if (context.userConfigPath && context.tailwindConfig.content.relative) {
        resolveFrom = [
            _path.default.dirname(context.userConfigPath)
        ];
    }
    return contentPaths.map((contentPath)=>{
        // With an empty `resolveFrom`, path.resolve falls back to cwd.
        contentPath.base = _path.default.resolve(...resolveFrom, contentPath.base);
        return contentPath;
    });
}
/**
 * Resolve the symlink for the base directory / file in each path
 * These are added as additional dependencies to watch for changes because
 * some tools (like webpack) will only watch the actual file or directory
 * but not the symlink itself even in projects that use monorepos.
 *
 * @param {ContentPath} contentPath
 * @returns {ContentPath[]} the original entry, plus a clone with the real path when it differs
 */
function resolvePathSymlinks(contentPath) {
    const results = [contentPath];
    try {
        const realPath = _fs.default.realpathSync(contentPath.base);
        if (realPath !== contentPath.base) {
            results.push({
                ...contentPath,
                base: realPath
            });
        }
    } catch {
    // Base may not exist (yet); keep just the original entry.
    // TODO: log this?
    }
    return results;
}
// Collect all content that must be (re)scanned: raw inline content entries
// plus the text of every candidate file whose mtime advanced since last run.
// NOTE(review): the name reads like a typo for `resolveChangedContent`, but it
// may be referenced elsewhere in this module — left unchanged.
function resolvedChangedContent(context, candidateFiles, fileModifiedMap) {
    // Inline `{ raw, extension }` entries are always included (no mtime to check).
    let changedContent = context.tailwindConfig.content.files.filter((item)=>typeof item.raw === "string").map(({ raw , extension ="html" })=>({
            content: raw,
            extension
        }));
    // `resolveChangedFiles` also updates `fileModifiedMap` as a side effect.
    for (let changedFile of resolveChangedFiles(candidateFiles, fileModifiedMap)){
        let content = _fs.default.readFileSync(changedFile, "utf8");
        let extension = _path.default.extname(changedFile).slice(1);
        changedContent.push({
            content,
            extension
        });
    }
    return changedContent;
}
/**
 * Expand the candidate patterns on disk and return every file whose
 * modification time advanced past the cached value.  The cache in
 * `fileModifiedMap` is updated in place for the files returned.
 *
 * @param {ContentPath[]} candidateFiles
 * @param {Map<string, number>} fileModifiedMap
 * @returns {Set<string>}
 */
function resolveChangedFiles(candidateFiles, fileModifiedMap) {
    const patterns = candidateFiles.map((contentPath)=>contentPath.pattern);
    const changedFiles = new Set();
    _sharedState.env.DEBUG && console.time("Finding changed files");
    const matches = _fastGlob.default.sync(patterns, {
        absolute: true
    });
    for (const file of matches){
        // Files never seen before compare against -Infinity, so they always count.
        const previous = fileModifiedMap.has(file) ? fileModifiedMap.get(file) : -Infinity;
        const mtime = _fs.default.statSync(file).mtimeMs;
        if (mtime > previous) {
            changedFiles.add(file);
            fileModifiedMap.set(file, mtime);
        }
    }
    _sharedState.env.DEBUG && console.timeEnd("Finding changed files");
    return changedFiles;
}
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/full/lang/vi.js | /*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/license
*/
CKEDITOR.lang['vi']={"editor":"Bộ soạn thảo văn bản có định dạng","editorPanel":"Bảng điều khiển Rich Text Editor","common":{"editorHelp":"Nhấn ALT + 0 để được giúp đỡ","browseServer":"Duyệt máy chủ","url":"URL","protocol":"Giao thức","upload":"Tải lên","uploadSubmit":"Tải lên máy chủ","image":"Hình ảnh","flash":"Flash","form":"Biểu mẫu","checkbox":"Nút kiểm","radio":"Nút chọn","textField":"Trường văn bản","textarea":"Vùng văn bản","hiddenField":"Trường ẩn","button":"Nút","select":"Ô chọn","imageButton":"Nút hình ảnh","notSet":"<không thiết lập>","id":"Định danh","name":"Tên","langDir":"Hướng ngôn ngữ","langDirLtr":"Trái sang phải (LTR)","langDirRtl":"Phải sang trái (RTL)","langCode":"Mã ngôn ngữ","longDescr":"Mô tả URL","cssClass":"Lớp Stylesheet","advisoryTitle":"Nhan đề hướng dẫn","cssStyle":"Kiểu ","ok":"Đồng ý","cancel":"Bỏ qua","close":"Đóng","preview":"Xem trước","resize":"Kéo rê để thay đổi kích cỡ","generalTab":"Tab chung","advancedTab":"Tab mở rộng","validateNumberFailed":"Giá trị này không phải là số.","confirmNewPage":"Mọi thay đổi không được lưu lại, nội dung này sẽ bị mất. Bạn có chắc chắn muốn tải một trang mới?","confirmCancel":"Một vài tùy chọn đã bị thay đổi. 
Bạn có chắc chắn muốn đóng hộp thoại?","options":"Tùy chọn","target":"Đích đến","targetNew":"Cửa sổ mới (_blank)","targetTop":"Cửa sổ trên cùng (_top)","targetSelf":"Tại trang (_self)","targetParent":"Cửa sổ cha (_parent)","langDirLTR":"Trái sang phải (LTR)","langDirRTL":"Phải sang trái (RTL)","styles":"Kiểu","cssClasses":"Lớp CSS","width":"Chiều rộng","height":"Chiều cao","align":"Vị trí","left":"Trái","right":"Phải","center":"Giữa","justify":"Sắp chữ","alignLeft":"Canh trái","alignRight":"Canh phải","alignCenter":"Canh giữa","alignTop":"Trên","alignMiddle":"Giữa","alignBottom":"Dưới","alignNone":"Không","invalidValue":"Giá trị không hợp lệ.","invalidHeight":"Chiều cao phải là số nguyên.","invalidWidth":"Chiều rộng phải là số nguyên.","invalidLength":"Giá trị cho trường \"%1\" phải là một số dương có hoặc không có đơn vị đo lường hợp lệ (%2)","invalidCssLength":"Giá trị quy định cho trường \"%1\" phải là một số dương có hoặc không có một đơn vị đo CSS hợp lệ (px, %, in, cm, mm, em, ex, pt, hoặc pc).","invalidHtmlLength":"Giá trị quy định cho trường \"%1\" phải là một số dương có hoặc không có một đơn vị đo HTML hợp lệ (px hoặc %).","invalidInlineStyle":"Giá trị quy định cho kiểu nội tuyến phải bao gồm một hoặc nhiều dữ liệu với định dạng \"tên:giá trị\", cách nhau bằng dấu chấm phẩy.","cssLengthTooltip":"Nhập một giá trị theo pixel hoặc một số với một đơn vị CSS hợp lệ (px, %, in, cm, mm, em, ex, pt, hoặc pc).","unavailable":"%1<span class=\"cke_accessibility\">, không có</span>","keyboard":{"8":"Phím Backspace","13":"Enter","16":"Shift","17":"Ctrl","18":"Alt","32":"Cách","35":"End","36":"Home","46":"Xóa","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Command"},"keyboardShortcut":"Phím tắt","optionDefault":"Mặc 
định"},"about":{"copy":"Bản quyền © $1. Giữ toàn quyền.","dlgTitle":"Thông tin về CKEditor 4","moreInfo":"Vui lòng ghé thăm trang web của chúng tôi để có thông tin về giấy phép:"},"basicstyles":{"bold":"Đậm","italic":"Nghiêng","strike":"Gạch xuyên ngang","subscript":"Chỉ số dưới","superscript":"Chỉ số trên","underline":"Gạch chân"},"bidi":{"ltr":"Văn bản hướng từ trái sang phải","rtl":"Văn bản hướng từ phải sang trái"},"blockquote":{"toolbar":"Khối trích dẫn"},"notification":{"closed":"Notification closed."},"toolbar":{"toolbarCollapse":"Thu gọn thanh công cụ","toolbarExpand":"Mở rộng thnah công cụ","toolbarGroups":{"document":"Tài liệu","clipboard":"Clipboard/Undo","editing":"Chỉnh sửa","forms":"Bảng biểu","basicstyles":"Kiểu cơ bản","paragraph":"Đoạn","links":"Liên kết","insert":"Chèn","styles":"Kiểu","colors":"Màu sắc","tools":"Công cụ"},"toolbars":"Thanh công cụ"},"clipboard":{"copy":"Sao chép","copyError":"Các thiết lập bảo mật của trình duyệt không cho phép trình biên tập tự động thực thi lệnh sao chép. Hãy sử dụng bàn phím cho lệnh này (Ctrl/Cmd+C).","cut":"Cắt","cutError":"Các thiết lập bảo mật của trình duyệt không cho phép trình biên tập tự động thực thi lệnh cắt. Hãy sử dụng bàn phím cho lệnh này (Ctrl/Cmd+X).","paste":"Dán","pasteNotification":"Press %1 to paste. 
Your browser doesn‘t support pasting with the toolbar button or context menu option.","pasteArea":"Khu vực dán","pasteMsg":"Paste your content inside the area below and press OK."},"colorbutton":{"auto":"Tự động","bgColorTitle":"Màu nền","colors":{"000":"Đen","800000":"Maroon","8B4513":"Saddle Brown","2F4F4F":"Dark Slate Gray","008080":"Teal","000080":"Navy","4B0082":"Indigo","696969":"Dark Gray","B22222":"Fire Brick","A52A2A":"Nâu","DAA520":"Golden Rod","006400":"Dark Green","40E0D0":"Turquoise","0000CD":"Medium Blue","800080":"Purple","808080":"Xám","F00":"Đỏ","FF8C00":"Dark Orange","FFD700":"Vàng","008000":"Xanh lá cây","0FF":"Cyan","00F":"Xanh da trời","EE82EE":"Tím","A9A9A9":"Xám tối","FFA07A":"Light Salmon","FFA500":"Màu cam","FFFF00":"Vàng","00FF00":"Lime","AFEEEE":"Pale Turquoise","ADD8E6":"Light Blue","DDA0DD":"Plum","D3D3D3":"Light Grey","FFF0F5":"Lavender Blush","FAEBD7":"Antique White","FFFFE0":"Light Yellow","F0FFF0":"Honeydew","F0FFFF":"Azure","F0F8FF":"Alice Blue","E6E6FA":"Lavender","FFF":"Trắng","1ABC9C":"Xanh lơ đậm","2ECC71":"Xanh lục bảo","3498DB":"Xanh dương sáng","9B59B6":"Tím thạch anh","4E5F70":"Xanh dương xám","F1C40F":"Vàng rực","16A085":"Xanh lơ đạm","27AE60":"Xanh lục bảo đậm","2980B9":"Xanh biển đậm","8E44AD":"Tím đậm","2C3E50":"Xanh dương nhạt","F39C12":"Cam","E67E22":"Cà rốt","E74C3C":"Đỏ tái","ECF0F1":"Bạc sáng","95A5A6":"Xanh lơ xám nhạt","DDD":"Xám nhạt","D35400":"Bí ngô","C0392B":"Đỏ rực","BDC3C7":"Bạc","7F8C8D":"Xanh lơ xám","999":"Xám đen"},"more":"Màu khác...","panelTitle":"Màu sắc","textColorTitle":"Màu chữ"},"colordialog":{"clear":"Xóa bỏ","highlight":"Màu chọn","options":"Tùy chọn màu","selected":"Màu đã chọn","title":"Chọn màu"},"templates":{"button":"Mẫu dựng sẵn","emptyListMsg":"(Không có mẫu dựng sẵn nào được định nghĩa)","insertOption":"Thay thế nội dung hiện tại","options":"Tùy chọn mẫu dựng sẵn","selectPromptMsg":"Hãy chọn mẫu dựng sẵn để mở trong trình biên tập<br>(nội dung hiện tại sẽ bị mất):","title":"Nội dung Mẫu 
dựng sẵn"},"contextmenu":{"options":"Tùy chọn menu bổ xung"},"copyformatting":{"label":"Copy Formatting","notification":{"copied":"Formatting copied","applied":"Formatting applied","canceled":"Formatting canceled","failed":"Formatting failed. You cannot apply styles without copying them first."}},"div":{"IdInputLabel":"Định danh (id)","advisoryTitleInputLabel":"Nhan đề hướng dẫn","cssClassInputLabel":"Các lớp CSS","edit":"Chỉnh sửa","inlineStyleInputLabel":"Kiểu nội dòng","langDirLTRLabel":"Trái sang phải (LTR)","langDirLabel":"Hướng ngôn ngữ","langDirRTLLabel":"Phải qua trái (RTL)","languageCodeInputLabel":"Mã ngôn ngữ","remove":"Xóa bỏ","styleSelectLabel":"Kiểu (style)","title":"Tạo khối các thành phần","toolbar":"Tạo khối các thành phần"},"elementspath":{"eleLabel":"Nhãn thành phần","eleTitle":"%1 thành phần"},"filetools":{"loadError":"Error occurred during file read.","networkError":"Network error occurred during file upload.","httpError404":"HTTP error occurred during file upload (404: File not found).","httpError403":"HTTP error occurred during file upload (403: Forbidden).","httpError":"HTTP error occurred during file upload (error status: %1).","noUrlError":"Upload URL is not defined.","responseError":"Incorrect server response."},"find":{"find":"Tìm kiếm","findOptions":"Tìm tùy chọn","findWhat":"Tìm chuỗi:","matchCase":"Phân biệt chữ hoa/thường","matchCyclic":"Giống một phần","matchWord":"Giống toàn bộ từ","notFoundMsg":"Không tìm thấy chuỗi cần tìm.","replace":"Thay thế","replaceAll":"Thay thế tất cả","replaceSuccessMsg":"%1 vị trí đã được thay thế.","replaceWith":"Thay bằng:","title":"Tìm kiếm và thay thế"},"fakeobjects":{"anchor":"Điểm neo","flash":"Flash","hiddenfield":"Trường ẩn","iframe":"IFrame","unknown":"Đối tượng không rõ ràng"},"flash":{"access":"Truy cập mã","accessAlways":"Luôn luôn","accessNever":"Không bao giờ","accessSameDomain":"Cùng tên miền","alignAbsBottom":"Dưới tuyệt đối","alignAbsMiddle":"Giữa tuyệt đối","alignBaseline":"Đường cơ 
sở","alignTextTop":"Phía trên chữ","bgcolor":"Màu nền","chkFull":"Cho phép toàn màn hình","chkLoop":"Lặp","chkMenu":"Cho phép bật menu của Flash","chkPlay":"Tự động chạy","flashvars":"Các biến số dành cho Flash","hSpace":"Khoảng đệm ngang","properties":"Thuộc tính Flash","propertiesTab":"Thuộc tính","quality":"Chất lượng","qualityAutoHigh":"Cao tự động","qualityAutoLow":"Thấp tự động","qualityBest":"Tốt nhất","qualityHigh":"Cao","qualityLow":"Thấp","qualityMedium":"Trung bình","scale":"Tỷ lệ","scaleAll":"Hiển thị tất cả","scaleFit":"Vừa vặn","scaleNoBorder":"Không đường viền","title":"Thuộc tính Flash","vSpace":"Khoảng đệm dọc","validateHSpace":"Khoảng đệm ngang phải là số nguyên.","validateSrc":"Hãy đưa vào đường dẫn liên kết","validateVSpace":"Khoảng đệm dọc phải là số nguyên.","windowMode":"Chế độ cửa sổ","windowModeOpaque":"Mờ đục","windowModeTransparent":"Trong suốt","windowModeWindow":"Cửa sổ"},"font":{"fontSize":{"label":"Cỡ chữ","voiceLabel":"Kích cỡ phông","panelTitle":"Cỡ chữ"},"label":"Phông","panelTitle":"Phông","voiceLabel":"Phông"},"forms":{"button":{"title":"Thuộc tính của nút","text":"Chuỗi hiển thị (giá trị)","type":"Kiểu","typeBtn":"Nút bấm","typeSbm":"Nút gửi","typeRst":"Nút nhập lại"},"checkboxAndRadio":{"checkboxTitle":"Thuộc tính nút kiểm","radioTitle":"Thuộc tính nút chọn","value":"Giá trị","selected":"Được chọn","required":"Bắt buộc"},"form":{"title":"Thuộc tính biểu mẫu","menu":"Thuộc tính biểu mẫu","action":"Hành động","method":"Phương thức","encoding":"Bảng mã"},"hidden":{"title":"Thuộc tính trường ẩn","name":"Tên","value":"Giá trị"},"select":{"title":"Thuộc tính ô chọn","selectInfo":"Thông tin","opAvail":"Các tùy chọn có thể sử dụng","value":"Giá trị","size":"Kích cỡ","lines":"dòng","chkMulti":"Cho phép chọn nhiều","required":"Bắt buộc","opText":"Văn bản","opValue":"Giá trị","btnAdd":"Thêm","btnModify":"Thay đổi","btnUp":"Lên","btnDown":"Xuống","btnSetValue":"Giá trị được chọn","btnDelete":"Nút xoá"},"textarea":{"title":"Thuộc tính vùng 
văn bản","cols":"Số cột","rows":"Số hàng"},"textfield":{"title":"Thuộc tính trường văn bản","name":"Tên","value":"Giá trị","charWidth":"Độ rộng của ký tự","maxChars":"Số ký tự tối đa","required":"Bắt buộc","type":"Kiểu","typeText":"Ký tự","typePass":"Mật khẩu","typeEmail":"Email","typeSearch":"Tìm kiếm","typeTel":"Số điện thoại","typeUrl":"URL"}},"format":{"label":"Định dạng","panelTitle":"Định dạng","tag_address":"Address","tag_div":"Bình thường (DIV)","tag_h1":"Heading 1","tag_h2":"Heading 2","tag_h3":"Heading 3","tag_h4":"Heading 4","tag_h5":"Heading 5","tag_h6":"Heading 6","tag_p":"Bình thường (P)","tag_pre":"Đã thiết lập"},"horizontalrule":{"toolbar":"Chèn đường phân cách ngang"},"iframe":{"border":"Hiển thị viền khung","noUrl":"Vui lòng nhập địa chỉ iframe","scrolling":"Kích hoạt thanh cuộn","title":"Thuộc tính iframe","toolbar":"Iframe"},"image":{"alt":"Chú thích ảnh","border":"Đường viền","btnUpload":"Tải lên máy chủ","button2Img":"Bạn có muốn chuyển nút bấm bằng ảnh được chọn thành ảnh?","hSpace":"Khoảng đệm ngang","img2Button":"Bạn có muốn chuyển đổi ảnh được chọn thành nút bấm bằng ảnh?","infoTab":"Thông tin của ảnh","linkTab":"Tab liên kết","lockRatio":"Giữ nguyên tỷ lệ","menu":"Thuộc tính của ảnh","resetSize":"Kích thước gốc","title":"Thuộc tính của ảnh","titleButton":"Thuộc tính nút của ảnh","upload":"Tải lên","urlMissing":"Thiếu đường dẫn hình ảnh","vSpace":"Khoảng đệm dọc","validateBorder":"Chiều rộng của đường viền phải là một số nguyên dương","validateHSpace":"Khoảng đệm ngang phải là một số nguyên dương","validateVSpace":"Khoảng đệm dọc phải là một số nguyên dương"},"indent":{"indent":"Dịch vào trong","outdent":"Dịch ra ngoài"},"smiley":{"options":"Tùy chọn hình biểu lộ cảm xúc","title":"Chèn hình biểu lộ cảm xúc (mặt cười)","toolbar":"Hình biểu lộ cảm xúc (mặt cười)"},"language":{"button":"Thiết lập ngôn ngữ","remove":"Loại bỏ ngôn ngữ"},"link":{"acccessKey":"Phím hỗ trợ truy cập","advanced":"Mở rộng","advisoryContentType":"Nội dung hướng 
dẫn","advisoryTitle":"Nhan đề hướng dẫn","anchor":{"toolbar":"Chèn/Sửa điểm neo","menu":"Thuộc tính điểm neo","title":"Thuộc tính điểm neo","name":"Tên của điểm neo","errorName":"Hãy nhập vào tên của điểm neo","remove":"Xóa neo"},"anchorId":"Theo định danh thành phần","anchorName":"Theo tên điểm neo","charset":"Bảng mã của tài nguyên được liên kết đến","cssClasses":"Lớp Stylesheet","download":"Force Download","displayText":"Display Text","emailAddress":"Thư điện tử","emailBody":"Nội dung thông điệp","emailSubject":"Tiêu đề thông điệp","id":"Định danh","info":"Thông tin liên kết","langCode":"Mã ngôn ngữ","langDir":"Hướng ngôn ngữ","langDirLTR":"Trái sang phải (LTR)","langDirRTL":"Phải sang trái (RTL)","menu":"Sửa liên kết","name":"Tên","noAnchors":"(Không có điểm neo nào trong tài liệu)","noEmail":"Hãy đưa vào địa chỉ thư điện tử","noUrl":"Hãy đưa vào đường dẫn liên kết (URL)","noTel":"Please type the phone number","other":"<khác>","phoneNumber":"Phone number","popupDependent":"Phụ thuộc (Netscape)","popupFeatures":"Đặc điểm của cửa sổ Popup","popupFullScreen":"Toàn màn hình (IE)","popupLeft":"Vị trí bên trái","popupLocationBar":"Thanh vị trí","popupMenuBar":"Thanh Menu","popupResizable":"Có thể thay đổi kích cỡ","popupScrollBars":"Thanh cuộn","popupStatusBar":"Thanh trạng thái","popupToolbar":"Thanh công cụ","popupTop":"Vị trí phía trên","rel":"Quan hệ","selectAnchor":"Chọn một điểm neo","styles":"Kiểu (style)","tabIndex":"Chỉ số của Tab","target":"Đích","targetFrame":"<khung>","targetFrameName":"Tên khung đích","targetPopup":"<cửa sổ popup>","targetPopupName":"Tên cửa sổ Popup","title":"Liên kết","toAnchor":"Neo trong trang này","toEmail":"Thư điện tử","toUrl":"URL","toPhone":"Phone","toolbar":"Chèn/Sửa liên kết","type":"Kiểu liên kết","unlink":"Xoá liên kết","upload":"Tải lên"},"list":{"bulletedlist":"Chèn/Xoá Danh sách không thứ tự","numberedlist":"Chèn/Xoá Danh sách có thứ tự"},"liststyle":{"bulletedTitle":"Thuộc tính danh sách không thứ tự","circle":"Khuyên 
tròn","decimal":"Kiểu số (1, 2, 3 ...)","disc":"Hình đĩa","lowerAlpha":"Kiểu abc thường (a, b, c, d, e...)","lowerRoman":"Số La Mã kiểu thường (i, ii, iii, iv, v...)","none":"Không gì cả","notset":"<không thiết lập>","numberedTitle":"Thuộc tính danh sách có thứ tự","square":"Hình vuông","start":"Bắt đầu","type":"Kiểu loại","upperAlpha":"Kiểu ABC HOA (A, B, C, D, E...)","upperRoman":"Số La Mã kiểu HOA (I, II, III, IV, V...)","validateStartNumber":"Số bắt đầu danh sách phải là một số nguyên."},"magicline":{"title":"Chèn đoạn vào đây"},"maximize":{"maximize":"Phóng to tối đa","minimize":"Thu nhỏ"},"newpage":{"toolbar":"Trang mới"},"pagebreak":{"alt":"Ngắt trang","toolbar":"Chèn ngắt trang"},"pastetext":{"button":"Dán theo định dạng văn bản thuần","pasteNotification":"Press %1 to paste. Your browser doesn‘t support pasting with the toolbar button or context menu option.","title":"Dán theo định dạng văn bản thuần"},"pastefromword":{"confirmCleanup":"Văn bản bạn muốn dán có kèm định dạng của Word. 
Bạn có muốn loại bỏ định dạng Word trước khi dán?","error":"Không thể để làm sạch các dữ liệu dán do một lỗi nội bộ","title":"Dán với định dạng Word","toolbar":"Dán với định dạng Word"},"preview":{"preview":"Xem trước"},"print":{"toolbar":"In"},"removeformat":{"toolbar":"Xoá định dạng"},"save":{"toolbar":"Lưu"},"selectall":{"toolbar":"Chọn tất cả"},"showblocks":{"toolbar":"Hiển thị các khối"},"sourcearea":{"toolbar":"Mã HTML"},"specialchar":{"options":"Tùy chọn các ký tự đặc biệt","title":"Hãy chọn ký tự đặc biệt","toolbar":"Chèn ký tự đặc biệt"},"scayt":{"btn_about":"Thông tin về SCAYT","btn_dictionaries":"Từ điển","btn_disable":"Tắt SCAYT","btn_enable":"Bật SCAYT","btn_langs":"Ngôn ngữ","btn_options":"Tùy chọn","text_title":"Kiểm tra chính tả ngay khi gõ chữ (SCAYT)"},"stylescombo":{"label":"Kiểu","panelTitle":"Phong cách định dạng","panelTitle1":"Kiểu khối","panelTitle2":"Kiểu trực tiếp","panelTitle3":"Kiểu đối tượng"},"table":{"border":"Kích thước đường viền","caption":"Đầu đề","cell":{"menu":"Ô","insertBefore":"Chèn ô Phía trước","insertAfter":"Chèn ô Phía sau","deleteCell":"Xoá ô","merge":"Kết hợp ô","mergeRight":"Kết hợp sang phải","mergeDown":"Kết hợp xuống dưới","splitHorizontal":"Phân tách ô theo chiều ngang","splitVertical":"Phân tách ô theo chiều dọc","title":"Thuộc tính của ô","cellType":"Kiểu của ô","rowSpan":"Kết hợp hàng","colSpan":"Kết hợp cột","wordWrap":"Chữ liền hàng","hAlign":"Canh lề ngang","vAlign":"Canh lề dọc","alignBaseline":"Đường cơ sở","bgColor":"Màu nền","borderColor":"Màu viền","data":"Dữ liệu","header":"Đầu đề","yes":"Có","no":"Không","invalidWidth":"Chiều rộng của ô phải là một số nguyên.","invalidHeight":"Chiều cao của ô phải là một số nguyên.","invalidRowSpan":"Số hàng kết hợp phải là một số nguyên.","invalidColSpan":"Số cột kết hợp phải là một số nguyên.","chooseColor":"Chọn màu"},"cellPad":"Khoảng đệm giữ ô và nội dung","cellSpace":"Khoảng cách giữa các ô","column":{"menu":"Cột","insertBefore":"Chèn cột phía 
trước","insertAfter":"Chèn cột phía sau","deleteColumn":"Xoá cột"},"columns":"Số cột","deleteTable":"Xóa bảng","headers":"Đầu đề","headersBoth":"Cả hai","headersColumn":"Cột đầu tiên","headersNone":"Không có","headersRow":"Hàng đầu tiên","heightUnit":"height unit","invalidBorder":"Kích cỡ của đường biên phải là một số nguyên.","invalidCellPadding":"Khoảng đệm giữa ô và nội dung phải là một số nguyên.","invalidCellSpacing":"Khoảng cách giữa các ô phải là một số nguyên.","invalidCols":"Số lượng cột phải là một số lớn hơn 0.","invalidHeight":"Chiều cao của bảng phải là một số nguyên.","invalidRows":"Số lượng hàng phải là một số lớn hơn 0.","invalidWidth":"Chiều rộng của bảng phải là một số nguyên.","menu":"Thuộc tính bảng","row":{"menu":"Hàng","insertBefore":"Chèn hàng phía trước","insertAfter":"Chèn hàng phía sau","deleteRow":"Xoá hàng"},"rows":"Số hàng","summary":"Tóm lược","title":"Thuộc tính bảng","toolbar":"Bảng","widthPc":"Phần trăm (%)","widthPx":"Điểm ảnh (px)","widthUnit":"Đơn vị"},"undo":{"redo":"Làm lại thao tác","undo":"Khôi phục thao tác"},"widget":{"move":"Nhấp chuột và kéo để di chuyển","label":"%1 widget"},"uploadwidget":{"abort":"Upload aborted by the user.","doneOne":"File successfully uploaded.","doneMany":"Successfully uploaded %1 files.","uploadOne":"Uploading file ({percentage}%)...","uploadMany":"Uploading files, {current} of {max} done ({percentage}%)..."},"wsc":{"btnIgnore":"Bỏ qua","btnIgnoreAll":"Bỏ qua tất cả","btnReplace":"Thay thế","btnReplaceAll":"Thay thế tất cả","btnUndo":"Phục hồi lại","changeTo":"Chuyển thành","errorLoading":"Lỗi khi đang nạp dịch vụ ứng dụng: %s.","ieSpellDownload":"Chức năng kiểm tra chính tả chưa được cài đặt. 
Bạn có muốn tải về ngay bây giờ?","manyChanges":"Hoàn tất kiểm tra chính tả: %1 từ đã được thay đổi","noChanges":"Hoàn tất kiểm tra chính tả: Không có từ nào được thay đổi","noMispell":"Hoàn tất kiểm tra chính tả: Không có lỗi chính tả","noSuggestions":"- Không đưa ra gợi ý về từ -","notAvailable":"Xin lỗi, dịch vụ này hiện tại không có.","notInDic":"Không có trong từ điển","oneChange":"Hoàn tất kiểm tra chính tả: Một từ đã được thay đổi","progress":"Đang tiến hành kiểm tra chính tả...","title":"Kiểm tra chính tả","toolbar":"Kiểm tra chính tả"}}; | PypiClean |
/dipex-4.54.5.tar.gz/dipex-4.54.5/integrations/SD_Lon/sdlon/main.py | import asyncio
import enum
from typing import Iterable
from contextlib import contextmanager
from functools import partial
from fastapi import FastAPI
from prometheus_fastapi_instrumentator import Instrumentator
from prometheus_client import Enum
from prometheus_client import Gauge
from integrations.rundb.db_overview import DBOverview
from .config import get_changed_at_settings
from .sd_changed_at import changed_at
class State(enum.Enum):
    """Possible states of the SD Changed At integration.

    The member *values* are exported as the allowed states of the
    ``sd_changed_at_state`` Prometheus enum metric below.
    """

    RUNNING = "running"
    OK = "ok"
    FAILURE = "failure"
    UNKNOWN = "unknown"
def get_state() -> State:
    """Derive the integration state from the run-db's last status line.

    Returns:
        ``State.RUNNING`` if the last status line says "Running since",
        ``State.OK`` if it says "Update finished",
        ``State.UNKNOWN`` for any other readable status line, and
        ``State.FAILURE`` if the run db cannot be read at all.
    """
    try:
        settings = get_changed_at_settings()
        run_db = settings.sd_import_run_db
        db_overview = DBOverview(run_db)
        status_line = db_overview._read_last_line("status")
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should map to FAILURE.
        return State.FAILURE
    if "Running since" in status_line:
        return State.RUNNING
    if "Update finished" in status_line:
        return State.OK
    return State.UNKNOWN
# Prometheus enum reporting the integration's current state; initialised
# from the run db so a restarted service reports the last known state.
state = Enum(
    "sd_changed_at_state",
    "Current state of the SD Changed At integration",
    states=[s.value for s in State],
)
state.state(get_state().value)
# Wall-clock start/end of the latest run (set by update_state_metric below).
start_time = Gauge(
    "sd_changed_at_start_time", "Start time of the latest SD Changed At run"
)
end_time = Gauge("sd_changed_at_end_time", "End time of the latest SD Changed At run")
app = FastAPI()
# Instrument request latencies and expose the /metrics endpoint.
Instrumentator().instrument(app).expose(app)
@contextmanager
def update_state_metric() -> Iterable[None]:
    """Update SDChangedAt state metrics around a single run.

    Sets the start/end time gauges, reports "running" while the wrapped
    code executes, "failure" if it raises, and otherwise a state derived
    from the run-db status line written by the run itself.
    """
    # TODO: write a test of this contextmanager
    # TODO: refactor the contextmanager to use the get_state function above
    start_time.set_to_current_time()
    settings = get_changed_at_settings()
    run_db = settings.sd_import_run_db
    db_overview = DBOverview(run_db)
    try:
        state.state("running")
        yield
        # Read the status line *after* the run so we judge the run that just
        # finished.  (The original read it before yielding, so the post-run
        # check always reflected the *previous* run's status.)
        status = db_overview._read_last_line("status")
        if "Running since" in status:
            state.state("failure")
        elif "Update finished" in status:
            state.state("ok")
        else:
            state.state("unknown")
    except BaseException:
        # Explicit spelling of the previous bare `except:`; the exception is
        # re-raised, so even KeyboardInterrupt/SystemExit still propagate.
        state.state("failure")
        raise
    finally:
        end_time.set_to_current_time()
@app.get("/")
async def index() -> dict[str, str]:
    """Landing endpoint identifying this service."""
    return dict(name="sdlon")
@app.post("/trigger")
@app.get("/trigger", deprecated=True)
async def trigger(force: bool = False) -> dict[str, str]:
    """Schedule an SD Changed At run on the event loop and return at once.

    ``update_state_metric()`` is a context-manager instance used here as a
    *decorator* (contextlib supports this), so the metrics wrap the run.
    NOTE(review): ``loop.call_soon`` runs the synchronous ``changed_at``
    on the event loop itself, blocking other requests for the duration —
    presumably acceptable for this single-purpose service; confirm.
    """
    loop = asyncio.get_running_loop()
    loop.call_soon(partial(update_state_metric()(changed_at), init=False, force=force))
    return {"triggered": "OK"}
/Electrum-Zcash-Random-Fork-3.1.3b5.tar.gz/Electrum-Zcash-Random-Fork-3.1.3b5/gui/kivy/uix/gridview.py | from kivy.uix.boxlayout import BoxLayout
from kivy.adapters.dictadapter import DictAdapter
from kivy.adapters.listadapter import ListAdapter
from kivy.properties import ObjectProperty, ListProperty, AliasProperty
from kivy.uix.listview import (ListItemButton, ListItemLabel, CompositeListItem,
ListView)
from kivy.lang import Builder
from kivy.metrics import dp, sp
Builder.load_string('''
<GridView>
header_view: header_view
content_view: content_view
BoxLayout:
orientation: 'vertical'
padding: '0dp', '2dp'
BoxLayout:
id: header_box
orientation: 'vertical'
size_hint: 1, None
height: '30dp'
ListView:
id: header_view
BoxLayout:
id: content_box
orientation: 'vertical'
ListView:
id: content_view
<-HorizVertGrid>
header_view: header_view
content_view: content_view
ScrollView:
id: scrl
do_scroll_y: False
RelativeLayout:
size_hint_x: None
width: max(scrl.width, dp(sum(root.widths)))
BoxLayout:
orientation: 'vertical'
padding: '0dp', '2dp'
BoxLayout:
id: header_box
orientation: 'vertical'
size_hint: 1, None
height: '30dp'
ListView:
id: header_view
BoxLayout:
id: content_box
orientation: 'vertical'
ListView:
id: content_view
''')
class GridView(BoxLayout):
    """Workaround grid view built from two ListViews (header + content).

    The header row and the content rows are separate ListViews stacked by
    the kv rule above; both are rebuilt whenever `widths` changes.
    NOTE(review): uses the long-deprecated kivy.adapters/ListView API.
    """
    def _get_hd_adpt(self):
        # Getter backing `header_adapter` (adapter of the header ListView).
        return self.ids.header_view.adapter
    header_adapter = AliasProperty(_get_hd_adpt, None)
    '''
    '''
    def _get_cnt_adpt(self):
        # Getter backing `content_adapter` (adapter of the content ListView).
        return self.ids.content_view.adapter
    content_adapter = AliasProperty(_get_cnt_adpt, None)
    '''
    '''
    # Column titles shown in the header row.
    headers = ListProperty([])
    '''
    '''
    # Fixed pixel width per column; must have one entry per header.
    widths = ListProperty([])
    '''
    '''
    # Row data; each item is indexed by `getter` and its last element is
    # used as the composite row id.
    data = ListProperty([])
    '''
    '''
    # Extracts cell `i` from a row item; override for non-sequence rows.
    getter = ObjectProperty(lambda item, i: item[i])
    '''
    '''
    # Optional on_press handler attached to content cells only.
    on_context_menu = ObjectProperty(None)
    def __init__(self, **kwargs):
        # Guard flag: adapters are only (re)built from on_widths, never
        # from a bare headers change (see on_headers).
        self._from_widths = False
        super(GridView, self).__init__(**kwargs)
        #self.on_headers(self, self.headers)
    def on_widths(self, instance, value):
        # Only rebuild once the widget is attached to a window.
        if not self.get_root_window():
            return
        self._from_widths = True
        self.on_headers(instance, self.headers)
        self._from_widths = False
    def on_headers(self, instance, value):
        # Rebuild both adapters; no-op unless triggered via on_widths and
        # headers/widths are consistent.
        if not self._from_widths:
            return
        if not (value and self.canvas and self.headers):
            return
        widths = self.widths
        if len(self.widths) != len(value):
            return
        #if widths is not None:
        #    widths = ['%sdp' % i for i in widths]
        def generic_args_converter(row_index,
                                   item,
                                   is_header=True,
                                   getter=self.getter):
            # Build the CompositeListItem spec for one row: one
            # ListItemButton per column.
            cls_dicts = []
            _widths = self.widths
            getter = self.getter
            on_context_menu = self.on_context_menu
            for i, header in enumerate(self.headers):
                kwargs = {
                    'padding': ('2dp','2dp'),
                    'halign': 'center',
                    'valign': 'middle',
                    'size_hint_y': None,
                    'shorten': True,
                    'height': '30dp',
                    'text_size': (_widths[i], dp(30)),
                    'text': getter(item, i),
                }
                kwargs['font_size'] = '9sp'
                if is_header:
                    # Header cells: cyan in both states (not selectable-looking).
                    kwargs['deselected_color'] = kwargs['selected_color'] =\
                        [0, 1, 1, 1]
                else: # this is content
                    kwargs['deselected_color'] = 1, 1, 1, 1
                    if on_context_menu is not None:
                        kwargs['on_press'] = on_context_menu
                if widths is not None: # set width manually
                    kwargs['size_hint_x'] = None
                    kwargs['width'] = widths[i]
                cls_dicts.append({
                    'cls': ListItemButton,
                    'kwargs': kwargs,
                })
            return {
                'id': item[-1],
                'size_hint_y': None,
                'height': '30dp',
                'cls_dicts': cls_dicts,
            }
        def header_args_converter(row_index, item):
            return generic_args_converter(row_index, item)
        def content_args_converter(row_index, item):
            return generic_args_converter(row_index, item, is_header=False)
        # Header adapter gets a single row (the headers themselves).
        self.ids.header_view.adapter = ListAdapter(data=[self.headers],
                                       args_converter=header_args_converter,
                                       selection_mode='single',
                                       allow_empty_selection=False,
                                       cls=CompositeListItem)
        self.ids.content_view.adapter = ListAdapter(data=self.data,
                                        args_converter=content_args_converter,
                                        selection_mode='single',
                                        allow_empty_selection=False,
                                        cls=CompositeListItem)
        # Repopulate the content view whenever its adapter data changes.
        self.content_adapter.bind_triggers_to_view(self.ids.content_view._trigger_reset_populate)
class HorizVertGrid(GridView):
    """GridView variant whose kv rule (``<-HorizVertGrid>``) wraps the grid
    in a horizontal ``ScrollView`` so tables wider than the window pan."""
    pass
if __name__ == "__main__":
    # Minimal manual demo: a two-column grid with 90 numbered rows.
    from kivy.app import App
    class MainApp(App):
        def build(self):
            # Each row is a 2-tuple of strings; the last element doubles
            # as the composite row id in GridView.on_headers.
            data = []
            for i in range(90):
                data.append((str(i), str(i)))
            self.data = data
            return Builder.load_string('''
BoxLayout:
    orientation: 'vertical'
    HorizVertGrid:
        on_parent: if args[1]: self.content_adapter.data = app.data
        headers:['Address', 'Previous output']
        widths: [400, 500]
<Label>
    font_size: '16sp'
''')
    MainApp().run()
/KeralaPyApiV2-2.0.2020.tar.gz/KeralaPyApiV2-2.0.2020/pyrogram/client/methods/chats/kick_chat_member.py |
from typing import Union
import pyrogram
from pyrogram.api import functions, types
from ...ext import BaseClient
class KickChatMember(BaseClient):
    async def kick_chat_member(
        self,
        chat_id: Union[int, str],
        user_id: Union[int, str],
        until_date: int = 0
    ) -> Union["pyrogram.Message", bool]:
        """Kick a user from a group, a supergroup or a channel.

        In the case of supergroups and channels, the user will not be able to return to the group on their own using
        invite links, etc., unless unbanned first. You must be an administrator in the chat for this to work and must
        have the appropriate admin rights.

        Note:
            In regular groups (non-supergroups), this method will only work if the "All Members Are Admins" setting is
            off in the target group. Otherwise members may only be removed by the group's creator or by the member
            that added them.

        Parameters:
            chat_id (``int`` | ``str``):
                Unique identifier (int) or username (str) of the target chat.

            user_id (``int`` | ``str``):
                Unique identifier (int) or username (str) of the target user.
                For a contact that exists in your Telegram address book you can use his phone number (str).

            until_date (``int``, *optional*):
                Date when the user will be unbanned, unix time.
                If user is banned for more than 366 days or less than 30 seconds from the current time they are
                considered to be banned forever. Defaults to 0 (ban forever).

        Returns:
            :obj:`Message` | ``bool``: On success, a service message will be returned (when applicable), otherwise, in
            case a message object couldn't be returned, True is returned.

        Example:
            .. code-block:: python

                from time import time

                # Ban chat member forever
                app.kick_chat_member(chat_id, user_id)

                # Kick chat member and automatically unban after 24h
                app.kick_chat_member(chat_id, user_id, int(time() + 86400))
        """
        chat_peer = await self.resolve_peer(chat_id)
        user_peer = await self.resolve_peer(user_id)
        if isinstance(chat_peer, types.InputPeerChannel):
            # Supergroups/channels: ban via EditBanned with all rights revoked.
            r = await self.send(
                functions.channels.EditBanned(
                    channel=chat_peer,
                    user_id=user_peer,
                    banned_rights=types.ChatBannedRights(
                        until_date=until_date,
                        view_messages=True,
                        send_messages=True,
                        send_media=True,
                        send_stickers=True,
                        send_gifs=True,
                        send_games=True,
                        send_inline=True,
                        embed_links=True
                    )
                )
            )
        else:
            # Basic groups: plain removal (no ban list exists).
            # NOTE(review): abs() presumes a numeric chat_id here; a str
            # username for a basic group would raise — confirm upstream.
            r = await self.send(
                functions.messages.DeleteChatUser(
                    chat_id=abs(chat_id),
                    user_id=user_peer
                )
            )
        # for/else: return the parsed service message if one is present in
        # the updates, otherwise fall through to True.
        for i in r.updates:
            if isinstance(i, (types.UpdateNewMessage, types.UpdateNewChannelMessage)):
                return await pyrogram.Message._parse(
                    self, i.message,
                    {i.id: i for i in r.users},
                    {i.id: i for i in r.chats}
                )
        else:
            return True
/GTW-1.2.6.tar.gz/GTW-1.2.6/_RST/_TOP/_elFinder/Tag_Cloud_Driver.py |
from __future__ import absolute_import, division, print_function, unicode_literals
from _GTW import GTW
from _MOM import MOM
from _TFL import TFL
from _GTW._RST._TOP._elFinder import elFinder
from _MOM.import_MOM import Q
from _TFL import sos as os
from _TFL._Meta.Once_Property import Once_Property
from _TFL.pyk import pyk
import _GTW._RST._TOP.Page
import _GTW._RST._TOP._elFinder._Filesystem_Driver_
import datetime
import mimetypes
import time
import PIL.Image
class Tag_Cloud_Driver (elFinder._Filesystem_Driver_) :
    """elFinder volume driver presenting a tag cloud as a filesystem.

    Each `Tag` entity of the MOM scope is shown as one top-level
    directory; the picture entities linked to a tag appear as the files
    inside that directory.  Creating a directory therefore creates a new
    tag, and copying/removing a file links/unlinks a picture and a tag.
    """

    def __init__ (self, root_dir, ETM, name, ** kw) :
        ### pop `thumb_directory` before `kw` is passed on to the base class
        self.thumb_directory = kw.pop ("thumb_directory", None)
        self.__super.__init__ (name, ** kw)
        self._ETM            = ETM
        self.root_dir        = root_dir
    # end def __init__

    def abs_path (self, obj) :
        """Return the absolute filesystem path of the picture `obj`."""
        return obj.abs_path
    # end def abs_path

    def add (self, path_spec, upload) :
        """Save `upload` under the tag-directory of `path_spec`, create a
           picture entity for it, and link that picture to the tag.
        """
        import werkzeug ### XXX
        path, dir, file = path_spec
        if file :
            ### uploads must target a directory, not an existing file
            raise elFinder.Error ("errTrgFolderNotFound")
        abs_file_name = os.path.join \
            ( self.root_dir, dir.name
            , werkzeug.secure_filename (upload.filename)
            )
        rel_file_name = abs_file_name.replace (MOM.Web_Src_Root, "")
        upload.save (abs_file_name)
        i    = PIL.Image.open (abs_file_name)
        w, h = i.size
        file = self.ETM (path = rel_file_name, width = w, height = h)
        ### link the new picture to the tag represented by `dir`
        ### (fixed: was `ddir`, which is undefined here -> NameError)
        self._scope.MOM.Id_Entity_has_Tag (file, dir)
        return self.file_entry (dir, file)
    # end def add

    def copy (self, src_path_spec, dst_volume, dst_path_spec, remove = False) :
        """Copy the file of `src_path_spec` into the directory of
           `dst_path_spec` on `dst_volume`; if `remove` is true, remove
           the source afterwards (i.e., implement a move).
        """
        spath, sdir, sfile = src_path_spec
        dpath, ddir, dfile = dst_path_spec
        ### fixed: `_Error_` is not defined in this module; use
        ### `elFinder.Error` as `add` already does
        if not sfile :
            raise elFinder.Error ("errNotFile")
        if not ddir :
            raise elFinder.Error ("errNotFolder")
        if dfile :
            ### the destination must be a directory, not an existing file
            raise elFinder.Error ("errCmdParams")
        dfile  = dst_volume._copy_from (ddir, self, sdir, sfile)
        result = dict (added = [dst_volume.file_entry (ddir, dfile)])
        if remove :
            result ["removed"] = [self.file_entry (sdir, sfile) ["hash"]]
            self.remove (src_path_spec)
        return result
    # end def copy

    def _copy_from (self, ddir, svolume, sdir, sfile, buf_size = 16 * 1024) :
        """Copy `sfile` of `sdir` on `svolume` into the tag-directory
           `ddir` of this volume and return the resulting file entity.
        """
        if svolume is not self :
            ### cross-volume copy: transfer the file contents chunk-wise
            shandle = svolume._open_file   (sdir, sfile)
            dhandle = self   ._create_file (ddir.name, sfile.name)
            while True :
                buffer = shandle.read (buf_size)
                if buffer :
                    ### fixed: was `dhandle.write (buf_size)`, which would
                    ### write the chunk *size* instead of the chunk data
                    dhandle.write (buffer)
                else :
                    break
            shandle.close ()
            dhandle.close ()
            return self.add_file (ddir, sfile.name)
        ### same volume: just link the existing picture to the new tag
        self._scope.MOM.Id_Entity_has_Tag (sfile, ddir)
        return sfile
    # end def _copy_from

    def _create_file (self, dir, file) :
        """Open a new file named `file` in tag-directory `dir` for
           writing (returns None if `file` is not a string).
        """
        if isinstance (file, pyk.string_types) :
            file_name = os.path.join (self.root_dir, dir, file)
            return open (file_name, "wb")
    # end def _create_file

    def current_directory_options (self, path) :
        """Return the elFinder option dict for the directory in `path`."""
        path, dir, file = path
        ### these commands never make sense for a tag cloud
        disabled = ["mkfile", "extract", "archive", "duplicate"]
        if dir :
            ### inside a tag: nested tags are not supported
            disabled.append ("mkdir")
        else :
            ### at the root: new tags may be created, but nothing uploaded
            disabled.append ("upload")
        return dict (disabled = disabled)
    # end def current_directory_options

    def decode_hashes (self, hashes) :
        """Convert `hashes` ([tag-pid, picture-pid]) into a
           `(path, dir, file)` triple of name-path, tag entity, and
           picture entity.
        """
        if len (hashes) > 2 :
            ### at most `<tag>/<picture>` is supported (no nesting)
            ### fixed: `_Error_` is undefined here; use `elFinder.Error`
            raise elFinder.Error ("errCmdParams")
        result = []
        dir    = None
        file   = None
        if hashes :
            dir_name = hashes.pop (0)
            dir      = self.TETM.pid_query (dir_name)
            if not dir :
                raise elFinder.Error ("errFolderNotFound", dir_name)
            result.append (dir.name)
        if hashes :
            file_name = hashes.pop (0)
            file      = self.ETM.pid_query (file_name)
            if not file :
                raise elFinder.Error ("errFileNotFound", file_name)
            result.append (file.name)
        return "/".join (result), dir, file
    # end def decode_hashes

    def directories (self, dir) :
        """Return the sub-directories of `dir`: all tags at the root,
           nothing below a tag (tags of tags are not supported).
        """
        if dir :
            return []
        return self.TETM.query ()
    # end def directories

    def directory_entry (self, tag) :
        """Return the elFinder directory description for `tag`."""
        return dict \
            ( mime  = "directory"
            , ts    = time.mktime (self.started.timetuple ())
            , read  = self.allow_read
            , write = self.allow_write
            , size  = 0
            , hash  = "%s_%s" % (self.hash, tag.pid)
            , phash = self.hash
            , name  = tag.name
            , date  = self.started.strftime ("%c")
            , dirs  = 0
            )
    # end def directory_entry

    def dirs_of_path (self, path, dir) :
        ### the hierarchy is flat: a file's only ancestor is its tag
        return (None, dir)
    # end def dirs_of_path

    @Once_Property
    def ETM (self) :
        """Entity type manager of the picture entities."""
        return self._scope [self._ETM]
    # end def ETM

    def files_in_directory (self, tag) :
        """Return all pictures linked to `tag` (none at the root)."""
        if tag :
            return self.ETM.query (Q.tags.CONTAINS (tag))
        return ()
    # end def files_in_directory

    def file_entry (self, tag, obj) :
        """Return the elFinder file description for picture `obj` inside
           the directory of `tag`.
        """
        path      = self.abs_path  (obj)
        mime_type = self.mime_type (obj)
        stat      = os.stat        (path)
        result    = dict \
            ( mime  = mime_type
            , ts    = stat.st_mtime
            , hash  = "%s_%s_%s" % (self.hash, tag.pid, obj.pid)
            , phash = "%s_%s"    % (self.hash, tag.pid)
            , read  = self.allow_read
            , write = self.allow_write
            , size  = stat.st_size
            , name  = obj.name
            , date  = datetime.datetime.fromtimestamp
                  (stat.st_mtime).strftime ("%c")
            , dim   = "%dx%d" % (obj.width, obj.height)
            )
        if self.media_domain :
            result ["url"] = "%s/%s" % (self.media_domain, obj.path)
            if self.thumb_directory :
                result ["tmb"] = "%s/%s" % (self.media_domain, obj.thumb_path)
        return result
    # end def file_entry

    def file_name (self, dir, file) :
        """Return the display name of picture entity `file`."""
        return file.name
    # end def file_name

    @property
    def has_directories (self) :
        ### the volume has directories as soon as at least one tag exists
        return self.TETM.count > 0
    # end def has_directories

    def image_dimensions (self, path, dir, file) :
        """Return the dimensions of `file` as `"WxH"` string."""
        return "%dx%d" % (file.width, file.height)
    # end def image_dimensions

    def mime_type (self, obj) :
        """Return the mime type of `obj`, guessing and caching it on the
           entity if it is not set yet.
        """
        result = obj.mime_type
        if result is None :
            result = obj.mime_type = mimetypes.guess_type (obj.abs_path) [0]
        return result
    # end def mime_type

    def mkdir (self, dir, name) :
        """Create the tag `name` plus its on-disk directory (creating a
           directory in the tag cloud means creating a new tag).
        """
        dir_name = os.path.join (self.root_dir, name)
        if not os.path.isdir (dir_name) :
            os.makedirs (dir_name)
        tag = self.TETM (name)
        return self.directory_entry (tag)
    # end def mkdir

    def _open_file (self, dir, file) :
        """Open the picture entity `file` for binary reading."""
        return open (file.abs_path, "rb")
    # end def _open_file

    def remove (self, path_spec) :
        """Remove the file or directory of `path_spec`.

        Removing a file unlinks the picture from its tag and destroys the
        picture if no tags remain; a directory (tag) is only destroyed if
        no entities are linked to it anymore.
        """
        path, dir, file = path_spec
        EhT = self._scope.MOM.Id_Entity_has_Tag
        if file :
            entry = self.file_entry (dir, file)
            EhT.query (right = dir, left = file).one ().destroy ()
            if not file.tags :
                ### the picture is not referenced by any tag anymore
                file.destroy ()
        else :
            if not EhT.query (Q.tag == dir).count () :
                entry = self.directory_entry (dir)
                dir.destroy ()
            else :
                ### fixed: `_Error_` is undefined here; `errUsupportType`
                ### (sic) is elFinder's own key for unsupported operations
                raise elFinder.Error ("errUsupportType")
        return entry
    # end def remove

    def rename (self, new_name, dir, file) :
        """Rename the picture `file` (on disk and in the database) or,
           if no file is given, the tag `dir`.
        """
        if file :
            ### rename the file on disk and adapt the stored path
            file_name = self.abs_path (file)
            root_dir  = file_name [:-len (file.path)]
            new_name  = os.path.join (os.path.dirname (file_name), new_name)
            os.rename (file_name, new_name)
            file.set_raw (path = new_name [len (root_dir):])
            return self.file_entry (dir, file)
        else :
            ### rename the tag
            dir.set_raw (name = new_name)
            return self.directory_entry (dir)
    # end def rename

    @Once_Property
    def TETM (self) :
        """Entity type manager of the tag entities."""
        return self._scope.MOM.Tag
    # end def TETM

# end class Tag_Cloud_Driver
### Register this module's public names with the elFinder package namespace
### when imported as a library (TFL convention: export on import, never when
### run as a script).
if __name__ != "__main__" :
    GTW.RST.TOP.elFinder._Export ("*")
### __END__ GTW.RST.TOP.elFinder.Tag_Cloud_Driver
/ESMValCore-2.9.0rc1.tar.gz/ESMValCore-2.9.0rc1/doc/quickstart/find_data.rst | .. _findingdata:
**********
Input data
**********
Overview
========
Data discovery and retrieval is the first step in any evaluation process;
ESMValTool uses a `semi-automated` data finding mechanism with inputs from both
the user configuration file and the recipe file: this means that the user will
have to provide the tool with a set of parameters related to the data needed
and once these parameters have been provided, the tool will automatically find
the right data. We will detail below the data finding and retrieval process and
the input the user needs to specify, giving examples on how to use the data
finding routine under different scenarios.
Data types
==========
.. _CMOR-DRS:
CMIP data
---------
CMIP data is widely available via the Earth System Grid Federation
(`ESGF <https://esgf.llnl.gov/>`_) and is accessible to users either
via automatic download by ``esmvaltool`` or through the ESGF data nodes hosted
by large computing facilities (like CEDA-Jasmin, DKRZ, etc). This data
adheres to, among other standards, the DRS and Controlled Vocabulary
standard for naming files and structured paths; the `DRS
<https://www.ecmwf.int/sites/default/files/elibrary/2014/13713-data-reference-syntax-governing-standards-within-climate-research-data-archived-esgf.pdf>`_
ensures that files and paths to them are named according to a
standardized convention. Examples of this convention, also used by
ESMValTool for file discovery and data retrieval, include:
* CMIP6 file: ``{variable_short_name}_{mip}_{dataset_name}_{experiment}_{ensemble}_{grid}_{start-date}-{end-date}.nc``
* CMIP5 file: ``{variable_short_name}_{mip}_{dataset_name}_{experiment}_{ensemble}_{start-date}-{end-date}.nc``
* OBS file: ``{project}_{dataset_name}_{type}_{version}_{mip}_{short_name}_{start-date}-{end-date}.nc``
Similar standards exist for the standard paths (input directories); for the
ESGF data nodes, these paths differ slightly, for example:
* CMIP6 path for BADC: ``ROOT-BADC/{institute}/{dataset_name}/{experiment}/{ensemble}/{mip}/
{variable_short_name}/{grid}``;
* CMIP6 path for ETHZ: ``ROOT-ETHZ/{experiment}/{mip}/{variable_short_name}/{dataset_name}/{ensemble}/{grid}``
From the ESMValTool user perspective the number of data input parameters is
optimized to allow for ease of use. We detail this procedure in the next
section.
Observational data
------------------
Part of observational data is retrieved in the same manner as CMIP data, for example
using the ``OBS`` root path set to:
.. code-block:: yaml
OBS: /gws/nopw/j04/esmeval/obsdata-v2
and the dataset:
.. code-block:: yaml
- {dataset: ERA-Interim, project: OBS6, type: reanaly, version: 1, start_year: 2014, end_year: 2015, tier: 3}
in ``recipe.yml`` in ``datasets`` or ``additional_datasets``, the rules set in
CMOR-DRS_ are used again and the file will be automatically found:
.. code-block::
/gws/nopw/j04/esmeval/obsdata-v2/Tier3/ERA-Interim/OBS_ERA-Interim_reanaly_1_Amon_ta_201401-201412.nc
Since observational data are organized in Tiers depending on their level of
public availability, the ``default`` directory must be structured accordingly
with sub-directories ``TierX`` (``Tier1``, ``Tier2`` or ``Tier3``), even when
``drs: default``.
.. _read_native_datasets:
Datasets in native format
-------------------------
Some datasets are supported in their native format (i.e., the data is not
formatted according to a CMIP data request) through the ``native6`` project
(mostly native reanalysis/observational datasets) or through a dedicated
project, e.g., ``ICON`` (mostly native models).
A detailed description of how to include new native datasets is given
:ref:`here <add_new_fix_native_datasets>`.
.. hint::
When using native datasets, it might be helpful to specify a custom location
for the :ref:`custom_cmor_tables`.
This allows reading arbitrary variables from native datasets.
Note that this requires the option ``cmor_strict: false`` in the
:ref:`project configuration <configure_native_models>` used for the native
model output.
.. _read_native_obs:
Supported native reanalysis/observational datasets
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following native reanalysis/observational datasets are supported under the
``native6`` project.
To use these datasets, put the files containing the data in the directory that
you have configured for the ``native6`` project in your :ref:`user
configuration file`, in a subdirectory called
``Tier{tier}/{dataset}/{version}/{frequency}/{short_name}``.
Replace the items in curly braces by the values used in the variable/dataset
definition in the :ref:`recipe <recipe_overview>`.
Below is a list of native reanalysis/observational datasets currently
supported.
.. _read_native_era5:
ERA5
^^^^
- Supported variables: ``cl``, ``clt``, ``evspsbl``, ``evspsblpot``, ``mrro``, ``pr``, ``prsn``, ``ps``, ``psl``, ``ptype``, ``rls``, ``rlds``, ``rsds``, ``rsdt``, ``rss``, ``uas``, ``vas``, ``tas``, ``tasmax``, ``tasmin``, ``tdps``, ``ts``, ``tsn`` (``E1hr``/``Amon``), ``orog`` (``fx``)
- Tier: 3
.. _read_native_mswep:
MSWEP
^^^^^
- Supported variables: ``pr``
- Supported frequencies: ``mon``, ``day``, ``3hr``.
- Tier: 3
For example for monthly data, place the files in the ``/Tier3/MSWEP/version/mon/pr`` subdirectory of your ``native6`` project location.
.. note::
For monthly data (``V220``), the data must be postfixed with the date, i.e. rename ``global_monthly_050deg.nc`` to ``global_monthly_050deg_197901-201710.nc``
For more info: http://www.gloh2o.org/
Data for the version ``V220`` can be downloaded from: https://hydrology.princeton.edu/data/hylkeb/MSWEP_V220/.
.. _read_native_models:
Supported native models
~~~~~~~~~~~~~~~~~~~~~~~
The following models are natively supported by ESMValCore.
In contrast to the native observational datasets listed above, they use
dedicated projects instead of the project ``native6``.
.. _read_cesm:
CESM
^^^^
ESMValTool is able to read native `CESM <https://www.cesm.ucar.edu/>`__ model
output.
.. warning::
The support for native CESM output is still experimental.
Currently, only one variable (`tas`) is fully supported. Other 2D variables
might be supported by specifying appropriate facets in the recipe or extra
facets files (see text below).
3D variables (data that uses a vertical dimension) are not supported, yet.
The default naming conventions for input directories and files for CESM are
* input directories: 3 different types supported:
* ``/`` (run directory)
* ``{case}/{gcomp}/hist`` (short-term archiving)
* ``{case}/{gcomp}/proc/{tdir}/{tperiod}`` (post-processed data)
* input files: ``{case}.{scomp}.{type}.{string}*nc``
as configured in the :ref:`config-developer file <config-developer>` (using the
default DRS ``drs: default`` in the :ref:`user configuration file`).
More information about CESM naming conventions are given `here
<https://www.cesm.ucar.edu/models/cesm2/naming_conventions.html>`__.
.. note::
The ``{string}`` entry in the input file names above does not only
correspond to the (optional) ``$string`` entry for `CESM model output files
<https://www.cesm.ucar.edu/models/cesm2/naming_conventions.html#modelOutputFilenames>`__,
but can also be used to read `post-processed files
<https://www.cesm.ucar.edu/models/cesm2/naming_conventions.html#ppDataFilenames>`__.
In the latter case, ``{string}`` corresponds to the combination
``$SSTRING.$TSTRING``.
Thus, example dataset entries could look like this:
.. code-block:: yaml
datasets:
- {project: CESM, dataset: CESM2, case: f.e21.FHIST_BGC.f09_f09_mg17.CMIP6-AMIP.001, type: h0, mip: Amon, short_name: tas, start_year: 2000, end_year: 2014}
- {project: CESM, dataset: CESM2, case: f.e21.F1850_BGC.f09_f09_mg17.CFMIP-hadsst-piForcing.001, type: h0, gcomp: atm, scomp: cam, mip: Amon, short_name: tas, start_year: 2000, end_year: 2014}
Variable-specific defaults for the facet ``gcomp`` and ``scomp`` are given in
the extra facets (see next paragraph) for some variables, but this can be
overwritten in the recipe.
Similar to any other fix, the CESM fix allows the use of :ref:`extra
facets<extra_facets>`.
By default, the file :download:`cesm-mappings.yml
</../esmvalcore/config/extra_facets/cesm-mappings.yml>` is used for that
purpose.
Currently, this file only contains default facets for a single variable
(`tas`); for other variables, these entries need to be defined in the recipe.
Supported keys for extra facets are:
==================== ====================================== =================================
Key Description Default value if not specified
==================== ====================================== =================================
``gcomp`` Generic component-model name No default (needs to be specified
in extra facets or recipe if
default DRS is used)
``raw_name`` Variable name of the variable in the CMOR variable name of the
raw input file corresponding variable
``raw_units`` Units of the variable in the raw If specified, the value given by
input file the ``units`` attribute in the
raw input file; otherwise
``unknown``
``scomp`` Specific component-model name No default (needs to be specified
in extra facets or recipe if
default DRS is used)
``string`` Short string which is used to further ``''`` (empty string)
identify the history file type
(corresponds to ``$string`` or
``$SSTRING.$TSTRING`` in the CESM file
name conventions; see note above)
``tdir`` Entry to distinguish time averages ``''`` (empty string)
from time series from diagnostic plot
sets (only used for post-processed
data)
``tperiod`` Time period over which the data was ``''`` (empty string)
processed (only used for
post-processed data)
==================== ====================================== =================================
.. _read_emac:
EMAC
^^^^
ESMValTool is able to read native `EMAC
<https://www.dlr.de/pa/en/desktopdefault.aspx/tabid-8859/15306_read-37415/>`_
model output.
The default naming conventions for input directories and files for EMAC are
* input directories: ``{exp}/{channel}``
* input files: ``{exp}*{channel}{postproc_flag}.nc``
as configured in the :ref:`config-developer file <config-developer>` (using the
default DRS ``drs: default`` in the :ref:`user configuration file`).
Thus, example dataset entries could look like this:
.. code-block:: yaml
datasets:
- {project: EMAC, dataset: EMAC, exp: historical, mip: Amon, short_name: tas, start_year: 2000, end_year: 2014}
- {project: EMAC, dataset: EMAC, exp: historical, mip: Omon, short_name: tos, postproc_flag: "-p-mm", start_year: 2000, end_year: 2014}
- {project: EMAC, dataset: EMAC, exp: historical, mip: Amon, short_name: ta, raw_name: tm1_p39_cav, start_year: 2000, end_year: 2014}
Please note the duplication of the name ``EMAC`` in ``project`` and
``dataset``, which is necessary to comply with ESMValTool's data finding and
CMORizing functionalities.
A variable-specific default for the facet ``channel`` is given in the extra
facets (see next paragraph) for many variables, but this can be overwritten in
the recipe.
Similar to any other fix, the EMAC fix allows the use of :ref:`extra
facets<extra_facets>`.
By default, the file :download:`emac-mappings.yml
</../esmvalcore/config/extra_facets/emac-mappings.yml>` is used for that
purpose.
For some variables, extra facets are necessary; otherwise ESMValTool cannot
read them properly.
Supported keys for extra facets are:
==================== ====================================== =================================
Key Description Default value if not specified
==================== ====================================== =================================
``channel`` Channel in which the desired variable No default (needs to be specified
is stored in extra facets or recipe if
default DRS is used)
``postproc_flag`` Postprocessing flag of the data ``''`` (empty string)
``raw_name`` Variable name of the variable in the CMOR variable name of the
raw input file corresponding variable
``raw_units`` Units of the variable in the raw If specified, the value given by
input file the ``units`` attribute in the
raw input file; otherwise
``unknown``
==================== ====================================== =================================
.. note::
``raw_name`` can be given as ``str`` or ``list``.
The latter is used to support multiple different variables names in the
input file.
In this case, the prioritization is given by the order of the list; if
possible, use the first entry, if this is not present, use the second, etc.
This is particularly useful for files in which regular averages (``*_ave``)
or conditional averages (``*_cav``) exist.
For 3D variables defined on pressure levels, only the pressure levels
defined by the CMOR table (e.g., for `Amon`'s `ta`: ``tm1_p19_cav`` and
``tm1_p19_ave``) are given in the default extra facets file.
If other pressure levels are desired, e.g., ``tm1_p39_cav``, this has to be
explicitly specified in the recipe using ``raw_name: tm1_p39_cav`` or
``raw_name: [tm1_p19_cav, tm1_p39_cav]``.
.. _read_icon:
ICON
^^^^
ESMValTool is able to read native `ICON
<https://code.mpimet.mpg.de/projects/iconpublic>`_ model output.
The default naming conventions for input directories and files for ICON are
* input directories: ``{exp}`` or ``{exp}/outdata``
* input files: ``{exp}_{var_type}*.nc``
as configured in the :ref:`config-developer file <config-developer>` (using the
default DRS ``drs: default`` in the :ref:`user configuration file`).
Thus, example dataset entries could look like this:
.. code-block:: yaml
datasets:
- {project: ICON, dataset: ICON, exp: icon-2.6.1_atm_amip_R2B5_r1i1p1f1,
mip: Amon, short_name: tas, start_year: 2000, end_year: 2014}
- {project: ICON, dataset: ICON, exp: historical, mip: Amon,
short_name: ta, var_type: atm_dyn_3d_ml, start_year: 2000,
end_year: 2014}
Please note the duplication of the name ``ICON`` in ``project`` and
``dataset``, which is necessary to comply with ESMValTool's data finding and
CMORizing functionalities.
A variable-specific default for the facet ``var_type`` is given in the extra
facets (see below) for many variables, but this can be overwritten in the
recipe.
This is necessary if your ICON output is structured in one variable per file.
For example, if your output is stored in files called
``<exp>_<variable_name>_atm_2d_ml_YYYYMMDDThhmmss.nc``, use ``var_type:
<variable_name>_atm_2d_ml`` in the recipe for this variable.
Usually, ICON reports aggregated values at the end of the corresponding time
output intervals.
For example, for monthly output, ICON reports the month February as "1 March".
Thus, by default, ESMValCore shifts all time points back by 1/2 of the output
time interval so that the new time point corresponds to the center of the
interval.
This can be disabled by using ``shift_time: false`` in the recipe or the extra
facets (see below).
For point measurements (identified by ``cell_methods = "time: point"``), this
is always disabled.
.. warning::
To get all desired time points, do **not** use ``start_year`` and
``end_year`` in the recipe, but rather ``timerange`` with at least 8 digits.
For example, to get data for the years 2000 and 2001, use ``timerange:
20000101/20020101`` instead of ``timerange: 2000/2001`` or ``start_year:
2000``, ``end_year: 2001``.
See :ref:`timerange_examples` for more information on the ``timerange``
option.
Usually, ESMValCore will need the corresponding ICON grid file of your
simulation to work properly (examples: setting latitude/longitude coordinates
if these are not yet present, UGRIDization [see below], etc.).
This grid file can either be specified as absolute or relative (to
``auxiliary_data_dir`` as defined in the :ref:`user configuration file`) path
with the facet ``horizontal_grid`` in the recipe or the extra facets (see
below), or retrieved automatically from the `grid_file_uri` attribute of the
input files.
In the latter case, the file is downloaded once and then cached.
The cached file is valid for 7 days.
ESMValCore can automatically make native ICON data `UGRID
<https://ugrid-conventions.github.io/ugrid-conventions/>`__-compliant when
loading the data.
The UGRID conventions provide a standardized format to store data on
unstructured grids, which is required by many software packages or tools to
work correctly.
An example is the horizontal regridding of native ICON data to a regular grid.
While the built-in :ref:`unstructured_nearest scheme <built-in regridding
schemes>` can handle unstructured grids not in UGRID format, using more complex
regridding algorithms (for example provided by the
:doc:`iris-esmf-regrid:index` package through :ref:`generic regridding
schemes`) requires the input data in UGRID format.
The following code snippet provides a preprocessor that regrids native ICON
data to a 1°x1° grid using `ESMF's first-order conservative regridding
algorithm <https://earthsystemmodeling.org/regrid/#regridding-methods>`__:
.. code-block:: yaml
preprocessors:
regrid_icon:
regrid:
target_grid: 1x1
scheme:
reference: esmf_regrid.schemes:ESMFAreaWeighted
This automatic UGRIDization is enabled by default, but can be switched off with
the facet ``ugrid: false`` in the recipe or the extra facets (see below).
This is useful for diagnostics that do not support input data in UGRID format
(yet) like the :ref:`Psyplot diagnostic <esmvaltool:recipes_psyplot_diag>` or
if you want to use the built-in :ref:`unstructured_nearest scheme <built-in
regridding schemes>` regridding scheme.
For 3D ICON variables, ESMValCore tries to add the pressure level information
(from the variables `pfull` and `phalf`) and/or altitude information (from the
variables `zg` and `zghalf`) to the preprocessed output files.
If neither of these variables are available in the input files, it is possible
to specify the location of files that include the corresponding `zg` or
`zghalf` variables with the facets ``zg_file`` and/or ``zghalf_file`` in the
recipe or the extra facets.
The paths to these files can be specified absolute or relative (to
``auxiliary_data_dir`` as defined in the :ref:`user configuration file`).
.. hint::
To use the :func:`~esmvalcore.preprocessor.extract_levels` preprocessor on
native ICON data, you need to specify the name of the vertical coordinate
(e.g., ``coordinate: air_pressure``) since native ICON output usually
provides a 3D air pressure field instead of a simple 1D vertical coordinate.
This also works if your files only contain altitude information (in this
case, the US standard atmosphere is used to convert between altitude and
pressure levels; see :ref:`Vertical interpolation` for details).
Example:
.. code-block:: yaml
preprocessors:
extract_500hPa_level_from_icon:
extract_levels:
levels: 50000
scheme: linear
coordinate: air_pressure
Similar to any other fix, the ICON fix allows the use of :ref:`extra
facets<extra_facets>`.
By default, the file :download:`icon-mappings.yml
</../esmvalcore/config/extra_facets/icon-mappings.yml>` is used for that
purpose.
For some variables, extra facets are necessary; otherwise ESMValTool cannot
read them properly.
Supported keys for extra facets are:
=================== ================================ ===================================
Key Description Default value if not specified
=================== ================================ ===================================
``horizontal_grid`` Absolute or relative (to If not given, use file attribute
``auxiliary_data_dir`` defined ``grid_file_uri`` to retrieve ICON
in the grid file
:ref:`user configuration file`)
path to the ICON grid file
``latitude`` Standard name of the latitude ``latitude``
coordinate in the raw input
file
``longitude`` Standard name of the ``longitude``
longitude coordinate in the
raw input file
``raw_name`` Variable name of the CMOR variable name of the
variable in the raw input corresponding variable
file
``raw_units`` Units of the variable in the If specified, the value given by
raw input file the ``units`` attribute in the
raw input file; otherwise
``unknown``
``shift_time`` Shift time points back by 1/2 of ``True``
the corresponding output time
interval
``ugrid`` Automatic UGRIDization of ``True``
the input data
``var_type`` Variable type of the No default (needs to be specified
variable in the raw input in extra facets or recipe if
file default DRS is used)
``zg_file`` Absolute or relative (to If possible, use `zg` variable
``auxiliary_data_dir`` defined provided by the raw input file
in the
:ref:`user configuration file`)
path to the input file that
contains `zg`
``zghalf_file`` Absolute or relative (to If possible, use `zghalf` variable
``auxiliary_data_dir`` defined provided by the raw input file
in the
:ref:`user configuration file`)
path to the input file that
contains `zghalf`
=================== ================================ ===================================
.. hint::
In order to read cell area files (``areacella`` and ``areacello``), one
additional manual step is necessary:
Copy the ICON grid file (you can find a download link in the global
attribute ``grid_file_uri`` of your ICON data) to your ICON input directory
and change its name in such a way that only the grid file is found when the
cell area variables are required.
Make sure that this file is not found when other variables are loaded.
For example, you could use a new ``var_type``, e.g., ``horizontalgrid`` for
this file.
Thus, an ICON grid file located in
``2.6.1_atm_amip_R2B5_r1i1p1f1/2.6.1_atm_amip_R2B5_r1i1p1f1_horizontalgrid.nc``
can be found using ``var_type: horizontalgrid`` in the recipe (assuming the
default naming conventions listed above).
Make sure that no other variable uses this ``var_type``.
If you want to use the :func:`~esmvalcore.preprocessor.area_statistics`
preprocessor on *regridded* ICON data, make sure to **not** use the cell area
files by using the ``skip: true`` syntax in the recipe as described in
:ref:`preprocessors_using_supplementary_variables`, e.g.,
.. code-block:: yaml
datasets:
- {project: ICON, dataset: ICON, exp: amip,
supplementary_variables: [{short_name: areacella, skip: true}]}
.. _read_ipsl-cm6:
IPSL-CM6
^^^^^^^^
Both output formats (i.e. the ``Output`` and the ``Analyse / Time series``
formats) are supported, and should be configured in recipes as e.g.:
.. code-block:: yaml
datasets:
- {simulation: CM61-LR-hist-03.1950, exp: piControl, out: Analyse, freq: TS_MO,
account: p86caub, status: PROD, dataset: IPSL-CM6, project: IPSLCM,
root: /thredds/tgcc/store}
- {simulation: CM61-LR-hist-03.1950, exp: historical, out: Output, freq: MO,
account: p86caub, status: PROD, dataset: IPSL-CM6, project: IPSLCM,
root: /thredds/tgcc/store}
.. _ipslcm_extra_facets_example:
The ``Output`` format is an example of a case where variables are grouped in
multi-variable files, which name cannot be computed directly from datasets
attributes alone but requires to use an extra_facets file, which principles are
explained in :ref:`extra_facets`, and which content is :download:`available here
</../esmvalcore/config/extra_facets/ipslcm-mappings.yml>`. These multi-variable
files must also undergo some data selection.
.. _data-retrieval:
Data retrieval
==============
Data retrieval in ESMValTool has two main aspects from the user's point of
view:
* data can be found by the tool, subject to availability on disk or `ESGF <https://esgf.llnl.gov/>`_;
* it is the user's responsibility to set the correct data retrieval parameters;
The first point is self-explanatory: if the user runs the tool on a machine
that has access to a data repository or multiple data repositories, then
ESMValTool will look for and find the available data requested by the user.
If the files are not found locally, the tool can search the ESGF_ and download
the missing files, provided that they are available.
The second point underlines the fact that the user has full control over the
type and amount of data needed for the analyses. Setting the data
retrieval parameters is explained below.
Enabling automatic downloads from the ESGF
------------------------------------------
To enable automatic downloads from ESGF, set ``search_esgf: when_missing`` (use
local files whenever possible) or ``search_esgf: always`` (always search ESGF
for latest version of files and only use local data if it is the latest
version) in the :ref:`user configuration file`, or provide the corresponding
command line arguments ``--search_esgf=when_missing`` or
``--search_esgf=always`` when running the recipe.
The files will be stored in the ``download_dir`` set in
the :ref:`user configuration file`.
Setting the correct root paths
------------------------------
The first step towards providing ESMValTool the correct set of parameters for
data retrieval is setting the root paths to the data. This is done in the user
configuration file ``config-user.yml``. The two sections where the user will
set the paths are ``rootpath`` and ``drs``. ``rootpath`` contains pointers to
``CMIP``, ``OBS``, ``default`` and ``RAWOBS`` root paths; ``drs`` sets the type
of directory structure the root paths are structured by. It is important to
first discuss the ``drs`` parameter: as we've seen in the previous section, the
DRS as a standard is used for both file naming conventions and for directory
structures.
Synda
-----
If the `synda install <https://prodiguer.github.io/synda/sdt/user_guide.html#synda-install>`_ command is used to download data,
it maintains the directory structure as on ESGF. To find data downloaded by
synda, use the ``SYNDA`` ``drs`` parameter.
.. code-block:: yaml
drs:
CMIP6: SYNDA
CMIP5: SYNDA
.. _config-user-drs:
Explaining ``config-user/drs: CMIP5:`` or ``config-user/drs: CMIP6:``
---------------------------------------------------------------------
Whereas ESMValTool will **always** use the CMOR standard for file naming (please
refer above), by setting the ``drs`` parameter the user tells the tool what
type of root paths they need the data from, e.g.:
.. code-block:: yaml
drs:
CMIP6: BADC
will tell the tool that the user needs data from a repository structured
according to the BADC DRS structure, i.e.:
``ROOT/{institute}/{dataset_name}/{experiment}/{ensemble}/{mip}/{variable_short_name}/{grid}``;
setting the ``ROOT`` parameter is explained below. This is a
strictly-structured repository tree and if there are any sort of irregularities
(e.g. there is no ``{mip}`` directory) the data will not be found! ``BADC`` can
be replaced with ``DKRZ`` or ``ETHZ`` depending on the existing ``ROOT``
directory structure.
The snippet
.. code-block:: yaml
drs:
CMIP6: default
is another way to retrieve data from a ``ROOT`` directory that has no DRS-like
structure; ``default`` indicates that the data lies in a directory that
contains all the files without any structure.
.. note::
When using ``CMIP6: default`` or ``CMIP5: default`` it is important to
remember that all the needed files must be in the same top-level directory
set by ``default`` (see below how to set ``default``).
.. _config-user-rootpath:
Explaining ``config-user/rootpath:``
------------------------------------
``rootpath`` identifies the root directory for different data types (``ROOT`` as we used it above):
* ``CMIP`` e.g. ``CMIP5`` or ``CMIP6``: this is the `root` path(s) to where the
CMIP files are stored; it can be a single path or a list of paths; it can
point to an ESGF node or it can point to a user private repository. Example
for a CMIP5 root path pointing to the ESGF node on CEDA-Jasmin (formerly
known as BADC):
.. code-block:: yaml
CMIP5: /badc/cmip5/data/cmip5/output1
Example for a CMIP6 root path pointing to the ESGF node on CEDA-Jasmin:
.. code-block:: yaml
CMIP6: /badc/cmip6/data/CMIP6/CMIP
Example for a mix of CMIP6 root path pointing to the ESGF node on CEDA-Jasmin
and a user-specific data repository for extra data:
.. code-block:: yaml
CMIP6: [/badc/cmip6/data/CMIP6/CMIP, /home/users/johndoe/cmip_data]
* ``OBS``: this is the `root` path(s) to where the observational datasets are
stored; again, this could be a single path or a list of paths, just like for
CMIP data. Example for the OBS path for a large cache of observation datasets
on CEDA-Jasmin:
.. code-block:: yaml
OBS: /gws/nopw/j04/esmeval/obsdata-v2
* ``default``: this is the `root` path(s) where the tool will look for data
from projects that do not have their own rootpath set.
* ``RAWOBS``: this is the `root` path(s) to where the raw observational data
files are stored; this is used by ``esmvaltool data format``.
Dataset definitions in ``recipe``
---------------------------------
Once the correct paths have been established, ESMValTool collects the
information on the specific datasets that are needed for the analysis. This
information, together with the CMOR convention for naming files (see CMOR-DRS_)
will allow the tool to search and find the right files. The specific
datasets are listed in any recipe, under either the ``datasets`` and/or
``additional_datasets`` sections, e.g.
.. code-block:: yaml
datasets:
- {dataset: HadGEM2-CC, project: CMIP5, exp: historical, ensemble: r1i1p1, start_year: 2001, end_year: 2004}
- {dataset: UKESM1-0-LL, project: CMIP6, exp: historical, ensemble: r1i1p1f2, grid: gn, start_year: 2004, end_year: 2014}
The data finding feature will use this information to find data for **all** the variables specified in ``diagnostics/variables``.
Recap and example
=================
Let us look at a practical example for a recap of the information above:
suppose you are using a ``config-user.yml`` that has the following entries for
data finding:
.. code-block:: yaml
rootpath: # running on CEDA-Jasmin
CMIP6: /badc/cmip6/data/CMIP6/CMIP
drs:
CMIP6: BADC # since you are on CEDA-Jasmin
and the dataset you need is specified in your ``recipe.yml`` as:
.. code-block:: yaml
- {dataset: UKESM1-0-LL, project: CMIP6, mip: Amon, exp: historical, grid: gn, ensemble: r1i1p1f2, start_year: 2004, end_year: 2014}
for a variable, e.g.:
.. code-block:: yaml
diagnostics:
some_diagnostic:
description: some_description
variables:
ta:
preprocessor: some_preprocessor
The tool will then use the root path ``/badc/cmip6/data/CMIP6/CMIP`` and the
dataset information and will assemble the full DRS path using information from
CMOR-DRS_ and establish the path to the files as:
.. code-block:: bash
/badc/cmip6/data/CMIP6/CMIP/MOHC/UKESM1-0-LL/historical/r1i1p1f2/Amon
then look for variable ``ta`` and specifically the latest version of the data
file:
.. code-block:: bash
/badc/cmip6/data/CMIP6/CMIP/MOHC/UKESM1-0-LL/historical/r1i1p1f2/Amon/ta/gn/latest/
and finally, using the file naming definition from CMOR-DRS_ find the file:
.. code-block:: bash
/badc/cmip6/data/CMIP6/CMIP/MOHC/UKESM1-0-LL/historical/r1i1p1f2/Amon/ta/gn/latest/ta_Amon_UKESM1-0-LL_historical_r1i1p1f2_gn_195001-201412.nc
.. _observations:
Data loading
============
Data loading is done using the data load functionality of `iris`; we will not go into too much detail
about this since we can point the user to the specific functionality
`here <https://scitools-iris.readthedocs.io/en/latest/userguide/loading_iris_cubes.html>`_ but we will underline
that the initial loading is done by adhering to the CF Conventions that `iris` operates by as well (see
`CF Conventions Document <http://cfconventions.org/cf-conventions/cf-conventions.html>`_ and the search
page for CF `standard names <http://cfconventions.org/standard-names.html>`_).
Data concatenation from multiple sources
========================================
Oftentimes data retrieval results in assembling a continuous data stream from
multiple files or even, multiple experiments. The internal mechanism through which
the assembly is done is via cube concatenation. One peculiarity of iris concatenation
(see `iris cube concatenation <https://scitools-iris.readthedocs.io/en/latest/userguide/merge_and_concat.html>`_)
is that it doesn't allow for concatenating time-overlapping cubes; this case is rather
frequent with data from models overlapping in time, and is accounted for by a function that performs a
flexible concatenation between two cubes, depending on the particular setup:
* cubes overlap in time: resulting cube is made up of the overlapping data plus left and
right hand sides on each side of the overlapping data; note that in the case of the cubes
coming from different experiments the resulting concatenated cube will have composite data
made up from multiple experiments: assume [cube1: exp1, cube2: exp2] and cube1 starts before cube2,
and cube2 finishes after cube1, then the concatenated cube will be made up of cube2: exp2 plus the
section of cube1: exp1 that contains data not provided in cube2: exp2;
* cubes don't overlap in time: data from the two cubes is bolted together;
Note that two-cube concatenation is the base operation of an iterative process that reduces multiple cubes
from multiple data segments via repeated concatenation; i.e. if there is no time-overlapping data, the
concatenation of the cubes is performed in one step.
.. _extra-facets-data-finder:
Use of extra facets in the datafinder
=====================================
Extra facets are a mechanism to provide additional information for certain kinds
of data. The general approach is described in :ref:`extra_facets`. Here, we
describe how they can be used to locate data files within the datafinder
framework.
This is useful to build paths for directory structures and file names
that require more information than what is provided in the recipe.
A common application is the location of variables in multi-variable files as
often found in climate models' native output formats.
Another use case is files that use different names for variables in their
file name than for the netCDF4 variable name.
To apply the extra facets for this purpose, simply use the corresponding tag in
the applicable DRS inside the `config-developer.yml` file. For example, given
the extra facets in :ref:`extra-facets-example-1`, one might write the
following.
.. _extra-facets-example-2:
.. code-block:: yaml
:caption: Example drs use in `config-developer.yml`
native6:
input_file:
default: '{name_in_filename}*.nc'
The same replacement mechanism can be employed everywhere where tags can be
used, particularly in `input_dir` and `input_file`.
| PypiClean |
/ISY994v5-0.9.7.tar.gz/ISY994v5-0.9.7/isy994/items/devices/insteon/device_insteon_dimmer.py |
from ..common.device_dimmer import Device_Dimmer
from .device_insteon_base import Device_Insteon_Base
# ISY websocket control codes that correspond to a physical paddle
# interaction on the dimmer: on/off presses (including multi-tap variants),
# dim/brighten steps, fast on/off, and fade up/down/stop events.
paddle_events = {"DON", "DON3", "DON4", "DON5", "DOF", "DOF3", "DOF4", "DOF5", "DIM", "BRT", "DFON", "DFOF", "FDUP", "FDDOWN", "FDSTOP"}
class Device_Insteon_Dimmer(Device_Dimmer, Device_Insteon_Base):
    """An Insteon dimmer exposed through the ISY controller.

    Combines the generic dimmer behaviour (``Device_Dimmer``) with the
    Insteon-specific base class, adds reporting of local paddle presses via
    the ``paddle_action`` property, and implements fast on/off,
    brighten/dim and fade-stop commands.
    """

    def __init__(self, container, device_info):
        Device_Dimmer.__init__(self, container, device_info.name,
                               device_info.address)
        Device_Insteon_Base.__init__(self, device_info)

        # Surfaces local paddle interactions (see ``paddle_events``).
        self.add_property("paddle_action")

        # Seed the dimmer level from the reported status ("ST"). The ISY
        # reports status on a 0-255 scale; store it as a 0-100 percentage.
        value = device_info.get_property("ST", "value")
        if value:
            try:
                self.properties["level"] = int(int(value) / 255 * 100)
            except (TypeError, ValueError):
                # Non-numeric status value; leave the level unset rather
                # than fail device construction.
                pass

    def process_websocket_event(self, event):
        """Handle a websocket event: status updates and paddle actions."""
        Device_Dimmer.process_websocket_event(self, event)

        if event.control == "ST":
            # Status action is 0-255; convert to a percentage level.
            self.set_property("level", int(int(event.action) / 255 * 100))
        elif event.control in paddle_events:
            # A physical paddle interaction; force-notify so repeated
            # identical presses are still reported.
            self.set_property("paddle_action", event.control, True)

    def set_level(self, level):
        """Set brightness to *level* (0-100 percent) via the DON command."""
        # The ISY REST API expects an on-level in the 0-255 range.
        device_level = int(level / 100 * 255)
        Device_Dimmer.set_level(self, level)
        path = "nodes/" + self.address + "/cmd/DON/" + str(device_level)
        return self.send_request(path)

    # NOTE(review): the fade up/down commands below are retained from the
    # original source but stay disabled — they referenced an undefined
    # ``level`` variable and need a signature decision before enabling.
    # def fade_up(self):
    #     path = "nodes/" + self.address + "/cmd/FDUP/startlevel/" + str(int(level / 100 * 255))
    #     return self.send_request(path)
    # def fade_down(self):
    #     path = "nodes/" + self.address + "/cmd/FDDOWN/startlevel/" + str(int(level / 100 * 255))
    #     return self.send_request(path)

    def fade_stop(self):
        """Stop an in-progress fade (FDSTOP)."""
        path = "nodes/" + self.address + "/cmd/FDSTOP"
        return self.send_request(path)

    def fast_on(self):
        """Turn on at full brightness immediately (DFON)."""
        path = "nodes/" + self.address + "/cmd/DFON"
        return self.send_request(path)

    def fast_off(self):
        """Turn off immediately, skipping the ramp (DFOF)."""
        path = "nodes/" + self.address + "/cmd/DFOF"
        return self.send_request(path)

    def brighten(self):
        """Step the level up by one increment (BRT)."""
        path = "nodes/" + self.address + "/cmd/BRT"
        return self.send_request(path)

    def dim(self):
        """Step the level down by one increment (DIM)."""
        path = "nodes/" + self.address + "/cmd/DIM"
        return self.send_request(path)
/Exam2excel_converter-0.0.2-py3-none-any.whl/exam2excel_converter.py | import re
import sys
import pandas as pd
from openpyxl import Workbook
class Question:
    """Container for one parsed exam question and its answer key.

    Holds the question statement, its difficulty level, the four answer
    options, the worked-solution instruction and the correct answer letter.
    """

    def __init__(self, statement, level, option_a, option_b, option_c,
                 option_d, instruction, answer):
        self.statement = statement
        self.level = level
        self.answer = answer
        self.instruction = instruction
        self.option_a = option_a
        self.option_b = option_b
        self.option_c = option_c
        self.option_d = option_d
# Maps the level tag found in the source document to the canonical
# difficulty label written into the spreadsheet. Any tag not listed here
# falls back to '4_Vận dụng cao' (see convert_file_word_to_excel).
LEVEL_MAP = {
    'TH': '2_Thông hiểu',
    'VD': '3_Vận Dụng',
    'VDT': '3_Vận Dụng',
    'NB': '1_Nhận biết',
}
# Matches one complete question record: "Câu <num> (<level>). <statement>",
# one or more "X. option" lines, the solution guide ("Hướng dẫn:") and the
# answer key ("Chọn: X"). DOTALL lets the statement span multiple lines.
pattern = re.compile(r'Câu (\d+) \((.*?)\)\. (.*?)\n((?:[A-Z]\. .*?\n)+)Hướng dẫn: (.*?)\nChọn: ([A-Z])', re.DOTALL)
def convert_file_word_to_excel(input_file_path, output_file_path):
    """Parse exam questions from a text export and write them to Excel.

    Reads *input_file_path*, extracts every question matching the module
    level ``pattern``, and writes two sheets to *output_file_path* (or
    ``./output/output_data.xlsx`` when empty): ``questions`` with the
    normalized fields and ``question_for_qc`` with a human-readable layout
    for quality control. Exits the process when the input path is empty or
    no question matches.
    """
    if input_file_path == "":
        print('The file does not exist. Please check again')
        sys.exit(0)

    questions_final = []
    questions_qc = []
    with open(input_file_path, 'r', encoding='utf-8') as file:
        content = file.read()
        questions = re.findall(pattern, content)
        if len(questions) == 0:
            print('The pattern did not match any questions, please check!')
            sys.exit(0)
        for question in questions:
            num = question[0]
            question_type = question[1]
            text = question[2].strip()
            options_text = question[3]
            options = re.findall(r'[A-Z]\. (.*?)\n', options_text)
            instruction = question[4].strip()
            chosen_option = question[5]
            option_a, option_b, option_c, option_d = options
            print(f"_Câu {num} ({question_type}): {text} passed ✅")
            questions_final.append(Question(
                statement=text,
                level=LEVEL_MAP.get(question_type, '4_Vận dụng cao'),
                option_a=option_a, option_b=option_b,
                option_c=option_c, option_d=option_d,
                instruction=instruction, answer=chosen_option.lower()))
            # QC sheet keeps the original numbering/labels for human review.
            # BUG FIX: the level used to be the literal string
            # " question_type " because the f-string braces were missing.
            questions_qc.append(Question(
                statement=f"Câu {num} {text}", level=f" {question_type} ",
                option_a=f"A. {option_a} ", option_b=f" B. {option_b} ",
                option_c=f' C. {option_c}', option_d=f' D. {option_d} ',
                instruction=f'Hướng Dẫn: {instruction}',
                answer=f'Chọn: {chosen_option} '))

    # Convert the parsed questions to data frames with a fixed column order.
    columns = ['statement', 'level', 'option_a',
               'option_b', 'option_c', 'option_d', 'answer', 'instruction']

    def _as_rows(question_list):
        # One spreadsheet row per question, in `columns` order.
        return [[q.statement, q.level, q.option_a, q.option_b, q.option_c,
                 q.option_d, q.answer, q.instruction]
                for q in question_list]

    df_final = pd.DataFrame(_as_rows(questions_final), columns=columns)
    df_qc = pd.DataFrame(_as_rows(questions_qc), columns=columns)

    # Write both sheets and save the workbook.
    writer = pd.ExcelWriter(
        './output/output_data.xlsx' if output_file_path == "" else output_file_path,
        engine='openpyxl')
    df_final.to_excel(writer, index=False, header=False,
                      sheet_name='questions')
    df_qc.to_excel(writer, index=False, header=False,
                   sheet_name='question_for_qc')
    writer.close()
# Remove empty file input
# ==========================
def conver_txt_file(input_file_path, output_file_path):
    """Copy *input_file_path* to *output_file_path*, dropping blank lines.

    Lines consisting only of whitespace are removed; all other lines are
    written through unchanged. When *output_file_path* is empty the result
    is written to ``./output/output_data.txt``. Exits silently when no
    input path is given.
    """
    if input_file_path == "":
        sys.exit(0)

    destination = ('./output/output_data.txt'
                   if output_file_path == "" else output_file_path)

    # Keep only lines that contain something other than whitespace.
    with open(file=input_file_path, mode="r", encoding="utf-8") as source:
        kept_lines = [line for line in source.readlines()
                      if line.strip() != ""]

    with open(file=destination, mode="w", encoding='utf-8') as target:
        target.writelines(kept_lines)
/ISAMBARD-2.3.1.tar.gz/ISAMBARD-2.3.1/src/isambard/optimisation/mmc_optimizer.py |
import copy
import enum
import math
import random
import sys
import numpy
def float_f(f):
    """Format *f* as a whole-number string for progress output."""
    # f-string is the modern equivalent of '{:.0f}'.format(f).
    return f'{f:.0f}'
class MMCParameterType(enum.Enum):
    """Enumerates the kinds of parameter a Monte Carlo run can vary."""

    STATIC_VALUE = enum.auto()    # fixed value, never mutated
    DISCRETE_RANGE = enum.auto()  # (min, max, step) discrete grid
    LIST = enum.auto()            # explicit list of candidate values
    UNIFORM_DIST = enum.auto()    # (a, b) uniform distribution
    NORMAL_DIST = enum.auto()     # (mu, sigma) normal distribution
class MMCParameter:
    """A single optimisable parameter used by the MMC optimizer.

    Parameters
    ----------
    label : str
        The name of the parameter.
    parameter_type : MMCParameterType
        The type of the parameter.
    static_dist_or_list
        The data that defines the parameter: any object for
        ``STATIC_VALUE``, a list of candidates for ``LIST``, ``(a, b)``
        for ``UNIFORM_DIST``, ``(mu, sigma)`` for ``NORMAL_DIST`` or
        ``(min, max, step)`` for ``DISCRETE_RANGE``.
    starting_value, optional
        The initial value of the parameter; drawn at random when omitted
        (for non-static parameters).

    Attributes
    ----------
    starting_value
        The initial value of the parameter.
    current_value
        The currently accepted value for the parameter.
    proposed_value
        A pending value that has not yet been accepted (``None`` when no
        proposal is outstanding).
    """

    def __init__(self, label, parameter_type, static_dist_or_list,
                 starting_value=None):
        self.label = label
        self.parameter_type = parameter_type
        self.static_dist_or_list = static_dist_or_list
        self.current_value = None
        self.proposed_value = None
        if starting_value is not None:
            # Caller supplied the initial value explicitly.
            self.starting_value = starting_value
            self.current_value = starting_value
        elif parameter_type is MMCParameterType.STATIC_VALUE:
            # A static parameter simply holds its defining value.
            self.starting_value = static_dist_or_list
            self.current_value = static_dist_or_list
        else:
            # Draw a random starting point from the parameter's
            # distribution and accept it immediately.
            self.randomise_proposed_value()
            self.starting_value = self.proposed_value
            self.accept_proposed_value()

    def randomise_proposed_value(self):
        """Draw a fresh proposed value from this parameter's distribution.

        Raises
        ------
        TypeError
            If the parameter is static or its type is unrecognised.
        """
        ptype = self.parameter_type
        if ptype is MMCParameterType.STATIC_VALUE:
            raise TypeError('This value is static, it cannot be mutated.')
        if ptype is MMCParameterType.UNIFORM_DIST:
            lower, upper = self.static_dist_or_list
            self.proposed_value = random.uniform(lower, upper)
        elif ptype is MMCParameterType.NORMAL_DIST:
            mean, std_dev = self.static_dist_or_list
            self.proposed_value = random.normalvariate(mean, std_dev)
        elif ptype is MMCParameterType.DISCRETE_RANGE:
            start, stop, step = self.static_dist_or_list
            self.proposed_value = random.choice(
                numpy.arange(start, stop, step))
        elif ptype is MMCParameterType.LIST:
            self.proposed_value = random.choice(self.static_dist_or_list)
        else:
            raise TypeError(
                'Cannot randomise this parameter, unknown parameter type.')

    def accept_proposed_value(self):
        """Promote the pending proposal (if any) to the current value."""
        if self.proposed_value is not None:
            self.current_value = self.proposed_value
        self.proposed_value = None

    def reject_proposed_value(self):
        """Discard any pending proposal."""
        self.proposed_value = None

    def __repr__(self):
        return (f'<MMCParameter: {self.label}, {self.parameter_type}, '
                f'{self.static_dist_or_list}, {self.current_value}>')
class MMCParameterOptimisation:
    """Performs Metropolis Monte Carlo optimisation on a specification.

    References
    ----------
    .. [0] Metropolis N, Rosenbluth AW, Rosenbluth MN, Teller AH and
       Teller E (1953) Equations of State Calculations by Fast
       Computing Machines, Journal of Chemical Physics. 21 (6),
       1087-1092.

    Parameters
    ----------
    specification : isambard.Specification
        Any ISAMBARD specification. This will be used to create models
        during the optimisation.
    parameters : [MMCParameter]
        Parameters to be optimised. The order of the parameters should
        match the init signature of the specification.
    sequences : [str]
        The sequences to be used during the optimisation.
    eval_function : f(ampal) -> float
        A function that takes an AMPAL object as an input and returns
        a float used as the energy to minimise.

    Attributes
    ----------
    current_energy : float
        The energy of the current structure.
    best_energy : float
        The best energy observed for any model.
    best_parameters : [MMCParameter]
        The parameters associated with the `best_energy`.
    best_model : AMPAL Object
        The model associated with the `best_energy`.
    """
    current_energy = None
    best_energy = None
    best_parameters = None
    best_model = None

    def __init__(self, specification, parameters, sequences, eval_function):
        self.specification = specification
        # Deep copy so the caller's parameter objects are never mutated.
        self.current_parameters = copy.deepcopy(parameters)
        self.sequences = sequences
        self.eval_function = eval_function

    def start_optimisation(self, rounds, temp=298.15):
        """Begin the optimisation run.

        Parameters
        ----------
        rounds : int
            The number of rounds of optimisation to perform.
        temp : float, optional
            The temperature (in K) used during the optimisation.
        """
        self._generate_initial_model()
        self._mmc_loop(rounds, temp=temp)

    def _generate_initial_model(self):
        """Creates the initial model for the optimisation.

        Raises
        ------
        TypeError
            Raised if the model failed to build. This could be due to
            parameters being passed to the specification in the wrong
            format.
        """
        initial_parameters = [p.current_value for p in self.current_parameters]
        try:
            initial_model = self.specification(*initial_parameters)
        except TypeError:
            raise TypeError(
                'Failed to build initial model. Make sure that the input '
                'parameters match the number and order of arguements '
                'expected by the input specification.')
        initial_model.pack_new_sequences(self.sequences)
        self.current_energy = self.eval_function(initial_model)
        self.best_energy = copy.deepcopy(self.current_energy)
        self.best_parameters = copy.deepcopy(self.current_parameters)
        self.best_model = initial_model

    def _mmc_loop(self, rounds, temp=298.15, verbose=True):
        """The main MMC loop.

        Parameters
        ----------
        rounds : int
            The number of rounds of optimisation to perform.
        temp : float, optional
            The temperature (in K) used during the optimisation.
        verbose : bool, optional
            If true, prints information about the run to std out.
        """
        # TODO add weighted randomisation of altered variable
        for current_round in range(rounds):
            # Pick one non-static parameter at random and mutate it.
            modifiable = [
                p for p in self.current_parameters
                if p.parameter_type is not MMCParameterType.STATIC_VALUE]
            chosen_parameter = random.choice(modifiable)
            chosen_parameter.randomise_proposed_value()

            # Build a model from the current values plus the one proposal.
            proposed_parameters = [
                p.current_value
                if p.proposed_value is None else p.proposed_value
                for p in self.current_parameters]
            model = self.specification(*proposed_parameters)
            model.pack_new_sequences(self.sequences)
            proposed_energy = self.eval_function(model)

            # BUG FIX: decide acceptance exactly once. The original code
            # called check_move() a second time for the progress message;
            # each uphill call draws from the RNG, so the printed verdict
            # could disagree with the move that was actually applied.
            accepted = self.check_move(
                proposed_energy, self.current_energy, t=temp)

            # TODO Add proper logging
            if verbose:
                sys.stdout.write(
                    '\rRound: {}, Current energy: {}, Proposed energy: {} '
                    '(best {}), {}. '
                    .format(current_round, float_f(self.current_energy),
                            float_f(proposed_energy),
                            float_f(self.best_energy),
                            "ACCEPTED" if accepted else "DECLINED"))
                sys.stdout.flush()

            if accepted:
                for p in self.current_parameters:
                    p.accept_proposed_value()
                self.current_energy = proposed_energy
                if self.current_energy < self.best_energy:
                    self.best_energy = copy.deepcopy(self.current_energy)
                    self.best_parameters = copy.deepcopy(
                        self.current_parameters)
                    self.best_model = model
            else:
                for p in self.current_parameters:
                    p.reject_proposed_value()

    @staticmethod
    def check_move(new, old, t):
        """Determines if a proposed move will be accepted.

        Downhill moves are always accepted; uphill moves are accepted
        with a probability given by the Boltzmann factor scaled by the
        temperature *t* (in Kelvin). Non-positive temperatures reject
        every move.
        """
        if (t <= 0) or numpy.isclose(t, 0.0):
            return False
        K_BOLTZ = 8.3144621E-003  # gas constant, kJ/(mol*K)
        if new < old:
            return True
        move_prob = math.exp(-(new - old) / (K_BOLTZ * t))
        return move_prob > random.uniform(0, 1)
__author__ = "Christopher W. Wood" | PypiClean |
/GeoNode-3.2.0-py3-none-any.whl/geonode/static/geonode/js/ol-2.13/lib/OpenLayers/Format/GML.js | * @requires OpenLayers/Format/XML.js
* @requires OpenLayers/Feature/Vector.js
* @requires OpenLayers/Geometry/Point.js
* @requires OpenLayers/Geometry/MultiPoint.js
* @requires OpenLayers/Geometry/LineString.js
* @requires OpenLayers/Geometry/MultiLineString.js
* @requires OpenLayers/Geometry/Polygon.js
* @requires OpenLayers/Geometry/MultiPolygon.js
*/
/**
* Class: OpenLayers.Format.GML
* Read/Write GML. Create a new instance with the <OpenLayers.Format.GML>
* constructor. Supports the GML simple features profile.
*
* Inherits from:
* - <OpenLayers.Format.XML>
*/
OpenLayers.Format.GML = OpenLayers.Class(OpenLayers.Format.XML, {

    /**
     * APIProperty: featureNS
     * {String} Namespace URI used for feature attributes. Default is
     *     "http://mapserver.gis.umn.edu/mapserver".
     */
    featureNS: "http://mapserver.gis.umn.edu/mapserver",

    /**
     * APIProperty: featurePrefix
     * {String} Namespace alias (or prefix) for feature nodes. Default is
     *     "feature".
     */
    featurePrefix: "feature",

    /**
     * APIProperty: featureName
     * {String} Element name for features. Default is "featureMember".
     */
    featureName: "featureMember",

    /**
     * APIProperty: layerName
     * {String} Name of the data layer. Default is "features".
     */
    layerName: "features",

    /**
     * APIProperty: geometryName
     * {String} Name of the geometry element. Defaults to "geometry".
     */
    geometryName: "geometry",

    /**
     * APIProperty: collectionName
     * {String} Name of the featureCollection element. Default is
     *     "FeatureCollection".
     */
    collectionName: "FeatureCollection",

    /**
     * APIProperty: gmlns
     * {String} GML namespace URI.
     */
    gmlns: "http://www.opengis.net/gml",

    /**
     * APIProperty: extractAttributes
     * {Boolean} Extract feature attributes from the GML. Default is true.
     */
    extractAttributes: true,

    /**
     * APIProperty: xy
     * {Boolean} Axis order of GML coordinates: true for (x,y), false for
     *     (y,x). Changing this is not recommended; instantiate a new
     *     Format instead.
     */
    xy: true,
    /**
     * Constructor: OpenLayers.Format.GML
     * Create a new parser for GML.
     *
     * Parameters:
     * options - {Object} An optional object whose properties will be set on
     *     this instance.
     */
    initialize: function(options) {
        // compile regular expressions once instead of every time they are
        // used; they are applied repeatedly while splitting coordinate
        // strings during parsing.
        this.regExes = {
            trimSpace: (/^\s*|\s*$/g),
            removeSpace: (/\s*/g),
            splitSpace: (/\s+/),
            trimComma: (/\s*,\s*/g)
        };
        OpenLayers.Format.XML.prototype.initialize.apply(this, [options]);
    },
    /**
     * APIMethod: read
     * Read data from a string or DOM document and return the list of
     * features it contains.
     *
     * Parameters:
     * data - {String} or {DOMElement} data to read/parse.
     *
     * Returns:
     * {Array(<OpenLayers.Feature.Vector>)} An array of features.
     */
    read: function(data) {
        // Accept raw XML text as well as an already-parsed document.
        if(typeof data == "string") {
            data = OpenLayers.Format.XML.prototype.read.apply(this, [data]);
        }
        // Each gml:featureMember node yields at most one feature.
        var featureNodes = this.getElementsByTagNameNS(data.documentElement,
                                                       this.gmlns,
                                                       this.featureName);
        var features = [];
        for(var i=0; i<featureNodes.length; i++) {
            var feature = this.parseFeature(featureNodes[i]);
            if(feature) {
                features.push(feature);
            }
        }
        return features;
    },
    /**
     * Method: parseFeature
     * This function is the core of the GML parsing code in OpenLayers.
     * It creates the geometries that are then attached to the returned
     * feature, and calls parseAttributes() to get attribute data out.
     *
     * Parameters:
     * node - {DOMElement} A GML feature node.
     *
     * Returns:
     * {<OpenLayers.Feature.Vector>} The parsed feature.
     */
    parseFeature: function(node) {
        // only accept one geometry per feature - look for highest "order";
        // multi-geometries are checked before their single counterparts so
        // the container wins when both element names appear.
        var order = ["MultiPolygon", "Polygon",
                     "MultiLineString", "LineString",
                     "MultiPoint", "Point", "Envelope"];
        // FIXME: In case we parse a feature with no geometry, but boundedBy an Envelope,
        // this code creates a geometry derived from the Envelope. This is not correct.
        var type, nodeList, geometry, parser;
        for(var i=0; i<order.length; ++i) {
            type = order[i];
            nodeList = this.getElementsByTagNameNS(node, this.gmlns, type);
            if(nodeList.length > 0) {
                // only deal with first geometry of this type
                parser = this.parseGeometry[type.toLowerCase()];
                if(parser) {
                    geometry = parser.apply(this, [nodeList[0]]);
                    // reproject when both projections are configured
                    if (this.internalProjection && this.externalProjection) {
                        geometry.transform(this.externalProjection,
                                           this.internalProjection);
                    }
                } else {
                    throw new TypeError("Unsupported geometry type: " + type);
                }
                // stop looking for different geometry types
                break;
            }
        }
        // A gml:Box inside boundedBy becomes the feature bounds; anywhere
        // else it is converted into the feature geometry itself.
        var bounds;
        var boxNodes = this.getElementsByTagNameNS(node, this.gmlns, "Box");
        for(i=0; i<boxNodes.length; ++i) {
            var boxNode = boxNodes[i];
            var box = this.parseGeometry["box"].apply(this, [boxNode]);
            var parentNode = boxNode.parentNode;
            var parentName = parentNode.localName ||
                             parentNode.nodeName.split(":").pop();
            if(parentName === "boundedBy") {
                bounds = box;
            } else {
                geometry = box.toGeometry();
            }
        }
        // construct feature (optionally with attributes)
        var attributes;
        if(this.extractAttributes) {
            attributes = this.parseAttributes(node);
        }
        var feature = new OpenLayers.Feature.Vector(geometry, attributes);
        feature.bounds = bounds;
        // record the feature type element so the feature can be serialized
        // back with the same name/namespace
        feature.gml = {
            featureType: node.firstChild.nodeName.split(":")[1],
            featureNS: node.firstChild.namespaceURI,
            featureNSPrefix: node.firstChild.prefix
        };
        // assign fid - this can come from a "fid" or "id" attribute on the
        // first element child
        var childNode = node.firstChild;
        var fid;
        while(childNode) {
            if(childNode.nodeType == 1) {
                fid = childNode.getAttribute("fid") ||
                      childNode.getAttribute("id");
                if(fid) {
                    break;
                }
            }
            childNode = childNode.nextSibling;
        }
        feature.fid = fid;
        return feature;
    },
/**
* Property: parseGeometry
* Properties of this object are the functions that parse geometries based
* on their type.
*/
parseGeometry: {
        /**
         * Method: parseGeometry.point
         * Given a GML node representing a point geometry, create an
         * OpenLayers point geometry.
         *
         * Parameters:
         * node - {DOMElement} A GML node.
         *
         * Returns:
         * {<OpenLayers.Geometry.Point>} A point geometry.
         */
        point: function(node) {
            /**
             * Three coordinate variations to consider, tried in order:
             * 1) <gml:pos>x y z</gml:pos>
             * 2) <gml:coordinates>x, y, z</gml:coordinates>
             * 3) <gml:coord><gml:X>x</gml:X><gml:Y>y</gml:Y></gml:coord>
             */
            var nodeList, coordString;
            var coords = [];

            // look for <gml:pos> (GML 3 style)
            var nodeList = this.getElementsByTagNameNS(node, this.gmlns, "pos");
            if(nodeList.length > 0) {
                coordString = nodeList[0].firstChild.nodeValue;
                coordString = coordString.replace(this.regExes.trimSpace, "");
                coords = coordString.split(this.regExes.splitSpace);
            }

            // look for <gml:coordinates> (GML 2 style)
            if(coords.length == 0) {
                nodeList = this.getElementsByTagNameNS(node, this.gmlns,
                                                       "coordinates");
                if(nodeList.length > 0) {
                    coordString = nodeList[0].firstChild.nodeValue;
                    coordString = coordString.replace(this.regExes.removeSpace,
                                                      "");
                    coords = coordString.split(",");
                }
            }

            // look for <gml:coord> with explicit <gml:X>/<gml:Y> children
            if(coords.length == 0) {
                nodeList = this.getElementsByTagNameNS(node, this.gmlns,
                                                       "coord");
                if(nodeList.length > 0) {
                    var xList = this.getElementsByTagNameNS(nodeList[0],
                                                            this.gmlns, "X");
                    var yList = this.getElementsByTagNameNS(nodeList[0],
                                                            this.gmlns, "Y");
                    if(xList.length > 0 && yList.length > 0) {
                        coords = [xList[0].firstChild.nodeValue,
                                  yList[0].firstChild.nodeValue];
                    }
                }
            }

            // preserve third dimension (null when the source was 2D)
            if(coords.length == 2) {
                coords[2] = null;
            }

            // honour the configured axis order (see the xy property)
            if (this.xy) {
                return new OpenLayers.Geometry.Point(coords[0], coords[1],
                                                     coords[2]);
            }
            else{
                return new OpenLayers.Geometry.Point(coords[1], coords[0],
                                                     coords[2]);
            }
        },
/**
* Method: parseGeometry.multipoint
* Given a GML node representing a multipoint geometry, create an
* OpenLayers multipoint geometry.
*
* Parameters:
* node - {DOMElement} A GML node.
*
* Returns:
* {<OpenLayers.Geometry.MultiPoint>} A multipoint geometry.
*/
multipoint: function(node) {
var nodeList = this.getElementsByTagNameNS(node, this.gmlns,
"Point");
var components = [];
if(nodeList.length > 0) {
var point;
for(var i=0; i<nodeList.length; ++i) {
point = this.parseGeometry.point.apply(this, [nodeList[i]]);
if(point) {
components.push(point);
}
}
}
return new OpenLayers.Geometry.MultiPoint(components);
},
        /**
         * Method: parseGeometry.linestring
         * Given a GML node representing a linestring geometry, create an
         * OpenLayers linestring geometry.
         *
         * Parameters:
         * node - {DOMElement} A GML node.
         * ring - {Boolean} If true, build a LinearRing instead of a
         *     LineString (used when parsing polygon boundaries).
         *
         * Returns:
         * {<OpenLayers.Geometry.LineString>} A linestring geometry, or a
         *     LinearRing when *ring* is true; null if no points were found.
         */
        linestring: function(node, ring) {
            /**
             * Two coordinate variations to consider, tried in order:
             * 1) <gml:posList dimension="d">x0 y0 z0 x1 y1 z1</gml:posList>
             * 2) <gml:coordinates>x0, y0, z0 x1, y1, z1</gml:coordinates>
             */
            var nodeList, coordString;
            var coords = [];
            var points = [];

            // look for <gml:posList> (GML 3 style; flat list of ordinates)
            nodeList = this.getElementsByTagNameNS(node, this.gmlns, "posList");
            if(nodeList.length > 0) {
                coordString = this.getChildValue(nodeList[0]);
                coordString = coordString.replace(this.regExes.trimSpace, "");
                coords = coordString.split(this.regExes.splitSpace);
                // the "dimension" attribute tells how many ordinates form
                // one point (2 or 3)
                var dim = parseInt(nodeList[0].getAttribute("dimension"));
                var j, x, y, z;
                for(var i=0; i<coords.length/dim; ++i) {
                    j = i * dim;
                    x = coords[j];
                    y = coords[j+1];
                    z = (dim == 2) ? null : coords[j+2];
                    // honour the configured axis order (see the xy property)
                    if (this.xy) {
                        points.push(new OpenLayers.Geometry.Point(x, y, z));
                    } else {
                        points.push(new OpenLayers.Geometry.Point(y, x, z));
                    }
                }
            }

            // look for <gml:coordinates> (GML 2 style; comma-separated
            // tuples divided by whitespace)
            if(coords.length == 0) {
                nodeList = this.getElementsByTagNameNS(node, this.gmlns,
                                                       "coordinates");
                if(nodeList.length > 0) {
                    coordString = this.getChildValue(nodeList[0]);
                    coordString = coordString.replace(this.regExes.trimSpace,
                                                      "");
                    coordString = coordString.replace(this.regExes.trimComma,
                                                      ",");
                    var pointList = coordString.split(this.regExes.splitSpace);
                    for(var i=0; i<pointList.length; ++i) {
                        coords = pointList[i].split(",");
                        // preserve third dimension (null when absent)
                        if(coords.length == 2) {
                            coords[2] = null;
                        }
                        if (this.xy) {
                            points.push(new OpenLayers.Geometry.Point(coords[0],
                                                                     coords[1],
                                                                     coords[2]));
                        } else {
                            points.push(new OpenLayers.Geometry.Point(coords[1],
                                                                     coords[0],
                                                                     coords[2]));
                        }
                    }
                }
            }

            var line = null;
            if(points.length != 0) {
                if(ring) {
                    line = new OpenLayers.Geometry.LinearRing(points);
                } else {
                    line = new OpenLayers.Geometry.LineString(points);
                }
            }
            return line;
        },
/**
* Method: parseGeometry.multilinestring
* Given a GML node representing a multilinestring geometry, create an
* OpenLayers multilinestring geometry.
*
* Parameters:
* node - {DOMElement} A GML node.
*
* Returns:
* {<OpenLayers.Geometry.MultiLineString>} A multilinestring geometry.
*/
        multilinestring: function(node) {
            // Collect every <gml:LineString> descendant and parse each one.
            var nodeList = this.getElementsByTagNameNS(node, this.gmlns,
                                                       "LineString");
            var components = [];
            if(nodeList.length > 0) {
                var line;
                for(var i=0; i<nodeList.length; ++i) {
                    line = this.parseGeometry.linestring.apply(this,
                                                               [nodeList[i]]);
                    // linestring() returns null when no coordinates were found
                    if(line) {
                        components.push(line);
                    }
                }
            }
            return new OpenLayers.Geometry.MultiLineString(components);
        },
/**
* Method: parseGeometry.polygon
* Given a GML node representing a polygon geometry, create an
* OpenLayers polygon geometry.
*
* Parameters:
* node - {DOMElement} A GML node.
*
* Returns:
* {<OpenLayers.Geometry.Polygon>} A polygon geometry.
*/
        polygon: function(node) {
            // Each boundary of the polygon is encoded as a <gml:LinearRing>.
            var nodeList = this.getElementsByTagNameNS(node, this.gmlns,
                                                       "LinearRing");
            var components = [];
            if(nodeList.length > 0) {
                // this assumes exterior ring first, inner rings after
                var ring;
                for(var i=0; i<nodeList.length; ++i) {
                    // Rings are parsed as linestrings with the ring flag set.
                    ring = this.parseGeometry.linestring.apply(this,
                                                        [nodeList[i], true]);
                    if(ring) {
                        components.push(ring);
                    }
                }
            }
            return new OpenLayers.Geometry.Polygon(components);
        },
/**
* Method: parseGeometry.multipolygon
* Given a GML node representing a multipolygon geometry, create an
* OpenLayers multipolygon geometry.
*
* Parameters:
* node - {DOMElement} A GML node.
*
* Returns:
* {<OpenLayers.Geometry.MultiPolygon>} A multipolygon geometry.
*/
        multipolygon: function(node) {
            // Collect every <gml:Polygon> descendant and parse each one.
            var nodeList = this.getElementsByTagNameNS(node, this.gmlns,
                                                       "Polygon");
            var components = [];
            if(nodeList.length > 0) {
                var polygon;
                for(var i=0; i<nodeList.length; ++i) {
                    polygon = this.parseGeometry.polygon.apply(this,
                                                               [nodeList[i]]);
                    if(polygon) {
                        components.push(polygon);
                    }
                }
            }
            return new OpenLayers.Geometry.MultiPolygon(components);
        },
envelope: function(node) {
var components = [];
var coordString;
var envelope;
var lpoint = this.getElementsByTagNameNS(node, this.gmlns, "lowerCorner");
if (lpoint.length > 0) {
var coords = [];
if(lpoint.length > 0) {
coordString = lpoint[0].firstChild.nodeValue;
coordString = coordString.replace(this.regExes.trimSpace, "");
coords = coordString.split(this.regExes.splitSpace);
}
if(coords.length == 2) {
coords[2] = null;
}
if (this.xy) {
var lowerPoint = new OpenLayers.Geometry.Point(coords[0], coords[1],coords[2]);
} else {
var lowerPoint = new OpenLayers.Geometry.Point(coords[1], coords[0],coords[2]);
}
}
var upoint = this.getElementsByTagNameNS(node, this.gmlns, "upperCorner");
if (upoint.length > 0) {
var coords = [];
if(upoint.length > 0) {
coordString = upoint[0].firstChild.nodeValue;
coordString = coordString.replace(this.regExes.trimSpace, "");
coords = coordString.split(this.regExes.splitSpace);
}
if(coords.length == 2) {
coords[2] = null;
}
if (this.xy) {
var upperPoint = new OpenLayers.Geometry.Point(coords[0], coords[1],coords[2]);
} else {
var upperPoint = new OpenLayers.Geometry.Point(coords[1], coords[0],coords[2]);
}
}
if (lowerPoint && upperPoint) {
components.push(new OpenLayers.Geometry.Point(lowerPoint.x, lowerPoint.y));
components.push(new OpenLayers.Geometry.Point(upperPoint.x, lowerPoint.y));
components.push(new OpenLayers.Geometry.Point(upperPoint.x, upperPoint.y));
components.push(new OpenLayers.Geometry.Point(lowerPoint.x, upperPoint.y));
components.push(new OpenLayers.Geometry.Point(lowerPoint.x, lowerPoint.y));
var ring = new OpenLayers.Geometry.LinearRing(components);
envelope = new OpenLayers.Geometry.Polygon([ring]);
}
return envelope;
},
/**
* Method: parseGeometry.box
* Given a GML node representing a box geometry, create an
* OpenLayers.Bounds.
*
* Parameters:
* node - {DOMElement} A GML node.
*
* Returns:
* {<OpenLayers.Bounds>} A bounds representing the box.
*/
        box: function(node) {
            // A gml:Box carries exactly two space-separated coordinate
            // pairs: "x1,y1 x2,y2".
            var nodeList = this.getElementsByTagNameNS(node, this.gmlns,
                                                       "coordinates");
            var coordString;
            var coords, beginPoint = null, endPoint = null;
            if (nodeList.length > 0) {
                coordString = nodeList[0].firstChild.nodeValue;
                coords = coordString.split(" ");
                if (coords.length == 2) {
                    beginPoint = coords[0].split(",");
                    endPoint = coords[1].split(",");
                }
            }
            // Returns undefined (implicitly) when the node has no valid
            // coordinate pairs.
            if (beginPoint !== null && endPoint !== null) {
                return new OpenLayers.Bounds(parseFloat(beginPoint[0]),
                                            parseFloat(beginPoint[1]),
                                            parseFloat(endPoint[0]),
                                            parseFloat(endPoint[1]) );
            }
        }
},
/**
* Method: parseAttributes
*
* Parameters:
* node - {DOMElement}
*
* Returns:
* {Object} An attributes object.
*/
    parseAttributes: function(node) {
        var attributes = {};
        // assume attributes are children of the first type 1 child
        var childNode = node.firstChild;
        var children, i, child, grandchildren, grandchild, name, value;
        while(childNode) {
            if(childNode.nodeType == 1) {
                // attributes are type 1 children with one type 3 child
                children = childNode.childNodes;
                for(i=0; i<children.length; ++i) {
                    child = children[i];
                    if(child.nodeType == 1) {
                        grandchildren = child.childNodes;
                        if(grandchildren.length == 1) {
                            grandchild = grandchildren[0];
                            // accept text (3) and CDATA (4) nodes as values
                            if(grandchild.nodeType == 3 ||
                               grandchild.nodeType == 4) {
                                // strip the namespace prefix from the name
                                name = (child.prefix) ?
                                        child.nodeName.split(":")[1] :
                                        child.nodeName;
                                value = grandchild.nodeValue.replace(
                                                this.regExes.trimSpace, "");
                                attributes[name] = value;
                            }
                        } else {
                            // If child has no childNodes (grandchildren),
                            // set an attribute with null value.
                            // e.g. <prefix:fieldname/> becomes
                            // {fieldname: null}
                            attributes[child.nodeName.split(":").pop()] = null;
                        }
                    }
                }
                // only the first element child is inspected
                break;
            }
            childNode = childNode.nextSibling;
        }
        return attributes;
    },
/**
* APIMethod: write
* Generate a GML document string given a list of features.
*
* Parameters:
* features - {Array(<OpenLayers.Feature.Vector>)} List of features to
* serialize into a string.
*
* Returns:
* {String} A string representing the GML document.
*/
    write: function(features) {
        // accept a single feature as well as an array of features
        if(!(OpenLayers.Util.isArray(features))) {
            features = [features];
        }
        // wrap the serialized features in a WFS collection element
        var gml = this.createElementNS("http://www.opengis.net/wfs",
                                       "wfs:" + this.collectionName);
        for(var i=0; i<features.length; i++) {
            gml.appendChild(this.createFeatureXML(features[i]));
        }
        return OpenLayers.Format.XML.prototype.write.apply(this, [gml]);
    },
/**
* Method: createFeatureXML
* Accept an OpenLayers.Feature.Vector, and build a GML node for it.
*
* Parameters:
* feature - {<OpenLayers.Feature.Vector>} The feature to be built as GML.
*
* Returns:
* {DOMElement} A node reprensting the feature in GML.
*/
    createFeatureXML: function(feature) {
        // serialize the geometry into its container element
        var geometry = feature.geometry;
        var geometryNode = this.buildGeometryNode(geometry);
        var geomContainer = this.createElementNS(this.featureNS,
                                                 this.featurePrefix + ":" +
                                                 this.geometryName);
        geomContainer.appendChild(geometryNode);
        var featureNode = this.createElementNS(this.gmlns,
                                               "gml:" + this.featureName);
        var featureContainer = this.createElementNS(this.featureNS,
                                                    this.featurePrefix + ":" +
                                                    this.layerName);
        // prefer the persistent fid; fall back to the client-side id
        var fid = feature.fid || feature.id;
        featureContainer.setAttribute("fid", fid);
        featureContainer.appendChild(geomContainer);
        // append one element per feature attribute, stripping any
        // namespace prefix from the attribute name
        for(var attr in feature.attributes) {
            var attrText = this.createTextNode(feature.attributes[attr]);
            var nodename = attr.substring(attr.lastIndexOf(":") + 1);
            var attrContainer = this.createElementNS(this.featureNS,
                                                     this.featurePrefix + ":" +
                                                     nodename);
            attrContainer.appendChild(attrText);
            featureContainer.appendChild(attrContainer);
        }
        featureNode.appendChild(featureContainer);
        return featureNode;
    },
/**
* APIMethod: buildGeometryNode
*/
    buildGeometryNode: function(geometry) {
        // reproject a clone so the caller's geometry is left untouched
        if (this.externalProjection && this.internalProjection) {
            geometry = geometry.clone();
            geometry.transform(this.internalProjection,
                               this.externalProjection);
        }
        // dispatch on the geometry class name, e.g.
        // "OpenLayers.Geometry.MultiPoint" -> buildGeometry.multipoint
        var className = geometry.CLASS_NAME;
        var type = className.substring(className.lastIndexOf(".") + 1);
        var builder = this.buildGeometry[type.toLowerCase()];
        return builder.apply(this, [geometry]);
    },
/**
* Property: buildGeometry
* Object containing methods to do the actual geometry node building
* based on geometry type.
*/
    buildGeometry: {
        // TBD retrieve the srs from layer
        // srsName is non-standard, so not including it until it's right.
        // gml.setAttribute("srsName",
        //                  "http://www.opengis.net/gml/srs/epsg.xml#4326");
        /**
         * Method: buildGeometry.point
         * Given an OpenLayers point geometry, create a GML point.
         *
         * Parameters:
         * geometry - {<OpenLayers.Geometry.Point>} A point geometry.
         *
         * Returns:
         * {DOMElement} A GML point node.
         */
        point: function(geometry) {
            var gml = this.createElementNS(this.gmlns, "gml:Point");
            gml.appendChild(this.buildCoordinatesNode(geometry));
            return gml;
        },
        /**
         * Method: buildGeometry.multipoint
         * Given an OpenLayers multipoint geometry, create a GML multipoint.
         *
         * Parameters:
         * geometry - {<OpenLayers.Geometry.MultiPoint>} A multipoint geometry.
         *
         * Returns:
         * {DOMElement} A GML multipoint node.
         */
        multipoint: function(geometry) {
            var gml = this.createElementNS(this.gmlns, "gml:MultiPoint");
            var points = geometry.components;
            var pointMember, pointGeom;
            // wrap each point in a <gml:pointMember> element
            for(var i=0; i<points.length; i++) {
                pointMember = this.createElementNS(this.gmlns,
                                                   "gml:pointMember");
                pointGeom = this.buildGeometry.point.apply(this,
                                                           [points[i]]);
                pointMember.appendChild(pointGeom);
                gml.appendChild(pointMember);
            }
            return gml;
        },
        /**
         * Method: buildGeometry.linestring
         * Given an OpenLayers linestring geometry, create a GML linestring.
         *
         * Parameters:
         * geometry - {<OpenLayers.Geometry.LineString>} A linestring geometry.
         *
         * Returns:
         * {DOMElement} A GML linestring node.
         */
        linestring: function(geometry) {
            var gml = this.createElementNS(this.gmlns, "gml:LineString");
            gml.appendChild(this.buildCoordinatesNode(geometry));
            return gml;
        },
        /**
         * Method: buildGeometry.multilinestring
         * Given an OpenLayers multilinestring geometry, create a GML
         *     multilinestring.
         *
         * Parameters:
         * geometry - {<OpenLayers.Geometry.MultiLineString>} A multilinestring
         *     geometry.
         *
         * Returns:
         * {DOMElement} A GML multilinestring node.
         */
        multilinestring: function(geometry) {
            var gml = this.createElementNS(this.gmlns, "gml:MultiLineString");
            var lines = geometry.components;
            var lineMember, lineGeom;
            // wrap each linestring in a <gml:lineStringMember> element
            for(var i=0; i<lines.length; ++i) {
                lineMember = this.createElementNS(this.gmlns,
                                                  "gml:lineStringMember");
                lineGeom = this.buildGeometry.linestring.apply(this,
                                                               [lines[i]]);
                lineMember.appendChild(lineGeom);
                gml.appendChild(lineMember);
            }
            return gml;
        },
        /**
         * Method: buildGeometry.linearring
         * Given an OpenLayers linearring geometry, create a GML linearring.
         *
         * Parameters:
         * geometry - {<OpenLayers.Geometry.LinearRing>} A linearring geometry.
         *
         * Returns:
         * {DOMElement} A GML linearring node.
         */
        linearring: function(geometry) {
            var gml = this.createElementNS(this.gmlns, "gml:LinearRing");
            gml.appendChild(this.buildCoordinatesNode(geometry));
            return gml;
        },
        /**
         * Method: buildGeometry.polygon
         * Given an OpenLayers polygon geometry, create a GML polygon.
         *
         * Parameters:
         * geometry - {<OpenLayers.Geometry.Polygon>} A polygon geometry.
         *
         * Returns:
         * {DOMElement} A GML polygon node.
         */
        polygon: function(geometry) {
            var gml = this.createElementNS(this.gmlns, "gml:Polygon");
            var rings = geometry.components;
            var ringMember, ringGeom, type;
            // first ring is the exterior boundary, the rest are holes
            for(var i=0; i<rings.length; ++i) {
                type = (i==0) ? "outerBoundaryIs" : "innerBoundaryIs";
                ringMember = this.createElementNS(this.gmlns,
                                                  "gml:" + type);
                ringGeom = this.buildGeometry.linearring.apply(this,
                                                               [rings[i]]);
                ringMember.appendChild(ringGeom);
                gml.appendChild(ringMember);
            }
            return gml;
        },
        /**
         * Method: buildGeometry.multipolygon
         * Given an OpenLayers multipolygon geometry, create a GML multipolygon.
         *
         * Parameters:
         * geometry - {<OpenLayers.Geometry.MultiPolygon>} A multipolygon
         *     geometry.
         *
         * Returns:
         * {DOMElement} A GML multipolygon node.
         */
        multipolygon: function(geometry) {
            var gml = this.createElementNS(this.gmlns, "gml:MultiPolygon");
            var polys = geometry.components;
            var polyMember, polyGeom;
            // wrap each polygon in a <gml:polygonMember> element
            for(var i=0; i<polys.length; ++i) {
                polyMember = this.createElementNS(this.gmlns,
                                                  "gml:polygonMember");
                polyGeom = this.buildGeometry.polygon.apply(this,
                                                            [polys[i]]);
                polyMember.appendChild(polyGeom);
                gml.appendChild(polyMember);
            }
            return gml;
        },
        /**
         * Method: buildGeometry.bounds
         * Given an OpenLayers bounds, create a GML box.
         *
         * Parameters:
         * bounds - {<OpenLayers.Geometry.Bounds>} A bounds object.
         *
         * Returns:
         * {DOMElement} A GML box node.
         */
        bounds: function(bounds) {
            var gml = this.createElementNS(this.gmlns, "gml:Box");
            gml.appendChild(this.buildCoordinatesNode(bounds));
            return gml;
        }
    },
/**
* Method: buildCoordinates
* builds the coordinates XmlNode
* (code)
* <gml:coordinates decimal="." cs="," ts=" ">...</gml:coordinates>
* (end)
*
* Parameters:
* geometry - {<OpenLayers.Geometry>}
*
* Returns:
* {XmlNode} created xmlNode
*/
    buildCoordinatesNode: function(geometry) {
        var coordinatesNode = this.createElementNS(this.gmlns,
                                                   "gml:coordinates");
        // declare decimal separator, coordinate separator and tuple separator
        coordinatesNode.setAttribute("decimal", ".");
        coordinatesNode.setAttribute("cs", ",");
        coordinatesNode.setAttribute("ts", " ");
        var parts = [];
        if(geometry instanceof OpenLayers.Bounds){
            // a bounds serializes as its two opposite corners
            parts.push(geometry.left + "," + geometry.bottom);
            parts.push(geometry.right + "," + geometry.top);
        } else {
            // a bare point has no components array; treat it as one point
            var points = (geometry.components) ? geometry.components : [geometry];
            for(var i=0; i<points.length; i++) {
                parts.push(points[i].x + "," + points[i].y);
            }
        }
        var txtNode = this.createTextNode(parts.join(" "));
        coordinatesNode.appendChild(txtNode);
        return coordinatesNode;
    },
CLASS_NAME: "OpenLayers.Format.GML"
}); | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/grid/enhanced/nls/ro/Filter.js | define(
// Romanian (ro) resource bundle for the EnhancedGrid Filter plugin.
// Values are user-visible translated strings; keys must match the
// default-locale bundle and must not be renamed.
"dojox/grid/enhanced/nls/ro/Filter", //begin v1.x content
({
	"clearFilterDialogTitle": "Ştergere filtru",
	"filterDefDialogTitle": "Filtru",
	"ruleTitleTemplate": "Regulă ${0}",
	"conditionEqual": "egal",
	"conditionNotEqual": "nu este egal",
	"conditionLess": "este mai mic decât",
	"conditionLessEqual": "mai mic sau egal",
	"conditionLarger": "este mai mare decât",
	"conditionLargerEqual": "mai mare sau egal",
	"conditionContains": "conţine",
	"conditionIs": "este",
	"conditionStartsWith": "începe cu",
	"conditionEndWith": "se termină cu",
	"conditionNotContain": "nu conţine",
	"conditionIsNot": "nu este",
	"conditionNotStartWith": "nu începe cu",
	"conditionNotEndWith": "nu se termină cu",
	"conditionBefore": "înaintea",
	"conditionAfter": "după",
	"conditionRange": "interval",
	"conditionIsEmpty": "este gol",
	"all": "toate",
	"any": "oricare",
	"relationAll": "toate regulile",
	"waiRelAll": "Răspundeţi tuturor regulilor următoare:",
	"relationAny": "oricare reguli",
	"waiRelAny": "Răspundeţi oricărei dintre regulile următoare:",
	"relationMsgFront": "Răspuns",
	"relationMsgTail": "",
	"and": "şi",
	"or": "sau",
	"addRuleButton": "Adăugare regulă",
	"waiAddRuleButton": "Adăugare regulă nouă",
	"removeRuleButton": "Înlăturare regulă",
	"waiRemoveRuleButtonTemplate": "Înlăturare regulă ${0}",
	"cancelButton": "Anulare",
	"waiCancelButton": "Anulaţi acest dialog",
	"clearButton": "Ştergere",
	"waiClearButton": "Ştergeţi filtrul",
	"filterButton": "Filtru",
	"waiFilterButton": "Lansaţi în execuţie filtrul",
	"columnSelectLabel": "Coloană",
	"waiColumnSelectTemplate": "Coloană pentru regulă ${0}",
	"conditionSelectLabel": "Condiţie",
	"waiConditionSelectTemplate": "Condiţie pentru regula ${0}",
	"valueBoxLabel": "Valoare",
	"waiValueBoxTemplate": "Introduceţi valoarea pentru filtrarea pentru regulă ${0}",
	"rangeTo": "la",
	"rangeTemplate": "din ${0} la ${1}",
	"statusTipHeaderColumn": "Coloană",
	"statusTipHeaderCondition": "Reguli",
	"statusTipTitle": "Bară de filtru",
	"statusTipMsg": "Faceţi clic pe bara de filtru aici pentru a filtra valorile în ${0}.",
	"anycolumn": "orice coloană",
	"statusTipTitleNoFilter": "Bară de filtru",
	"statusTipTitleHasFilter": "Filtru",
	"statusTipRelAny": "Potrivire orice regulă.",
	"statusTipRelAll": "Potrivire toate regulile.",
	"defaultItemsName": "articole",
	"filterBarMsgHasFilterTemplate": "${0} din ${1} ${2} afişate.",
	"filterBarMsgNoFilterTemplate": "Niciun filtru nu este aplicat",
	"filterBarDefButton": "Definire filtru",
	"waiFilterBarDefButton": "Filtrare tabelă",
	"a11yFilterBarDefButton": "Filtru...",
	"filterBarClearButton": "Ştergere filtru",
	"waiFilterBarClearButton": "Ştergeţi filtrul",
	"closeFilterBarBtn": "Închidere bară de filtru",
	"clearFilterMsg": "Aceasta va înlătura filtrul şi va afişa toate înregistrările disponibile.",
	"anyColumnOption": "Orice coloană",
	"trueLabel": "Adevărat",
	"falseLabel": "Fals"
})
//end v1.x content
);
/Deeplodocus-0.3.0-py3-none-any.whl/deeplodocus/utils/generic_utils.py | import os
from deeplodocus.utils import get_main_path
import re
import pkgutil
import random
import string
from typing import List
from typing import Union
from typing import Optional
# Import deeplodocus flags
from deeplodocus.flags.load_as import *
from deeplodocus.flags.ext import *
from deeplodocus.flags.notif import *
# Import deeplodocus utils
from deeplodocus.utils.namespace import Namespace
from deeplodocus.utils.notification import Notification
def convert(value, d_type=None):
    """
    Convert a value or list of values to data type in order of preference: (float, bool, str)

    :param value: value to convert
    :param d_type: data type to convert to (None to infer; a type; a dict
        template converted key by key; or a one-element list template applied
        to every item)
    :return: converted value, or None if the conversion failed
    """
    if value is None:
        return None
    elif d_type is None:
        if isinstance(value, list):
            # Convert each element independently
            return [convert(item) for item in value]
        # Numeric first: whole-valued floats become ints
        new_value = convert2float(value)
        if new_value is not None:
            if round(new_value, 0) == new_value:
                return int(new_value)
            else:
                return new_value
        # Bug fix: the original tested the float-converted value (a float
        # or None) for boolean strings, so "true"/"false" were never
        # recognised; test the raw input instead.
        if isinstance(value, str) and value.lower() in ("true", "false"):
            return value.lower() == "true"
        return str(value)
    elif d_type is str:
        return str(value)
    elif d_type is int:
        return convert2int(value)
    elif d_type is float:
        return convert2float(value)
    elif d_type is bool:
        return convert2bool(value)
    elif d_type is dict:
        try:
            return convert_namespace(value)
        except AttributeError:
            return None
    elif isinstance(d_type, dict):
        # d_type is a template: convert each of its keys from value
        new_value = {}
        for key, item in d_type.items():
            try:
                new_value[key] = convert(value[key], d_type=item)
            except KeyError:
                # Missing keys are filled with None
                new_value[key] = None
            except TypeError:
                return None
        return Namespace(new_value)
    elif isinstance(d_type, list):
        # d_type is a one-element template applied to every item; a scalar
        # value is promoted to a single-element list first
        value = value if isinstance(value, list) else [value]
        new_value = []
        for item in value:
            new_item = convert(item, d_type[0])
            if new_item is None:
                # Any failed item invalidates the whole list
                return None
            new_value.append(new_item)
        return new_value
def convert2int(value):
    """
    Coerce value to int, first evaluating string expressions (e.g. "2+2"),
    then falling back to a plain int() cast.

    NOTE(review): eval() is used on the raw value; only feed this trusted
    configuration input.

    :param value: value to convert
    :return: the integer, or None if conversion failed
    """
    for caster in (lambda v: int(eval(v)), int):
        try:
            return caster(value)
        except (ValueError, TypeError, SyntaxError, NameError):
            continue
    return None
def convert2float(value):
    """
    Coerce value to float, first evaluating string expressions (e.g. "1/2"),
    then falling back to a plain float() cast.

    NOTE(review): eval() is used on the raw value; only feed this trusted
    configuration input.

    :param value: value to convert
    :return: the float, or None if conversion failed
    """
    for caster in (lambda v: float(eval(v)), float):
        try:
            return caster(value)
        except (ValueError, TypeError, SyntaxError, NameError):
            continue
    return None
def convert2bool(value):
    """
    Coerce value to bool.

    Strings "true"/"false" (any case, surrounding whitespace ignored) map to
    their boolean value; any other value falls back to Python truthiness.

    Bug fix: previously bool(value) was applied to everything, so the
    string "false" converted to True.

    :param value: value to convert
    :return: the boolean, or None if conversion failed
    """
    if isinstance(value, str):
        lowered = value.strip().lower()
        if lowered == "true":
            return True
        if lowered == "false":
            return False
    try:
        return bool(value)
    except TypeError:
        return None
def list_namespace2list_dict(namespaces: List[Namespace]) -> List[dict]:
    """
    Convert a list of Namespace objects to a list of plain dicts, in order.

    :param namespaces: the Namespace objects to convert
    :return: one dict per namespace, as returned by get_all()
    """
    return [namespace.get_all() for namespace in namespaces]
def convert_namespace(namespace):
    """
    AUTHORS:
    --------

    :author: Samuel Westlake

    DESCRIPTION:
    ------------

    Converts each value in a namespace to the most appropriate data type

    PARAMETERS:
    -----------

    :param namespace: a given namespace to convert the values of

    RETURN:
    -------

    :return: the namespace with each value converted to a sensible data type
    """
    # Mutate the namespace's backing dict in place, value by value
    contents = namespace.get()
    for key in contents:
        contents[key] = convert(contents[key])
    return namespace
def sorted_nicely(l):
    """ Sorts the given iterable in the way that is expected.

    Required arguments:
    l -- The iterable to be sorted.
    """
    def alphanum_key(key):
        # Split into digit / non-digit runs so that "item10" sorts after
        # "item2" (natural sort), comparing the digit runs numerically.
        return [int(part) if part.isdigit() else part
                for part in re.split('([0-9]+)', key)]
    return sorted(l, key=alphanum_key)
def is_string_an_integer(string: str) -> bool:
    """
    AUTHORS:
    --------

    :author: Alix Leroy

    DESCRIPTION:
    ------------

    Check whether a string is an integer or not

    PARAMETERS:
    -----------

    :param string(str): The string to analyze

    RETURN:
    -------

    :return (bool): Whether the string is an integer or not
    """
    try:
        int(string)
    except ValueError:
        return False
    return True
def get_int_or_float(data):
    """
    AUTHORS:
    --------

    author: Alix Leroy

    DESCRIPTION:
    ------------

    Check whether the data is an integer or a float

    PARAMETERS:
    -----------

    :param data: The data to check

    RETURN:
    -------

    :return: The integer flag of the corresponding type or False if the data isn't a number
    """
    # Sequences are never numbers
    if isinstance(data, (list, tuple)):
        return False
    try:
        as_float = float(data)
        # int(nan) raises ValueError, which is treated as "not a number"
        as_int = int(as_float)
    except ValueError:
        return False
    return DEEP_LOAD_AS_INTEGER if as_float == as_int else DEEP_LOAD_AS_FLOAT
def is_np_array(data):
    """
    AUTHORS:
    --------

    author: Alix Leroy

    DESCRIPTION:
    ------------

    Check whether the data refers to a numpy array file (.npy / .npz) or not

    PARAMETERS:
    -----------

    :param data: The data to check (expected to be a file name string)

    RETURN:
    -------

    :return (bool): Whether the data refers to a numpy array file or not
    """
    # Bug fix: the original used a bare except and fell through with an
    # implicit None for non-matching strings; this always returns a bool
    # and only swallows the "not a string" case.
    try:
        # str.endswith accepts a tuple, testing both extensions in one call
        return data.endswith((DEEP_EXT_NPY, DEEP_EXT_NPZ))
    except AttributeError:
        # data has no endswith method, so it cannot be a file name
        return False
def get_specific_module(name, module, silence=False, fatal=False):
    """
    Author: Samuel Westlake
    :param module: str: path to the module (separated by '.')
    :param name: str: name of the item to be imported
    :param silence: bool: if True, do not notify on ImportError
    :param fatal: bool: if True, a failed import raises a fatal notification
    :return: the imported item, or None if the import failed
    """
    # The import is executed in an isolated local namespace so the looked-up
    # name does not leak into (or read from) this function's scope.
    local = {"module": None}
    try:
        exec("from %s import %s\nmodule = %s" % (module, name, name), {}, local)
    except ImportError as e:
        if not silence:
            notif = DEEP_NOTIF_FATAL if fatal else DEEP_NOTIF_WARNING
            # Capitalize first letter only (e.capitalize() seems to make subsequent letters lowercase)
            e = str(e)[0].capitalize() + str(e)[1:]
            Notification(notif, e)
    return local["module"]
def get_module(name: str, module=None, browse=None, silence=True, fatal=False) -> Union[callable, None]:
    """
    AUTHORS:
    --------

    :author: Alix Leroy
    :author: Samuel Westlake

    DESCRIPTION:
    ------------

    Get a module from either a direct import or a folder browsing

    PARAMETERS:
    -----------

    :param name: str: the name of the object to load
    :param module: str: the name of the specific module
    :param browse: dict: a DEEP_MODULE dictionary to browse through
    :param silence: bool: whether or not to print import errors
    :param fatal: bool: whether or not to raise DeepFatal on failure to find the module

    RETURN:
    -------

    :return: a (module, module_path) tuple; both elements are None when
        neither a specific module nor a browse dictionary is given
    """
    # A specific module path takes precedence over browsing
    if module is not None:
        return get_specific_module(name, module, silence=silence, fatal=fatal), module
    elif browse is not None:
        return browse_module(name, browse, silence=silence, fatal=fatal)
    else:
        return None, None
def browse_module(name, modules, silence=False, fatal=False) -> callable:
    """
    AUTHORS:
    --------

    :author: Alix Leroy

    DESCRIPTION:
    ------------

    Load the desired module
    Works with any python module, deeplodocus module or custom module

    NOTE: Consider the name of the callable to be unique to avoid conflict

    PARAMETERS:
    -----------

    :param modules: dict: maps module names to {"path": ..., "prefix": ...}
        search locations to walk
    :param name: The name of the callable

    RETURN:
    -------

    :return (callable, str): The loaded module and its correponding module path
        (both None when the name is found nowhere; the user is prompted when
        several distinct candidates are found)
    """
    list_modules = []
    # For all the given modules
    for key, value in modules.items():
        # For all the sub-modules available in the main module
        for importer, module_path, ispkg in pkgutil.walk_packages(
                path=value["path"],
                prefix=value["prefix"] + '.',
                onerror=lambda x: None
        ):
            #
            # Fix the loading a of useless torch module(temporary)
            #
            # ISSUE WITH TORCH 1.0.0
            if module_path == "torch.nn.parallel.distributed_c10d":
                continue
            # ISSUE WITH TORCH 1.2.0
            elif module_path =="torch.nn._intrinsic.qat" or module_path == "torch.nn.qat":
                continue
            # Try to get the module
            module = get_specific_module(name, module_path, silence=silence, fatal=fatal)
            # If the module exists add it to the list
            if module is not None:
                list_modules.append({"module path": module_path, "module": module})
    # Remove duplicates (the same callable can be reachable via several paths)
    modules = []
    module_paths = []
    for item in list_modules:
        if item["module"] not in modules:
            modules.append(item["module"])
            module_paths.append(item["module path"])
    if not list_modules:
        return None, None
    elif len(modules) == 1:
        return modules[0], module_paths[0]
    else:
        # Several distinct candidates: ask the user to pick one
        module, module_path = select_module(
            name=name,
            modules=modules,
            module_paths=module_paths
        )
        return module, module_path
def select_module(name: str, modules: list, module_paths: list) -> callable:
    """
    AUTHORS:
    --------

    :author: Alix Leroy

    DESCRIPTION:
    ------------

    Select the desired module among a list of similar names

    PARAMETERS:
    -----------

    :param name: (str): Name of the module
    :param modules: list: List of modules to select from
    :param module_paths: list: list of module paths to select from
        (parallel to modules)

    RETURN:
    -------

    :return: The desired (module, module_path) pair chosen by the user
    """
    Notification(DEEP_NOTIF_WARNING, "The module '%s' was found in multiple locations :" % name)
    # Print the list of modules and their corresponding indices
    for i, path in enumerate(module_paths):
        Notification(DEEP_NOTIF_WARNING, "%i : %s from %s" % (i, name, path))
    # Prompt the user to pick on from the list
    response = -1
    # Keep asking until a valid index is entered
    while response < 0 or response >= len(modules):
        response = Notification(DEEP_NOTIF_INPUT, "Which one would you prefer to use ? (Pick a number)").get()
        # Check if the response is an integer
        if is_string_an_integer(response) is False:
            response = -1
        else:
            response = int(response)
    return modules[response], module_paths[response]
def generate_random_alphanumeric(size: int = 16) -> str:
    """
    AUTHORS:
    --------

    :author: Alix Leroy

    DESCRIPTION:
    ------------

    Generate a string of alphanumeric characters of a specific size
    The default size is 16 characters

    NOTE(review): this uses the non-cryptographic random module; do not use
    it for security-sensitive tokens.

    PARAMETERS:
    -----------

    :param size: int: The size of the alphanumeric string

    RETURN:
    -------

    :return (string): The random alphanumeric string
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=size))
def get_corresponding_flag(
        flag_list: List[Flag],
        info: Union[str, int, Flag],
        fatal: bool = True,
        default: Optional[Flag] = None
) -> Union[Flag, None]:
    """
    AUTHORS:
    --------

    :author: Alix Leroy

    DESCRIPTION:
    ------------

    Browse the wanted flag among a list
    If no flag corresponds, raise a DeepError

    PARAMETERS:
    -----------

    :param flag_list: (List[Flag]): The list of flag to browse in
    :param info: (Union[str, int, Flag]): Info (name, index or full Flag) of the flag to search
    :param fatal: (bool, Optional): Whether to raise a DeepError if no flag is found or not
        (only consulted when no default is given)
    :param default: (Flag, Optional) : The default flag to use if no flag is found

    RETURN:
    -------

    :return : The corresponding flag, the default, or None (when not found,
        a default is not given and fatal is False)
    """
    # Search in the flag list
    for flag in flag_list:
        if flag.corresponds(info=info) is True:
            return flag
    # If no flag is found
    if default is not None:
        # Fall back to the default, warning the user about the substitution
        Notification(DEEP_NOTIF_WARNING,
                     "The following flag does not exist : %s, the default one %s has been selected instead"
                     % (str(info), default.get_description()))
        return default
    # If no default
    if fatal is True:
        Notification(DEEP_NOTIF_FATAL, "No flag with the info '%s' was found in the following list : %s"
                     % (str(info), str([flag.description for flag in flag_list])))
    else:
        return None
/LabExT_pkg-2.2.0.tar.gz/LabExT_pkg-2.2.0/LabExT/View/Controls/DriverPathDialog.py | import json
import os
import logging
from pathlib import Path
from tkinter import Toplevel, Label, Button, Entry, messagebox
from LabExT.Utils import get_configuration_file_path
from LabExT.View.Controls.CustomFrame import CustomFrame
class DriverPathDialog(Toplevel):
    """
    Dialog to change a driver path to be saved in a specified file.

    The path is persisted as JSON in a file inside the LabExT settings
    folder; path_has_changed tells the caller whether a new value was saved.
    """

    def __init__(
        self,
        parent,
        settings_file_path,
        title=None,
        label=None,
        hint=None
    ) -> None:
        """
        Constructor.

        settings_file_path must be relative to the LabExT settings folder.
        """
        Toplevel.__init__(self, parent)
        self.title(title)

        self.logger = logging.getLogger()

        # Texts shown in the dialog (frame title and explanatory hint)
        self._label = label
        self._hint = hint

        # Absolute path of the JSON file storing the driver path
        self._settings_file_path = get_configuration_file_path(
            settings_file_path)
        # Cached driver path; lazily read from file by the property getter
        self._driver_path = None
        # Set to True only after a new path was successfully saved
        self._path_has_changed = False

        self._driver_path_entry = None

        self.__setup__()

    def __setup__(self):
        """
        Builds Dialog.
        """
        self.rowconfigure(1, weight=1)
        self.columnconfigure(0, weight=1)

        path_frame = CustomFrame(self)
        path_frame.title = self._label
        path_frame.grid(row=1, column=0, padx=5, pady=5, sticky='nswe')
        path_frame.columnconfigure(0, weight=1)
        path_frame.rowconfigure(0, weight=1)

        Label(
            path_frame,
            text=self._hint
        ).grid(row=0, column=0, padx=5, pady=5, sticky='nswe')

        # Entry is pre-filled with the currently stored driver path
        self._driver_path_entry = Entry(path_frame)
        self._driver_path_entry.insert(0, self.driver_path)
        self._driver_path_entry.grid(
            row=1, column=0, padx=5, pady=5, sticky='nswe')

        self._cancel_button = Button(
            self,
            text="Discard and Close",
            command=self.destroy,
            width=30,
            height=1
        )
        self._cancel_button.grid(row=2, column=0, padx=5, pady=5, sticky='sw')

        self._save_button = Button(
            self,
            text="Save and Close",
            command=self._save,
            width=30,
            height=1
        )
        self._save_button.grid(row=2, column=0, padx=5, pady=5, sticky='se')

    def _save(self) -> None:
        """
        Callback, when user wants to save the Path.
        """
        if not self._driver_path_entry:
            return

        # Normalize the user input (strip whitespace, platform separators)
        user_given_path = str(self._driver_path_entry.get())
        self.driver_path = str(Path(user_given_path.strip()))

        self.destroy()

    @property
    def driver_path(self) -> str:
        """
        Returns current driver path.

        If None, the path is read from the settings file.
        """
        if not self._driver_path:
            self._driver_path = self._get_driver_path_from_file(
                default="/path/to/module")
        return self._driver_path

    @property
    def path_has_changed(self) -> bool:
        """
        Returns True, if driver path has changed and False otherwise
        """
        return self._path_has_changed

    @driver_path.setter
    def driver_path(self, path) -> None:
        """
        Saves the given driver path in the settings file if it is not equal to the current path.
        """
        if path == self.driver_path:
            return

        try:
            with open(self._settings_file_path, 'w') as f:
                json.dump(path, f)

            # Only mark as changed after the file write succeeded
            self._path_has_changed = True
            self._driver_path = path
        except Exception as e:
            messagebox.showerror(
                "Error", "Could not save driver path: {}".format(e))

    def _get_driver_path_from_file(
        self,
        default: str = None
    ) -> str:
        """
        Reads the current driver path from settings path.

        If file does not exists or is not readable,
        the default value will be returned.
        """
        if not os.path.exists(self._settings_file_path):
            return default

        try:
            with open(self._settings_file_path, 'r') as f:
                try:
                    return json.load(f)
                except ValueError as err:
                    # Malformed JSON: log and fall back to the default
                    self.logger.error(
                        f"Failed to load JSON from {self._settings_file_path}: {err}")
                    return default
        except IOError as err:
            self.logger.error(
                f"Failed to load driver settings file {self._settings_file_path}: {err}")
            return default
/Climind-0.1-py3-none-any.whl/climind/readers/reader_grace.py |
from pathlib import Path
import copy
import numpy as np
import pandas as pd
from typing import List
import climind.data_types.timeseries as ts
from climind.readers.generic_reader import get_last_modified_time
from climind.data_manager.metadata import CombinedMetadata
def find_latest(out_dir: Path, filename_with_wildcards: str) -> Path:
    """
    Return the lexicographically latest file in ``out_dir`` matching the
    pattern, after substituting ``*`` for the ``YYYYMMMM`` placeholder.

    :param out_dir: directory to search
    :param filename_with_wildcards: filename containing ``YYYYMMMM``
    :return: path of the latest matching file
    :raises FileNotFoundError: if no file matches the pattern
        (previously this raised an opaque IndexError)
    """
    pattern = filename_with_wildcards.replace('YYYYMMMM', '*')
    matches = sorted(out_dir.glob(pattern))
    if not matches:
        raise FileNotFoundError(
            f"No file matching {pattern} found in {out_dir}")
    return matches[-1]
def read_ts(out_dir: Path, metadata: CombinedMetadata, **kwargs):
    """Dispatch to the monthly or annual reader based on the metadata."""
    filename = find_latest(out_dir, metadata['filename'][0])
    construction_metadata = copy.deepcopy(metadata)
    construction_metadata.dataset['last_modified'] = [
        get_last_modified_time(filename)
    ]
    if metadata['type'] == 'timeseries':
        resolution = metadata['time_resolution']
        if resolution == 'monthly':
            return read_monthly_ts([filename], construction_metadata)
        if resolution == 'annual':
            return read_annual_ts([filename], construction_metadata)
        raise KeyError(f'That time resolution is not known: {metadata["time_resolution"]}')
def read_monthly_ts(filename: List[Path], metadata: CombinedMetadata, **kwargs):
    """
    Parse a GRACE data file into a monthly time series.

    :param filename: single-element list holding the path of the data file
    :param metadata: combined metadata used to construct the series
    :param kwargs: ``first_difference=True`` converts the series into
        month-on-month differences
    :return: `ts.TimeSeriesMonthly` with per-month uncertainties
    """
    # Fixed-size header in the GRACE file — assumed constant; TODO confirm
    # against the data provider's format description.
    lines_to_skip = 31
    if 'first_difference' in kwargs:
        first_diff = kwargs['first_difference']
    else:
        first_diff = False
    dates = []
    years = []
    months = []
    uncertainties = []
    data = []
    with open(filename[0], 'r') as in_file:
        for _ in range(lines_to_skip):
            in_file.readline()
        for line in in_file:
            columns = line.split()
            # Column 0 is a decimal year (e.g. 2005.5417)
            decimal_year = float(columns[0])
            year_int = int(decimal_year)
            # Approximate day-in-year from the fractional part of the year
            diny = 1 + int(365. * (decimal_year - year_int))
            month = int(np.rint(12. * (decimal_year - year_int) + 1.0))
            dates.append(f'{year_int} {diny:03d}')
            years.append(year_int)
            months.append(month)
            data.append(float(columns[1]))
            uncertainties.append(float(columns[2]))
    # Year/month used for the output series are re-derived from the
    # day-of-year dates, not the rounded month estimate above.
    dates = pd.to_datetime(dates, format='%Y %j')
    years2 = dates.year.tolist()
    months2 = dates.month.tolist()
    dico = {'year': years, 'month': months, 'data': data}
    df = pd.DataFrame(dico)
    if first_diff:
        # The first row becomes NaN by construction of diff()
        df['data'] = df.diff()['data']
    data = df['data'].values.tolist()
    metadata.creation_message()
    return ts.TimeSeriesMonthly(years2, months2, data, metadata=metadata, uncertainty=uncertainties)
def read_annual_ts(filename: List[Path], metadata: CombinedMetadata, **kwargs):
    """
    Read the monthly series and reduce it to annual values.

    The annual value is taken from month 8, as in the original reader.
    """
    monthly_series = read_monthly_ts(filename, metadata, **kwargs)
    return monthly_series.make_annual_by_selecting_month(8)
/JumpScale-core-6.0.0.tar.gz/JumpScale-core-6.0.0/lib/JumpScale/core/__init__.py | from JumpScale import j
import os
def _setup_stacktrace_hook():
    '''Set up SIGUSR2 signal handler which dumps stack traces of all threads'''
    # NOTE(review): this module targets Python 2 — see ``handler.func_code``
    # and ``output.next()`` below, which do not exist in Python 3.
    try:
        import signal
    except ImportError:
        # No signal support on current platform, ignore
        return
    sig = signal.SIGUSR2
    def stderr():
        '''Coroutine which writes input to sys.stderr and a dump file,
        /tmp/pm_<PID>.stack'''
        # Each sink is an (write, flush, close) triple; failures while
        # setting up or writing to a sink are deliberately ignored so the
        # handler degrades gracefully.
        outputs = list()
        try:
            import sys
            outputs.append((sys.stderr.write, sys.stderr.flush, lambda: None, ))
        except Exception:
            pass
        try:
            import os
            name = '/tmp/pm_%d.stack' % os.getpid()
            fd = open(name, 'w')
            outputs.append((fd.write, fd.flush, fd.close, ))
        except Exception:
            pass
        try:
            while True:
                message = yield
                if message is None:
                    break
                for write, flush, _ in outputs:
                    try:
                        write(message)
                        flush()
                    except Exception:
                        pass
        finally:
            for _, _, close in outputs:
                try:
                    close()
                except Exception:
                    pass
    def getframes(output, frame):
        '''Get a list of all current frames
        This function tries to use sys._current_frames to get a list of the
        frames of every running thread and their thread ID. If this function is
        not available, the given frame will be returned using the string
        '<unknown>' as thread ID.
        '''
        # NOTE(review): returns None when skipTraceback is set;
        # handler_impl below iterates the result without guarding for None.
        if j.application.skipTraceback:
            return None
        import sys
        # Using sys._current_frames for now
        # We could rewrite this using ctypes as well, see the implementation of
        # _PyThread_CurrentFrames at
        # http://svn.python.org/projects/python/trunk/Python/pystate.c
        current_frames = getattr(sys, '_current_frames', None)
        if not current_frames:
            output('Your system has no support to dump stacks of all threads\n')
            output('Only dumping interrupted frame\n')
            return (('<current>', frame, ), )
        else:
            return tuple(current_frames().items())
    def dump_proc_status(output):
        # Dump /proc/<pid>/status (Linux only) through the output coroutine.
        import os.path
        procfile = '/proc/%d/status' % os.getpid()
        if not os.path.exists(procfile):
            # File doesn't exist, we're not running on Linux or something alike
            return
        try:
            fd = open(procfile, 'r')
        except Exception:
            # No permissions or something alike?
            # Funny if a process would have no permission on its own status proc
            # file, but anyway, better safe than sorry
            return
        try:
            data = fd.read()
        finally:
            fd.close()
        output('Dumping content of %s\n' % procfile)
        output('\n')
        output(data)
        output('\n')
    def handler_impl(output, num, frame):
        '''Implementation of the signal handler
        This will be called inside a try/except clause so the signal handler
        behaves correctly.
        '''
        import traceback
        output('Got signal %s\n' % str(num))
        output('Dumping current stack frame(s)\n')
        frames = getframes(output, frame)
        output('\n')
        try:
            from threading import _active as active_threads
        except ImportError:
            active_threads = dict()
        for threadid, frame in frames:
            title = None
            if threadid in active_threads:
                try:
                    name = active_threads[threadid].getName()
                except Exception:
                    pass
                else:
                    if name:
                        title = 'Thread %s (%s)' % (name, str(threadid))
            if not title:
                title = 'Thread %s' % str(threadid)
            output('%s\n%s\n' % (title, '=' * len(title)))
            try:
                import thread
                get_ident = thread.get_ident
            except (ImportError, AttributeError):
                get_ident = lambda: object()
            # NOTE(review): 'ident' is assigned but never used; the
            # comparison below calls get_ident() a second time.
            ident = get_ident()
            if threadid == get_ident():
                # We want to strip of ourself from the stacktrace
                orig_frame = frame
                while frame:
                    # If we found the frame of this 'handler' function
                    if frame.f_code == handler.func_code:
                        # Go one frame up and return
                        frame = frame.f_back
                        break
                    # Else go up one more frame
                    frame = frame.f_back
                # If we were not able to find the stackframe we were looking
                # for, just use the original one
                if not frame:
                    frame = orig_frame
            # Format and print backtrace
            stack = ''.join(traceback.format_stack(frame))
            output(stack)
            output('\n')
        try:
            dump_proc_status(output)
        except Exception:
            pass
    def handler(num, frame):
        '''Signal handler which dumps Python stacks of all running threads'''
        output = stderr()
        # Prime the coroutine; .next() is the Python 2 generator protocol
        # (next(output) in Python 3).
        output.next()
        output = output.send
        try:
            handler_impl(output, num, frame)
        except Exception as e:
            output('An exception occurred while handling signal %d\n' % num)
            output('Exception information:\n')
            output('%s\n\n' % str(e))
        try:
            # Sending None tells the stderr() coroutine to close its sinks.
            output(None)
        except StopIteration:
            pass
    # Install signal handler, if none set
    # Check whether a handler is set
    orig_handler = signal.getsignal(sig)
    if orig_handler != signal.SIG_DFL:
        return
    # Set up handler
    old = signal.signal(sig, handler)
# Set up our signal handler
try:
    _setup_stacktrace_hook()
except Exception as e:
    pass
# Remove the no longer needed function
del _setup_stacktrace_hook
# Module bootstrap: each import below has side effects that populate the
# global `j` object; the ordering is significant.
from .Dirs import Dirs
j.dirs=Dirs()
from . import logging
from .Application import Application
from . import system
j.system.installtools=j.base.fs.installtools
from . import enumerators
j.application=Application()
from . import base
from . import baseclasses
from JumpScale.core.baseclasses.BaseEnumeration import enumerations
j.enumerators=enumerations
# NOTE(review): Python 2 implicit relative import; would need
# `from .PlatformTypes import ...` on Python 3.
from PlatformTypes import PlatformTypes
j.system.platformtype=PlatformTypes()
from . import pmtypes
# NOTE(review): register_types() is called twice here; j.basetype keeps
# the second call's return value — confirm this is intentional.
pmtypes.register_types()
j.basetype=pmtypes.register_types()
from . import errorhandling
# j.pm_hooked_extension_dirs = dict()
import JumpScale.baselib.platforms
import JumpScale.core.config
import JumpScale.baselib.hrd
import JumpScale.baselib.startupmanager
from . import shellconfig
from . import console
from . import gui
import JumpScale.baselib.jsdeveltools
#reinit whoAmI after dirs are loaded
j.application.initWhoAmI()
# from extensions.PMExtensions import PMExtensions
# from JumpScale.core.JumpScale import JumpScale
# class InteractiveExtensions(PMExtensionsGroup):
#     def __init__(self):
#         self._init=False
#         self.__init_properties__()
#         self.pm_name="i"
#         self.pm_location="i"
#     def _initExtensions(self):
#         if self._init==False:
#             self._pmExtensions = PMExtensions(self, 'i.', suppressAlreadyMountedError=True)
#             ##self._pmExtensions.load(j.system.fs.joinPaths(self.dirs.extensionsDir,"interactive"))
#             self._pmExtensions.load(j.system.fs.joinPaths(j.dirs.extensionsDir,"core"))
#             self._init=True
#     def extensionsLoad(self,extensionsDir):
#         self._pmExtensions.load(extensionsDir)
/EmbyPy-0.6.6.4.tar.gz/EmbyPy-0.6.6.4/embypy/objects/videos.py | from embypy.objects.object import EmbyObject
from embypy.utils.asyncio import async_func
# Generic class
class Video(EmbyObject):
    '''Base class for generic emby video objects.

    Parameters
    ----------
    object_dict : dict
        same as for `EmbyObject`
    connector : embypy.utils.connector.Connector
        same as for `EmbyObject`
    '''
    def __init__(self, object_dict, connector):
        super().__init__(object_dict, connector)

    @property
    def aspect_ratio(self):
        '''aspect ratio of the video'''
        return self.object_dict.get('AspectRatio', 0.0)

    @property
    def chapters(self):
        '''chapters included in the video file'''
        return self.object_dict.get('Chapters')

    @property
    def stream_url(self):
        '''stream url (as an mp4)'''
        return self.connector.get_url(
            '/Videos/{}/stream.mp4'.format(self.id),
            attach_api_key=False,
        )
# Videos
class Movie(Video):
    '''Emby movie object.

    Parameters
    ----------
    object_dict : dict
        same as for `EmbyObject`
    connector : embypy.utils.connector.Connector
        same as for `EmbyObject`
    '''
    def __init__(self, object_dict, connector):
        super().__init__(object_dict, connector)

    @property
    def premiere_date(self):
        '''date the movie premiered'''
        return self.object_dict.get('PremiereDate')
class Episode(Video):
    '''Class representing episode objects

    Parameters
    ----------
    object_dict : dict
        same as for `EmbyObject`
    connector : embypy.utils.connector.Connector
        same as for `EmbyObject`
    '''
    def __init__(self, object_dict, connector):
        super().__init__(object_dict, connector)

    @property
    def premiere_date(self):
        '''date that the episode premiered'''
        return self.object_dict.get('PremiereDate')

    @property
    def index_number(self):
        '''the episode number (in the season); defaults to 1 when missing'''
        return self.object_dict.get('IndexNumber', 1)

    @index_number.setter
    def index_number(self, value):
        self.object_dict['IndexNumber'] = value

    @property
    def episode_number(self):
        '''the episode number (in the season); alias for `index_number`'''
        return self.index_number

    @episode_number.setter
    def episode_number(self, value):
        self.index_number = value

    @property
    def season_id(self):
        '''emby id of the season this episode belongs to'''
        return self.object_dict.get('SeasonId')

    @property
    def season_name(self):
        '''season name'''
        return self.object_dict.get('SeasonName')

    @property
    def season_number(self):
        '''season index'''
        return self.object_dict.get('ParentIndexNumber')

    @season_id.setter
    def season_id(self, value):
        self.object_dict['SeasonId'] = value

    @property
    @async_func
    async def season(self):
        '''Season that episode is a part of

        |coro|

        Returns
        -------
        :class:`embypy.objects.Season`
        '''
        return await self.process(self.season_id)

    @property
    def series_id(self):
        '''The emby id of the series this episode belongs to'''
        return self.object_dict.get('SeriesId')

    @property
    @async_func
    async def series(self):
        '''Series that episode is a part of

        |coro|

        Returns
        -------
        :class:`embypy.objects.Series`
        '''
        return await self.process(self.series_id)

    @property
    @async_func
    async def show(self):
        '''Alias for `series` — the show this episode is a part of.

        |coro|
        '''
        return await self.series

    @property
    def series_name(self):
        '''name of the series'''
        return self.object_dict.get('SeriesName')

    @property
    def genres(self):
        '''genres for the show'''
        return self.object_dict.get('SeriesGenres', [])
class Trailer(Video):
    '''Emby trailer object.

    Parameters
    ----------
    object_dict : dict
        same as for `EmbyObject`
    connector : embypy.utils.connector.Connector
        same as for `EmbyObject`
    '''
    def __init__(self, object_dict, connector):
        super().__init__(object_dict, connector)
class AdultVideo(Video):
    '''Emby adult video object.

    Parameters
    ----------
    object_dict : dict
        same as for `EmbyObject`
    connector : embypy.utils.connector.Connector
        same as for `EmbyObject`
    '''
    def __init__(self, object_dict, connector):
        super().__init__(object_dict, connector)
class MusicVideo(Video):
    '''Emby music video object.

    Parameters
    ----------
    object_dict : dict
        same as for `EmbyObject`
    connector : embypy.utils.connector.Connector
        same as for `EmbyObject`
    '''
    def __init__(self, object_dict, connector):
        super().__init__(object_dict, connector)
/Draugr-1.0.9.tar.gz/Draugr-1.0.9/draugr/metrics/metric_collection.py |
__author__ = "Christian Heider Nielsen"
from typing import Sequence, MutableMapping, Any
from draugr.metrics.metric_aggregator import MetricAggregator, MEASURES
__all__ = ["MetricCollection"]
class MetricCollection(dict):
    """A dict-like collection of named `MetricAggregator` instances.

    Each metric name maps to a `MetricAggregator` configured with the
    collection-wide measures / history / disk-cache settings.  Metrics
    are accessible both as items (``collection["signal"]``) and as
    attributes (``collection.signal``).
    """

    def __init__(
        self,
        metrics=("signal", "length"),
        measures=None,
        keep_measure_history=True,
        use_disk_cache=True,
    ):
        """
        :param metrics: names of the metrics to create aggregators for
        :param measures: measures forwarded to each aggregator; ``None``
            (the default) resolves to the module-level ``MEASURES``.
            ``None`` was never a valid value before, so this lazy default
            is backwards compatible.
        :param keep_measure_history: whether aggregators keep history
        :param use_disk_cache: whether aggregators cache to disk
        """
        super().__init__()
        self._metrics = {}
        self._measures = MEASURES if measures is None else measures
        self._keep_measure_history = keep_measure_history
        self._use_disk_cache = use_disk_cache
        for metric in metrics:
            self._metrics[metric] = MetricAggregator(
                measures=self._measures,
                keep_measure_history=self._keep_measure_history,
                use_disk_cache=self._use_disk_cache,
            )

    def add_metric(self, name):
        """Create a new aggregator under ``name``.

        :param name: name of the new metric
        """
        # Consistency fix: previously ``use_disk_cache`` was dropped here,
        # so metrics added later ignored the collection-wide setting.
        self._metrics[name] = MetricAggregator(
            measures=self._measures,
            keep_measure_history=self._keep_measure_history,
            use_disk_cache=self._use_disk_cache,
        )

    def append(self, *args, **kwargs):
        """Append values to metrics.

        Positional values are paired with metrics in insertion order;
        keyword values are appended to the metric of the same name.
        """
        for arg, (name, _) in zip(args, self._metrics.items()):
            self._metrics[name].append(arg)
        # BUG FIX: iterating a dict yields keys only, so the previous
        # ``for k, v in kwargs`` raised ValueError for any keyword arg.
        for name, value in kwargs.items():
            self._metrics[name].append(value)

    def remove_metric(self, name):
        """Delete the metric called ``name``."""
        del self._metrics[name]

    def __len__(self):
        return len(self._metrics)

    @property
    def metrics(self):
        """The underlying name -> aggregator mapping."""
        return self._metrics

    def __getattr__(self, name):
        # Delegate unknown attributes to item access; __getitem__ raises
        # AttributeError for unknown names, as getattr() expects.
        return self.__getitem__(name)

    def __repr__(self):
        return f"<StatisticCollection> {self._metrics} </StatisticCollection>"

    def __str__(self):
        return self.__repr__()

    def __iter__(self):
        # BUG FIX: __iter__ must return an iterator; returning the dict
        # itself made ``iter(collection)`` raise TypeError.
        return iter(self._metrics)

    def __getitem__(self, name):
        if name in self._metrics:
            return self._metrics[name]
        raise AttributeError(name)

    def keys(self):
        """Metric names."""
        return self.metrics.keys()

    def __contains__(self, item):
        return item in self.metrics

    def items(self):
        """(name, aggregator) pairs."""
        return self.metrics.items()

    def save(self, **kwargs):
        """Save every aggregator, forwarding ``kwargs`` to each save call."""
        for key, value in self._metrics.items():
            value.save(stat_name=key, **kwargs)
if __name__ == "__main__":
stats = MetricCollection(keep_measure_history=False)
stats2 = MetricCollection(keep_measure_history=True)
for i in range(10):
stats.signal.append(i)
stats2.signal.append(i)
print(stats)
print(stats.signal)
print(stats.length)
print(stats.length.measures)
print(stats.signal.measures)
print(stats.signal.variance)
print(stats.signal.calc_moving_average())
print(stats.signal.max)
print(stats.signal.min)
print("\n")
print(stats2)
print(stats2.signal.min) | PypiClean |
/Brian2-2.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl/brian2/input/spikegeneratorgroup.py | import numpy as np
from brian2.core.functions import timestep
from brian2.core.spikesource import SpikeSource
from brian2.core.variables import Variables
from brian2.groups.group import CodeRunner, Group
from brian2.units.allunits import second
from brian2.units.fundamentalunits import Quantity, check_units
from brian2.utils.logger import get_logger
__all__ = ["SpikeGeneratorGroup"]
logger = get_logger(__name__)
class SpikeGeneratorGroup(Group, CodeRunner, SpikeSource):
    """
    SpikeGeneratorGroup(N, indices, times, dt=None, clock=None,
                        period=0*second, when='thresholds', order=0,
                        sorted=False, name='spikegeneratorgroup*',
                        codeobj_class=None)

    A group emitting spikes at given times.

    Parameters
    ----------
    N : int
        The number of "neurons" in this group
    indices : array of integers
        The indices of the spiking cells
    times : `Quantity`
        The spike times for the cells given in ``indices``. Has to have the
        same length as ``indices``.
    period : `Quantity`, optional
        If this is specified, it will repeat spikes with this period. A
        period of 0s means not repeating spikes.
    dt : `Quantity`, optional
        The time step to be used for the simulation. Cannot be combined with
        the `clock` argument.
    clock : `Clock`, optional
        The update clock to be used. If neither a clock, nor the `dt` argument
        is specified, the `defaultclock` will be used.
    when : str, optional
        When to run within a time step, defaults to the ``'thresholds'`` slot.
        See :ref:`scheduling` for possible values.
    order : int, optional
        The priority of of this group for operations occurring at the same time
        step and in the same scheduling slot. Defaults to 0.
    sorted : bool, optional
        Whether the given indices and times are already sorted. Set to ``True``
        if your events are already sorted (first by spike time, then by index),
        this can save significant time at construction if your arrays contain
        large numbers of spikes. Defaults to ``False``.

    Notes
    -----
    * If `sorted` is set to ``True``, the given arrays will not be copied
      (only affects runtime mode).
    """

    @check_units(N=1, indices=1, times=second, period=second)
    def __init__(
        self,
        N,
        indices,
        times,
        dt=None,
        clock=None,
        period=0 * second,
        when="thresholds",
        order=0,
        sorted=False,
        name="spikegeneratorgroup*",
        codeobj_class=None,
    ):
        Group.__init__(self, dt=dt, clock=clock, when=when, order=order, name=name)

        # We store the indices and times also directly in the Python object,
        # this way we can use them for checks in `before_run` even in standalone
        # TODO: Remove this when the checks in `before_run` have been moved to the template
        #: Array of spiking neuron indices.
        self._neuron_index = None
        #: Array of spiking neuron times.
        self._spike_time = None
        #: "Dirty flag" that will be set when spikes are changed after the
        #: `before_run` check
        self._spikes_changed = True

        # Let other objects know that we emit spikes events
        self.events = {"spike": None}

        self.codeobj_class = codeobj_class

        if N < 1 or int(N) != N:
            raise TypeError("N has to be an integer >=1.")
        N = int(N)
        self.start = 0
        self.stop = N

        self.variables = Variables(self)
        self.variables.create_clock_variables(self._clock)

        # Validates and (unless `sorted`) sorts the events by (time, index)
        indices, times = self._check_args(
            indices, times, period, N, sorted, self._clock.dt
        )

        self.variables.add_constant("N", value=N)
        self.variables.add_array(
            "period",
            dimensions=second.dim,
            size=1,
            constant=True,
            read_only=True,
            scalar=True,
            dtype=self._clock.variables["t"].dtype,
        )
        self.variables.add_arange("i", N)
        self.variables.add_dynamic_array(
            "spike_number",
            values=np.arange(len(indices)),
            size=len(indices),
            dtype=np.int32,
            read_only=True,
            constant=True,
            index="spike_number",
            unique=True,
        )
        self.variables.add_dynamic_array(
            "neuron_index",
            values=indices,
            size=len(indices),
            dtype=np.int32,
            index="spike_number",
            read_only=True,
            constant=True,
        )
        self.variables.add_dynamic_array(
            "spike_time",
            values=times,
            size=len(times),
            dimensions=second.dim,
            index="spike_number",
            read_only=True,
            constant=True,
            dtype=self._clock.variables["t"].dtype,
        )
        self.variables.add_dynamic_array(
            "_timebins",
            size=len(times),
            index="spike_number",
            read_only=True,
            constant=True,
            dtype=np.int32,
        )
        self.variables.add_array(
            "_period_bins",
            size=1,
            constant=True,
            read_only=True,
            scalar=True,
            dtype=np.int32,
        )
        # One extra entry: the last element of _spikespace stores the number
        # of spikes in the current time step (see the `spikes` property).
        self.variables.add_array("_spikespace", size=N + 1, dtype=np.int32)
        self.variables.add_array(
            "_lastindex", size=1, values=0, dtype=np.int32, read_only=True, scalar=True
        )

        #: Remember the dt we used the last time when we checked the spike bins
        #: to not repeat the work for multiple runs with the same dt
        self._previous_dt = None

        CodeRunner.__init__(
            self,
            self,
            code="",
            template="spikegenerator",
            clock=self._clock,
            when=when,
            order=order,
            name=None,
        )

        # Activate name attribute access
        self._enable_group_attributes()

        self.variables["period"].set_value(period)

    def _full_state(self):
        state = super()._full_state()
        # Store the internal information we use to decide whether to rebuild
        # the time bins
        state["_previous_dt"] = self._previous_dt
        state["_spikes_changed"] = self._spikes_changed
        return state

    def _restore_from_full_state(self, state):
        state = state.copy()  # copy to avoid errors for multiple restores
        self._previous_dt = state.pop("_previous_dt")
        self._spikes_changed = state.pop("_spikes_changed")
        super()._restore_from_full_state(state)

    def before_run(self, run_namespace):
        # Do some checks on the period vs. dt
        dt = self.dt_[:]  # make a copy
        period = self.period_
        if period < np.inf and period != 0:
            if period < dt:
                raise ValueError(
                    f"The period of '{self.name}' is {self.period[:]!s}, "
                    f"which is smaller than its dt of {dt*second!s}."
                )

        if self._spikes_changed:
            current_t = self.variables["t"].get_value().item()
            timesteps = timestep(self._spike_time, dt)
            current_step = timestep(current_t, dt)
            in_the_past = np.nonzero(timesteps < current_step)[0]
            if len(in_the_past):
                logger.warn(
                    "The SpikeGeneratorGroup contains spike times "
                    "earlier than the start time of the current run "
                    f"(t = {current_t*second!s}), these spikes will be "
                    "ignored.",
                    name_suffix="ignored_spikes",
                )
                # Skip past spikes: start emitting after the last past spike
                self.variables["_lastindex"].set_value(in_the_past[-1] + 1)
            else:
                self.variables["_lastindex"].set_value(0)

        # Check that we don't have more than one spike per neuron in a time bin
        if self._previous_dt is None or dt != self._previous_dt or self._spikes_changed:
            # We shift all the spikes by a tiny amount to make sure that spikes
            # at exact multiples of dt do not end up in the previous time bin
            # This shift has to be quite significant relative to machine
            # epsilon, we use 1e-3 of the dt here
            shift = 1e-3 * dt
            timebins = np.asarray(
                np.asarray(self._spike_time + shift) / dt, dtype=np.int32
            )
            # time is already in sorted order, so it's enough to check if the condition
            # that timebins[i]==timebins[i+1] and self._neuron_index[i]==self._neuron_index[i+1]
            # is ever both true
            if (
                np.logical_and(np.diff(timebins) == 0, np.diff(self._neuron_index) == 0)
            ).any():
                raise ValueError(
                    f"Using a dt of {self.dt!s}, some neurons of "
                    f"SpikeGeneratorGroup '{self.name}' spike more than "
                    "once during a time step."
                )
            self.variables["_timebins"].set_value(timebins)
            period_bins = np.round(period / dt)
            max_int = np.iinfo(np.int32).max
            if period_bins > max_int:
                logger.warn(
                    f"Periods longer than {max_int} timesteps "
                    f"(={max_int*dt*second!s}) are not "
                    "supported, the period will therefore be "
                    "considered infinite. Set the period to 0*second "
                    "to avoid this "
                    "warning.",
                    "spikegenerator_long_period",
                )
                period = period_bins = 0
            # The period must be an exact multiple of dt (up to float epsilon)
            if np.abs(period_bins * dt - period) > period * np.finfo(dt.dtype).eps:
                raise NotImplementedError(
                    f"The period of '{self.name}' is "
                    f"{self.period[:]!s}, which is "
                    "not an integer multiple of its dt "
                    f"of {dt*second!s}."
                )

            self.variables["_period_bins"].set_value(period_bins)

            self._previous_dt = dt
            self._spikes_changed = False

        super().before_run(run_namespace=run_namespace)

    @check_units(indices=1, times=second, period=second)
    def set_spikes(self, indices, times, period=0 * second, sorted=False):
        """
        set_spikes(indices, times, period=0*second, sorted=False)

        Change the spikes that this group will generate.

        This can be used to set the input for a second run of a model based on
        the output of a first run (if the input for the second run is already
        known before the first run, then all the information should simply be
        included in the initial `SpikeGeneratorGroup` initializer call,
        instead).

        Parameters
        ----------
        indices : array of integers
            The indices of the spiking cells
        times : `Quantity`
            The spike times for the cells given in ``indices``. Has to have the
            same length as ``indices``.
        period : `Quantity`, optional
            If this is specified, it will repeat spikes with this period. A
            period of 0s means not repeating spikes.
        sorted : bool, optional
            Whether the given indices and times are already sorted. Set to
            ``True`` if your events are already sorted (first by spike time,
            then by index), this can save significant time at construction if
            your arrays contain large numbers of spikes. Defaults to ``False``.
        """
        indices, times = self._check_args(
            indices, times, period, self.N, sorted, self.dt
        )

        self.variables["period"].set_value(period)
        self.variables["neuron_index"].resize(len(indices))
        self.variables["spike_time"].resize(len(indices))
        self.variables["spike_number"].resize(len(indices))
        self.variables["spike_number"].set_value(np.arange(len(indices)))
        self.variables["_timebins"].resize(len(indices))
        self.variables["neuron_index"].set_value(indices)
        self.variables["spike_time"].set_value(times)
        # _lastindex and _timebins will be set as part of before_run

    def _check_args(self, indices, times, period, N, sorted, dt):
        """Validate the spike arrays and sort them by (time, index)."""
        times = Quantity(times)
        if len(indices) != len(times):
            raise ValueError(
                "Length of the indices and times array must "
                f"match, but {len(indices)} != {len(times)}"
            )
        if period < 0 * second:
            raise ValueError("The period cannot be negative.")
        elif len(times) and period != 0 * second:
            period_bins = np.round(period / dt)
            # Note: we have to use the timestep function here, to use the same
            # binning as in the actual simulation
            max_bin = timestep(np.max(times), dt)
            if max_bin >= period_bins:
                raise ValueError(
                    "The period has to be greater than the maximum of the spike times"
                )
        if len(times) and np.min(times) < 0 * second:
            raise ValueError("Spike times cannot be negative")
        if len(indices) and (np.min(indices) < 0 or np.max(indices) >= N):
            raise ValueError(f"Indices have to lie in the interval [0, {int(N)}[")

        times = np.asarray(times)
        indices = np.asarray(indices)
        if not sorted:
            # sort times and indices first by time, then by indices
            sort_indices = np.lexsort((indices, times))
            indices = indices[sort_indices]
            times = times[sort_indices]

        # We store the indices and times also directly in the Python object,
        # this way we can use them for checks in `before_run` even in standalone
        # TODO: Remove this when the checks in `before_run` have been moved to the template
        self._neuron_index = indices
        self._spike_time = times
        self._spikes_changed = True

        return indices, times

    @property
    def spikes(self):
        """
        The spikes returned by the most recent thresholding operation.
        """
        # Note that we have to directly access the ArrayVariable object here
        # instead of using the Group mechanism by accessing self._spikespace
        # Using the latter would cut _spikespace to the length of the group
        spikespace = self.variables["_spikespace"].get_value()
        return spikespace[: spikespace[-1]]

    def __repr__(self):
        cls = self.__class__.__name__
        size = self.variables["neuron_index"].size
        return (
            f"{cls}({self.N}, indices=<length {size} array>, times=<length"
            f" {size} array>)"
        )
/FitBenchmarking-1.0.0.tar.gz/FitBenchmarking-1.0.0/fitbenchmarking/controllers/bumps_controller.py | from bumps.fitters import fit as bumpsFit
from bumps.names import Curve, FitProblem, PoissonCurve
import numpy as np
from fitbenchmarking.controllers.base_controller import Controller
from fitbenchmarking.cost_func.cost_func_factory import create_cost_func
from fitbenchmarking.utils.exceptions import MaxRuntimeError
class BumpsController(Controller):
    """
    Controller for the Bumps fitting software.

    Sasview requires a model to fit.
    Setup creates a model with the correct function.
    """

    # Mapping from FitBenchmarking algorithm categories to the Bumps
    # minimizer names that implement them.
    algorithm_check = {
        'all': ['amoeba',
                'lm-bumps',
                'newton',
                'de',
                'scipy-leastsq',
                'dream'],
        'ls': ['lm-bumps', 'scipy-leastsq'],
        'deriv_free': ['amoeba', 'de', 'dream'],
        'general': ['amoeba', 'newton', 'de', 'dream'],
        'simplex': ['amoeba'],
        'trust_region': ['lm-bumps', 'scipy-leastsq'],
        'levenberg-marquardt': ['lm-bumps', 'scipy-leastsq'],
        'gauss_newton': [],
        'bfgs': ['newton'],
        'conjugate_gradient': [],
        'steepest_descent': [],
        'global_optimization': ['de', 'dream']}

    def __init__(self, cost_func):
        """
        Extract param names for function setup

        :param cost_func: Cost function object selected from options.
        :type cost_func: subclass of
            :class:`~fitbenchmarking.cost_func.base_cost_func.CostFunc`
        """
        super().__init__(cost_func)

        # Bumps parameter names cannot contain '.', so sanitise them
        self._param_names = [name.replace('.', '_')
                             for name in self.problem.param_names]
        self.support_for_bounds = True
        self._func_wrapper = None
        self._fit_problem = None
        self.fit_order = None
        self._status = None
        self._bumps_result = None
        # Need to map the minimizer to an internal one to avoid changing the
        # minimizer in results
        self._minimizer = ''

    def setup(self):
        # pylint: disable=exec-used,protected-access
        """
        Setup problem ready to run with Bumps.

        Creates a FitProblem for calling in the fit() function of Bumps
        """
        # Bumps fails with the *args notation
        param_name_str = ', '.join(self._param_names)
        # Build a wrapper with explicitly named parameters via exec, since
        # Bumps introspects the function signature.
        wrapper = f"def fitFunction(x, {param_name_str}):\n"
        wrapper += f"    return func([{param_name_str}], x=x)"

        # Remove any function attribute. BinWidth is the only attribute in all
        # FitBenchmark (Mantid) problems.
        param_dict = dict(zip(self._param_names, self.initial_params))

        # Create a Function Wrapper for the problem function. The type of the
        # Function Wrapper is acceptable by Bumps.
        if isinstance(self.cost_func, create_cost_func('poisson')):
            # Bumps has a built in poisson cost fucntion, so use that.
            exec_dict = {'func': self.problem.eval_model}
            exec(wrapper, exec_dict)
            model = exec_dict['fitFunction']
            func_wrapper = PoissonCurve(fn=model,
                                        x=self.data_x,
                                        y=self.data_y,
                                        **param_dict)
        else:  # nlls cost functions
            # Send in the residual as the model, with zero
            # y data. This allows all our supported nlls
            # cost fucntions to be used.
            exec_dict = {'func': self.cost_func.eval_r}
            exec(wrapper, exec_dict)
            model = exec_dict['fitFunction']
            zero_y = np.zeros(len(self.data_y))
            func_wrapper = Curve(fn=model,
                                 x=self.data_x,
                                 y=zero_y,
                                 **param_dict)

        # Set a range for each parameter
        for ind, name in enumerate(self._param_names):
            if self.value_ranges is not None:
                min_val = self.value_ranges[ind][0]
                max_val = self.value_ranges[ind][1]
            else:
                min_val = -np.inf
                max_val = np.inf
            func_wrapper.__dict__[name].range(min_val, max_val)

        # Create a Problem Wrapper. The type of the Problem Wrapper is
        # acceptable by Bumps fitting.
        self._func_wrapper = func_wrapper
        self._fit_problem = FitProblem(func_wrapper)

        # Determine the order of the parameters in `self.fit_problem` as this
        # could differ from the ordering of parameters in `self._param_names`
        param_order = []
        for i in range(len(self._param_names)):
            param_order.append(str(self._fit_problem._parameters[i]))
        self.fit_order = param_order

        # Translate FitBenchmarking minimizer names to Bumps names
        if self.minimizer == "lm-bumps":
            self._minimizer = "lm"
        elif self.minimizer == "scipy-leastsq":
            self._minimizer = "scipy.leastsq"
        else:
            self._minimizer = self.minimizer

    def _check_timer_abort_test(self):
        """
        Boolean check for if the fit should be stopped.

        :return: If the time limit has been reached.
        :rtype: bool
        """
        try:
            self.timer.check_elapsed_time()
        except MaxRuntimeError:
            return True
        return False

    def fit(self):
        """
        Run problem with Bumps.
        """
        result = bumpsFit(self._fit_problem,
                          method=self._minimizer,
                          abort_test=self._check_timer_abort_test)

        self._bumps_result = result
        self._status = self._bumps_result.status

    def cleanup(self):
        """
        Convert the result to a numpy array and populate the variables results
        will be read from.
        """
        # Map the Bumps status code onto the FitBenchmarking flag:
        # 0 = success, 1 = hit iteration limit, 2 = other failure
        if self._status == 0:
            self.flag = 0
        elif self._status == 2:
            self.flag = 1
        else:
            self.flag = 2

        # Set result variable where parameters are in the same
        # order that are listed in `self._param_names`
        result = []
        if self.fit_order != self._param_names:
            for name in self._param_names:
                ind = self.fit_order.index(name)
                result.append(self._bumps_result.x[ind])
        else:
            result = self._bumps_result.x

        self.final_params = result
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/bower_components/jquery/src/traversing/findFilter.js | define( [
"../core",
"../var/indexOf",
"../var/isFunction",
"./var/rneedsContext",
"../selector"
], function( jQuery, indexOf, isFunction, rneedsContext ) {
"use strict";
// Implement the identical functionality for filter and not
function winnow( elements, qualifier, not ) {
if ( isFunction( qualifier ) ) {
return jQuery.grep( elements, function( elem, i ) {
return !!qualifier.call( elem, i, elem ) !== not;
} );
}
// Single element
if ( qualifier.nodeType ) {
return jQuery.grep( elements, function( elem ) {
return ( elem === qualifier ) !== not;
} );
}
// Arraylike of elements (jQuery, arguments, Array)
if ( typeof qualifier !== "string" ) {
return jQuery.grep( elements, function( elem ) {
return ( indexOf.call( qualifier, elem ) > -1 ) !== not;
} );
}
// Filtered directly for both simple and complex selectors
return jQuery.filter( qualifier, elements, not );
}
jQuery.filter = function( expr, elems, not ) {
var elem = elems[ 0 ];
if ( not ) {
expr = ":not(" + expr + ")";
}
if ( elems.length === 1 && elem.nodeType === 1 ) {
return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : [];
}
return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) {
return elem.nodeType === 1;
} ) );
};
jQuery.fn.extend( {
find: function( selector ) {
var i, ret,
len = this.length,
self = this;
if ( typeof selector !== "string" ) {
return this.pushStack( jQuery( selector ).filter( function() {
for ( i = 0; i < len; i++ ) {
if ( jQuery.contains( self[ i ], this ) ) {
return true;
}
}
} ) );
}
ret = this.pushStack( [] );
for ( i = 0; i < len; i++ ) {
jQuery.find( selector, self[ i ], ret );
}
return len > 1 ? jQuery.uniqueSort( ret ) : ret;
},
filter: function( selector ) {
return this.pushStack( winnow( this, selector || [], false ) );
},
not: function( selector ) {
return this.pushStack( winnow( this, selector || [], true ) );
},
is: function( selector ) {
return !!winnow(
this,
// If this is a positional/relative selector, check membership in the returned set
// so $("p:first").is("p:last") won't return true for a doc with two "p".
typeof selector === "string" && rneedsContext.test( selector ) ?
jQuery( selector ) :
selector || [],
false
).length;
}
} );
} ); | PypiClean |
/DTMC/spatialModel/StrongInfectious/StrongInfSEIRV.py | import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from Eir.utility import dist, Person, randEvent
from ..Hub.HubSEIRV import HubSEIRV
class StrongInfSEIRV(HubSEIRV):
    """
    SEIRV compartment model on a 2D plane under the Strong Infectious
    assumptions: super spreaders infect any susceptible within their
    spreading radius with the constant probability ``w0``, while normal
    infectious people use a probability that decays with distance.

    Parameters mirror :class:`HubSEIRV`; ``hubConstant`` is fixed to 1
    because it plays no role in the Strong Infectious model.
    """

    def __init__(self, S0: int, E0: int, I0: int, R0: int, V0: int, pss: float, rho: float,
                 gamma: float, eta: float, side: float, rstart: float, days: int, w0=0.7, timeDelay=-1, alpha=2.0):
        # Validate user input before handing the values to the parent class.
        self.intCheck([S0, E0, I0, R0, V0, days])
        self.floatCheck([pss, rho, gamma, eta, side, rstart, w0, alpha, timeDelay])
        self.negValCheck([S0, E0, I0, R0, pss, rho, gamma, eta, side, rstart, days, w0, alpha])
        self.probValCheck([pss, rho, gamma, eta, w0])
        super().__init__(S0=S0, E0=E0, I0=I0, R0=R0, V0=V0, pss=pss, rho=rho, gamma=gamma,
                         eta=eta, side=side, rstart=rstart, alpha=alpha, days=days, w0=w0,
                         hubConstant=1, timeDelay=timeDelay)

    def _infect(self, inf: Person, sus: Person):
        """
        Compute the probability that ``inf`` infects ``sus`` based on the
        Strong Infectious Model assumptions.

        :param inf: the infectious person
        :param sus: the susceptible person
        :return: probability of infection, in [0, 1]
        """
        # distance between the two people on the plane
        r = dist(inf, sus)
        # spreading radius; no infection can occur beyond it
        r0 = self.rstart
        # if the susceptible is too far away from the infectious person
        if r > r0:
            return 0
        # in range: super spreaders infect with the full base probability
        if inf.ss:
            return self.w0
        # normal spreaders: probability decays with distance
        return self.w0 * (1 - r / r0) ** self.alpha

    def plot(self):
        """
        Plot the S, E, I, R and V curves on stacked subplots.

        Return
        -------
        pyplot.Figure:
            figure object that contains the five subplots
        """
        t = np.linspace(0, self.days, self.days + 1)
        fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(nrows=5, sharex='all')
        ax1.plot(t, self.S, label="Susceptible", color='r')
        ax1.set_ylabel("Number of Susceptible People")
        # bug fix: corrected typo "Infecitous" in the displayed title
        ax1.set_title("Strong Infectious Model SEIRV Simulation")
        ax3.plot(t, self.I, label="Active Cases", color='b')
        ax3.set_ylabel("Active Cases")
        ax2.plot(t, self.E, label="Exposed", color='c')
        ax2.set_ylabel("# of Exposed")
        ax4.plot(t, self.R, label="Recovered", color='m')
        ax5.set_xlabel("Days")
        ax4.set_ylabel('Number of Recovered')
        ax5.plot(t, self.V, label="Vaccinated")
        ax5.set_ylabel("# Vaccinated")
        ax1.legend()
        ax2.legend()
        ax3.legend()
        ax4.legend()
        # bug fix: the vaccinated subplot had a label but no legend was shown
        ax5.legend()
        plt.show()
        return fig
/DeepCell-0.12.9.tar.gz/DeepCell-0.12.9/deepcell/running.py | import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from deepcell.utils.data_utils import trim_padding
def get_cropped_input_shape(images,
                            num_crops=4,
                            receptive_field=61,
                            data_format=None):
    """Calculate the ``input_shape`` for models that process cropped sub-images.

    Args:
        images (numpy.array): numpy array of original data
        num_crops (int): number of slices along x and y used to create
            the sub-images
        receptive_field (int): the receptive field of the neural network
        data_format (str): "channels_first" or "channels_last"; defaults to
            the Keras backend setting when omitted

    Returns:
        tuple: new ``input_shape`` for a model that processes sub-images.
    """
    if data_format is None:
        data_format = K.image_data_format()

    ndim = len(images.shape)
    if data_format == 'channels_first':
        channel_axis = 1
        row_axis, col_axis = ndim - 2, ndim - 1
    else:
        channel_axis = ndim - 1
        row_axis, col_axis = ndim - 3, ndim - 2

    # Each crop keeps an overlap of (receptive_field - 1) pixels so the model
    # sees full context at the crop borders.
    overlap = receptive_field - 1
    crop_x = images.shape[row_axis] // num_crops + overlap
    crop_y = images.shape[col_axis] // num_crops + overlap
    channel_dim = images.shape[channel_axis]

    if images.ndim == 5:
        # 5D data carries a frame axis just before the rows.
        input_shape = (images.shape[row_axis - 1], crop_x, crop_y, channel_dim)
    else:
        input_shape = (crop_x, crop_y, channel_dim)

    # Move channels to the front when the data format requires it.
    if channel_axis == 1:
        input_shape = (input_shape[-1],) + input_shape[:-1]

    return input_shape
def get_padding_layers(model):
    """Collect the names of all padding layers inside a model.

    Args:
        model (tensorflow.keras.Model): Keras model to inspect

    Returns:
        list: names of every padding layer found, including those of
        nested sub-models.
    """
    collected = []
    for layer in model.layers:
        layer_name = layer.name
        if 'padding' in layer_name:
            collected.append(layer_name)
        elif isinstance(layer, Model):
            # Nested models may hide their own padding layers; recurse.
            collected += get_padding_layers(layer)
    return collected
def process_whole_image(model, images, num_crops=4, receptive_field=61, padding=None):
    """Slice images into num_crops * num_crops pieces, and use the model to
    process each small image.

    Args:
        model (tensorflow.keras.Model): model that will process each small image
        images (numpy.array): numpy array that is too big for model.predict
        num_crops (int): number of slices for the x and y axis
            to create sub-images
        receptive_field (int): receptive field used by model,
            required to pad images
        padding (str): type of padding for input images,
            one of {'reflect', 'zero'}.

    Returns:
        numpy.array: model outputs for each sub-image

    Raises:
        ValueError: invalid padding value
        ValueError: model input shape is different than expected_input_shape
    """
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
        row_axis = len(images.shape) - 2
        col_axis = len(images.shape) - 1
    else:
        channel_axis = len(images.shape) - 1
        row_axis = len(images.shape) - 3
        col_axis = len(images.shape) - 2
    # Infer the padding mode from the model's own padding layers when the
    # caller did not supply one.
    if not padding:
        padding_layers = get_padding_layers(model)
        if padding_layers:
            padding = 'reflect' if 'reflect' in padding_layers[0] else 'zero'
    if str(padding).lower() not in {'reflect', 'zero'}:
        raise ValueError('Expected `padding_mode` to be either `zero` or '
                         '`reflect`. Got ', padding)
    # Split the frames into quarters, as the full image size is too large
    crop_x = images.shape[row_axis] // num_crops
    crop_y = images.shape[col_axis] // num_crops
    # Set up receptive field window for padding
    win_x, win_y = (receptive_field - 1) // 2, (receptive_field - 1) // 2
    # instantiate matrix for model output
    model_output_shape = tuple(list(model.layers[-1].output_shape)[1:])
    if channel_axis == 1:
        output = np.zeros(tuple([images.shape[0], model_output_shape[0]] +
                                list(images.shape[2:])))
    else:
        output = np.zeros(tuple(list(images.shape[0:-1]) +
                                [model_output_shape[-1]]))
    # The model must have been built for the cropped-and-padded size.
    expected_input_shape = get_cropped_input_shape(
        images, num_crops, receptive_field)
    if expected_input_shape != model.input_shape[1:]:
        raise ValueError('Expected model.input_shape to be {}. Got {}. Use '
                         '`get_cropped_input_shape()` to recreate your model '
                         ' with the proper input_shape'.format(
                             expected_input_shape, model.input_shape[1:]))
    # pad the images only in the x and y axes
    pad_width = []
    for i in range(len(images.shape)):
        if i == row_axis:
            pad_width.append((win_x, win_x))
        elif i == col_axis:
            pad_width.append((win_y, win_y))
        else:
            pad_width.append((0, 0))
    if str(padding).lower() == 'reflect':
        padded_images = np.pad(images, pad_width, mode='reflect')
    else:
        padded_images = np.pad(images, pad_width, mode='constant', constant_values=0)
    # Predict each overlapping crop and stitch the trimmed results back into
    # their non-overlapping destination slots of `output`.
    for i in range(num_crops):
        for j in range(num_crops):
            # e:f / g:h index the padded input (crop plus overlap window)
            e, f = i * crop_x, (i + 1) * crop_x + 2 * win_x
            g, h = j * crop_y, (j + 1) * crop_y + 2 * win_y
            if images.ndim == 5:
                if channel_axis == 1:
                    predicted = model.predict(padded_images[:, :, :, e:f, g:h])
                else:
                    predicted = model.predict(padded_images[:, :, e:f, g:h, :])
            else:
                if channel_axis == 1:
                    predicted = model.predict(padded_images[:, :, e:f, g:h])
                else:
                    predicted = model.predict(padded_images[:, e:f, g:h, :])
            # if using skip_connections, get the final model output
            if isinstance(predicted, list):
                predicted = predicted[-1]
            # if the model uses padding, trim the output images to proper shape
            # if model does not use padding, images should already be correct
            if padding:
                predicted = trim_padding(predicted, win_x, win_y)
            # a:b / c:d index the destination slot in the unpadded output
            a, b = i * crop_x, (i + 1) * crop_x
            c, d = j * crop_y, (j + 1) * crop_y
            if images.ndim == 5:
                if channel_axis == 1:
                    output[:, :, :, a:b, c:d] = predicted
                else:
                    output[:, :, a:b, c:d, :] = predicted
            else:
                if channel_axis == 1:
                    output[:, :, a:b, c:d] = predicted
                else:
                    output[:, a:b, c:d, :] = predicted
    return output
/Editra-0.7.20.tar.gz/Editra-0.7.20/src/eclib/_filetree.py | __author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: _filetree.py 73347 2013-01-05 19:58:31Z CJP $"
__revision__ = "$Revision: 73347 $"
__all__ = ['FileTree',]
#-----------------------------------------------------------------------------#
# Imports
import sys
import os
import types
import wx
#-----------------------------------------------------------------------------#
class FileTree(wx.TreeCtrl):
    """Simple base control for displaying directories and files in a
    hierarchical view.

    Subclasses customize behavior by overriding the ``Do*`` template
    methods (DoItemActivated, DoItemExpanding, DoShowMenu, ...).
    """
    def __init__(self, parent):
        super(FileTree, self).__init__(parent,
                                       style=wx.TR_HIDE_ROOT|
                                             wx.TR_FULL_ROW_HIGHLIGHT|
                                             wx.TR_LINES_AT_ROOT|
                                             wx.TR_HAS_BUTTONS|
                                             wx.TR_MULTIPLE|
                                             wx.TR_EDIT_LABELS)

        # Attributes
        self._watch = list() # Root directories to watch
        self._il = None      # wx.ImageList created by SetupImageList
        self._editlabels = True  # whether in-place label editing is allowed

        # Setup
        self.SetupImageList()
        self.AddRoot('root')
        self.SetPyData(self.RootItem, "root")

        # Event Handlers
        self.Bind(wx.EVT_TREE_ITEM_GETTOOLTIP, self._OnGetToolTip)
        self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self._OnItemActivated)
        self.Bind(wx.EVT_TREE_ITEM_COLLAPSED, self._OnItemCollapsed)
        self.Bind(wx.EVT_TREE_ITEM_EXPANDING, self._OnItemExpanding)
        self.Bind(wx.EVT_TREE_ITEM_MENU, self._OnMenu)
        self.Bind(wx.EVT_TREE_BEGIN_LABEL_EDIT, self._OnBeginEdit)
        self.Bind(wx.EVT_TREE_END_LABEL_EDIT, self._OnEndEdit)

    def _OnBeginEdit(self, evt):
        # Veto the edit unless label editing is enabled AND DoBeginEdit
        # approves it for this particular item.
        if not self._editlabels:
            evt.Veto()
        else:
            item = evt.GetItem()
            if self.DoBeginEdit(item):
                evt.Skip()
            else:
                evt.Veto()

    def _OnEndEdit(self, evt):
        # Accept the new label only if DoEndEdit approves it; otherwise veto.
        if self._editlabels:
            item = evt.GetItem()
            newlabel = evt.GetLabel()
            if self.DoEndEdit(item, newlabel):
                evt.Skip()
                return
        evt.Veto()

    def _OnGetToolTip(self, evt):
        # Delegate tooltip text to the overridable DoGetToolTip.
        item = evt.GetItem()
        tt = self.DoGetToolTip(item)
        if tt:
            evt.ToolTip = tt
        else:
            evt.Skip()

    def _OnItemActivated(self, evt):
        item = evt.GetItem()
        self.DoItemActivated(item)
        evt.Skip()

    def _OnItemCollapsed(self, evt):
        item = evt.GetItem()
        self.DoItemCollapsed(item)
        evt.Skip()

    def _OnItemExpanding(self, evt):
        item = evt.GetItem()
        self.DoItemExpanding(item)
        evt.Skip()

    def _OnMenu(self, evt):
        # NOTE(review): the bare except silently swallows all errors from the
        # DoShowMenu override -- presumably to guard against stale tree items;
        # confirm before narrowing.
        try:
            item = evt.GetItem()
            self.DoShowMenu(item)
        except:
            pass

    #---- Properties ----#

    SelectedFiles = property(lambda self: self.GetSelectedFiles())

    #---- Overridable methods ----#

    def DoBeginEdit(self, item):
        """Overridable method that will be called when
        a user has started to edit an item.
        @param item: TreeItem
        return: bool (True == Allow Edit)
        """
        return False

    def DoEndEdit(self, item, newlabel):
        """Overridable method that will be called when
        a user has finished editing an item.
        @param item: TreeItem
        @param newlabel: unicode (newly entered value)
        return: bool (True == Change Accepted)
        """
        return False

    def DoGetToolTip(self, item):
        """Get the tooltip to show for an item
        @return: string or None
        """
        data = self.GetItemPyData(item)
        return data

    def DoItemActivated(self, item):
        """Override to handle item activation
        @param item: TreeItem
        """
        pass

    def DoItemCollapsed(self, item):
        """Handle when an item is collapsed
        @param item: TreeItem
        """
        # Children are dropped on collapse and re-read on expand, so the
        # view always reflects the current directory contents.
        self.DeleteChildren(item)

    def DoItemExpanding(self, item):
        """Handle when an item is expanding
        @param item: TreeItem
        """
        d = self.GetPyData(item)
        if d and os.path.exists(d):
            contents = FileTree.GetDirContents(d)
            for p in contents:
                self.AppendFileNode(item, p)

    def DoShowMenu(self, item):
        """Context menu has been requested for the given item.
        @param item: wx.TreeItem
        """
        pass

    def DoSetupImageList(self):
        """Add the images to the control's ImageList. It is guaranteed
        that self.ImageList is valid and empty when this is called.
        """
        # Image index 0: folder, 1: normal file, 2: unreadable (error icon);
        # must stay in sync with DoGetFileImage below.
        bmp = wx.ArtProvider.GetBitmap(wx.ART_FOLDER, wx.ART_MENU, (16,16))
        self.ImageList.Add(bmp)
        bmp = wx.ArtProvider.GetBitmap(wx.ART_NORMAL_FILE, wx.ART_MENU, (16,16))
        self.ImageList.Add(bmp)
        bmp = wx.ArtProvider.GetBitmap(wx.ART_ERROR, wx.ART_MENU, (16,16))
        self.ImageList.Add(bmp)

    def DoGetFileImage(self, path):
        """Get the index of the image from the image list to use
        for the file.
        @param path: Absolute path of file
        @return: long
        """
        # TODO: image handling
        if not os.access(path, os.R_OK):
            img = 2
        else:
            if os.path.isdir(path):
                img = 0 # Directory image
            else:
                img = 1 # Normal file image
        return img

    #---- End Overrides ----#

    #---- Properties ----#

    WatchDirs = property(lambda self: self._watch)

    #---- FileTree Api ---#

    def AddWatchDirectory(self, dname):
        """Add a directory to the controls top level view
        @param dname: directory path
        @return: TreeItem or None
        @todo: add additional api for getting already existing nodes based
               on path.
        """
        assert os.path.exists(dname), "Path(%s) doesn't exist!" % dname
        if dname not in self._watch:
            self._watch.append(dname)
            return self.AppendFileNode(self.RootItem, dname)

    def RemoveWatchDirectory(self, dname):
        """Remove a directory from the watch list
        @param dname: directory path
        """
        if dname in self._watch:
            self._watch.remove(dname)
            nodes = self.GetChildNodes(self.RootItem)
            for node in nodes:
                data = self.GetPyData(node)
                if dname == data:
                    self.Delete(node)
                    break

    def SetupImageList(self):
        """Setup/Refresh the control's ImageList.
        Override DoSetupImageList to customize the behavior of this method.
        """
        if self._il:
            self._il.Destroy()
            self._il = None
        self._il = wx.ImageList(16, 16)
        self.SetImageList(self._il)
        self.DoSetupImageList()

    def AppendFileNode(self, item, path):
        """Append a child node to the tree
        @param item: TreeItem parent node
        @param path: path to add to node
        @return: new node
        """
        img = self.DoGetFileImage(path)
        name = os.path.basename(path)
        if not name:
            # e.g. a filesystem root like "/" has an empty basename
            name = path
        child = self.AppendItem(item, name, img)
        self.SetPyData(child, path)
        if os.path.isdir(path):
            self.SetItemHasChildren(child, True)
        return child

    def AppendFileNodes(self, item, paths):
        """Append a list of child node to the tree. This
        method can be used instead of looping on AppendFileNode
        to get slightly better performance for large sets.
        @param item: TreeItem parent node
        @param paths: list of file paths
        @return: None
        """
        # Hoist method lookups out of the loop for speed on large sets.
        getBaseName = os.path.basename
        isDir = os.path.isdir
        getImg = self.DoGetFileImage
        appendNode = self.AppendItem
        setData = self.SetPyData
        for path in paths:
            img = getImg(path)
            name = getBaseName(path)
            if not name:
                name = path
            child = appendNode(item, name, img)
            setData(child, path)
            if isDir(path):
                self.SetItemHasChildren(child, True)

    def GetChildNodes(self, parent):
        """Get all the TreeItemIds under the given parent
        @param parent: TreeItem
        @return: list of TreeItems
        """
        rlist = list()
        child, cookie = self.GetFirstChild(parent)
        if not child or not child.IsOk():
            return rlist
        rlist.append(child)
        while True:
            child, cookie = self.GetNextChild(parent, cookie)
            if not child or not child.IsOk():
                return rlist
            rlist.append(child)
        # NOTE(review): unreachable -- the while loop always returns above.
        return rlist

    def GetExpandedNodes(self):
        """Get all nodes that are currently expanded in the view
        this logically corresponds to all parent directory nodes which
        are expanded.
        @return: list of TreeItems
        """
        def NodeWalker(parent, rlist):
            """Recursively find expanded nodes
            @param parent: parent node
            @param rlist: list (outparam)
            """
            children = self.GetChildNodes(parent)
            for node in children:
                if self.IsExpanded(node):
                    rlist.append(node)
                    NodeWalker(node, rlist)
        nodes = list()
        NodeWalker(self.RootItem, nodes)
        return nodes

    def GetSelectedFiles(self):
        """Get a list of the selected files
        @return: list of strings
        """
        nodes = self.GetSelections()
        files = [ self.GetPyData(node) for node in nodes ]
        return files

    def EnableLabelEditing(self, enable=True):
        """Enable/Disable label editing. This functionality is
        enabled by default.
        @keyword enable: bool
        """
        self._editlabels = enable

    def SelectFile(self, filename):
        """Select the given path
        @param filename: full path to select
        @return: bool
        """
        # NOTE(review): bSelected is never updated or returned -- the method
        # actually returns None; confirm whether callers rely on the bool.
        bSelected = False
        # Find the root
        for node in self.GetChildNodes(self.RootItem):
            dname = self.GetPyData(node)
            if not os.path.isdir(dname):
                dname = os.path.dirname(dname)
            if not dname.endswith(os.sep):
                dname += os.sep
            if filename.startswith(dname):
                # Walk the remaining path components, expanding as we go.
                filename = filename[len(dname):].split(os.sep)
                if not self.IsExpanded(node):
                    self.Expand(node)
                folder = node
                try:
                    while filename:
                        name = filename.pop(0)
                        for item in self.GetChildNodes(folder):
                            if self.GetItemText(item) == name:
                                if not self.IsExpanded(item):
                                    self.Expand(item)
                                folder = item
                                # NOTE(review): `continue` keeps scanning the
                                # remaining siblings; `break` looks intended.
                                continue
                except:
                    pass
                self.UnselectAll()
                self.EnsureVisible(folder)
                self.SelectItem(folder)
                break

    #---- Static Methods ----#

    @staticmethod
    def GetDirContents(directory):
        """Get the list of files contained in the given directory"""
        assert os.path.isdir(directory)
        files = list()
        try:
            joinPath = os.path.join
            fappend = files.append
            fs_encoding = sys.getfilesystemencoding()
            for p in os.listdir(directory):
                fullpath = joinPath(directory, p)
                # Python 2 only: decode byte paths to unicode
                # (types.UnicodeType does not exist on Python 3).
                if type(fullpath) != types.UnicodeType:
                    fullpath = fullpath.decode(fs_encoding)
                fappend(fullpath)
        except OSError:
            pass
        return files

    def GetNodePaths(self, dirNode):
        """Get a list of paths contained below the given
        directory node.
        @param dirNode: wx.TreeItemId
        @return: list of paths
        """
        paths = list()
        if self.ItemHasChildren(dirNode):
            append = paths.append
            getData = self.GetPyData
            for node in self.GetChildNodes(dirNode):
                try:
                    append(getData(node))
                except wx.PyAssertionError:
                    pass
        return paths

    def GetPyData(self, item):
        """Get data from given tree item
        @param item: TreeItemId
        """
        data = None
        # avoid assertions in base class when retrieving data...
        if item and item.IsOk():
            try:
                data = super(FileTree, self).GetPyData(item)
            except wx.PyAssertionError:
                pass
        return data

    def SortParentDirectory(self, item):
        """Sort the parent directory of the given item"""
        parent = self.GetItemParent(item)
        if parent.IsOk():
            self.SortChildren(parent)
#-----------------------------------------------------------------------------#
# Test
if __name__ == '__main__':
    # Manual smoke test: show the user's home directory in a FileTree.
    test_app = wx.App(False)
    frame = wx.Frame(None)
    tree = FileTree(frame)
    tree.AddWatchDirectory(wx.GetUserHome())
    frame.Show()
    test_app.MainLoop()
/Grid2Op-1.9.3-py3-none-any.whl/grid2op/Chronics/fromNPY.py |
from typing import Optional, Union
import numpy as np
import hashlib
from datetime import datetime, timedelta
import grid2op
from grid2op.dtypes import dt_int
from grid2op.Chronics.gridValue import GridValue
from grid2op.Exceptions import ChronicsError
class FromNPY(GridValue):
"""
This class allows to generate some chronics compatible with grid2op if the data are provided in numpy format.
It also enables the use of the starting the chronics at different time than the original time and to end it before the end
of the chronics.
It is then much more flexible in its usage than the defaults chronics. But it is also much more error prone. For example, it does not check
the order of the loads / generators that you provide.
.. warning::
It assume the order of the elements are consistent with the powergrid backend ! It will not attempt to reorder the columns of the dataset
.. note::
The effect if "i_start" and "i_end" are persistant. If you set it once, it affects the object even after "env.reset()" is called.
If you want to modify them, you need to use the :func:`FromNPY.chronics.change_i_start` and :func:`FromNPY.chronics.change_i_end` methods
(and call `env.reset()`!)
TODO implement methods to change the loads / production "based on sampling" (online sampling instead of only reading data)
TODO implement the possibility to simulate maintenance / hazards "on the fly"
TODO implement hazards !
Examples
--------
Usage example, for what you don't really have to do:
.. code-block:: python
import grid2op
from grid2op.Chronics import FromNPY
# first retrieve the data that you want, the easiest wayt is to create an environment and read the data from it.
env_name = "l2rpn_case14_sandbox" # for example
env_ref = grid2op.make(env_name)
# retrieve the data
load_p = 1.0 * env_ref.chronics_handler.real_data.data.load_p
load_q = 1.0 * env_ref.chronics_handler.real_data.data.load_q
prod_p = 1.0 * env_ref.chronics_handler.real_data.data.prod_p
prod_v = 1.0 * env_ref.chronics_handler.real_data.data.prod_v
# now create an environment with these chronics:
env = grid2op.make(env_name,
chronics_class=FromNPY,
data_feeding_kwargs={"i_start": 5, # start at the "step" 5 NB first step is first observation, available with `obs = env.reset()`
"i_end": 18, # end index: data after that will not be considered (excluded as per python convention)
"load_p": load_p,
"load_q": load_q,
"prod_p": prod_p,
"prod_v": prod_v
# other parameters includes
# maintenance
# load_p_forecast
# load_q_forecast
# prod_p_forecast
# prod_v_forecast
})
# you can use env normally, including in runners
obs = env.reset()
# obs.load_p is load_p[5] (because you set "i_start" = 5, by default it's 0)
You can, after creation, change the data with:
.. code-block:: python
# create env as above
# retrieve some new values that you would like
new_load_p = ...
new_load_q = ...
new_prod_p = ...
new_prod_v = ...
# change the values
env.chronics_handler.real_data.change_chronics(new_load_p, new_load_q, new_prod_p, new_prod_v)
obs = env.reset() # mandatory if you want the change to be taken into account
# obs.load_p is new_load_p[5] (or rather load_p[env.chronics_handler.real_data._i_start])
.. seealso::
More usage examples in:
- :func:`FromNPY.change_chronics`
- :func:`FromNPY.change_forecasts`
- :func:`FromNPY.change_i_start`
- :func:`FromNPY.change_i_end`
Attributes
----------
TODO
"""
MULTI_CHRONICS = False
    def __init__(
        self,
        load_p: np.ndarray,
        load_q: np.ndarray,
        prod_p: np.ndarray,
        prod_v: Optional[np.ndarray] = None,
        hazards: Optional[np.ndarray] = None,
        maintenance: Optional[np.ndarray] = None,
        load_p_forecast: Optional[np.ndarray] = None,  # TODO forecasts !!
        load_q_forecast: Optional[np.ndarray] = None,
        prod_p_forecast: Optional[np.ndarray] = None,
        prod_v_forecast: Optional[np.ndarray] = None,
        time_interval: timedelta = timedelta(minutes=5),
        max_iter: int = -1,
        start_datetime: datetime = datetime(year=2019, month=1, day=1),
        chunk_size: Optional[int] = None,
        i_start: Optional[int] = None,
        i_end: Optional[int] = None,  # excluded, as always in python
        **kwargs
    ):
        """Build a chronics source from in-memory numpy arrays.

        All time-series arrays are indexed ``[time_step, element]`` and are
        assumed to be ordered consistently with the backend (no reordering
        is attempted). ``i_start``/``i_end`` bound the episode inside the
        arrays; forecast arrays, when given, build a nested FromNPY used one
        step ahead.
        """
        GridValue.__init__(
            self,
            time_interval=time_interval,
            max_iter=max_iter,
            start_datetime=start_datetime,
            chunk_size=chunk_size,
        )
        self._i_start: int = i_start if i_start is not None else 0
        # "__new_*" attributes stage values applied on the next call
        # to next_chronics() (i.e. at the next env.reset()).
        self.__new_istart: Optional[int] = i_start
        self.n_gen: int = prod_p.shape[1]
        self.n_load: int = load_p.shape[1]
        # n_line is unknown until maintenance data or the backend provides it
        self.n_line: Union[int, None] = None
        # multiply by 1.0 to both copy and force a float dtype
        self._load_p: np.ndarray = 1.0 * load_p
        self._load_q: np.ndarray = 1.0 * load_q
        self._prod_p: np.ndarray = 1.0 * prod_p

        self._prod_v = None
        if prod_v is not None:
            self._prod_v = 1.0 * prod_v

        self.__new_load_p: Optional[np.ndarray] = None
        self.__new_prod_p: Optional[np.ndarray] = None
        self.__new_prod_v: Optional[np.ndarray] = None
        self.__new_load_q: Optional[np.ndarray] = None

        self._i_end: int = i_end if i_end is not None else load_p.shape[0]
        self.__new_iend: Optional[int] = i_end

        self.has_maintenance = False
        self.maintenance = None
        self.maintenance_duration = None
        self.maintenance_time = None
        if maintenance is not None:
            self.has_maintenance = True
            self.n_line = maintenance.shape[1]
            assert load_p.shape[0] == maintenance.shape[0]
            self.maintenance = maintenance  # TODO copy

            # precompute, per line, the time until the next maintenance and
            # its duration for every time step
            self.maintenance_time = (
                np.zeros(shape=(self.maintenance.shape[0], self.n_line), dtype=dt_int)
                - 1
            )
            self.maintenance_duration = np.zeros(
                shape=(self.maintenance.shape[0], self.n_line), dtype=dt_int
            )
            for line_id in range(self.n_line):
                self.maintenance_time[:, line_id] = self.get_maintenance_time_1d(
                    self.maintenance[:, line_id]
                )
                self.maintenance_duration[
                    :, line_id
                ] = self.get_maintenance_duration_1d(self.maintenance[:, line_id])

        self.has_hazards = False
        self.hazards = None
        self.hazard_duration = None
        if hazards is not None:
            # hazards are not implemented yet, refuse them explicitly
            raise ChronicsError(
                "This feature is not available at the moment. Fill a github issue at "
                "https://github.com/rte-france/Grid2Op/issues/new?assignees=&labels=enhancement&template=feature_request.md&title="
            )
            # self.has_hazards = True
            # if self.n_line is None:
            #     self.n_line = hazards.shape[1]
            # else:
            #     assert self.n_line == hazards.shape[1]
            # assert load_p.shape[0] == hazards.shape[0]

            # self.hazards = hazards  # TODO copy !
            # self.hazard_duration = np.zeros(shape=(self.hazards.shape[0], self.n_line), dtype=dt_int)
            # for line_id in range(self.n_line):
            #     self.hazard_duration[:, line_id] = self.get_hazard_duration_1d(self.hazards[:, line_id])

        self._forecasts = None
        if load_p_forecast is not None:
            assert load_q_forecast is not None
            assert prod_p_forecast is not None
            # forecasts are stored as a nested FromNPY (without forecasts of
            # their own) read one step ahead of the main data
            self._forecasts = FromNPY(
                load_p=load_p_forecast,
                load_q=load_q_forecast,
                prod_p=prod_p_forecast,
                prod_v=prod_v_forecast,
                load_p_forecast=None,
                load_q_forecast=None,
                prod_p_forecast=None,
                prod_v_forecast=None,
                i_start=i_start,
                i_end=i_end,
            )
        elif load_q_forecast is not None:
            raise ChronicsError(
                "if load_q_forecast is not None, then load_p_forecast should not be None"
            )
        elif prod_p_forecast is not None:
            raise ChronicsError(
                "if prod_p_forecast is not None, then load_p_forecast should not be None"
            )
def initialize(
self,
order_backend_loads,
order_backend_prods,
order_backend_lines,
order_backend_subs,
names_chronics_to_backend=None,
):
assert len(order_backend_prods) == self.n_gen
assert len(order_backend_loads) == self.n_load
if self.n_line is None:
self.n_line = len(order_backend_lines)
else:
assert len(order_backend_lines) == self.n_line
if self._forecasts is not None:
self._forecasts.initialize(
order_backend_loads,
order_backend_prods,
order_backend_lines,
order_backend_subs,
names_chronics_to_backend,
)
self.maintenance_time_nomaint = np.zeros(shape=(self.n_line,), dtype=dt_int) - 1
self.maintenance_duration_nomaint = np.zeros(shape=(self.n_line,), dtype=dt_int)
self.hazard_duration_nohaz = np.zeros(shape=(self.n_line,), dtype=dt_int)
self.curr_iter = 0
self.current_index = self._i_start - 1
def _get_long_hash(self, hash_: hashlib.blake2b = None):
# get the "long hash" from blake2b
if hash_ is None:
hash_ = (
hashlib.blake2b()
) # should be faster than md5 ! (and safer, but we only care about speed here)
hash_.update(self._load_p.tobytes())
hash_.update(self._load_q.tobytes())
hash_.update(self._prod_p.tobytes())
if self._prod_v is not None:
hash_.update(self._prod_v.tobytes())
if self.maintenance is not None:
hash_.update(self.maintenance.tobytes())
if self.hazards is not None:
hash_.update(self.hazards.tobytes())
if self._forecasts:
self._forecasts._get_long_hash(hash_)
return hash_.digest()
def get_id(self) -> str:
"""
To return a unique ID of the chronics, we use a hash function (black2b), but it outputs a name too big (64 characters or so).
So we hash it again with md5 to get a reasonable length id (32 characters)
Returns:
str: the hash of the arrays (load_p, load_q, etc.) in the chronics
"""
long_hash_byte = self._get_long_hash()
# now shorten it with md5
short_hash = hashlib.md5(long_hash_byte)
return short_hash.hexdigest()
@staticmethod
def _create_dict_inj(res, obj_with_inj_data):
dict_ = {}
prod_v = None
if obj_with_inj_data._load_p is not None:
dict_["load_p"] = 1.0 * obj_with_inj_data._load_p[obj_with_inj_data.current_index, :]
if obj_with_inj_data._load_q is not None:
dict_["load_q"] = 1.0 * obj_with_inj_data._load_q[obj_with_inj_data.current_index, :]
array_gen_p = obj_with_inj_data._gen_p if hasattr(obj_with_inj_data, "_gen_p") else obj_with_inj_data._prod_p
if array_gen_p is not None:
dict_["prod_p"] = 1.0 * array_gen_p[obj_with_inj_data.current_index, :]
array_gen_v = obj_with_inj_data._gen_v if hasattr(obj_with_inj_data, "_gen_v") else obj_with_inj_data._prod_v
if array_gen_v is not None:
prod_v = 1.0 * array_gen_v[obj_with_inj_data.current_index, :]
if dict_:
res["injection"] = dict_
return prod_v
@staticmethod
def _create_dict_maintenance_hazards(res, obj_with_inj_data):
if obj_with_inj_data.maintenance is not None and obj_with_inj_data.has_maintenance:
res["maintenance"] = obj_with_inj_data.maintenance[obj_with_inj_data.current_index, :]
if obj_with_inj_data.hazards is not None and obj_with_inj_data.has_hazards:
res["hazards"] = obj_with_inj_data.hazards[obj_with_inj_data.current_index, :]
if (
obj_with_inj_data.maintenance_time is not None
and obj_with_inj_data.maintenance_duration is not None
and obj_with_inj_data.has_maintenance
):
maintenance_time = dt_int(1 * obj_with_inj_data.maintenance_time[obj_with_inj_data.current_index, :])
maintenance_duration = dt_int(
1 * obj_with_inj_data.maintenance_duration[obj_with_inj_data.current_index, :]
)
else:
maintenance_time = obj_with_inj_data.maintenance_time_nomaint
maintenance_duration = obj_with_inj_data.maintenance_duration_nomaint
if obj_with_inj_data.hazard_duration is not None and obj_with_inj_data.has_hazards:
hazard_duration = 1 * obj_with_inj_data.hazard_duration[obj_with_inj_data.current_index, :]
else:
hazard_duration = obj_with_inj_data.hazard_duration_nohaz
return maintenance_time, maintenance_duration, hazard_duration
def load_next(self):
self.current_index += 1
if (
self.current_index > self._i_end
or self.current_index >= self._load_p.shape[0]
):
raise StopIteration
res = {}
prod_v = FromNPY._create_dict_inj(res, self)
maintenance_time, maintenance_duration, hazard_duration = FromNPY._create_dict_maintenance_hazards(res, self)
self.current_datetime += self.time_interval
self.curr_iter += 1
return (
self.current_datetime,
res,
maintenance_time,
maintenance_duration,
hazard_duration,
prod_v,
)
def check_validity(
self, backend: Optional["grid2op.Backend.backend.Backend"]
) -> None:
# TODO raise the proper errors from ChronicsError here rather than AssertError
assert self._load_p.shape[0] == self._load_q.shape[0]
assert self._load_p.shape[0] == self._prod_p.shape[0]
if self._prod_v is not None:
assert self._load_p.shape[0] == self._prod_v.shape[0]
if self.hazards is not None:
assert self.hazards.shape[1] == self.n_line
if self.maintenance is not None:
assert self.maintenance.shape[1] == self.n_line
if self.maintenance_duration is not None:
assert self.n_line == self.maintenance_duration.shape[1]
if self.maintenance_time is not None:
assert self.n_line == self.maintenance_time.shape[1]
# TODO forecast
if self._forecasts is not None:
assert self._forecasts.n_line == self.n_line
assert self._forecasts.n_gen == self.n_gen
assert self._forecasts.n_load == self.n_load
assert self._load_p.shape[0] == self._forecasts._load_p.shape[0]
assert self._load_q.shape[0] == self._forecasts._load_q.shape[0]
assert self._prod_p.shape[0] == self._forecasts._prod_p.shape[0]
if self._prod_v is not None and self._forecasts._prod_v is not None:
assert self._prod_v.shape[0] == self._forecasts._prod_v.shape[0]
self._forecasts.check_validity(backend=backend)
    def next_chronics(self):
        """Restart the chronics from the beginning, applying pending changes.
        Any data scheduled through :meth:`change_chronics`,
        :meth:`change_i_start` or :meth:`change_i_end` is swapped in here
        (and the corresponding "pending" slot cleared), then the whole
        dataset is re-validated.
        """
        # restart the chronics: read it again !
        self.current_datetime = self.start_datetime
        self.curr_iter = 0
        if self.__new_istart is not None:
            self._i_start = self.__new_istart
        else:
            self._i_start = 0
        self.current_index = self._i_start
        # Swap in any pending arrays, clearing the pending slot afterwards
        # so the change is applied exactly once.
        if self.__new_load_p is not None:
            self._load_p = self.__new_load_p
            self.__new_load_p = None
        if self.__new_load_q is not None:
            self._load_q = self.__new_load_q
            self.__new_load_q = None
        if self.__new_prod_p is not None:
            self._prod_p = self.__new_prod_p
            self.__new_prod_p = None
        if self.__new_prod_v is not None:
            self._prod_v = self.__new_prod_v
            self.__new_prod_v = None
        # The end index defaults to the (possibly new) data length.
        if self.__new_iend is None:
            self._i_end = self._load_p.shape[0]
        else:
            self._i_end = self.__new_iend
        if self._forecasts is not None:
            # update the forecast
            self._forecasts.next_chronics()
        self.check_validity(backend=None)
def done(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Compare to :func:`GridValue.done` an episode can be over for 2 main reasons:
- :attr:`GridValue.max_iter` has been reached
- There are no data in the numpy array.
- i_end has been reached
The episode is done if one of the above condition is met.
Returns
-------
res: ``bool``
Whether the episode has reached its end or not.
"""
res = False
if (
self.current_index >= self._i_end
or self.current_index >= self._load_p.shape[0]
):
res = True
elif self._max_iter > 0:
if self.curr_iter > self._max_iter:
res = True
return res
def forecasts(self):
"""
By default, forecasts are only made 1 step ahead.
We could change that. Do not hesitate to make a feature request
(https://github.com/rte-france/Grid2Op/issues/new?assignees=&labels=enhancement&template=feature_request.md&title=) if that is necessary for you.
"""
if self._forecasts is None:
return []
self._forecasts.current_index = self.current_index - 1
dt, dict_, *rest = self._forecasts.load_next()
return [(self.current_datetime + self.time_interval, dict_)]
def change_chronics(
self,
new_load_p: np.ndarray = None,
new_load_q: np.ndarray = None,
new_prod_p: np.ndarray = None,
new_prod_v: np.ndarray = None,
):
"""
Allows to change the data used by this class.
.. warning::
This has an effect only after "env.reset" has been called !
Args:
new_load_p (np.ndarray, optional): change the load_p. Defaults to None (= do not change).
new_load_q (np.ndarray, optional): change the load_q. Defaults to None (= do not change).
new_prod_p (np.ndarray, optional): change the prod_p. Defaults to None (= do not change).
new_prod_v (np.ndarray, optional): change the prod_v. Defaults to None (= do not change).
Examples
---------
.. code-block:: python
import grid2op
from grid2op.Chronics import FromNPY
# create an environment as in this class description (in short: )
load_p = ... # find somehow a suitable "load_p" array: rows represent time, columns the individual load
load_q = ...
prod_p = ...
prod_v = ...
# now create an environment with these chronics:
env = grid2op.make(env_name,
chronics_class=FromNPY,
data_feeding_kwargs={"load_p": load_p,
"load_q": load_q,
"prod_p": prod_p,
"prod_v": prod_v}
)
obs = env.reset() # obs.load_p is load_p[0] (or rather load_p[env.chronics_handler.real_data._i_start])
new_load_p = ... # find somehow a new suitable "load_p"
new_load_q = ...
new_prod_p = ...
new_prod_v = ...
env.chronics_handler.real_data.change_chronics(new_load_p, new_load_q, new_prod_p, new_prod_v)
# has no effect at this stage
obs = env.reset() # now has some effect !
# obs.load_p is new_load_p[0] (or rather load_p[env.chronics_handler.real_data._i_start])
"""
if new_load_p is not None:
self.__new_load_p = 1.0 * new_load_p
if new_load_q is not None:
self.__new_load_q = 1.0 * new_load_q
if new_prod_p is not None:
self.__new_prod_p = 1.0 * new_prod_p
if new_prod_v is not None:
self.__new_prod_v = 1.0 * new_prod_v
    def change_forecasts(
        self,
        new_load_p: np.ndarray = None,
        new_load_q: np.ndarray = None,
        new_prod_p: np.ndarray = None,
        new_prod_v: np.ndarray = None,
    ):
        """
        Allows to change the data used by this class in the "obs.simulate" function.
        .. warning::
            This has an effect only after "env.reset" has been called !
        Args:
            new_load_p (np.ndarray, optional): change the load_p_forecast. Defaults to None (= do not change).
            new_load_q (np.ndarray, optional): change the load_q_forecast. Defaults to None (= do not change).
            new_prod_p (np.ndarray, optional): change the prod_p_forecast. Defaults to None (= do not change).
            new_prod_v (np.ndarray, optional): change the prod_v_forecast. Defaults to None (= do not change).
        Raises:
            ChronicsError: if this chronics was created without forecast data.
        Examples
        ---------
        .. code-block:: python
            import grid2op
            from grid2op.Chronics import FromNPY
            # create an environment as in this class description (in short: )
            load_p = ... # find somehow a suitable "load_p" array: rows represent time, columns the individual load
            load_q = ...
            prod_p = ...
            prod_v = ...
            load_p_forecast = ...
            load_q_forecast = ...
            prod_p_forecast = ...
            prod_v_forecast = ...
            env = grid2op.make(env_name,
                               chronics_class=FromNPY,
                               data_feeding_kwargs={"load_p": load_p,
                                                    "load_q": load_q,
                                                    "prod_p": prod_p,
                                                    "prod_v": prod_v,
                                                    "load_p_forecast": load_p_forecast,
                                                    "load_q_forecast": load_q_forecast,
                                                    "prod_p_forecast": prod_p_forecast,
                                                    "prod_v_forecast": prod_v_forecast
                                                   })
            new_load_p_forecast = ... # find somehow a new suitable "load_p"
            new_load_q_forecast = ...
            new_prod_p_forecast = ...
            new_prod_v_forecast = ...
            env.chronics_handler.real_data.change_forecasts(new_load_p_forecast, new_load_q_forecast, new_prod_p_forecast, new_prod_v_forecast)
            # has no effect at this stage
            obs = env.reset()  # now has some effect !
            sim_o, *_ = obs.simulate()  # sim_o.load_p has the values of new_load_p_forecast[0]
        """
        if self._forecasts is None:
            raise ChronicsError(
                "You cannot change the forecast for this chronics are there are no forecasts enabled"
            )
        # Delegate to the inner chronics that backs obs.simulate(); the
        # change is applied at the next env.reset() like change_chronics.
        self._forecasts.change_chronics(
            new_load_p=new_load_p,
            new_load_q=new_load_q,
            new_prod_p=new_prod_p,
            new_prod_v=new_prod_v,
        )
def max_timestep(self):
if self._max_iter >= 0:
return min(self._max_iter, self._load_p.shape[0], self._i_end)
return min(self._load_p.shape[0], self._i_end)
def change_i_start(self, new_i_start: Union[int, None]):
"""
Allows to change the "i_start".
.. warning::
It has only an affect after "env.reset()" is called.
Examples
--------
.. code-block:: python
import grid2op
from grid2op.Chronics import FromNPY
# create an environment as in this class description (in short: )
load_p = ... # find somehow a suitable "load_p" array: rows represent time, columns the individual load
load_q = ...
prod_p = ...
prod_v = ...
# now create an environment with these chronics:
env = grid2op.make(env_name,
chronics_class=FromNPY,
data_feeding_kwargs={"load_p": load_p,
"load_q": load_q,
"prod_p": prod_p,
"prod_v": prod_v}
)
obs = env.reset() # obs.load_p is load_p[0] (or rather load_p[env.chronics_handler.real_data._i_start])
env.chronics_handler.real_data.change_i_start(10)
obs = env.reset() # obs.load_p is load_p[10]
# indeed `env.chronics_handler.real_data._i_start` has been changed to 10.
# to undo all changes (and use the defaults) you can:
# env.chronics_handler.real_data.change_i_start(None)
"""
if new_i_start is not None:
self.__new_istart = int(new_i_start)
else:
self.__new_istart = None
    def change_i_end(self, new_i_end: Union[int, None]):
        """
        Allows to change the "i_end".
        .. warning::
            It has only an affect after "env.reset()" is called.
        Passing ``None`` restores the default (use the full data length).
        Examples
        --------
        .. code-block:: python
            import grid2op
            from grid2op.Chronics import FromNPY
            # create an environment as in this class description (in short: )
            load_p = ... # find somehow a suitable "load_p" array: rows represent time, columns the individual load
            load_q = ...
            prod_p = ...
            prod_v = ...
            # now create an environment with these chronics:
            env = grid2op.make(env_name,
                               chronics_class=FromNPY,
                               data_feeding_kwargs={"load_p": load_p,
                                                    "load_q": load_q,
                                                    "prod_p": prod_p,
                                                    "prod_v": prod_v}
                              )
            obs = env.reset()
            env.chronics_handler.real_data.change_i_end(150)
            obs = env.reset()
            # indeed `env.chronics_handler.real_data._i_end` has been changed to 150.
            # scenario length will be at best 150 !
            # to undo all changes (and use the defaults) you can:
            # env.chronics_handler.real_data.change_i_end(None)
        """
        if new_i_end is not None:
            self.__new_iend = int(new_i_end)
        else:
            self.__new_iend = None
/NehorayRapid1-0.0.1-py3-none-any.whl/mmedit/models/losses/feature_loss.py | import torch
import torch.nn as nn
from mmcv.runner import load_checkpoint
from mmedit.models.components.discriminators import LightCNN
from mmedit.utils import get_root_logger
from ..registry import LOSSES
class LightCNNFeature(nn.Module):
    """Feature of LightCNN.
    It is used to train DICGAN.
    """
    def __init__(self) -> None:
        super().__init__()
        # Reuse the convolutional feature extractor of a LightCNN(3)
        # discriminator; the rest of the network is discarded.
        model = LightCNN(3)
        self.features = nn.Sequential(*list(model.features.children()))
        # Frozen: this module only serves as a fixed feature extractor,
        # it is never trained itself.
        self.features.requires_grad_(False)
    def forward(self, x):
        """Forward function.
        Args:
            x (Tensor): Input tensor.
        Returns:
            Tensor: Forward results.
        """
        return self.features(x)
    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.
        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained model.
                Defaults to True.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=strict, logger=logger)
        elif pretrained is not None:
            raise TypeError(f'"pretrained" must be a str or None. '
                            f'But received {type(pretrained)}.')
@LOSSES.register_module()
class LightCNNFeatureLoss(nn.Module):
    """Feature loss of DICGAN, based on LightCNN.
    Args:
        pretrained (str): Path for pretrained weights.
        loss_weight (float): Loss weight. Default: 1.0.
        criterion (str): Criterion type. Options are 'l1' and 'mse'.
            Default: 'l1'.
    """
    def __init__(self, pretrained, loss_weight=1.0, criterion='l1'):
        super().__init__()
        self.model = LightCNNFeature()
        assert isinstance(pretrained, str), 'Model must be pretrained'
        self.model.init_weights(pretrained)
        # Keep the reference network in eval mode so the loss target is fixed.
        self.model.eval()
        self.loss_weight = loss_weight
        if criterion == 'l1':
            self.criterion = torch.nn.L1Loss()
        elif criterion == 'mse':
            self.criterion = torch.nn.MSELoss()
        else:
            raise ValueError("'criterion' should be 'l1' or 'mse', "
                             f'but got {criterion}')
    def forward(self, pred, gt):
        """Forward function.
        Args:
            pred (Tensor): Predicted tensor.
            gt (Tensor): GT tensor.
        Returns:
            Tensor: Forward results.
        """
        # The feature extractor must not be in training mode.
        assert self.model.training is False
        pred_feature = self.model(pred)
        # Gradients flow only through `pred`; target features are detached.
        gt_feature = self.model(gt).detach()
        feature_loss = self.criterion(pred_feature, gt_feature)
        return feature_loss * self.loss_weight
/AssertionLib-3.2.2-py3-none-any.whl/assertionlib/dataclass.py |
import textwrap
import copy
from abc import ABCMeta
from functools import wraps
from threading import get_ident
from typing import (
Any, Dict, Set, Iterable, Tuple, ClassVar, NoReturn, cast, Iterator, Union,
Callable, Optional, Mapping, TypeVar, Type
)
__all__ = ['AbstractDataClass']
T = TypeVar('T')
TT = TypeVar('TT', bound='_MetaADC')
AT = TypeVar('AT', bound='AbstractDataClass')
FT = TypeVar('FT', bound=Callable[..., Any])
def recursion_safeguard(fallback: FT) -> Callable[[FT], FT]:
    """Decorate a function such that it calls **fallback** in case of recursive calls.
    Implementation based on :func:`reprlib.recursive_repr`.
    """
    def decorating_function(user_function: FT) -> FT:
        # Set of (object id, thread id) pairs currently being evaluated.
        active: Set[Tuple[int, int]] = set()
        @wraps(user_function)
        def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
            # Seeing the same (object, thread) pair again means the call
            # re-entered itself recursively: divert to the fallback.
            key = (id(self), get_ident())
            if key in active:
                return fallback(self, *args, **kwargs)
            active.add(key)
            try:
                return user_function(self, *args, **kwargs)
            finally:
                active.discard(key)
        return cast(FT, wrapper)
    return decorating_function
class _MetaADC(ABCMeta):
    """Metaclass that installs ``__hash__`` on :class:`AbstractDataClass` subclasses.
    Depending on the ``_HASHABLE`` class variable it installs either a raising
    stub (:meth:`_hash_template1`) or a recursion-safe hash
    (:meth:`_hash_template2`).
    """
    _HASHABLE: bool = NotImplemented
    def __new__(mcls: Type[TT], name: str, bases: Tuple[type, ...],
                namespace: Dict[str, Any]) -> TT:
        cls = super().__new__(mcls, name, bases, namespace)
        if not cls._HASHABLE:
            setattr(cls, '__hash__', mcls._hash_template1)
        else:
            # Bug fix: the recursion fallback must be the *hash* fallback
            # (returns an int); previously `_repr_fallback` (returns a str)
            # was used, so a recursive __hash__ call raised
            # "TypeError: __hash__ method should return an integer".
            func = recursion_safeguard(cls._hash_fallback)(mcls._hash_template2)  # type: ignore
            setattr(cls, '__hash__', func)
        return cls
    @staticmethod
    def _hash_template1(self: 'AbstractDataClass') -> NoReturn:
        """Unhashable type; raise a :exc:`TypeError`."""
        raise TypeError(f"Unhashable type: {self.__class__.__name__!r}")
    @staticmethod
    def _hash_template2(self: 'AbstractDataClass') -> int:
        """Return the hash of this instance.
        The returned hash is constructed from two components:
        * The hash of this instances' class type.
        * The hashes of all key/value pairs in this instances' (non-private) attributes.
        If an unhashable instance variable is encountered, *e.g.* a :class:`list`,
        then its :func:`id` is used for hashing.
        This method will raise a :exc:`TypeError` if the class attribute
        :attr:`AbstractDataClass._HASHABLE` is :data:`False`.
        See Also
        --------
        :attr:`AbstractDataClass._PRIVATE_ATTR`
            A set with the names of private instance variables.
        :attr:`AbstractDataClass._HASHABLE`
            Whether or not this class is hashable.
        :attr:`AbstractDataClass._hash_fallback`
            Fallback function for :meth:`AbstractDataClass.__hash__` incase of recursive calls.
        :attr:`AbstractDataClass._hash`
            An instance variable for caching the :func:`hash` of this instance.
        """
        # Return a cached hash
        if self._hash:
            return self._hash
        ret = hash(type(self))
        for k, v in self._iter_attrs():
            if k in self._PRIVATE_ATTR:
                continue
            try:
                ret ^= hash((k, v))
            except TypeError:
                # Unhashable value (e.g. a list): fall back to its identity.
                ret ^= hash((k, id(v)))
        # Cache the hash and return
        self._hash = ret
        return ret
class AbstractDataClass(metaclass=_MetaADC):
    """A dataclass with a number of generic pre-defined (magic) methods.
    Provides methods for:
    * String conversion: :meth:`AbstractDataClass.__repr__`.
    * Object comparisons: :meth:`AbstractDataClass.__eq__`.
    * Hash construction: :meth:`AbstractDataClass.__hash__`.
    * Copying: :meth:`AbstractDataClass.copy`, :meth:`AbstractDataClass.__copy__` and
      :meth:`AbstractDataClass.__deepcopy__`.
    * Dictionary interconversion: :meth:`AbstractDataClass.as_dict` and
      :meth:`AbstractDataClass.from_dict`.
    * Inheriting method docstrings and annotations: :meth:`AbstractDataClass.inherit_annotations`.
    Attributes
    ----------
    _PRIVATE_ATTR : :class:`frozenset` [:class:`str`] or :class:`set` [:class:`str`]
        A class variable with the names of private instance variable.
        These attributes will be excluded whenever calling :meth:`AbstractDataClass.as_dict`,
        printing or comparing objects.
        The set is unfrozen (and added as instance variables)
        the moment a class instance is initiated.
    _HASHABLE : :class:`bool`
        A class variable denoting whether or not class instances are hashable.
        The :attr:`AbstractDataClass.__hash__` method will be unavailable if :data:`False`.
    _hash : :class:`int`
        An attribute for caching the :func:`hash` of this instance.
        Only available if :attr:`AbstractDataClass._HASHABLE` is :data:`True`.
    """
    #: A :class:`frozenset` with the names of private instance variables.
    #: These attributes will be excluded whenever calling :meth:`AbstractDataClass.as_dict`,
    #: printing or comparing objects.
    _PRIVATE_ATTR: Set[str] = frozenset() # type: ignore
    #: Whether or not this class is hashable.
    #: If :data:`False`, raise a :exc:`TypeError` when calling :meth:`AbstractDataClass.__hash__`.
    _HASHABLE: ClassVar[bool] = True
    #: Empty slots which can be filled by subclasses.
    __slots__: Union[str, Iterable[str]] = ('__dict__',)
    __hash__: Callable[[type], Union[int, NoReturn]]
    def __init__(self) -> None:
        """Initialize a :class:`AbstractDataClass` instance."""
        # Assign cls._PRIVATE_ATTR as a (unfrozen) set to this instance as attribute
        cls = type(self)
        self._PRIVATE_ATTR = {'_PRIVATE_ATTR'}.union(cls._PRIVATE_ATTR)
        # Extra attributes in case the class is hashable
        if cls._HASHABLE:
            self._PRIVATE_ATTR.add('_hash')
            self._hash: int = 0
    def _iter_attrs(self) -> Iterator[Tuple[str, Any]]:
        """Return an iterator which iterates over this instance's attributes as key/value pairs.
        If all attributes are stored in the instance :attr:`~object.__dict__` then
        further alterations to this method are not necessary.
        If :attr:`~object.__slots__` are utilized for defining attributes then alterations
        will have to be made to this method, *e.g.*:
        .. code:: python
            >>> def _iter_attrs(self):
            ...     yield 'a', self.a
            ...     yield 'b', self.b
            ...     yield 'c', self.c
        """
        return iter(vars(self).items())
    def _hash_fallback(self) -> int:
        """Fallback function for :meth:`AbstractDataClass.__hash__` incase of recursive calls."""
        return id(self)
    def _repr_fallback(self) -> str:
        """Fallback function for :meth:`AbstractDataClass.__repr__` incase of recursive calls."""
        # Extract the "0x..." address part of object.__repr__'s output.
        return object.__repr__(self).rstrip('>').rsplit(maxsplit=1)[1]
    @recursion_safeguard(fallback=_repr_fallback) # type: ignore
    def __repr__(self) -> str:
        """Return a (machine readable) string representation of this instance.
        The string representation consists of this instances' class name in addition
        to all (non-private) instance variables.
        Returns
        -------
        :class:`str`
            A string representation of this instance.
        See Also
        --------
        :attr:`AbstractDataClass._PRIVATE_ATTR`
            A set with the names of private instance variables.
        :attr:`AbstractDataClass._repr_fallback`
            Fallback function for :meth:`AbstractDataClass.__repr__` incase of recursive calls.
        :meth:`AbstractDataClass._str_iterator`
            Return an iterable for the iterating over this instances' attributes.
        :meth:`AbstractDataClass._str`
            Returns a string representation of a single **key**/**value** pair.
        """
        try:
            width = max(len(k) for k, _ in self._str_iterator())
        except ValueError:  # Raised if this instance has no instance variables
            return f'{self.__class__.__name__}()'
        ret = ',\n'.join(self._str(k, v, width, 3+width) for k, v in self._str_iterator())
        indent = ' ' * 4
        return f'{self.__class__.__name__}(\n{textwrap.indent(ret, indent)}\n)'
    def _str_iterator(self) -> Iterable[Tuple[str, Any]]:
        """Return an iterable for the :meth:`AbstractDataClass.__repr__` method."""
        return ((k, v) for k, v in sorted(self._iter_attrs()) if k not in self._PRIVATE_ATTR)
    @staticmethod
    def _str(key: str, value: Any,
             width: Optional[int] = None,
             indent: Optional[int] = None) -> str:
        """Return a string representation of a single **key**/**value** pair."""
        key_str = f'{key} = ' if width is None else f'{key:{width}} = '
        if indent is not None:
            # Indent multi-line reprs so continuation lines align with the value.
            value_str = textwrap.indent(repr(value), ' ' * indent)[indent:]
        else:
            value_str = repr(value)
        return f'{key_str}{value_str}'  # e.g.: "key = 'value'"
    def _eq_fallback(self, value: Any) -> bool:
        """Fallback function for :meth:`AbstractDataClass.__eq__` incase of recursive calls."""
        return self is value
    @recursion_safeguard(fallback=_eq_fallback) # type: ignore
    def __eq__(self, value: Any) -> bool:
        """Check if this instance is equivalent to **value**.
        The comparison checks if the class type of this instance and **value** are identical
        and if all (non-private) instance variables are equivalent.
        Returns
        -------
        :class:`bool`
            Whether or not this instance and **value** are equivalent.
        See Also
        --------
        :attr:`AbstractDataClass._PRIVATE_ATTR`
            A set with the names of private instance variables.
        :attr:`AbstractDataClass._eq`
            Return if **v1** and **v2** are equivalent.
        :attr:`AbstractDataClass._eq_fallback`
            Fallback function for :meth:`AbstractDataClass.__eq__` incase of recursive calls.
        """
        # Compare instance types
        if type(self) is not type(value):
            return False
        # Compare instance variables
        try:
            for k, v1 in self._iter_attrs():
                if k in self._PRIVATE_ATTR:
                    continue
                v2 = getattr(value, k)
                assert self._eq(v1, v2)
        except (AttributeError, AssertionError):
            return False
        else:
            return True
    @staticmethod
    def _eq(v1: Any, v2: Any) -> bool:
        """Return if **v1** and **v2** are equivalent."""
        return cast(bool, v1 == v2)
    def copy(self: AT, deep: bool = False) -> AT:
        """Return a shallow or deep copy of this instance.
        Parameters
        ----------
        deep : :class:`bool`
            Whether or not to return a deep or shallow copy.
        Returns
        -------
        :class:`AbstractDataClass`
            A new instance constructed from this instance.
        """
        def return_arg(arg: T) -> T:
            return arg
        # Shallow copy: attribute values are shared with the original.
        copy_func = cast(Callable[[T], T], copy.deepcopy if deep else return_arg)
        cls = type(self)
        ret: AT = cls.__new__(cls)
        for k, v in self._iter_attrs():
            setattr(ret, k, copy_func(v))
        return ret
    def __copy__(self: AT) -> AT:
        """Return a shallow copy of this instance; see :meth:`AbstractDataClass.copy`."""
        return self.copy(deep=False)
    def __deepcopy__(self: AT, memo: Optional[Dict[int, Any]] = None) -> AT:
        """Return a deep copy of this instance; see :meth:`AbstractDataClass.copy`."""
        return self.copy(deep=True)
    def as_dict(self, return_private: bool = False) -> Dict[str, Any]:
        """Construct a dictionary from this instance with all non-private instance variables.
        The returned dictionary values are shallow copies.
        Parameters
        ----------
        return_private : :class:`bool`
            If :data:`True`, return both public and private instance variables.
            Private instance variables are defined in :data:`AbstractDataClass._PRIVATE_ATTR`.
        Returns
        -------
        :class:`dict` [:class:`str`, :data:`~typing.Any`]
            A dictionary with keyword arguments for initializing a new
            instance of this class.
        See Also
        --------
        :meth:`AbstractDataClass.from_dict`:
            Construct a instance of this objects' class from a dictionary with keyword arguments.
        :attr:`AbstractDataClass._PRIVATE_ATTR`:
            A set with the names of private instance variables.
        """
        if return_private:
            return {k: copy.copy(v) for k, v in self._iter_attrs()}
        else:
            return {k: copy.copy(v) for k, v in self._iter_attrs() if k not in self._PRIVATE_ATTR}
    @classmethod
    def from_dict(cls: Type[AT], dct: Mapping[str, Any]) -> AT:
        """Construct a instance of this objects' class from a dictionary with keyword arguments.
        Parameters
        ----------
        dct : :class:`~collections.abc.Mapping` [:class:`str`, :data:`~typing.Any`]
            A dictionary with keyword arguments for constructing a new
            :class:`AbstractDataClass` instance.
        Returns
        -------
        :class:`AbstractDataClass`
            A new instance of this object's class constructed from **dct**.
        See Also
        --------
        :meth:`AbstractDataClass.as_dict`
            Construct a dictionary from this instance with all non-private instance variables.
        """
        return cls(**dct)
    @classmethod
    def inherit_annotations(cls) -> Callable[[FT], FT]:
        """A decorator for inheriting annotations and docstrings.
        Can be applied to methods of :class:`AbstractDataClass` subclasses to automatically
        inherit the docstring and annotations of identical-named functions of its superclass.
        References to :class:`AbstractDataClass` are replaced with ones pointing to the
        respective subclass.
        Returns
        -------
        :class:`type`
            A decorator for updating the annotations and docstring of a callable.
        Examples
        --------
        .. code:: python
            >>> class SubClass(AbstractDataClass):
            ...
            ...     @AbstractDataClass.inherit_annotations()
            ...     def __copy__(self): pass
            >>> print(SubClass.__copy__.__doc__)
            Return a shallow copy of this instance; see :meth:`SubClass.copy`.
            >>> print(SubClass.__copy__.__annotations__)
            {'self': ~AT, 'return': ~AT}
        """
        def decorator(func: FT) -> FT:
            cls_func: str = getattr(cls, func.__name__)
            sub_cls_name: str = func.__qualname__.split('.')[0]
            # Update annotations
            if not getattr(func, '__annotations__', None):
                func.__annotations__ = dct = getattr(cls_func, '__annotations__', {}).copy()
                if dct.get('return') in {cls, cls.__name__}:
                    dct['return'] = sub_cls_name
            # Update docstring
            if func.__doc__ is None and cls_func.__doc__ is not None:
                doc_new = cls_func.__doc__.replace(cls.__name__, sub_cls_name)
                func.__doc__ = doc_new
            return func
        return decorator
/32blit-0.7.3-py3-none-any.whl/ttblit/core/struct.py | import binascii
from construct import (Adapter, Bytes, Checksum, Const, GreedyBytes, Int8ul,
Int16ul, Int32ub, Int32ul, Optional, PaddedString,
Prefixed, PrefixedArray, RawCopy, Rebuild, Struct, len_,
this)
from .compression import ImageCompressor
class PaletteCountAdapter(Adapter):
    """Map a stored palette count of 0 to 256 entries (and back).
    A full 256-colour palette does not fit in a single byte, so the
    on-disk format encodes it as 0.
    """
    def _decode(self, stored, context, path):
        return 256 if stored == 0 else stored
    def _encode(self, count, context, path):
        return 0 if count == 256 else count
class ImageSizeAdapter(Adapter):
    """
    Adds the header and type size to the size field.
    The size field itself is already counted.
    """
    def _decode(self, raw_size, context, path):
        # Strip the 6-byte 'SPRITE' magic plus 2-byte type code (8 bytes).
        return raw_size - 8
    def _encode(self, payload_size, context, path):
        # Re-add the 8 header/type bytes when writing the size field.
        return payload_size + 8
# A single RGBA palette entry, one byte per channel.
struct_blit_pixel = Struct(
    'r' / Int8ul,
    'g' / Int8ul,
    'b' / Int8ul,
    'a' / Int8ul
)
# On-disk 32blit sprite: 'SPRITE' magic, 2-char type code, then a
# length-prefixed payload (size, palette, packed pixel data).
struct_blit_image_compressed = Struct(
    'header' / Const(b'SPRITE'),
    'type' / PaddedString(2, 'ASCII'),
    'data' / Prefixed(ImageSizeAdapter(Int32ul), Struct(
        'width' / Int16ul,
        'height' / Int16ul,
        'format' / Const(0x02, Int8ul),
        'palette' / PrefixedArray(PaletteCountAdapter(Int8ul), struct_blit_pixel),
        'pixels' / GreedyBytes,
    ), includelength=True)
)
# Wrapper that transparently (de)compresses the pixel data.
struct_blit_image = ImageCompressor(struct_blit_image_compressed)
# Game metadata blob; the checksum is computed over the game binary
# (this._._.bin.data) when building.
struct_blit_meta = Struct(
    'header' / Const(b'BLITMETA'),
    'data' / Prefixed(Int16ul, Struct(
        'checksum' / Checksum(
            Int32ul,
            lambda data: binascii.crc32(data),
            this._._.bin.data
        ),
        'date' / PaddedString(16, 'ascii'),
        'title' / PaddedString(25, 'ascii'),
        'description' / PaddedString(129, 'ascii'),
        'version' / PaddedString(17, 'ascii'),
        'author' / PaddedString(17, 'ascii'),
        Const(b'BLITTYPE'),
        'category' / PaddedString(17, 'ascii'),
        'url' / PaddedString(129, 'ascii'),
        'filetypes' / PrefixedArray(Int8ul, PaddedString(5, 'ascii')),
        'icon' / struct_blit_image,
        'splash' / struct_blit_image
    ))
)
# Same layout as struct_blit_meta, but with a plain checksum field: used
# when the metadata is parsed without the accompanying game binary.
struct_blit_meta_standalone = Struct(
    'header' / Const(b'BLITMETA'),
    'data' / Prefixed(Int16ul, Struct(
        'checksum' / Int32ul,
        'date' / PaddedString(16, 'ascii'),
        'title' / PaddedString(25, 'ascii'),
        'description' / PaddedString(129, 'ascii'),
        'version' / PaddedString(17, 'ascii'),
        'author' / PaddedString(17, 'ascii'),
        Const(b'BLITTYPE'),
        'category' / PaddedString(17, 'ascii'),
        'url' / PaddedString(129, 'ascii'),
        'filetypes' / PrefixedArray(Int8ul, PaddedString(5, 'ascii')),
        'icon' / struct_blit_image,
        'splash' / struct_blit_image
    ))
)
# Game binary header: entry-point offsets plus the raw binary data.
struct_blit_bin = Struct(
    'header' / Const(b'BLIT'),
    'render' / Int32ul,
    'update' / Int32ul,
    'init' / Int32ul,
    'length' / Int32ul,
    # The length above is actually the _flash_end symbol from startup_user.s
    # it includes the offset into 0x90000000 (external flash)
    # we mask out the highest nibble to correct this into the actual bin length
    # plus subtract 20 bytes for header, symbol and length dwords
    'bin' / Bytes((this.length & 0x0FFFFFFF) - 20)
)
# Relocation table: addresses to patch when loading at a different offset.
struct_blit_relo = Struct(
    'header' / Const(b'RELO'),
    'relocs' / PrefixedArray(Int32ul, Struct(
        'reloc' / Int32ul
    ))
)
# Full game image variants: relocations and metadata may or may not be present.
blit_game = Struct(
    'relo' / Optional(struct_blit_relo),
    'bin' / RawCopy(struct_blit_bin),
    'meta' / Optional(struct_blit_meta)
)
blit_game_with_meta = Struct(
    'relo' / Optional(struct_blit_relo),
    'bin' / RawCopy(struct_blit_bin),
    'meta' / struct_blit_meta
)
blit_game_with_meta_and_relo = Struct(
    'relo' / struct_blit_relo,
    'bin' / RawCopy(struct_blit_bin),
    'meta' / struct_blit_meta
)
# Apple icns container holding a single 128x128 PNG icon.
blit_icns = Struct(
    'header' / Const(b'icns'),
    'size' / Rebuild(Int32ub, len_(this.data) + 16),
    'type' / Const(b'ic07'), # 128×128 icon in PNG format
    'data_length' / Rebuild(Int32ub, len_(this.data) + 8),
    'data' / Bytes(this.data_length - 8)
)
# Binary-info record embedding a sprite (type 1 = raw data, tag '3B' = 32blit).
struct_blit_image_bi = Struct(
    'type' / Const(1, Int16ul), # raw data
    'tag' / Const(b'3B'), # 32blit tag
    'data' / struct_blit_image
)
/DrQueueIPython-0.0.1.tar.gz/DrQueueIPython-0.0.1/DrQueue/__init__.py | import platform
import os
import sys
import smtplib
import json
from email.mime.text import MIMEText
from .client import Client
from .job import Job
from .computer import Computer
from .computer_pool import ComputerPool
# Renderers for which a render template script exists (see get_rendertemplate()).
supported_renderers = ['3delight', '3dsmax', 'aftereffects', 'aqsis', 'blender', 'cinema4d', 'general', 'lightwave', 'luxrender', 'mantra', 'maya', 'mentalray', 'nuke', 'shake', 'terragen', 'turtle', 'vray', 'xsi']
# Operating system names as normalized by get_osname().
supported_os = ['Windows', 'Mac OSX', 'Linux', 'FreeBSD', 'NetBSD', 'OpenBSD', 'AIX', 'Solaris']
# check DrQueue environment
# Fail fast at import time: every DrQueue process needs these two variables.
if os.getenv('DRQUEUE_ROOT') is None:
    raise ValueError("DRQUEUE_ROOT environment variable not set")
if os.getenv('DRQUEUE_MASTER') is None:
    raise ValueError("DRQUEUE_MASTER environment variable not set")
def check_renderer_support(renderer):
    """Check if renderer is supported.
    Args:
        renderer (str): Renderer name, e.g. "blender".
    Returns:
        bool: True if the renderer is in the supported list.
    """
    # Return the membership test directly instead of the verbose
    # if/return True/return False pattern.
    return renderer in supported_renderers
def get_rendertemplate(renderer):
    """Return template filename from renderer name.
    Args:
        renderer (str): Renderer name, e.g. "blender".
    Returns:
        str: Template script filename (e.g. "blender_sg.py"), or "" if the
        renderer is unknown.
    """
    # Table lookup instead of the original 19-branch if chain; behavior is
    # identical, including returning "" for unknown renderers.
    templates = {
        '3delight': '3delight_sg.py',
        '3dsmax': '3dsmax_sg.py',
        'aftereffects': 'aftereffects_sg.py',
        'aqsis': 'aqsis_sg.py',
        'blender': 'blender_sg.py',
        'cinema4d': 'cinema4d_sg.py',
        'general': 'general_sg.py',
        'lightwave': 'lightwave_sg.py',
        'luxrender': 'luxrender_sg.py',
        'mantra': 'mantra_sg.py',
        'maya': 'maya_sg.py',
        'mentalray': 'mentalray_sg.py',
        'nuke': 'nuke_sg.py',
        # NOTE(review): 'pixie' has a template but is absent from
        # supported_renderers above — confirm whether that is intentional.
        'pixie': 'pixie_sg.py',
        'shake': 'shake_sg.py',
        'terragen': 'terragen_sg.py',
        'turtle': 'turtle_sg.py',
        'vray': 'vray_sg.py',
        'xsi': 'xsi_sg.py',
    }
    return templates.get(renderer, "")
def get_osname():
    """Return operating system name"""
    # platform.system() reports 'Darwin' on macOS; normalize it to the
    # name DrQueue uses everywhere else.
    system_name = platform.system()
    return 'Mac OSX' if system_name == 'Darwin' else system_name
def run_script_with_env(render_script, env_dict):
    """Run template script on IPython engine.
    Args:
        render_script (str): Filename of a render template module located in
            $DRQUEUE_ROOT/etc, e.g. "blender_sg.py".
        env_dict (dict): Job parameters passed to the template's
            run_renderer(). Mutated in place: DRQUEUE_OS, DRQUEUE_ETC and
            DRQUEUE_LOGFILE are (re)written here with engine-local values.
    Returns:
        The status value returned by the template's run_renderer().
    """
    import platform, os, sys, importlib
    # set some variables on target machine
    env_dict['DRQUEUE_OS'] = platform.system()
    env_dict['DRQUEUE_ETC'] = os.path.join(os.getenv('DRQUEUE_ROOT'), "etc")
    env_dict['DRQUEUE_LOGFILE'] = os.path.join(os.getenv('DRQUEUE_ROOT'), "logs", env_dict['DRQUEUE_LOGFILE'])
    # make the template directory importable, then import the template module
    sys.path.append(env_dict['DRQUEUE_ETC'])
    # Bug fix: the previous exec("import <name> as template") cannot bind a
    # local variable inside a function on Python 3, so referencing
    # `template` afterwards raised NameError. importlib does it properly.
    template = importlib.import_module(render_script.replace('.py', ''))
    # run template with env_dict
    return template.run_renderer(env_dict)
def check_deps(dep_dict):
    """Run all dependency checking functions."""
    checks = (
        ('os_name', engine_has_os),
        ('minram', engine_has_minram),
        ('mincores', engine_has_mincores),
        ('pool_name', engine_is_in_pool),
    )
    for key, check_func in checks:
        # NOTE: '== False' (rather than 'not ...') is deliberate: it
        # preserves the original semantics where a check returning None
        # does not count as a failure.
        if key in dep_dict and check_func(dep_dict[key]) == False:
            return False
    return True
def engine_is_in_pool(pool_name):
    """Check if engine belongs to certain pool.
    Args:
        pool_name (str): Name of the pool to test membership of.
    Returns:
        bool: True if this computer is a member of the pool, False otherwise.
    """
    computer_name = Computer.get_hostname()
    computers = ComputerPool.query_pool_members(pool_name)
    # Bug fix: the original assigned `belongs = True` and fell through,
    # returning None (falsy) even for pool members.
    return computer_name in computers
def engine_has_os(os_name):
    """Check if engine is running on certain OS.
    Args:
        os_name (str): OS name in DrQueue form (see get_osname()).
    Returns:
        bool: True if this engine runs the given OS.
    """
    # Direct comparison instead of the verbose if/return True/False pattern.
    return os_name == get_osname()
def engine_has_minram(minram):
    """Check if engine has at least minram GB RAM.
    Args:
        minram (int): Minimum required memory in GB.
    Returns:
        bool: True if this engine has at least `minram` GB of RAM.
    """
    return Computer.get_memory() >= minram
def engine_has_mincores(mincores):
    """Check if engine has at least mincores CPU cores.
    Args:
        mincores (int): Minimum required number of CPU cores.
    Returns:
        bool: True if total cores (CPUs x cores per CPU) >= mincores.
    """
    total_cores = Computer.get_ncpus() * Computer.get_ncorescpu()
    return total_cores >= mincores
def send_email(job_name, recipients):
    """Notify recipients about finish of job.
    Args:
        job_name (str): Name of the finished render job (used in subject/body).
        recipients (str): Recipient address(es) for the To: header.
    Reads SMTP settings from ~/.drqueue/email_config.json. If the
    configuration cannot be read or parsed, a message is printed and no
    email is sent. (Previously the code continued with an undefined file
    handle / config and crashed with a NameError.)
    """
    # load email configuration
    user_dir = os.path.expanduser("~")
    config_file = os.path.join(user_dir, ".drqueue", "email_config.json")
    try:
        with open(config_file, "rb") as fp:
            config = json.load(fp)
    except (IOError, OSError):
        print("Email configuration could not be loaded.")
        return
    except ValueError:
        # json.load raises ValueError (JSONDecodeError) on malformed input
        print("Email configuration could not be parsed.")
        return
    mail_from = config['from']
    body_text = "Your render job \"%s\" is finished." % job_name
    # Create a text/plain message
    msg = MIMEText(body_text)
    # subject, sender and recipients
    msg['Subject'] = "Job \"%s\" is finished" % job_name
    msg['From'] = mail_from
    msg['To'] = recipients
    if config['smtp_ssl'] == "1":
        # connect via SSL
        smtp = smtplib.SMTP_SSL(config['smtp_server'], int(config['smtp_port']))
    else:
        # connect without SSL
        smtp = smtplib.SMTP(config['smtp_server'], int(config['smtp_port']))
    try:
        # start TLS encryption
        if config['smtp_tls'] == "1":
            smtp.starttls()
        if config['smtp_auth'] == "1":
            # authenticate if required
            smtp.login(config['smtp_user'], config['smtp_passwd'])
        try:
            smtp.sendmail(msg['From'], msg['To'], msg.as_string())
        except smtplib.SMTPException:
            print("Email could not be sent.")
    finally:
        # Always close the connection, even if TLS/auth/send failed.
        smtp.quit()
/DB13-898998998.tar.gz/DB13-898998998/src/DIRACbenchmark/dirac_benchmark.py | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import sys
import random
import multiprocessing
VERSION = "1.0.0 DB12"
# Python 2/3 compatibility shim: alias the Python 2 built-ins used below
# (`long`, `xrange`, `urllib`) so the rest of the module runs unchanged
# on either interpreter.
if sys.version_info[0] >= 3:
    #pylint: disable = E, W, R, C
    long = int
    xrange = range
    import urllib.request
    urllib = urllib.request
else:
    import urllib
def single_dirac_benchmark(iterations_num=1, measured_copies=None):
    """Get Normalized Power of one CPU in DIRAC Benchmark 2012 units (DB12).

    Parameters:
        iterations_num: number of measured benchmarking iterations.
        measured_copies: optional multiprocessing shared counter of copies
            still being measured; when given, this copy keeps iterating
            (unmeasured) until all copies have finished, to avoid tail
            effects when copies run in parallel.

    Returns:
        dict with keys "CPU", "WALL", "NORM" (the DB12 score) and "UNIT",
        or None when no CPU time was consumed.
    """
    # This number of iterations corresponds to 1kHS2k.seconds, i.e. 250 HS06 seconds
    iters = int(1000 * 1000 * 12.5)
    calib = 250.0
    # Accumulators; `long` for Python 2 compatibility (aliased to int on 3).
    m_1 = long(0)
    m_2 = long(0)
    p_1 = 0
    p_2 = 0
    # Do one iteration extra to allow CPUs with variable speed (we ignore zeroth iteration)
    # Do one or more extra iterations to avoid tail effects when copies run in parallel
    it_1 = 0
    while (it_1 <= iterations_num) or (
        measured_copies is not None and measured_copies.value > 0
    ):
        if it_1 == 1:
            # Timing starts after the warm-up (zeroth) iteration.
            start = os.times()
        # Now the iterations
        for _j in xrange(iters):
            t_1 = random.normalvariate(10, 1)
            m_1 += t_1
            m_2 += t_1 * t_1
            p_1 += t_1
            p_2 += t_1 * t_1
        if it_1 == iterations_num:
            # Timing stops after the last measured iteration; further loops
            # only keep the CPU busy for still-measuring sibling copies.
            end = os.times()
            if measured_copies is not None:
                # Reduce the total of running copies by one
                measured_copies.value -= 1
        it_1 += 1
    cput = sum(end[:4]) - sum(start[:4])
    wall = end[4] - start[4]
    if not cput:
        return None
    # Return DIRAC-compatible values
    return {
        "CPU": cput,
        "WALL": wall,
        "NORM": calib * iterations_num / cput,
        "UNIT": "DB12",
    }
def single_dirac_benchmark_process(result_object, iterations_num=1, measured_copies=None):
    """Run single_dirac_benchmark() in a multiprocessing friendly way.

    Stores the DB12 "NORM" score in ``result_object.value`` so the parent
    process can read it back through a shared multiprocessing.Value.
    """
    outcome = single_dirac_benchmark(
        iterations_num=iterations_num, measured_copies=measured_copies
    )
    # Leave result_object untouched when the benchmark produced nothing usable.
    if outcome and "NORM" in outcome:
        result_object.value = outcome["NORM"]
def multiple_dirac_benchmark(copies=1, iterations_num=1, extra_iter=False):
    """Run multiple copies of the DIRAC Benchmark in parallel.

    Each copy runs in its own process and reports its DB12 score through a
    shared multiprocessing.Value. Returns the sorted raw per-copy scores
    plus summary statistics.
    """
    processes = []
    results = []
    if extra_iter:
        # If true, then we run one or more extra iterations in each
        # copy until the number still being meausured is zero.
        measured_copies = multiprocessing.Value("i", copies)
    else:
        measured_copies = None
    # Set up all the subprocesses
    for i in xrange(copies):
        results.append(multiprocessing.Value("d", 0.0))
        processes.append(
            multiprocessing.Process(
                target=single_dirac_benchmark_process,
                args=(results[i], iterations_num, measured_copies),
            )
        )
    # Start them all off at the same time
    for process in processes:
        process.start()
    # Wait for them all to finish
    for process in processes:
        process.join()
    raw = []
    product = 1.0
    for res in results:
        raw.append(res.value)
        product *= res.value
    raw.sort()
    # Return the list of raw results and various averages
    return {
        "raw": raw,
        "copies": copies,
        "sum": sum(raw),
        "arithmetic_mean": sum(raw) / copies,
        "geometric_mean": product ** (1.0 / copies),
        # Lower median when the number of copies is even.
        "median": raw[(copies - 1) // 2],
    }
def wholenode_dirac_benchmark(copies=None, iterations_num=1, extra_iter=False):
    """Run as many copies as needed to occupy the whole machine.

    When ``copies`` is None the number of (logical) CPUs visible to the
    operating system is used, falling back to 1 if it cannot be determined.
    """
    # If not given by caller then just count CPUs
    if copies is None:
        try:
            copies = multiprocessing.cpu_count()
        except NotImplementedError:
            # cpu_count() raises NotImplementedError when the CPU count
            # cannot be determined; the previous bare except also swallowed
            # KeyboardInterrupt/SystemExit.
            copies = 1
    return multiple_dirac_benchmark(
        copies=copies, iterations_num=iterations_num, extra_iter=extra_iter
    )
def jobslot_dirac_benchmark(copies=None, iterations_num=1, extra_iter=False):
    """Run as many copies as needed to occupy the job slot.

    Defaults to a single copy when the caller gives no explicit count.
    """
    effective_copies = 1 if copies is None else copies
    return multiple_dirac_benchmark(
        copies=effective_copies, iterations_num=iterations_num, extra_iter=extra_iter
    )
def main():
    """Command-line entry point: parse sys.argv and run the DB12 benchmark."""
    HELP_STRING = """dirac_benchmark.py [--iterations ITERATIONS] [--extra-iteration]
[COPIES|single|wholenode|jobslot|version|help]
Uses the functions within dirac_benchmark.py to run the DB12 benchmark from the
command line.
By default one benchmarking iteration is run, in addition to the initial
iteration which DB12 runs and ignores to avoid ramp-up effects at the start.
The number of benchmarking iterations can be increased using the --iterations
option. Additional iterations which are also ignored can be added with the
--extra-iteration option to avoid tail effects. In this case copies which
finish early run additional iterations until all the measurements finish.
The COPIES (ie an integer) argument causes multiple copies of the benchmark to
be run in parallel. The tokens "wholenode", "jobslot" and "single" can be
given instead to use $MACHINEFEATURES/total_cpu, $JOBFEATURES/allocated_cpu,
or 1 as the number of copies respectively. If $MACHINEFEATURES/total_cpu is
not available, then the number of (logical) processors visible to the
operating system is used.
Unless the token "single" is used, the script prints the following results to
two lines on stdout:
COPIES SUM ARITHMETIC-MEAN GEOMETRIC-MEAN MEDIAN
RAW-RESULTS
The tokens "version" and "help" print information about the script.
The source code of dirac_benchmark.py provides examples of how the functions
within dirac_benchmark.py can be used by other Python programs.
dirac_benchmark.py is distributed from https://github.com/DIRACGrid/DB12
"""
    COPIES = None
    ITERATIONS = 1
    EXTRA_ITERATION = False
    # Minimal hand-rolled argument parsing; the last non-option argument
    # becomes COPIES (an integer or one of the mode tokens).
    for arg in sys.argv[1:]:
        if arg.startswith("--iterations="):
            ITERATIONS = int(arg[13:])
        elif arg == "--extra-iteration":
            EXTRA_ITERATION = True
        elif arg in ("--help", "help"):
            print(HELP_STRING)
            sys.exit(0)
        elif not arg.startswith("--"):
            COPIES = arg
    if COPIES == "version":
        print(VERSION)
        sys.exit(0)
    # Single-copy mode prints only the NORM score.
    if COPIES is None or COPIES == "single":
        print(single_dirac_benchmark()["NORM"])
        sys.exit(0)
    if COPIES == "wholenode":
        RESULT = wholenode_dirac_benchmark(
            iterations_num=ITERATIONS, extra_iter=EXTRA_ITERATION
        )
        print(
            RESULT["copies"],
            RESULT["sum"],
            RESULT["arithmetic_mean"],
            RESULT["geometric_mean"],
            RESULT["median"],
        )
        print(" ".join([str(j) for j in RESULT["raw"]]))
        sys.exit(0)
    if COPIES == "jobslot":
        RESULT = jobslot_dirac_benchmark(
            iterations_num=ITERATIONS, extra_iter=EXTRA_ITERATION
        )
        print(
            RESULT["copies"],
            RESULT["sum"],
            RESULT["arithmetic_mean"],
            RESULT["geometric_mean"],
            RESULT["median"],
        )
        print(" ".join([str(j) for j in RESULT["raw"]]))
        sys.exit(0)
    # Otherwise COPIES is an integer count of parallel copies.
    RESULT = multiple_dirac_benchmark(
        copies=int(COPIES), iterations_num=ITERATIONS, extra_iter=EXTRA_ITERATION
    )
    print(
        RESULT["copies"],
        RESULT["sum"],
        RESULT["arithmetic_mean"],
        RESULT["geometric_mean"],
        RESULT["median"],
    )
    print(" ".join([str(k) for k in RESULT["raw"]]))
    sys.exit(0)
#
# Command-line entry point: run main() when executed as a script.
#
if __name__ == "__main__":
    main() | PypiClean |
/Mopidy-Grooveshark-1.0.4.tar.gz/Mopidy-Grooveshark-1.0.4/mopidy_grooveshark/backend.py | from __future__ import unicode_literals
import pykka
from urlparse import urlparse
from multiprocessing.pool import ThreadPool
from mopidy import backend
from mopidy.models import Track
from mopidy.models import Album
from mopidy.models import SearchResult
from grooveshark import Client
from mopidy_grooveshark import logger
def get_track(song):
    """
    Returns a Mopidy track from a Grooveshark song object.

    Returns None when the song has no usable duration or the Grooveshark
    API returned incomplete data.
    """
    try:
        # Duration is a string containing a '.'; keeping the integer part
        # and multiplying by 1000 suggests seconds -> milliseconds, the
        # unit Mopidy's Track.length expects — TODO confirm against the
        # grooveshark client library.
        length = int(song.duration.split('.')[0]) * 1000
        if length == 0:
            logger.debug("Grooveshark report 0 duration for: %s", song)
            return None
        return Track(
            name=song.name,
            comment=song.artist.name,
            length=length,
            album=Album(
                name=song.album.name,
                images=[song.album.cover._url]
            ),
            uri=song.stream.url,
        )
    except TypeError:
        # Missing/None fields in the API response surface as TypeError.
        logger.debug("Grooveshark API error for: %s", song)
        return None
def play_a_song(uri):
    """
    Play a song from Grooveshark. Needs to have a token.
    http://grooveshark.com/s/Because+Of+You/4DYDAi
    Token: 4DYDAi
    """
    logger.debug("Playing Grooveshark Song '%s'", uri)
    # The song token is the last path component, with any query part stripped.
    token = urlparse(uri).path.split('?')[0].rsplit('/', 1)[-1]
    client = Client()
    client.init()
    return get_track(client.get_song_by_token(token))
def play_a_playlist(uri):
    """
    Play a playlist from Grooveshark.
    http://grooveshark.com/playlist/Office+Hours+Jazz/19537110
    Playlist ID: 19537110
    """
    logger.debug("Playing Grooveshark Playlist '%s'", uri)
    # The playlist id is the last path component, query part stripped.
    playlist_id = urlparse(uri).path.split('?')[0].rsplit('/', 1)[-1]
    client = Client()
    client.init()
    remote_playlist = client.playlist(playlist_id)
    pool = ThreadPool(processes=16)
    converted = pool.map(get_track, remote_playlist.songs)
    pool.close()
    # Drop songs the track conversion rejected (get_track returned None).
    return [track for track in converted if track]
def search_grooveshark(query):
    """
    Makes a search on Grooveshark and return the songs.
    """
    logger.debug("Searching Grooveshark for: '%s'", query)
    client = Client()
    client.init()
    pool = ThreadPool(processes=16)
    found = pool.map(get_track, client.search(query))
    pool.close()
    # Drop entries the track conversion rejected (get_track returned None).
    return [track for track in found if track]
class GroovesharkBackend(pykka.ThreadingActor, backend.Backend):
    """Mopidy backend actor that claims the grooveshark:/gs: URI schemes."""
    def __init__(self, config, audio):
        super(GroovesharkBackend, self).__init__()
        self.config = config
        # URI schemes this backend handles within Mopidy.
        self.uri_schemes = ['grooveshark', 'gs']
        self.library = GroovesharkLibraryProvider(backend=self)
class GroovesharkLibraryProvider(backend.LibraryProvider):
    """Resolves Grooveshark URIs and performs searches for Mopidy."""
    def lookup(self, uri):
        """Return the track(s) behind a grooveshark:/gs: URI.

        Supports single-song and playlist URLs; returns an empty list for
        URIs that cannot be resolved.
        """
        # Clean the prefix
        if uri.startswith('gs:'):
            uri = uri[3:]
        elif uri.startswith('grooveshark:'):
            uri = uri[12:]
        # Remove hashbang
        if '/#!' in uri:
            uri = uri.replace('/#!', '')
        # Play a song or playlist
        if '//grooveshark.com/s/' in uri:
            return [play_a_song(uri)]
        elif '//grooveshark.com/playlist/' in uri:
            return play_a_playlist(uri)
        # Mopidy expects a list of tracks; previously this fell through and
        # implicitly returned None for unrecognised URIs.
        return []
    def search(self, query=None, uris=None, exact=False):
        """Search Grooveshark using the first value list of *query*."""
        return SearchResult(
            uri='grooveshark:search',
            tracks=search_grooveshark(' '.join(query.values()[0])),
        )
/Mezzanine-6.0.0.tar.gz/Mezzanine-6.0.0/mezzanine/core/management/commands/createdb.py | from socket import gethostname
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.db import connection
from mezzanine.conf import settings
from mezzanine.utils.tests import copy_test_to_media
DEFAULT_USERNAME = "admin"
DEFAULT_EMAIL = "example@example.com"
DEFAULT_PASSWORD = "default"
class Command(BaseCommand):
    """Performs initial Mezzanine database setup: runs migrations, then
    seeds the database with required (and optionally demo) content."""
    help = "Performs initial Mezzanine database setup."
    can_import_settings = True
    def add_arguments(self, parser):
        """
        Adds extra command options (executed only by Django >= 1.8).
        """
        parser.add_argument(
            "--nodata", action="store_true", dest="nodata", help="Do not add demo data."
        )
        parser.add_argument(
            "--noinput",
            action="store_false",
            dest="interactive",
            help="Do not prompt the user for input of any kind.",
        )
    def handle(self, **options):
        """Run migrations, then each seeding step whose apps are installed."""
        # Guard: Mezzanine's conf table existing means setup already ran.
        if "conf_setting" in connection.introspection.table_names():
            raise CommandError(
                "Database already created, you probably " "want the migrate command"
            )
        self.verbosity = int(options.get("verbosity", 0))
        self.interactive = int(options.get("interactive", 0))
        self.no_data = int(options.get("nodata", 0))
        call_command("migrate", verbosity=self.verbosity, interactive=self.interactive)
        # Each seeding function runs only when all of its listed apps are
        # present in INSTALLED_APPS.
        mapping = [
            [self.create_site, ["django.contrib.sites"]],
            [self.create_user, ["django.contrib.auth"]],
            [self.translation_fields, ["modeltranslation"]],
            [
                self.create_pages,
                [
                    "mezzanine.pages",
                    "mezzanine.forms",
                    "mezzanine.blog",
                    "mezzanine.galleries",
                ],
            ],
            [self.create_shop, ["cartridge.shop"]],
        ]
        for func, apps in mapping:
            if set(apps).issubset(set(settings.INSTALLED_APPS)):
                func()
    def confirm(self, prompt):
        """Ask a yes/no question; always True in non-interactive mode."""
        if not self.interactive:
            return True
        confirm = input(prompt)
        while confirm not in ("yes", "no"):
            confirm = input("Please enter either 'yes' or 'no': ")
        return confirm == "yes"
    def create_site(self):
        """Create or update the default Site record for this install."""
        domain = "127.0.0.1:8000" if settings.DEBUG else gethostname()
        if self.interactive:
            entered = input(
                "\nA site record is required.\nPlease "
                "enter the domain and optional port in "
                "the format 'domain:port'.\nFor example "
                "'localhost:8000' or 'www.example.com'. "
                "\nHit enter to use the default (%s): " % domain
            )
            if entered:
                domain = entered.strip("': ")
        if self.verbosity >= 1:
            print("\nCreating default site record: %s ...\n" % domain)
        try:
            site = Site.objects.get()
        except Site.DoesNotExist:
            site = Site()
        site.name = "Default"
        site.domain = domain
        site.save()
    def create_user(self):
        """Create a superuser (prompted, or a DEBUG-only default account)."""
        User = get_user_model()
        # Only seed an account in DEBUG mode on a fresh user table.
        if not settings.DEBUG or User.objects.count() > 0:
            return
        if self.interactive:
            if self.verbosity >= 1:
                print("\nCreating default account ...\n")
            call_command("createsuperuser")
        else:
            if self.verbosity >= 1:
                print(
                    "\nCreating default account "
                    "(username: %s / password: %s) ...\n"
                    % (DEFAULT_USERNAME, DEFAULT_PASSWORD)
                )
            args = (DEFAULT_USERNAME, DEFAULT_EMAIL, DEFAULT_PASSWORD)
            User.objects.create_superuser(*args)
    def create_pages(self):
        """Load required page fixtures and, optionally, the demo pages."""
        call_command("loaddata", "mezzanine_required.json")
        install_optional = not self.no_data and self.confirm(
            "\nWould you like to install some initial "
            "demo pages?\nEg: About us, Contact form, "
            "Gallery. (yes/no): "
        )
        if install_optional:
            if self.verbosity >= 1:
                print("\nCreating demo pages: About us, Contact form, " "Gallery ...\n")
            from mezzanine.galleries.models import Gallery
            call_command("loaddata", "mezzanine_optional.json")
            # Import the bundled demo gallery images from a zip archive.
            zip_name = "gallery.zip"
            copy_test_to_media("mezzanine.core", zip_name)
            gallery = Gallery.objects.get()
            gallery.zip_import = zip_name
            gallery.save()
    def create_shop(self):
        """Load required Cartridge fixtures and, optionally, demo shop data."""
        call_command("loaddata", "cartridge_required.json")
        install_optional = not self.no_data and self.confirm(
            "\nWould you like to install an initial "
            "demo product and sale? (yes/no): "
        )
        if install_optional:
            if self.verbosity >= 1:
                print("\nCreating demo product and sale ...\n")
            call_command("loaddata", "cartridge_optional.json")
            copy_test_to_media("cartridge.shop", "product")
    def translation_fields(self):
        """Sync/update django-modeltranslation fields when it is installed."""
        try:
            from modeltranslation.management.commands import (
                sync_translation_fields as create_fields,
            )
            from modeltranslation.management.commands import (
                update_translation_fields as update_fields,
            )
        except ImportError:
            return
        update = self.confirm(
            "\nDjango-modeltranslation is installed for "
            "this project and you have specified to use "
            "i18n.\nWould you like to update translation "
            "fields from the default ones? (yes/no): "
        )
        if update:
            options = {
                "verbosity": self.verbosity,
                "interactive": self.interactive,
            }
            call_command(create_fields.Command(), **options)
            call_command(update_fields.Command(), **options) | PypiClean |
/Flask-Monitoring-1.1.2.tar.gz/Flask-Monitoring-1.1.2/flask_monitoringdashboard/core/reporting/questions/average_latency.py | from flask_monitoringdashboard.core.reporting.mean_permutation_test import mean_permutation_test
import numpy as np
from flask_monitoringdashboard.core.reporting.questions.report_question import Answer, ReportQuestion
from flask_monitoringdashboard.database import session_scope
from flask_monitoringdashboard.database.request import get_latencies_sample
class AverageLatencyAnswer(Answer):
    """Answer payload for the AVERAGE_LATENCY report question."""
    def __init__(self, is_significant, comparison_interval_latencies_sample=None,
                 compared_to_interval_latencies_sample=None, percentual_diff=None, comparison_interval_avg=None,
                 compared_to_interval_avg=None):
        """Store significance flag, raw latency samples and their averages."""
        super().__init__('AVERAGE_LATENCY')
        self._is_significant = is_significant
        self._comparison_interval_latencies_sample = comparison_interval_latencies_sample
        self._compared_to_interval_latencies_sample = compared_to_interval_latencies_sample
        self._percentual_diff = percentual_diff
        self._compared_to_interval_avg = compared_to_interval_avg
        self._comparison_interval_avg = comparison_interval_avg
    def meta(self):
        """Serializable details backing the answer (samples and averages)."""
        return dict(
            latencies_sample=dict(
                comparison_interval=self._comparison_interval_latencies_sample,
                compared_to_interval=self._compared_to_interval_latencies_sample
            ),
            comparison_average=self._comparison_interval_avg,
            compared_to_average=self._compared_to_interval_avg,
            percentual_diff=self._percentual_diff,
        )
    def is_significant(self):
        """Whether the latency change was judged significant."""
        return self._is_significant
class AverageLatency(ReportQuestion):
    """Report question: did the endpoint's average latency change notably?"""
    def get_answer(self, endpoint, comparison_interval, compared_to_interval):
        """Compare latency samples of *endpoint* between two time intervals."""
        with session_scope() as db_session:
            comparison_interval_latencies_sample = get_latencies_sample(db_session, endpoint.id, comparison_interval)
            compared_to_interval_latencies_sample = get_latencies_sample(db_session, endpoint.id, compared_to_interval)
        # No traffic in one of the intervals -> nothing to compare.
        if len(comparison_interval_latencies_sample) == 0 or len(compared_to_interval_latencies_sample) == 0:
            return AverageLatencyAnswer(is_significant=False)
        comparison_interval_avg = np.average(comparison_interval_latencies_sample)
        compared_to_interval_avg = np.average(compared_to_interval_latencies_sample)
        percentual_diff = (comparison_interval_avg - compared_to_interval_avg) / compared_to_interval_avg * 100
        # Permutation test for a difference in means between the samples.
        p_value = mean_permutation_test(comparison_interval_latencies_sample,
                                        compared_to_interval_latencies_sample,
                                        num_rounds=1000)
        # Significant only if the shift is both large (>30%) and unlikely
        # to be due to chance (p < 0.05).
        is_significant = abs(float(percentual_diff)) > 30 and p_value < 0.05
        return AverageLatencyAnswer(
            is_significant=is_significant,
            percentual_diff=percentual_diff,
            # Sample latencies
            comparison_interval_latencies_sample=comparison_interval_latencies_sample,
            compared_to_interval_latencies_sample=compared_to_interval_latencies_sample,
            # Latency averages
            comparison_interval_avg=comparison_interval_avg,
            compared_to_interval_avg=compared_to_interval_avg
        ) | PypiClean |
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/mmdet/models/detectors/kd_one_stage.py | from pathlib import Path
import mmcv
import torch
from mmcv.runner import load_checkpoint
from .. import build_detector
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
    r"""Implementation of `Distilling the Knowledge in a Neural Network.
    <https://arxiv.org/abs/1503.02531>`_.
    Args:
        teacher_config (str | dict): Config file path
            or the config object of teacher model.
        teacher_ckpt (str, optional): Checkpoint path of teacher model.
            If left as None, the model will not load any weights.
    """
    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 teacher_config,
                 teacher_ckpt=None,
                 eval_teacher=True,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained)
        # When True the teacher is kept in eval mode during training.
        self.eval_teacher = eval_teacher
        # Build teacher model
        if isinstance(teacher_config, (str, Path)):
            teacher_config = mmcv.Config.fromfile(teacher_config)
        self.teacher_model = build_detector(teacher_config['model'])
        if teacher_ckpt is not None:
            load_checkpoint(
                self.teacher_model, teacher_ckpt, map_location='cpu')
    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None):
        """
        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): A List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                :class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
                boxes can be ignored when computing the loss.
        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        x = self.extract_feat(img)
        # Teacher outputs are computed without gradient tracking; they act
        # as soft targets for the student's bbox head.
        with torch.no_grad():
            teacher_x = self.teacher_model.extract_feat(img)
            out_teacher = self.teacher_model.bbox_head(teacher_x)
        losses = self.bbox_head.forward_train(x, out_teacher, img_metas,
                                              gt_bboxes, gt_labels,
                                              gt_bboxes_ignore)
        return losses
    def cuda(self, device=None):
        """Since teacher_model is registered as a plain object, it is necessary
        to put the teacher model to cuda when calling cuda function."""
        self.teacher_model.cuda(device=device)
        return super().cuda(device=device)
    def train(self, mode=True):
        """Set the same train mode for teacher and student model."""
        # Keep the teacher frozen in eval mode when eval_teacher is set.
        if self.eval_teacher:
            self.teacher_model.train(False)
        else:
            self.teacher_model.train(mode)
        super().train(mode)
    def __setattr__(self, name, value):
        """Set attribute, i.e. self.name = value
        This reloading prevent the teacher model from being registered as a
        nn.Module. The teacher module is registered as a plain object, so that
        the teacher parameters will not show up when calling
        ``self.parameters``, ``self.modules``, ``self.children`` methods.
        """
        if name == 'teacher_model':
            object.__setattr__(self, name, value)
        else:
            super().__setattr__(name, value) | PypiClean |
/Airfoil_Optimizer-0.9.0-py3-none-any.whl/af_opt/problem.py | import numpy as np
import openmdao.api as om
import sys
import time
from datetime import timedelta
from differential_evolution import DifferentialEvolutionDriver
from . import rank, run_parallel
from .components.airfoil import cst2coords
from .model import AfOptModel
from .recorders import PopulationReporter
def get_de_driver(
    gen=100,
    tolx=1e-8,
    tolf=1e-8,
    strategy="rand-to-best/1/exp/random",
    f=None,
    cr=None,
    adaptivity=2,
):
    """Build a DifferentialEvolutionDriver configured for airfoil optimization.

    Mutation (f -> Pm) and crossover (cr -> Pc) rates are only forwarded
    when explicitly given, so the driver otherwise uses its own defaults.
    """
    settings = dict(
        run_parallel=run_parallel,
        adaptivity=adaptivity,
        max_gen=gen,
        tolx=tolx,
        tolf=tolf,
        strategy=strategy,
        show_progress=True,
    )
    if f is not None:
        settings["Pm"] = f
    if cr is not None:
        settings["Pc"] = cr
    return DifferentialEvolutionDriver(**settings)
def get_coords(prob):
    """
    Get the coordinates of the airfoil represented by the current state of the airfoil optimization problem.
    Parameters
    ----------
    prob : openmdao.api.Problem
        Airfoil optimization problem
    Returns
    -------
    np.ndarray
        (n, 2) array of x-, and y-coordinates of the airfoil in counterclockwise direction
    """
    x, y_u, y_l, _, _ = cst2coords(prob["a_ca"], prob["a_th"], prob["t_te"])
    upper = np.column_stack((x, y_u))
    lower = np.column_stack((x, y_l))
    # Walk the upper surface from TE to LE (skipping the duplicated LE
    # point), then the lower surface LE to TE: a counterclockwise loop.
    return np.concatenate((upper[:0:-1], lower))
def plot(prob, display=False):
    """
    Plot the airfoil represented by the current state of the airfoil optimization problem.
    Parameters
    ----------
    prob : openmdao.api.Problem
        Airfoil optimization problem
    display : bool, optional
        True if the figure should be displayed. False by default
    Returns
    -------
    figure
    """
    import matplotlib.pyplot as plt
    from matplotlib.ticker import MultipleLocator
    fig, ax = plt.subplots()
    x, y_u, y_l, y_c, _ = cst2coords(prob["a_ca"], prob["a_th"], prob["t_te"])
    # Upper surface, lower surface, and dashed camber line, all in black.
    ax.plot(x, y_u, "k", x, y_l, "k", x, y_c, "k--")
    ax.axis("scaled")
    ax.set_xlabel("x/c")
    ax.set_ylabel("y/c")
    # Major/minor gridlines every 0.2/0.05 chord lengths on both axes.
    ax.xaxis.set_major_locator(MultipleLocator(0.2))
    ax.xaxis.set_minor_locator(MultipleLocator(0.05))
    ax.yaxis.set_major_locator(MultipleLocator(0.2))
    ax.yaxis.set_minor_locator(MultipleLocator(0.05))
    ax.grid(which="both")
    if display:
        fig.show()
    return fig
def write(prob, filename):
    """
    Write airfoil coordinates represented by the current state of the airfoil optimization problem to a file
    Parameters
    ----------
    prob : openmdao.api.Problem
        Airfoil optimization problem
    filename : str
        Filename
    """
    coords = get_coords(prob)
    # Two fixed-width columns (width 7, 6 decimals), each followed by a space.
    line_fmt = "{: >7.6f} {: >7.6f} \n"
    with open(filename, "w") as out:
        for x_val, y_val in coords:
            out.write(line_fmt.format(x_val, y_val))
def main(
    cl,
    re,
    ma,
    n_ca,
    n_th,
    gen=100,
    tolx=1e-8,
    tolf=1e-8,
    fix_te=True,
    t_te_min=0.0,
    t_c_min=0.01,
    r_le_min=0.05,
    A_cs_min=None,
    A_bins_min=None,
    Cm_max=None,
    strategy="rand-to-best/1/exp/random",
    f=None,
    cr=None,
    adaptivity=2,
    repr_file="repr.yml",
    dat_file="optimized.dat",
    png_file="optimized.png",
):
    """
    Create, analyze, optimize airfoil, and write optimized coordinates to a file. Then clean the problem up and exit.
    Parameters
    ----------
    cl : float
        Design lift coefficient
    re : float
        Reynolds number
    ma : float
        Mach number
    n_ca, n_th : int
        Number of CST coefficients for the chord line and thickness distribution, respectively
    gen : int, optional
        Number of generations to use for the genetic algorithm. 100 by default
    tolx : float, optional
        Tolerance on the spread of the design vectors.
    tolf: float, optional
        Tolerance on the spread of objective functions.
    fix_te : bool, optional
        True if the trailing edge thickness should be fixed. True by default
    t_te_min : float, optional
        Minimum TE thickness as fraction of chord length. Default is 0.0.
    t_c_min : float or None, optional
        Minimum thickness over chord ratio. None if unconstrained. Defaults is 0.01.
    r_le_min : float or None, optional
        Minimum leading edge radius. None if unconstrained. Defaults is 0.05.
    A_cs_min : float or None, optional
        Minimum cross sectional area. None if unconstrained. Default is None.
    A_bins_min : float or None, optional
        Minimum relative area of the airfoil in each bin along the chord. None if unconstrained. Default is None.
    Cm_max : float or None, optional
        Maximum absolute moment coefficient. None if unconstrained. Default is None.
    strategy : string, optional
        Evolution strategy to use. Default is 'rand-to-best/1/exp/random'.
    f : float or None, optional
        Mutation rate
    cr : float or None, optional
        Crossover rate
    adaptivity : 0, 1, or 2
        Which kind of self-adaptivity to ue (0: none, 1: simple, 2: complex)
    repr_file, dat_file, png_file : str, optional
        Paths where the final representation, optimized airfoil coordinates, and output image should be saved.
    """
    # Construct the OpenMDAO Problem
    kwargs = dict(
        n_ca=n_ca,
        n_th=n_th,
        fix_te=fix_te,
        t_te_min=t_te_min,
        t_c_min=t_c_min,
        r_le_min=r_le_min,
        A_cs_min=A_cs_min,
        A_bins_min=A_bins_min,
        Cm_max=Cm_max,
    )
    prob = om.Problem()
    prob.model = AfOptModel(**kwargs)
    prob.driver = get_de_driver(gen, tolx, tolf, strategy, f, cr, adaptivity)
    prob.driver.add_recorder(PopulationReporter())
    prob.setup()
    # Set reference values
    prob["Cl_des"] = cl
    prob["Re"] = re
    prob["M"] = ma
    # Optimize the problem using a genetic algorithm
    t0 = time.time()
    prob.run_driver()
    dt = time.time() - t0
    # Show and write final results
    # Only the rank-0 process reports/writes when running under MPI.
    if rank == 0:
        yaml = prob.model.__repr__()
        print("Optimized airfoil:")
        print(" " + yaml.replace("\n", "\n "))
        print(f"Time Elapsed: {timedelta(seconds=dt)}")
        with open(repr_file, "w") as f:
            f.write(yaml)
        write(prob, filename=dat_file)
        fig = plot(prob)
        fig.savefig(png_file)
    # Clean up and exit
    prob.cleanup()
    del prob
    sys.exit(0)
# Command-line entry point: forward the 22 positional CLI arguments to
# main(), or fall back to a small demo run when called without them.
if __name__ == "__main__":
    if len(sys.argv) == 23:
        main(
            cl=float(sys.argv[1]),
            re=float(sys.argv[2]),
            ma=float(sys.argv[3]),
            n_ca=int(sys.argv[4]),
            n_th=int(sys.argv[5]),
            gen=int(sys.argv[6]),
            tolx=float(sys.argv[7]),
            tolf=float(sys.argv[8]),
            fix_te=(sys.argv[9] == "True"),
            t_te_min=float(sys.argv[10]),
            t_c_min=None if sys.argv[11] == "None" else float(sys.argv[11]),
            r_le_min=None if sys.argv[12] == "None" else float(sys.argv[12]),
            A_cs_min=None if sys.argv[13] == "None" else float(sys.argv[13]),
            A_bins_min=None if sys.argv[14] == "None" else float(sys.argv[14]),
            Cm_max=None if sys.argv[15] == "None" else float(sys.argv[15]),
            strategy=sys.argv[16],
            f=None if sys.argv[17] == "None" else float(sys.argv[17]),
            cr=None if sys.argv[18] == "None" else float(sys.argv[18]),
            adaptivity=int(sys.argv[19]),
            repr_file=sys.argv[20],
            dat_file=sys.argv[21],
            png_file=sys.argv[22],
        )
    else:
        main(1.0, 1e6, 0.0, 3, 3, gen=9) | PypiClean |
/ExifReader-0.1.1-py3-none-any.whl/exifreader/__init__.py | from .exif_log import get_logger
from .classes import *
from .tags import *
from .utils import ord_
from .heic import HEICExifFinder
__version__ = '0.1.1'
logger = get_logger()
def increment_base(data, base):
    """Size of the JPEG segment starting at ``base``: the big-endian
    length field (bytes 2-3 of the segment) plus the 2 marker bytes."""
    high, low = ord_(data[base + 2]), ord_(data[base + 3])
    return (high << 8) + low + 2
def process_file(f, stop_tag=DEFAULT_STOP_TAG, details=True, strict=False,
                 debug=False, truncate_tags=True, auto_seek=True):
    """
    Process an image file (expects an open file object).
    This is the function that has to deal with all the arbitrary nasty bits
    of the EXIF standard.

    Detects TIFF, HEIC and JPEG containers, locates the EXIF block, and
    returns a dict of decoded tags (empty dict when no EXIF data or the
    format is unrecognized).
    """
    if auto_seek:
        f.seek(0)
    # by default do not fake an EXIF beginning
    fake_exif = 0
    # determine whether it's a JPEG or TIFF
    data = f.read(12)
    if data[0:2] in [b'II', b'MM']:
        # it's a TIFF file
        logger.debug("TIFF format recognized in data[0:2]")
        f.seek(0)
        endian = f.read(1)
        f.read(1)
        offset = 0
    elif data[4:12] == b'ftypheic':
        # HEIC container: delegate EXIF location to the dedicated finder.
        f.seek(0)
        heic = HEICExifFinder(f)
        offset, endian = heic.find_exif()
    elif data[0:2] == b'\xFF\xD8':
        # it's a JPEG file
        logger.debug("JPEG format recognized data[0:2]=0x%X%X",
                     ord_(data[0]),
                     ord_(data[1]))
        base = 2
        logger.debug("data[2]=0x%X data[3]=0x%X data[6:10]=%s",
                     ord_(data[2]), ord_(data[3]), data[6:10])
        # Skip over leading JFIF/vendor segments until EXIF-like data starts.
        while ord_(data[2]) == 0xFF and data[6:10] in [b'JFIF', b'JFXX',
                                                       b'OLYM', b'Phot']:
            length = ord_(data[4]) * 256 + ord_(data[5])
            logger.debug(" Length offset is %s", length)
            f.read(length - 8)
            # fake an EXIF beginning of file
            # I don't think this is used. --gd
            data = b'\xFF\x00' + f.read(10)
            fake_exif = 1
            if base > 2:
                logger.debug(" Added to base")
                base = base + length + 4 - 2
            else:
                logger.debug(" Added to zero")
                base = length + 4
            logger.debug(" Set segment base to 0x%X", base)
        # Big ugly patch to deal with APP2 (or other) data coming before APP1
        f.seek(0)
        # in theory, this could be insufficient since 64K is the maximum size--gd
        data = f.read(base + 4000)
        # base = 2
        # Walk the APPn segment chain until the APP1/Exif segment (or image
        # data) is found; each branch advances `base` past the segment.
        while 1:
            logger.debug(" Segment base 0x%X", base)
            if data[base:base + 2] == b'\xFF\xE1':
                # APP1
                logger.debug("  APP1 at base 0x%X", base)
                logger.debug("  Length: 0x%X 0x%X", ord_(data[base + 2]),
                             ord_(data[base + 3]))
                logger.debug("  Code: %s", data[base + 4:base + 8])
                if data[base + 4:base + 8] == b"Exif":
                    logger.debug("  Decrement base by 2 to get to pre-segment header"
                                 "(for compatibility with later code)")
                    base -= 2
                    break
                increment = increment_base(data, base)
                logger.debug("  Increment base by %s", increment)
                base += increment
            elif data[base:base + 2] == b'\xFF\xE0':
                # APP0
                logger.debug("  APP0 at base 0x%X", base)
                logger.debug("  Length: 0x%X 0x%X", ord_(data[base + 2]),
                             ord_(data[base + 3]))
                logger.debug("  Code: %s", data[base + 4:base + 8])
                increment = increment_base(data, base)
                logger.debug("  Increment base by %s", increment)
                base += increment
            elif data[base:base + 2] == b'\xFF\xE2':
                # APP2
                logger.debug("  APP2 at base 0x%X", base)
                logger.debug("  Length: 0x%X 0x%X", ord_(data[base + 2]),
                             ord_(data[base + 3]))
                logger.debug("  Code: %s", data[base + 4:base + 8])
                increment = increment_base(data, base)
                logger.debug("  Increment base by %s", increment)
                base += increment
            elif data[base:base + 2] == b'\xFF\xEE':
                # APP14
                logger.debug("  APP14 Adobe segment at base 0x%X", base)
                logger.debug("  Length: 0x%X 0x%X", ord_(data[base + 2]),
                             ord_(data[base + 3]))
                logger.debug("  Code: %s", data[base + 4:base + 8])
                increment = increment_base(data, base)
                logger.debug("  Increment base by %s", increment)
                base += increment
                logger.debug("  There is useful EXIF-like data here, but we have no parser for it.")
            elif data[base:base + 2] == b'\xFF\xDB':
                logger.debug("  JPEG image data at base 0x%X No more segments are expected.",
                             base)
                break
            elif data[base:base + 2] == b'\xFF\xD8':
                # APP12
                logger.debug("  FFD8 segment at base 0x%X", base)
                logger.debug("  Got 0x%X 0x%X and %s instead",
                             ord_(data[base]),
                             ord_(data[base + 1]),
                             data[4 + base:10 + base])
                logger.debug("  Length: 0x%X 0x%X", ord_(data[base + 2]),
                             ord_(data[base + 3]))
                logger.debug("  Code: %s", data[base + 4:base + 8])
                increment = increment_base(data, base)
                logger.debug("  Increment base by %s", increment)
                base += increment
            elif data[base:base + 2] == b'\xFF\xEC':
                # APP12
                logger.debug("  APP12 XMP (Ducky) or Pictureinfo segment at base 0x%X",
                             base)
                logger.debug("  Got 0x%X and 0x%X instead", ord_(data[base]),
                             ord_(data[base + 1]))
                logger.debug("  Length: 0x%X 0x%X",
                             ord_(data[base + 2]),
                             ord_(data[base + 3]))
                logger.debug("Code: %s", data[base + 4:base + 8])
                increment = increment_base(data, base)
                logger.debug("  Increment base by %s", increment)
                base += increment
                logger.debug(
                    "There is useful EXIF-like data here (quality, comment, copyright),"
                    "but we have no parser for it.")
            else:
                try:
                    increment = increment_base(data, base)
                    logger.debug("Got 0x%X and 0x%X instead",
                                 ord_(data[base]),
                                 ord_(data[base + 1]))
                except IndexError:
                    logger.debug("Unexpected/unhandled segment type or file content.")
                    return {}
                else:
                    logger.debug("  Increment base by %s", increment)
                    base += increment
        # Position just past the segment header and confirm the EXIF marker.
        f.seek(base + 12)
        if ord_(data[2 + base]) == 0xFF and data[6 + base:10 + base] == b'Exif':
            # detected EXIF header
            offset = f.tell()
            endian = f.read(1)
            # HACK TEST:  endian = 'M'
        elif ord_(data[2 + base]) == 0xFF and data[6 + base:10 + base + 1] == b'Ducky':
            # detected Ducky header.
            logger.debug("EXIF-like header (normally 0xFF and code): 0x%X and %s",
                         ord_(data[2 + base]), data[6 + base:10 + base + 1])
            offset = f.tell()
            endian = f.read(1)
        elif ord_(data[2 + base]) == 0xFF and data[6 + base:10 + base + 1] == b'Adobe':
            # detected APP14 (Adobe)
            logger.debug("EXIF-like header (normally 0xFF and code): 0x%X and %s",
                         ord_(data[2 + base]), data[6 + base:10 + base + 1])
            offset = f.tell()
            endian = f.read(1)
        else:
            # no EXIF information
            logger.debug("No EXIF header expected data[2+base]==0xFF and"
                         "data[6+base:10+base]===Exif (or Duck)")
            logger.debug("Did get 0x%X and %s",
                         ord_(data[2 + base]), data[6 + base:10 + base + 1])
            return {}
    else:
        # file format not recognized
        logger.debug("File format not recognized.")
        return {}
    endian = chr(ord_(endian[0]))
    # deal with the EXIF info we found
    logger.debug("Endian format is %s (%s)", endian, {
        'I': 'Intel',
        'M': 'Motorola',
        '\x01': 'Adobe Ducky',
        'd': 'XMP/Adobe unknown'
    }[endian])
    hdr = ExifHeader(f, endian, offset, fake_exif, strict, debug, details, truncate_tags)
    ifd_list = hdr.list_ifd()
    thumb_ifd = False
    ctr = 0
    # IFD0 is the main image, IFD1 (when present) is the thumbnail.
    for ifd in ifd_list:
        if ctr == 0:
            ifd_name = 'Image'
        elif ctr == 1:
            ifd_name = 'Thumbnail'
            thumb_ifd = ifd
        else:
            ifd_name = 'IFD %d' % ctr
        logger.debug('IFD %d (%s) at offset %s:', ctr, ifd_name, ifd)
        hdr.dump_ifd(ifd, ifd_name, stop_tag=stop_tag)
        ctr += 1
    # EXIF IFD
    exif_off = hdr.tags.get('Image ExifOffset')
    if exif_off:
        logger.debug('Exif SubIFD at offset %s:', exif_off.values[0])
        hdr.dump_ifd(exif_off.values[0], 'EXIF', stop_tag=stop_tag)
    # deal with MakerNote contained in EXIF IFD
    # (Some apps use MakerNote tags but do not use a format for which we
    # have a description, do not process these).
    if details and 'EXIF MakerNote' in hdr.tags and 'Image Make' in hdr.tags:
        hdr.decode_maker_note()
    # extract thumbnails
    if details and thumb_ifd:
        hdr.extract_tiff_thumbnail(thumb_ifd)
        hdr.extract_jpeg_thumbnail()
    # parse XMP tags (experimental)
    if debug and details:
        xmp_string = b''
        # Easy we already have them
        if 'Image ApplicationNotes' in hdr.tags:
            logger.debug('XMP present in Exif')
            xmp_string = make_string(hdr.tags['Image ApplicationNotes'].values)
        # We need to look in the entire file for the XML
        else:
            logger.debug('XMP not in Exif, searching file for XMP info...')
            xml_started = False
            xml_finished = False
            for line in f:
                open_tag = line.find(b'<x:xmpmeta')
                close_tag = line.find(b'</x:xmpmeta>')
                if open_tag != -1:
                    xml_started = True
                    line = line[open_tag:]
                    logger.debug('XMP found opening tag at line position %s' % open_tag)
                if close_tag != -1:
                    logger.debug('XMP found closing tag at line position %s' % close_tag)
                    line_offset = 0
                    if open_tag != -1:
                        line_offset = open_tag
                    line = line[:(close_tag - line_offset) + 12]
                    xml_finished = True
                if xml_started:
                    xmp_string += line
                if xml_finished:
                    break
            logger.debug('XMP Finished searching for info')
        if xmp_string:
            hdr.parse_xmp(xmp_string)
    return hdr.tags | PypiClean |
(function () {
    'use strict';

    // TinyMCE "nonbreaking" plugin: inserts non-breaking spaces via a
    // toolbar button / menu item and can optionally map the Tab key to a
    // configurable run of non-breaking spaces.
    var PluginManager = tinymce.util.Tools.resolve('tinymce.PluginManager');
    var VK = tinymce.util.Tools.resolve('tinymce.util.VK');

    // Number of nbsp entities to insert when Tab is pressed. The legacy
    // boolean form of the option means "3" when true, "0" when false.
    var getKeyboardSpaces = function (editor) {
      var spaces = editor.getParam('nonbreaking_force_tab', 0);
      if (typeof spaces !== 'boolean') {
        return spaces;
      }
      return spaces === true ? 3 : 0;
    };

    // Whether inserted nbsp runs are wrapped in a protective span.
    var wrapNbsps = function (editor) {
      return editor.getParam('nonbreaking_wrap', true, 'boolean');
    };

    // Repeat `text` `count` times (no String.prototype.repeat for
    // compatibility with older runtimes).
    var stringRepeat = function (text, count) {
      var result = '';
      while (count-- > 0) {
        result += text;
      }
      return result;
    };

    var isVisualCharsEnabled = function (editor) {
      var visualchars = editor.plugins.visualchars;
      return visualchars ? visualchars.isEnabled() : false;
    };

    // Insert `times` non-breaking spaces at the caret inside one undo level.
    var insertNbsp = function (editor, times) {
      var entities = stringRepeat(' ', times);
      var buildSpan = function () {
        var classes = isVisualCharsEnabled(editor) ? 'mce-nbsp-wrap mce-nbsp' : 'mce-nbsp-wrap';
        return '<span class="' + classes + '" contenteditable="false">' + entities + '</span>';
      };
      var html = wrapNbsps(editor) || editor.plugins.visualchars ? buildSpan() : entities;
      editor.undoManager.transact(function () {
        return editor.insertContent(html);
      });
    };

    var registerCommand = function (editor) {
      editor.addCommand('mceNonBreaking', function () {
        insertNbsp(editor, 1);
      });
    };

    // Optionally intercept Tab (but never Shift+Tab) and replace it with
    // the configured number of non-breaking spaces.
    var setupTabHandling = function (editor) {
      var spaces = getKeyboardSpaces(editor);
      if (spaces <= 0) {
        return;
      }
      editor.on('keydown', function (e) {
        if (e.keyCode !== VK.TAB || e.isDefaultPrevented()) {
          return;
        }
        if (e.shiftKey) {
          return;
        }
        e.preventDefault();
        e.stopImmediatePropagation();
        insertNbsp(editor, spaces);
      });
    };

    var registerUi = function (editor) {
      var onAction = function () {
        return editor.execCommand('mceNonBreaking');
      };
      editor.ui.registry.addButton('nonbreaking', {
        icon: 'non-breaking',
        tooltip: 'Nonbreaking space',
        onAction: onAction
      });
      editor.ui.registry.addMenuItem('nonbreaking', {
        icon: 'non-breaking',
        text: 'Nonbreaking space',
        onAction: onAction
      });
    };

    PluginManager.add('nonbreaking', function (editor) {
      registerCommand(editor);
      registerUi(editor);
      setupTabHandling(editor);
    });
}());
$(function() {
  // Collapsible Sphinx sidebar: adds a «/» toggle button beside the
  // sidebar and remembers the collapsed/expanded state in a cookie.

  // Cached jQuery handles for the page regions that get resized.
  var bodyWrap = $('.bodywrapper');
  var sideBar = $('.sphinxsidebar');
  var sideBarWrap = $('.sphinxsidebarwrapper');

  // Some pages are rendered without a sidebar; nothing to do there.
  if (!sideBar.length) return;

  // Geometry with the sidebar expanded...
  var expandedMargin = bodyWrap.css('margin-left');
  var expandedWidth = sideBar.width();
  // ...and collapsed.
  var collapsedMargin = '.8em';
  var collapsedWidth = '.8em';

  // Colors taken from the active theme.
  var darkColor = $('.related').css('background-color');
  var lightColor = $('.document').css('background-color');

  function isCollapsed() {
    return sideBarWrap.is(':not(:visible)');
  }

  function toggleSidebar() {
    if (isCollapsed())
      expandSidebar();
    else
      collapseSidebar();
  }

  function collapseSidebar() {
    sideBarWrap.hide();
    sideBar.css('width', collapsedWidth);
    bodyWrap.css('margin-left', collapsedMargin);
    toggleButton.css({
      'margin-left': '0',
      'height': bodyWrap.height()
    });
    toggleButton.find('span').text('»');
    toggleButton.attr('title', _('Expand sidebar'));
    document.cookie = 'sidebar=collapsed';
  }

  function expandSidebar() {
    bodyWrap.css('margin-left', expandedMargin);
    sideBar.css('width', expandedWidth);
    sideBarWrap.show();
    toggleButton.css({
      'margin-left': expandedWidth - 12,
      'height': bodyWrap.height()
    });
    toggleButton.find('span').text('«');
    toggleButton.attr('title', _('Collapse sidebar'));
    document.cookie = 'sidebar=expanded';
  }

  function addToggleButton() {
    sideBarWrap.css({
      'float': 'left',
      'margin-right': '0',
      'width': expandedWidth - 28
    });
    // Create the button itself.
    sideBar.append(
      '<div id="sidebarbutton"><span>«</span></div>'
    );
    var btn = $('#sidebarbutton');
    lightColor = btn.css('background-color');
    // Find the viewport height so the '«' glyph can be vertically centered.
    var viewportHeight;
    if (window.innerHeight)
      viewportHeight = window.innerHeight;
    else
      viewportHeight = $(window).height();
    btn.find('span').css({
      'display': 'block',
      'margin-top': (viewportHeight - sideBar.position().top - 20) / 2
    });
    btn.click(toggleSidebar);
    btn.attr('title', _('Collapse sidebar'));
    btn.css({
      'color': '#FFFFFF',
      'border-left': '1px solid ' + darkColor,
      'font-size': '1.2em',
      'cursor': 'pointer',
      'height': bodyWrap.height(),
      'padding-top': '1px',
      'margin-left': expandedWidth - 12
    });
    btn.hover(
      function () {
        $(this).css('background-color', darkColor);
      },
      function () {
        $(this).css('background-color', lightColor);
      }
    );
  }

  function restoreStateFromCookie() {
    if (!document.cookie)
      return;
    var entries = document.cookie.split(';');
    for (var k = 0; k < entries.length; k++) {
      var pair = entries[k].split('=');
      var key = pair[0].replace(/ /, ""); // strip leading spaces
      if (key == 'sidebar') {
        var value = pair[1];
        if ((value == 'collapsed') && (!isCollapsed()))
          collapseSidebar();
        else if ((value == 'expanded') && (isCollapsed()))
          expandSidebar();
      }
    }
  }

  addToggleButton();
  // `toggleButton` is hoisted, so collapse/expand above can close over it;
  // it only becomes usable once the button exists in the DOM.
  var toggleButton = $('#sidebarbutton');
  restoreStateFromCookie();
});
/AWS-Manager-0.4.tar.gz/AWS-Manager-0.4/aws_manager/functions/functions.py | import subprocess
import aws_manager.settings as settings
import distutils.spawn
def read_file_to_array(path_to_file):
    """
    Read a text file into a list of its lines.

    :param str path_to_file: full path to the file
    :return: list of lines (each keeping its trailing newline), or
        False if the file could not be opened
    """
    try:
        file_content = open(path_to_file)
    except IOError:
        return False
    # The context manager guarantees the handle is closed even if
    # readlines() raises, unlike the original open()/close() pair.
    with file_content:
        return file_content.readlines()
def is_pip_installed():
    """
    Check whether the ``pip`` executable is available on this system's PATH.

    :return: True if installed, False otherwise
    """
    return is_command_exists("pip")
def install_pip():
"""
install pip
:return:
"""
install = raw_input("%s needs to have pip install in order to run. Do you wish to install it now? (y/n)" %
settings.script_name)
if install is "y":
subprocess.call("sudo python %s" % settings.pip_installation, shell=True)
elif install is "n":
print "Cannot run without Pip installed"
exit()
else:
print "Cannot run without Pip installed"
install_pip()
init_and_run()
def uninstall_pip():
"""
uninstall pip
:return:
"""
print "This will uninstall both boto3 moudle and pip."
uninstall_boto3()
install = raw_input("%s needs to have pip install in order to run. Are you sure you wish to uninstall it? (y/n)" %
settings.script_name)
if install is "y":
subprocess.call("sudo pip uninstall pip", shell=True)
init_and_run()
def is_boto3_installed():
    """
    Check whether the boto3 Python module can be imported.

    :return: True if installed, False otherwise
    """
    return is_module_exists("boto3")
def install_boto3():
"""
install boto3
:return:
"""
install = raw_input("%s needs to have boto3 installed in order to run. Do you wish to install it now? (y/n)" %
settings.script_name)
if install is "y":
subprocess.call("sudo pip install boto3", shell=True)
elif install is "n":
print "Cannot run without boto3 installed"
exit()
else:
print "Cannot run without boto3 installed"
install_boto3()
init_and_run()
def uninstall_boto3():
    """
    Interactively uninstall boto3; returns to the main flow if the user
    declines.
    """
    install = raw_input("%s needs to have boto3 install in order to run. Are you sure you wish to uninstall it? (y/n)" %
                        settings.script_name)
    # BUG FIX: `==` instead of `is` -- identity comparison of user input
    # strings is not guaranteed to work.
    if install == "y":
        subprocess.call("sudo pip uninstall boto3", shell=True)
    else:
        init_and_run()
def is_command_exists(name):
    """
    Check whether an executable named *name* is available on PATH.

    :param str name: the name of the command
    :return: True if the command exists, False otherwise
    """
    found = distutils.spawn.find_executable(name)
    return found is not None
def is_module_exists(name):
    """
    Check whether a Python module named *name* can be imported.

    :param str name: the name of the module
    :return: True if importable, False otherwise
    """
    try:
        __import__(name)
        return True
    except ImportError:
        return False
def init_and_run():
    """
    Initialize the system, making sure all the dependencies and certificates
    are installed, then run the script (credential setup menu first if no
    valid AWS credentials are configured).
    """
    install_dependencies()
    # Imported lazily so that install_dependencies() can install boto3
    # before these modules (which presumably import it) are loaded.
    import aws_manager.menus as menus
    import aws_manager.aws as aws
    if not aws.is_valid_credentials_set():
        menus.show_credential_setup_menu()
    else:
        menus.show_main_menu()
def install_dependencies():
    """
    Install all the dependencies necessary to run.

    Checks pip first, then boto3, prompting to install whichever is missing.
    """
    checks = (
        (is_pip_installed, install_pip),
        (is_boto3_installed, install_boto3),
    )
    for is_installed, install in checks:
        if not is_installed():
            install()
/Djblets-3.3.tar.gz/Djblets-3.3/djblets/markdown/__init__.py | import io
import re
import sys
from html.entities import name2codepoint
from xml.dom.minidom import parseString
from markdown import markdownFromFile
MARKDOWN_ALL_ESCAPED_CHARS = (
'\\', '`', '*', '_', '{', '}', '[', ']', '(', ')', '>', '#',
'+', '-', '.', '!',
)
MARKDOWN_SPECIAL_CHARS = re.escape(r''.join(MARKDOWN_ALL_ESCAPED_CHARS))
MARKDOWN_SPECIAL_CHARS_RE = re.compile(r'([%s])' % MARKDOWN_SPECIAL_CHARS)
# Markdown.ESCAPED_CHARS lists several characters to escape, but it's not
# that simple. We only want to escape certain things if they'll actually affect
# the Markdown rendering, because otherwise it's annoying to look at the
# source.
MARKDOWN_ESCAPED_CHARS = set(MARKDOWN_ALL_ESCAPED_CHARS) - {
'.', '#', '-', '+', '_', '(', ')', '*', '>'
}
ESCAPE_CHARS_RE = re.compile(r"""
(
# Numeric lists start with leading whitespace, one or more digits,
# and then a period
^\s*(\d+\.)\s
# ATX-style headers start with a hash at the beginning of the line.
| ^\s*(\#+)
# + and - have special meaning (lists, headers, and rules), but only if
# they're at the start of the line.
| ^\s*([-\+]+)
# _ indicates italic, and __ indicates bold, but not when in the middle
# of a word.
#
| (?<!\w|_)(__?)
| (__?)(?!\w|_)
# This is an alternate format for italic and bold, using * instead of _.
| (?<!\w|\*)(\*\*?)
| (\*\*?)(?!\w|\*)
# Named links are in the form of [name](url).
| (\[) [^\]]* (\]) (\() [^\)]* (\))
# '>' need only be escaped for blockquotes ('> ...') or automatic links
# ('<http://...> or <user@example.com>).
| ^((?:\s*>)+)
| (?:<(?:(?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^>]*))(>)
| (?:<[^> \!]*@[^> ]*)(>)
# All other special characters
| ([%s])
)
""" % re.escape(''.join(MARKDOWN_ESCAPED_CHARS)),
re.M | re.VERBOSE)
UNESCAPE_CHARS_RE = re.compile(r'\\([%s])' % MARKDOWN_SPECIAL_CHARS)
ILLEGAL_XML_CHARS_RE = None
_ILLEGAL_XML_CHAR_CODES_SET = None
def markdown_escape(text):
    """Escape text for use in Markdown.

    This will escape the provided text so that none of the characters will
    be rendered specially by Markdown.

    Args:
        text (unicode):
            The text to escape.

    Returns:
        unicode:
            The escaped text.
    """
    def _escape_matches(m):
        # Each ESCAPE_CHARS_RE match may set several capture groups
        # (group 1 is the whole alternation; groups 2+ are the individual
        # escapable spans). Backslash-escape only the captured spans,
        # copying the text between them through unchanged.
        prev_end = m.start(0)
        new_s = []
        for i, group in enumerate(m.groups()[1:], start=2):
            if group:
                new_s.append(m.string[prev_end:m.start(i)])
                new_s.append(MARKDOWN_SPECIAL_CHARS_RE.sub(r'\\\1', group))
                prev_end = m.end(i)
        new_s.append(m.string[prev_end:m.end(0)])
        return ''.join(new_s)
    return ESCAPE_CHARS_RE.sub(_escape_matches, text)
def markdown_unescape(escaped_text):
    """Unescape Markdown-escaped text.

    This will unescape the provided Markdown-formatted text so that any
    escaped characters will be unescaped.

    Args:
        escaped_text (unicode):
            The Markdown-escaped text.

    Returns:
        unicode:
            The text with escaping removed.
    """
    text = UNESCAPE_CHARS_RE.sub(r'\1', escaped_text)
    split = text.split('\n')
    # Also undo the per-line code-block prefix escaping.
    # NOTE(review): the [6:] slice implies a 6-character prefix on these
    # lines -- confirm against markdown_escape's code-block handling that
    # the startswith() literals below are the intended prefixes.
    for i, line in enumerate(split):
        if line.startswith('    '):
            split[i] = '    ' + line[6:]
        elif line.startswith('   \t'):
            split[i] = line[6:]
    return '\n'.join(split)
def iter_markdown_lines(markdown_html):
    """Iterate over lines of Markdown, normalizing for individual display.

    Generated Markdown HTML cannot by itself be handled on a per-line-basis.
    Code blocks, for example, will consist of multiple lines of content
    contained within a <pre> tag. Likewise, lists will be a bunch of
    <li> tags inside a <ul> tag, and individually do not form valid lists.

    This function iterates through the Markdown tree and generates
    self-contained lines of HTML that can be rendered individually.

    Args:
        markdown_html (bytes or unicode):
            The Markdown-generated HTML to process.

    Yields:
        unicode:
            A self-contained, individually renderable line of HTML.
    """
    nodes = get_markdown_element_tree(markdown_html)
    for node in nodes:
        if node.nodeType == node.ELEMENT_NODE:
            # NOTE(review): attributes.get() with a truthy default makes
            # this condition true for *every* <div> -- confirm whether a
            # class comparison was intended.
            if (node.tagName == 'div' and
                node.attributes.get('class', 'codehilite')):
                # This is a code block, which will consist of a bunch of lines
                # for the source code. It contains a single <pre> element
                # within it that contains the full code block. We want to split
                # that up into individual lines which are each wrapped in their
                # own <div class="codehilite"><pre> elements.
                child_lines = node.firstChild.toxml().splitlines()
                # The first and last lines in child_lines begin and end with
                # <pre> and </pre>, respectively. Clear that out.
                assert child_lines[0].startswith('<pre>')
                assert child_lines[-1].endswith('</pre>')
                child_lines[0] = child_lines[0][len('<pre>'):]
                child_lines[-1] = child_lines[-1][:-len('</pre>')]
                if not child_lines[-1]:
                    child_lines = child_lines[:-1]
                fmt = '<div class="codehilite%s"><pre>%s</pre></div>'
                # If there's just a single line, output that directly
                if len(child_lines) == 1:
                    yield fmt % ('', child_lines[0])
                elif len(child_lines) > 1:
                    # Mark the first/middle/last lines so CSS can visually
                    # join them back into one block.
                    yield fmt % (' codehilite-multiline-start', child_lines[0])
                    for line in child_lines[1:-1]:
                        yield fmt % (' codehilite-multiline-middle', line)
                    yield fmt % (' codehilite-multiline-end', child_lines[-1])
            elif node.tagName in ('ul', 'ol'):
                # This is a list. We'll need to split all of its items
                # into individual lists, in order to retain bullet points
                # or the numbers.
                #
                # For the case of numbers, we can set each list to start
                # at the appropriate number so that they don't all say "1."
                start = node.attributes.get('start')
                if start is not None:
                    try:
                        i = int(start.value)
                    except ValueError:
                        i = 1
                else:
                    i = 1
                for child_node in node.childNodes:
                    if (child_node.nodeType == child_node.ELEMENT_NODE and
                        child_node.tagName == 'li'):
                        # This is a list item element. It may be multiple
                        # lines, but we'll have to treat it as one line.
                        yield '<%s start="%s">%s</%s>' % (
                            node.tagName, i, child_node.toxml(),
                            node.tagName)
                        i += 1
            elif node.tagName == 'p':
                # This is a paragraph, possibly containing multiple lines.
                for line in node.toxml().splitlines():
                    yield line
            else:
                # Whatever this is, treat it as one block.
                yield node.toxml()
        elif node.nodeType == node.TEXT_NODE:
            # This may be several blank extraneous blank lines, due to
            # Markdown's generation from invisible markup like fences.
            # We want to condense this down to one blank line.
            yield '\n'
def get_markdown_element_tree(markdown_html):
    """Return an XML element tree for Markdown-generated HTML.

    This will build the tree and return all nodes representing the rendered
    Markdown content. The element tree is generated using Python's
    :py:mod:`xml.dom.minidom`.

    Args:
        markdown_html (bytes or unicode):
            The Markdown-generated HTML to parse.

    Returns:
        xml.dom.minicompat.NodeList:
            The list of nodes representing the Markdown-generated HTML.
    """
    markdown_html = sanitize_illegal_chars_for_xml(markdown_html)
    # Python-Markdown converts all characters in an e-mail address to
    # entities (likely to avoid e-mail address harvesting). It optimistically
    # tries to use named HTML entities for special characters, but these won't
    # be known to an XML parser.
    #
    # Since there's no easy way to register entities with xml.dom.minidom, and
    # we don't want to change the return type, the easiest solution is to
    # convert named entites back to character codes through a regex before
    # parsing.
    illegal_chars_set = _get_illegal_char_codes_set_for_xml()
    unknown_code = ord('?')
    unknown_entity = '#%s' % unknown_code
    def _handle_entity(m):
        # Numeric entities (&#NN; / &#xNN;) pointing at XML-illegal code
        # points are replaced with '?'; named entities are converted to
        # their numeric form so minidom can parse them.
        entity = m.group(1)
        if entity.startswith('#'):
            try:
                if entity.startswith('#x'):
                    value = int(entity[2:], 16)
                else:
                    value = int(entity[1:])
                if value in illegal_chars_set:
                    entity = unknown_entity
            except ValueError:
                entity = unknown_entity
            return '&%s;' % entity
        else:
            return '&#%s;' % name2codepoint.get(entity, unknown_code)
    markdown_html = re.sub(r'&([^;]+);', _handle_entity, markdown_html,
                           flags=re.M)
    # Wrap in a synthetic root element so fragment-level content parses.
    doc = parseString(('<html>%s</html>' % markdown_html).encode('utf-8'))
    return doc.childNodes[0].childNodes
def sanitize_illegal_chars_for_xml(s):
    """Sanitize a string, removing characters illegal in XML.

    This will remove a number of characters that would break the XML parser.
    They may be in the string due to a copy/paste.

    Note that this will not perform any security-related sanitization of the
    HTML. It's purely a parsing aid for dealing with illegal characters.

    This code is courtesy of the XmlRpcPlugin developers, as documented
    here: http://stackoverflow.com/a/22273639

    Args:
        s (bytes or unicode):
            The string to sanitize. Byte strings are decoded as UTF-8.

    Returns:
        unicode:
            The resulting sanitized HTML.
    """
    global ILLEGAL_XML_CHARS_RE
    # Build the character-class regex once and cache it at module level;
    # the set of illegal ranges never changes at runtime.
    if ILLEGAL_XML_CHARS_RE is None:
        ILLEGAL_XML_CHARS_RE = re.compile('[%s]' % ''.join(
            '%s-%s' % (chr(low), chr(high))
            for low, high in _get_illegal_chars_for_xml()
        ))
    if isinstance(s, bytes):
        s = s.decode('utf-8')
    return ILLEGAL_XML_CHARS_RE.sub('', s)
def render_markdown_from_file(f, **markdown_kwargs):
    """Render Markdown text from a file stream to HTML.

    Args:
        f (file or io.BytesIO):
            The byte stream to read from.

        **markdown_kwargs (dict):
            Keyword arguments to pass to :py:func:`markdown.markdownFromFile`.

    Returns:
        unicode:
            The resulting Markdown-rendered HTML.
    """
    # io.BytesIO is a context manager; `with` replaces the original
    # try/finally close() pair and guarantees cleanup on error.
    with io.BytesIO() as s:
        markdownFromFile(input=f, output=s, **markdown_kwargs)
        return s.getvalue().decode('utf-8')
def _get_illegal_char_codes_set_for_xml():
    """Return a set of all illegal character codes for an XML document.

    The set will contain a numeric character code for every single character
    that's considered invalid in an XML document.

    Version Added:
        1.0.18

    Returns:
        set of int:
            A set of all illegal character codes.
    """
    global _ILLEGAL_XML_CHAR_CODES_SET
    # Built once on first use and cached at module level.
    if _ILLEGAL_XML_CHAR_CODES_SET is None:
        _ILLEGAL_XML_CHAR_CODES_SET = {
            code
            for low, high in _get_illegal_chars_for_xml()
            for code in range(low, high + 1)
        }
    return _ILLEGAL_XML_CHAR_CODES_SET
def _get_illegal_chars_for_xml():
"""Return all illegal character ranges for an XML document.
This is used internally to build ranges of characters that aren't
valid in an XML document, for the purposes of building regexes and
maps for filtering them out.
We don't cache the result, because this will only ever be generated
up to two times per process. The end results themselves are cached, and
there's no point in keeping the raw lists in memory.
Version Added:
1.0.18
Returns:
list of tuple:
The list of inclusive illegal character ranges.
"""
illegal_unichrs = [
(0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x1F), (0x7F, 0x84),
(0x86, 0x9F), (0xFDD0, 0xFDDF), (0xFFFE, 0xFFFF),
]
if sys.maxunicode > 0x10000:
illegal_unichrs += [
(0x1FFFE, 0x1FFFF), (0x2FFFE, 0x2FFFF), (0x3FFFE, 0x3FFFF),
(0x4FFFE, 0x4FFFF), (0x5FFFE, 0x5FFFF), (0x6FFFE, 0x6FFFF),
(0x7FFFE, 0x7FFFF), (0x8FFFE, 0x8FFFF), (0x9FFFE, 0x9FFFF),
(0xAFFFE, 0xAFFFF), (0xBFFFE, 0xBFFFF), (0xCFFFE, 0xCFFFF),
(0xDFFFE, 0xDFFFF), (0xEFFFE, 0xEFFFF), (0xFFFFE, 0xFFFFF),
(0x10FFFE, 0x10FFFF),
]
return illegal_unichrs | PypiClean |
/LeadsRx_Python_Client-0.0.9.tar.gz/LeadsRx_Python_Client-0.0.9/README.md | # LeadsRX Python Client
A python client for querying the LeadsRX API. Created in my spare time to help out our analytics team.
https://developers.leadsrx.com/reference#conversions
#### Written by Josh Hatfield @Semetrical
#### About Semetrical
Semetrical are a global top 40 digital agency offering a full suite of services. Bespoke technology forms a key part of what we do.
We won Best Small SEO Agency at the EU Search Awards in 2019 and Best Small Biddable Agency at the UK Search Awards 2020.
Our website can be found [here](http://bit.ly/3aMWIMd). If you want to chat, get in touch [here](http://bit.ly/3keCf5Y).
## Key Features
### Fetch Data From These Endpoints
* Conversions | https://developers.leadsrx.com/reference#conversions
* Touchpoints | https://developers.leadsrx.com/reference#touchpoints
* Interactions | https://developers.leadsrx.com/reference#interactions
* Conversion IDs | https://developers.leadsrx.com/reference#conversion-ids
* Campaign IDs | https://developers.leadsrx.com/reference#campaign-ids
* Grouping IDs | https://developers.leadsrx.com/reference#grouping-ids
* Domains | https://developers.leadsrx.com/reference#domains
### Access Results In These Formats
* Access Results As A Json Object ([{},{},{}])
* Access Results As A Pandas Dataframe
## Getting Started
### Installation
Download Files As Zip
Install These Libraries
* Requests
* Pandas
## Code Examples
Import Required Libraries
```python
import LeadsRx
import logging
import json
#Setup Log Handler
logging.basicConfig(level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.StreamHandler()
]
)
```
Authenticate And Create Client
```python
#read in client_secret and account tag from a json file
json_file=open('auth.json')
auth_dict=json.load(json_file)
##create client -- must pass in secret key and account tag as strings
secret_key=auth_dict['secret_key']
account_tag=auth_dict['account_tag']
client = LeadRx.client(secret_key=secret_key, account_tag=account_tag)
#You could also just pass the key and tag in directly. Using a file is preferable as this way no sensitive information ends up in our git repo
client = LeadRx.client(secret_key="Your Client Secret", account_tag="Your Account Tag")
```
Accessing The Conversion ID Endpoint
```python
# get contents of conversion ID endpoint for the account tag given to client
conversion_ids = client.pull_conversion_ids()
# get result as json
conversion_id_json = conversion_ids.json
# get result as dataframe
conversion_id_dataframe = conversion_ids.dataframe
```
Accessing The Campaign ID Endpoint
```python
# get campaign IDs for the account tag given to client
# campaignIDs are also called touchpoint ids
campaign_ids = client.pull_campaign_ids()
# get results as json
campaign_ids_json = campaign_ids.json
# get result as dataframe
campaign_ids_dataframe = campaign_ids.dataframe
```
Accessing The Domains Endpoint
```python
# get domains for the account tag given to client
domains = client.pull_domains()
# get result as json
domains_json = domains.json
# get result as dataframe
domains_dataframe = domains.dataframe
```
Accessing The Groupings ID Endpoint
```python
# get grouping ids for the account tag given to client
groupings = client.pull_grouping_ids()
# get result as json
groupings = groupings.json
```
Accessing The Touchpoint Endpoint
```python
# get contents of touchpoint endpoint for the account tag given to client
#Expects a startDateTime and endDateTime in "YYYY-MM-DD HH:MM:SS" format
#leadType can be set to "new", "repeat" or "all". This filters touchpoint results based on first-time, repeat or all conversions for a customer
#Function pulls all conversion_ids by default. If you want to fetch conversions for a single conversion then pass it's conversion_id as a string
touchpoints = client.pull_touchpoints(campaignID="554588", startDateTimeStr="2021-01-01 00:00:00",
                                      endDateTimeStr="2021-01-01 11:00:00", conversion_id='13697', lead_type="new")
# get result for entire time period as json
touchpoints_json = touchpoints.total_results_json
# get result for entire time period as dataframe
touchpoints_dataframe = touchpoints.total_results_dataframe
# get result for entire period split by day as json
touchpoints_json = touchpoints.by_day_results_json
# get result for entire period split by day as dataframe
touchpoints_dataframe = touchpoints.by_day_results_dataframe
```
Accessing The Interactions Endpoint
```python
# get contents of interactions endpoint for the account tag given to client
#Expects a startDateTime and endDateTime in "YYYY-MM-DD HH:MM:SS" format
#LeadRX campaignID should be supplied as a string.Campaign ID is the ID of the touchpoint you want to query
#Campaign IDS can be found by calling the campaigns ID Endpoint
interactions = client.pull_interactions(campaignID="554588", startDateTimeStr="2021-01-01 00:00:00",
                                        endDateTimeStr="2021-01-01 11:00:00")
#get result for the entire time period split by date as JSON
interactions_by_date_json = interactions.by_day_results_json
#get results by hour_of_day for the entire time period as JSON
interactions_by_hour_of_day_json = interactions.hour_of_day_json
#get results by day_of_week for the entire time period as JSON
interactions_by_day_of_week_json = interactions.day_of_week_json
#get result for the entire time period split by date as a Dataframe
interactions_by_date_dataframe = interactions.by_day_results_dataframe
#get results by hour_of_day for the entire time period as Dataframe
interactions_by_hour_of_day_dataframe = interactions.hour_of_day_dataframe
#get results by day_of_week for the entire time period as JSON
interactions_by_day_of_week_dataframe = interactions.day_of_week_json
```
Accessing The Conversions Endpoint
```python
# get contents of conversions endpoint for the account tag given to client
#Expects a startDateTime and endDateTime in "YYYY-MM-DD HH:MM:SS" format
#landingPage can be set to False to avoid pulling the landingPage field
#Function pulls all conversion_ids by default. If you want to fetch conversions for a single conversion then pass it's conversion_id as a string
#visitorId can be set to get conversions for a single visitor.To pull data for all visitors do not pass the visitorID parameter to the function
conversions = client.pull_conversions(startDateTimeStr="2021-01-01 00:00:00", endDateTimeStr="2021-01-01 11:00:00",
visitorID="1522653355",landingPage=True,conversion_id='13697')
# get result for the entire period split by day as json
conversions_json = conversions.json
# get result for the entire period split by day as dataframe
conversions_dataframe = conversions.dataframe
```
| PypiClean |
/ElectroMagneticPython-2.1.4-py3-none-any.whl/EMpy/modesolvers/FD.py | import numpy
import scipy
import scipy.optimize
import EMpy.utils
from EMpy.modesolvers.interface import Mode, ModeSolver
class SVFDModeSolver(ModeSolver):
"""
This function calculates the modes of a dielectric waveguide
using the semivectorial finite difference method.
It is slightly faster than the full-vectorial VFDModeSolver,
but it does not accept non-isotropic permittivity. For example,
birefringent materials, which have
different refractive indices along different dimensions cannot be used.
It is adapted from the "svmodes.m" matlab code of Thomas Murphy and co-workers.
https://www.mathworks.com/matlabcentral/fileexchange/12734-waveguide-mode-solver/content/svmodes.m
Parameters
----------
wl : float
optical wavelength
units are arbitrary, but must be self-consistent.
I.e., just use micron for everything.
x : 1D array of floats
Array of x-values
y : 1D array of floats
Array of y-values
epsfunc : function
This is a function that provides the relative permittivity matrix
(square of the refractive index) as a function of its x and y
numpy.arrays (the function's input parameters). The function must be
of the form: ``myRelativePermittivity(x,y)``, where x and y are 2D
numpy "meshgrid" arrays that will be passed by this function.
The function returns a relative permittivity numpy.array of
shape( x.shape[0], y.shape[0] ) where each element of the array
is a single float, corresponding the an isotropic refractive index.
If an anisotropic refractive index is desired, the full-vectorial
VFDModeSolver function should be used.
boundary : str
This is a string that identifies the type of boundary conditions applied.
The following options are available:
'A' - Hx is antisymmetric, Hy is symmetric.
'S' - Hx is symmetric and, Hy is antisymmetric.
'0' - Hx and Hy are zero immediately outside of the boundary.
The string identifies all four boundary conditions, in the order:
North, south, east, west.
For example, boundary='000A'
method : str
must be 'Ex', 'Ey', or 'scalar'
this identifies the field that will be calculated.
Returns
-------
self : an instance of the SVFDModeSolver class
Typically self.solve() will be called in order to actually find the modes.
"""
def __init__(self, wl, x, y, epsfunc, boundary, method="Ex"):
self.wl = wl
self.x = x
self.y = y
self.epsfunc = epsfunc
self.boundary = boundary
self.method = method
def _get_eps(self, xc, yc):
eps = self.epsfunc(xc, yc)
eps = numpy.c_[eps[:, 0:1], eps, eps[:, -1:]]
eps = numpy.r_[eps[0:1, :], eps, eps[-1:, :]]
return eps
def build_matrix(self):
from scipy.sparse import coo_matrix
wl = self.wl
x = self.x
y = self.y
boundary = self.boundary
method = self.method
dx = numpy.diff(x)
dy = numpy.diff(y)
dx = numpy.r_[dx[0], dx, dx[-1]].reshape(-1, 1)
dy = numpy.r_[dy[0], dy, dy[-1]].reshape(1, -1)
xc = (x[:-1] + x[1:]) / 2
yc = (y[:-1] + y[1:]) / 2
eps = self._get_eps(xc, yc)
nx = len(xc)
ny = len(yc)
self.nx = nx
self.ny = ny
k = 2 * numpy.pi / wl
ones_nx = numpy.ones((nx, 1))
ones_ny = numpy.ones((1, ny))
n = numpy.dot(ones_nx, 0.5 * (dy[:, 2:] + dy[:, 1:-1])).flatten()
s = numpy.dot(ones_nx, 0.5 * (dy[:, 0:-2] + dy[:, 1:-1])).flatten()
e = numpy.dot(0.5 * (dx[2:, :] + dx[1:-1, :]), ones_ny).flatten()
w = numpy.dot(0.5 * (dx[0:-2, :] + dx[1:-1, :]), ones_ny).flatten()
p = numpy.dot(dx[1:-1, :], ones_ny).flatten()
q = numpy.dot(ones_nx, dy[:, 1:-1]).flatten()
en = eps[1:-1, 2:].flatten()
es = eps[1:-1, 0:-2].flatten()
ee = eps[2:, 1:-1].flatten()
ew = eps[0:-2, 1:-1].flatten()
ep = eps[1:-1, 1:-1].flatten()
# three methods: Ex, Ey and scalar
if method == "Ex":
# Ex
An = 2 / n / (n + s)
As = 2 / s / (n + s)
Ae = (
8
* (p * (ep - ew) + 2 * w * ew)
* ee
/ (
(p * (ep - ee) + 2 * e * ee)
* (p**2 * (ep - ew) + 4 * w**2 * ew)
+ (p * (ep - ew) + 2 * w * ew)
* (p**2 * (ep - ee) + 4 * e**2 * ee)
)
)
Aw = (
8
* (p * (ep - ee) + 2 * e * ee)
* ew
/ (
(p * (ep - ee) + 2 * e * ee)
* (p**2 * (ep - ew) + 4 * w**2 * ew)
+ (p * (ep - ew) + 2 * w * ew)
* (p**2 * (ep - ee) + 4 * e**2 * ee)
)
)
Ap = ep * k**2 - An - As - Ae * ep / ee - Aw * ep / ew
elif method == "Ey":
# Ey
An = (
8
* (q * (ep - es) + 2 * s * es)
* en
/ (
(q * (ep - en) + 2 * n * en)
* (q**2 * (ep - es) + 4 * s**2 * es)
+ (q * (ep - es) + 2 * s * es)
* (q**2 * (ep - en) + 4 * n**2 * en)
)
)
As = (
8
* (q * (ep - en) + 2 * n * en)
* es
/ (
(q * (ep - en) + 2 * n * en)
* (q**2 * (ep - es) + 4 * s**2 * es)
+ (q * (ep - es) + 2 * s * es)
* (q**2 * (ep - en) + 4 * n**2 * en)
)
)
Ae = 2 / e / (e + w)
Aw = 2 / w / (e + w)
Ap = ep * k**2 - An * ep / en - As * ep / es - Ae - Aw
elif method == "scalar":
# scalar
An = 2 / n / (n + s)
As = 2 / s / (n + s)
Ae = 2 / e / (e + w)
Aw = 2 / w / (e + w)
Ap = ep * k**2 - An - As - Ae - Aw
else:
raise ValueError("unknown method")
ii = numpy.arange(nx * ny).reshape(nx, ny)
# north boundary
ib = ii[:, -1]
if boundary[0] == "S":
Ap[ib] += An[ib]
elif boundary[0] == "A":
Ap[ib] -= An[ib]
# else:
# raise ValueError('unknown boundary')
# south
ib = ii[:, 0]
if boundary[1] == "S":
Ap[ib] += As[ib]
elif boundary[1] == "A":
Ap[ib] -= As[ib]
# else:
# raise ValueError('unknown boundary')
# east
ib = ii[-1, :]
if boundary[2] == "S":
Ap[ib] += Ae[ib]
elif boundary[2] == "A":
Ap[ib] -= Ae[ib]
# else:
# raise ValueError('unknown boundary')
# west
ib = ii[0, :]
if boundary[3] == "S":
Ap[ib] += Aw[ib]
elif boundary[3] == "A":
Ap[ib] -= Aw[ib]
# else:
# raise ValueError('unknown boundary')
iall = ii.flatten()
i_n = ii[:, 1:].flatten()
i_s = ii[:, :-1].flatten()
i_e = ii[1:, :].flatten()
i_w = ii[:-1, :].flatten()
I = numpy.r_[iall, i_w, i_e, i_s, i_n]
J = numpy.r_[iall, i_e, i_w, i_n, i_s]
V = numpy.r_[Ap[iall], Ae[i_w], Aw[i_e], An[i_s], As[i_n]]
A = coo_matrix((V, (I, J))).tocsr()
return A
def solve(self, neigs, tol):
    """Find the first `neigs` eigenmodes of the semi-vectorial problem.

    Parameters
    ----------
    neigs : int
        Number of eigenmodes to compute.
    tol : float
        Relative accuracy for the eigenvalues (0 means machine precision).

    Returns
    -------
    self
        The solver, with `self.neff` set and the sorted field profiles
        stored in `self.phi`, `self.Ex` or `self.Ey` depending on
        `self.method`.
    """
    from scipy.sparse.linalg import eigen

    self.nmodes = neigs
    self.tol = tol
    A = self.build_matrix()
    eigvals, eigvecs = eigen.eigs(  # type: ignore
        A, k=neigs, which="LR", tol=tol, ncv=10 * neigs, return_eigenvectors=True
    )
    # eigenvalues are beta^2; convert to effective indices
    neff = self.wl * numpy.sqrt(eigvals) / (2 * numpy.pi)
    phi = [eigvecs[:, ieig].reshape(self.nx, self.ny) for ieig in range(neigs)]
    # sort by decreasing effective index and save the modes
    idx = numpy.flipud(numpy.argsort(neff))
    self.neff = neff[idx]
    sorted_phi = [phi[i] for i in idx]
    if self.method == "scalar":
        self.phi = sorted_phi
    elif self.method == "Ex":
        self.Ex = sorted_phi
    elif self.method == "Ey":  # was a bare `if`; `elif` — the methods are exclusive
        self.Ey = sorted_phi
    return self
def __str__(self):
    """Return a short human-readable description of the solver."""
    return f"Semi-Vectorial Finite Difference Modesolver\n\tmethod: {self.method}\n"
class VFDModeSolver(ModeSolver):
"""
The VFDModeSolver class computes the electric and magnetic fields
for modes of a dielectric waveguide using the "Vector Finite
Difference (VFD)" method, as described in A. B. Fallahkhair,
K. S. Li and T. E. Murphy, "Vector Finite Difference Modesolver
for Anisotropic Dielectric Waveguides", J. Lightwave
Technol. 26(11), 1423-1431, (2008).
Parameters
----------
wl : float
The wavelength of the optical radiation (units are arbitrary,
but must be self-consistent between all inputs. It is recommended to
just use microns for everything)
x : 1D array of floats
Array of x-values
y : 1D array of floats
Array of y-values
epsfunc : function
This is a function that provides the relative permittivity
matrix (square of the refractive index) as a function of its x
and y numpy.arrays (the function's input parameters). The
function must be of the form: ``myRelativePermittivity(x,y)``
The function returns a relative permittivity numpy.array of either
shape( x.shape[0], y.shape[0] ) where each element of the
array can either be a single float, corresponding to an
isotropic refractive index, or (x.shape[0], y.shape[0], 5),
where the last dimension describes the relative permittivity in
the form (epsxx, epsxy, epsyx, epsyy, epszz).
boundary : str
This is a string that identifies the type of boundary
conditions applied.
The following options are available:
'A' - Hx is antisymmetric, Hy is symmetric.
'S' - Hx is symmetric and, Hy is antisymmetric.
'0' - Hx and Hy are zero immediately outside of the boundary.
The string identifies all four boundary conditions, in the
order: North, south, east, west. For example, boundary='000A'
Returns
-------
self : an instance of the VFDModeSolver class
Typically self.solve() will be called in order to actually
find the modes.
"""
def __init__(self, wl, x, y, epsfunc, boundary):
    """Store the wavelength, mesh arrays, permittivity callback and boundary spec."""
    attrs = ("wl", "x", "y", "epsfunc", "boundary")
    for attr, value in zip(attrs, (wl, x, y, epsfunc, boundary)):
        setattr(self, attr, value)
def _get_eps(self, xc, yc):
    """Evaluate the permittivity on the cell centers (xc, yc) and split it
    into tensor components, each padded by one cell of duplicated edge
    values on every side.

    Returns the tuple (epsxx, epsxy, epsyx, epsyy, epszz).
    """
    raw = self.epsfunc(xc, yc)

    def _pad(arr):
        # duplicate edge values so boundary cells have neighbors on all sides
        return numpy.pad(arr, 1, mode="edge")

    if raw.ndim == 2:  # isotropic: one scalar eps per cell
        exx = eyy = ezz = _pad(raw)
        exy = eyx = numpy.zeros_like(exx)
        return exx, exy, eyx, eyy, ezz
    if raw.ndim == 3:  # anisotropic: five tensor components per cell
        assert raw.shape[2] == 5, "eps must be NxMx5"
        exx, exy, eyx, eyy, ezz = (_pad(raw[:, :, i]) for i in range(5))
        return exx, exy, eyx, eyy, ezz
    raise ValueError("Invalid eps")
def build_matrix(self):
from scipy.sparse import coo_matrix
wl = self.wl
x = self.x
y = self.y
boundary = self.boundary
dx = numpy.diff(x)
dy = numpy.diff(y)
dx = numpy.r_[dx[0], dx, dx[-1]].reshape(-1, 1)
dy = numpy.r_[dy[0], dy, dy[-1]].reshape(1, -1)
# Note: the permittivity is actually defined at the center of each
# region *between* the mesh points used for the H-field calculation.
# (See Fig. 1 of Fallahkhair and Murphy)
# In other words, eps is defined on (xc,yc) which is offset from
# (x,y), the grid where H is calculated, by
# "half a pixel" in the positive-x and positive-y directions.
xc = (x[:-1] + x[1:]) / 2
yc = (y[:-1] + y[1:]) / 2
epsxx, epsxy, epsyx, epsyy, epszz = self._get_eps(xc, yc)
nx = len(x)
ny = len(y)
self.nx = nx
self.ny = ny
k = 2 * numpy.pi / wl
ones_nx = numpy.ones((nx, 1))
ones_ny = numpy.ones((1, ny))
# distance of mesh points to nearest neighbor mesh point:
n = numpy.dot(ones_nx, dy[:, 1:]).flatten()
s = numpy.dot(ones_nx, dy[:, :-1]).flatten()
e = numpy.dot(dx[1:, :], ones_ny).flatten()
w = numpy.dot(dx[:-1, :], ones_ny).flatten()
# These define the permittivity (eps) tensor relative to each mesh point
# using the following geometry:
#
# NW------N------NE
# | | |
# | 1 n 4 |
# | | |
# W---w---P---e---E
# | | |
# | 2 s 3 |
# | | |
# SW------S------SE
exx1 = epsxx[:-1, 1:].flatten()
exx2 = epsxx[:-1, :-1].flatten()
exx3 = epsxx[1:, :-1].flatten()
exx4 = epsxx[1:, 1:].flatten()
eyy1 = epsyy[:-1, 1:].flatten()
eyy2 = epsyy[:-1, :-1].flatten()
eyy3 = epsyy[1:, :-1].flatten()
eyy4 = epsyy[1:, 1:].flatten()
exy1 = epsxy[:-1, 1:].flatten()
exy2 = epsxy[:-1, :-1].flatten()
exy3 = epsxy[1:, :-1].flatten()
exy4 = epsxy[1:, 1:].flatten()
eyx1 = epsyx[:-1, 1:].flatten()
eyx2 = epsyx[:-1, :-1].flatten()
eyx3 = epsyx[1:, :-1].flatten()
eyx4 = epsyx[1:, 1:].flatten()
ezz1 = epszz[:-1, 1:].flatten()
ezz2 = epszz[:-1, :-1].flatten()
ezz3 = epszz[1:, :-1].flatten()
ezz4 = epszz[1:, 1:].flatten()
ns21 = n * eyy2 + s * eyy1
ns34 = n * eyy3 + s * eyy4
ew14 = e * exx1 + w * exx4
ew23 = e * exx2 + w * exx3
# calculate the finite difference coefficients following
# Fallahkhair and Murphy, Appendix Eqs 21 though 37
axxn = (
(2 * eyy4 * e - eyx4 * n) * (eyy3 / ezz4) / ns34
+ (2 * eyy1 * w + eyx1 * n) * (eyy2 / ezz1) / ns21
) / (n * (e + w))
axxs = (
(2 * eyy3 * e + eyx3 * s) * (eyy4 / ezz3) / ns34
+ (2 * eyy2 * w - eyx2 * s) * (eyy1 / ezz2) / ns21
) / (s * (e + w))
ayye = (2 * n * exx4 - e * exy4) * exx1 / ezz4 / e / ew14 / (n + s) + (
2 * s * exx3 + e * exy3
) * exx2 / ezz3 / e / ew23 / (n + s)
ayyw = (2 * exx1 * n + exy1 * w) * exx4 / ezz1 / w / ew14 / (n + s) + (
2 * exx2 * s - exy2 * w
) * exx3 / ezz2 / w / ew23 / (n + s)
axxe = (
2 / (e * (e + w))
+ (eyy4 * eyx3 / ezz3 - eyy3 * eyx4 / ezz4) / (e + w) / ns34
)
axxw = (
2 / (w * (e + w))
+ (eyy2 * eyx1 / ezz1 - eyy1 * eyx2 / ezz2) / (e + w) / ns21
)
ayyn = (
2 / (n * (n + s))
+ (exx4 * exy1 / ezz1 - exx1 * exy4 / ezz4) / (n + s) / ew14
)
ayys = (
2 / (s * (n + s))
+ (exx2 * exy3 / ezz3 - exx3 * exy2 / ezz2) / (n + s) / ew23
)
axxne = +eyx4 * eyy3 / ezz4 / (e + w) / ns34
axxse = -eyx3 * eyy4 / ezz3 / (e + w) / ns34
axxnw = -eyx1 * eyy2 / ezz1 / (e + w) / ns21
axxsw = +eyx2 * eyy1 / ezz2 / (e + w) / ns21
ayyne = +exy4 * exx1 / ezz4 / (n + s) / ew14
ayyse = -exy3 * exx2 / ezz3 / (n + s) / ew23
ayynw = -exy1 * exx4 / ezz1 / (n + s) / ew14
ayysw = +exy2 * exx3 / ezz2 / (n + s) / ew23
axxp = (
-axxn
- axxs
- axxe
- axxw
- axxne
- axxse
- axxnw
- axxsw
+ k**2
* (n + s)
* (eyy4 * eyy3 * e / ns34 + eyy1 * eyy2 * w / ns21)
/ (e + w)
)
ayyp = (
-ayyn
- ayys
- ayye
- ayyw
- ayyne
- ayyse
- ayynw
- ayysw
+ k**2
* (e + w)
* (exx1 * exx4 * n / ew14 + exx2 * exx3 * s / ew23)
/ (n + s)
)
axyn = (
eyy3 * eyy4 / ezz4 / ns34
- eyy2 * eyy1 / ezz1 / ns21
+ s * (eyy2 * eyy4 - eyy1 * eyy3) / ns21 / ns34
) / (e + w)
axys = (
eyy1 * eyy2 / ezz2 / ns21
- eyy4 * eyy3 / ezz3 / ns34
+ n * (eyy2 * eyy4 - eyy1 * eyy3) / ns21 / ns34
) / (e + w)
ayxe = (
exx1 * exx4 / ezz4 / ew14
- exx2 * exx3 / ezz3 / ew23
+ w * (exx2 * exx4 - exx1 * exx3) / ew23 / ew14
) / (n + s)
ayxw = (
exx3 * exx2 / ezz2 / ew23
- exx4 * exx1 / ezz1 / ew14
+ e * (exx4 * exx2 - exx1 * exx3) / ew23 / ew14
) / (n + s)
axye = (eyy4 * (1 + eyy3 / ezz4) - eyy3 * (1 + eyy4 / ezz4)) / ns34 / (
e + w
) - (
2 * eyx1 * eyy2 / ezz1 * n * w / ns21
+ 2 * eyx2 * eyy1 / ezz2 * s * w / ns21
+ 2 * eyx4 * eyy3 / ezz4 * n * e / ns34
+ 2 * eyx3 * eyy4 / ezz3 * s * e / ns34
+ 2 * eyy1 * eyy2 * (1.0 / ezz1 - 1.0 / ezz2) * w**2 / ns21
) / e / (
e + w
) ** 2
axyw = (eyy2 * (1 + eyy1 / ezz2) - eyy1 * (1 + eyy2 / ezz2)) / ns21 / (
e + w
) - (
2 * eyx1 * eyy2 / ezz1 * n * e / ns21
+ 2 * eyx2 * eyy1 / ezz2 * s * e / ns21
+ 2 * eyx4 * eyy3 / ezz4 * n * w / ns34
+ 2 * eyx3 * eyy4 / ezz3 * s * w / ns34
+ 2 * eyy3 * eyy4 * (1.0 / ezz3 - 1.0 / ezz4) * e**2 / ns34
) / w / (
e + w
) ** 2
ayxn = (exx4 * (1 + exx1 / ezz4) - exx1 * (1 + exx4 / ezz4)) / ew14 / (
n + s
) - (
2 * exy3 * exx2 / ezz3 * e * s / ew23
+ 2 * exy2 * exx3 / ezz2 * w * n / ew23
+ 2 * exy4 * exx1 / ezz4 * e * s / ew14
+ 2 * exy1 * exx4 / ezz1 * w * n / ew14
+ 2 * exx3 * exx2 * (1.0 / ezz3 - 1.0 / ezz2) * s**2 / ew23
) / n / (
n + s
) ** 2
ayxs = (exx2 * (1 + exx3 / ezz2) - exx3 * (1 + exx2 / ezz2)) / ew23 / (
n + s
) - (
2 * exy3 * exx2 / ezz3 * e * n / ew23
+ 2 * exy2 * exx3 / ezz2 * w * n / ew23
+ 2 * exy4 * exx1 / ezz4 * e * s / ew14
+ 2 * exy1 * exx4 / ezz1 * w * s / ew14
+ 2 * exx1 * exx4 * (1.0 / ezz1 - 1.0 / ezz4) * n**2 / ew14
) / s / (
n + s
) ** 2
axyne = +eyy3 * (1 - eyy4 / ezz4) / (e + w) / ns34
axyse = -eyy4 * (1 - eyy3 / ezz3) / (e + w) / ns34
axynw = -eyy2 * (1 - eyy1 / ezz1) / (e + w) / ns21
axysw = +eyy1 * (1 - eyy2 / ezz2) / (e + w) / ns21
ayxne = +exx1 * (1 - exx4 / ezz4) / (n + s) / ew14
ayxse = -exx2 * (1 - exx3 / ezz3) / (n + s) / ew23
ayxnw = -exx4 * (1 - exx1 / ezz1) / (n + s) / ew14
ayxsw = +exx3 * (1 - exx2 / ezz2) / (n + s) / ew23
axyp = -(axyn + axys + axye + axyw + axyne + axyse + axynw + axysw) - k**2 * (
w * (n * eyx1 * eyy2 + s * eyx2 * eyy1) / ns21
+ e * (s * eyx3 * eyy4 + n * eyx4 * eyy3) / ns34
) / (e + w)
ayxp = -(ayxn + ayxs + ayxe + ayxw + ayxne + ayxse + ayxnw + ayxsw) - k**2 * (
n * (w * exy1 * exx4 + e * exy4 * exx1) / ew14
+ s * (w * exy2 * exx3 + e * exy3 * exx2) / ew23
) / (n + s)
ii = numpy.arange(nx * ny).reshape(nx, ny)
# NORTH boundary
ib = ii[:, -1]
if boundary[0] == "S":
sign = 1
elif boundary[0] == "A":
sign = -1
elif boundary[0] == "0":
sign = 0
else:
raise ValueError("unknown boundary conditions")
axxs[ib] += sign * axxn[ib]
axxse[ib] += sign * axxne[ib]
axxsw[ib] += sign * axxnw[ib]
ayxs[ib] += sign * ayxn[ib]
ayxse[ib] += sign * ayxne[ib]
ayxsw[ib] += sign * ayxnw[ib]
ayys[ib] -= sign * ayyn[ib]
ayyse[ib] -= sign * ayyne[ib]
ayysw[ib] -= sign * ayynw[ib]
axys[ib] -= sign * axyn[ib]
axyse[ib] -= sign * axyne[ib]
axysw[ib] -= sign * axynw[ib]
# SOUTH boundary
ib = ii[:, 0]
if boundary[1] == "S":
sign = 1
elif boundary[1] == "A":
sign = -1
elif boundary[1] == "0":
sign = 0
else:
raise ValueError("unknown boundary conditions")
axxn[ib] += sign * axxs[ib]
axxne[ib] += sign * axxse[ib]
axxnw[ib] += sign * axxsw[ib]
ayxn[ib] += sign * ayxs[ib]
ayxne[ib] += sign * ayxse[ib]
ayxnw[ib] += sign * ayxsw[ib]
ayyn[ib] -= sign * ayys[ib]
ayyne[ib] -= sign * ayyse[ib]
ayynw[ib] -= sign * ayysw[ib]
axyn[ib] -= sign * axys[ib]
axyne[ib] -= sign * axyse[ib]
axynw[ib] -= sign * axysw[ib]
# EAST boundary
ib = ii[-1, :]
if boundary[2] == "S":
sign = 1
elif boundary[2] == "A":
sign = -1
elif boundary[2] == "0":
sign = 0
else:
raise ValueError("unknown boundary conditions")
axxw[ib] += sign * axxe[ib]
axxnw[ib] += sign * axxne[ib]
axxsw[ib] += sign * axxse[ib]
ayxw[ib] += sign * ayxe[ib]
ayxnw[ib] += sign * ayxne[ib]
ayxsw[ib] += sign * ayxse[ib]
ayyw[ib] -= sign * ayye[ib]
ayynw[ib] -= sign * ayyne[ib]
ayysw[ib] -= sign * ayyse[ib]
axyw[ib] -= sign * axye[ib]
axynw[ib] -= sign * axyne[ib]
axysw[ib] -= sign * axyse[ib]
# WEST boundary
ib = ii[0, :]
if boundary[3] == "S":
sign = 1
elif boundary[3] == "A":
sign = -1
elif boundary[3] == "0":
sign = 0
else:
raise ValueError("unknown boundary conditions")
axxe[ib] += sign * axxw[ib]
axxne[ib] += sign * axxnw[ib]
axxse[ib] += sign * axxsw[ib]
ayxe[ib] += sign * ayxw[ib]
ayxne[ib] += sign * ayxnw[ib]
ayxse[ib] += sign * ayxsw[ib]
ayye[ib] -= sign * ayyw[ib]
ayyne[ib] -= sign * ayynw[ib]
ayyse[ib] -= sign * ayysw[ib]
axye[ib] -= sign * axyw[ib]
axyne[ib] -= sign * axynw[ib]
axyse[ib] -= sign * axysw[ib]
# Assemble sparse matrix
iall = ii.flatten()
i_s = ii[:, :-1].flatten()
i_n = ii[:, 1:].flatten()
i_e = ii[1:, :].flatten()
i_w = ii[:-1, :].flatten()
i_ne = ii[1:, 1:].flatten()
i_se = ii[1:, :-1].flatten()
i_sw = ii[:-1, :-1].flatten()
i_nw = ii[:-1, 1:].flatten()
Ixx = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw]
Jxx = numpy.r_[iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se]
Vxx = numpy.r_[
axxp[iall],
axxe[i_w],
axxw[i_e],
axxn[i_s],
axxs[i_n],
axxsw[i_ne],
axxnw[i_se],
axxne[i_sw],
axxse[i_nw],
]
Ixy = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw]
Jxy = numpy.r_[iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se] + nx * ny
Vxy = numpy.r_[
axyp[iall],
axye[i_w],
axyw[i_e],
axyn[i_s],
axys[i_n],
axysw[i_ne],
axynw[i_se],
axyne[i_sw],
axyse[i_nw],
]
Iyx = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw] + nx * ny
Jyx = numpy.r_[iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se]
Vyx = numpy.r_[
ayxp[iall],
ayxe[i_w],
ayxw[i_e],
ayxn[i_s],
ayxs[i_n],
ayxsw[i_ne],
ayxnw[i_se],
ayxne[i_sw],
ayxse[i_nw],
]
Iyy = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw] + nx * ny
Jyy = numpy.r_[iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se] + nx * ny
Vyy = numpy.r_[
ayyp[iall],
ayye[i_w],
ayyw[i_e],
ayyn[i_s],
ayys[i_n],
ayysw[i_ne],
ayynw[i_se],
ayyne[i_sw],
ayyse[i_nw],
]
I = numpy.r_[Ixx, Ixy, Iyx, Iyy]
J = numpy.r_[Jxx, Jxy, Jyx, Jyy]
V = numpy.r_[Vxx, Vxy, Vyx, Vyy]
A = coo_matrix((V, (I, J))).tocsr()
return A
def compute_other_fields(self, neffs, Hxs, Hys):
from scipy.sparse import coo_matrix
wl = self.wl
x = self.x
y = self.y
boundary = self.boundary
Hzs = []
Exs = []
Eys = []
Ezs = []
for neff, Hx, Hy in zip(neffs, Hxs, Hys):
dx = numpy.diff(x)
dy = numpy.diff(y)
dx = numpy.r_[dx[0], dx, dx[-1]].reshape(-1, 1)
dy = numpy.r_[dy[0], dy, dy[-1]].reshape(1, -1)
xc = (x[:-1] + x[1:]) / 2
yc = (y[:-1] + y[1:]) / 2
epsxx, epsxy, epsyx, epsyy, epszz = self._get_eps(xc, yc)
nx = len(x)
ny = len(y)
k = 2 * numpy.pi / wl
ones_nx = numpy.ones((nx, 1))
ones_ny = numpy.ones((1, ny))
n = numpy.dot(ones_nx, dy[:, 1:]).flatten()
s = numpy.dot(ones_nx, dy[:, :-1]).flatten()
e = numpy.dot(dx[1:, :], ones_ny).flatten()
w = numpy.dot(dx[:-1, :], ones_ny).flatten()
exx1 = epsxx[:-1, 1:].flatten()
exx2 = epsxx[:-1, :-1].flatten()
exx3 = epsxx[1:, :-1].flatten()
exx4 = epsxx[1:, 1:].flatten()
eyy1 = epsyy[:-1, 1:].flatten()
eyy2 = epsyy[:-1, :-1].flatten()
eyy3 = epsyy[1:, :-1].flatten()
eyy4 = epsyy[1:, 1:].flatten()
exy1 = epsxy[:-1, 1:].flatten()
exy2 = epsxy[:-1, :-1].flatten()
exy3 = epsxy[1:, :-1].flatten()
exy4 = epsxy[1:, 1:].flatten()
eyx1 = epsyx[:-1, 1:].flatten()
eyx2 = epsyx[:-1, :-1].flatten()
eyx3 = epsyx[1:, :-1].flatten()
eyx4 = epsyx[1:, 1:].flatten()
ezz1 = epszz[:-1, 1:].flatten()
ezz2 = epszz[:-1, :-1].flatten()
ezz3 = epszz[1:, :-1].flatten()
ezz4 = epszz[1:, 1:].flatten()
b = neff * k
bzxne = (
0.5
* (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2)
* eyx4
/ ezz4
/ (n * eyy3 + s * eyy4)
/ ezz2
/ ezz1
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy3
* eyy1
* w
* eyy2
+ 0.5
* (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e)
* (1 - exx4 / ezz4)
/ ezz3
/ ezz2
/ (w * exx3 + e * exx2)
/ (w * exx4 + e * exx1)
/ (n + s)
* exx2
* exx3
* exx1
* s
) / b
bzxse = (
-0.5
* (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2)
* eyx3
/ ezz3
/ (n * eyy3 + s * eyy4)
/ ezz2
/ ezz1
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy4
* eyy1
* w
* eyy2
+ 0.5
* (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e)
* (1 - exx3 / ezz3)
/ (w * exx3 + e * exx2)
/ ezz4
/ ezz1
/ (w * exx4 + e * exx1)
/ (n + s)
* exx2
* n
* exx1
* exx4
) / b
bzxnw = (
-0.5
* (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3)
* eyx1
/ ezz4
/ ezz3
/ (n * eyy3 + s * eyy4)
/ ezz1
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy4
* eyy3
* eyy2
* e
- 0.5
* (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e)
* (1 - exx1 / ezz1)
/ ezz3
/ ezz2
/ (w * exx3 + e * exx2)
/ (w * exx4 + e * exx1)
/ (n + s)
* exx2
* exx3
* exx4
* s
) / b
bzxsw = (
0.5
* (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3)
* eyx2
/ ezz4
/ ezz3
/ (n * eyy3 + s * eyy4)
/ ezz2
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy4
* eyy3
* eyy1
* e
- 0.5
* (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e)
* (1 - exx2 / ezz2)
/ (w * exx3 + e * exx2)
/ ezz4
/ ezz1
/ (w * exx4 + e * exx1)
/ (n + s)
* exx3
* n
* exx1
* exx4
) / b
bzxn = (
(
0.5
* (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3)
* n
* ezz1
* ezz2
/ eyy1
* (2 * eyy1 / ezz1 / n**2 + eyx1 / ezz1 / n / w)
+ 0.5
* (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2)
* n
* ezz4
* ezz3
/ eyy4
* (2 * eyy4 / ezz4 / n**2 - eyx4 / ezz4 / n / e)
)
/ ezz4
/ ezz3
/ (n * eyy3 + s * eyy4)
/ ezz2
/ ezz1
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy4
* eyy3
* eyy1
* w
* eyy2
* e
+ (
(ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e)
* (
0.5
* ezz4
* (
(1 - exx1 / ezz1) / n / w
- exy1 / ezz1 * (2.0 / n**2 - 2 / n**2 * s / (n + s))
)
/ exx1
* ezz1
* w
+ (ezz4 - ezz1) * s / n / (n + s)
+ 0.5
* ezz1
* (
-(1 - exx4 / ezz4) / n / e
- exy4 / ezz4 * (2.0 / n**2 - 2 / n**2 * s / (n + s))
)
/ exx4
* ezz4
* e
)
- (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e)
* (
-ezz3 * exy2 / n / (n + s) / exx2 * w
+ (ezz3 - ezz2) * s / n / (n + s)
- ezz2 * exy3 / n / (n + s) / exx3 * e
)
)
/ ezz3
/ ezz2
/ (w * exx3 + e * exx2)
/ ezz4
/ ezz1
/ (w * exx4 + e * exx1)
/ (n + s)
* exx2
* exx3
* n
* exx1
* exx4
* s
) / b
bzxs = (
(
0.5
* (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3)
* s
* ezz2
* ezz1
/ eyy2
* (2 * eyy2 / ezz2 / s**2 - eyx2 / ezz2 / s / w)
+ 0.5
* (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2)
* s
* ezz3
* ezz4
/ eyy3
* (2 * eyy3 / ezz3 / s**2 + eyx3 / ezz3 / s / e)
)
/ ezz4
/ ezz3
/ (n * eyy3 + s * eyy4)
/ ezz2
/ ezz1
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy4
* eyy3
* eyy1
* w
* eyy2
* e
+ (
(ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e)
* (
-ezz4 * exy1 / s / (n + s) / exx1 * w
- (ezz4 - ezz1) * n / s / (n + s)
- ezz1 * exy4 / s / (n + s) / exx4 * e
)
- (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e)
* (
0.5
* ezz3
* (
-(1 - exx2 / ezz2) / s / w
- exy2 / ezz2 * (2.0 / s**2 - 2 / s**2 * n / (n + s))
)
/ exx2
* ezz2
* w
- (ezz3 - ezz2) * n / s / (n + s)
+ 0.5
* ezz2
* (
(1 - exx3 / ezz3) / s / e
- exy3 / ezz3 * (2.0 / s**2 - 2 / s**2 * n / (n + s))
)
/ exx3
* ezz3
* e
)
)
/ ezz3
/ ezz2
/ (w * exx3 + e * exx2)
/ ezz4
/ ezz1
/ (w * exx4 + e * exx1)
/ (n + s)
* exx2
* exx3
* n
* exx1
* exx4
* s
) / b
bzxe = (
(n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2)
* (
0.5 * n * ezz4 * ezz3 / eyy4 * (2.0 / e**2 - eyx4 / ezz4 / n / e)
+ 0.5
* s
* ezz3
* ezz4
/ eyy3
* (2.0 / e**2 + eyx3 / ezz3 / s / e)
)
/ ezz4
/ ezz3
/ (n * eyy3 + s * eyy4)
/ ezz2
/ ezz1
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy4
* eyy3
* eyy1
* w
* eyy2
* e
+ (
-0.5
* (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e)
* ezz1
* (1 - exx4 / ezz4)
/ n
/ exx4
* ezz4
- 0.5
* (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e)
* ezz2
* (1 - exx3 / ezz3)
/ s
/ exx3
* ezz3
)
/ ezz3
/ ezz2
/ (w * exx3 + e * exx2)
/ ezz4
/ ezz1
/ (w * exx4 + e * exx1)
/ (n + s)
* exx2
* exx3
* n
* exx1
* exx4
* s
) / b
bzxw = (
(-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3)
* (
0.5 * n * ezz1 * ezz2 / eyy1 * (2.0 / w**2 + eyx1 / ezz1 / n / w)
+ 0.5
* s
* ezz2
* ezz1
/ eyy2
* (2.0 / w**2 - eyx2 / ezz2 / s / w)
)
/ ezz4
/ ezz3
/ (n * eyy3 + s * eyy4)
/ ezz2
/ ezz1
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy4
* eyy3
* eyy1
* w
* eyy2
* e
+ (
0.5
* (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e)
* ezz4
* (1 - exx1 / ezz1)
/ n
/ exx1
* ezz1
+ 0.5
* (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e)
* ezz3
* (1 - exx2 / ezz2)
/ s
/ exx2
* ezz2
)
/ ezz3
/ ezz2
/ (w * exx3 + e * exx2)
/ ezz4
/ ezz1
/ (w * exx4 + e * exx1)
/ (n + s)
* exx2
* exx3
* n
* exx1
* exx4
* s
) / b
bzxp = (
(
(-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3)
* (
0.5
* n
* ezz1
* ezz2
/ eyy1
* (
-2.0 / w**2
- 2 * eyy1 / ezz1 / n**2
+ k**2 * eyy1
- eyx1 / ezz1 / n / w
)
+ 0.5
* s
* ezz2
* ezz1
/ eyy2
* (
-2.0 / w**2
- 2 * eyy2 / ezz2 / s**2
+ k**2 * eyy2
+ eyx2 / ezz2 / s / w
)
)
+ (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2)
* (
0.5
* n
* ezz4
* ezz3
/ eyy4
* (
-2.0 / e**2
- 2 * eyy4 / ezz4 / n**2
+ k**2 * eyy4
+ eyx4 / ezz4 / n / e
)
+ 0.5
* s
* ezz3
* ezz4
/ eyy3
* (
-2.0 / e**2
- 2 * eyy3 / ezz3 / s**2
+ k**2 * eyy3
- eyx3 / ezz3 / s / e
)
)
)
/ ezz4
/ ezz3
/ (n * eyy3 + s * eyy4)
/ ezz2
/ ezz1
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy4
* eyy3
* eyy1
* w
* eyy2
* e
+ (
(ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e)
* (
0.5
* ezz4
* (
-(k**2) * exy1
- (1 - exx1 / ezz1) / n / w
- exy1 / ezz1 * (-2.0 / n**2 - 2 / n**2 * (n - s) / s)
)
/ exx1
* ezz1
* w
+ (ezz4 - ezz1) * (n - s) / n / s
+ 0.5
* ezz1
* (
-(k**2) * exy4
+ (1 - exx4 / ezz4) / n / e
- exy4 / ezz4 * (-2.0 / n**2 - 2 / n**2 * (n - s) / s)
)
/ exx4
* ezz4
* e
)
- (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e)
* (
0.5
* ezz3
* (
-(k**2) * exy2
+ (1 - exx2 / ezz2) / s / w
- exy2 / ezz2 * (-2.0 / s**2 + 2 / s**2 * (n - s) / n)
)
/ exx2
* ezz2
* w
+ (ezz3 - ezz2) * (n - s) / n / s
+ 0.5
* ezz2
* (
-(k**2) * exy3
- (1 - exx3 / ezz3) / s / e
- exy3 / ezz3 * (-2.0 / s**2 + 2 / s**2 * (n - s) / n)
)
/ exx3
* ezz3
* e
)
)
/ ezz3
/ ezz2
/ (w * exx3 + e * exx2)
/ ezz4
/ ezz1
/ (w * exx4 + e * exx1)
/ (n + s)
* exx2
* exx3
* n
* exx1
* exx4
* s
) / b
bzyne = (
0.5
* (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2)
* (1 - eyy4 / ezz4)
/ (n * eyy3 + s * eyy4)
/ ezz2
/ ezz1
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy3
* eyy1
* w
* eyy2
+ 0.5
* (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e)
* exy4
/ ezz3
/ ezz2
/ (w * exx3 + e * exx2)
/ ezz4
/ (w * exx4 + e * exx1)
/ (n + s)
* exx2
* exx3
* exx1
* s
) / b
bzyse = (
-0.5
* (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2)
* (1 - eyy3 / ezz3)
/ (n * eyy3 + s * eyy4)
/ ezz2
/ ezz1
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy4
* eyy1
* w
* eyy2
+ 0.5
* (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e)
* exy3
/ ezz3
/ (w * exx3 + e * exx2)
/ ezz4
/ ezz1
/ (w * exx4 + e * exx1)
/ (n + s)
* exx2
* n
* exx1
* exx4
) / b
bzynw = (
-0.5
* (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3)
* (1 - eyy1 / ezz1)
/ ezz4
/ ezz3
/ (n * eyy3 + s * eyy4)
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy4
* eyy3
* eyy2
* e
- 0.5
* (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e)
* exy1
/ ezz3
/ ezz2
/ (w * exx3 + e * exx2)
/ ezz1
/ (w * exx4 + e * exx1)
/ (n + s)
* exx2
* exx3
* exx4
* s
) / b
bzysw = (
0.5
* (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3)
* (1 - eyy2 / ezz2)
/ ezz4
/ ezz3
/ (n * eyy3 + s * eyy4)
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy4
* eyy3
* eyy1
* e
- 0.5
* (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e)
* exy2
/ ezz2
/ (w * exx3 + e * exx2)
/ ezz4
/ ezz1
/ (w * exx4 + e * exx1)
/ (n + s)
* exx3
* n
* exx1
* exx4
) / b
bzyn = (
(
0.5
* (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3)
* ezz1
* ezz2
/ eyy1
* (1 - eyy1 / ezz1)
/ w
- 0.5
* (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2)
* ezz4
* ezz3
/ eyy4
* (1 - eyy4 / ezz4)
/ e
)
/ ezz4
/ ezz3
/ (n * eyy3 + s * eyy4)
/ ezz2
/ ezz1
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy4
* eyy3
* eyy1
* w
* eyy2
* e
+ (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e)
* (
0.5 * ezz4 * (2.0 / n**2 + exy1 / ezz1 / n / w) / exx1 * ezz1 * w
+ 0.5
* ezz1
* (2.0 / n**2 - exy4 / ezz4 / n / e)
/ exx4
* ezz4
* e
)
/ ezz3
/ ezz2
/ (w * exx3 + e * exx2)
/ ezz4
/ ezz1
/ (w * exx4 + e * exx1)
/ (n + s)
* exx2
* exx3
* n
* exx1
* exx4
* s
) / b
bzys = (
(
-0.5
* (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3)
* ezz2
* ezz1
/ eyy2
* (1 - eyy2 / ezz2)
/ w
+ 0.5
* (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2)
* ezz3
* ezz4
/ eyy3
* (1 - eyy3 / ezz3)
/ e
)
/ ezz4
/ ezz3
/ (n * eyy3 + s * eyy4)
/ ezz2
/ ezz1
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy4
* eyy3
* eyy1
* w
* eyy2
* e
- (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e)
* (
0.5 * ezz3 * (2.0 / s**2 - exy2 / ezz2 / s / w) / exx2 * ezz2 * w
+ 0.5
* ezz2
* (2.0 / s**2 + exy3 / ezz3 / s / e)
/ exx3
* ezz3
* e
)
/ ezz3
/ ezz2
/ (w * exx3 + e * exx2)
/ ezz4
/ ezz1
/ (w * exx4 + e * exx1)
/ (n + s)
* exx2
* exx3
* n
* exx1
* exx4
* s
) / b
bzye = (
(
(-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3)
* (
-n * ezz2 / eyy1 * eyx1 / e / (e + w)
+ (ezz1 - ezz2) * w / e / (e + w)
- s * ezz1 / eyy2 * eyx2 / e / (e + w)
)
+ (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2)
* (
0.5
* n
* ezz4
* ezz3
/ eyy4
* (
-(1 - eyy4 / ezz4) / n / e
- eyx4 / ezz4 * (2.0 / e**2 - 2 / e**2 * w / (e + w))
)
+ 0.5
* s
* ezz3
* ezz4
/ eyy3
* (
(1 - eyy3 / ezz3) / s / e
- eyx3 / ezz3 * (2.0 / e**2 - 2 / e**2 * w / (e + w))
)
+ (ezz4 - ezz3) * w / e / (e + w)
)
)
/ ezz4
/ ezz3
/ (n * eyy3 + s * eyy4)
/ ezz2
/ ezz1
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy4
* eyy3
* eyy1
* w
* eyy2
* e
+ (
0.5
* (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e)
* ezz1
* (2 * exx4 / ezz4 / e**2 - exy4 / ezz4 / n / e)
/ exx4
* ezz4
* e
- 0.5
* (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e)
* ezz2
* (2 * exx3 / ezz3 / e**2 + exy3 / ezz3 / s / e)
/ exx3
* ezz3
* e
)
/ ezz3
/ ezz2
/ (w * exx3 + e * exx2)
/ ezz4
/ ezz1
/ (w * exx4 + e * exx1)
/ (n + s)
* exx2
* exx3
* n
* exx1
* exx4
* s
) / b
bzyw = (
(
(-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3)
* (
0.5
* n
* ezz1
* ezz2
/ eyy1
* (
(1 - eyy1 / ezz1) / n / w
- eyx1 / ezz1 * (2.0 / w**2 - 2 / w**2 * e / (e + w))
)
- (ezz1 - ezz2) * e / w / (e + w)
+ 0.5
* s
* ezz2
* ezz1
/ eyy2
* (
-(1 - eyy2 / ezz2) / s / w
- eyx2 / ezz2 * (2.0 / w**2 - 2 / w**2 * e / (e + w))
)
)
+ (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2)
* (
-n * ezz3 / eyy4 * eyx4 / w / (e + w)
- s * ezz4 / eyy3 * eyx3 / w / (e + w)
- (ezz4 - ezz3) * e / w / (e + w)
)
)
/ ezz4
/ ezz3
/ (n * eyy3 + s * eyy4)
/ ezz2
/ ezz1
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy4
* eyy3
* eyy1
* w
* eyy2
* e
+ (
0.5
* (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e)
* ezz4
* (2 * exx1 / ezz1 / w**2 + exy1 / ezz1 / n / w)
/ exx1
* ezz1
* w
- 0.5
* (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e)
* ezz3
* (2 * exx2 / ezz2 / w**2 - exy2 / ezz2 / s / w)
/ exx2
* ezz2
* w
)
/ ezz3
/ ezz2
/ (w * exx3 + e * exx2)
/ ezz4
/ ezz1
/ (w * exx4 + e * exx1)
/ (n + s)
* exx2
* exx3
* n
* exx1
* exx4
* s
) / b
bzyp = (
(
(-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3)
* (
0.5
* n
* ezz1
* ezz2
/ eyy1
* (
-(k**2) * eyx1
- (1 - eyy1 / ezz1) / n / w
- eyx1 / ezz1 * (-2.0 / w**2 + 2 / w**2 * (e - w) / e)
)
+ (ezz1 - ezz2) * (e - w) / e / w
+ 0.5
* s
* ezz2
* ezz1
/ eyy2
* (
-(k**2) * eyx2
+ (1 - eyy2 / ezz2) / s / w
- eyx2 / ezz2 * (-2.0 / w**2 + 2 / w**2 * (e - w) / e)
)
)
+ (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2)
* (
0.5
* n
* ezz4
* ezz3
/ eyy4
* (
-(k**2) * eyx4
+ (1 - eyy4 / ezz4) / n / e
- eyx4 / ezz4 * (-2.0 / e**2 - 2 / e**2 * (e - w) / w)
)
+ 0.5
* s
* ezz3
* ezz4
/ eyy3
* (
-(k**2) * eyx3
- (1 - eyy3 / ezz3) / s / e
- eyx3 / ezz3 * (-2.0 / e**2 - 2 / e**2 * (e - w) / w)
)
+ (ezz4 - ezz3) * (e - w) / e / w
)
)
/ ezz4
/ ezz3
/ (n * eyy3 + s * eyy4)
/ ezz2
/ ezz1
/ (n * eyy2 + s * eyy1)
/ (e + w)
* eyy4
* eyy3
* eyy1
* w
* eyy2
* e
+ (
(ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e)
* (
0.5
* ezz4
* (
-2.0 / n**2
- 2 * exx1 / ezz1 / w**2
+ k**2 * exx1
- exy1 / ezz1 / n / w
)
/ exx1
* ezz1
* w
+ 0.5
* ezz1
* (
-2.0 / n**2
- 2 * exx4 / ezz4 / e**2
+ k**2 * exx4
+ exy4 / ezz4 / n / e
)
/ exx4
* ezz4
* e
)
- (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e)
* (
0.5
* ezz3
* (
-2.0 / s**2
- 2 * exx2 / ezz2 / w**2
+ k**2 * exx2
+ exy2 / ezz2 / s / w
)
/ exx2
* ezz2
* w
+ 0.5
* ezz2
* (
-2.0 / s**2
- 2 * exx3 / ezz3 / e**2
+ k**2 * exx3
- exy3 / ezz3 / s / e
)
/ exx3
* ezz3
* e
)
)
/ ezz3
/ ezz2
/ (w * exx3 + e * exx2)
/ ezz4
/ ezz1
/ (w * exx4 + e * exx1)
/ (n + s)
* exx2
* exx3
* n
* exx1
* exx4
* s
) / b
ii = numpy.arange(nx * ny).reshape(nx, ny)
# NORTH boundary
ib = ii[:, -1]
if boundary[0] == "S":
sign = 1
elif boundary[0] == "A":
sign = -1
elif boundary[0] == "0":
sign = 0
else:
raise ValueError("unknown boundary conditions")
bzxs[ib] += sign * bzxn[ib]
bzxse[ib] += sign * bzxne[ib]
bzxsw[ib] += sign * bzxnw[ib]
bzys[ib] -= sign * bzyn[ib]
bzyse[ib] -= sign * bzyne[ib]
bzysw[ib] -= sign * bzynw[ib]
# SOUTH boundary
ib = ii[:, 0]
if boundary[1] == "S":
sign = 1
elif boundary[1] == "A":
sign = -1
elif boundary[1] == "0":
sign = 0
else:
raise ValueError("unknown boundary conditions")
bzxn[ib] += sign * bzxs[ib]
bzxne[ib] += sign * bzxse[ib]
bzxnw[ib] += sign * bzxsw[ib]
bzyn[ib] -= sign * bzys[ib]
bzyne[ib] -= sign * bzyse[ib]
bzynw[ib] -= sign * bzysw[ib]
# EAST boundary
ib = ii[-1, :]
if boundary[2] == "S":
sign = 1
elif boundary[2] == "A":
sign = -1
elif boundary[2] == "0":
sign = 0
else:
raise ValueError("unknown boundary conditions")
bzxw[ib] += sign * bzxe[ib]
bzxnw[ib] += sign * bzxne[ib]
bzxsw[ib] += sign * bzxse[ib]
bzyw[ib] -= sign * bzye[ib]
bzynw[ib] -= sign * bzyne[ib]
bzysw[ib] -= sign * bzyse[ib]
# WEST boundary
ib = ii[0, :]
if boundary[3] == "S":
sign = 1
elif boundary[3] == "A":
sign = -1
elif boundary[3] == "0":
sign = 0
else:
raise ValueError("unknown boundary conditions")
bzxe[ib] += sign * bzxw[ib]
bzxne[ib] += sign * bzxnw[ib]
bzxse[ib] += sign * bzxsw[ib]
bzye[ib] -= sign * bzyw[ib]
bzyne[ib] -= sign * bzynw[ib]
bzyse[ib] -= sign * bzysw[ib]
# Assemble sparse matrix
iall = ii.flatten()
i_s = ii[:, :-1].flatten()
i_n = ii[:, 1:].flatten()
i_e = ii[1:, :].flatten()
i_w = ii[:-1, :].flatten()
i_ne = ii[1:, 1:].flatten()
i_se = ii[1:, :-1].flatten()
i_sw = ii[:-1, :-1].flatten()
i_nw = ii[:-1, 1:].flatten()
Izx = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw]
Jzx = numpy.r_[iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se]
Vzx = numpy.r_[
bzxp[iall],
bzxe[i_w],
bzxw[i_e],
bzxn[i_s],
bzxs[i_n],
bzxsw[i_ne],
bzxnw[i_se],
bzxne[i_sw],
bzxse[i_nw],
]
Izy = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw]
Jzy = numpy.r_[iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se] + nx * ny
Vzy = numpy.r_[
bzyp[iall],
bzye[i_w],
bzyw[i_e],
bzyn[i_s],
bzys[i_n],
bzysw[i_ne],
bzynw[i_se],
bzyne[i_sw],
bzyse[i_nw],
]
I = numpy.r_[Izx, Izy]
J = numpy.r_[Jzx, Jzy]
V = numpy.r_[Vzx, Vzy]
B = coo_matrix((V, (I, J))).tocsr()
HxHy = numpy.r_[Hx, Hy]
Hz = B * HxHy.ravel() / 1j
Hz = Hz.reshape(Hx.shape)
# in xc e yc
exx = epsxx[1:-1, 1:-1]
exy = epsxy[1:-1, 1:-1]
eyx = epsyx[1:-1, 1:-1]
eyy = epsyy[1:-1, 1:-1]
ezz = epszz[1:-1, 1:-1]
edet = exx * eyy - exy * eyx
h = e.reshape(nx, ny)[:-1, :-1]
v = n.reshape(nx, ny)[:-1, :-1]
# in xc e yc
Dx = neff * EMpy.utils.centered2d(Hy) + (
Hz[:-1, 1:] + Hz[1:, 1:] - Hz[:-1, :-1] - Hz[1:, :-1]
) / (2j * k * v)
Dy = -neff * EMpy.utils.centered2d(Hx) - (
Hz[1:, :-1] + Hz[1:, 1:] - Hz[:-1, 1:] - Hz[:-1, :-1]
) / (2j * k * h)
Dz = (
(Hy[1:, :-1] + Hy[1:, 1:] - Hy[:-1, 1:] - Hy[:-1, :-1]) / (2 * h)
- (Hx[:-1, 1:] + Hx[1:, 1:] - Hx[:-1, :-1] - Hx[1:, :-1]) / (2 * v)
) / (1j * k)
Ex = (eyy * Dx - exy * Dy) / edet
Ey = (exx * Dy - eyx * Dx) / edet
Ez = Dz / ezz
Hzs.append(Hz)
Exs.append(Ex)
Eys.append(Ey)
Ezs.append(Ez)
return (Hzs, Exs, Eys, Ezs)
def solve(self, neigs=4, tol=0, guess=None):
    """Find the eigenmodes of the vectorial problem.

    Parameters
    ----------
    neigs : int
        Number of eigenmodes to find.
    tol : float
        Relative accuracy for eigenvalues; 0 implies machine precision.
    guess : float
        A guess for the effective refractive index.  The modesolver will
        only find eigenvectors with an effective refractive index higher
        than this value.

    Returns
    -------
    self : an instance of the VFDModeSolver class
        Obtain the fields of interest for specific modes using, for example:
        solver = EMpy.modesolvers.FD.VFDModeSolver(wavelength, x, y, epsf, boundary).solve()
        Ex = solver.modes[0].Ex
        Ey = solver.modes[0].Ey
        Ez = solver.modes[0].Ez
    """
    from scipy.sparse.linalg import eigen

    self.nmodes = neigs
    self.tol = tol
    A = self.build_matrix()
    shift = None
    if guess is not None:
        # eigs searches near `sigma`; convert the index guess to the
        # corresponding eigenvalue beta^2 = (guess * k)^2
        k = 2 * numpy.pi / self.wl
        shift = (guess * k) ** 2
    # Here is where the actual mode-solving takes place!
    eigvals, eigvecs = eigen.eigs(
        A,
        k=neigs,
        which="LR",
        tol=tol,
        ncv=10 * neigs,
        return_eigenvectors=True,
        sigma=shift,
    )
    neffs = self.wl * numpy.sqrt(eigvals) / (2 * numpy.pi)
    nx = self.nx
    ny = self.ny
    # first half of each eigenvector is Hx, second half Hy
    Hxs = [eigvecs[: nx * ny, m].reshape(nx, ny) for m in range(neigs)]
    Hys = [eigvecs[nx * ny :, m].reshape(nx, ny) for m in range(neigs)]
    # sort the modes by decreasing effective index
    order = numpy.flipud(numpy.argsort(neffs))
    neffs = neffs[order]
    Hxs = [Hxs[m] for m in order]
    Hys = [Hys[m] for m in order]
    Hzs, Exs, Eys, Ezs = self.compute_other_fields(neffs, Hxs, Hys)
    self.modes = [
        FDMode(self.wl, self.x, self.y, neff, Ex, Ey, Ez, Hx, Hy, Hz).normalize()
        for neff, Hx, Hy, Hz, Ex, Ey, Ez in zip(neffs, Hxs, Hys, Hzs, Exs, Eys, Ezs)
    ]
    return self
def save_modes_for_FDTD(self, x=None, y=None):
    """Save every computed mode to file, named by its index ("0", "1", ...)."""
    for index, mode in enumerate(self.modes):
        mode.save_for_FDTD(str(index), x, y)
def __str__(self):
    """Return a short human-readable description of the solver."""
    return "Vectorial Finite Difference Modesolver\n"
class FDMode(Mode):
    """A single eigenmode computed on the finite-difference grid.

    Grid convention (established by get_field below): the electric-field
    components Ex, Ey, Ez are stored on the cell-centered grid, while the
    magnetic-field components Hx, Hy, Hz are stored on the vertex grid
    defined by x and y.
    """

    def __init__(self, wl, x, y, neff, Ex, Ey, Ez, Hx, Hy, Hz):
        # wl: vacuum wavelength; x, y: vertex grid coordinates;
        # neff: (possibly complex) effective refractive index of the mode.
        self.wl = wl
        self.x = x
        self.y = y
        self.neff = neff
        self.Ex = Ex
        self.Ey = Ey
        self.Ez = Ez
        self.Hx = Hx
        self.Hy = Hy
        self.Hz = Hz

    def get_x(self, n=None):
        """Return the x grid, resampled to n uniformly spaced points if given."""
        if n is None:
            return self.x
        return numpy.linspace(self.x[0], self.x[-1], n)

    def get_y(self, n=None):
        """Return the y grid, resampled to n uniformly spaced points if given."""
        if n is None:
            return self.y
        return numpy.linspace(self.y[0], self.y[-1], n)

    def get_field(self, fname, x=None, y=None):
        """Return the field component named fname ("Ex".."Hz").

        If x and y are both None the raw stored array is returned;
        otherwise the field is interpolated onto the (x, y) grid from
        its native grid (cell-centered for E, vertex for H).
        """
        if fname == "Ex":
            f = self.Ex
            centered = True
        elif fname == "Ey":
            f = self.Ey
            centered = True
        elif fname == "Ez":
            f = self.Ez
            centered = True
        elif fname == "Hx":
            f = self.Hx
            centered = False
        elif fname == "Hy":
            f = self.Hy
            centered = False
        else:  # if fname == "Hz"
            # Any unrecognized name falls through to Hz.
            f = self.Hz
            centered = False
        if (x is None) and (y is None):
            return f
        if not centered:
            # magnetic fields are not centered
            x0 = self.x
            y0 = self.y
        else:
            # electric fields and intensity are centered
            x0 = EMpy.utils.centered1d(self.x)
            y0 = EMpy.utils.centered1d(self.y)
        return EMpy.utils.interp2(x, y, x0, y0, f)

    def intensityTETM(self, x=None, y=None):
        """Return the (TE, TM) longitudinal Poynting-vector contributions.

        centered2d moves the conjugated H fields onto the cell-centered
        grid where the E fields live.  With x, y given, both components
        are interpolated onto that grid.
        """
        I_TE = self.Ex * EMpy.utils.centered2d(numpy.conj(self.Hy)) / 2.0
        I_TM = -self.Ey * EMpy.utils.centered2d(numpy.conj(self.Hx)) / 2.0
        if x is None and y is None:
            return (I_TE, I_TM)
        else:
            x0 = EMpy.utils.centered1d(self.x)
            y0 = EMpy.utils.centered1d(self.y)
            I_TE_ = EMpy.utils.interp2(x, y, x0, y0, I_TE)
            I_TM_ = EMpy.utils.interp2(x, y, x0, y0, I_TM)
            return (I_TE_, I_TM_)

    def intensity(self, x=None, y=None):
        """Return the total longitudinal intensity (TE + TM contributions)."""
        I_TE, I_TM = self.intensityTETM(x, y)
        return I_TE + I_TM

    def TEfrac(self, x_=None, y_=None):
        """Return the TE fraction of the mode power, integrated over the grid."""
        if x_ is None:
            x = EMpy.utils.centered1d(self.x)
        else:
            x = x_
        if y_ is None:
            y = EMpy.utils.centered1d(self.y)
        else:
            y = y_
        STE, STM = self.intensityTETM(x_, y_)
        num = EMpy.utils.trapz2(numpy.abs(STE), x=x, y=y)
        den = EMpy.utils.trapz2(numpy.abs(STE) + numpy.abs(STM), x=x, y=y)
        return num / den

    def norm(self):
        """Return sqrt of the total power, integrated on the centered grid."""
        x = EMpy.utils.centered1d(self.x)
        y = EMpy.utils.centered1d(self.y)
        return numpy.sqrt(EMpy.utils.trapz2(self.intensity(), x=x, y=y))

    def normalize(self):
        """Scale all six field components in place to unit power; return self."""
        n = self.norm()
        self.Ex /= n
        self.Ey /= n
        self.Ez /= n
        self.Hx /= n
        self.Hy /= n
        self.Hz /= n
        return self

    def overlap(self, m, x=None, y=None):
        """Return the overlap integral between this mode and mode m.

        Both modes are interpolated onto a common (x, y) grid, which
        defaults to m's centered grid.
        """
        x1 = EMpy.utils.centered1d(self.x)
        y1 = EMpy.utils.centered1d(self.y)
        x2 = EMpy.utils.centered1d(m.x)
        y2 = EMpy.utils.centered1d(m.y)
        if x is None:
            x = x2
        if y is None:
            y = y2
        # Interpolates m1 onto m2 grid:
        Ex1 = EMpy.utils.interp2(x, y, x1, y1, self.Ex)
        Ey1 = EMpy.utils.interp2(x, y, x1, y1, self.Ey)
        # NOTE(review): m.Hx/m.Hy live on the vertex grid, yet they are
        # interpolated here using m's *centered* grid (x2, y2), unlike
        # get_field which uses the vertex grid for H — confirm intended.
        Hx2 = EMpy.utils.interp2(x, y, x2, y2, m.Hx)
        Hy2 = EMpy.utils.interp2(x, y, x2, y2, m.Hy)
        intensity = (
            Ex1 * EMpy.utils.centered2d(numpy.conj(Hy2))
            - Ey1 * EMpy.utils.centered2d(numpy.conj(Hx2))
        ) / 2.0
        return EMpy.utils.trapz2(intensity, x=x, y=y)

    def get_fields_for_FDTD(self, x=None, y=None):
        """Get the mode's fields on a staggered (Yee-style) grid.

        Note: ignores some fields on the boundaries, as annotated per
        component below.  H components are divided by -120*pi (the
        free-space impedance in ohms, with a sign flip to reverse the
        propagation direction for the FDTD convention).
        """
        if x is None:
            x = self.x
        if y is None:
            y = self.y
        # Ex: ignores y = 0, max
        x_Ex = EMpy.utils.centered1d(self.x)
        y_Ex = EMpy.utils.centered1d(self.y)
        x_Ex_FDTD = EMpy.utils.centered1d(x)
        y_Ex_FDTD = y[1:-1]
        Ex_FDTD = EMpy.utils.interp2(x_Ex_FDTD, y_Ex_FDTD, x_Ex, y_Ex, self.Ex)
        # Ey: ignores x = 0, max
        x_Ey = EMpy.utils.centered1d(self.x)
        y_Ey = EMpy.utils.centered1d(self.y)
        x_Ey_FDTD = x[1:-1]
        y_Ey_FDTD = EMpy.utils.centered1d(y)
        Ey_FDTD = EMpy.utils.interp2(x_Ey_FDTD, y_Ey_FDTD, x_Ey, y_Ey, self.Ey)
        # Ez: ignores x, y = 0, max
        x_Ez = EMpy.utils.centered1d(self.x)
        y_Ez = EMpy.utils.centered1d(self.y)
        x_Ez_FDTD = x[1:-1]
        y_Ez_FDTD = y[1:-1]
        Ez_FDTD = EMpy.utils.interp2(x_Ez_FDTD, y_Ez_FDTD, x_Ez, y_Ez, self.Ez)
        # Hx: ignores x = 0, max, /120pi, reverse direction
        x_Hx = self.x
        y_Hx = self.y
        x_Hx_FDTD = x[1:-1]
        y_Hx_FDTD = EMpy.utils.centered1d(y)
        Hx_FDTD = EMpy.utils.interp2(x_Hx_FDTD, y_Hx_FDTD, x_Hx, y_Hx, self.Hx) / (
            -120.0 * numpy.pi
        )
        # Hy: ignores y = 0, max, /120pi, reverse direction
        x_Hy = self.x
        y_Hy = self.y
        x_Hy_FDTD = EMpy.utils.centered1d(x)
        y_Hy_FDTD = y[1:-1]
        Hy_FDTD = EMpy.utils.interp2(x_Hy_FDTD, y_Hy_FDTD, x_Hy, y_Hy, self.Hy) / (
            -120.0 * numpy.pi
        )
        # Hz: /120pi, reverse direction
        x_Hz = self.x
        y_Hz = self.y
        x_Hz_FDTD = EMpy.utils.centered1d(x)
        y_Hz_FDTD = EMpy.utils.centered1d(y)
        Hz_FDTD = EMpy.utils.interp2(x_Hz_FDTD, y_Hz_FDTD, x_Hz, y_Hz, self.Hz) / (
            -120.0 * numpy.pi
        )
        return (Ex_FDTD, Ey_FDTD, Ez_FDTD, Hx_FDTD, Hy_FDTD, Hz_FDTD)

    @staticmethod
    def plot_field(x, y, field):
        """Contour-plot |field| on (x, y); no-op when pylab is unavailable."""
        try:
            import pylab
        except ImportError:
            print("no pylab installed")
            return
        pylab.hot()
        # Transpose so the first array axis (x) maps to the plot's x axis.
        pylab.contour(x, y, numpy.abs(field.T), 16)
        pylab.axis("image")

    def plot_Ex(self, x=None, y=None):
        """Plot |Ex| (defaults to its native cell-centered grid)."""
        if x is None:
            x = EMpy.utils.centered1d(self.x)
        if y is None:
            y = EMpy.utils.centered1d(self.y)
        Ex = self.get_field("Ex", x, y)
        self.plot_field(x, y, Ex)

    def plot_Ey(self, x=None, y=None):
        """Plot |Ey| (defaults to its native cell-centered grid)."""
        if x is None:
            x = EMpy.utils.centered1d(self.x)
        if y is None:
            y = EMpy.utils.centered1d(self.y)
        Ey = self.get_field("Ey", x, y)
        self.plot_field(x, y, Ey)

    def plot_Ez(self, x=None, y=None):
        """Plot |Ez| (defaults to its native cell-centered grid)."""
        if x is None:
            x = EMpy.utils.centered1d(self.x)
        if y is None:
            y = EMpy.utils.centered1d(self.y)
        Ez = self.get_field("Ez", x, y)
        self.plot_field(x, y, Ez)

    def plot_Hx(self, x=None, y=None):
        """Plot |Hx| (defaults to its native vertex grid)."""
        if x is None:
            x = self.x
        if y is None:
            y = self.y
        Hx = self.get_field("Hx", x, y)
        self.plot_field(x, y, Hx)

    def plot_Hy(self, x=None, y=None):
        """Plot |Hy| (defaults to its native vertex grid)."""
        if x is None:
            x = self.x
        if y is None:
            y = self.y
        Hy = self.get_field("Hy", x, y)
        self.plot_field(x, y, Hy)

    def plot_Hz(self, x=None, y=None):
        """Plot |Hz| (defaults to its native vertex grid)."""
        if x is None:
            x = self.x
        if y is None:
            y = self.y
        Hz = self.get_field("Hz", x, y)
        self.plot_field(x, y, Hz)

    def plot_intensity(self):
        """Plot the total intensity on the cell-centered grid."""
        x = EMpy.utils.centered1d(self.x)
        y = EMpy.utils.centered1d(self.y)
        I = self.intensity(x, y)
        self.plot_field(x, y, I)

    def plot(self):
        """Plot the mode's fields in a 2x3 grid of subplots."""
        try:
            import pylab
        except ImportError:
            print("no pylab installed")
            return
        pylab.figure()
        pylab.subplot(2, 3, 1)
        self.plot_Ex()
        pylab.title("Ex")
        pylab.subplot(2, 3, 2)
        self.plot_Ey()
        pylab.title("Ey")
        pylab.subplot(2, 3, 3)
        self.plot_Ez()
        pylab.title("Ez")
        pylab.subplot(2, 3, 4)
        self.plot_Hx()
        pylab.title("Hx")
        pylab.subplot(2, 3, 5)
        self.plot_Hy()
        pylab.title("Hy")
        pylab.subplot(2, 3, 6)
        self.plot_Hz()
        pylab.title("Hz")
def stretchmesh(x, y, nlayers, factor, method="PPPP"):
    """Continuously stretch the grid spacing at the edges of the window.

    This is useful to enlarge the computation window of a
    finite-difference calculation without increasing the total number of
    points.  Four expansion methods are implemented: uniform ('U'),
    linear ('L'), parabolic ('P', the default) and geometric ('G').
    The first three also allow complex coordinate stretching, which is
    useful for creating perfectly-matched non-reflective boundaries.

    Parameters
    ----------
    x, y : ndarray
        Vertices of the original grid, usually linearly spaced.
    nlayers : int or 4-vector
        How many boundary layers to stretch, per side:
        nlayers[0] = north, nlayers[1] = south,
        nlayers[2] = east,  nlayers[3] = west.
        A scalar applies to all four sides.
    factor : number or 4-vector
        Cumulative factor by which the layers are expanded
        (scalar or per-side, like nlayers).
    method : str
        4-letter string choosing the method per boundary (N, S, E, W);
        e.g. 'LLLG' = linear for north/south/east, geometric for west.

    Returns
    -------
    (xx, yy, xc, yc, dx, dy) : tuple of ndarrays
        Stretched vertices, cell-center coordinates, and grid spacings.
    """
    xx = x.astype(complex)
    yy = y.astype(complex)

    # Broadcast scalars to per-boundary 4-vectors WITHOUT mutating the
    # caller's arrays: the previous in-place `*=` modified ndarray
    # arguments and raised a casting error for integer `factor` arrays.
    nlayers = nlayers * numpy.ones(4, dtype=int)
    factor = factor * numpy.ones(4)

    for idx, (n, f, m) in enumerate(zip(nlayers, factor, method.upper())):
        if n > 0 and f != 1:
            # q1 is the inner (fixed) edge of the stretched band,
            # q2 the outer boundary vertex; kv indexes the band.
            if idx == 0:
                # north boundary
                kv = numpy.arange(len(y) - 1 - n, len(y))
                z = yy
                q1 = z[-1 - n]
                q2 = z[-1]
            elif idx == 1:
                # south boundary
                kv = numpy.arange(0, n)
                z = yy
                q1 = z[n]
                q2 = z[0]
            elif idx == 2:
                # east boundary
                kv = numpy.arange(len(x) - 1 - n, len(x))
                z = xx
                q1 = z[-1 - n]
                q2 = z[-1]
            else:  # if idx == 3
                # west boundary
                kv = numpy.arange(0, n)
                z = xx
                q1 = z[n]
                q2 = z[0]
            kv = kv.astype(int)
            if m == "U":
                # uniform: linear map fixing q1, scaling (q2 - q1) by f
                c = numpy.polyfit([q1, q2], [q1, q1 + f * (q2 - q1)], 1)
                z[kv] = numpy.polyval(c, z[kv])
            elif m == "L":
                # linear growth of the local spacing
                c = (f - 1) / (q2 - q1)
                b = 1 - 2 * c * q1
                a = q1 - b * q1 - c * q1**2
                z[kv] = a + b * z[kv] + c * z[kv] ** 2
            elif m == "P":
                # parabolic (default): cubic displacement, zero at q1
                z[kv] = z[kv] + (f - 1) * (z[kv] - q1) ** 3 / (q2 - q1) ** 2
            elif m == "G":
                # geometric: exponential spacing; solve exp(s) - 1 = f*s
                b = scipy.optimize.newton(lambda s: numpy.exp(s) - 1 - f * s, f)
                a = (q2 - q1) / b
                z[kv] = q1 + a * (numpy.exp((z[kv] - q1) / a) - 1)

    # Force non-negative imaginary parts (complex stretching for PML-like
    # absorbing boundaries must not produce gain).
    xx = xx.real + 1j * numpy.abs(xx.imag)
    yy = yy.real + 1j * numpy.abs(yy.imag)

    # Cell centers and spacings of the stretched grid.
    xc = (xx[:-1] + xx[1:]) / 2.0
    yc = (yy[:-1] + yy[1:]) / 2.0
    dx = numpy.diff(xx)
    dy = numpy.diff(yy)

    return (xx, yy, xc, yc, dx, dy)
/Argonaut-0.3.4.tar.gz/Argonaut-0.3.4/argonaut/public/ckeditor/plugins/find/dialogs/find.js | /*
Copyright (c) 2003-2010, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
(function(){var a;function b(i){return i.type==CKEDITOR.NODE_TEXT&&i.getLength()>0&&(!a||!i.isReadOnly());};function c(i){return!(i.type==CKEDITOR.NODE_ELEMENT&&i.isBlockBoundary(CKEDITOR.tools.extend({},CKEDITOR.dtd.$empty,CKEDITOR.dtd.$nonEditable)));};var d=function(){var i=this;return{textNode:i.textNode,offset:i.offset,character:i.textNode?i.textNode.getText().charAt(i.offset):null,hitMatchBoundary:i._.matchBoundary};},e=['find','replace'],f=[['txtFindFind','txtFindReplace'],['txtFindCaseChk','txtReplaceCaseChk'],['txtFindWordChk','txtReplaceWordChk'],['txtFindCyclic','txtReplaceCyclic']];function g(i){var j,k,l,m;j=i==='find'?1:0;k=1-j;var n,o=f.length;for(n=0;n<o;n++){l=this.getContentElement(e[j],f[n][j]);m=this.getContentElement(e[k],f[n][k]);m.setValue(l.getValue());}};var h=function(i,j){var k=new CKEDITOR.style(CKEDITOR.tools.extend({fullMatch:true,childRule:function(){return false;}},i.config.find_highlight)),l=function(x,y){var z=new CKEDITOR.dom.walker(x);z.guard=y?c:null;z.evaluator=b;z.breakOnFalse=true;this._={matchWord:y,walker:z,matchBoundary:false};};l.prototype={next:function(){return this.move();},back:function(){return this.move(true);},move:function(x){var z=this;var y=z.textNode;if(y===null)return d.call(z);z._.matchBoundary=false;if(y&&x&&z.offset>0){z.offset--;return d.call(z);}else if(y&&z.offset<y.getLength()-1){z.offset++;return d.call(z);}else{y=null;while(!y){y=z._.walker[x?'previous':'next'].call(z._.walker);if(z._.matchWord&&!y||z._.walker._.end)break;if(!y&&!c(z._.walker.current))z._.matchBoundary=true;}z.textNode=y;if(y)z.offset=x?y.getLength()-1:0;else z.offset=0;}return d.call(z);}};var m=function(x,y){this._={walker:x,cursors:[],rangeLength:y,highlightRange:null,isMatched:false};};m.prototype={toDomRange:function(){var x=new CKEDITOR.dom.range(i.document),y=this._.cursors;if(y.length<1){var z=this._.walker.textNode;if(z)x.setStartAfter(z);else return null;}else{var 
A=y[0],B=y[y.length-1];x.setStart(A.textNode,A.offset);x.setEnd(B.textNode,B.offset+1);}return x;},updateFromDomRange:function(x){var A=this;var y,z=new l(x);A._.cursors=[];do{y=z.next();if(y.character)A._.cursors.push(y);}while(y.character)A._.rangeLength=A._.cursors.length;},setMatched:function(){this._.isMatched=true;},clearMatched:function(){this._.isMatched=false;},isMatched:function(){return this._.isMatched;},highlight:function(){var A=this;if(A._.cursors.length<1)return;if(A._.highlightRange)A.removeHighlight();var x=A.toDomRange(),y=x.createBookmark();
k.applyToRange(x);x.moveToBookmark(y);A._.highlightRange=x;var z=x.startContainer;if(z.type!=CKEDITOR.NODE_ELEMENT)z=z.getParent();z.scrollIntoView();A.updateFromDomRange(x);},removeHighlight:function(){var y=this;if(!y._.highlightRange)return;var x=y._.highlightRange.createBookmark();k.removeFromRange(y._.highlightRange);y._.highlightRange.moveToBookmark(x);y.updateFromDomRange(y._.highlightRange);y._.highlightRange=null;},isReadOnly:function(){if(!this._.highlightRange)return 0;return this._.highlightRange.startContainer.isReadOnly();},moveBack:function(){var z=this;var x=z._.walker.back(),y=z._.cursors;if(x.hitMatchBoundary)z._.cursors=y=[];y.unshift(x);if(y.length>z._.rangeLength)y.pop();return x;},moveNext:function(){var z=this;var x=z._.walker.next(),y=z._.cursors;if(x.hitMatchBoundary)z._.cursors=y=[];y.push(x);if(y.length>z._.rangeLength)y.shift();return x;},getEndCharacter:function(){var x=this._.cursors;if(x.length<1)return null;return x[x.length-1].character;},getNextCharacterRange:function(x){var y,z,A=this._.cursors;if((y=A[A.length-1])&&y.textNode)z=new l(n(y));else z=this._.walker;return new m(z,x);},getCursors:function(){return this._.cursors;}};function n(x,y){var z=new CKEDITOR.dom.range();z.setStart(x.textNode,y?x.offset:x.offset+1);z.setEndAt(i.document.getBody(),CKEDITOR.POSITION_BEFORE_END);return z;};function o(x){var y=new CKEDITOR.dom.range();y.setStartAt(i.document.getBody(),CKEDITOR.POSITION_AFTER_START);y.setEnd(x.textNode,x.offset);return y;};var p=0,q=1,r=2,s=function(x,y){var z=[-1];if(y)x=x.toLowerCase();for(var A=0;A<x.length;A++){z.push(z[A]+1);while(z[A+1]>0&&x.charAt(A)!=x.charAt(z[A+1]-1))z[A+1]=z[z[A+1]-1]+1;}this._={overlap:z,state:0,ignoreCase:!!y,pattern:x};};s.prototype={feedCharacter:function(x){var y=this;if(y._.ignoreCase)x=x.toLowerCase();for(;;){if(x==y._.pattern.charAt(y._.state)){y._.state++;if(y._.state==y._.pattern.length){y._.state=0;return r;}return q;}else if(!y._.state)return p;else 
y._.state=y._.overlap[y._.state];}return null;},reset:function(){this._.state=0;}};var t=/[.,"'?!;: \u0085\u00a0\u1680\u280e\u2028\u2029\u202f\u205f\u3000]/,u=function(x){if(!x)return true;var y=x.charCodeAt(0);return y>=9&&y<=13||y>=8192&&y<=8202||t.test(x);},v={searchRange:null,matchRange:null,find:function(x,y,z,A,B,C){var L=this;if(!L.matchRange)L.matchRange=new m(new l(L.searchRange),x.length);else{L.matchRange.removeHighlight();L.matchRange=L.matchRange.getNextCharacterRange(x.length);}var D=new s(x,!y),E=p,F='%';while(F!==null){L.matchRange.moveNext();
while(F=L.matchRange.getEndCharacter()){E=D.feedCharacter(F);if(E==r)break;if(L.matchRange.moveNext().hitMatchBoundary)D.reset();}if(E==r){if(z){var G=L.matchRange.getCursors(),H=G[G.length-1],I=G[0],J=new l(o(I),true),K=new l(n(H),true);if(!(u(J.back().character)&&u(K.next().character)))continue;}L.matchRange.setMatched();if(B!==false)L.matchRange.highlight();return true;}}L.matchRange.clearMatched();L.matchRange.removeHighlight();if(A&&!C){L.searchRange=w(true);L.matchRange=null;return arguments.callee.apply(L,Array.prototype.slice.call(arguments).concat([true]));}return false;},replaceCounter:0,replace:function(x,y,z,A,B,C,D){var I=this;a=1;var E=false;if(I.matchRange&&I.matchRange.isMatched()&&!I.matchRange._.isReplaced&&!I.matchRange.isReadOnly()){I.matchRange.removeHighlight();var F=I.matchRange.toDomRange(),G=i.document.createText(z);if(!D){var H=i.getSelection();H.selectRanges([F]);i.fire('saveSnapshot');}F.deleteContents();F.insertNode(G);if(!D){H.selectRanges([F]);i.fire('saveSnapshot');}I.matchRange.updateFromDomRange(F);if(!D)I.matchRange.highlight();I.matchRange._.isReplaced=true;I.replaceCounter++;E=true;}else E=I.find(y,A,B,C,!D);a=0;return E;}};function w(x){var y,z=i.getSelection(),A=i.document.getBody();if(z&&!x){y=z.getRanges()[0].clone();y.collapse(true);}else{y=new CKEDITOR.dom.range();y.setStartAt(A,CKEDITOR.POSITION_AFTER_START);}y.setEndAt(A,CKEDITOR.POSITION_BEFORE_END);return y;};return{title:i.lang.findAndReplace.title,resizable:CKEDITOR.DIALOG_RESIZE_NONE,minWidth:350,minHeight:165,buttons:[CKEDITOR.dialog.cancelButton],contents:[{id:'find',label:i.lang.findAndReplace.find,title:i.lang.findAndReplace.find,accessKey:'',elements:[{type:'hbox',widths:['230px','90px'],children:[{type:'text',id:'txtFindFind',label:i.lang.findAndReplace.findWhat,isChanged:false,labelLayout:'horizontal',accessKey:'F'},{type:'button',align:'left',style:'width:100%',label:i.lang.findAndReplace.find,onClick:function(){var 
x=this.getDialog();if(!v.find(x.getValueOf('find','txtFindFind'),x.getValueOf('find','txtFindCaseChk'),x.getValueOf('find','txtFindWordChk'),x.getValueOf('find','txtFindCyclic')))alert(i.lang.findAndReplace.notFoundMsg);}}]},{type:'vbox',padding:0,children:[{type:'checkbox',id:'txtFindCaseChk',isChanged:false,style:'margin-top:28px',label:i.lang.findAndReplace.matchCase},{type:'checkbox',id:'txtFindWordChk',isChanged:false,label:i.lang.findAndReplace.matchWord},{type:'checkbox',id:'txtFindCyclic',isChanged:false,'default':true,label:i.lang.findAndReplace.matchCyclic}]}]},{id:'replace',label:i.lang.findAndReplace.replace,accessKey:'M',elements:[{type:'hbox',widths:['230px','90px'],children:[{type:'text',id:'txtFindReplace',label:i.lang.findAndReplace.findWhat,isChanged:false,labelLayout:'horizontal',accessKey:'F'},{type:'button',align:'left',style:'width:100%',label:i.lang.findAndReplace.replace,onClick:function(){var x=this.getDialog();
if(!v.replace(x,x.getValueOf('replace','txtFindReplace'),x.getValueOf('replace','txtReplace'),x.getValueOf('replace','txtReplaceCaseChk'),x.getValueOf('replace','txtReplaceWordChk'),x.getValueOf('replace','txtReplaceCyclic')))alert(i.lang.findAndReplace.notFoundMsg);}}]},{type:'hbox',widths:['230px','90px'],children:[{type:'text',id:'txtReplace',label:i.lang.findAndReplace.replaceWith,isChanged:false,labelLayout:'horizontal',accessKey:'R'},{type:'button',align:'left',style:'width:100%',label:i.lang.findAndReplace.replaceAll,isChanged:false,onClick:function(){var x=this.getDialog(),y;v.replaceCounter=0;v.searchRange=w(true);if(v.matchRange){v.matchRange.removeHighlight();v.matchRange=null;}i.fire('saveSnapshot');while(v.replace(x,x.getValueOf('replace','txtFindReplace'),x.getValueOf('replace','txtReplace'),x.getValueOf('replace','txtReplaceCaseChk'),x.getValueOf('replace','txtReplaceWordChk'),false,true)){}if(v.replaceCounter){alert(i.lang.findAndReplace.replaceSuccessMsg.replace(/%1/,v.replaceCounter));i.fire('saveSnapshot');}else alert(i.lang.findAndReplace.notFoundMsg);}}]},{type:'vbox',padding:0,children:[{type:'checkbox',id:'txtReplaceCaseChk',isChanged:false,label:i.lang.findAndReplace.matchCase},{type:'checkbox',id:'txtReplaceWordChk',isChanged:false,label:i.lang.findAndReplace.matchWord},{type:'checkbox',id:'txtReplaceCyclic',isChanged:false,'default':true,label:i.lang.findAndReplace.matchCyclic}]}]}],onLoad:function(){var x=this,y,z,A=false;this.on('hide',function(){A=false;});this.on('show',function(){A=true;});this.selectPage=CKEDITOR.tools.override(this.selectPage,function(B){return function(C){B.call(x,C);var 
D=x._.tabs[C],E,F,G;F=C==='find'?'txtFindFind':'txtFindReplace';G=C==='find'?'txtFindWordChk':'txtReplaceWordChk';y=x.getContentElement(C,F);z=x.getContentElement(C,G);if(!D.initialized){E=CKEDITOR.document.getById(y._.inputId);D.initialized=true;}if(A)g.call(this,C);};});},onShow:function(){v.searchRange=w();this.selectPage(j);},onHide:function(){var x;if(v.matchRange&&v.matchRange.isMatched()){v.matchRange.removeHighlight();i.focus();x=v.matchRange.toDomRange();if(x)i.getSelection().selectRanges([x]);}delete v.matchRange;},onFocus:function(){if(j=='replace')return this.getContentElement('replace','txtFindReplace');else return this.getContentElement('find','txtFindFind');}};};CKEDITOR.dialog.add('find',function(i){return h(i,'find');});CKEDITOR.dialog.add('replace',function(i){return h(i,'replace');});})(); | PypiClean |
/Django-Pizza-16.10.1.tar.gz/Django-Pizza-16.10.1/pizza/kitchen_sink/static/ks/ckeditor/lang/pl.js | /*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.lang['pl']={"editor":"Edytor tekstu sformatowanego","editorPanel":"Panel edytora tekstu sformatowanego","common":{"editorHelp":"W celu uzyskania pomocy naciśnij ALT 0","browseServer":"Przeglądaj","url":"Adres URL","protocol":"Protokół","upload":"Wyślij","uploadSubmit":"Wyślij","image":"Obrazek","flash":"Flash","form":"Formularz","checkbox":"Pole wyboru (checkbox)","radio":"Przycisk opcji (radio)","textField":"Pole tekstowe","textarea":"Obszar tekstowy","hiddenField":"Pole ukryte","button":"Przycisk","select":"Lista wyboru","imageButton":"Przycisk graficzny","notSet":"<nie ustawiono>","id":"Id","name":"Nazwa","langDir":"Kierunek tekstu","langDirLtr":"Od lewej do prawej (LTR)","langDirRtl":"Od prawej do lewej (RTL)","langCode":"Kod języka","longDescr":"Adres URL długiego opisu","cssClass":"Nazwa klasy CSS","advisoryTitle":"Opis obiektu docelowego","cssStyle":"Styl","ok":"OK","cancel":"Anuluj","close":"Zamknij","preview":"Podgląd","resize":"Przeciągnij, aby zmienić rozmiar","generalTab":"Ogólne","advancedTab":"Zaawansowane","validateNumberFailed":"Ta wartość nie jest liczbą.","confirmNewPage":"Wszystkie niezapisane zmiany zostaną utracone. Czy na pewno wczytać nową stronę?","confirmCancel":"Pewne opcje zostały zmienione. 
Czy na pewno zamknąć okno dialogowe?","options":"Opcje","target":"Obiekt docelowy","targetNew":"Nowe okno (_blank)","targetTop":"Okno najwyżej w hierarchii (_top)","targetSelf":"To samo okno (_self)","targetParent":"Okno nadrzędne (_parent)","langDirLTR":"Od lewej do prawej (LTR)","langDirRTL":"Od prawej do lewej (RTL)","styles":"Style","cssClasses":"Klasy arkusza stylów","width":"Szerokość","height":"Wysokość","align":"Wyrównaj","alignLeft":"Do lewej","alignRight":"Do prawej","alignCenter":"Do środka","alignTop":"Do góry","alignMiddle":"Do środka","alignBottom":"Do dołu","invalidValue":"Nieprawidłowa wartość.","invalidHeight":"Wysokość musi być liczbą.","invalidWidth":"Szerokość musi być liczbą.","invalidCssLength":"Wartość podana dla pola \"%1\" musi być liczbą dodatnią bez jednostki lub z poprawną jednostką długości zgodną z CSS (px, %, in, cm, mm, em, ex, pt lub pc).","invalidHtmlLength":"Wartość podana dla pola \"%1\" musi być liczbą dodatnią bez jednostki lub z poprawną jednostką długości zgodną z HTML (px lub %).","invalidInlineStyle":"Wartość podana dla stylu musi składać się z jednej lub większej liczby krotek w formacie \"nazwa : wartość\", rozdzielonych średnikami.","cssLengthTooltip":"Wpisz liczbę dla wartości w pikselach lub liczbę wraz z jednostką długości zgodną z CSS (px, %, in, cm, mm, em, ex, pt lub pc).","unavailable":"%1<span class=\"cke_accessibility\">, niedostępne</span>"},"about":{"copy":"Copyright © $1. 
Wszelkie prawa zastrzeżone.","dlgTitle":"Informacje o programie CKEditor","help":"Pomoc znajdziesz w $1.","moreInfo":"Informacje na temat licencji można znaleźć na naszej stronie:","title":"Informacje o programie CKEditor","userGuide":"podręczniku użytkownika programu CKEditor"},"basicstyles":{"bold":"Pogrubienie","italic":"Kursywa","strike":"Przekreślenie","subscript":"Indeks dolny","superscript":"Indeks górny","underline":"Podkreślenie"},"bidi":{"ltr":"Kierunek tekstu od lewej strony do prawej","rtl":"Kierunek tekstu od prawej strony do lewej"},"blockquote":{"toolbar":"Cytat"},"clipboard":{"copy":"Kopiuj","copyError":"Ustawienia bezpieczeństwa Twojej przeglądarki nie pozwalają na automatyczne kopiowanie tekstu. Użyj skrótu klawiszowego Ctrl/Cmd+C.","cut":"Wytnij","cutError":"Ustawienia bezpieczeństwa Twojej przeglądarki nie pozwalają na automatyczne wycinanie tekstu. Użyj skrótu klawiszowego Ctrl/Cmd+X.","paste":"Wklej","pasteArea":"Obszar wklejania","pasteMsg":"Wklej tekst w poniższym polu, używając skrótu klawiaturowego (<STRONG>Ctrl/Cmd+V</STRONG>), i kliknij <STRONG>OK</STRONG>.","securityMsg":"Zabezpieczenia przeglądarki uniemożliwiają wklejenie danych bezpośrednio do edytora. 
Proszę ponownie wkleić dane w tym oknie.","title":"Wklej"},"colorbutton":{"auto":"Automatycznie","bgColorTitle":"Kolor tła","colors":{"000":"Czarny","800000":"Kasztanowy","8B4513":"Czekoladowy","2F4F4F":"Ciemnografitowy","008080":"Morski","000080":"Granatowy","4B0082":"Indygo","696969":"Ciemnoszary","B22222":"Czerwień żelazowa","A52A2A":"Brązowy","DAA520":"Ciemnozłoty","006400":"Ciemnozielony","40E0D0":"Turkusowy","0000CD":"Ciemnoniebieski","800080":"Purpurowy","808080":"Szary","F00":"Czerwony","FF8C00":"Ciemnopomarańczowy","FFD700":"Złoty","008000":"Zielony","0FF":"Cyjan","00F":"Niebieski","EE82EE":"Fioletowy","A9A9A9":"Przygaszony szary","FFA07A":"Łososiowy","FFA500":"Pomarańczowy","FFFF00":"Żółty","00FF00":"Limonkowy","AFEEEE":"Bladoturkusowy","ADD8E6":"Jasnoniebieski","DDA0DD":"Śliwkowy","D3D3D3":"Jasnoszary","FFF0F5":"Jasnolawendowy","FAEBD7":"Kremowobiały","FFFFE0":"Jasnożółty","F0FFF0":"Bladozielony","F0FFFF":"Jasnolazurowy","F0F8FF":"Jasnobłękitny","E6E6FA":"Lawendowy","FFF":"Biały"},"more":"Więcej kolorów...","panelTitle":"Kolory","textColorTitle":"Kolor tekstu"},"colordialog":{"clear":"Wyczyść","highlight":"Zaznacz","options":"Opcje koloru","selected":"Wybrany","title":"Wybierz kolor"},"templates":{"button":"Szablony","emptyListMsg":"(Brak zdefiniowanych szablonów)","insertOption":"Zastąp obecną zawartość","options":"Opcje szablonów","selectPromptMsg":"Wybierz szablon do otwarcia w edytorze<br>(obecna zawartość okna edytora zostanie utracona):","title":"Szablony zawartości"},"contextmenu":{"options":"Opcje menu kontekstowego"},"div":{"IdInputLabel":"Id","advisoryTitleInputLabel":"Opis obiektu docelowego","cssClassInputLabel":"Klasy arkusza stylów","edit":"Edytuj pojemnik Div","inlineStyleInputLabel":"Style liniowe","langDirLTRLabel":"Od lewej do prawej (LTR)","langDirLabel":"Kierunek tekstu","langDirRTLLabel":"Od prawej do lewej (RTL)","languageCodeInputLabel":"Kod języka","remove":"Usuń pojemnik Div","styleSelectLabel":"Styl","title":"Utwórz pojemnik 
Div","toolbar":"Utwórz pojemnik Div"},"toolbar":{"toolbarCollapse":"Zwiń pasek narzędzi","toolbarExpand":"Rozwiń pasek narzędzi","toolbarGroups":{"document":"Dokument","clipboard":"Schowek/Wstecz","editing":"Edycja","forms":"Formularze","basicstyles":"Style podstawowe","paragraph":"Akapit","links":"Hiperłącza","insert":"Wstawianie","styles":"Style","colors":"Kolory","tools":"Narzędzia"},"toolbars":"Paski narzędzi edytora"},"elementspath":{"eleLabel":"Ścieżka elementów","eleTitle":"element %1"},"find":{"find":"Znajdź","findOptions":"Opcje wyszukiwania","findWhat":"Znajdź:","matchCase":"Uwzględnij wielkość liter","matchCyclic":"Cykliczne dopasowanie","matchWord":"Całe słowa","notFoundMsg":"Nie znaleziono szukanego hasła.","replace":"Zamień","replaceAll":"Zamień wszystko","replaceSuccessMsg":"%1 wystąpień zastąpionych.","replaceWith":"Zastąp przez:","title":"Znajdź i zamień"},"fakeobjects":{"anchor":"Kotwica","flash":"Animacja Flash","hiddenfield":"Pole ukryte","iframe":"IFrame","unknown":"Nieznany obiekt"},"flash":{"access":"Dostęp skryptów","accessAlways":"Zawsze","accessNever":"Nigdy","accessSameDomain":"Ta sama domena","alignAbsBottom":"Do dołu","alignAbsMiddle":"Do środka w pionie","alignBaseline":"Do linii bazowej","alignTextTop":"Do góry tekstu","bgcolor":"Kolor tła","chkFull":"Zezwól na pełny ekran","chkLoop":"Pętla","chkMenu":"Włącz menu","chkPlay":"Autoodtwarzanie","flashvars":"Zmienne obiektu Flash","hSpace":"Odstęp poziomy","properties":"Właściwości obiektu Flash","propertiesTab":"Właściwości","quality":"Jakość","qualityAutoHigh":"Auto wysoka","qualityAutoLow":"Auto niska","qualityBest":"Najlepsza","qualityHigh":"Wysoka","qualityLow":"Niska","qualityMedium":"Średnia","scale":"Skaluj","scaleAll":"Pokaż wszystko","scaleFit":"Dokładne dopasowanie","scaleNoBorder":"Bez obramowania","title":"Właściwości obiektu Flash","vSpace":"Odstęp pionowy","validateHSpace":"Odstęp poziomy musi być liczbą.","validateSrc":"Podaj adres URL","validateVSpace":"Odstęp pionowy 
musi być liczbą.","windowMode":"Tryb okna","windowModeOpaque":"Nieprzezroczyste","windowModeTransparent":"Przezroczyste","windowModeWindow":"Okno"},"font":{"fontSize":{"label":"Rozmiar","voiceLabel":"Rozmiar czcionki","panelTitle":"Rozmiar"},"label":"Czcionka","panelTitle":"Czcionka","voiceLabel":"Czcionka"},"forms":{"button":{"title":"Właściwości przycisku","text":"Tekst (Wartość)","type":"Typ","typeBtn":"Przycisk","typeSbm":"Wyślij","typeRst":"Wyczyść"},"checkboxAndRadio":{"checkboxTitle":"Właściwości pola wyboru (checkbox)","radioTitle":"Właściwości przycisku opcji (radio)","value":"Wartość","selected":"Zaznaczone"},"form":{"title":"Właściwości formularza","menu":"Właściwości formularza","action":"Akcja","method":"Metoda","encoding":"Kodowanie"},"hidden":{"title":"Właściwości pola ukrytego","name":"Nazwa","value":"Wartość"},"select":{"title":"Właściwości listy wyboru","selectInfo":"Informacje","opAvail":"Dostępne opcje","value":"Wartość","size":"Rozmiar","lines":"wierszy","chkMulti":"Wielokrotny wybór","opText":"Tekst","opValue":"Wartość","btnAdd":"Dodaj","btnModify":"Zmień","btnUp":"Do góry","btnDown":"Do dołu","btnSetValue":"Ustaw jako zaznaczoną","btnDelete":"Usuń"},"textarea":{"title":"Właściwości obszaru tekstowego","cols":"Liczba kolumn","rows":"Liczba wierszy"},"textfield":{"title":"Właściwości pola tekstowego","name":"Nazwa","value":"Wartość","charWidth":"Szerokość w znakach","maxChars":"Szerokość maksymalna","type":"Typ","typeText":"Tekst","typePass":"Hasło","typeEmail":"Email","typeSearch":"Szukaj","typeTel":"Numer telefonu","typeUrl":"Adres URL"}},"format":{"label":"Format","panelTitle":"Format","tag_address":"Adres","tag_div":"Normalny (DIV)","tag_h1":"Nagłówek 1","tag_h2":"Nagłówek 2","tag_h3":"Nagłówek 3","tag_h4":"Nagłówek 4","tag_h5":"Nagłówek 5","tag_h6":"Nagłówek 6","tag_p":"Normalny","tag_pre":"Tekst sformatowany"},"horizontalrule":{"toolbar":"Wstaw poziomą linię"},"iframe":{"border":"Pokaż obramowanie obiektu IFrame","noUrl":"Podaj adres URL 
elementu IFrame","scrolling":"Włącz paski przewijania","title":"Właściwości elementu IFrame","toolbar":"IFrame"},"image":{"alertUrl":"Podaj adres obrazka.","alt":"Tekst zastępczy","border":"Obramowanie","btnUpload":"Wyślij","button2Img":"Czy chcesz przekonwertować zaznaczony przycisk graficzny do zwykłego obrazka?","hSpace":"Odstęp poziomy","img2Button":"Czy chcesz przekonwertować zaznaczony obrazek do przycisku graficznego?","infoTab":"Informacje o obrazku","linkTab":"Hiperłącze","lockRatio":"Zablokuj proporcje","menu":"Właściwości obrazka","resetSize":"Przywróć rozmiar","title":"Właściwości obrazka","titleButton":"Właściwości przycisku graficznego","upload":"Wyślij","urlMissing":"Podaj adres URL obrazka.","vSpace":"Odstęp pionowy","validateBorder":"Wartość obramowania musi być liczbą całkowitą.","validateHSpace":"Wartość odstępu poziomego musi być liczbą całkowitą.","validateVSpace":"Wartość odstępu pionowego musi być liczbą całkowitą."},"indent":{"indent":"Zwiększ wcięcie","outdent":"Zmniejsz wcięcie"},"smiley":{"options":"Opcje emotikonów","title":"Wstaw emotikona","toolbar":"Emotikony"},"justify":{"block":"Wyjustuj","center":"Wyśrodkuj","left":"Wyrównaj do lewej","right":"Wyrównaj do prawej"},"link":{"acccessKey":"Klawisz dostępu","advanced":"Zaawansowane","advisoryContentType":"Typ MIME obiektu docelowego","advisoryTitle":"Opis obiektu docelowego","anchor":{"toolbar":"Wstaw/edytuj kotwicę","menu":"Właściwości kotwicy","title":"Właściwości kotwicy","name":"Nazwa kotwicy","errorName":"Wpisz nazwę kotwicy","remove":"Usuń kotwicę"},"anchorId":"Wg identyfikatora","anchorName":"Wg nazwy","charset":"Kodowanie znaków obiektu docelowego","cssClasses":"Nazwa klasy CSS","emailAddress":"Adres e-mail","emailBody":"Treść","emailSubject":"Temat","id":"Id","info":"Informacje ","langCode":"Kod języka","langDir":"Kierunek tekstu","langDirLTR":"Od lewej do prawej (LTR)","langDirRTL":"Od prawej do lewej (RTL)","menu":"Edytuj odnośnik","name":"Nazwa","noAnchors":"(W dokumencie 
nie zdefiniowano żadnych kotwic)","noEmail":"Podaj adres e-mail","noUrl":"Podaj adres URL","other":"<inny>","popupDependent":"Okno zależne (Netscape)","popupFeatures":"Właściwości wyskakującego okna","popupFullScreen":"Pełny ekran (IE)","popupLeft":"Pozycja w poziomie","popupLocationBar":"Pasek adresu","popupMenuBar":"Pasek menu","popupResizable":"Skalowalny","popupScrollBars":"Paski przewijania","popupStatusBar":"Pasek statusu","popupToolbar":"Pasek narzędzi","popupTop":"Pozycja w pionie","rel":"Relacja","selectAnchor":"Wybierz kotwicę","styles":"Styl","tabIndex":"Indeks kolejności","target":"Obiekt docelowy","targetFrame":"<ramka>","targetFrameName":"Nazwa ramki docelowej","targetPopup":"<wyskakujące okno>","targetPopupName":"Nazwa wyskakującego okna","title":"Odnośnik","toAnchor":"Odnośnik wewnątrz strony (kotwica)","toEmail":"Adres e-mail","toUrl":"Adres URL","toolbar":"Wstaw/edytuj odnośnik","type":"Typ odnośnika","unlink":"Usuń odnośnik","upload":"Wyślij"},"list":{"bulletedlist":"Lista wypunktowana","numberedlist":"Lista numerowana"},"liststyle":{"armenian":"Numerowanie armeńskie","bulletedTitle":"Właściwości list wypunktowanych","circle":"Koło","decimal":"Liczby (1, 2, 3 itd.)","decimalLeadingZero":"Liczby z początkowym zerem (01, 02, 03 itd.)","disc":"Okrąg","georgian":"Numerowanie gruzińskie (an, ban, gan itd.)","lowerAlpha":"Małe litery (a, b, c, d, e itd.)","lowerGreek":"Małe litery greckie (alpha, beta, gamma itd.)","lowerRoman":"Małe cyfry rzymskie (i, ii, iii, iv, v itd.)","none":"Brak","notset":"<nie ustawiono>","numberedTitle":"Właściwości list numerowanych","square":"Kwadrat","start":"Początek","type":"Typ punktora","upperAlpha":"Duże litery (A, B, C, D, E itd.)","upperRoman":"Duże cyfry rzymskie (I, II, III, IV, V itd.)","validateStartNumber":"Listę musi rozpoczynać liczba całkowita."},"magicline":{"title":"Wstaw nowy akapit"},"maximize":{"maximize":"Maksymalizuj","minimize":"Minimalizuj"},"newpage":{"toolbar":"Nowa 
strona"},"pagebreak":{"alt":"Wstaw podział strony","toolbar":"Wstaw podział strony"},"pastetext":{"button":"Wklej jako czysty tekst","title":"Wklej jako czysty tekst"},"pastefromword":{"confirmCleanup":"Tekst, który chcesz wkleić, prawdopodobnie pochodzi z programu Microsoft Word. Czy chcesz go wyczyścić przed wklejeniem?","error":"Wyczyszczenie wklejonych danych nie było możliwe z powodu wystąpienia błędu.","title":"Wklej z programu MS Word","toolbar":"Wklej z programu MS Word"},"preview":{"preview":"Podgląd"},"print":{"toolbar":"Drukuj"},"removeformat":{"toolbar":"Usuń formatowanie"},"save":{"toolbar":"Zapisz"},"selectall":{"toolbar":"Zaznacz wszystko"},"showblocks":{"toolbar":"Pokaż bloki"},"sourcearea":{"toolbar":"Źródło dokumentu"},"specialchar":{"options":"Opcje znaków specjalnych","title":"Wybierz znak specjalny","toolbar":"Wstaw znak specjalny"},"scayt":{"about":"Informacje o SCAYT","aboutTab":"Informacje o SCAYT","addWord":"Dodaj słowo","allCaps":"Ignoruj wyrazy pisane dużymi literami","dic_create":"Utwórz","dic_delete":"Usuń","dic_field_name":"Nazwa słownika","dic_info":"Początkowo słownik użytkownika przechowywany jest w cookie. Pliki cookie mają jednak ograniczoną pojemność. Jeśli słownik użytkownika przekroczy wielkość dopuszczalną dla pliku cookie, możliwe jest przechowanie go na naszym serwerze. W celu zapisania słownika na serwerze niezbędne jest nadanie mu nazwy. 
Jeśli słownik został już zapisany na serwerze, wystarczy podać jego nazwę i nacisnąć przycisk Przywróć.","dic_rename":"Zmień nazwę","dic_restore":"Przywróć","dictionariesTab":"Słowniki","disable":"Wyłącz SCAYT","emptyDic":"Nazwa słownika nie może być pusta.","enable":"Włącz SCAYT","ignore":"Ignoruj","ignoreAll":"Ignoruj wszystkie","ignoreDomainNames":"Ignoruj nazwy domen","langs":"Języki","languagesTab":"Języki","mixedCase":"Ignoruj wyrazy pisane dużymi i małymi literami","mixedWithDigits":"Ignoruj wyrazy zawierające cyfry","moreSuggestions":"Więcej sugestii","opera_title":"Funkcja nie jest obsługiwana przez przeglądarkę Opera","options":"Opcje","optionsTab":"Opcje","title":"Sprawdź pisownię podczas pisania (SCAYT)","toggle":"Przełącz SCAYT","noSuggestions":"No suggestion"},"stylescombo":{"label":"Styl","panelTitle":"Style formatujące","panelTitle1":"Style blokowe","panelTitle2":"Style liniowe","panelTitle3":"Style obiektowe"},"table":{"border":"Grubość obramowania","caption":"Tytuł","cell":{"menu":"Komórka","insertBefore":"Wstaw komórkę z lewej","insertAfter":"Wstaw komórkę z prawej","deleteCell":"Usuń komórki","merge":"Połącz komórki","mergeRight":"Połącz z komórką z prawej","mergeDown":"Połącz z komórką poniżej","splitHorizontal":"Podziel komórkę poziomo","splitVertical":"Podziel komórkę pionowo","title":"Właściwości komórki","cellType":"Typ komórki","rowSpan":"Scalenie wierszy","colSpan":"Scalenie komórek","wordWrap":"Zawijanie słów","hAlign":"Wyrównanie poziome","vAlign":"Wyrównanie pionowe","alignBaseline":"Linia bazowa","bgColor":"Kolor tła","borderColor":"Kolor obramowania","data":"Dane","header":"Nagłówek","yes":"Tak","no":"Nie","invalidWidth":"Szerokość komórki musi być liczbą.","invalidHeight":"Wysokość komórki musi być liczbą.","invalidRowSpan":"Scalenie wierszy musi być liczbą całkowitą.","invalidColSpan":"Scalenie komórek musi być liczbą całkowitą.","chooseColor":"Wybierz"},"cellPad":"Dopełnienie komórek","cellSpace":"Odstęp pomiędzy 
komórkami","column":{"menu":"Kolumna","insertBefore":"Wstaw kolumnę z lewej","insertAfter":"Wstaw kolumnę z prawej","deleteColumn":"Usuń kolumny"},"columns":"Liczba kolumn","deleteTable":"Usuń tabelę","headers":"Nagłówki","headersBoth":"Oba","headersColumn":"Pierwsza kolumna","headersNone":"Brak","headersRow":"Pierwszy wiersz","invalidBorder":"Wartość obramowania musi być liczbą.","invalidCellPadding":"Dopełnienie komórek musi być liczbą dodatnią.","invalidCellSpacing":"Odstęp pomiędzy komórkami musi być liczbą dodatnią.","invalidCols":"Liczba kolumn musi być większa niż 0.","invalidHeight":"Wysokość tabeli musi być liczbą.","invalidRows":"Liczba wierszy musi być większa niż 0.","invalidWidth":"Szerokość tabeli musi być liczbą.","menu":"Właściwości tabeli","row":{"menu":"Wiersz","insertBefore":"Wstaw wiersz powyżej","insertAfter":"Wstaw wiersz poniżej","deleteRow":"Usuń wiersze"},"rows":"Liczba wierszy","summary":"Podsumowanie","title":"Właściwości tabeli","toolbar":"Tabela","widthPc":"%","widthPx":"piksele","widthUnit":"jednostka szerokości"},"undo":{"redo":"Ponów","undo":"Cofnij"},"wsc":{"btnIgnore":"Ignoruj","btnIgnoreAll":"Ignoruj wszystkie","btnReplace":"Zmień","btnReplaceAll":"Zmień wszystkie","btnUndo":"Cofnij","changeTo":"Zmień na","errorLoading":"Błąd wczytywania hosta aplikacji usługi: %s.","ieSpellDownload":"Słownik nie jest zainstalowany. Czy chcesz go pobrać?","manyChanges":"Sprawdzanie zakończone: zmieniono %l słów","noChanges":"Sprawdzanie zakończone: nie zmieniono żadnego słowa","noMispell":"Sprawdzanie zakończone: nie znaleziono błędów","noSuggestions":"- Brak sugestii -","notAvailable":"Przepraszamy, ale usługa jest obecnie niedostępna.","notInDic":"Słowa nie ma w słowniku","oneChange":"Sprawdzanie zakończone: zmieniono jedno słowo","progress":"Trwa sprawdzanie...","title":"Sprawdź pisownię","toolbar":"Sprawdź pisownię"}}; | PypiClean |
/EvtPlugins-0.99.34.tar.gz/EvtPlugins-0.99.34/opensesame_plugins/VAS2/VAS2.py | from libopensesame.py3compat import *
from libopensesame.item import item
from libqtopensesame.items.qtautoplugin import qtautoplugin
from openexp.canvas import Canvas, canvas
from libopensesame.oslogging import oslogger
from openexp.canvas_elements import (
Line,
Rect,
Polygon,
Ellipse,
Image,
Gabor,
NoisePatch,
Circle,
FixDot,
ElementFactory,
RichText,
Arrow,
Text
)
from openexp.mouse import Mouse
from libopensesame.exceptions import osexception
import os
import sys
import numpy as np
from pyEVT import EvtExchanger
class VAS2(item):
    """
    Run-time logic of the VAS2 plug-in: presents a Visual Analogue Scale on a
    previously defined canvas and collects a 0-100 mouse response.

    This class (the class with the same name as the module) handles the basic
    functionality of the item. It does not deal with GUI stuff.
    """

    # Provide an informative description for your plug-in.
    description = u'A Revised VAS modifier for a canvas'

    def reset(self):
        """
        desc:
            Resets plug-in to initial values.

        These defaults mirror the variables specified in info.json; without
        them the variables would be undefined when not explicitly set in the
        GUI.
        """
        self.var.VAS_CANVAS_NAME = u'VASSCREEN'
        self.var.VAS_BODY_NAME = u'VASBODY'
        self.var.VAS_CURSOR_COLOR = "#ffffff"
        self.var.VAS_EXITBUTTON_NAME = u'VASEXIT'
        self.var.VAS_MAXLABEL_NAME = u'MAXLABEL'
        self.var.VAS_MINLABEL_NAME = u'MINLABEL'
        self.var.VAS_LINESIZE = 10

    def prepare(self):
        """The preparation phase: verify the required VAS elements exist on
        the named canvas and derive the scale geometry (origin, length,
        vertical centre).

        Raises:
            osexception: if the VAS body or exit button is missing.
            TypeError: if the VAS body element is neither a Line nor a Rect.
        """
        item.prepare(self)
        # Checking the existence of the VAS elements is only possible here,
        # as only now is the full canvas available.
        self.c = Canvas(self.experiment)
        self.slmouse = Mouse(self.experiment, timeout=20, visible=True)
        self.slmouse.show_cursor(True)
        self.slmouse.set_pos(pos=(0, 0))
        my_canvas = self.experiment.items[self.var.VAS_CANVAS_NAME].canvas
        try:
            if my_canvas[self.var.VAS_BODY_NAME] is None or \
                    my_canvas[self.var.VAS_EXITBUTTON_NAME] is None:
                oslogger.info("Should not occur")
        except Exception:
            raise osexception(
                u"Prepare: READ the VAS manual:\n\rNo VAS elements found on the named canvas")
        self.useLabels = True
        try:
            # BUG FIX: the original tested the *max* label twice; the min
            # label's presence was never actually verified.
            if my_canvas[self.var.VAS_MAXLABEL_NAME] is None or \
                    my_canvas[self.var.VAS_MINLABEL_NAME] is None:
                oslogger.info("Should not occur")
        except Exception:
            # BUG FIX: the original assigned ``self.uselabels`` (wrong case),
            # leaving ``self.useLabels`` True so run() would dereference the
            # missing label elements and crash.
            self.useLabels = False
        self.c = self.experiment.items[self.var.VAS_CANVAS_NAME].canvas
        self.ypos = -1
        # Is the VAS body a line or a rect?  Each shape exposes different
        # attributes from which we derive the scale geometry.
        body = self.c[self.var.VAS_BODY_NAME]
        if hasattr(body, 'ex') and hasattr(body, 'sx'):
            # Line element: use its endpoints.
            self.VASLENGTH = body.ex - body.sx
            self.ypos = (body.sy + body.ey) / 2
            self.sx = body.sx
        if hasattr(body, 'w') and hasattr(body, 'y') and hasattr(body, 'h'):
            # Rect element: use its origin and size.
            self.VASLENGTH = body.w
            self.ypos = body.y + (body.h / 2)
            self.sx = body.x
        if self.ypos == -1:
            raise TypeError("VasBody should be a line or a Rect")

    def _place_cursor(self, x, already_created):
        """Create the cursor line at horizontal pixel position ``x``, or move
        the existing cursor there when ``already_created`` is True."""
        if not already_created:
            self.c['VASCursorLine'] = Line(
                x, self.ypos - (self.var.VAS_LINESIZE / 2),
                x, self.ypos + (self.var.VAS_LINESIZE / 2),
                color=self.var.VAS_CURSOR_COLOR)
        else:
            self.c['VASCursorLine'].sx = x
            self.c['VASCursorLine'].ex = x

    def run(self):
        """The run phase: poll mouse clicks, let the participant position the
        cursor on the scale (or snap to the min/max labels), and record the
        0-100 response once the exit button is clicked."""
        self.set_item_onset(self.c.show())
        st = self.experiment.time()
        xpos = -1  # -1 means no cursor has been placed yet.
        while True:
            # Poll the mouse for button clicks.
            button = None
            while button is None:
                button, position, timestamp = self.slmouse.get_click()
            button = None
            (x, y), mtime = self.slmouse.get_pos()
            if (x, y) in self.c[self.var.VAS_BODY_NAME]:
                # Clicked on the scale body: place/move the cursor there and
                # convert the pixel position to a 0-100 percentage.
                self._place_cursor(x, xpos != -1)
                xpos = 100 * ((x - self.sx) / self.VASLENGTH)
            if self.useLabels:
                if (x, y) in self.c[self.var.VAS_MAXLABEL_NAME]:
                    # Clicked on the max label: snap the cursor to 100.
                    x = self.sx + self.VASLENGTH
                    self._place_cursor(x, xpos != -1)
                    xpos = 100
                if (x, y) in self.c[self.var.VAS_MINLABEL_NAME]:
                    # Clicked on the min label: snap the cursor to 0.
                    x = self.sx
                    self._place_cursor(x, xpos != -1)
                    xpos = 0
            if (x, y) in self.c[self.var.VAS_EXITBUTTON_NAME]:
                # Only allow exiting once a response has been given.
                if xpos != -1:
                    break
            self.c.show()
        # Add all response related data to the Opensesame responses instance.
        self.experiment.responses.add(
            response_time=self.experiment.time() - st,
            correct=None,
            response=str(round(xpos, 2)),
            item=self.name)
class qtVAS2(VAS2, qtautoplugin):
    """
    This class handles the GUI aspect of the plug-in. By using qtautoplugin, we
    usually need to do hardly anything, because the GUI is defined in info.json.
    """
    def __init__(self, name, experiment, script=None):
        """
        Constructor.
        Arguments:
        name -- The name of the plug-in.
        experiment -- The experiment object.
        Keyword arguments:
        script -- A definition script. (default=None)
        """
        # We don't need to do anything here, except call the parent
        # constructors. VAS2 handles the run-time logic; qtautoplugin builds
        # the GUI controls from info.json.
        VAS2.__init__(self, name, experiment, script)
        qtautoplugin.__init__(self, __file__)
    def init_edit_widget(self):
        """
        Constructs the GUI controls. Usually, you can omit this function
        altogether, but if you want to implement more advanced functionality,
        such as controls that are grayed out under certain conditions, you need
        to implement this here.
        """
        # First, call the parent constructor, which constructs the GUI controls
        # based on info.json.
        qtautoplugin.init_edit_widget(self)
        # If you specify a 'name' for a control in info.json, this control will
        # be available self.[name]. The type of the object depends on the
        # control. A checkbox will be a QCheckBox, a line_edit will be a
        # QLineEdit. Here we connect the stateChanged signal of the QCheckBox,
        # to the setEnabled() slot of the QLineEdit. This has the effect of
        # disabling the QLineEdit when the QCheckBox is uncheckhed. We also
        # explictly set the starting state.
        #self.line_edit_widget.setEnabled(self.checkbox_widget.isChecked())
        #self.checkbox_widget.stateChanged.connect(
        #	self.line_edit_widget.setEnabled)
/MarkdownTools-1.0.1.tar.gz/MarkdownTools-1.0.1/ez_setup.py | import os
import shutil
import sys
import tempfile
import zipfile
import optparse
import subprocess
import platform
import textwrap
import contextlib
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
# Default setuptools release fetched by this bootstrap script, and the base
# URL the source archive is downloaded from (must end with '/').
DEFAULT_VERSION = "3.1"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
"""
Return True if the command succeeded.
"""
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _install(archive_filename, install_args=()):
    """Unpack *archive_filename* and run ``setup.py install`` inside it.

    Returns 2 (usable as a process exit code) when the installation fails,
    otherwise returns None.
    """
    with archive_context(archive_filename):
        # installing
        log.warn('Installing Setuptools')
        if not _python_cmd('setup.py', 'install', *install_args):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
            # exitcode will be 2
            return 2
def _build_egg(egg, archive_filename, to_dir):
    """Build a setuptools egg from *archive_filename* into *to_dir*.

    *egg* is the path where the built egg is expected to appear; raises
    IOError when the build did not produce it.
    """
    with archive_context(archive_filename):
        # building an egg
        log.warn('Building a Setuptools egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def get_zip_class():
    """
    Supplement ZipFile class to support context manager for Python 2.6
    """
    class ContextualZipFile(zipfile.ZipFile):
        def __enter__(self):
            return self
        def __exit__(self, type, value, traceback):
            # BUG FIX: the original evaluated ``self.close`` without calling
            # it, so the archive was never actually closed on context exit.
            self.close()
    # Modern ZipFile already implements the context-manager protocol, so the
    # shim is only needed when '__exit__' is absent (Python 2.6).
    return zipfile.ZipFile if hasattr(zipfile.ZipFile, '__exit__') else \
        ContextualZipFile
@contextlib.contextmanager
def archive_context(filename):
    """Context manager: extract the zip *filename* into a fresh temp dir,
    chdir into the single top-level directory of the archive, and on exit
    restore the previous working directory and delete the temp dir.
    """
    # extracting the archive
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        with get_zip_class()(filename) as archive:
            archive.extractall()
        # going in the directory
        # NOTE: assumes the archive contains exactly one top-level directory.
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        yield
    finally:
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
def _do_download(version, download_base, to_dir, download_delay):
    """Download (if needed) and build the setuptools egg, put it on sys.path,
    then import setuptools from it and record it as the bootstrap source."""
    egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        archive = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, archive, to_dir)
    sys.path.insert(0, egg)
    # Remove previously-imported pkg_resources if present (see
    # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
    if 'pkg_resources' in sys.modules:
        del sys.modules['pkg_resources']
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
        to_dir=os.curdir, download_delay=15):
    """Ensure setuptools >= *version* is importable, bootstrapping it from
    *download_base* when it is missing or too old.

    Exits the process with code 2 when an incompatible setuptools is already
    imported and therefore cannot be replaced in this interpreter.
    """
    to_dir = os.path.abspath(to_dir)
    rep_modules = 'pkg_resources', 'setuptools'
    imported = set(sys.modules).intersection(rep_modules)
    try:
        import pkg_resources
    except ImportError:
        return _do_download(version, download_base, to_dir, download_delay)
    try:
        pkg_resources.require("setuptools>=" + version)
        return
    except pkg_resources.DistributionNotFound:
        return _do_download(version, download_base, to_dir, download_delay)
    except pkg_resources.VersionConflict as VC_err:
        if imported:
            msg = textwrap.dedent("""
                The required version of setuptools (>={version}) is not available,
                and can't be installed while this script is running. Please
                install a more recent version first, using
                'easy_install -U setuptools'.
                (Currently using {VC_err.args[0]!r})
                """).format(VC_err=VC_err, version=version)
            sys.stderr.write(msg)
            sys.exit(2)
        # otherwise, reload ok
        del pkg_resources, sys.modules['pkg_resources']
        return _do_download(version, download_base, to_dir, download_delay)
def _clean_check(cmd, target):
"""
Run the command to download target. If the command fails, clean up before
re-raising the error.
"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise
def download_file_powershell(url, target):
    """
    Download the file at url to target using Powershell (which will validate
    trust). Raise an exception if the command cannot complete.
    """
    target = os.path.abspath(target)
    cmd = [
        'powershell',
        '-Command',
        # %(url)r / %(target)r are interpolated from the local variables
        # via vars(), producing quoted literals in the Powershell command.
        "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(),
    ]
    _clean_check(cmd, target)
def has_powershell():
    """Return True when a working ``powershell`` executable is available.

    Always returns False on non-Windows platforms without probing.
    """
    if platform.system() != 'Windows':
        return False
    cmd = ['powershell', '-Command', 'echo test']
    devnull = open(os.path.devnull, 'wb')
    try:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        # BUG FIX: narrowed from a bare ``except:``, which also swallowed
        # SystemExit and KeyboardInterrupt; any ordinary failure still
        # means "not available".
        except Exception:
            return False
    finally:
        devnull.close()
    return True
# Advertise this downloader's availability probe for get_best_downloader().
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
    """Download *url* to *target* using the ``curl`` command-line tool."""
    cmd = ['curl', url, '--silent', '--output', target]
    _clean_check(cmd, target)
def has_curl():
    """Return True when the ``curl`` executable is available and runnable."""
    cmd = ['curl', '--version']
    devnull = open(os.path.devnull, 'wb')
    try:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        # BUG FIX: narrowed from a bare ``except:``, which also swallowed
        # SystemExit and KeyboardInterrupt.
        except Exception:
            return False
    finally:
        devnull.close()
    return True
# Advertise this downloader's availability probe for get_best_downloader().
download_file_curl.viable = has_curl
def download_file_wget(url, target):
    """Download *url* to *target* using the ``wget`` command-line tool."""
    cmd = ['wget', url, '--quiet', '--output-document', target]
    _clean_check(cmd, target)
def has_wget():
    """Return True when the ``wget`` executable is available and runnable."""
    cmd = ['wget', '--version']
    devnull = open(os.path.devnull, 'wb')
    try:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        # BUG FIX: narrowed from a bare ``except:``, which also swallowed
        # SystemExit and KeyboardInterrupt.
        except Exception:
            return False
    finally:
        devnull.close()
    return True
# Advertise this downloader's availability probe for get_best_downloader().
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
    """
    Use Python to download the file, even though it cannot authenticate the
    connection.
    """
    # Python 3 / Python 2 compatible import of urlopen.
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    src = dst = None
    try:
        src = urlopen(url)
        # Read/write all in one block, so we don't create a corrupt file
        # if the download is interrupted.
        data = src.read()
        dst = open(target, "wb")
        dst.write(data)
    finally:
        if src:
            src.close()
        if dst:
            dst.close()
# The insecure pure-Python downloader has no external dependencies, so it
# is always viable (used as the last-resort fallback).
download_file_insecure.viable = lambda: True
def get_best_downloader():
    """Return the most trustworthy downloader that can run on this system.

    Candidates are probed in order of preference; the insecure pure-Python
    fallback is always viable, so this effectively never returns None.
    """
    preference_order = (
        download_file_powershell,
        download_file_curl,
        download_file_wget,
        download_file_insecure,
    )
    for candidate in preference_order:
        if candidate.viable():
            return candidate
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
        to_dir=os.curdir, delay=15, downloader_factory=get_best_downloader):
    """
    Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.

    ``downloader_factory`` should be a function taking no arguments and
    returning a function for downloading a URL to a target.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    zip_name = "setuptools-%s.zip" % version
    url = download_base + zip_name
    saveto = os.path.join(to_dir, zip_name)
    if not os.path.exists(saveto):  # Avoid repeated downloads
        log.warn("Downloading %s", url)
        downloader = downloader_factory()
        downloader(url, saveto)
    return os.path.realpath(saveto)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
return ['--user'] if options.user_install else []
def _parse_args():
    """
    Parse the command line for options
    """
    parser = optparse.OptionParser()
    parser.add_option(
        '--user', dest='user_install', action='store_true', default=False,
        help='install in user site package (requires Python 2.6 or later)')
    parser.add_option(
        '--download-base', dest='download_base', metavar="URL",
        default=DEFAULT_URL,
        help='alternative URL from where to download the setuptools package')
    # --insecure stores a *factory*: a zero-argument callable returning the
    # non-validating downloader; the default probes for the best one.
    parser.add_option(
        '--insecure', dest='downloader_factory', action='store_const',
        const=lambda: download_file_insecure, default=get_best_downloader,
        help='Use internal, non-validating downloader'
    )
    parser.add_option(
        '--version', help="Specify which version to download",
        default=DEFAULT_VERSION,
    )
    options, args = parser.parse_args()
    # positional arguments are ignored
    return options
def main():
    """Install or upgrade setuptools and EasyInstall"""
    options = _parse_args()
    archive = download_setuptools(
        version=options.version,
        download_base=options.download_base,
        downloader_factory=options.downloader_factory,
    )
    # Propagates _install's result: 2 on failure, None (success) otherwise.
    return _install(archive, _build_install_args(options))
# Run the bootstrap when executed as a script.
if __name__ == '__main__':
    sys.exit(main())
/BEATAALU-0.13.1.tar.gz/BEATAALU-0.13.1/econml/grf/_base_grftree.py |
import numpy as np
from ._criterion import LinearMomentGRFCriterionMSE, LinearMomentGRFCriterion,LinearMomentGRFCriterionBEAT
from ..tree import BaseTree
from sklearn.model_selection import train_test_split
from sklearn.utils import check_array
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_is_fitted
import copy
# =============================================================================
# Types and constants
# =============================================================================
# Map of user-facing ``criterion`` names to the criterion classes used when
# growing a GRF tree (see the class docstring for the 'mse'/'het' semantics).
CRITERIA_GRF = {"het": LinearMomentGRFCriterion,
                "mse": LinearMomentGRFCriterionMSE,
                "beat":LinearMomentGRFCriterionBEAT}
# =============================================================================
# Base GRF tree
# =============================================================================
class GRFTree(BaseTree):
"""A tree of a Generalized Random Forest [grftree1]. This method should be used primarily
through the BaseGRF forest class and its derivatives and not as a standalone
estimator. It fits a tree that solves the local moment equation problem::
E[ m(Z; theta(x)) | X=x] = 0
For some moment vector function m, that takes as input random samples of a random variable Z
and is parameterized by some unknown parameter theta(x). Each node in the tree
contains a local estimate of the parameter theta(x), for every region of X that
falls within that leaf.
Parameters
----------
criterion : {``'mse'``, ``'het'``}, default='mse'
The function to measure the quality of a split. Supported criteria
are ``'mse'`` for the mean squared error in a linear moment estimation tree and ``'het'`` for
heterogeneity score. These criteria solve any linear moment problem of the form::
E[J * theta(x) - A | X = x] = 0
- The ``'mse'`` criterion finds splits that maximize the score:
.. code-block::
sum_{child} weight(child) * theta(child).T @ E[J | X in child] @ theta(child)
- In the case of a causal tree, this coincides with minimizing the MSE:
.. code-block::
sum_{child} E[(Y - <theta(child), T>)^2 | X=child] weight(child)
- In the case of an IV tree, this roughly coincides with minimize the projected MSE::
.. code-block::
sum_{child} E[(Y - <theta(child), E[T|Z]>)^2 | X=child] weight(child)
Internally, for the case of more than two treatments or for the case of one treatment with
``fit_intercept=True`` then this criterion is approximated by computationally simpler variants for
computationaly purposes. In particular, it is replaced by::
sum_{child} weight(child) * rho(child).T @ E[J | X in child] @ rho(child)
where:
.. code-block::
rho(child) := J(parent)^{-1} E[A - J * theta(parent) | X in child]
This can be thought as a heterogeneity inducing score, but putting more weight on scores
with a large minimum eigenvalue of the child jacobian ``E[J | X in child]``, which leads to smaller
variance of the estimate and stronger identification of the parameters.
- The ``'het'`` criterion finds splits that maximize the pure parameter heterogeneity score:
.. code-block::
sum_{child} weight(child) * rho(child).T @ rho(child)
This can be thought as an approximation to the ideal heterogeneity score:
.. code-block::
weight(left) * weight(right) || theta(left) - theta(right)||_2^2 / weight(parent)^2
as outlined in [grftree1]_
splitter : {"best"}, default="best"
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=10
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int or float, default=5
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
min_var_leaf : None or double in (0, infinity), default=None
A constraint on the minimum degree of identification of the parameter of interest. This avoids performing
splits where either the variance of the treatment is small or the correlation of the instrument with the
treatment is small, or the variance of the instrument is small. Generically for any linear moment problem
this translates to conditions on the leaf jacobian matrix J(leaf) that are proxies for a well-conditioned
matrix, which leads to smaller variance of the local estimate. The proxy of the well-conditioning is
different for different criterion, primarily for computational efficiency reasons.
- If ``criterion='het'``, then the diagonal entries of J(leaf) are constraint to have absolute
value at least `min_var_leaf`:
.. code-block::
for all i in {1, ..., n_outputs}: abs(J(leaf)[i, i]) > `min_var_leaf`
In the context of a causal tree, when residual treatment is passed
at fit time, then, this translates to a requirement on Var(T[i]) for every treatment coordinate i.
In the context of an IV tree, with residual instruments and residual treatments passed at fit time
this translates to ``Cov(T[i], Z[i]) > min_var_leaf`` for each coordinate i of the instrument and the
treatment.
- If ``criterion='mse'``, because the criterion stores more information about the leaf jacobian for
every candidate split, then we impose further constraints on the pairwise determininants of the
leaf jacobian, as they come at small extra computational cost, i.e.::
for all i neq j:
sqrt(abs(J(leaf)[i, i] * J(leaf)[j, j] - J(leaf)[i, j] * J(leaf)[j, i])) > `min_var_leaf`
In the context of a causal tree, when residual treatment is passed at fit time, then this
translates to a constraint on the pearson correlation coefficient on any two coordinates
of the treatment within the leaf, i.e.::
for all i neq j:
sqrt( Var(T[i]) * Var(T[j]) * (1 - rho(T[i], T[j])^2) ) ) > `min_var_leaf`
where rho(X, Y) is the Pearson correlation coefficient of two random variables X, Y. Thus this
constraint also enforces that no two pairs of treatments be very co-linear within a leaf. This
extra constraint primarily has bite in the case of more than two input treatments.
min_var_leaf_on_val : bool, default=False
Whether the `min_var_leaf` constraint should also be enforced to hold on the validation set of the
honest split too. If `min_var_leaf=None` then this flag does nothing. Setting this to True should
be done with caution, as this partially violates the honesty structure, since parts of the variables
other than the X variable (e.g. the variables that go into the jacobian J of the linear model) are
used to inform the split structure of the tree. However, this is a benign dependence and for instance
in a causal tree or an IV tree does not use the label y. It only uses the treatment T and the instrument
Z and their local correlation structures to decide whether a split is feasible.
max_features : int, float or {"auto", "sqrt", "log2"}, default=None
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the estimator. The features are always
randomly permuted at each split, even if ``splitter`` is set to
``"best"``. When ``max_features < n_features``, the algorithm will
select ``max_features`` at random at each split before finding the best
split among them. But the best found split may vary across different
runs, even if ``max_features=n_features``. That is the case, if the
improvement of the criterion is identical for several splits and one
split has to be selected at random. To obtain a deterministic behaviour
during fitting, ``random_state`` has to be fixed to an integer.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
min_balancedness_tol: float in [0, .5], default=.45
How imbalanced a split we can tolerate. This enforces that each split leaves at least
(.5 - min_balancedness_tol) fraction of samples on each side of the split; or fraction
of the total weight of samples, when sample_weight is not None. Default value, ensures
that at least 5% of the parent node weight falls in each side of the split. Set it to 0.0 for no
balancedness and to .5 for perfectly balanced splits. For the formal inference theory
to be valid, this has to be any positive constant bounded away from zero.
honest: bool, default=True
Whether the data should be split in two equally sized samples, such that the one half-sample
is used to determine the optimal split at each node and the other sample is used to determine
the value of every node.
Attributes
----------
feature_importances_ : ndarray of shape (n_features,)
The feature importances based on the amount of parameter heterogeneity they create.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized) total heterogeneity that the feature
creates. Each split that the feature was chosen adds::
parent_weight * (left_weight * right_weight)
* mean((value_left[k] - value_right[k])**2) / parent_weight**2
to the importance of the feature. Each such quantity is also weighted by the depth of the split.
By default splits below `max_depth=4` are not used in this calculation and also each split
at depth `depth`, is re-weighted by 1 / (1 + `depth`)**2.0. See the method ``feature_importances``
for a method that allows one to change these defaults.
max_features_ : int
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
n_relevant_outputs_ : int
The first `n_relevant_outputs_` where the ones we cared about when ``fit`` was performed.
n_y_ : int
The raw label dimension when ``fit`` is performed.
n_samples_ : int
The number of training samples when ``fit`` is performed.
honest_ : int
Whether honesty was enabled when ``fit`` was performed
tree_ : Tree instance
The underlying Tree object. Please refer to
``help(econml.tree._tree.Tree)`` for attributes of Tree object.
References
----------
.. [grftree1] Athey, Susan, Julie Tibshirani, and Stefan Wager. "Generalized random forests."
The Annals of Statistics 47.2 (2019): 1148-1178
https://arxiv.org/pdf/1610.01271.pdf
"""
def __init__(self, *,
             criterion="mse",
             splitter="best",
             max_depth=None,
             min_samples_split=10,
             min_samples_leaf=5,
             min_weight_fraction_leaf=0.,
             min_var_leaf=None,
             min_var_leaf_on_val=False,
             max_features=None,
             random_state=None,
             min_impurity_decrease=0.,
             min_balancedness_tol=0.45,
             honest=True):
    # Constructor only forwards/stores parameters (verbatim, to the base tree)
    # so that sklearn-style cloning works; pre-fit setup lives in `init`.
    super().__init__(criterion=criterion,
                     splitter=splitter,
                     max_depth=max_depth,
                     min_samples_split=min_samples_split,
                     min_samples_leaf=min_samples_leaf,
                     min_weight_fraction_leaf=min_weight_fraction_leaf,
                     min_var_leaf=min_var_leaf,
                     min_var_leaf_on_val=min_var_leaf_on_val,
                     max_features=max_features,
                     random_state=random_state,
                     min_impurity_decrease=min_impurity_decrease,
                     min_balancedness_tol=min_balancedness_tol,
                     honest=honest)
def _get_valid_criteria(self):
    # Map of criterion names valid for GRF trees (module-level constant).
    return CRITERIA_GRF
def _get_valid_min_var_leaf_criteria(self):
    # Criterion classes for which the `min_var_leaf` constraint applies.
    return (LinearMomentGRFCriterion,)
def _get_store_jac(self):
    # This tree type always stores the moment jacobian/precond at each node.
    return True
def init(self,):
    """Pre-fit initialization: record the starting seed and construct the
    random state.

    This must be called before ``fit``. It is kept separate from ``fit`` so
    that this GIL-holding setup can run outside any parallel section, and
    separate from ``__init__`` so that ``__init__`` only stores parameters
    (easy cloning). Keeping the raw starting seed in ``random_seed_`` lets
    callers replicate the object's randomness externally.
    """
    seed = self.random_state
    self.random_seed_ = seed
    self.random_state_ = check_random_state(seed)
    return self
def fit(self, X, y, n_y, n_outputs, n_relevant_outputs, sample_weight=None, check_input=True):
    """ Fit the tree from the data

    Parameters
    ----------
    X : (n, d) array
        The features to split on
    y : (n, m) array
        All the variables required to calculate the criterion function, evaluate splits and
        estimate local values, i.e. all the values that go into the moment function except X.
    n_y, n_outputs, n_relevant_outputs : auxiliary info passed to the criterion objects that
        help the object parse the variable y into each separate variable components.

        - In the case when `isinstance(criterion, LinearMomentGRFCriterion)`, then the first
          n_y columns of y are the raw outputs, the next n_outputs columns contain the A part
          of the moment and the next n_outputs * n_outputs columns contain the J part of the moment
          in row contiguous format. The first n_relevant_outputs parameters of the linear moment
          are the ones that we care about. The rest are nuisance parameters.
    sample_weight : (n,) array, default=None
        The sample weights
    check_input : bool, default=True
        Whether to check the input parameters for validity. Should be set to False to improve
        running time in parallel execution, if the variables have already been checked by the
        forest class that spawned this tree.
    """
    return super().fit(X, y, n_y, n_outputs, n_relevant_outputs,
                       sample_weight=sample_weight, check_input=check_input)
def predict(self, X, check_input=True):
    """Return the prefix of relevant fitted local parameters for each X,
    i.e. theta(X)[:n_relevant_outputs].

    Parameters
    ----------
    X : {array-like} of shape (n_samples, n_features)
        Input samples; converted internally to ``dtype=np.float64``.
    check_input : bool, default=True
        If False, bypass several input checks. Only use when you know the
        input is already valid.

    Returns
    -------
    theta(X)[:n_relevant_outputs] : array-like of shape (n_samples, n_relevant_outputs)
        The estimated relevant parameters for each row of X.
    """
    check_is_fitted(self)
    validated = self._validate_X_predict(X, check_input)
    return self.tree_.predict(validated)
def predict_full(self, X, check_input=True):
    """Return all the fitted local parameters for each X, i.e. theta(X).

    Parameters
    ----------
    X : {array-like} of shape (n_samples, n_features)
        Input samples; converted internally to ``dtype=np.float64``.
    check_input : bool, default=True
        If False, bypass several input checks. Only use when you know the
        input is already valid.

    Returns
    -------
    theta(X) : array-like of shape (n_samples, n_outputs)
        All the estimated parameters (relevant and nuisance) for each row of X.
    """
    check_is_fitted(self)
    validated = self._validate_X_predict(X, check_input)
    return self.tree_.predict_full(validated)
def predict_alpha_and_jac(self, X, check_input=True):
    """Predict the local alpha ``E[A | X=x]`` and the local jacobian
    ``E[J | X=x]`` of a linear moment equation.

    Parameters
    ----------
    X : {array-like} of shape (n_samples, n_features)
        Input samples; converted internally to ``dtype=np.float64``.
    check_input : bool, default=True
        If False, bypass several input checks. Only use when you know the
        input is already valid.

    Returns
    -------
    alpha : array-like of shape (n_samples, n_outputs)
        The local alpha E[A | X=x] for each sample x.
    jac : array-like of shape (n_samples, n_outputs * n_outputs)
        The local jacobian E[J | X=x], flattened in C-contiguous order.
    """
    check_is_fitted(self)
    validated = self._validate_X_predict(X, check_input)
    return self.tree_.predict_precond_and_jac(validated)
def predict_moment(self, X, parameter, check_input=True):
    """
    Predict the local moment value for each sample at the given parameter::

        E[A | X=x] - E[J | X=x] theta(x)

    (i.e. alpha minus jacobian times the supplied parameter — note the sign
    convention follows the code, with alpha first).

    Parameters
    ----------
    X : {array-like} of shape (n_samples, n_features)
        The input samples. Internally, it will be converted to
        ``dtype=np.float64``
    parameter : {array-like} of shape (n_samples, n_outputs)
        A parameter estimate for each sample
    check_input : bool, default=True
        Allow to bypass several input checking.
        Don't use this parameter unless you know what you do.

    Returns
    -------
    moment : array-like of shape (n_samples, n_outputs)
        The local moment for each sample x
    """
    # Bug fix: forward check_input instead of silently ignoring it.
    alpha, jac = self.predict_alpha_and_jac(X, check_input=check_input)
    jac_full = jac.reshape((-1, self.n_outputs_, self.n_outputs_))
    return alpha - np.einsum('ijk,ik->ij', jac_full, parameter)
def feature_importances(self, max_depth=4, depth_decay_exponent=2.0):
    """Compute feature importances based on the amount of parameter
    heterogeneity each feature creates; higher means more important.

    The importance of a feature is the (normalized) total heterogeneity it
    creates. Each split chosen on the feature adds::

        parent_weight * (left_weight * right_weight)
        * mean((value_left[k] - value_right[k])**2) / parent_weight**2

    and each such quantity is re-weighted by the depth of the split.

    Parameters
    ----------
    max_depth : int, default=4
        Splits of depth larger than `max_depth` are not used in this calculation.
    depth_decay_exponent: double, default=2.0
        Each split's contribution is re-weighted by ``1 / (1 + `depth`)**2.0``.

    Returns
    -------
    feature_importances_ : ndarray of shape (n_features,)
        Normalized total parameter heterogeneity inducing importance of each feature.
    """
    check_is_fitted(self)
    importances = self.tree_.compute_feature_heterogeneity_importances(
        normalize=True, max_depth=max_depth, depth_decay=depth_decay_exponent)
    return importances
@property
def feature_importances_(self):
    """ndarray of shape (n_features,): feature importances computed with the
    default settings of :meth:`feature_importances` (``max_depth=4``,
    depth decay exponent 2.0)."""
    return self.feature_importances()
/GSAS-II-WONDER_win-1.0.1.tar.gz/GSAS-II-WONDER_win-1.0.1/GSAS-II-WONDER/GSASIIstrMath.py | ########### SVN repository information ###################
# $Date: 2019-09-24 09:32:12 -0500 (Tue, 24 Sep 2019) $
# $Author: vondreele $
# $Revision: 4158 $
# $URL: https://subversion.xray.aps.anl.gov/pyGSAS/trunk/GSASIIstrMath.py $
# $Id: GSASIIstrMath.py 4158 2019-09-24 14:32:12Z vondreele $
########### SVN repository information ###################
from __future__ import division, print_function
import time
import copy
import numpy as np
import numpy.ma as ma
import numpy.linalg as nl
import scipy.stats as st
import multiprocessing as mp
import GSASIIpath
GSASIIpath.SetVersionNumber("$Revision: 4158 $")
import GSASIIElem as G2el
import GSASIIlattice as G2lat
import GSASIIspc as G2spc
import GSASIIpwd as G2pwd
import GSASIImapvars as G2mv
import GSASIImath as G2mth
import GSASIIobj as G2obj
import GSASIImpsubs as G2mp
#G2mp.InitMP(False) # This disables multiprocessing
# Degree-based trig helpers used throughout this module (crystallographic
# angles are conventionally in degrees, numpy works in radians).
sind = lambda x: np.sin(x*np.pi/180.)
cosd = lambda x: np.cos(x*np.pi/180.)
tand = lambda x: np.tan(x*np.pi/180.)
asind = lambda x: 180.*np.arcsin(x)/np.pi
acosd = lambda x: 180.*np.arccos(x)/np.pi
atan2d = lambda y,x: 180.*np.arctan2(y,x)/np.pi
ateln2 = 8.0*np.log(2.0)    # 8*ln(2): Gaussian FWHM**2 <-> variance conversion factor
twopi = 2.0*np.pi
twopisq = 2.0*np.pi**2
nxs = np.newaxis            # shorthand for broadcasting with np.newaxis
################################################################################
##### Rigid Body Models
################################################################################
def ApplyRBModels(parmDict,Phases,rigidbodyDict,Update=False):
    ''' Takes RB info from RBModels in Phase and RB data in rigidbodyDict along with
    current RB values in parmDict & modifies atom contents (xyz & Uij) of parmDict

    :param dict parmDict: parameter dict; atom position/ADP entries are
        overwritten in place from the rigid body parameters
    :param dict Phases: phase information, keyed by phase name
    :param dict rigidbodyDict: rigid body definitions ('Vector' & 'Residue')
    :param bool Update: if True, modify rigidbodyDict/Phase RBModels in place;
        otherwise work on deep copies so the originals are untouched
    '''
    atxIds = ['Ax:','Ay:','Az:']
    atuIds = ['AU11:','AU22:','AU33:','AU12:','AU13:','AU23:']
    RBIds = rigidbodyDict.get('RBIds',{'Vector':[],'Residue':[]})   #these are lists of rbIds
    if not RBIds['Vector'] and not RBIds['Residue']:
        return
    VRBIds = RBIds['Vector']
    RRBIds = RBIds['Residue']
    if Update:
        RBData = rigidbodyDict
    else:
        RBData = copy.deepcopy(rigidbodyDict)     # don't mess with original!
    if RBIds['Vector']:                       # first update the vector magnitudes
        VRBData = RBData['Vector']
        for i,rbId in enumerate(VRBIds):
            if VRBData[rbId]['useCount']:
                for j in range(len(VRBData[rbId]['VectMag'])):
                    name = '::RBV;'+str(j)+':'+str(i)
                    VRBData[rbId]['VectMag'][j] = parmDict[name]
    for phase in Phases:
        Phase = Phases[phase]
        General = Phase['General']
        cx,ct,cs,cia = General['AtomPtrs']
        cell = General['Cell'][1:7]
        Amat,Bmat = G2lat.cell2AB(cell)
        AtLookup = G2mth.FillAtomLookUp(Phase['Atoms'],cia+8)
        pfx = str(Phase['pId'])+'::'
        if Update:
            RBModels = Phase['RBModels']
        else:
            RBModels = copy.deepcopy(Phase['RBModels']) # again don't mess with original!
        # Vector rigid bodies: refresh origin, orientation quaternion & TLS,
        # then push the derived atom positions/ADPs back into parmDict.
        for irb,RBObj in enumerate(RBModels.get('Vector',[])):
            jrb = VRBIds.index(RBObj['RBId'])
            rbsx = str(irb)+':'+str(jrb)
            for i,px in enumerate(['RBVPx:','RBVPy:','RBVPz:']):
                RBObj['Orig'][0][i] = parmDict[pfx+px+rbsx]
            for i,po in enumerate(['RBVOa:','RBVOi:','RBVOj:','RBVOk:']):
                RBObj['Orient'][0][i] = parmDict[pfx+po+rbsx]
            RBObj['Orient'][0] = G2mth.normQ(RBObj['Orient'][0])    # renormalize quaternion
            TLS = RBObj['ThermalMotion']
            if 'T' in TLS[0]:
                for i,pt in enumerate(['RBVT11:','RBVT22:','RBVT33:','RBVT12:','RBVT13:','RBVT23:']):
                    TLS[1][i] = parmDict[pfx+pt+rbsx]
            if 'L' in TLS[0]:
                for i,pt in enumerate(['RBVL11:','RBVL22:','RBVL33:','RBVL12:','RBVL13:','RBVL23:']):
                    TLS[1][i+6] = parmDict[pfx+pt+rbsx]
            if 'S' in TLS[0]:
                for i,pt in enumerate(['RBVS12:','RBVS13:','RBVS21:','RBVS23:','RBVS31:','RBVS32:','RBVSAA:','RBVSBB:']):
                    TLS[1][i+12] = parmDict[pfx+pt+rbsx]
            if 'U' in TLS[0]:
                TLS[1][0] = parmDict[pfx+'RBVU:'+rbsx]
            XYZ,Cart = G2mth.UpdateRBXYZ(Bmat,RBObj,RBData,'Vector')
            UIJ = G2mth.UpdateRBUIJ(Bmat,Cart,RBObj)
            for i,x in enumerate(XYZ):
                atId = RBObj['Ids'][i]
                for j in [0,1,2]:
                    parmDict[pfx+atxIds[j]+str(AtLookup[atId])] = x[j]
                if UIJ[i][0] == 'A':    # anisotropic atom: six Uij terms
                    for j in range(6):
                        parmDict[pfx+atuIds[j]+str(AtLookup[atId])] = UIJ[i][j+2]
                elif UIJ[i][0] == 'I':  # isotropic atom: single Uiso
                    parmDict[pfx+'AUiso:'+str(AtLookup[atId])] = UIJ[i][1]
        # Residue rigid bodies: as above, with torsion angles as well.
        for irb,RBObj in enumerate(RBModels.get('Residue',[])):
            jrb = RRBIds.index(RBObj['RBId'])
            rbsx = str(irb)+':'+str(jrb)
            for i,px in enumerate(['RBRPx:','RBRPy:','RBRPz:']):
                RBObj['Orig'][0][i] = parmDict[pfx+px+rbsx]
            for i,po in enumerate(['RBROa:','RBROi:','RBROj:','RBROk:']):
                RBObj['Orient'][0][i] = parmDict[pfx+po+rbsx]
            RBObj['Orient'][0] = G2mth.normQ(RBObj['Orient'][0])
            TLS = RBObj['ThermalMotion']
            if 'T' in TLS[0]:
                for i,pt in enumerate(['RBRT11:','RBRT22:','RBRT33:','RBRT12:','RBRT13:','RBRT23:']):
                    RBObj['ThermalMotion'][1][i] = parmDict[pfx+pt+rbsx]
            if 'L' in TLS[0]:
                for i,pt in enumerate(['RBRL11:','RBRL22:','RBRL33:','RBRL12:','RBRL13:','RBRL23:']):
                    RBObj['ThermalMotion'][1][i+6] = parmDict[pfx+pt+rbsx]
            if 'S' in TLS[0]:
                for i,pt in enumerate(['RBRS12:','RBRS13:','RBRS21:','RBRS23:','RBRS31:','RBRS32:','RBRSAA:','RBRSBB:']):
                    RBObj['ThermalMotion'][1][i+12] = parmDict[pfx+pt+rbsx]
            if 'U' in TLS[0]:
                RBObj['ThermalMotion'][1][0] = parmDict[pfx+'RBRU:'+rbsx]
            for itors,tors in enumerate(RBObj['Torsions']):
                tors[0] = parmDict[pfx+'RBRTr;'+str(itors)+':'+rbsx]
            XYZ,Cart = G2mth.UpdateRBXYZ(Bmat,RBObj,RBData,'Residue')
            UIJ = G2mth.UpdateRBUIJ(Bmat,Cart,RBObj)
            for i,x in enumerate(XYZ):
                atId = RBObj['Ids'][i]
                for j in [0,1,2]:
                    parmDict[pfx+atxIds[j]+str(AtLookup[atId])] = x[j]
                if UIJ[i][0] == 'A':
                    for j in range(6):
                        parmDict[pfx+atuIds[j]+str(AtLookup[atId])] = UIJ[i][j+2]
                elif UIJ[i][0] == 'I':
                    parmDict[pfx+'AUiso:'+str(AtLookup[atId])] = UIJ[i][1]
def ApplyRBModelDervs(dFdvDict,parmDict,rigidbodyDict,Phase):
    '''Propagate structure-factor derivatives w.r.t. atom parameters
    (dF/dx, dF/dUij already in dFdvDict) into derivatives w.r.t. the rigid
    body parameters (origin, orientation quaternion, TLS terms, vector
    magnitudes and torsions). Results are accumulated into dFdvDict in place.

    :param dict dFdvDict: derivative dict; 'RB*' entries are zeroed then
        accumulated (each entry is a vector, one element per reflection)
    :param dict parmDict: current parameter values
    :param dict rigidbodyDict: rigid body definitions ('Vector' & 'Residue')
    :param dict Phase: single phase record
    '''
    atxIds = ['dAx:','dAy:','dAz:']
    atuIds = ['AU11:','AU22:','AU33:','AU12:','AU13:','AU23:']
    OIds = ['Oa:','Oi:','Oj:','Ok:']
    RBIds = rigidbodyDict.get('RBIds',{'Vector':[],'Residue':[]})   #these are lists of rbIds
    if not RBIds['Vector'] and not RBIds['Residue']:
        return
    VRBIds = RBIds['Vector']
    RRBIds = RBIds['Residue']
    RBData = rigidbodyDict
    for item in parmDict:
        if 'RB' in item:
            dFdvDict[item] = 0.     #NB: this is a vector which is no. refl. long & must be filled!
    General = Phase['General']
    cx,ct,cs,cia = General['AtomPtrs']
    cell = General['Cell'][1:7]
    Amat,Bmat = G2lat.cell2AB(cell)
    rpd = np.pi/180.                # degrees -> radians
    rpd2 = rpd**2
    g = nl.inv(np.inner(Bmat,Bmat))
    gvec = np.sqrt(np.array([g[0][0]**2,g[1][1]**2,g[2][2]**2,
        g[0][0]*g[1][1],g[0][0]*g[2][2],g[1][1]*g[2][2]]))
    AtLookup = G2mth.FillAtomLookUp(Phase['Atoms'],cia+8)
    pfx = str(Phase['pId'])+'::'
    RBModels = Phase['RBModels']
    # ---- Vector rigid bodies ----
    for irb,RBObj in enumerate(RBModels.get('Vector',[])):
        VModel = RBData['Vector'][RBObj['RBId']]
        Q = RBObj['Orient'][0]
        jrb = VRBIds.index(RBObj['RBId'])
        rbsx = str(irb)+':'+str(jrb)
        # analytic derivative of atom positions w.r.t. each vector magnitude
        dXdv = []
        for iv in range(len(VModel['VectMag'])):
            dCdv = []
            for vec in VModel['rbVect'][iv]:
                dCdv.append(G2mth.prodQVQ(Q,vec))
            dXdv.append(np.inner(Bmat,np.array(dCdv)).T)
        XYZ,Cart = G2mth.UpdateRBXYZ(Bmat,RBObj,RBData,'Vector')
        for ia,atId in enumerate(RBObj['Ids']):
            atNum = AtLookup[atId]
            dx = 0.00001    # numerical step for quaternion derivatives
            for iv in range(len(VModel['VectMag'])):
                for ix in [0,1,2]:
                    dFdvDict['::RBV;'+str(iv)+':'+str(jrb)] += dXdv[iv][ia][ix]*dFdvDict[pfx+atxIds[ix]+str(atNum)]
            for i,name in enumerate(['RBVPx:','RBVPy:','RBVPz:']):
                dFdvDict[pfx+name+rbsx] += dFdvDict[pfx+atxIds[i]+str(atNum)]
            # central finite differences w.r.t. each quaternion component
            for iv in range(4):
                Q[iv] -= dx
                XYZ1 = G2mth.RotateRBXYZ(Bmat,Cart,G2mth.normQ(Q))
                Q[iv] += 2.*dx
                XYZ2 = G2mth.RotateRBXYZ(Bmat,Cart,G2mth.normQ(Q))
                Q[iv] -= dx
                dXdO = (XYZ2[ia]-XYZ1[ia])/(2.*dx)
                for ix in [0,1,2]:
                    dFdvDict[pfx+'RBV'+OIds[iv]+rbsx] += dXdO[ix]*dFdvDict[pfx+atxIds[ix]+str(atNum)]
            X = G2mth.prodQVQ(Q,Cart[ia])
            # convert dF/dUij into Cartesian frame for the TLS chain rule
            dFdu = np.array([dFdvDict[pfx+Uid+str(AtLookup[atId])] for Uid in atuIds]).T/gvec
            dFdu = G2lat.U6toUij(dFdu.T)
            dFdu = np.tensordot(Amat,np.tensordot(Amat,dFdu,([1,0])),([0,1]))
            dFdu = G2lat.UijtoU6(dFdu)
            atNum = AtLookup[atId]
            if 'T' in RBObj['ThermalMotion'][0]:
                for i,name in enumerate(['RBVT11:','RBVT22:','RBVT33:','RBVT12:','RBVT13:','RBVT23:']):
                    dFdvDict[pfx+name+rbsx] += dFdu[i]
            if 'L' in RBObj['ThermalMotion'][0]:
                dFdvDict[pfx+'RBVL11:'+rbsx] += rpd2*(dFdu[1]*X[2]**2+dFdu[2]*X[1]**2-dFdu[5]*X[1]*X[2])
                dFdvDict[pfx+'RBVL22:'+rbsx] += rpd2*(dFdu[0]*X[2]**2+dFdu[2]*X[0]**2-dFdu[4]*X[0]*X[2])
                dFdvDict[pfx+'RBVL33:'+rbsx] += rpd2*(dFdu[0]*X[1]**2+dFdu[1]*X[0]**2-dFdu[3]*X[0]*X[1])
                dFdvDict[pfx+'RBVL12:'+rbsx] += rpd2*(-dFdu[3]*X[2]**2-2.*dFdu[2]*X[0]*X[1]+
                    dFdu[4]*X[1]*X[2]+dFdu[5]*X[0]*X[2])
                dFdvDict[pfx+'RBVL13:'+rbsx] += rpd2*(-dFdu[4]*X[1]**2-2.*dFdu[1]*X[0]*X[2]+
                    dFdu[3]*X[1]*X[2]+dFdu[5]*X[0]*X[1])
                dFdvDict[pfx+'RBVL23:'+rbsx] += rpd2*(-dFdu[5]*X[0]**2-2.*dFdu[0]*X[1]*X[2]+
                    dFdu[3]*X[0]*X[2]+dFdu[4]*X[0]*X[1])
            if 'S' in RBObj['ThermalMotion'][0]:
                dFdvDict[pfx+'RBVS12:'+rbsx] += rpd*(dFdu[5]*X[1]-2.*dFdu[1]*X[2])
                dFdvDict[pfx+'RBVS13:'+rbsx] += rpd*(-dFdu[5]*X[2]+2.*dFdu[2]*X[1])
                dFdvDict[pfx+'RBVS21:'+rbsx] += rpd*(-dFdu[4]*X[0]+2.*dFdu[0]*X[2])
                dFdvDict[pfx+'RBVS23:'+rbsx] += rpd*(dFdu[4]*X[2]-2.*dFdu[2]*X[0])
                dFdvDict[pfx+'RBVS31:'+rbsx] += rpd*(dFdu[3]*X[0]-2.*dFdu[0]*X[1])
                dFdvDict[pfx+'RBVS32:'+rbsx] += rpd*(-dFdu[3]*X[1]+2.*dFdu[1]*X[0])
                dFdvDict[pfx+'RBVSAA:'+rbsx] += rpd*(dFdu[4]*X[1]-dFdu[3]*X[2])
                dFdvDict[pfx+'RBVSBB:'+rbsx] += rpd*(dFdu[5]*X[0]-dFdu[3]*X[2])
            if 'U' in RBObj['ThermalMotion'][0]:
                dFdvDict[pfx+'RBVU:'+rbsx] += dFdvDict[pfx+'AUiso:'+str(AtLookup[atId])]
    # ---- Residue rigid bodies ----
    for irb,RBObj in enumerate(RBModels.get('Residue',[])):
        Q = RBObj['Orient'][0]
        jrb = RRBIds.index(RBObj['RBId'])
        torData = RBData['Residue'][RBObj['RBId']]['rbSeq']
        rbsx = str(irb)+':'+str(jrb)
        XYZ,Cart = G2mth.UpdateRBXYZ(Bmat,RBObj,RBData,'Residue')
        for itors,tors in enumerate(RBObj['Torsions']):     #derivative error?
            tname = pfx+'RBRTr;'+str(itors)+':'+rbsx
            orId,pvId = torData[itors][:2]
            pivotVec = Cart[orId]-Cart[pvId]
            QA = G2mth.AVdeg2Q(-0.001,pivotVec)     # + & - pivot rotations for numerical deriv
            QB = G2mth.AVdeg2Q(0.001,pivotVec)
            for ir in torData[itors][3]:
                atNum = AtLookup[RBObj['Ids'][ir]]
                rVec = Cart[ir]-Cart[pvId]
                dR = G2mth.prodQVQ(QB,rVec)-G2mth.prodQVQ(QA,rVec)
                dRdT = np.inner(Bmat,G2mth.prodQVQ(Q,dR))/.002
                for ix in [0,1,2]:
                    dFdvDict[tname] += dRdT[ix]*dFdvDict[pfx+atxIds[ix]+str(atNum)]
        for ia,atId in enumerate(RBObj['Ids']):
            atNum = AtLookup[atId]
            dx = 0.00001
            for i,name in enumerate(['RBRPx:','RBRPy:','RBRPz:']):
                dFdvDict[pfx+name+rbsx] += dFdvDict[pfx+atxIds[i]+str(atNum)]
            for iv in range(4):
                Q[iv] -= dx
                XYZ1 = G2mth.RotateRBXYZ(Bmat,Cart,G2mth.normQ(Q))
                Q[iv] += 2.*dx
                XYZ2 = G2mth.RotateRBXYZ(Bmat,Cart,G2mth.normQ(Q))
                Q[iv] -= dx
                dXdO = (XYZ2[ia]-XYZ1[ia])/(2.*dx)
                for ix in [0,1,2]:
                    dFdvDict[pfx+'RBR'+OIds[iv]+rbsx] += dXdO[ix]*dFdvDict[pfx+atxIds[ix]+str(atNum)]
            X = G2mth.prodQVQ(Q,Cart[ia])
            dFdu = np.array([dFdvDict[pfx+Uid+str(AtLookup[atId])] for Uid in atuIds]).T/gvec
            dFdu = G2lat.U6toUij(dFdu.T)
            # NOTE(review): this branch uses Amat.T in the outer tensordot while
            # the Vector-RB branch above uses Amat — confirm which is intended.
            dFdu = np.tensordot(Amat.T,np.tensordot(Amat,dFdu,([1,0])),([0,1]))
            dFdu = G2lat.UijtoU6(dFdu)
            atNum = AtLookup[atId]
            if 'T' in RBObj['ThermalMotion'][0]:
                for i,name in enumerate(['RBRT11:','RBRT22:','RBRT33:','RBRT12:','RBRT13:','RBRT23:']):
                    dFdvDict[pfx+name+rbsx] += dFdu[i]
            if 'L' in RBObj['ThermalMotion'][0]:
                dFdvDict[pfx+'RBRL11:'+rbsx] += rpd2*(dFdu[1]*X[2]**2+dFdu[2]*X[1]**2-dFdu[5]*X[1]*X[2])
                dFdvDict[pfx+'RBRL22:'+rbsx] += rpd2*(dFdu[0]*X[2]**2+dFdu[2]*X[0]**2-dFdu[4]*X[0]*X[2])
                dFdvDict[pfx+'RBRL33:'+rbsx] += rpd2*(dFdu[0]*X[1]**2+dFdu[1]*X[0]**2-dFdu[3]*X[0]*X[1])
                dFdvDict[pfx+'RBRL12:'+rbsx] += rpd2*(-dFdu[3]*X[2]**2-2.*dFdu[2]*X[0]*X[1]+
                    dFdu[4]*X[1]*X[2]+dFdu[5]*X[0]*X[2])
                # NOTE(review): leading signs of the dFdu[4]/dFdu[5] terms below differ
                # from the Vector-RB branch (RBVL13/RBVL23 use a leading minus) — confirm.
                dFdvDict[pfx+'RBRL13:'+rbsx] += rpd2*(dFdu[4]*X[1]**2-2.*dFdu[1]*X[0]*X[2]+
                    dFdu[3]*X[1]*X[2]+dFdu[5]*X[0]*X[1])
                dFdvDict[pfx+'RBRL23:'+rbsx] += rpd2*(dFdu[5]*X[0]**2-2.*dFdu[0]*X[1]*X[2]+
                    dFdu[3]*X[0]*X[2]+dFdu[4]*X[0]*X[1])
            if 'S' in RBObj['ThermalMotion'][0]:
                dFdvDict[pfx+'RBRS12:'+rbsx] += rpd*(dFdu[5]*X[1]-2.*dFdu[1]*X[2])
                dFdvDict[pfx+'RBRS13:'+rbsx] += rpd*(-dFdu[5]*X[2]+2.*dFdu[2]*X[1])
                dFdvDict[pfx+'RBRS21:'+rbsx] += rpd*(-dFdu[4]*X[0]+2.*dFdu[0]*X[2])
                dFdvDict[pfx+'RBRS23:'+rbsx] += rpd*(dFdu[4]*X[2]-2.*dFdu[2]*X[0])
                dFdvDict[pfx+'RBRS31:'+rbsx] += rpd*(dFdu[3]*X[0]-2.*dFdu[0]*X[1])
                dFdvDict[pfx+'RBRS32:'+rbsx] += rpd*(-dFdu[3]*X[1]+2.*dFdu[1]*X[0])
                dFdvDict[pfx+'RBRSAA:'+rbsx] += rpd*(dFdu[4]*X[1]-dFdu[3]*X[2])
                dFdvDict[pfx+'RBRSBB:'+rbsx] += rpd*(dFdu[5]*X[0]-dFdu[3]*X[2])
            if 'U' in RBObj['ThermalMotion'][0]:
                dFdvDict[pfx+'RBRU:'+rbsx] += dFdvDict[pfx+'AUiso:'+str(AtLookup[atId])]
################################################################################
##### Penalty & restraint functions
################################################################################
def penaltyFxn(HistoPhases,calcControls,parmDict,varyList):
    '''Compute user-supplied and built-in restraint functions.

    :param tuple HistoPhases: (Histograms, Phases, restraintDict, rigidbodyDict)
    :param dict calcControls: computation controls (poType, SH coefficients, ...)
    :param dict parmDict: current parameter values
    :param list varyList: names of varied parameters
    :returns: pNames,pVals,pWt,pWsum,pWnum — restraint labels, residual values,
        weights, and per-restraint-type weighted sums & counts
    '''
    Histograms,Phases,restraintDict,rigidbodyDict = HistoPhases
    pNames = []
    pVals = []
    pWt = []
    negWt = {}
    pWsum = {}
    pWnum = {}
    for phase in Phases:
        pId = Phases[phase]['pId']
        negWt[pId] = Phases[phase]['General']['Pawley neg wt']
        General = Phases[phase]['General']
        cx,ct,cs,cia = General['AtomPtrs']
        textureData = General['SH Texture']
        SGData = General['SGData']
        Atoms = Phases[phase]['Atoms']
        AtLookup = G2mth.FillAtomLookUp(Phases[phase]['Atoms'],cia+8)
        cell = General['Cell'][1:7]
        Amat,Bmat = G2lat.cell2AB(cell)
        if phase not in restraintDict:
            continue
        phaseRest = restraintDict[phase]
        # (restraint type, key of its item list) pairs
        names = [['Bond','Bonds'],['Angle','Angles'],['Plane','Planes'],
            ['Chiral','Volumes'],['Torsion','Torsions'],['Rama','Ramas'],
            ['ChemComp','Sites'],['Texture','HKLs'],['General','General'],]
        for name,rest in names:
            pWsum[name] = 0.
            pWnum[name] = 0
            if name not in phaseRest:
                continue
            itemRest = phaseRest[name]
            if itemRest[rest] and itemRest['Use']:
                wt = itemRest.get('wtFactor',1.)
                if name in ['Bond','Angle','Plane','Chiral']:
                    # geometric restraints: residual is obs - calc
                    for i,[indx,ops,obs,esd] in enumerate(itemRest[rest]):
                        pNames.append(str(pId)+':'+name+':'+str(i))
                        XYZ = np.array(G2mth.GetAtomCoordsByID(pId,parmDict,AtLookup,indx))
                        XYZ = G2mth.getSyXYZ(XYZ,ops,SGData)
                        if name == 'Bond':
                            calc = G2mth.getRestDist(XYZ,Amat)
                        elif name == 'Angle':
                            calc = G2mth.getRestAngle(XYZ,Amat)
                        elif name == 'Plane':
                            calc = G2mth.getRestPlane(XYZ,Amat)
                        elif name == 'Chiral':
                            calc = G2mth.getRestChiral(XYZ,Amat)
                        pVals.append(obs-calc)
                        pWt.append(wt/esd**2)
                        pWsum[name] += wt*((obs-calc)/esd)**2
                        pWnum[name] += 1
                elif name in ['Torsion','Rama']:
                    # pseudo-potential restraints: residual is the energy itself
                    coeffDict = itemRest['Coeff']
                    for i,[indx,ops,cofName,esd] in enumerate(itemRest[rest]):
                        pNames.append(str(pId)+':'+name+':'+str(i))
                        XYZ = np.array(G2mth.GetAtomCoordsByID(pId,parmDict,AtLookup,indx))
                        XYZ = G2mth.getSyXYZ(XYZ,ops,SGData)
                        if name == 'Torsion':
                            tor = G2mth.getRestTorsion(XYZ,Amat)
                            restr,calc = G2mth.calcTorsionEnergy(tor,coeffDict[cofName])
                        else:
                            phi,psi = G2mth.getRestRama(XYZ,Amat)
                            restr,calc = G2mth.calcRamaEnergy(phi,psi,coeffDict[cofName])
                        pVals.append(restr)
                        pWt.append(wt/esd**2)
                        pWsum[name] += wt*(restr/esd)**2
                        pWnum[name] += 1
                elif name == 'ChemComp':
                    # chemical composition restraint: weighted site fraction sum
                    for i,[indx,factors,obs,esd] in enumerate(itemRest[rest]):
                        pNames.append(str(pId)+':'+name+':'+str(i))
                        mul = np.array(G2mth.GetAtomItemsById(Atoms,AtLookup,indx,cs+1))
                        frac = np.array(G2mth.GetAtomFracByID(pId,parmDict,AtLookup,indx))
                        calc = np.sum(mul*frac*factors)
                        pVals.append(obs-calc)
                        pWt.append(wt/esd**2)
                        pWsum[name] += wt*((obs-calc)/esd)**2
                        pWnum[name] += 1
                elif name == 'Texture':
                    # texture restraints: penalize negative pole figure values
                    SHkeys = list(textureData['SH Coeff'][1].keys())
                    SHCoef = G2mth.GetSHCoeff(pId,parmDict,SHkeys)
                    shModels = ['cylindrical','none','shear - 2/m','rolling - mmm']
                    SamSym = dict(zip(shModels,['0','-1','2/m','mmm']))
                    for i,[hkl,grid,esd1,ifesd2,esd2] in enumerate(itemRest[rest]):
                        PH = np.array(hkl)
                        phi,beta = G2lat.CrsAng(np.array(hkl),cell,SGData)
                        ODFln = G2lat.Flnh(False,SHCoef,phi,beta,SGData)
                        R,P,Z = G2mth.getRestPolefig(ODFln,SamSym[textureData['Model']],grid)
                        Z1 = ma.masked_greater(Z,0.0)           #is this + or -?
                        IndZ1 = np.array(ma.nonzero(Z1))
                        for ind in IndZ1.T:
                            pNames.append('%d:%s:%d:%.2f:%.2f'%(pId,name,i,R[ind[0],ind[1]],P[ind[0],ind[1]]))
                            pVals.append(Z1[ind[0]][ind[1]])
                            pWt.append(wt/esd1**2)
                            pWsum[name] += wt*(-Z1[ind[0]][ind[1]]/esd1)**2
                            pWnum[name] += 1
                        if ifesd2:
                            Z2 = 1.-Z
                            for ind in np.ndindex(grid,grid):
                                pNames.append('%d:%s:%d:%.2f:%.2f'%(pId,name+'-unit',i,R[ind[0],ind[1]],P[ind[0],ind[1]]))
                                pVals.append(Z2[ind[0]][ind[1]])
                                pWt.append(wt/esd2**2)
                                pWsum[name] += wt*(Z2/esd2)**2
                                pWnum[name] += 1
                elif name == 'General':
                    # user-supplied expression restraints
                    for i,(eq,obs,esd) in enumerate(itemRest[rest]):
                        pNames.append(str(pId)+':'+name+':'+str(i))
                        calcobj = G2obj.ExpressionCalcObj(eq)
                        calcobj.SetupCalc(parmDict)
                        calc = calcobj.EvalExpression()
                        pVals.append(obs-calc)
                        pWt.append(wt/esd**2)
                        pWsum[name] += wt*((obs-calc)/esd)**2
                        pWnum[name] += 1
    # spherical-harmonics preferred orientation: penalize negative pole figure
    for phase in Phases:
        name = 'SH-Pref.Ori.'
        pId = Phases[phase]['pId']
        General = Phases[phase]['General']
        SGData = General['SGData']
        cell = General['Cell'][1:7]
        pWsum[name] = 0.0
        pWnum[name] = 0
        for hist in Phases[phase]['Histograms']:
            if not Phases[phase]['Histograms'][hist]['Use']:
                continue
            if hist in Histograms and 'PWDR' in hist:
                hId = Histograms[hist]['hId']
                phfx = '%d:%d:'%(pId,hId)
                if calcControls[phfx+'poType'] == 'SH':
                    toler = calcControls[phfx+'SHtoler']
                    wt = 1./toler**2
                    HKLs = np.array(calcControls[phfx+'SHhkl'])
                    SHnames = calcControls[phfx+'SHnames']
                    SHcof = dict(zip(SHnames,[parmDict[phfx+cof] for cof in SHnames]))
                    for i,PH in enumerate(HKLs):
                        phi,beta = G2lat.CrsAng(PH,cell,SGData)
                        SH3Coef = {}
                        for item in SHcof:
                            L,N = eval(item.strip('C'))
                            SH3Coef['C%d,0,%d'%(L,N)] = SHcof[item]
                        ODFln = G2lat.Flnh(False,SH3Coef,phi,beta,SGData)
                        X = np.linspace(0,90.0,26)
                        Y = ma.masked_greater(G2lat.polfcal(ODFln,'0',X,0.0),0.0)       #+ or -?
                        IndY = ma.nonzero(Y)
                        for ind in IndY[0]:
                            pNames.append('%d:%d:%s:%d:%.2f'%(pId,hId,name,i,X[ind]))
                            pVals.append(Y[ind])
                            pWt.append(wt)
                            pWsum[name] += wt*(Y[ind])**2
                            pWnum[name] += 1
    # negative Pawley intensity restraints
    pWsum['PWLref'] = 0.
    pWnum['PWLref'] = 0
    for item in varyList:
        if 'PWLref' in item and parmDict[item] < 0.:
            pId = int(item.split(':')[0])
            if negWt[pId]:
                pNames.append(item)
                pVals.append(parmDict[item])
                pWt.append(negWt[pId])
                pWsum['PWLref'] += negWt[pId]*(parmDict[item])**2
                pWnum['PWLref'] += 1
    pVals = np.array(pVals)
    pWt = np.array(pWt)         #should this be np.sqrt?
    return pNames,pVals,pWt,pWsum,pWnum
def penaltyDeriv(pNames,pVal,HistoPhases,calcControls,parmDict,varyList):
    '''Compute derivatives on user-supplied and built-in restraint
    (penalty) functions

    where pNames is list of restraint labels

    :returns: pDerv with partial derivatives by variable# in varyList and
        restraint# in pNames (pDerv[variable#][restraint#])
    '''
    Histograms,Phases,restraintDict,rigidbodyDict = HistoPhases
    pDerv = np.zeros((len(varyList),len(pVal)))
    for pName in pNames: # loop over restraints
        if 'General' == pName.split(':')[1]:
            # initialize for General restraint(s) here
            GeneralInit = True
            parmDict0 = parmDict.copy()
            # setup steps for each parameter
            stepDict = {}
            for parm in varyList:
                stepDict[parm] = G2obj.getVarStep(parm,parmDict)
            break
    for phase in Phases:
#        if phase not in restraintDict:
#            continue
        pId = Phases[phase]['pId']
        General = Phases[phase]['General']
        cx,ct,cs,cia = General['AtomPtrs']
        SGData = General['SGData']
        Atoms = Phases[phase]['Atoms']
        AtLookup = G2mth.FillAtomLookUp(Phases[phase]['Atoms'],cia+8)
        cell = General['Cell'][1:7]
        Amat,Bmat = G2lat.cell2AB(cell)
        textureData = General['SH Texture']
        SHkeys = list(textureData['SH Coeff'][1].keys())
        SHCoef = G2mth.GetSHCoeff(pId,parmDict,SHkeys)
        shModels = ['cylindrical','none','shear - 2/m','rolling - mmm']
        SamSym = dict(zip(shModels,['0','-1','2/m','mmm']))
        sam = SamSym[textureData['Model']]
        phaseRest = restraintDict.get(phase,{})
        # restraint type -> key of its item list
        names = {'Bond':'Bonds','Angle':'Angles','Plane':'Planes',
            'Chiral':'Volumes','Torsion':'Torsions','Rama':'Ramas',
            'ChemComp':'Sites','Texture':'HKLs'}
        lasthkl = np.array([0,0,0])
        for ip,pName in enumerate(pNames): # loop over restraints
            pnames = pName.split(':')
            if pId == int(pnames[0]):
                name = pnames[1]
                if 'PWL' in pName:
                    pDerv[varyList.index(pName)][ip] += 1.
                    continue
                elif 'SH-' in pName:
                    continue
                Id = int(pnames[2])
                itemRest = phaseRest[name]
                if name in ['Bond','Angle','Plane','Chiral']:
                    # analytic geometric derivatives w.r.t. atom coordinates
                    indx,ops,obs,esd = itemRest[names[name]][Id]
                    dNames = []
                    for ind in indx:
                        dNames += [str(pId)+'::dA'+Xname+':'+str(AtLookup[ind]) for Xname in ['x','y','z']]
                    XYZ = np.array(G2mth.GetAtomCoordsByID(pId,parmDict,AtLookup,indx))
                    if name == 'Bond':
                        deriv = G2mth.getRestDeriv(G2mth.getRestDist,XYZ,Amat,ops,SGData)
                    elif name == 'Angle':
                        deriv = G2mth.getRestDeriv(G2mth.getRestAngle,XYZ,Amat,ops,SGData)
                    elif name == 'Plane':
                        deriv = G2mth.getRestDeriv(G2mth.getRestPlane,XYZ,Amat,ops,SGData)
                    elif name == 'Chiral':
                        deriv = G2mth.getRestDeriv(G2mth.getRestChiral,XYZ,Amat,ops,SGData)
                elif name in ['Torsion','Rama']:
                    coffDict = itemRest['Coeff']
                    indx,ops,cofName,esd = itemRest[names[name]][Id]
                    dNames = []
                    for ind in indx:
                        dNames += [str(pId)+'::dA'+Xname+':'+str(AtLookup[ind]) for Xname in ['x','y','z']]
                    XYZ = np.array(G2mth.GetAtomCoordsByID(pId,parmDict,AtLookup,indx))
                    if name == 'Torsion':
                        deriv = G2mth.getTorsionDeriv(XYZ,Amat,coffDict[cofName])
                    else:
                        deriv = G2mth.getRamaDeriv(XYZ,Amat,coffDict[cofName])
                elif name == 'ChemComp':
                    indx,factors,obs,esd = itemRest[names[name]][Id]
                    dNames = []
                    for ind in indx:
                        dNames += [str(pId)+'::Afrac:'+str(AtLookup[ind])]
                    mul = np.array(G2mth.GetAtomItemsById(Atoms,AtLookup,indx,cs+1))
                    deriv = mul*factors
                elif 'Texture' in name:
                    deriv = []
                    dNames = []
                    hkl,grid,esd1,ifesd2,esd2 = itemRest[names[name]][Id]
                    hkl = np.array(hkl)
                    # recompute ODF coefficients only when hkl changes
                    if np.any(lasthkl-hkl):
                        phi,beta = G2lat.CrsAng(np.array(hkl),cell,SGData)
                        ODFln = G2lat.Flnh(False,SHCoef,phi,beta,SGData)
                        lasthkl = copy.copy(hkl)
                    if 'unit' in name:
                        pass
                    else:
                        gam = float(pnames[3])
                        psi = float(pnames[4])
                        for SHname in ODFln:
                            l,m,n = eval(SHname[1:])
                            Ksl = G2lat.GetKsl(l,m,sam,psi,gam)[0]
                            dNames += [str(pId)+'::'+SHname]
                            deriv.append(-ODFln[SHname][0]*Ksl/SHCoef[SHname])
                elif name == 'General':
                    # numerical (central difference) derivatives of expression restraints
                    deriv = []
                    dNames = []
                    eq,obs,esd = itemRest[name][Id]
                    calcobj = G2obj.ExpressionCalcObj(eq)
                    parmlist = list(eq.assgnVars.values()) # parameters used in this expression
                    for parm in parmlist: # expand list if any parms are determined by constraints
                        if parm in G2mv.dependentVars:
                            parmlist += G2mv.independentVars
                            break
                    for ind,var in enumerate(varyList):
                        drv = 0
                        if var in parmlist:
                            step = stepDict.get(var,1e-5)
                            calc = []
                            # apply step to parameter
                            oneparm = True
                            for s in -step,2*step:
                                parmDict[var] += s
                                # extend shift if needed to other parameters
                                if var in G2mv.independentVars:
                                    G2mv.Dict2Map(parmDict,[])
                                    oneparm = False
                                elif var in G2mv.dependentVars:
                                    G2mv.Map2Dict(parmDict,[])
                                    oneparm = False
                                if 'RB' in var:
                                    ApplyRBModels(parmDict,Phases,rigidbodyDict)
                                    # test
                                    oneparm = False
                                calcobj.SetupCalc(parmDict)
                                calc.append(calcobj.EvalExpression())
                            drv = (calc[1]-calc[0])*.5/step
                            # restore the dict
                            if oneparm:
                                parmDict[var] = parmDict0[var]
                            else:
                                parmDict = parmDict0.copy()
                        else:
                            drv = 0
                        pDerv[ind][ip] = drv
                # Add derivatives into matrix, if needed
                for dName,drv in zip(dNames,deriv):
                    try:
                        ind = varyList.index(dName)
                        pDerv[ind][ip] += drv
                    except ValueError:
                        pass
        # spherical-harmonics preferred orientation restraint derivatives
        lasthkl = np.array([0,0,0])
        for ip,pName in enumerate(pNames):
            deriv = []
            dNames = []
            pnames = pName.split(':')
            if 'SH-' in pName and pId == int(pnames[0]):
                hId = int(pnames[1])
                phfx = '%d:%d:'%(pId,hId)
                psi = float(pnames[4])
                HKLs = calcControls[phfx+'SHhkl']
                SHnames = calcControls[phfx+'SHnames']
                SHcof = dict(zip(SHnames,[parmDict[phfx+cof] for cof in SHnames]))
                hkl = np.array(HKLs[int(pnames[3])])
                if np.any(lasthkl-hkl):
                    phi,beta = G2lat.CrsAng(np.array(hkl),cell,SGData)
                    SH3Coef = {}
                    for item in SHcof:
                        L,N = eval(item.strip('C'))
                        SH3Coef['C%d,0,%d'%(L,N)] = SHcof[item]
                    ODFln = G2lat.Flnh(False,SH3Coef,phi,beta,SGData)
                    lasthkl = copy.copy(hkl)
                for SHname in SHnames:
                    l,n = eval(SHname[1:])
                    SH3name = 'C%d,0,%d'%(l,n)
                    Ksl = G2lat.GetKsl(l,0,'0',psi,0.0)[0]
                    dNames += [phfx+SHname]
                    deriv.append(ODFln[SH3name][0]*Ksl/SHcof[SHname])
                for dName,drv in zip(dNames,deriv):
                    try:
                        ind = varyList.index(dName)
                        pDerv[ind][ip] += drv
                    except ValueError:
                        pass
    return pDerv
################################################################################
##### Function & derivative calculations
################################################################################
def GetAtomFXU(pfx,calcControls,parmDict):
    """Pull the per-atom parameters for one phase (type, multiplicity,
    fraction, coordinates & shifts, ADPs, magnetic moments) out of the flat
    parameter dictionary into parallel arrays.

    :param str pfx: phase prefix, e.g. '0::'
    :param dict calcControls: must contain 'Natoms' keyed by pfx
    :param dict parmDict: flat parameter dictionary
    :returns: Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata
    """
    natm = calcControls['Natoms'][pfx]
    Tdata = natm*[' ',]
    Mdata = np.zeros(natm)
    IAdata = natm*[' ',]
    Fdata = np.zeros(natm)
    Xdata = np.zeros((3,natm))
    dXdata = np.zeros((3,natm))
    Uisodata = np.zeros(natm)
    Uijdata = np.zeros((6,natm))
    Gdata = np.zeros((3,natm))
    # parameter-name suffix -> destination array (or array row)
    keys = {'Atype:':Tdata,'Amul:':Mdata,'Afrac:':Fdata,'AI/A:':IAdata,
        'dAx:':dXdata[0],'dAy:':dXdata[1],'dAz:':dXdata[2],
        'Ax:':Xdata[0],'Ay:':Xdata[1],'Az:':Xdata[2],'AUiso:':Uisodata,
        'AU11:':Uijdata[0],'AU22:':Uijdata[1],'AU33:':Uijdata[2],
        'AU12:':Uijdata[3],'AU13:':Uijdata[4],'AU23:':Uijdata[5],
        'AMx:':Gdata[0],'AMy:':Gdata[1],'AMz:':Gdata[2],}
    for key,target in keys.items():
        for iatm in range(natm):
            parm = '%s%s%d'%(pfx,key,iatm)
            if parm in parmDict:
                target[iatm] = parmDict[parm]
    # replace exact zeros to avoid divide-by-zero in derivative calcs
    Fdata = np.where(Fdata,Fdata,1.e-8)
    Gdata = np.where(Gdata,Gdata,1.e-8)
    return Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata
def GetAtomSSFXU(pfx,calcControls,parmDict):
    '''Gather the modulation-wave (superspace) atom parameters for one phase.

    :param str pfx: phase id string prefixed to every parameter name
    :param dict calcControls: must contain 'Natoms' and 'maxSSwave' entries
        for this phase prefix
    :param dict parmDict: name:value map of parameters
    :returns: waveTypes,FSSdata,XSSdata,USSdata,MSSdata where waveTypes is a
        per-atom list of the F,P,A,M wave-type strings and the arrays hold
        the site fraction, position, adp & magnetic wave coefficients indexed
        [term][wave][atom]
    '''
    Natoms = calcControls['Natoms'][pfx]
    maxSSwave = calcControls['maxSSwave'][pfx]
    #number of waves allowed for each parameter family, keyed by its leading letter
    Nwave = {'F':maxSSwave['Sfrac'],'X':maxSSwave['Spos'],'Y':maxSSwave['Spos'],'Z':maxSSwave['Spos'],
        'U':maxSSwave['Sadp'],'M':maxSSwave['Smag'],'T':maxSSwave['Spos']}
    XSSdata = np.zeros((6,maxSSwave['Spos'],Natoms))
    FSSdata = np.zeros((2,maxSSwave['Sfrac'],Natoms))
    USSdata = np.zeros((12,maxSSwave['Sadp'],Natoms))
    MSSdata = np.zeros((6,maxSSwave['Smag'],Natoms))
    #map each parameter-name fragment onto the array slice it fills; note that
    #some alternative wave names (Fzero/Fwid/Tmin/Tmax/...) alias the same rows
    #as the sin/cos Fourier terms
    keys = {'Fsin:':FSSdata[0],'Fcos:':FSSdata[1],'Fzero:':FSSdata[0],'Fwid:':FSSdata[1],
        'Tmin:':XSSdata[0],'Tmax:':XSSdata[1],'Xmax:':XSSdata[2],'Ymax:':XSSdata[3],'Zmax:':XSSdata[4],
        'Xsin:':XSSdata[0],'Ysin:':XSSdata[1],'Zsin:':XSSdata[2],'Xcos:':XSSdata[3],'Ycos:':XSSdata[4],'Zcos:':XSSdata[5],
        'U11sin:':USSdata[0],'U22sin:':USSdata[1],'U33sin:':USSdata[2],'U12sin:':USSdata[3],'U13sin:':USSdata[4],'U23sin:':USSdata[5],
        'U11cos:':USSdata[6],'U22cos:':USSdata[7],'U33cos:':USSdata[8],'U12cos:':USSdata[9],'U13cos:':USSdata[10],'U23cos:':USSdata[11],
        'MXsin:':MSSdata[0],'MYsin:':MSSdata[1],'MZsin:':MSSdata[2],'MXcos:':MSSdata[3],'MYcos:':MSSdata[4],'MZcos:':MSSdata[5]}
    waveTypes = []
    for iatm in range(Natoms):
        waveTypes.append([parmDict.get(pfx+kind+'waveType:'+str(iatm),'') for kind in ['F','P','A','M']])
        for key,dest in keys.items():
            for m in range(Nwave[key[0]]):
                name = '%s%s%d:%d'%(pfx,key,iatm,m)
                if name in parmDict:
                    dest[m][iatm] = parmDict[name]
    return waveTypes,FSSdata,XSSdata,USSdata,MSSdata
def StructureFactor2(refDict,G,hfx,pfx,SGData,calcControls,parmDict):
    ''' Compute structure factors for all h,k,l for phase
    puts the result, F^2, in each refl[9] of refDict['RefList'] (the phase
    angle goes in refl[10]; for single crystal data the twin-summed F^2 also
    goes in refl[7]); operates on blocks of 100 reflections for speed
    input:
    :param dict refDict: where
        'RefList' list where each ref = h,k,l,it,d,...
        'FF' dict of form factors - filled in below
    :param np.array G: reciprocal metric tensor
    :param str hfx: histogram id string
    :param str pfx: phase id string
    :param dict SGData: space group info. dictionary output from SpcGroup
    :param dict calcControls:
    :param dict parmDict:
    '''
    phfx = pfx.split(':')[0]+hfx
    #reciprocal cell edges; Mast scales Uij terms into the DW exponent (twopisq is module-level)
    ast = np.sqrt(np.diag(G))
    Mast = twopisq*np.multiply.outer(ast,ast)
    #space group rotation (transposed) & translation parts
    SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
    SGT = np.array([ops[1] for ops in SGData['SGOps']])
    FFtables = calcControls['FFtables']
    BLtables = calcControls['BLtables']
    Amat,Bmat = G2lat.Gmat2AB(G)
    #Flack absolute-structure factor: only for noncentrosymmetric single crystal data
    Flack = 1.0
    if not SGData['SGInv'] and 'S' in calcControls[hfx+'histType'] and phfx+'Flack' in parmDict:
        Flack = 1.-2.*parmDict[phfx+'Flack']
    #default: identity "twin law"; replaced below by the real twin laws for single crystal data
    TwinLaw = np.array([[[1,0,0],[0,1,0],[0,0,1]],])
    TwDict = refDict.get('TwDict',{})
    if 'S' in calcControls[hfx+'histType']:
        NTL = calcControls[phfx+'NTL']
        NM = calcControls[phfx+'TwinNMN']+1
        TwinLaw = calcControls[phfx+'TwinLaw']
        TwinFr = np.array([parmDict[phfx+'TwinFr:'+str(i)] for i in range(len(TwinLaw))])
        TwinInv = list(np.where(calcControls[phfx+'TwinInv'],-1,1))
    Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
        GetAtomFXU(pfx,calcControls,parmDict)
    if not Xdata.size: #no atoms in phase!
        return
    #resonant/anomalous terms: neutron CW scattering lengths or x-ray f',f"
    if 'NC' in calcControls[hfx+'histType']:
        FP,FPP = G2el.BlenResCW(Tdata,BLtables,parmDict[hfx+'Lam'])
    elif 'X' in calcControls[hfx+'histType']:
        FP = np.array([FFtables[El][hfx+'FP'] for El in Tdata])
        FPP = np.array([FFtables[El][hfx+'FPP'] for El in Tdata])
    Uij = np.array(G2lat.U6toUij(Uijdata))
    bij = Mast*Uij.T
    blkSize = 100 #no. of reflections in a block - size seems optimal
    nRef = refDict['RefList'].shape[0]
    SQ = 1./(2.*refDict['RefList'].T[4])**2    #(sin(theta)/lambda)^2 from d-spacing in column 4
    if 'N' in calcControls[hfx+'histType']:
        #neutron scattering lengths: constant with Q
        dat = G2el.getBLvalues(BLtables)
        refDict['FF']['El'] = list(dat.keys())
        refDict['FF']['FF'] = np.ones((nRef,len(dat)))*list(dat.values())
    else: #'X'
        #x-ray form factors fall off with Q; evaluate per reflection
        dat = G2el.getFFvalues(FFtables,0.)
        refDict['FF']['El'] = list(dat.keys())
        refDict['FF']['FF'] = np.zeros((nRef,len(dat)))
        for iel,El in enumerate(refDict['FF']['El']):
            refDict['FF']['FF'].T[iel] = G2el.ScatFac(FFtables[El],SQ)
    #reflection processing begins here - big arrays!
    iBeg = 0
    while iBeg < nRef:
        iFin = min(iBeg+blkSize,nRef)
        refl = refDict['RefList'][iBeg:iFin] #array(blkSize,nItems)
        H = refl.T[:3] #array(blkSize,3)
        H = np.squeeze(np.inner(H.T,TwinLaw)) #maybe array(blkSize,nTwins,3) or (blkSize,3)
        TwMask = np.any(H,axis=-1)
        if TwinLaw.shape[0] > 1 and TwDict: #need np.inner(TwinLaw[?],TwDict[iref][i])*TwinInv[i]
            #merohedral-twin special cases: substitute per-reflection twin-related indices
            for ir in range(blkSize):
                iref = ir+iBeg
                if iref in TwDict:
                    for i in TwDict[iref]:
                        for n in range(NTL):
                            H[ir][i+n*NM] = np.inner(TwinLaw[n*NM],np.array(TwDict[iref][i])*TwinInv[i+n*NM])
            TwMask = np.any(H,axis=-1)
        SQ = 1./(2.*refl.T[4])**2 #array(blkSize)
        SQfactor = 4.0*SQ*twopisq #ditto prev.
        if 'T' in calcControls[hfx+'histType']:
            #TOF data: scattering lengths depend on wavelength, taken per reflection
            if 'P' in calcControls[hfx+'histType']:
                FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[14])
            else:
                FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[12])
            FP = np.repeat(FP.T,len(SGT)*len(TwinLaw),axis=0)
            FPP = np.repeat(FPP.T,len(SGT)*len(TwinLaw),axis=0)
        Uniq = np.inner(H,SGMT)
        Phi = np.inner(H,SGT)
        #phase angle 2*pi*(h.x + h.t) for every reflection/op/atom
        phase = twopi*(np.inner(Uniq,(dXdata+Xdata).T).T+Phi.T).T
        sinp = np.sin(phase)
        cosp = np.cos(phase)
        #isotropic & anisotropic Debye-Waller factors (exp clamped to avoid overflow)
        biso = -SQfactor*Uisodata[:,nxs]
        Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),len(SGT)*len(TwinLaw),axis=1).T
        HbH = -np.sum(Uniq.T*np.swapaxes(np.inner(bij,Uniq),2,-1),axis=1)
        Tuij = np.where(HbH<1.,np.exp(HbH),1.0).T
        Tcorr = np.reshape(Tiso,Tuij.shape)*Tuij*Mdata*Fdata/len(SGMT)
        Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
        FF = np.repeat(refDict['FF']['FF'][iBeg:iFin].T[Tindx].T,len(SGT)*len(TwinLaw),axis=0)
        #Babinet solvent correction term
        Bab = np.repeat(parmDict[phfx+'BabA']*np.exp(-parmDict[phfx+'BabU']*SQfactor),len(SGT)*len(TwinLaw))
        if 'T' in calcControls[hfx+'histType']: #fa,fb are 2 X blkSize X nTwin X nOps x nAtoms
            fa = np.array([np.reshape(((FF+FP).T-Bab).T,cosp.shape)*cosp*Tcorr,-np.reshape(Flack*FPP,sinp.shape)*sinp*Tcorr])
            fb = np.array([np.reshape(((FF+FP).T-Bab).T,sinp.shape)*sinp*Tcorr,np.reshape(Flack*FPP,cosp.shape)*cosp*Tcorr])
        else:
            fa = np.array([np.reshape(((FF+FP).T-Bab).T,cosp.shape)*cosp*Tcorr,-Flack*FPP*sinp*Tcorr])
            fb = np.array([np.reshape(((FF+FP).T-Bab).T,sinp.shape)*sinp*Tcorr,Flack*FPP*cosp*Tcorr])
        fas = np.sum(np.sum(fa,axis=-1),axis=-1) #real 2 x blkSize x nTwin; sum over atoms & uniq hkl
        fbs = np.sum(np.sum(fb,axis=-1),axis=-1) #imag
        if SGData['SGInv']: #centrosymmetric; B=0
            fbs[0] *= 0.
            fas[1] *= 0.
        if 'P' in calcControls[hfx+'histType']: #PXC, PNC & PNT: F^2 = A[0]^2 + A[1]^2 + B[0]^2 + B[1]^2
            refl.T[9] = np.sum(fas**2,axis=0)+np.sum(fbs**2,axis=0) #add fam**2 & fbm**2 here
            refl.T[10] = atan2d(fbs[0],fas[0]) #ignore f' & f"
        else: #HKLF: F^2 = (A[0]+A[1])^2 + (B[0]+B[1])^2
            if len(TwinLaw) > 1:
                refl.T[9] = np.sum(fas[:,:,0],axis=0)**2+np.sum(fbs[:,:,0],axis=0)**2 #FcT from primary twin element
                refl.T[7] = np.sum(TwinFr*TwMask*np.sum(fas,axis=0)**2,axis=-1)+ \
                    np.sum(TwinFr*TwMask*np.sum(fbs,axis=0)**2,axis=-1) #Fc sum over twins
                refl.T[10] = atan2d(fbs[0].T[0],fas[0].T[0]) #ignore f' & f" & use primary twin
            else: # checked correct!!
                refl.T[9] = np.sum(fas,axis=0)**2+np.sum(fbs,axis=0)**2
                refl.T[7] = np.copy(refl.T[9])
                refl.T[10] = atan2d(fbs[0],fas[0]) #ignore f' & f"
#                refl.T[10] = atan2d(np.sum(fbs,axis=0),np.sum(fas,axis=0)) #include f' & f"
        iBeg += blkSize
#    print 'sf time %.4f, nref %d, blkSize %d'%(time.time()-time0,nRef,blkSize)
def StructureFactorDerv2(refDict,G,hfx,pfx,SGData,calcControls,parmDict):
    '''Compute structure factor derivatives on blocks of reflections - for powders/nontwins only
    faster than StructureFactorDerv - correct for powders/nontwins!!
    input:
    :param dict refDict: where
        'RefList' list where each ref = h,k,l,it,d,...
        'FF' dict of form factors - filled in below
    :param np.array G: reciprocal metric tensor
    :param str hfx: histogram id string
    :param str pfx: phase id string
    :param dict SGData: space group info. dictionary output from SpcGroup
    :param dict calcControls:
    :param dict parmDict:
    :returns: dict dFdvDict: dictionary of derivatives of F^2 w.r.t. the atom
        fractions, positions, Uiso/Uij terms plus the Flack & Babinet parameters,
        keyed by parameter name
    '''
    phfx = pfx.split(':')[0]+hfx
    ast = np.sqrt(np.diag(G))
    Mast = twopisq*np.multiply.outer(ast,ast)
    #NOTE(review): rotation matrices are NOT transposed here, unlike
    #StructureFactor2 which uses ops[0].T - confirm this asymmetry is intended
    SGMT = np.array([ops[0] for ops in SGData['SGOps']])
    SGT = np.array([ops[1] for ops in SGData['SGOps']])
    FFtables = calcControls['FFtables']
    BLtables = calcControls['BLtables']
    Amat,Bmat = G2lat.Gmat2AB(G)
    nRef = len(refDict['RefList'])
    Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
        GetAtomFXU(pfx,calcControls,parmDict)
    if not Xdata.size: #no atoms in phase!
        return {}
    mSize = len(Mdata)
    FF = np.zeros(len(Tdata))
    #resonant/anomalous terms: neutron CW scattering lengths or x-ray f',f"
    if 'NC' in calcControls[hfx+'histType']:
        FP,FPP = G2el.BlenResCW(Tdata,BLtables,parmDict[hfx+'Lam'])
    elif 'X' in calcControls[hfx+'histType']:
        FP = np.array([FFtables[El][hfx+'FP'] for El in Tdata])
        FPP = np.array([FFtables[El][hfx+'FPP'] for El in Tdata])
    Uij = np.array(G2lat.U6toUij(Uijdata))
    bij = Mast*Uij.T
    #accumulators: one row per reflection, one slot per atom/parameter
    dFdvDict = {}
    dFdfr = np.zeros((nRef,mSize))
    dFdx = np.zeros((nRef,mSize,3))
    dFdui = np.zeros((nRef,mSize))
    dFdua = np.zeros((nRef,mSize,6))
    dFdbab = np.zeros((nRef,2))
    dFdfl = np.zeros((nRef))
    Flack = 1.0
    if not SGData['SGInv'] and 'S' in calcControls[hfx+'histType'] and phfx+'Flack' in parmDict:
        Flack = 1.-2.*parmDict[phfx+'Flack']
#    time0 = time.time()
    #reflection processing begins here - big arrays!
    iBeg = 0
    blkSize = 32 #no. of reflections in a block - optimized for speed
    while iBeg < nRef:
        iFin = min(iBeg+blkSize,nRef)
        refl = refDict['RefList'][iBeg:iFin] #array(blkSize,nItems)
        H = refl.T[:3].T
        SQ = 1./(2.*refl.T[4])**2 # or (sin(theta)/lambda)**2
        SQfactor = 8.0*SQ*np.pi**2
        if 'T' in calcControls[hfx+'histType']:
            #TOF data: wavelength-dependent scattering lengths per reflection
            if 'P' in calcControls[hfx+'histType']:
                FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[14])
            else:
                FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[12])
            FP = np.repeat(FP.T,len(SGT),axis=0)
            FPP = np.repeat(FPP.T,len(SGT),axis=0)
        #Babinet solvent term & its derivative w.r.t. BabA
        dBabdA = np.exp(-parmDict[phfx+'BabU']*SQfactor)
        Bab = np.repeat(parmDict[phfx+'BabA']*np.exp(-parmDict[phfx+'BabU']*SQfactor),len(SGT))
        Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
        FF = np.repeat(refDict['FF']['FF'][iBeg:iFin].T[Tindx].T,len(SGT),axis=0)
        Uniq = np.inner(H,SGMT) # array(nSGOp,3)
        Phi = np.inner(H,SGT)
        phase = twopi*(np.inner(Uniq,(dXdata+Xdata).T).T+Phi.T).T
        sinp = np.sin(phase) #refBlk x nOps x nAtoms
        cosp = np.cos(phase)
        occ = Mdata*Fdata/len(SGT)
        #isotropic & anisotropic Debye-Waller factors (exp clamped to avoid overflow)
        biso = -SQfactor*Uisodata[:,nxs]
        Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),len(SGT),axis=1).T
        HbH = -np.sum(Uniq.T*np.swapaxes(np.inner(bij,Uniq),2,-1),axis=1)
        Tuij = np.where(HbH<1.,np.exp(HbH),1.0).T
        Tcorr = np.reshape(Tiso,Tuij.shape)*Tuij*Mdata*Fdata/len(SGMT)
        Hij = np.array([Mast*np.multiply.outer(U,U) for U in np.reshape(Uniq,(-1,3))]) #Nref*Nops,3,3
        Hij = np.reshape(np.array([G2lat.UijtoU6(uij) for uij in Hij]),(-1,len(SGT),6)) #Nref,Nops,6
        fot = np.reshape(((FF+FP).T-Bab).T,cosp.shape)*Tcorr
        if len(FPP.shape) > 1:
            fotp = np.reshape(FPP,cosp.shape)*Tcorr
        else:
            fotp = FPP*Tcorr
        if 'T' in calcControls[hfx+'histType']:
            fa = np.array([fot*cosp,-np.reshape(Flack*FPP,sinp.shape)*sinp*Tcorr])
            fb = np.array([fot*sinp,np.reshape(Flack*FPP,cosp.shape)*cosp*Tcorr])
        else:
            fa = np.array([fot*cosp,-Flack*FPP*sinp*Tcorr])
            fb = np.array([fot*sinp,Flack*FPP*cosp*Tcorr])
        fas = np.sum(np.sum(fa,axis=-1),axis=-1) #real sum over atoms & unique hkl array(2,refBlk,nTwins)
        fbs = np.sum(np.sum(fb,axis=-1),axis=-1) #imag sum over atoms & uniq hkl
        fax = np.array([-fot*sinp,-fotp*cosp]) #positions array(2,refBlk,nEqv,nAtoms)
        fbx = np.array([fot*cosp,-fotp*sinp])
        #sum below is over Uniq
        dfadfr = np.sum(fa/occ,axis=-2) #array(2,refBlk,nAtom) Fdata != 0 avoids /0. problem
        dfadba = np.sum(-cosp*Tcorr,axis=-2) #array(refBlk,nAtom)
        dfadx = np.sum(twopi*Uniq[nxs,:,nxs,:,:]*np.swapaxes(fax,-2,-1)[:,:,:,:,nxs],axis=-2)
        dfadui = np.sum(-SQfactor[nxs,:,nxs,nxs]*fa,axis=-2) #array(Ops,refBlk,nAtoms)
        dfadua = np.sum(-Hij[nxs,:,nxs,:,:]*np.swapaxes(fa,-2,-1)[:,:,:,:,nxs],axis=-2)
        # array(2,refBlk,nAtom,3) & array(2,refBlk,nAtom,6)
        if not SGData['SGInv']:
            dfbdfr = np.sum(fb/occ,axis=-2) #Fdata != 0 avoids /0. problem
            dfbdba = np.sum(-sinp*Tcorr,axis=-2)
            dfadfl = np.sum(np.sum(-fotp*sinp,axis=-1),axis=-1)
            dfbdfl = np.sum(np.sum(fotp*cosp,axis=-1),axis=-1)
            dfbdx = np.sum(twopi*Uniq[nxs,:,nxs,:,:]*np.swapaxes(fbx,-2,-1)[:,:,:,:,nxs],axis=-2)
            dfbdui = np.sum(-SQfactor[nxs,:,nxs,nxs]*fb,axis=-2)
            dfbdua = np.sum(-Hij[nxs,:,nxs,:,:]*np.swapaxes(fb,-2,-1)[:,:,:,:,nxs],axis=-2)
        else:
            #centrosymmetric: all imaginary-part derivatives vanish
            dfbdfr = np.zeros_like(dfadfr)
            dfbdx = np.zeros_like(dfadx)
            dfbdui = np.zeros_like(dfadui)
            dfbdua = np.zeros_like(dfadua)
            dfbdba = np.zeros_like(dfadba)
            dfadfl = 0.0
            dfbdfl = 0.0
        #NB: the above have been checked against PA(1:10,1:2) in strfctr.for for Al2O3!
        SA = fas[0]+fas[1]
        SB = fbs[0]+fbs[1]
        if 'P' in calcControls[hfx+'histType']: #checked perfect for centro & noncentro
            dFdfr[iBeg:iFin] = 2.*np.sum(fas[:,:,nxs]*dfadfr+fbs[:,:,nxs]*dfbdfr,axis=0)*Mdata/len(SGMT)
            dFdx[iBeg:iFin] = 2.*np.sum(fas[:,:,nxs,nxs]*dfadx+fbs[:,:,nxs,nxs]*dfbdx,axis=0)
            dFdui[iBeg:iFin] = 2.*np.sum(fas[:,:,nxs]*dfadui+fbs[:,:,nxs]*dfbdui,axis=0)
            dFdua[iBeg:iFin] = 2.*np.sum(fas[:,:,nxs,nxs]*dfadua+fbs[:,:,nxs,nxs]*dfbdua,axis=0)
        else:
            dFdfr[iBeg:iFin] = (2.*SA[:,nxs]*(dfadfr[0]+dfadfr[1])+2.*SB[:,nxs]*(dfbdfr[0]+dfbdfr[1]))*Mdata/len(SGMT)
            dFdx[iBeg:iFin] = 2.*SA[:,nxs,nxs]*(dfadx[0]+dfadx[1])+2.*SB[:,nxs,nxs]*(dfbdx[0]+dfbdx[1])
            dFdui[iBeg:iFin] = 2.*SA[:,nxs]*(dfadui[0]+dfadui[1])+2.*SB[:,nxs]*(dfbdui[0]+dfbdui[1])
            dFdua[iBeg:iFin] = 2.*SA[:,nxs,nxs]*(dfadua[0]+dfadua[1])+2.*SB[:,nxs,nxs]*(dfbdua[0]+dfbdua[1])
            dFdfl[iBeg:iFin] = -SA*dfadfl-SB*dfbdfl #array(nRef,)
        dFdbab[iBeg:iFin] = 2.*(fas[0,nxs]*np.array([np.sum(dfadba.T*dBabdA,axis=0),np.sum(-dfadba.T*parmDict[phfx+'BabA']*SQfactor*dBabdA,axis=0)])+ \
            fbs[0,nxs]*np.array([np.sum(dfbdba.T*dBabdA,axis=0),np.sum(-dfbdba.T*parmDict[phfx+'BabA']*SQfactor*dBabdA,axis=0)])).T
        iBeg += blkSize
#    print 'derv time %.4f, nref %d, blkSize %d'%(time.time()-time0,nRef,blkSize)
        #loop over atoms - each dict entry is list of derivatives for all the reflections
    for i in range(len(Mdata)):
        dFdvDict[pfx+'Afrac:'+str(i)] = dFdfr.T[i]
        dFdvDict[pfx+'dAx:'+str(i)] = dFdx.T[0][i]
        dFdvDict[pfx+'dAy:'+str(i)] = dFdx.T[1][i]
        dFdvDict[pfx+'dAz:'+str(i)] = dFdx.T[2][i]
        dFdvDict[pfx+'AUiso:'+str(i)] = dFdui.T[i]
        dFdvDict[pfx+'AU11:'+str(i)] = dFdua.T[0][i]
        dFdvDict[pfx+'AU22:'+str(i)] = dFdua.T[1][i]
        dFdvDict[pfx+'AU33:'+str(i)] = dFdua.T[2][i]
        dFdvDict[pfx+'AU12:'+str(i)] = 2.*dFdua.T[3][i]
        dFdvDict[pfx+'AU13:'+str(i)] = 2.*dFdua.T[4][i]
        dFdvDict[pfx+'AU23:'+str(i)] = 2.*dFdua.T[5][i]
    dFdvDict[phfx+'Flack'] = 4.*dFdfl.T
    dFdvDict[phfx+'BabA'] = dFdbab.T[0]
    dFdvDict[phfx+'BabU'] = dFdbab.T[1]
    return dFdvDict
def MagStructureFactor2(refDict,G,hfx,pfx,SGData,calcControls,parmDict):
    ''' Compute neutron magnetic structure factors for all h,k,l for phase
    puts the result, F^2, in each refl[9] (and copies it to refl[7]) of
    refDict['RefList']; operates on blocks of 100 reflections for speed
    input:
    :param dict refDict: where
        'RefList' list where each ref = h,k,l,it,d,...
        'FF' dict of form factors - filled in below
    :param np.array G: reciprocal metric tensor
    :param str hfx: histogram id string
    :param str pfx: phase id string
    :param dict SGData: space group info. dictionary output from SpcGroup
    :param dict calcControls:
    :param dict parmDict:
    :returns: copy of new refList - used in calculating numerical derivatives
    '''
    #direct & reciprocal normalized metric tensors; uAmat converts moments to Cartesian
    g = nl.inv(G)
    ast = np.sqrt(np.diag(G))
    ainv = np.sqrt(np.diag(g))
    GS = G/np.outer(ast,ast)
    Ginv = g/np.outer(ainv,ainv)
    uAmat = G2lat.Gmat2AB(GS)[0]
    Mast = twopisq*np.multiply.outer(ast,ast)
    SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
    SGT = np.array([ops[1] for ops in SGData['SGOps']])
    Ncen = len(SGData['SGCen'])
    #Nops = full operator count incl. centering (and inversion unless SGFixed)
    Nops = len(SGMT)*Ncen
    if not SGData['SGFixed']:
        Nops *= (1+SGData['SGInv'])
    MFtables = calcControls['MFtables']
    Bmat = G2lat.Gmat2AB(G)[1]
    TwinLaw = np.ones(1)
#    TwinLaw = np.array([[[1,0,0],[0,1,0],[0,0,1]],])
#    TwDict = refDict.get('TwDict',{})
#    if 'S' in calcControls[hfx+'histType']:
#        NTL = calcControls[phfx+'NTL']
#        NM = calcControls[phfx+'TwinNMN']+1
#        TwinLaw = calcControls[phfx+'TwinLaw']
#        TwinFr = np.array([parmDict[phfx+'TwinFr:'+str(i)] for i in range(len(TwinLaw))])
#        TwinInv = list(np.where(calcControls[phfx+'TwinInv'],-1,1))
    Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
        GetAtomFXU(pfx,calcControls,parmDict)
    if not Xdata.size: #no atoms in phase!
        return
    #moment magnitudes, then expand the moment vectors over all sym. ops.
    Mag = np.array([np.sqrt(np.inner(mag,np.inner(mag,Ginv))) for mag in Gdata.T])
    Gdata = np.inner(Gdata.T,np.swapaxes(SGMT,1,2)).T #apply sym. ops.
    if SGData['SGInv'] and not SGData['SGFixed']:
        Gdata = np.hstack((Gdata,-Gdata)) #inversion if any
    Gdata = np.hstack([Gdata for icen in range(Ncen)]) #dup over cell centering--> [Mxyz,nops,natms]
    Gdata = SGData['MagMom'][nxs,:,nxs]*Gdata #flip vectors according to spin flip * det(opM)
    Mag = np.tile(Mag[:,nxs],Nops).T #make Mag same length as Gdata
    Kdata = np.inner(Gdata.T,uAmat).T
    Kmean = np.mean(np.sqrt(np.sum(Kdata**2,axis=0)),axis=0)
    Kdata /= Kmean #Cartesian unit vectors
    Uij = np.array(G2lat.U6toUij(Uijdata))
    bij = Mast*Uij.T
    blkSize = 100 #no. of reflections in a block - size seems optimal
    nRef = refDict['RefList'].shape[0]
    SQ = 1./(2.*refDict['RefList'].T[4])**2
    #magnetic form factors evaluated per element at each reflection's Q
    refDict['FF']['El'] = list(MFtables.keys())
    refDict['FF']['MF'] = np.zeros((nRef,len(MFtables)))
    for iel,El in enumerate(refDict['FF']['El']):
        refDict['FF']['MF'].T[iel] = G2el.MagScatFac(MFtables[El],SQ)
    #reflection processing begins here - big arrays!
    iBeg = 0
    while iBeg < nRef:
        iFin = min(iBeg+blkSize,nRef)
        refl = refDict['RefList'][iBeg:iFin] #array(blkSize,nItems)
        H = refl.T[:3].T #array(blkSize,3)
#        H = np.squeeze(np.inner(H.T,TwinLaw)) #maybe array(blkSize,nTwins,3) or (blkSize,3)
#        TwMask = np.any(H,axis=-1)
#        if TwinLaw.shape[0] > 1 and TwDict: #need np.inner(TwinLaw[?],TwDict[iref][i])*TwinInv[i]
#            for ir in range(blkSize):
#                iref = ir+iBeg
#                if iref in TwDict:
#                    for i in TwDict[iref]:
#                        for n in range(NTL):
#                            H[ir][i+n*NM] = np.inner(TwinLaw[n*NM],np.array(TwDict[iref][i])*TwinInv[i+n*NM])
#            TwMask = np.any(H,axis=-1)
        SQ = 1./(2.*refl.T[4])**2 #array(blkSize)
        SQfactor = 4.0*SQ*twopisq #ditto prev.
        Uniq = np.inner(H,SGMT)
        Phi = np.inner(H,SGT)
        phase = twopi*(np.inner(Uniq,(dXdata+Xdata).T).T+Phi.T).T
        #isotropic & anisotropic Debye-Waller factors (exp clamped to avoid overflow)
        biso = -SQfactor*Uisodata[:,nxs]
        Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),len(SGT)*len(TwinLaw),axis=1).T
        HbH = -np.sum(Uniq.T*np.swapaxes(np.inner(bij,Uniq),2,-1),axis=1)
        Tuij = np.where(HbH<1.,np.exp(HbH),1.0).T
        Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
        MF = refDict['FF']['MF'][iBeg:iFin].T[Tindx].T #Nref,Natm
        TMcorr = 0.539*(np.reshape(Tiso,Tuij.shape)*Tuij)[:,0,:]*Fdata*Mdata*MF/(2*Nops) #Nref,Natm
        #extend the phases over inversion (unless SGFixed) & all centering vectors
        if SGData['SGInv']:
            if not SGData['SGFixed']:
                mphase = np.hstack((phase,-phase)) #OK
            else:
                mphase = phase
        else:
            mphase = phase #
        mphase = np.array([mphase+twopi*np.inner(cen,H)[:,nxs,nxs] for cen in SGData['SGCen']])
        mphase = np.concatenate(mphase,axis=1) #Nref,full Nop,Natm
        sinm = np.sin(mphase) #ditto - match magstrfc.for
        cosm = np.cos(mphase) #ditto
        HM = np.inner(Bmat,H) #put into cartesian space
        HM = HM/np.sqrt(np.sum(HM**2,axis=0)) #Kdata = MAGS & HM = UVEC in magstrfc.for both OK
        #Q is the component of each moment perpendicular to the scattering vector
        eDotK = np.sum(HM[:,:,nxs,nxs]*Kdata[:,nxs,:,:],axis=0)
        Q = HM[:,:,nxs,nxs]*eDotK[nxs,:,:,:]-Kdata[:,nxs,:,:] #xyz,Nref,Nop,Natm = BPM in magstrfc.for OK
        fam = Q*TMcorr[nxs,:,nxs,:]*cosm[nxs,:,:,:]*Mag[nxs,nxs,:,:] #ditto
        fbm = Q*TMcorr[nxs,:,nxs,:]*sinm[nxs,:,:,:]*Mag[nxs,nxs,:,:] #ditto
        fams = np.sum(np.sum(fam,axis=-1),axis=-1) #Mxyz,Nref Sum(sum(fam,atoms),ops)
        fbms = np.sum(np.sum(fbm,axis=-1),axis=-1) #ditto
        refl.T[9] = np.sum(fams**2,axis=0)+np.sum(fbms**2,axis=0) #Sum(fams**2,Mxyz) Re + Im
        refl.T[7] = np.copy(refl.T[9])
        refl.T[10] = atan2d(fbms[0],fams[0]) #- what is phase for mag refl?
#        if 'P' in calcControls[hfx+'histType']: #PXC, PNC & PNT: F^2 = A[0]^2 + A[1]^2 + B[0]^2 + B[1]^2
#            refl.T[9] = np.sum(fas**2,axis=0)+np.sum(fbs**2,axis=0) #add fam**2 & fbm**2 here
#            refl.T[10] = atan2d(fbs[0],fas[0]) #ignore f' & f"
#        else: #HKLF: F^2 = (A[0]+A[1])^2 + (B[0]+B[1])^2
#            if len(TwinLaw) > 1:
#                refl.T[9] = np.sum(fas[:,:,0],axis=0)**2+np.sum(fbs[:,:,0],axis=0)**2 #FcT from primary twin element
#                refl.T[7] = np.sum(TwinFr*TwMask*np.sum(fas,axis=0)**2,axis=-1)+ \
#                    np.sum(TwinFr*TwMask*np.sum(fbs,axis=0)**2,axis=-1) #Fc sum over twins
#                refl.T[10] = atan2d(fbs[0].T[0],fas[0].T[0]) #ignore f' & f" & use primary twin
#            else: # checked correct!!
#                refl.T[9] = np.sum(fas,axis=0)**2+np.sum(fbs,axis=0)**2
#                refl.T[7] = np.copy(refl.T[9])
#                refl.T[10] = atan2d(fbs[0],fas[0]) #ignore f' & f"
##                refl.T[10] = atan2d(np.sum(fbs,axis=0),np.sum(fas,axis=0)) #include f' & f"
        iBeg += blkSize
#    print 'sf time %.4f, nref %d, blkSize %d'%(time.time()-time0,nRef,blkSize)
    return copy.deepcopy(refDict['RefList'])
def MagStructureFactorDerv2(refDict,G,hfx,pfx,SGData,calcControls,parmDict):
    '''Compute magnetic structure factor derivatives numerically - for powders/nontwins only
    input:
    :param dict refDict: where
        'RefList' list where each ref = h,k,l,it,d,...
        'FF' dict of form factors - filled in below
    :param np.array G: reciprocal metric tensor
    :param str hfx: histogram id string
    :param str pfx: phase id string
    :param dict SGData: space group info. dictionary output from SpcGroup
    :param dict calcControls:
    :param dict parmDict:
    :returns: dict dFdvDict: dictionary of magnetic derivatives
    '''
    #central differences of F^2 (refList column 9) w.r.t. each magnetic moment parameter
    workDict = copy.deepcopy(refDict)   #scratch copy; MagStructureFactor2 overwrites its refList
    step = 1.e-6
    dFdvDict = {}
    for parm in parmDict:
        if 'AM' not in parm:    #only atomic moment parameters get derivatives here
            continue
        parmDict[parm] += step
        plusRefs = MagStructureFactor2(workDict,G,hfx,pfx,SGData,calcControls,parmDict)
        parmDict[parm] -= 2*step
        minusRefs = MagStructureFactor2(workDict,G,hfx,pfx,SGData,calcControls,parmDict)
        parmDict[parm] += step      #restore the original parameter value
        dFdvDict[parm] = (plusRefs[:,9]-minusRefs[:,9])/(2.*step)
    return dFdvDict
def MagStructureFactorDerv(refDict,G,hfx,pfx,SGData,calcControls,parmDict):
    '''Compute nonmagnetic structure factor derivatives on blocks of reflections in magnetic structures - for powders/nontwins only
    input:
    :param dict refDict: where
        'RefList' list where each ref = h,k,l,it,d,...
        'FF' dict of form factors - filled in below
    :param np.array G: reciprocal metric tensor
    :param str hfx: histogram id string
    :param str pfx: phase id string
    :param dict SGData: space group info. dictionary output from SpcGroup
    :param dict calcControls:
    :param dict parmDict:
    :returns: dict dFdvDict: dictionary of derivatives (atom fractions,
        positions and Uiso/Uij terms), keyed by parameter name
    '''
    #direct & reciprocal normalized metric tensors; uAmat converts moments to Cartesian
    g = nl.inv(G)
    ast = np.sqrt(np.diag(G))
    ainv = np.sqrt(np.diag(g))
    GS = G/np.outer(ast,ast)
    Ginv = g/np.outer(ainv,ainv)
    uAmat = G2lat.Gmat2AB(GS)[0]
    Mast = twopisq*np.multiply.outer(ast,ast)
    SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
    SGT = np.array([ops[1] for ops in SGData['SGOps']])
    Ncen = len(SGData['SGCen'])
    #Nops = full operator count incl. centering (and inversion unless SGFixed)
    Nops = len(SGMT)*Ncen
    if not SGData['SGFixed']:
        Nops *= (1+SGData['SGInv'])
    Bmat = G2lat.Gmat2AB(G)[1]
    nRef = len(refDict['RefList'])
    Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
        GetAtomFXU(pfx,calcControls,parmDict)
    if not Xdata.size: #no atoms in phase!
        return {}
    mSize = len(Mdata)
    #moment magnitudes, then expand moment vectors over all sym. ops./inversion/centering
    Mag = np.array([np.sqrt(np.inner(mag,np.inner(mag,Ginv))) for mag in Gdata.T])
    Gones = np.ones_like(Gdata)
    Gdata = np.inner(Gdata.T,np.swapaxes(SGMT,1,2)).T #apply sym. ops.
    Gones = np.inner(Gones.T,SGMT).T
    if SGData['SGInv'] and not SGData['SGFixed']:
        Gdata = np.hstack((Gdata,-Gdata)) #inversion if any
        Gones = np.hstack((Gones,-Gones)) #inversion if any
    Gdata = np.hstack([Gdata for icen in range(Ncen)]) #dup over cell centering
    Gones = np.hstack([Gones for icen in range(Ncen)]) #dup over cell centering
    Gdata = SGData['MagMom'][nxs,:,nxs]*Gdata #flip vectors according to spin flip
    Gones = SGData['MagMom'][nxs,:,nxs]*Gones #flip vectors according to spin flip
    Mag = np.tile(Mag[:,nxs],Nops).T #make Mag same length as Gdata
    Kdata = np.inner(Gdata.T,uAmat).T #Cartesian unit vectors
    Kmean = np.mean(np.sqrt(np.sum(Kdata**2,axis=0)),axis=0)
    Kdata /= Kmean
    Uij = np.array(G2lat.U6toUij(Uijdata))
    bij = Mast*Uij.T
    #accumulators: one row per reflection, one slot per atom/parameter
    dFdvDict = {}
    dFdfr = np.zeros((nRef,mSize))
    dFdx = np.zeros((nRef,mSize,3))
    dFdui = np.zeros((nRef,mSize))
    dFdua = np.zeros((nRef,mSize,6))
    time0 = time.time()
    #reflection processing begins here - big arrays!
    iBeg = 0
    blkSize = 5 #no. of reflections in a block - optimized for speed
    while iBeg < nRef:
        iFin = min(iBeg+blkSize,nRef)
        refl = refDict['RefList'][iBeg:iFin] #array(blkSize,nItems)
        H = refl.T[:3].T
        SQ = 1./(2.*refl.T[4])**2 # or (sin(theta)/lambda)**2
        SQfactor = 8.0*SQ*np.pi**2
        Uniq = np.inner(H,SGMT) # array(nSGOp,3)
        Phi = np.inner(H,SGT)
        phase = twopi*(np.inner(Uniq,(dXdata+Xdata).T).T+Phi.T).T
        occ = Mdata*Fdata/Nops
        #isotropic & anisotropic Debye-Waller factors (exp clamped to avoid overflow)
        biso = -SQfactor*Uisodata[:,nxs]
        Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),len(SGT),axis=1).T
        HbH = -np.sum(Uniq.T*np.swapaxes(np.inner(bij,Uniq),2,-1),axis=1)
        Tuij = np.where(HbH<1.,np.exp(HbH),1.0).T
        Hij = np.array([Mast*np.multiply.outer(U,U) for U in np.reshape(Uniq,(-1,3))])
        Hij = np.reshape(np.array([G2lat.UijtoU6(uij) for uij in Hij]),(-1,len(SGT),6))
        Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
        MF = refDict['FF']['MF'][iBeg:iFin].T[Tindx].T #Nref,Natm
        TMcorr = 0.539*(np.reshape(Tiso,Tuij.shape)*Tuij)[:,0,:]*Fdata*Mdata*MF/(2*Nops) #Nref,Natm
        #extend phases, Uniq & Hij over inversion (unless SGFixed) & centering vectors
        if SGData['SGInv']:
            if not SGData['SGFixed']:
                mphase = np.hstack((phase,-phase)) #OK
                Uniq = np.hstack((Uniq,-Uniq)) #Nref,Nops,hkl
                Hij = np.hstack((Hij,Hij))
            else:
                mphase = phase
        else:
            mphase = phase #
        Hij = np.concatenate(np.array([Hij for cen in SGData['SGCen']]),axis=1)
        Uniq = np.hstack([Uniq for cen in SGData['SGCen']])
        mphase = np.array([mphase+twopi*np.inner(cen,H)[:,nxs,nxs] for cen in SGData['SGCen']])
        mphase = np.concatenate(mphase,axis=1) #Nref,Nop,Natm
        sinm = np.sin(mphase) #ditto - match magstrfc.for
        cosm = np.cos(mphase) #ditto
        #NOTE(review): Bmat.T here vs Bmat in MagStructureFactor2 - confirm intended
        HM = np.inner(Bmat.T,H) #put into cartesian space
        HM = HM/np.sqrt(np.sum(HM**2,axis=0)) #unit cartesian vector for H
        #Q is the component of each moment perpendicular to the scattering vector
        eDotK = np.sum(HM[:,:,nxs,nxs]*Kdata[:,nxs,:,:],axis=0)
        Q = HM[:,:,nxs,nxs]*eDotK[nxs,:,:,:]-Kdata[:,nxs,:,:] #Mxyz,Nref,Nop,Natm = BPM in magstrfc.for OK
        fam = Q*TMcorr[nxs,:,nxs,:]*cosm[nxs,:,:,:]*Mag[nxs,nxs,:,:] #Mxyz,Nref,Nop,Natm
        fbm = Q*TMcorr[nxs,:,nxs,:]*sinm[nxs,:,:,:]*Mag[nxs,nxs,:,:]
        fams = np.sum(np.sum(fam,axis=-1),axis=-1) #Mxyz,Nref
        fbms = np.sum(np.sum(fbm,axis=-1),axis=-1)
        famx = -Q*TMcorr[nxs,:,nxs,:]*Mag[nxs,nxs,:,:]*sinm[nxs,:,:,:] #Mxyz,Nref,Nops,Natom
        fbmx = Q*TMcorr[nxs,:,nxs,:]*Mag[nxs,nxs,:,:]*cosm[nxs,:,:,:]
        #sums below are over Nops - real part
        dfadfr = np.sum(fam/occ,axis=2) #array(Mxyz,refBlk,nAtom) Fdata != 0 avoids /0. problem deriv OK
        dfadx = np.sum(twopi*Uniq[nxs,:,:,nxs,:]*famx[:,:,:,:,nxs],axis=2) #deriv OK
        dfadui = np.sum(-SQfactor[:,nxs,nxs]*fam,axis=2) #array(Ops,refBlk,nAtoms) deriv OK
        dfadua = np.sum(-Hij[nxs,:,:,nxs,:]*fam[:,:,:,:,nxs],axis=2) #deriv OK
        # imaginary part; array(3,refBlk,nAtom,3) & array(3,refBlk,nAtom,6)
        dfbdfr = np.sum(fbm/occ,axis=2) #array(mxyz,refBlk,nAtom) Fdata != 0 avoids /0. problem
        dfbdx = np.sum(twopi*Uniq[nxs,:,:,nxs,:]*fbmx[:,:,:,:,nxs],axis=2)
        dfbdui = np.sum(-SQfactor[:,nxs,nxs]*fbm,axis=2) #array(Ops,refBlk,nAtoms)
        dfbdua = np.sum(-Hij[nxs,:,:,nxs,:]*fbm[:,:,:,:,nxs],axis=2)
        #accumulate derivatives
        dFdfr[iBeg:iFin] = 2.*np.sum((fams[:,:,nxs]*dfadfr+fbms[:,:,nxs]*dfbdfr)*Mdata/Nops,axis=0) #ok
        dFdx[iBeg:iFin] = 2.*np.sum(fams[:,:,nxs,nxs]*dfadx+fbms[:,:,nxs,nxs]*dfbdx,axis=0) #ok
        dFdui[iBeg:iFin] = 2.*np.sum(fams[:,:,nxs]*dfadui+fbms[:,:,nxs]*dfbdui,axis=0) #ok
        dFdua[iBeg:iFin] = 2.*np.sum(fams[:,:,nxs,nxs]*dfadua+fbms[:,:,nxs,nxs]*dfbdua,axis=0) #ok
        iBeg += blkSize
    print (' %d derivative time %.4f\r'%(nRef,time.time()-time0))
        #loop over atoms - each dict entry is list of derivatives for all the reflections
    for i in range(len(Mdata)):
        dFdvDict[pfx+'Afrac:'+str(i)] = dFdfr.T[i]
        dFdvDict[pfx+'dAx:'+str(i)] = dFdx.T[0][i]
        dFdvDict[pfx+'dAy:'+str(i)] = dFdx.T[1][i]
        dFdvDict[pfx+'dAz:'+str(i)] = dFdx.T[2][i]
        dFdvDict[pfx+'AUiso:'+str(i)] = dFdui.T[i]
        dFdvDict[pfx+'AU11:'+str(i)] = dFdua.T[0][i]
        dFdvDict[pfx+'AU22:'+str(i)] = dFdua.T[1][i]
        dFdvDict[pfx+'AU33:'+str(i)] = dFdua.T[2][i]
        dFdvDict[pfx+'AU12:'+str(i)] = 2.*dFdua.T[3][i]
        dFdvDict[pfx+'AU13:'+str(i)] = 2.*dFdua.T[4][i]
        dFdvDict[pfx+'AU23:'+str(i)] = 2.*dFdua.T[5][i]
    return dFdvDict
def StructureFactorDervTw2(refDict,G,hfx,pfx,SGData,calcControls,parmDict):
'''Compute structure factor derivatives on blocks of reflections - for twins only
faster than StructureFactorDervTw
input:
:param dict refDict: where
'RefList' list where each ref = h,k,l,it,d,...
'FF' dict of form factors - filled in below
:param np.array G: reciprocal metric tensor
:param str hfx: histogram id string
:param str pfx: phase id string
:param dict SGData: space group info. dictionary output from SpcGroup
:param dict calcControls:
:param dict parmDict:
:returns: dict dFdvDict: dictionary of derivatives
'''
phfx = pfx.split(':')[0]+hfx
ast = np.sqrt(np.diag(G))
Mast = twopisq*np.multiply.outer(ast,ast)
SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
SGT = np.array([ops[1] for ops in SGData['SGOps']])
FFtables = calcControls['FFtables']
BLtables = calcControls['BLtables']
TwDict = refDict.get('TwDict',{})
NTL = calcControls[phfx+'NTL']
NM = calcControls[phfx+'TwinNMN']+1
TwinLaw = calcControls[phfx+'TwinLaw']
TwinFr = np.array([parmDict[phfx+'TwinFr:'+str(i)] for i in range(len(TwinLaw))])
TwinInv = list(np.where(calcControls[phfx+'TwinInv'],-1,1))
nTwin = len(TwinLaw)
nRef = len(refDict['RefList'])
Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
GetAtomFXU(pfx,calcControls,parmDict)
if not Xdata.size: #no atoms in phase!
return {}
mSize = len(Mdata)
FF = np.zeros(len(Tdata))
if 'NC' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResCW(Tdata,BLtables,parmDict[hfx+'Lam'])
elif 'X' in calcControls[hfx+'histType']:
FP = np.array([FFtables[El][hfx+'FP'] for El in Tdata])
FPP = np.array([FFtables[El][hfx+'FPP'] for El in Tdata])
Uij = np.array(G2lat.U6toUij(Uijdata))
bij = Mast*Uij.T
dFdvDict = {}
dFdfr = np.zeros((nRef,nTwin,mSize))
dFdx = np.zeros((nRef,nTwin,mSize,3))
dFdui = np.zeros((nRef,nTwin,mSize))
dFdua = np.zeros((nRef,nTwin,mSize,6))
dFdbab = np.zeros((nRef,nTwin,2))
dFdtw = np.zeros((nRef,nTwin))
time0 = time.time()
#reflection processing begins here - big arrays!
iBeg = 0
blkSize = 16 #no. of reflections in a block - optimized for speed
while iBeg < nRef:
iFin = min(iBeg+blkSize,nRef)
refl = refDict['RefList'][iBeg:iFin] #array(blkSize,nItems)
H = refl.T[:3]
H = np.inner(H.T,TwinLaw) #array(3,nTwins)
TwMask = np.any(H,axis=-1)
for ir in range(blkSize):
iref = ir+iBeg
if iref in TwDict:
for i in TwDict[iref]:
for n in range(NTL):
H[ir][i+n*NM] = np.inner(TwinLaw[n*NM],np.array(TwDict[iref][i])*TwinInv[i+n*NM])
TwMask = np.any(H,axis=-1)
SQ = 1./(2.*refl.T[4])**2 # or (sin(theta)/lambda)**2
SQfactor = 8.0*SQ*np.pi**2
if 'T' in calcControls[hfx+'histType']:
if 'P' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[14])
else:
FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[12])
FP = np.repeat(FP.T,len(SGT)*len(TwinLaw),axis=0)
FPP = np.repeat(FPP.T,len(SGT)*len(TwinLaw),axis=0)
dBabdA = np.exp(-parmDict[phfx+'BabU']*SQfactor)
Bab = np.repeat(parmDict[phfx+'BabA']*dBabdA,len(SGT)*nTwin)
Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
FF = np.repeat(refDict['FF']['FF'][iBeg:iFin].T[Tindx].T,len(SGT)*len(TwinLaw),axis=0)
Uniq = np.inner(H,SGMT) # (nTwin,nSGOp,3)
Phi = np.inner(H,SGT)
phase = twopi*(np.inner(Uniq,(dXdata+Xdata).T).T+Phi.T).T
sinp = np.sin(phase)
cosp = np.cos(phase)
occ = Mdata*Fdata/len(SGT)
biso = -SQfactor*Uisodata[:,nxs]
Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),len(SGT)*nTwin,axis=1)
HbH = -np.sum(Uniq.T*np.swapaxes(np.inner(bij,Uniq),2,-1),axis=1)
Hij = np.array([Mast*np.multiply.outer(U,U) for U in np.reshape(Uniq,(-1,3))])
Hij = np.reshape(np.array([G2lat.UijtoU6(uij) for uij in Hij]),(-1,nTwin,len(SGT),6))
Tuij = np.where(HbH<1.,np.exp(HbH),1.0)
Tcorr = (np.reshape(Tiso,Tuij.shape)*Tuij).T*Mdata*Fdata/len(SGMT)
fot = np.reshape(((FF+FP).T-Bab).T,cosp.shape)*Tcorr
fotp = FPP*Tcorr
if 'T' in calcControls[hfx+'histType']: #fa,fb are 2 X blkSize X nTwin X nOps x nAtoms
fa = np.array([np.reshape(((FF+FP).T-Bab).T,cosp.shape)*cosp*Tcorr,-np.reshape(FPP,sinp.shape)*sinp*Tcorr])
fb = np.array([np.reshape(((FF+FP).T-Bab).T,sinp.shape)*sinp*Tcorr,np.reshape(FPP,cosp.shape)*cosp*Tcorr])
else:
fa = np.array([np.reshape(((FF+FP).T-Bab).T,cosp.shape)*cosp*Tcorr,-FPP*sinp*Tcorr])
fb = np.array([np.reshape(((FF+FP).T-Bab).T,sinp.shape)*sinp*Tcorr,FPP*cosp*Tcorr])
fas = np.sum(np.sum(fa,axis=-1),axis=-1) #real sum over atoms & unique hkl array(2,nTwins)
fbs = np.sum(np.sum(fb,axis=-1),axis=-1) #imag sum over atoms & uniq hkl
if SGData['SGInv']: #centrosymmetric; B=0
fbs[0] *= 0.
fas[1] *= 0.
fax = np.array([-fot*sinp,-fotp*cosp]) #positions array(2,nRef,ntwi,nEqv,nAtoms)
fbx = np.array([fot*cosp,-fotp*sinp])
#sum below is over Uniq
dfadfr = np.sum(np.sum(fa/occ,axis=-2),axis=0) #array(2,nRef,ntwin,nAtom) Fdata != 0 avoids /0. problem
dfadba = np.sum(-cosp*Tcorr[:,nxs],axis=1)
dfadui = np.sum(np.sum(-SQfactor[nxs,:,nxs,nxs,nxs]*fa,axis=-2),axis=0)
dfadx = np.sum(np.sum(twopi*Uniq[nxs,:,:,:,nxs,:]*fax[:,:,:,:,:,nxs],axis=-3),axis=0) # nRef x nTwin x nAtoms x xyz; sum on ops & A,A'
dfadua = np.sum(np.sum(-Hij[nxs,:,:,:,nxs,:]*fa[:,:,:,:,:,nxs],axis=-3),axis=0)
if not SGData['SGInv']:
dfbdfr = np.sum(np.sum(fb/occ,axis=-2),axis=0) #Fdata != 0 avoids /0. problem
dfadba /= 2.
# dfbdba = np.sum(-sinp*Tcorr[:,nxs],axis=1)/2.
dfbdui = np.sum(np.sum(-SQfactor[nxs,:,nxs,nxs,nxs]*fb,axis=-2),axis=0)
dfbdx = np.sum(np.sum(twopi*Uniq[nxs,:,:,:,nxs,:]*fbx[:,:,:,:,:,nxs],axis=-3),axis=0)
dfbdua = np.sum(np.sum(-Hij[nxs,:,:,:,nxs,:]*fb[:,:,:,:,:,nxs],axis=-3),axis=0)
else:
dfbdfr = np.zeros_like(dfadfr)
dfbdx = np.zeros_like(dfadx)
dfbdui = np.zeros_like(dfadui)
dfbdua = np.zeros_like(dfadua)
# dfbdba = np.zeros_like(dfadba)
SA = fas[0]+fas[1]
SB = fbs[0]+fbs[1]
dFdfr[iBeg:iFin] = ((2.*TwMask*SA)[:,:,nxs]*dfadfr+(2.*TwMask*SB)[:,:,nxs]*dfbdfr)*Mdata[nxs,nxs,:]/len(SGMT)
dFdx[iBeg:iFin] = (2.*TwMask*SA)[:,:,nxs,nxs]*dfadx+(2.*TwMask*SB)[:,:,nxs,nxs]*dfbdx
dFdui[iBeg:iFin] = (2.*TwMask*SA)[:,:,nxs]*dfadui+(2.*TwMask*SB)[:,:,nxs]*dfbdui
dFdua[iBeg:iFin] = (2.*TwMask*SA)[:,:,nxs,nxs]*dfadua+(2.*TwMask*SB)[:,:,nxs,nxs]*dfbdua
if SGData['SGInv']: #centrosymmetric; B=0
dFdtw[iBeg:iFin] = np.sum(TwMask[nxs,:]*fas,axis=0)**2
else:
dFdtw[iBeg:iFin] = np.sum(TwMask[nxs,:]*fas,axis=0)**2+np.sum(TwMask[nxs,:]*fbs,axis=0)**2
# dFdbab[iBeg:iFin] = fas[0,:,nxs]*np.array([np.sum(dfadba*dBabdA),np.sum(-dfadba*parmDict[phfx+'BabA']*SQfactor*dBabdA)]).T+ \
# fbs[0,:,nxs]*np.array([np.sum(dfbdba*dBabdA),np.sum(-dfbdba*parmDict[phfx+'BabA']*SQfactor*dBabdA)]).T
iBeg += blkSize
print (' %d derivative time %.4f\r'%(len(refDict['RefList']),time.time()-time0))
#loop over atoms - each dict entry is list of derivatives for all the reflections
for i in range(len(Mdata)): #these all OK
dFdvDict[pfx+'Afrac:'+str(i)] = np.sum(dFdfr.T[i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'dAx:'+str(i)] = np.sum(dFdx.T[0][i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'dAy:'+str(i)] = np.sum(dFdx.T[1][i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'dAz:'+str(i)] = np.sum(dFdx.T[2][i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'AUiso:'+str(i)] = np.sum(dFdui.T[i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'AU11:'+str(i)] = np.sum(dFdua.T[0][i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'AU22:'+str(i)] = np.sum(dFdua.T[1][i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'AU33:'+str(i)] = np.sum(dFdua.T[2][i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'AU12:'+str(i)] = 2.*np.sum(dFdua.T[3][i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'AU13:'+str(i)] = 2.*np.sum(dFdua.T[4][i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'AU23:'+str(i)] = 2.*np.sum(dFdua.T[5][i]*TwinFr[:,nxs],axis=0)
dFdvDict[phfx+'BabA'] = dFdbab.T[0]
dFdvDict[phfx+'BabU'] = dFdbab.T[1]
for i in range(nTwin):
dFdvDict[phfx+'TwinFr:'+str(i)] = dFdtw.T[i]
return dFdvDict
def SStructureFactor(refDict,G,hfx,pfx,SGData,SSGData,calcControls,parmDict):
    '''
    Compute super structure factors for all h,k,l,m for phase - no twins.
    Puts the result, F^2, in each ref[10] in refList (and copies it to ref[8]
    for non-powder histograms); ref[11] gets the phase angle.
    Works on blocks of 48 reflections for speed.
    Also fills in refDict['FF'] (form factor tables) as a side effect.
    input:

    :param dict refDict: where
        'RefList' list where each ref = h,k,l,m,it,d,...
        'FF' dict of form factors - filed in below
    :param np.array G: reciprocal metric tensor
    :param str hfx: histogram id string
    :param str pfx: phase id string
    :param dict SGData: space group info. dictionary output from SpcGroup
    :param dict SSGData: super space group info.
    :param dict calcControls:
    :param dict parmDict:

    :returns: copy.deepcopy(refDict['RefList']) with F^2 filled in, or
        None if the phase contains no atoms
    '''
    phfx = pfx.split(':')[0]+hfx
    ast = np.sqrt(np.diag(G))
    GS = G/np.outer(ast,ast)
    uAmat,uBmat = G2lat.Gmat2AB(GS)
    Mast = twopisq*np.multiply.outer(ast,ast)
    SGInv = SGData['SGInv']
    SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
    Ncen = len(SGData['SGCen'])
    Nops = len(SGMT)*(1+SGData['SGInv'])
    SSGMT = np.array([ops[0].T for ops in SSGData['SSGOps']])
    SSGT = np.array([ops[1] for ops in SSGData['SSGOps']])
    SSCen = SSGData['SSGCen']
    FFtables = calcControls['FFtables']
    BLtables = calcControls['BLtables']
    MFtables = calcControls['MFtables']
    Amat,Bmat = G2lat.Gmat2AB(G)
    Flack = 1.0
    if not SGData['SGInv'] and 'S' in calcControls[hfx+'histType'] and phfx+'Flack' in parmDict:
        Flack = 1.-2.*parmDict[phfx+'Flack']
    Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
        GetAtomFXU(pfx,calcControls,parmDict)
    if not Xdata.size:          #no atoms in phase!
        return
    waveTypes,FSSdata,XSSdata,USSdata,MSSdata = GetAtomSSFXU(pfx,calcControls,parmDict)
    ngl,nWaves,Fmod,Xmod,Umod,Mmod,glTau,glWt = G2mth.makeWaves(waveTypes,FSSdata,XSSdata,USSdata,MSSdata,Mast)    #NB: Mmod is ReIm,Mxyz,Ntau,Natm
    modQ = np.array([parmDict[pfx+'mV0'],parmDict[pfx+'mV1'],parmDict[pfx+'mV2']])
    if parmDict[pfx+'isMag']:       #This part correct for making modulated mag moments on equiv atoms - Mmod matched drawing & Bilbao drawings
        mXYZ = np.array([[xyz[0] for xyz in list(G2spc.GenAtom(xyz,SGData,All=True,Move=True))] for xyz in (Xdata+dXdata).T])%1. #Natn,Nop,xyz
        MmodA,MmodB = G2mth.MagMod(glTau,mXYZ,modQ,MSSdata,SGData,SSGData)  #Ntau,Nops,Natm,Mxyz cos,sim parts sum matches drawing
        Mmod = MmodA+MmodB
        if not SGData['SGGray']:    #for fixed Mx,My,Mz
            GSdata = np.inner(Gdata.T,np.swapaxes(SGMT,1,2))  #apply sym. ops.--> Natm,Nops,Nxyz
            if SGData['SGInv'] and not SGData['SGFixed']:   #inversion if any
                GSdata = np.hstack((GSdata,-GSdata))
            GSdata = np.hstack([GSdata for cen in SSCen])        #dup over cell centering - Natm,Nops,Mxyz
            GSdata = SGData['MagMom'][nxs,:,nxs]*GSdata   #flip vectors according to spin flip * det(opM)
            GSdata = np.swapaxes(GSdata,0,1)    #Nop,Natm,Mxyz
            Mmod += GSdata[nxs,:,:,:]
        #convert to unit moment directions in Cartesian frame
        Kdata = np.inner(Mmod,uAmat)        #Ntau,Nop,Natm,Mxyz
        Mag = np.sqrt(np.sum(Kdata**2,axis=-1))
        Kdata /= Mag[:,:,:,nxs]     #Cartesian unit vectors
        Kdata = np.nan_to_num(Kdata)    #Ntau,Nops,Natm,Mxyz  (zero-length moments -> 0)
    FF = np.zeros(len(Tdata))
    if 'NC' in calcControls[hfx+'histType']:
        FP,FPP = G2el.BlenResCW(Tdata,BLtables,parmDict[hfx+'Lam'])
    elif 'X' in calcControls[hfx+'histType']:
        FP = np.array([FFtables[El][hfx+'FP'] for El in Tdata])
        FPP = np.array([FFtables[El][hfx+'FPP'] for El in Tdata])
    Uij = np.array(G2lat.U6toUij(Uijdata)).T
    bij = Mast*Uij
    blkSize = 48       #no. of reflections in a block
    nRef = refDict['RefList'].shape[0]
    SQ = 1./(2.*refDict['RefList'].T[5])**2
    #fill in the form-factor (or scattering-length/mag form-factor) tables once
    if 'N' in calcControls[hfx+'histType']:
        dat = G2el.getBLvalues(BLtables)
        refDict['FF']['El'] = list(dat.keys())
        refDict['FF']['FF'] = np.ones((nRef,len(dat)))*list(dat.values())
        refDict['FF']['MF'] = np.zeros((nRef,len(dat)))
        for iel,El in enumerate(refDict['FF']['El']):
            if El in MFtables:
                refDict['FF']['MF'].T[iel] = G2el.MagScatFac(MFtables[El],SQ)
    else:
        dat = G2el.getFFvalues(FFtables,0.)
        refDict['FF']['El'] = list(dat.keys())
        refDict['FF']['FF'] = np.zeros((nRef,len(dat)))
        for iel,El in enumerate(refDict['FF']['El']):
            refDict['FF']['FF'].T[iel] = G2el.ScatFac(FFtables[El],SQ)
#    time0 = time.time()
#reflection processing begins here - big arrays!
    iBeg = 0
    while iBeg < nRef:
        iFin = min(iBeg+blkSize,nRef)
        mRef= iFin-iBeg
        refl = refDict['RefList'][iBeg:iFin]    #array(blkSize,nItems) -- a view; writes below are in place
        H = refl.T[:4]                          #array(blkSize,4)
        HP = H[:3]+modQ[:,nxs]*H[3:]            #projected hklm to hkl
        SQ = 1./(2.*refl.T[5])**2               #array(blkSize)
        SQfactor = 4.0*SQ*twopisq               #ditto prev.
        Uniq = np.inner(H.T,SSGMT)
        UniqP = np.inner(HP.T,SGMT)
        Phi = np.inner(H.T,SSGT)
        if SGInv and not SGData['SGFixed']:   #if centro - expand HKL sets
            Uniq = np.hstack((Uniq,-Uniq))
            Phi = np.hstack((Phi,-Phi))
            UniqP = np.hstack((UniqP,-UniqP))
        if 'T' in calcControls[hfx+'histType']:
            if 'P' in calcControls[hfx+'histType']:
                FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[14])
            else:
                FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[12])
            FP = np.repeat(FP.T,Uniq.shape[1],axis=0)
            FPP = np.repeat(FPP.T,Uniq.shape[1],axis=0)
        Bab = 0.
        if phfx+'BabA' in parmDict:
            Bab = np.repeat(parmDict[phfx+'BabA']*np.exp(-parmDict[phfx+'BabU']*SQfactor),Uniq.shape[1])
        Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
        FF = np.repeat(refDict['FF']['FF'][iBeg:iFin].T[Tindx].T,Uniq.shape[1],axis=0)
        phase = twopi*(np.inner(Uniq[:,:,:3],(dXdata.T+Xdata.T))-Phi[:,:,nxs])
        phase = np.hstack([phase for cen in SSCen])
        sinp = np.sin(phase)
        cosp = np.cos(phase)
        biso = -SQfactor*Uisodata[:,nxs]
        Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),Uniq.shape[1],axis=1).T
        HbH = -np.sum(UniqP[:,:,nxs,:]*np.inner(UniqP[:,:,:],bij),axis=-1)  #use hklt proj to hkl
        Tuij = np.where(HbH<1.,np.exp(HbH),1.0)
        Tcorr = np.reshape(Tiso,Tuij.shape)*Tuij*Mdata*Fdata/Uniq.shape[1]  #refBlk x ops x atoms
        if 'N' in calcControls[hfx+'histType'] and parmDict[pfx+'isMag']:   #TODO: mag math here??
            phasem = twopi*np.inner(HP.T[:,:3],mXYZ)    #2pi(Q.r)
            phasem = np.swapaxes(phasem,1,2)    #Nref,Nops,Natm
            cosm = np.cos(phasem)
            sinm = np.sin(phasem)
            MF = refDict['FF']['MF'][iBeg:iFin].T[Tindx].T   #Nref,Natm
            TMcorr = 0.539*(np.reshape(Tiso,Tuij.shape)*Tuij)[:,0,:]*Mdata*Fdata*MF/(2*Nops)    #Nref,Natm
            HM = np.inner(uBmat,HP.T)                #put into cartesian space X||H,Z||H*L
            eM = (HM/np.sqrt(np.sum(HM**2,axis=0))).T    # normalize HP  Nref,hkl=Unit vectors || Q
#for fixed moments --> m=0 reflections
            fam0 = 0.
            fbm0 = 0.
            if not SGData['SGGray']:    #correct -fixed Mx,My,Mz contribution
                fam0 = TMcorr[:,nxs,:,nxs]*GSdata[nxs,:,:,:]*cosm[:,:,:,nxs]    #Nref,Nops,Natm,Mxyz
                fbm0 = TMcorr[:,nxs,:,nxs]*GSdata[nxs,:,:,:]*sinm[:,:,:,nxs]
#for modulated moments --> m != 0 reflections
            fams = TMcorr[:,nxs,nxs,:,nxs]*np.array([np.where(H[3,i]!=0,(MmodA*cosm[i,nxs,:,:,nxs]-    \
                np.sign(H[3,i])*MmodB*sinm[i,nxs,:,:,nxs]),0.) for i in range(mRef)])    #Nref,Ntau,Nops,Natm,Mxyz
            fbms = TMcorr[:,nxs,nxs,:,nxs]*np.array([np.where(H[3,i]!=0,(MmodA*sinm[i,nxs,:,:,nxs]+    \
                np.sign(H[3,i])*MmodB*cosm[i,nxs,:,:,nxs]),0.) for i in range(mRef)])    #Nref,Ntau,Nops,Natm,Mxyz
            if not SGData['SGGray']:
                fams += fam0[:,nxs,:,:,:]
                fbms += fbm0[:,nxs,:,:,:]
#do sum on ops, atms 1st
            fasm = np.sum(np.sum(fams,axis=-2),axis=-2)    #Nref,Ntau,Mxyz; sum ops & atoms
            fbsm = np.sum(np.sum(fbms,axis=-2),axis=-2)
#put into cartesian space
            facm = np.inner(fasm,uBmat.T)
            fbcm = np.inner(fbsm,uBmat.T)
#form e.F dot product
            eDotFa = np.sum(eM[:,nxs,:]*facm,axis=-1)    #Nref,Ntau
            eDotFb = np.sum(eM[:,nxs,:]*fbcm,axis=-1)
#intensity  (perpendicular-to-Q component only)
            fass = np.sum(fasm**2,axis=-1)-eDotFa**2
            fbss = np.sum(fbsm**2,axis=-1)-eDotFb**2
#do integration  (Gauss-Legendre over tau with weights glWt)
            fas = np.sum(glWt*fass,axis=1)/2.
            fbs = np.sum(glWt*fbss,axis=1)/2.
            refl.T[10] = fas+fbs
            refl.T[11] = atan2d(fbs,fas)
        else:
            GfpuA = G2mth.Modulation(Uniq,UniqP,nWaves,Fmod,Xmod,Umod,glTau,glWt) #2 x refBlk x sym X atoms
            if 'T' in calcControls[hfx+'histType']:
                fa = np.array([np.reshape(((FF+FP).T-Bab).T,cosp.shape)*cosp*Tcorr,-np.reshape(Flack*FPP,sinp.shape)*sinp*Tcorr])
                fb = np.array([np.reshape(Flack*FPP,cosp.shape)*cosp*Tcorr,np.reshape(((FF+FP).T-Bab).T,sinp.shape)*sinp*Tcorr])
            else:
                fa = np.array([np.reshape(((FF+FP).T-Bab).T,cosp.shape)*cosp*Tcorr,-Flack*FPP*sinp*Tcorr])
                fb = np.array([Flack*FPP*cosp*Tcorr,np.reshape(((FF+FP).T-Bab).T,sinp.shape)*sinp*Tcorr])
            fag = fa*GfpuA[0]-fb*GfpuA[1]   #real; 2 x refBlk x sym x atoms
            fbg = fb*GfpuA[0]+fa*GfpuA[1]
            fas = np.sum(np.sum(fag,axis=-1),axis=-1)   #2 x refBlk; sum sym & atoms
            fbs = np.sum(np.sum(fbg,axis=-1),axis=-1)
            refl.T[10] = np.sum(fas,axis=0)**2+np.sum(fbs,axis=0)**2    #square of sums
            refl.T[11] = atan2d(fbs[0],fas[0])  #use only tau=0; ignore f' & f"
        if 'P' not in calcControls[hfx+'histType']:
            refl.T[8] = np.copy(refl.T[10])
        iBeg += blkSize
#    print ('nRef %d time %.4f\r'%(nRef,time.time()-time0))
    return copy.deepcopy(refDict['RefList'])
def SStructureFactorTw(refDict,G,hfx,pfx,SGData,SSGData,calcControls,parmDict):
    '''
    Compute super structure factors for all h,k,l,m for phase - twins only.
    Results are stored in place in refDict['RefList'] (refl is a numpy view):
    ref[10] gets F^2 from the primary twin element, ref[8] the twin-fraction
    weighted F^2 summed over twins, ref[11] the phase angle (primary twin).
    Works on blocks of 32 reflections for speed. Returns None (results are
    written in place; returns early with None if the phase has no atoms).
    input:

    :param dict refDict: where
        'RefList' list where each ref = h,k,l,m,it,d,...
        'FF' dict of form factors - filed in below
    :param np.array G: reciprocal metric tensor
    :param str hfx: histogram id string
    :param str pfx: phase id string
    :param dict SGData: space group info. dictionary output from SpcGroup
    :param dict SSGData: super space group info.
    :param dict calcControls:
    :param dict parmDict:
    '''
    phfx = pfx.split(':')[0]+hfx
    ast = np.sqrt(np.diag(G))
    Mast = twopisq*np.multiply.outer(ast,ast)
    SGInv = SGData['SGInv']
    SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
    SSGMT = np.array([ops[0].T for ops in SSGData['SSGOps']])
    SSGT = np.array([ops[1] for ops in SSGData['SSGOps']])
    FFtables = calcControls['FFtables']
    BLtables = calcControls['BLtables']
    MFtables = calcControls['MFtables']
    Flack = 1.0
    if not SGData['SGInv'] and 'S' in calcControls[hfx+'histType'] and phfx+'Flack' in parmDict:
        Flack = 1.-2.*parmDict[phfx+'Flack']
    TwinLaw = np.array([[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]],])    #4D?
    TwDict = refDict.get('TwDict',{})
    if 'S' in calcControls[hfx+'histType']:
        NTL = calcControls[phfx+'NTL']
        NM = calcControls[phfx+'TwinNMN']+1
        TwinLaw = calcControls[phfx+'TwinLaw']  #this'll have to be 4D also...
        TwinFr = np.array([parmDict[phfx+'TwinFr:'+str(i)] for i in range(len(TwinLaw))])
        TwinInv = list(np.where(calcControls[phfx+'TwinInv'],-1,1))
    Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
        GetAtomFXU(pfx,calcControls,parmDict)
    if not Xdata.size:          #no atoms in phase!
        return
    waveTypes,FSSdata,XSSdata,USSdata,MSSdata = GetAtomSSFXU(pfx,calcControls,parmDict)
    #NOTE(review): other callers pass MSSdata before Mast here (cf. SStructureFactor/SStructureFactorDerv) - confirm argument list
    ngl,nWaves,Fmod,Xmod,Umod,Mmod,glTau,glWt = G2mth.makeWaves(waveTypes,FSSdata,XSSdata,USSdata,Mast)
    modQ = np.array([parmDict[pfx+'mV0'],parmDict[pfx+'mV1'],parmDict[pfx+'mV2']])
    FF = np.zeros(len(Tdata))
    if 'NC' in calcControls[hfx+'histType']:
        FP,FPP = G2el.BlenResCW(Tdata,BLtables,parmDict[hfx+'Lam'])
    elif 'X' in calcControls[hfx+'histType']:
        FP = np.array([FFtables[El][hfx+'FP'] for El in Tdata])
        FPP = np.array([FFtables[El][hfx+'FPP'] for El in Tdata])
    Uij = np.array(G2lat.U6toUij(Uijdata)).T
    bij = Mast*Uij
    blkSize = 32       #no. of reflections in a block
    nRef = refDict['RefList'].shape[0]
    if not len(refDict['FF']):                #no form factors - 1st time thru StructureFactor
        SQ = 1./(2.*refDict['RefList'].T[5])**2
        if 'N' in calcControls[hfx+'histType']:
            dat = G2el.getBLvalues(BLtables)
            refDict['FF']['El'] = list(dat.keys())
            refDict['FF']['FF'] = np.ones((nRef,len(dat)))*list(dat.values())
            refDict['FF']['MF'] = np.zeros((nRef,len(dat)))
            for iel,El in enumerate(refDict['FF']['El']):
                if El in MFtables:
                    refDict['FF']['MF'].T[iel] = G2el.MagScatFac(MFtables[El],SQ)
        else:
            dat = G2el.getFFvalues(FFtables,0.)
            refDict['FF']['El'] = list(dat.keys())
            refDict['FF']['FF'] = np.zeros((nRef,len(dat)))
            for iel,El in enumerate(refDict['FF']['El']):
                refDict['FF']['FF'].T[iel] = G2el.ScatFac(FFtables[El],SQ)
#    time0 = time.time()
#reflection processing begins here - big arrays!
    iBeg = 0
    while iBeg < nRef:
        iFin = min(iBeg+blkSize,nRef)
        refl = refDict['RefList'][iBeg:iFin]    #array(blkSize,nItems) -- a view; writes below are in place
        H = refl[:,:4]                          #array(blkSize,4)
        H3 = refl[:,:3]
        HP = H[:,:3]+modQ[nxs,:]*H[:,3:]        #projected hklm to hkl
        HP = np.inner(HP,TwinLaw)             #array(blkSize,nTwins,4)
        H3 = np.inner(H3,TwinLaw)
        TwMask = np.any(HP,axis=-1)
        if TwinLaw.shape[0] > 1 and TwDict: #need np.inner(TwinLaw[?],TwDict[iref][i])*TwinInv[i]
            for ir in range(blkSize):
                iref = ir+iBeg
                if iref in TwDict:
                    for i in TwDict[iref]:
                        for n in range(NTL):
                            HP[ir][i+n*NM] = np.inner(TwinLaw[n*NM],np.array(TwDict[iref][i])*TwinInv[i+n*NM])
                            H3[ir][i+n*NM] = np.inner(TwinLaw[n*NM],np.array(TwDict[iref][i])*TwinInv[i+n*NM])
            TwMask = np.any(HP,axis=-1)
        SQ = 1./(2.*refl.T[5])**2               #array(blkSize)
        SQfactor = 4.0*SQ*twopisq               #ditto prev.
        Uniq = np.inner(H,SSGMT)
        Uniq3 = np.inner(H3,SGMT)
        UniqP = np.inner(HP,SGMT)
        Phi = np.inner(H,SSGT)
        if SGInv:   #if centro - expand HKL sets
            Uniq = np.hstack((Uniq,-Uniq))
            Uniq3 = np.hstack((Uniq3,-Uniq3))
            Phi = np.hstack((Phi,-Phi))
            UniqP = np.hstack((UniqP,-UniqP))
        if 'T' in calcControls[hfx+'histType']:
            if 'P' in calcControls[hfx+'histType']:
                FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[14])
            else:
                FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[12])
            FP = np.repeat(FP.T,Uniq.shape[1]*len(TwinLaw),axis=0)
            FPP = np.repeat(FPP.T,Uniq.shape[1]*len(TwinLaw),axis=0)
        #NOTE(review): no phfx+'BabA' guard here unlike SStructureFactor - assumes BabA/BabU always present; confirm
        Bab = np.repeat(parmDict[phfx+'BabA']*np.exp(-parmDict[phfx+'BabU']*SQfactor),Uniq.shape[1]*len(TwinLaw))
        Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
        FF = np.repeat(refDict['FF']['FF'][iBeg:iFin].T[Tindx].T,Uniq.shape[1]*len(TwinLaw),axis=0)
        phase = twopi*(np.inner(Uniq3,(dXdata.T+Xdata.T))-Phi[:,nxs,:,nxs])
        sinp = np.sin(phase)
        cosp = np.cos(phase)
        biso = -SQfactor*Uisodata[:,nxs]
        Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),Uniq.shape[1]*len(TwinLaw),axis=1).T
        HbH = -np.sum(UniqP[:,:,:,nxs]*np.inner(UniqP[:,:,:],bij),axis=-1)  #use hklt proj to hkl
        Tuij = np.where(HbH<1.,np.exp(HbH),1.0)
        Tcorr = np.reshape(Tiso,Tuij.shape)*Tuij*Mdata*Fdata/Uniq.shape[1]  #refBlk x ops x atoms
        if 'T' in calcControls[hfx+'histType']:
            fa = np.array([np.reshape(((FF+FP).T-Bab).T,cosp.shape)*cosp*Tcorr,-np.reshape(Flack*FPP,sinp.shape)*sinp*Tcorr])
            fb = np.array([np.reshape(Flack*FPP,cosp.shape)*cosp*Tcorr,np.reshape(((FF+FP).T-Bab).T,sinp.shape)*sinp*Tcorr])
        else:
            fa = np.array([np.reshape(((FF+FP).T-Bab).T,cosp.shape)*cosp*Tcorr,-Flack*FPP*sinp*Tcorr])
            fb = np.array([Flack*FPP*cosp*Tcorr,np.reshape(((FF+FP).T-Bab).T,sinp.shape)*sinp*Tcorr])
        GfpuA = G2mth.ModulationTw(Uniq,UniqP,nWaves,Fmod,Xmod,Umod,glTau,glWt) #2 x refBlk x sym X atoms
        fag = fa*GfpuA[0]-fb*GfpuA[1]   #real; 2 x refBlk x sym x atoms
        fbg = fb*GfpuA[0]+fa*GfpuA[1]
        fas = np.sum(np.sum(fag,axis=-1),axis=-1)   #2 x refBlk; sum sym & atoms
        fbs = np.sum(np.sum(fbg,axis=-1),axis=-1)
        refl.T[10] = np.sum(fas[:,:,0],axis=0)**2+np.sum(fbs[:,:,0],axis=0)**2                  #FcT from primary twin element
        refl.T[8] = np.sum(TwinFr*np.sum(TwMask[nxs,:,:]*fas,axis=0)**2,axis=-1)+   \
            np.sum(TwinFr*np.sum(TwMask[nxs,:,:]*fbs,axis=0)**2,axis=-1)                 #Fc sum over twins
        refl.T[11] = atan2d(fbs[0].T[0],fas[0].T[0])  #ignore f' & f"
        iBeg += blkSize
#    print ('nRef %d time %.4f\r'%(nRef,time.time()-time0))
def SStructureFactorDerv(refDict,im,G,hfx,pfx,SGData,SSGData,calcControls,parmDict):
    '''
    Compute super structure factor derivatives for all h,k,l,m for phase - no twins.
    Only Fourier components are done analytically here; reflections are
    processed one at a time (not in blocks).
    input:

    :param dict refDict: where
        'RefList' list where each ref = h,k,l,m,it,d,...
        'FF' dict of form factors - filled in below
    :param int im: = 1 (could be eliminated)
    :param np.array G: reciprocal metric tensor
    :param str hfx: histogram id string
    :param str pfx: phase id string
    :param dict SGData: space group info. dictionary output from SpcGroup
    :param dict SSGData: super space group info.
    :param dict calcControls:
    :param dict parmDict:

    :returns: dict dFdvDict: dictionary of derivatives keyed by parameter
        name (Afrac, dAx/y/z, AUiso, AUij, Fsin/Fcos, X/Y/Zsin, X/Y/Zcos,
        Uijsin/cos waves, Flack, BabA, BabU); {} if the phase has no atoms
    '''
    phfx = pfx.split(':')[0]+hfx
    ast = np.sqrt(np.diag(G))
    Mast = twopisq*np.multiply.outer(ast,ast)
    SGInv = SGData['SGInv']
    SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
    SSGMT = np.array([ops[0].T for ops in SSGData['SSGOps']])
    SSGT = np.array([ops[1] for ops in SSGData['SSGOps']])
    FFtables = calcControls['FFtables']
    BLtables = calcControls['BLtables']
    nRef = len(refDict['RefList'])
    Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
        GetAtomFXU(pfx,calcControls,parmDict)
    if not Xdata.size:          #no atoms in phase!
        return {}
    mSize = len(Mdata)  #no. atoms
    waveTypes,FSSdata,XSSdata,USSdata,MSSdata = GetAtomSSFXU(pfx,calcControls,parmDict)
    ngl,nWaves,Fmod,Xmod,Umod,Mmod,glTau,glWt = G2mth.makeWaves(waveTypes,FSSdata,XSSdata,USSdata,MSSdata,Mast)
    waveShapes,SCtauF,SCtauX,SCtauU,UmodAB = G2mth.makeWavesDerv(ngl,waveTypes,FSSdata,XSSdata,USSdata,Mast)
    modQ = np.array([parmDict[pfx+'mV0'],parmDict[pfx+'mV1'],parmDict[pfx+'mV2']])
    FF = np.zeros(len(Tdata))
    if 'NC' in calcControls[hfx+'histType']:
        FP,FPP = G2el.BlenResCW(Tdata,BLtables,parmDict[hfx+'Lam'])
    elif 'X' in calcControls[hfx+'histType']:
        FP = np.array([FFtables[El][hfx+'FP'] for El in Tdata])
        FPP = np.array([FFtables[El][hfx+'FPP'] for El in Tdata])
    Uij = np.array(G2lat.U6toUij(Uijdata)).T
    bij = Mast*Uij
    if not len(refDict['FF']):
        if 'N' in calcControls[hfx+'histType']:
            dat = G2el.getBLvalues(BLtables)        #will need wave here for anom. neutron b's
        else:
            dat = G2el.getFFvalues(FFtables,0.)
        refDict['FF']['El'] = list(dat.keys())
        refDict['FF']['FF'] = np.zeros((len(refDict['RefList']),len(dat)))
    #per-reflection derivative accumulators; filled in the loop below
    dFdvDict = {}
    dFdfr = np.zeros((nRef,mSize))
    dFdx = np.zeros((nRef,mSize,3))
    dFdui = np.zeros((nRef,mSize))
    dFdua = np.zeros((nRef,mSize,6))
    dFdbab = np.zeros((nRef,2))
    dFdfl = np.zeros((nRef))
    dFdGf = np.zeros((nRef,mSize,FSSdata.shape[1],2))
    dFdGx = np.zeros((nRef,mSize,XSSdata.shape[1],6))
    dFdGu = np.zeros((nRef,mSize,USSdata.shape[1],12))
    Flack = 1.0
    if not SGData['SGInv'] and 'S' in calcControls[hfx+'histType'] and phfx+'Flack' in parmDict:
        Flack = 1.-2.*parmDict[phfx+'Flack']
    time0 = time.time()
    nRef = len(refDict['RefList'])/100   #NOTE(review): rebinds nRef (float under true division) and appears unused below; arrays above were already sized - confirm
    for iref,refl in enumerate(refDict['RefList']):
        if 'T' in calcControls[hfx+'histType']:
            #NOTE(review): CW resonant-scattering fn used in the TOF branch (siblings use BlenResTOF) - confirm
            FP,FPP = G2el.BlenResCW(Tdata,BLtables,refl.T[12+im])
        H = np.array(refl[:4])
        HP = H[:3]+modQ*H[3:]            #projected hklm to hkl
        SQ = 1./(2.*refl[4+im])**2             # or (sin(theta)/lambda)**2
        SQfactor = 8.0*SQ*np.pi**2
        Bab = 0.0
        if phfx+'BabA' in parmDict:
            dBabdA = np.exp(-parmDict[phfx+'BabU']*SQfactor)
            Bab = parmDict[phfx+'BabA']*dBabdA
        Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
        FF = refDict['FF']['FF'][iref].T[Tindx]
        Uniq = np.inner(H,SSGMT)
        Phi = np.inner(H,SSGT)
        UniqP = np.inner(HP,SGMT)
        if SGInv:   #if centro - expand HKL sets
            Uniq = np.vstack((Uniq,-Uniq))
            Phi = np.hstack((Phi,-Phi))
            UniqP = np.vstack((UniqP,-UniqP))
        phase = twopi*(np.inner(Uniq[:,:3],(dXdata+Xdata).T)+Phi[:,nxs])
        sinp = np.sin(phase)
        cosp = np.cos(phase)
        occ = Mdata*Fdata/Uniq.shape[0]
        biso = -SQfactor*Uisodata[:,nxs]
        Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),Uniq.shape[0],axis=1).T    #ops x atoms
        HbH = -np.sum(UniqP[:,nxs,:3]*np.inner(UniqP[:,:3],bij),axis=-1)    #ops x atoms
        Hij = np.array([Mast*np.multiply.outer(U[:3],U[:3]) for U in UniqP]) #atoms x 3x3
        Hij = np.array([G2lat.UijtoU6(uij) for uij in Hij])                 #atoms x 6
        Tuij = np.where(HbH<1.,np.exp(HbH),1.0)     #ops x atoms
        Tcorr = np.reshape(Tiso,Tuij.shape)*Tuij*Mdata*Fdata/Uniq.shape[0]  #ops x atoms
        fot = (FF+FP-Bab)*Tcorr     #ops x atoms
        fotp = FPP*Tcorr            #ops x atoms
        GfpuA = G2mth.Modulation(Uniq,UniqP,nWaves,Fmod,Xmod,Umod,glTau,glWt) #2 x sym X atoms
        dGdf,dGdx,dGdu = G2mth.ModulationDerv(Uniq,UniqP,Hij,nWaves,waveShapes,Fmod,Xmod,UmodAB,SCtauF,SCtauX,SCtauU,glTau,glWt)
#        GfpuA is 2 x ops x atoms
#        derivs are: ops x atoms x waves x 2,6,12, or 5 parms as [real,imag] parts
        fa = np.array([((FF+FP).T-Bab).T*cosp*Tcorr,-Flack*FPP*sinp*Tcorr]) # array(2,nEqv,nAtoms)
        fb = np.array([((FF+FP).T-Bab).T*sinp*Tcorr,Flack*FPP*cosp*Tcorr])  #or array(2,nEqv,nAtoms)
        fag = fa*GfpuA[0]-fb*GfpuA[1]
        fbg = fb*GfpuA[0]+fa*GfpuA[1]
        fas = np.sum(np.sum(fag,axis=1),axis=1)     # 2 x twin
        fbs = np.sum(np.sum(fbg,axis=1),axis=1)
        fax = np.array([-fot*sinp,-fotp*cosp])   #positions; 2 x ops x atoms
        fbx = np.array([fot*cosp,-fotp*sinp])
        fax = fax*GfpuA[0]-fbx*GfpuA[1]
        #NOTE(review): this uses the fax just rebound on the previous line, not the original fax - confirm intended
        fbx = fbx*GfpuA[0]+fax*GfpuA[1]
#sum below is over Uniq
        dfadfr = np.sum(fag/occ,axis=1)        #Fdata != 0 ever avoids /0. problem
        dfbdfr = np.sum(fbg/occ,axis=1)        #Fdata != 0 avoids /0. problem
        dfadba = np.sum(-cosp*Tcorr[:,nxs],axis=1)
        dfbdba = np.sum(-sinp*Tcorr[:,nxs],axis=1)
        dfadui = np.sum(-SQfactor*fag,axis=1)
        dfbdui = np.sum(-SQfactor*fbg,axis=1)
        dfadx = np.sum(twopi*Uniq[:,:3]*np.swapaxes(fax,-2,-1)[:,:,:,nxs],axis=-2)  #2 x nAtom x 3xyz; sum nOps
        dfbdx = np.sum(twopi*Uniq[:,:3]*np.swapaxes(fbx,-2,-1)[:,:,:,nxs],axis=-2)
        dfadua = np.sum(-Hij*np.swapaxes(fag,-2,-1)[:,:,:,nxs],axis=-2) #2 x nAtom x 6Uij; sum nOps
        dfbdua = np.sum(-Hij*np.swapaxes(fbg,-2,-1)[:,:,:,nxs],axis=-2) #these are correct also for twins above
# array(2,nAtom,nWave,2) & array(2,nAtom,nWave,6) & array(2,nAtom,nWave,12); sum on nOps
        dfadGf = np.sum(fa[:,:,:,nxs,nxs]*dGdf[0][nxs,:,:,:,:]-fb[:,:,:,nxs,nxs]*dGdf[1][nxs,:,:,:,:],axis=1)
        dfbdGf = np.sum(fb[:,:,:,nxs,nxs]*dGdf[0][nxs,:,:,:,:]+fa[:,:,:,nxs,nxs]*dGdf[1][nxs,:,:,:,:],axis=1)
        dfadGx = np.sum(fa[:,:,:,nxs,nxs]*dGdx[0][nxs,:,:,:,:]-fb[:,:,:,nxs,nxs]*dGdx[1][nxs,:,:,:,:],axis=1)
        dfbdGx = np.sum(fb[:,:,:,nxs,nxs]*dGdx[0][nxs,:,:,:,:]+fa[:,:,:,nxs,nxs]*dGdx[1][nxs,:,:,:,:],axis=1)
        dfadGu = np.sum(fa[:,:,:,nxs,nxs]*dGdu[0][nxs,:,:,:,:]-fb[:,:,:,nxs,nxs]*dGdu[1][nxs,:,:,:,:],axis=1)
        dfbdGu = np.sum(fb[:,:,:,nxs,nxs]*dGdu[0][nxs,:,:,:,:]+fa[:,:,:,nxs,nxs]*dGdu[1][nxs,:,:,:,:],axis=1)
        if not SGData['SGInv']:   #Flack derivative
            dfadfl = np.sum(-FPP*Tcorr*sinp)
            dfbdfl = np.sum(FPP*Tcorr*cosp)
        else:
            dfadfl = 1.0
            dfbdfl = 1.0
        SA = fas[0]+fas[1]      #float = A+A'
        SB = fbs[0]+fbs[1]      #float = B+B'
        if 'P' in calcControls[hfx+'histType']: #checked perfect for centro & noncentro?
            dFdfl[iref] = -SA*dfadfl-SB*dfbdfl                  #array(nRef,)
            dFdfr[iref] = 2.*(fas[0]*dfadfr[0]+fas[1]*dfadfr[1])*Mdata/len(Uniq)+ \
                2.*(fbs[0]*dfbdfr[0]-fbs[1]*dfbdfr[1])*Mdata/len(Uniq)
            dFdx[iref] = 2.*(fas[0]*dfadx[0]+fas[1]*dfadx[1])+ \
                2.*(fbs[0]*dfbdx[0]+fbs[1]*dfbdx[1])
            dFdui[iref] = 2.*(fas[0]*dfadui[0]+fas[1]*dfadui[1])+ \
                2.*(fbs[0]*dfbdui[0]-fbs[1]*dfbdui[1])
            dFdua[iref] = 2.*(fas[0]*dfadua[0]+fas[1]*dfadua[1])+ \
                2.*(fbs[0]*dfbdua[0]+fbs[1]*dfbdua[1])
            dFdGf[iref] = 2.*(fas[0]*dfadGf[0]+fas[1]*dfadGf[1])+ \
                2.*(fbs[0]*dfbdGf[0]+fbs[1]*dfbdGf[1])
            dFdGx[iref] = 2.*(fas[0]*dfadGx[0]+fas[1]*dfadGx[1])+ \
                2.*(fbs[0]*dfbdGx[0]-fbs[1]*dfbdGx[1])
            dFdGu[iref] = 2.*(fas[0]*dfadGu[0]+fas[1]*dfadGu[1])+ \
                2.*(fbs[0]*dfbdGu[0]+fbs[1]*dfbdGu[1])
        else:                       #OK, I think
            dFdfr[iref] = 2.*(SA*dfadfr[0]+SA*dfadfr[1]+SB*dfbdfr[0]+SB*dfbdfr[1])*Mdata/len(Uniq) #array(nRef,nAtom)
            dFdx[iref] = 2.*(SA*dfadx[0]+SA*dfadx[1]+SB*dfbdx[0]+SB*dfbdx[1])    #array(nRef,nAtom,3)
            dFdui[iref] = 2.*(SA*dfadui[0]+SA*dfadui[1]+SB*dfbdui[0]+SB*dfbdui[1])   #array(nRef,nAtom)
            dFdua[iref] = 2.*(SA*dfadua[0]+SA*dfadua[1]+SB*dfbdua[0]+SB*dfbdua[1])    #array(nRef,nAtom,6)
            dFdfl[iref] = -SA*dfadfl-SB*dfbdfl                  #array(nRef,)
            dFdGf[iref] = 2.*(SA*dfadGf[0]+SB*dfbdGf[1])      #array(nRef,natom,nwave,2)
            dFdGx[iref] = 2.*(SA*dfadGx[0]+SB*dfbdGx[1])      #array(nRef,natom,nwave,6)
            dFdGu[iref] = 2.*(SA*dfadGu[0]+SB*dfbdGu[1])      #array(nRef,natom,nwave,12)
        if phfx+'BabA' in parmDict:
            dFdbab[iref] = 2.*fas[0]*np.array([np.sum(dfadba*dBabdA),np.sum(-dfadba*parmDict[phfx+'BabA']*SQfactor*dBabdA)]).T+ \
                2.*fbs[0]*np.array([np.sum(dfbdba*dBabdA),np.sum(-dfbdba*parmDict[phfx+'BabA']*SQfactor*dBabdA)]).T
#loop over atoms - each dict entry is list of derivatives for all the reflections
        if not iref%100 :
            print (' %d derivative time %.4f\r'%(iref,time.time()-time0),end='')
    for i in range(len(Mdata)):     #loop over atoms
        dFdvDict[pfx+'Afrac:'+str(i)] = dFdfr.T[i]
        dFdvDict[pfx+'dAx:'+str(i)] = dFdx.T[0][i]
        dFdvDict[pfx+'dAy:'+str(i)] = dFdx.T[1][i]
        dFdvDict[pfx+'dAz:'+str(i)] = dFdx.T[2][i]
        dFdvDict[pfx+'AUiso:'+str(i)] = dFdui.T[i]
        dFdvDict[pfx+'AU11:'+str(i)] = dFdua.T[0][i]
        dFdvDict[pfx+'AU22:'+str(i)] = dFdua.T[1][i]
        dFdvDict[pfx+'AU33:'+str(i)] = dFdua.T[2][i]
        dFdvDict[pfx+'AU12:'+str(i)] = 2.*dFdua.T[3][i]
        dFdvDict[pfx+'AU13:'+str(i)] = 2.*dFdua.T[4][i]
        dFdvDict[pfx+'AU23:'+str(i)] = 2.*dFdua.T[5][i]
        for j in range(FSSdata.shape[1]):        #loop over waves Fzero & Fwid?
            dFdvDict[pfx+'Fsin:'+str(i)+':'+str(j)] = dFdGf.T[0][j][i]
            dFdvDict[pfx+'Fcos:'+str(i)+':'+str(j)] = dFdGf.T[1][j][i]
        nx = 0
        if waveTypes[i] in ['Block','ZigZag']:
            nx = 1      #Block/ZigZag: 1st "wave" slot holds the shape parms, not a sin wave
        for j in range(XSSdata.shape[1]-nx):       #loop over waves
            dFdvDict[pfx+'Xsin:'+str(i)+':'+str(j+nx)] = dFdGx.T[0][j][i]
            dFdvDict[pfx+'Ysin:'+str(i)+':'+str(j+nx)] = dFdGx.T[1][j][i]
            dFdvDict[pfx+'Zsin:'+str(i)+':'+str(j+nx)] = dFdGx.T[2][j][i]
            dFdvDict[pfx+'Xcos:'+str(i)+':'+str(j+nx)] = dFdGx.T[3][j][i]
            dFdvDict[pfx+'Ycos:'+str(i)+':'+str(j+nx)] = dFdGx.T[4][j][i]
            dFdvDict[pfx+'Zcos:'+str(i)+':'+str(j+nx)] = dFdGx.T[5][j][i]
        for j in range(USSdata.shape[1]):       #loop over waves
            dFdvDict[pfx+'U11sin:'+str(i)+':'+str(j)] = dFdGu.T[0][j][i]
            dFdvDict[pfx+'U22sin:'+str(i)+':'+str(j)] = dFdGu.T[1][j][i]
            dFdvDict[pfx+'U33sin:'+str(i)+':'+str(j)] = dFdGu.T[2][j][i]
            dFdvDict[pfx+'U12sin:'+str(i)+':'+str(j)] = 2.*dFdGu.T[3][j][i]
            dFdvDict[pfx+'U13sin:'+str(i)+':'+str(j)] = 2.*dFdGu.T[4][j][i]
            dFdvDict[pfx+'U23sin:'+str(i)+':'+str(j)] = 2.*dFdGu.T[5][j][i]
            dFdvDict[pfx+'U11cos:'+str(i)+':'+str(j)] = dFdGu.T[6][j][i]
            dFdvDict[pfx+'U22cos:'+str(i)+':'+str(j)] = dFdGu.T[7][j][i]
            dFdvDict[pfx+'U33cos:'+str(i)+':'+str(j)] = dFdGu.T[8][j][i]
            dFdvDict[pfx+'U12cos:'+str(i)+':'+str(j)] = 2.*dFdGu.T[9][j][i]
            dFdvDict[pfx+'U13cos:'+str(i)+':'+str(j)] = 2.*dFdGu.T[10][j][i]
            dFdvDict[pfx+'U23cos:'+str(i)+':'+str(j)] = 2.*dFdGu.T[11][j][i]
    dFdvDict[phfx+'Flack'] = 4.*dFdfl.T
    dFdvDict[phfx+'BabA'] = dFdbab.T[0]
    dFdvDict[phfx+'BabU'] = dFdbab.T[1]
    return dFdvDict
def SStructureFactorDerv2(refDict,im,G,hfx,pfx,SGData,SSGData,calcControls,parmDict):
    '''
    Compute super structure factor derivatives for all h,k,l,m for phase - no twins.
    Derivatives are obtained numerically by central differences: each eligible
    parameter is stepped by +/-dM and SStructureFactor is rerun, so this is
    slow but handles terms with no analytic derivative.
    input:

    :param dict refDict: where
        'RefList' list where each ref = h,k,l,m,it,d,...
        'FF' dict of form factors - filled in below
    :param int im: = 1 (could be eliminated)
    :param np.array G: reciprocal metric tensor
    :param str hfx: histogram id string
    :param str pfx: phase id string
    :param dict SGData: space group info. dictionary output from SpcGroup
    :param dict SSGData: super space group info.
    :param dict calcControls:
    :param dict parmDict:
    :returns: dict dFdvDict: dictionary of numerical derivatives
    '''
    trefDict = copy.deepcopy(refDict)   #work on a copy; SStructureFactor writes into refDict
    dM = 1.e-4                          #step for central differences
    dFdvDict = {}
    #3rd field of the parameter id selects the terms differentiated numerically here
    dervNames = ['Tmin','Tmax','Xmax','Ymax','Zmax','Fzero','Fwid',
        'MXsin','MXcos','MYsin','MYcos','MZsin','MZcos','AMx','AMy','AMz',]
    for parm in parmDict:
        fields = parm.split(':')
        if len(fields) < 3:     #not a phase:hist:name id; also guards the [2] lookup below from IndexError
            continue
        if fields[2] in dervNames:
            parmDict[parm] += dM
            prefList = SStructureFactor(trefDict,G,hfx,pfx,SGData,SSGData,calcControls,parmDict)
            parmDict[parm] -= 2*dM
            mrefList = SStructureFactor(trefDict,G,hfx,pfx,SGData,SSGData,calcControls,parmDict)
            parmDict[parm] += dM        #restore original value
            dFdvDict[parm] = (prefList[:,9+im]-mrefList[:,9+im])/(2.*dM)
    return dFdvDict
def SStructureFactorDervTw(refDict,im,G,hfx,pfx,SGData,SSGData,calcControls,parmDict):
    '''Compute super structure factor derivatives for all h,k,l,m for a twinned phase.

    Accumulates analytic derivatives of the structure factor wrt atom fractions,
    positions, thermal parameters, modulation wave coefficients, Babinet terms
    & twin fractions over refDict['RefList'].

    :param dict refDict: 'RefList' (each ref = h,k,l,m,...) & 'FF' form factor dict
    :param int im: 1 if super structure (extra index m); used as a column offset
    :param np.array G: reciprocal metric tensor
    :param str hfx: histogram id string
    :param str pfx: phase id string
    :param dict SGData: space group info. dictionary
    :param dict SSGData: super space group info.
    :param dict calcControls: computation controls (histType, twin info, FF tables)
    :param dict parmDict: parameter dictionary
    :returns: dict dFdvDict: dictionary of derivatives keyed by parameter name

    NOTE(review): the dfadGf..dfbdGu lines below index with ``it`` outside the
    list comprehensions that bind it; comprehension variables do not leak in
    Python 3, so those lines would raise NameError - confirm whether this
    routine is still exercised.
    '''
    phfx = pfx.split(':')[0]+hfx
    ast = np.sqrt(np.diag(G))
    Mast = twopisq*np.multiply.outer(ast,ast)   #2*pi**2 * a*_i * a*_j
    SGInv = SGData['SGInv']
    SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
    SSGMT = np.array([ops[0].T for ops in SSGData['SSGOps']])
    SSGT = np.array([ops[1] for ops in SSGData['SSGOps']])
    FFtables = calcControls['FFtables']
    BLtables = calcControls['BLtables']
    #default "twin law": 4x4 identity (untwinned)
    TwinLaw = np.array([[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]],])
    TwDict = refDict.get('TwDict',{})
    # NOTE(review): nTwin/NTL/NM/TwinInv are only bound for single crystal data
    # ('S' in histType) yet are used unconditionally below - this routine
    # appears to assume single-crystal twin data
    if 'S' in calcControls[hfx+'histType']:
        NTL = calcControls[phfx+'NTL']
        NM = calcControls[phfx+'TwinNMN']+1
        TwinLaw = calcControls[phfx+'TwinLaw']
        TwinInv = list(np.where(calcControls[phfx+'TwinInv'],-1,1))
        nTwin = len(TwinLaw)
    nRef = len(refDict['RefList'])
    #unpack atom parameters for this phase
    Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
        GetAtomFXU(pfx,calcControls,parmDict)
    if not Xdata.size: #no atoms in phase!
        return {}
    mSize = len(Mdata) #no. atoms
    waveTypes,FSSdata,XSSdata,USSdata,MSSdata = GetAtomSSFXU(pfx,calcControls,parmDict)
    ngl,nWaves,Fmod,Xmod,Umod,Mmod,glTau,glWt = G2mth.makeWaves(waveTypes,FSSdata,XSSdata,USSdata,MSSdata,Mast) #NB: Mmod is ReIm,Mxyz,Ntau,Natm
    waveShapes,SCtauF,SCtauX,SCtauU,UmodAB = G2mth.makeWavesDerv(ngl,waveTypes,FSSdata,XSSdata,USSdata,Mast)
    modQ = np.array([parmDict[pfx+'mV0'],parmDict[pfx+'mV1'],parmDict[pfx+'mV2']])  #modulation vector
    FF = np.zeros(len(Tdata))
    if 'NC' in calcControls[hfx+'histType']:
        FP,FPP = G2el.BlenResCW(Tdata,BLtables,parmDict[hfx+'Lam'])
    elif 'X' in calcControls[hfx+'histType']:
        FP = np.array([FFtables[El][hfx+'FP'] for El in Tdata])
        FPP = np.array([FFtables[El][hfx+'FPP'] for El in Tdata])
    Uij = np.array(G2lat.U6toUij(Uijdata)).T
    bij = Mast*Uij
    if not len(refDict['FF']):      #form factor table not yet filled in
        if 'N' in calcControls[hfx+'histType']:
            dat = G2el.getBLvalues(BLtables)        #will need wave here for anom. neutron b's
        else:
            dat = G2el.getFFvalues(FFtables,0.)
        refDict['FF']['El'] = list(dat.keys())
        refDict['FF']['FF'] = np.zeros((len(refDict['RefList']),len(dat)))
    dFdvDict = {}
    #derivative accumulators: one slab per reflection & twin component
    dFdfr = np.zeros((nRef,nTwin,mSize))
    dFdx = np.zeros((nRef,nTwin,mSize,3))
    dFdui = np.zeros((nRef,nTwin,mSize))
    dFdua = np.zeros((nRef,nTwin,mSize,6))
    dFdbab = np.zeros((nRef,nTwin,2))
    dFdtw = np.zeros((nRef,nTwin))
    dFdGf = np.zeros((nRef,nTwin,mSize,FSSdata.shape[1]))
    dFdGx = np.zeros((nRef,nTwin,mSize,XSSdata.shape[1],3))
    dFdGz = np.zeros((nRef,nTwin,mSize,5))
    dFdGu = np.zeros((nRef,nTwin,mSize,USSdata.shape[1],6))
    Flack = 1.0
    if not SGData['SGInv'] and 'S' in calcControls[hfx+'histType'] and phfx+'Flack' in parmDict:
        Flack = 1.-2.*parmDict[phfx+'Flack']
    time0 = time.time()
    nRef = len(refDict['RefList'])/100
    # NOTE(review): nRef is rebound to a float here and never used again -
    # looks like leftover progress-reporting code
    for iref,refl in enumerate(refDict['RefList']):
        if 'T' in calcControls[hfx+'histType']:     #TOF: resonant terms per reflection
            FP,FPP = G2el.BlenResCW(Tdata,BLtables,refl.T[12+im])
        H = np.array(refl[:4])
        HP = H[:3]+modQ*H[3:]  #projected hklm to hkl
        H = np.inner(H.T,TwinLaw)   #maybe array(4,nTwins) or (4)
        TwMask = np.any(H,axis=-1)
        if TwinLaw.shape[0] > 1 and TwDict:     #merohedral twin overlaps
            if iref in TwDict:
                for i in TwDict[iref]:
                    for n in range(NTL):
                        H[i+n*NM] = np.inner(TwinLaw[n*NM],np.array(TwDict[iref][i])*TwinInv[i+n*NM])
                TwMask = np.any(H,axis=-1)
        SQ = 1./(2.*refl[4+im])**2             # or (sin(theta)/lambda)**2
        SQfactor = 8.0*SQ*np.pi**2
        dBabdA = np.exp(-parmDict[phfx+'BabU']*SQfactor)
        Bab = parmDict[phfx+'BabA']*dBabdA
        Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
        FF = refDict['FF']['FF'][iref].T[Tindx]
        Uniq = np.inner(H,SSGMT)
        Phi = np.inner(H,SSGT)
        UniqP = np.inner(HP,SGMT)
        if SGInv: #if centro - expand HKL sets
            Uniq = np.vstack((Uniq,-Uniq))
            Phi = np.hstack((Phi,-Phi))
            UniqP = np.vstack((UniqP,-UniqP))
        phase = twopi*(np.inner(Uniq[:,:3],(dXdata+Xdata).T)+Phi[:,nxs])
        sinp = np.sin(phase)
        cosp = np.cos(phase)
        occ = Mdata*Fdata/Uniq.shape[0]
        biso = -SQfactor*Uisodata[:,nxs]
        Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),Uniq.shape[0]*len(TwinLaw),axis=1).T #ops x atoms
        HbH = -np.sum(UniqP[:,nxs,:3]*np.inner(UniqP[:,:3],bij),axis=-1) #ops x atoms
        Hij = np.array([Mast*np.multiply.outer(U[:3],U[:3]) for U in UniqP]) #atoms x 3x3
        Hij = np.squeeze(np.reshape(np.array([G2lat.UijtoU6(uij) for uij in Hij]),(nTwin,-1,6)))
        Tuij = np.where(HbH<1.,np.exp(HbH),1.0) #ops x atoms
        Tcorr = np.reshape(Tiso,Tuij.shape)*Tuij*Mdata*Fdata/Uniq.shape[0]  #ops x atoms
        fot = (FF+FP-Bab)*Tcorr     #ops x atoms
        fotp = FPP*Tcorr            #ops x atoms
        GfpuA = G2mth.Modulation(Uniq,UniqP,nWaves,Fmod,Xmod,Umod,glTau,glWt) #2 x sym X atoms
        dGdf,dGdx,dGdu,dGdz = G2mth.ModulationDerv(Uniq,UniqP,Hij,nWaves,waveShapes,Fmod,Xmod,UmodAB,SCtauF,SCtauX,SCtauU,glTau,glWt)
        # GfpuA is 2 x ops x atoms
        # derivs are: ops x atoms x waves x 2,6,12, or 5 parms as [real,imag] parts
        fa = np.array([((FF+FP).T-Bab).T*cosp*Tcorr,-Flack*FPP*sinp*Tcorr]) # array(2,nTwin,nEqv,nAtoms)
        fb = np.array([((FF+FP).T-Bab).T*sinp*Tcorr,Flack*FPP*cosp*Tcorr])  #or array(2,nEqv,nAtoms)
        fag = fa*GfpuA[0]-fb*GfpuA[1]
        fbg = fb*GfpuA[0]+fa*GfpuA[1]
        fas = np.sum(np.sum(fag,axis=1),axis=1)     # 2 x twin
        fbs = np.sum(np.sum(fbg,axis=1),axis=1)
        fax = np.array([-fot*sinp,-fotp*cosp])   #positions; 2 x twin x ops x atoms
        fbx = np.array([fot*cosp,-fotp*sinp])
        fax = fax*GfpuA[0]-fbx*GfpuA[1]
        # NOTE(review): the next line uses the fax just reassigned above rather
        # than the pre-update value - compare with the untwinned derivative
        # routine before trusting these position derivatives
        fbx = fbx*GfpuA[0]+fax*GfpuA[1]
        #sum below is over Uniq
        dfadfr = np.sum(fag/occ,axis=1)        #Fdata != 0 ever avoids /0. problem
        dfbdfr = np.sum(fbg/occ,axis=1)        #Fdata != 0 avoids /0. problem
        dfadba = np.sum(-cosp*Tcorr[:,nxs],axis=1)
        dfbdba = np.sum(-sinp*Tcorr[:,nxs],axis=1)
        dfadui = np.sum(-SQfactor*fag,axis=1)
        dfbdui = np.sum(-SQfactor*fbg,axis=1)
        dfadx = np.array([np.sum(twopi*Uniq[it,:,:3]*np.swapaxes(fax,-2,-1)[:,it,:,:,nxs],axis=-2) for it in range(nTwin)])
        dfbdx = np.array([np.sum(twopi*Uniq[it,:,:3]*np.swapaxes(fbx,-2,-1)[:,it,:,:,nxs],axis=-2) for it in range(nTwin)])
        dfadua = np.array([np.sum(-Hij[it]*np.swapaxes(fag,-2,-1)[:,it,:,:,nxs],axis=-2) for it in range(nTwin)])
        dfbdua = np.array([np.sum(-Hij[it]*np.swapaxes(fbg,-2,-1)[:,it,:,:,nxs],axis=-2) for it in range(nTwin)])
        # array(2,nTwin,nAtom,3) & array(2,nTwin,nAtom,6) & array(2,nTwin,nAtom,12)
        # NOTE(review): 'it' is unbound at this scope in Python 3 - the next 8
        # lines would raise NameError if executed (see docstring)
        dfadGf = np.sum(fa[:,it,:,:,nxs,nxs]*dGdf[0][nxs,nxs,:,:,:,:]-fb[:,it,:,:,nxs,nxs]*dGdf[1][nxs,nxs,:,:,:,:],axis=1)
        dfbdGf = np.sum(fb[:,it,:,:,nxs,nxs]*dGdf[0][nxs,nxs,:,:,:,:]+fa[:,it,:,:,nxs,nxs]*dGdf[1][nxs,nxs,:,:,:,:],axis=1)
        dfadGx = np.sum(fa[:,it,:,:,nxs,nxs]*dGdx[0][nxs,nxs,:,:,:,:]-fb[:,it,:,:,nxs,nxs]*dGdx[1][nxs,nxs,:,:,:,:],axis=1)
        dfbdGx = np.sum(fb[:,it,:,:,nxs,nxs]*dGdx[0][nxs,nxs,:,:,:,:]+fa[:,it,:,:,nxs,nxs]*dGdx[1][nxs,nxs,:,:,:,:],axis=1)
        dfadGz = np.sum(fa[:,it,:,0,nxs,nxs]*dGdz[0][nxs,nxs,:,:,:]-fb[:,it,:,0,nxs,nxs]*dGdz[1][nxs,nxs,:,:,:],axis=1)
        dfbdGz = np.sum(fb[:,it,:,0,nxs,nxs]*dGdz[0][nxs,nxs,:,:,:]+fa[:,it,:,0,nxs,nxs]*dGdz[1][nxs,nxs,:,:,:],axis=1)
        dfadGu = np.sum(fa[:,it,:,:,nxs,nxs]*dGdu[0][nxs,nxs,:,:,:,:]-fb[:,it,:,:,nxs,nxs]*dGdu[1][nxs,nxs,:,:,:,:],axis=1)
        dfbdGu = np.sum(fb[:,it,:,:,nxs,nxs]*dGdu[0][nxs,nxs,:,:,:,:]+fa[:,it,:,:,nxs,nxs]*dGdu[1][nxs,nxs,:,:,:,:],axis=1)
#        GSASIIpath.IPyBreak()
        #NB: the above have been checked against PA(1:10,1:2) in strfctr.for for Al2O3!
        SA = fas[0]+fas[1]      #float = A+A' (might be array[nTwin])
        SB = fbs[0]+fbs[1]      #float = B+B' (might be array[nTwin])
        dFdfr[iref] = [2.*TwMask[it]*(SA[it]*dfadfr[0][it]+SA[it]*dfadfr[1][it]+SB[it]*dfbdfr[0][it]+SB[it]*dfbdfr[1][it])*Mdata/len(Uniq[it]) for it in range(nTwin)]
        dFdx[iref] = [2.*TwMask[it]*(SA[it]*dfadx[it][0]+SA[it]*dfadx[it][1]+SB[it]*dfbdx[it][0]+SB[it]*dfbdx[it][1]) for it in range(nTwin)]
        dFdui[iref] = [2.*TwMask[it]*(SA[it]*dfadui[it][0]+SA[it]*dfadui[it][1]+SB[it]*dfbdui[it][0]+SB[it]*dfbdui[it][1]) for it in range(nTwin)]
        dFdua[iref] = [2.*TwMask[it]*(SA[it]*dfadua[it][0]+SA[it]*dfadua[it][1]+SB[it]*dfbdua[it][0]+SB[it]*dfbdua[it][1]) for it in range(nTwin)]
        dFdtw[iref] = np.sum(TwMask*fas,axis=0)**2+np.sum(TwMask*fbs,axis=0)**2
        dFdGf[iref] = [2.*TwMask[it]*(SA[it]*dfadGf[1]+SB[it]*dfbdGf[1]) for it in range(nTwin)]
        dFdGx[iref] = [2.*TwMask[it]*(SA[it]*dfadGx[1]+SB[it]*dfbdGx[1]) for it in range(nTwin)]
        dFdGz[iref] = [2.*TwMask[it]*(SA[it]*dfadGz[1]+SB[it]*dfbdGz[1]) for it in range(nTwin)]
        dFdGu[iref] = [2.*TwMask[it]*(SA[it]*dfadGu[1]+SB[it]*dfbdGu[1]) for it in range(nTwin)]
#        GSASIIpath.IPyBreak()
        dFdbab[iref] = 2.*fas[0]*np.array([np.sum(dfadba*dBabdA),np.sum(-dfadba*parmDict[phfx+'BabA']*SQfactor*dBabdA)]).T+ \
            2.*fbs[0]*np.array([np.sum(dfbdba*dBabdA),np.sum(-dfbdba*parmDict[phfx+'BabA']*SQfactor*dBabdA)]).T
        #loop over atoms - each dict entry is list of derivatives for all the reflections
        if not iref%100 :
            print (' %d derivative time %.4f\r'%(iref,time.time()-time0),end='')
    for i in range(len(Mdata)): #loop over atoms
        dFdvDict[pfx+'Afrac:'+str(i)] = dFdfr.T[i]
        dFdvDict[pfx+'dAx:'+str(i)] = dFdx.T[0][i]
        dFdvDict[pfx+'dAy:'+str(i)] = dFdx.T[1][i]
        dFdvDict[pfx+'dAz:'+str(i)] = dFdx.T[2][i]
        dFdvDict[pfx+'AUiso:'+str(i)] = dFdui.T[i]
        dFdvDict[pfx+'AU11:'+str(i)] = dFdua.T[0][i]
        dFdvDict[pfx+'AU22:'+str(i)] = dFdua.T[1][i]
        dFdvDict[pfx+'AU33:'+str(i)] = dFdua.T[2][i]
        dFdvDict[pfx+'AU12:'+str(i)] = 2.*dFdua.T[3][i]
        dFdvDict[pfx+'AU13:'+str(i)] = 2.*dFdua.T[4][i]
        dFdvDict[pfx+'AU23:'+str(i)] = 2.*dFdua.T[5][i]
        for j in range(FSSdata.shape[1]): #loop over waves Fzero & Fwid?
            dFdvDict[pfx+'Fsin:'+str(i)+':'+str(j)] = dFdGf.T[0][j][i]
            dFdvDict[pfx+'Fcos:'+str(i)+':'+str(j)] = dFdGf.T[1][j][i]
        nx = 0
        if waveTypes[i] in ['Block','ZigZag']:
            nx = 1
            dFdvDict[pfx+'Tmin:'+str(i)+':0'] = dFdGz.T[0][i]   #ZigZag/Block waves (if any)
            dFdvDict[pfx+'Tmax:'+str(i)+':0'] = dFdGz.T[1][i]
            dFdvDict[pfx+'Xmax:'+str(i)+':0'] = dFdGz.T[2][i]
            dFdvDict[pfx+'Ymax:'+str(i)+':0'] = dFdGz.T[3][i]
            dFdvDict[pfx+'Zmax:'+str(i)+':0'] = dFdGz.T[4][i]
        for j in range(XSSdata.shape[1]-nx): #loop over waves
            dFdvDict[pfx+'Xsin:'+str(i)+':'+str(j+nx)] = dFdGx.T[0][j][i]
            dFdvDict[pfx+'Ysin:'+str(i)+':'+str(j+nx)] = dFdGx.T[1][j][i]
            dFdvDict[pfx+'Zsin:'+str(i)+':'+str(j+nx)] = dFdGx.T[2][j][i]
            dFdvDict[pfx+'Xcos:'+str(i)+':'+str(j+nx)] = dFdGx.T[3][j][i]
            dFdvDict[pfx+'Ycos:'+str(i)+':'+str(j+nx)] = dFdGx.T[4][j][i]
            dFdvDict[pfx+'Zcos:'+str(i)+':'+str(j+nx)] = dFdGx.T[5][j][i]
        for j in range(USSdata.shape[1]): #loop over waves
            dFdvDict[pfx+'U11sin:'+str(i)+':'+str(j)] = dFdGu.T[0][j][i]
            dFdvDict[pfx+'U22sin:'+str(i)+':'+str(j)] = dFdGu.T[1][j][i]
            dFdvDict[pfx+'U33sin:'+str(i)+':'+str(j)] = dFdGu.T[2][j][i]
            dFdvDict[pfx+'U12sin:'+str(i)+':'+str(j)] = 2.*dFdGu.T[3][j][i]
            dFdvDict[pfx+'U13sin:'+str(i)+':'+str(j)] = 2.*dFdGu.T[4][j][i]
            dFdvDict[pfx+'U23sin:'+str(i)+':'+str(j)] = 2.*dFdGu.T[5][j][i]
            dFdvDict[pfx+'U11cos:'+str(i)+':'+str(j)] = dFdGu.T[6][j][i]
            dFdvDict[pfx+'U22cos:'+str(i)+':'+str(j)] = dFdGu.T[7][j][i]
            dFdvDict[pfx+'U33cos:'+str(i)+':'+str(j)] = dFdGu.T[8][j][i]
            dFdvDict[pfx+'U12cos:'+str(i)+':'+str(j)] = 2.*dFdGu.T[9][j][i]
            dFdvDict[pfx+'U13cos:'+str(i)+':'+str(j)] = 2.*dFdGu.T[10][j][i]
            dFdvDict[pfx+'U23cos:'+str(i)+':'+str(j)] = 2.*dFdGu.T[11][j][i]
#        GSASIIpath.IPyBreak()
    dFdvDict[phfx+'BabA'] = dFdbab.T[0]
    dFdvDict[phfx+'BabU'] = dFdbab.T[1]
    return dFdvDict
def SCExtinction(ref,im,phfx,hfx,pfx,calcControls,parmDict,varyList):
    ''' Single crystal extinction function; returns extinction & derivative

    Applies the selected extinction model (Primary, Secondary Type I/II or
    'I & II' mixed) with a Gaussian or Lorentzian approximation per
    calcControls[phfx+'EApprox'].

    :param ref: reflection record; ref[4+im]=d-spacing, ref[9+im] & ref[7+im]
        are intensity-like columns used in the correction & its derivatives
    :param int im: 1 if super structure, else 0
    :param str phfx,hfx,pfx: phase:hist, histogram & phase id prefixes
    :param dict calcControls: EType/EApprox/Tbar/Cos2TM controls
    :param dict parmDict: parameter values (Ep, Es, Eg, Lam, Vol, ...)
    :param list varyList: derivatives are computed only for parms in this list
    :returns: 1./extCor (multiplicative intensity correction), dervDict
        (derivatives wrt the varied extinction parms), dervCor (extinction
        factor applied to the other derivatives)
    '''
    extCor = 1.0
    dervDict = {}
    dervCor = 1.0
    if calcControls[phfx+'EType'] != 'None':
        SQ = 1/(4.*ref[4+im]**2)    #(sin(theta)/lambda)**2 from d-spacing
        if 'C' in parmDict[hfx+'Type']:
            cos2T = 1.0-2.*SQ*parmDict[hfx+'Lam']**2         #cos(2theta)
        else:   #'T'
            cos2T = 1.0-2.*SQ*ref[12+im]**2         #cos(2theta)
        #model-dependent prefactors PL & PLZ
        if 'SXC' in parmDict[hfx+'Type']:       #single crystal x-ray, CW
            AV = 7.9406e5/parmDict[pfx+'Vol']**2
            PL = np.sqrt(1.0-cos2T**2)/parmDict[hfx+'Lam']
            P12 = (calcControls[phfx+'Cos2TM']+cos2T**4)/(calcControls[phfx+'Cos2TM']+cos2T**2)
            PLZ = AV*P12*ref[9+im]*parmDict[hfx+'Lam']**2
        elif 'SNT' in parmDict[hfx+'Type']:     #single crystal neutron, TOF
            AV = 1.e7/parmDict[pfx+'Vol']**2
            PL = SQ
            PLZ = AV*ref[9+im]*ref[12+im]**2
        elif 'SNC' in parmDict[hfx+'Type']:     #single crystal neutron, CW
            AV = 1.e7/parmDict[pfx+'Vol']**2
            PL = np.sqrt(1.0-cos2T**2)/parmDict[hfx+'Lam']
            PLZ = AV*ref[9+im]*parmDict[hfx+'Lam']**2
            if 'Primary' in calcControls[phfx+'EType']:
                PLZ *= 1.5
            else:
                if 'C' in parmDict[hfx+'Type']:
                    PLZ *= calcControls[phfx+'Tbar']
                else: #'T'
                    PLZ *= ref[13+im] #t-bar
        # NOTE(review): for SNC + Primary the 1.5 factor is applied a second
        # time here - confirm this is intentional
        if 'Primary' in calcControls[phfx+'EType']:
            PLZ *= 1.5
            PSIG = parmDict[phfx+'Ep']
        elif 'I & II' in calcControls[phfx+'EType']:
            PSIG = parmDict[phfx+'Eg']/np.sqrt(1.+(parmDict[phfx+'Es']*PL/parmDict[phfx+'Eg'])**2)
        elif 'Type II' in calcControls[phfx+'EType']:
            PSIG = parmDict[phfx+'Es']
        else:       # 'Secondary Type I'
            PSIG = parmDict[phfx+'Eg']/PL
        #polynomial coefficients for the Gaussian (G) & Lorentzian (L) forms
        AG = 0.58+0.48*cos2T+0.24*cos2T**2
        AL = 0.025+0.285*cos2T
        BG = 0.02-0.025*cos2T
        BL = 0.15-0.2*(0.75-cos2T)**2
        if cos2T < 0.:
            BL = -0.45*cos2T
        CG = 2.
        CL = 2.
        PF = PLZ*PSIG
        if 'Gaussian' in calcControls[phfx+'EApprox']:
            PF4 = 1.+CG*PF+AG*PF**2/(1.+BG*PF)
            extCor = np.sqrt(PF4)
            PF3 = 0.5*(CG+2.*AG*PF/(1.+BG*PF)-AG*PF**2*BG/(1.+BG*PF)**2)/(PF4*extCor)   #d(extCor)/d(PF) / extCor**2
        else:
            PF4 = 1.+CL*PF+AL*PF**2/(1.+BL*PF)
            extCor = np.sqrt(PF4)
            PF3 = 0.5*(CL+2.*AL*PF/(1.+BL*PF)-AL*PF**2*BL/(1.+BL*PF)**2)/(PF4*extCor)
        dervCor = (1.+PF)*PF3   #extinction corr for other derivatives
        #derivatives only for the varied extinction parameters
        if 'Primary' in calcControls[phfx+'EType'] and phfx+'Ep' in varyList:
            dervDict[phfx+'Ep'] = -ref[7+im]*PLZ*PF3
        if 'II' in calcControls[phfx+'EType'] and phfx+'Es' in varyList:
            dervDict[phfx+'Es'] = -ref[7+im]*PLZ*PF3*(PSIG/parmDict[phfx+'Es'])**3
        if 'I' in calcControls[phfx+'EType'] and phfx+'Eg' in varyList:
            dervDict[phfx+'Eg'] = -ref[7+im]*PLZ*PF3*(PSIG/parmDict[phfx+'Eg'])**3*PL**2
    return 1./extCor,dervDict,dervCor
def Dict2Values(parmdict, varylist):
    '''Extract the values for the keys named in varylist from parmdict, in
    order. Used to build the value vector handed to leastsq.'''
    return list(map(parmdict.__getitem__, varylist))
def Values2Dict(parmdict, varylist, values):
    '''Write fitted values back into parmdict under the keys named in
    varylist. Used after a leastsq call; mutates parmdict in place.'''
    for key, val in zip(varylist, values):
        parmdict[key] = val
def GetNewCellParms(parmDict,varyList):
    '''Compute unit cell tensor terms from varied Aij and Dij values.
    Terms are included in the dict only if Aij or Dij is varied.
    '''
    Dnames = ['D11','D22','D33','D12','D13','D23']
    Ddict = {Dname:'A%d'%(i) for i,Dname in enumerate(Dnames)}  #D11->A0, D22->A1, ...
    newCellDict = {}
    for item in varyList:
        keys = item.split(':')
        if keys[2] not in Ddict:
            continue
        key = keys[0]+'::'+Ddict[keys[2]]   #key is e.g. '0::A0'
        parm = keys[0]+'::'+keys[2]         #parm is e.g. '0::D11'
        newCellDict[parm] = [key,parmDict[key]+parmDict[item]]
    return newCellDict # is e.g. {'0::D11':A0-D11}
def ApplyXYZshifts(parmDict,varyList):
    '''
    takes atom x,y,z shift and applies it to corresponding atom x,y,z value

    :param dict parmDict: parameter dictionary; coordinate entries are updated in place
    :param list varyList: list of variables (not used!)
    :returns: newAtomDict - dictionary of new atomic coordinate names & values;
        key is parameter shift name
    '''
    newAtomDict = {}
    for name in parmDict:
        if 'dA' not in name:
            continue
        target = name.replace('d','')   #e.g. '0::dAx:3' -> '0::Ax:3'
        parmDict[target] += parmDict[name]
        newAtomDict[name] = [target,parmDict[target]]
    return newAtomDict
def SHTXcal(refl,im,g,pfx,hfx,SGData,calcControls,parmDict):
    'Spherical harmonics texture correction for one reflection'
    coupled = 'Bragg' in calcControls[hfx+'instType']
    tth = parmDict[hfx+'2-theta'] if 'T' in calcControls[hfx+'histType'] else refl[5+im]
    hkl = refl[:3]
    cell = G2lat.Gmat2cell(g)
    sample = [parmDict[pfx+'SH omega'],parmDict[pfx+'SH chi'],parmDict[pfx+'SH phi']]
    gonio = [parmDict[hfx+'Phi'],parmDict[hfx+'Chi'],parmDict[hfx+'Omega'],parmDict[hfx+'Azimuth']]
    phi,beta = G2lat.CrsAng(hkl,cell,SGData)
    psi,gam,x,x = G2lat.SamAng(tth/2.,gonio,sample,coupled)     #angle derivs. ignored
    odfCor = 1.0
    for term in G2lat.GenSHCoeff(SGData['SGLaue'],parmDict[pfx+'SHmodel'],parmDict[pfx+'SHorder']):
        L,M,N = eval(term.strip('C'))
        Kcl = G2lat.GetKcl(L,N,SGData['SGLaue'],phi,beta)
        Ksl,x,x = G2lat.GetKsl(L,M,parmDict[pfx+'SHmodel'],psi,gam)
        Lnorm = G2lat.Lnorm(L)
        odfCor += parmDict[pfx+term]*Lnorm*Kcl*Ksl
    return odfCor
def SHTXcalDerv(refl,im,g,pfx,hfx,SGData,calcControls,parmDict):
    'Spherical harmonics texture correction & derivatives for one reflection'
    tth = parmDict[hfx+'2-theta'] if 'T' in calcControls[hfx+'histType'] else refl[5+im]
    coupled = 'Bragg' in calcControls[hfx+'instType']
    odfCor = 1.0
    dFdODF = {}         #derivs wrt the SH coefficients
    dFdSA = [0,0,0]     #derivs wrt the sample orientation angles
    cell = G2lat.Gmat2cell(g)
    sample = [parmDict[pfx+'SH omega'],parmDict[pfx+'SH chi'],parmDict[pfx+'SH phi']]
    gonio = [parmDict[hfx+'Phi'],parmDict[hfx+'Chi'],parmDict[hfx+'Omega'],parmDict[hfx+'Azimuth']]
    phi,beta = G2lat.CrsAng(refl[:3],cell,SGData)
    psi,gam,dPSdA,dGMdA = G2lat.SamAng(tth/2.,gonio,sample,coupled)
    for term in G2lat.GenSHCoeff(SGData['SGLaue'],parmDict[pfx+'SHmodel'],parmDict[pfx+'SHorder']):
        L,M,N = eval(term.strip('C'))
        Kcl = G2lat.GetKcl(L,N,SGData['SGLaue'],phi,beta)
        Ksl,dKsdp,dKsdg = G2lat.GetKsl(L,M,parmDict[pfx+'SHmodel'],psi,gam)
        Lnorm = G2lat.Lnorm(L)
        odfCor += parmDict[pfx+term]*Lnorm*Kcl*Ksl
        dFdODF[pfx+term] = Lnorm*Kcl*Ksl
        for i in range(3):
            dFdSA[i] += parmDict[pfx+term]*Lnorm*Kcl*(dKsdp*dPSdA[i]+dKsdg*dGMdA[i])
    return odfCor,dFdODF,dFdSA
def SHPOcal(refl,im,g,phfx,hfx,SGData,calcControls,parmDict):
    'spherical harmonics preferred orientation (cylindrical symmetry only)'
    tth = parmDict[hfx+'2-theta'] if 'T' in calcControls[hfx+'histType'] else refl[5+im]
    cell = G2lat.Gmat2cell(g)
    sample = [0.,0.,0.]
    if 'Bragg' in calcControls[hfx+'instType']:
        gonio = [0.,90.,0.,parmDict[hfx+'Azimuth']]
        coupled = True
    else:
        gonio = [parmDict[hfx+'Phi'],parmDict[hfx+'Chi'],parmDict[hfx+'Omega'],parmDict[hfx+'Azimuth']]
        coupled = False
    phi,beta = G2lat.CrsAng(refl[:3],cell,SGData)
    psi,gam,x,x = G2lat.SamAng(tth/2.,gonio,sample,coupled)     #ignore 2 sets of angle derivs.
    odfCor = 1.0
    for term in calcControls[phfx+'SHnames']:
        L,N = eval(term.strip('C'))
        Kcl = G2lat.GetKcl(L,N,SGData['SGLaue'],phi,beta)
        Ksl,x,x = G2lat.GetKsl(L,0,'0',psi,gam)
        Lnorm = G2lat.Lnorm(L)
        odfCor += parmDict[phfx+term]*Lnorm*Kcl*Ksl
    return np.squeeze(odfCor)
def SHPOcalDerv(refl,im,g,phfx,hfx,SGData,calcControls,parmDict):
    'spherical harmonics preferred orientation derivatives (cylindrical symmetry only)'
    tth = parmDict[hfx+'2-theta'] if 'T' in calcControls[hfx+'histType'] else refl[5+im]
    cell = G2lat.Gmat2cell(g)
    sample = [0.,0.,0.]
    if 'Bragg' in calcControls[hfx+'instType']:
        gonio = [0.,90.,0.,parmDict[hfx+'Azimuth']]
        coupled = True
    else:
        gonio = [parmDict[hfx+'Phi'],parmDict[hfx+'Chi'],parmDict[hfx+'Omega'],parmDict[hfx+'Azimuth']]
        coupled = False
    phi,beta = G2lat.CrsAng(refl[:3],cell,SGData)
    psi,gam,x,x = G2lat.SamAng(tth/2.,gonio,sample,coupled)     #ignore 2 sets of angle derivs.
    odfCor = 1.0
    dFdODF = {}     #derivs wrt the SH coefficients
    for term in calcControls[phfx+'SHnames']:
        L,N = eval(term.strip('C'))
        Kcl = G2lat.GetKcl(L,N,SGData['SGLaue'],phi,beta)
        Ksl,x,x = G2lat.GetKsl(L,0,'0',psi,gam)
        Lnorm = G2lat.Lnorm(L)
        odfCor += parmDict[phfx+term]*Lnorm*Kcl*Ksl
        dFdODF[phfx+term] = Kcl*Ksl*Lnorm
    return odfCor,dFdODF
def GetPrefOri(uniq,G,g,phfx,hfx,SGData,calcControls,parmDict):
    'March-Dollase preferred orientation correction'
    MD = parmDict[phfx+'MD']
    if MD == 1.0:       #ratio of 1 means no preferred orientation
        return 1.0
    MDAxis = calcControls[phfx+'MDAxis']
    total = 0
    for H in uniq:      #average over the equivalent reflections
        cosP,sinP = G2lat.CosSinAngle(H,MDAxis,G)
        A = 1.0/np.sqrt((MD*cosP)**2+sinP**2/MD)
        total += A**3
    return total/len(uniq)
def GetPrefOriDerv(refl,im,uniq,G,g,phfx,hfx,SGData,calcControls,parmDict):
    'preferred orientation correction & derivatives (March-Dollase or spherical harmonics)'
    POcorr = 1.0
    POderv = {}
    if calcControls[phfx+'poType'] == 'MD':     #March-Dollase
        MD = parmDict[phfx+'MD']
        MDAxis = calcControls[phfx+'MDAxis']
        sumMD = 0
        sumdMD = 0
        for H in uniq:      #average over the equivalent reflections
            cosP,sinP = G2lat.CosSinAngle(H,MDAxis,G)
            A = 1.0/np.sqrt((MD*cosP)**2+sinP**2/MD)
            sumMD += A**3
            sumdMD -= (1.5*A**5)*(2.0*MD*cosP**2-(sinP/MD)**2)
        POcorr = sumMD/len(uniq)
        POderv[phfx+'MD'] = sumdMD/len(uniq)
    elif calcControls[phfx+'SHord']:            #spherical harmonics
        POcorr,POderv = SHPOcalDerv(refl,im,g,phfx,hfx,SGData,calcControls,parmDict)
    return POcorr,POderv
def GetAbsorb(refl,im,hfx,calcControls,parmDict):
    'absorption (Debye-Scherrer cylinder) or surface roughness (Bragg-Brentano) correction'
    if 'Debye' not in calcControls[hfx+'instType']:     #Bragg-Brentano geometry
        return G2pwd.SurfaceRough(parmDict[hfx+'SurfRoughA'],parmDict[hfx+'SurfRoughB'],refl[5+im])
    if 'T' in calcControls[hfx+'histType']:             #TOF: scale mu*R by wavelength column
        return G2pwd.Absorb('Cylinder',parmDict[hfx+'Absorption']*refl[14+im],abs(parmDict[hfx+'2-theta']),0,0)
    return G2pwd.Absorb('Cylinder',parmDict[hfx+'Absorption'],refl[5+im],0,0)
def GetAbsorbDerv(refl,im,hfx,calcControls,parmDict):
    'derivative of the absorption or surface roughness correction wrt its parameters'
    if 'Debye' not in calcControls[hfx+'instType']:     #Bragg-Brentano geometry
        return np.array(G2pwd.SurfaceRoughDerv(parmDict[hfx+'SurfRoughA'],parmDict[hfx+'SurfRoughB'],refl[5+im]))
    if 'T' in calcControls[hfx+'histType']:             #TOF: scale mu*R by wavelength column
        return G2pwd.AbsorbDerv('Cylinder',parmDict[hfx+'Absorption']*refl[14+im],abs(parmDict[hfx+'2-theta']),0,0)
    return G2pwd.AbsorbDerv('Cylinder',parmDict[hfx+'Absorption'],refl[5+im],0,0)
def GetPwdrExt(refl,im,pfx,phfx,hfx,calcControls,parmDict):
    'powder extinction correction for one reflection'
    coef = np.array([-0.5,0.25,-0.10416667,0.036458333,-0.0109375,2.8497409E-3])    #series coefficients
    pi2 = np.sqrt(2./np.pi)
    if 'T' in calcControls[hfx+'histType']:     #TOF
        sth2 = sind(abs(parmDict[hfx+'2-theta'])/2.)**2
        wave = refl[14+im]
    else:                                       #CW
        sth2 = sind(refl[5+im]/2.)**2
        wave = parmDict.get(hfx+'Lam',parmDict.get(hfx+'Lam1',1.0))
    c2th = 1.-2.0*sth2                          #cos(2theta)
    flv2 = refl[9+im]*(wave/parmDict[pfx+'Vol'])**2
    if 'X' in calcControls[hfx+'histType']:     #x-rays: polarization-like factor
        flv2 *= 0.079411*(1.0+c2th**2)/2.0
    xfac = flv2*parmDict[phfx+'Extinction']
    exb = 1./np.sqrt(1.+xfac) if xfac > -1. else 1.0    #Bragg component
    if 0 < xfac <= 1.:      #small-x series for the Laue component
        powers = np.array([xfac**(i+1) for i in range(6)])
        exl = 1.0+np.sum(powers*coef)
    elif xfac > 1.:         #large-x asymptotic form
        xfac2 = 1./np.sqrt(xfac)
        exl = pi2*(1.-0.125/xfac)*xfac2
    else:
        exl = 1.0
    return exb*sth2+exl*(1.-sth2)
def GetPwdrExtDerv(refl,im,pfx,phfx,hfx,calcControls,parmDict):
    'derivative of the powder extinction correction wrt the Extinction parameter'
    coef = np.array([-0.5,0.25,-0.10416667,0.036458333,-0.0109375,2.8497409E-3])    #series coefficients
    pi2 = np.sqrt(2./np.pi)
    if 'T' in calcControls[hfx+'histType']:     #TOF
        sth2 = sind(abs(parmDict[hfx+'2-theta'])/2.)**2
        wave = refl[14+im]
    else:                                       #CW
        sth2 = sind(refl[5+im]/2.)**2
        wave = parmDict.get(hfx+'Lam',parmDict.get(hfx+'Lam1',1.0))
    c2th = 1.-2.0*sth2                          #cos(2theta)
    flv2 = refl[9+im]*(wave/parmDict[pfx+'Vol'])**2
    if 'X' in calcControls[hfx+'histType']:     #x-rays: polarization-like factor
        flv2 *= 0.079411*(1.0+c2th**2)/2.0
    xfac = flv2*parmDict[phfx+'Extinction']
    if xfac > -1.:          #Bragg component derivative
        dbde = -0.5*flv2/np.sqrt(1.+xfac)**3
    else:
        dbde = -500.*flv2
    dlde = 0.               #Laue component derivative
    if 0 < xfac <= 1.:
        terms = np.array([i*flv2*xfac**i for i in [1,2,3,4,5,6]])
        dlde = np.sum(terms*coef)/xfac
    elif xfac > 1.:
        xfac2 = 1./np.sqrt(xfac)
        dlde = 0.5*flv2*pi2*xfac2*(-1./xfac+0.375/xfac**2)
    return dbde*sth2+dlde*(1.-sth2)
def GetIntensityCorr(refl,im,uniq,G,g,pfx,phfx,hfx,SGData,calcControls,parmDict):
    'product of intensity corrections for one powder reflection' #need powder extinction!
    Icorr = parmDict[phfx+'Scale']*parmDict[hfx+'Scale']*refl[3+im]     #scale*multiplicity
    if 'X' in parmDict[hfx+'Type']:
        Icorr *= G2pwd.Polarization(parmDict[hfx+'Polariz.'],refl[5+im],parmDict[hfx+'Azimuth'])[0]
    if pfx+'SHorder' in parmDict:       #generalized spherical harmonics texture - takes precedence
        POcorr = SHTXcal(refl,im,g,pfx,hfx,SGData,calcControls,parmDict)
    elif calcControls[phfx+'poType'] == 'MD':   #March-Dollase
        POcorr = GetPrefOri(uniq,G,g,phfx,hfx,SGData,calcControls,parmDict)
    elif calcControls[phfx+'SHord']:            #cylindrical spherical harmonics
        POcorr = SHPOcal(refl,im,g,phfx,hfx,SGData,calcControls,parmDict)
    else:
        POcorr = 1.0
    Icorr *= POcorr
    AbsCorr = GetAbsorb(refl,im,hfx,calcControls,parmDict)
    Icorr *= AbsCorr
    ExtCorr = GetPwdrExt(refl,im,pfx,phfx,hfx,calcControls,parmDict)
    Icorr *= ExtCorr
    return Icorr,POcorr,AbsCorr,ExtCorr
def GetIntensityDerv(refl,im,wave,uniq,G,g,pfx,phfx,hfx,SGData,calcControls,parmDict):
    'derivatives of the intensity corrections wrt their parameters' #need powder extinction derivs!
    dIdsh = 1./parmDict[hfx+'Scale']        #histogram scale
    dIdsp = 1./parmDict[phfx+'Scale']       #phase scale
    if 'X' in parmDict[hfx+'Type']:         #x-rays: polarization derivative
        pola,dIdPola = G2pwd.Polarization(parmDict[hfx+'Polariz.'],refl[5+im],parmDict[hfx+'Azimuth'])
        dIdPola /= pola
    else:   #'N'
        dIdPola = 0.0
    dFdODF = {}
    dFdSA = [0,0,0]
    dIdPO = {}
    if pfx+'SHorder' in parmDict:           #generalized spherical harmonics texture
        odfCor,dFdODF,dFdSA = SHTXcalDerv(refl,im,g,pfx,hfx,SGData,calcControls,parmDict)
        for name in dFdODF:
            dFdODF[name] /= odfCor
        dFdSA = [sa/odfCor for sa in dFdSA]
    elif calcControls[phfx+'poType'] == 'MD' or calcControls[phfx+'SHord']:
        POcorr,dIdPO = GetPrefOriDerv(refl,im,uniq,G,g,phfx,hfx,SGData,calcControls,parmDict)
        for name in dIdPO:
            dIdPO[name] /= POcorr
    if 'T' in parmDict[hfx+'Type']:         #column offsets of the stored corrections
        iAb,iEx = 16+im,17+im
    else:
        iAb,iEx = 13+im,14+im
    dFdAb = GetAbsorbDerv(refl,im,hfx,calcControls,parmDict)*wave/refl[iAb]     #wave/abs corr
    dFdEx = GetPwdrExtDerv(refl,im,pfx,phfx,hfx,calcControls,parmDict)/refl[iEx]    #/ext corr
    return dIdsh,dIdsp,dIdPola,dIdPO,dFdODF,dFdSA,dFdAb,dFdEx
def GetSampleSigGam(refl,im,wave,G,GB,SGData,hfx,phfx,calcControls,parmDict):
    '''Sample contributions (crystallite size & microstrain) to the Gaussian
    (sig) & Lorentzian (gam) profile widths for one powder reflection.

    :param refl: reflection record; refl[:3]=hkl, refl[4+im]=d, refl[5+im]=2-theta (CW)
    :param int im: 1 if super structure, else 0
    :param float wave: wavelength (used for CW data only)
    :param np.array G: reciprocal metric tensor
    :param np.array GB: matrix passed to G2pwd.ellipseSize for ellipsoidal
        crystallites - presumably the reciprocal cell B matrix, TODO confirm
    :param dict SGData: space group info. dictionary
    :param str hfx: histogram id string
    :param str phfx: phase:histogram id string
    :param dict calcControls: SizeType/MustrainType selections & unique axes
    :param dict parmDict: Size;i, Size;a, Size;mx, Mustrain;... values
    :returns: sig,gam - Gaussian variance & Lorentzian width contributions;
        the ;mx parameters apportion each effect between the two shapes
    '''
    if 'C' in calcControls[hfx+'histType']:     #All checked & OK
        costh = cosd(refl[5+im]/2.)
        #crystallite size
        if calcControls[phfx+'SizeType'] == 'isotropic':
            Sgam = 1.8*wave/(np.pi*parmDict[phfx+'Size;i']*costh)
        elif calcControls[phfx+'SizeType'] == 'uniaxial':
            H = np.array(refl[:3])
            P = np.array(calcControls[phfx+'SizeAxis'])
            cosP,sinP = G2lat.CosSinAngle(H,P,G)
            Sgam = (1.8*wave/np.pi)/(parmDict[phfx+'Size;i']*parmDict[phfx+'Size;a']*costh)
            Sgam *= np.sqrt((sinP*parmDict[phfx+'Size;a'])**2+(cosP*parmDict[phfx+'Size;i'])**2)
        else:           #ellipsoidal crystallites
            Sij =[parmDict[phfx+'Size;%d'%(i)] for i in range(6)]
            H = np.array(refl[:3])
            lenR = G2pwd.ellipseSize(H,Sij,GB)
            Sgam = 1.8*wave/(np.pi*costh*lenR)
        #microstrain
        if calcControls[phfx+'MustrainType'] == 'isotropic':
            Mgam = 0.018*parmDict[phfx+'Mustrain;i']*tand(refl[5+im]/2.)/np.pi
        elif calcControls[phfx+'MustrainType'] == 'uniaxial':
            H = np.array(refl[:3])
            P = np.array(calcControls[phfx+'MustrainAxis'])
            cosP,sinP = G2lat.CosSinAngle(H,P,G)
            Si = parmDict[phfx+'Mustrain;i']
            Sa = parmDict[phfx+'Mustrain;a']
            Mgam = 0.018*Si*Sa*tand(refl[5+im]/2.)/(np.pi*np.sqrt((Si*cosP)**2+(Sa*sinP)**2))
        else:       #generalized - P.W. Stephens model
            Strms = G2spc.MustrainCoeff(refl[:3],SGData)
            Sum = 0
            for i,strm in enumerate(Strms):
                Sum += parmDict[phfx+'Mustrain;'+str(i)]*strm
            Mgam = 0.018*refl[4+im]**2*tand(refl[5+im]/2.)*np.sqrt(Sum)/np.pi
    elif 'T' in calcControls[hfx+'histType']:       #All checked & OK
        #crystallite size
        if calcControls[phfx+'SizeType'] == 'isotropic':    #OK
            Sgam = 1.e-4*parmDict[hfx+'difC']*refl[4+im]**2/parmDict[phfx+'Size;i']
        elif calcControls[phfx+'SizeType'] == 'uniaxial':   #OK
            H = np.array(refl[:3])
            P = np.array(calcControls[phfx+'SizeAxis'])
            cosP,sinP = G2lat.CosSinAngle(H,P,G)
            Sgam = 1.e-4*parmDict[hfx+'difC']*refl[4+im]**2/(parmDict[phfx+'Size;i']*parmDict[phfx+'Size;a'])
            Sgam *= np.sqrt((sinP*parmDict[phfx+'Size;a'])**2+(cosP*parmDict[phfx+'Size;i'])**2)
        else:           #ellipsoidal crystallites   #OK
            Sij =[parmDict[phfx+'Size;%d'%(i)] for i in range(6)]
            H = np.array(refl[:3])
            lenR = G2pwd.ellipseSize(H,Sij,GB)
            Sgam = 1.e-4*parmDict[hfx+'difC']*refl[4+im]**2/lenR
        #microstrain
        if calcControls[phfx+'MustrainType'] == 'isotropic':    #OK
            Mgam = 1.e-6*parmDict[hfx+'difC']*refl[4+im]*parmDict[phfx+'Mustrain;i']
        elif calcControls[phfx+'MustrainType'] == 'uniaxial':   #OK
            H = np.array(refl[:3])
            P = np.array(calcControls[phfx+'MustrainAxis'])
            cosP,sinP = G2lat.CosSinAngle(H,P,G)
            Si = parmDict[phfx+'Mustrain;i']
            Sa = parmDict[phfx+'Mustrain;a']
            Mgam = 1.e-6*parmDict[hfx+'difC']*refl[4+im]*Si*Sa/np.sqrt((Si*cosP)**2+(Sa*sinP)**2)
        else:       #generalized - P.W. Stephens model  OK
            Strms = G2spc.MustrainCoeff(refl[:3],SGData)
            Sum = 0
            for i,strm in enumerate(Strms):
                Sum += parmDict[phfx+'Mustrain;'+str(i)]*strm
            Mgam = 1.e-6*parmDict[hfx+'difC']*np.sqrt(Sum)*refl[4+im]**3
    #mix the two effects between Lorentzian (gam) & Gaussian (sig) per ;mx
    gam = Sgam*parmDict[phfx+'Size;mx']+Mgam*parmDict[phfx+'Mustrain;mx']
    sig = (Sgam*(1.-parmDict[phfx+'Size;mx']))**2+(Mgam*(1.-parmDict[phfx+'Mustrain;mx']))**2
    sig /= ateln2
    return sig,gam
def GetSampleSigGamDerv(refl,im,wave,G,GB,SGData,hfx,phfx,calcControls,parmDict):
'Needs a doc string'
gamDict = {}
sigDict = {}
if 'C' in calcControls[hfx+'histType']: #All checked & OK
costh = cosd(refl[5+im]/2.)
tanth = tand(refl[5+im]/2.)
#crystallite size derivatives
if calcControls[phfx+'SizeType'] == 'isotropic':
Sgam = 1.8*wave/(np.pi*costh*parmDict[phfx+'Size;i'])
gamDict[phfx+'Size;i'] = -1.8*wave*parmDict[phfx+'Size;mx']/(np.pi*costh*parmDict[phfx+'Size;i']**2)
sigDict[phfx+'Size;i'] = -3.6*Sgam*wave*(1.-parmDict[phfx+'Size;mx'])**2/(np.pi*costh*ateln2)
elif calcControls[phfx+'SizeType'] == 'uniaxial':
H = np.array(refl[:3])
P = np.array(calcControls[phfx+'SizeAxis'])
cosP,sinP = G2lat.CosSinAngle(H,P,G)
Si = parmDict[phfx+'Size;i']
Sa = parmDict[phfx+'Size;a']
gami = 1.8*wave/(costh*np.pi*Si*Sa)
sqtrm = np.sqrt((sinP*Sa)**2+(cosP*Si)**2)
Sgam = gami*sqtrm
dsi = gami*Si*cosP**2/sqtrm-Sgam/Si
dsa = gami*Sa*sinP**2/sqtrm-Sgam/Sa
gamDict[phfx+'Size;i'] = dsi*parmDict[phfx+'Size;mx']
gamDict[phfx+'Size;a'] = dsa*parmDict[phfx+'Size;mx']
sigDict[phfx+'Size;i'] = 2.*dsi*Sgam*(1.-parmDict[phfx+'Size;mx'])**2/ateln2
sigDict[phfx+'Size;a'] = 2.*dsa*Sgam*(1.-parmDict[phfx+'Size;mx'])**2/ateln2
else: #ellipsoidal crystallites
const = 1.8*wave/(np.pi*costh)
Sij =[parmDict[phfx+'Size;%d'%(i)] for i in range(6)]
H = np.array(refl[:3])
lenR,dRdS = G2pwd.ellipseSizeDerv(H,Sij,GB)
Sgam = const/lenR
for i,item in enumerate([phfx+'Size;%d'%(j) for j in range(6)]):
gamDict[item] = -(const/lenR**2)*dRdS[i]*parmDict[phfx+'Size;mx']
sigDict[item] = -2.*Sgam*(const/lenR**2)*dRdS[i]*(1.-parmDict[phfx+'Size;mx'])**2/ateln2
gamDict[phfx+'Size;mx'] = Sgam
sigDict[phfx+'Size;mx'] = -2.*Sgam**2*(1.-parmDict[phfx+'Size;mx'])/ateln2
#microstrain derivatives
if calcControls[phfx+'MustrainType'] == 'isotropic':
Mgam = 0.018*parmDict[phfx+'Mustrain;i']*tand(refl[5+im]/2.)/np.pi
gamDict[phfx+'Mustrain;i'] = 0.018*tanth*parmDict[phfx+'Mustrain;mx']/np.pi
sigDict[phfx+'Mustrain;i'] = 0.036*Mgam*tanth*(1.-parmDict[phfx+'Mustrain;mx'])**2/(np.pi*ateln2)
elif calcControls[phfx+'MustrainType'] == 'uniaxial':
H = np.array(refl[:3])
P = np.array(calcControls[phfx+'MustrainAxis'])
cosP,sinP = G2lat.CosSinAngle(H,P,G)
Si = parmDict[phfx+'Mustrain;i']
Sa = parmDict[phfx+'Mustrain;a']
gami = 0.018*Si*Sa*tanth/np.pi
sqtrm = np.sqrt((Si*cosP)**2+(Sa*sinP)**2)
Mgam = gami/sqtrm
dsi = -gami*Si*cosP**2/sqtrm**3
dsa = -gami*Sa*sinP**2/sqtrm**3
gamDict[phfx+'Mustrain;i'] = (Mgam/Si+dsi)*parmDict[phfx+'Mustrain;mx']
gamDict[phfx+'Mustrain;a'] = (Mgam/Sa+dsa)*parmDict[phfx+'Mustrain;mx']
sigDict[phfx+'Mustrain;i'] = 2*(Mgam/Si+dsi)*Mgam*(1.-parmDict[phfx+'Mustrain;mx'])**2/ateln2
sigDict[phfx+'Mustrain;a'] = 2*(Mgam/Sa+dsa)*Mgam*(1.-parmDict[phfx+'Mustrain;mx'])**2/ateln2
else: #generalized - P.W. Stephens model
const = 0.018*refl[4+im]**2*tanth/np.pi
Strms = G2spc.MustrainCoeff(refl[:3],SGData)
Sum = 0
for i,strm in enumerate(Strms):
Sum += parmDict[phfx+'Mustrain;'+str(i)]*strm
gamDict[phfx+'Mustrain;'+str(i)] = strm*parmDict[phfx+'Mustrain;mx']/2.
sigDict[phfx+'Mustrain;'+str(i)] = strm*(1.-parmDict[phfx+'Mustrain;mx'])**2
Mgam = const*np.sqrt(Sum)
for i in range(len(Strms)):
gamDict[phfx+'Mustrain;'+str(i)] *= Mgam/Sum
sigDict[phfx+'Mustrain;'+str(i)] *= const**2/ateln2
gamDict[phfx+'Mustrain;mx'] = Mgam
sigDict[phfx+'Mustrain;mx'] = -2.*Mgam**2*(1.-parmDict[phfx+'Mustrain;mx'])/ateln2
else: #'T'OF - All checked & OK
if calcControls[phfx+'SizeType'] == 'isotropic': #OK
Sgam = 1.e-4*parmDict[hfx+'difC']*refl[4+im]**2/parmDict[phfx+'Size;i']
gamDict[phfx+'Size;i'] = -Sgam*parmDict[phfx+'Size;mx']/parmDict[phfx+'Size;i']
sigDict[phfx+'Size;i'] = -2.*Sgam**2*(1.-parmDict[phfx+'Size;mx'])**2/(ateln2*parmDict[phfx+'Size;i'])
elif calcControls[phfx+'SizeType'] == 'uniaxial': #OK
const = 1.e-4*parmDict[hfx+'difC']*refl[4+im]**2
H = np.array(refl[:3])
P = np.array(calcControls[phfx+'SizeAxis'])
cosP,sinP = G2lat.CosSinAngle(H,P,G)
Si = parmDict[phfx+'Size;i']
Sa = parmDict[phfx+'Size;a']
gami = const/(Si*Sa)
sqtrm = np.sqrt((sinP*Sa)**2+(cosP*Si)**2)
Sgam = gami*sqtrm
dsi = gami*Si*cosP**2/sqtrm-Sgam/Si
dsa = gami*Sa*sinP**2/sqtrm-Sgam/Sa
gamDict[phfx+'Size;i'] = dsi*parmDict[phfx+'Size;mx']
gamDict[phfx+'Size;a'] = dsa*parmDict[phfx+'Size;mx']
sigDict[phfx+'Size;i'] = 2.*dsi*Sgam*(1.-parmDict[phfx+'Size;mx'])**2/ateln2
sigDict[phfx+'Size;a'] = 2.*dsa*Sgam*(1.-parmDict[phfx+'Size;mx'])**2/ateln2
else: #OK ellipsoidal crystallites
const = 1.e-4*parmDict[hfx+'difC']*refl[4+im]**2
Sij =[parmDict[phfx+'Size;%d'%(i)] for i in range(6)]
H = np.array(refl[:3])
lenR,dRdS = G2pwd.ellipseSizeDerv(H,Sij,GB)
Sgam = const/lenR
for i,item in enumerate([phfx+'Size;%d'%(j) for j in range(6)]):
gamDict[item] = -(const/lenR**2)*dRdS[i]*parmDict[phfx+'Size;mx']
sigDict[item] = -2.*Sgam*(const/lenR**2)*dRdS[i]*(1.-parmDict[phfx+'Size;mx'])**2/ateln2
gamDict[phfx+'Size;mx'] = Sgam #OK
sigDict[phfx+'Size;mx'] = -2.*Sgam**2*(1.-parmDict[phfx+'Size;mx'])/ateln2 #OK
#microstrain derivatives
if calcControls[phfx+'MustrainType'] == 'isotropic':
Mgam = 1.e-6*parmDict[hfx+'difC']*refl[4+im]*parmDict[phfx+'Mustrain;i']
gamDict[phfx+'Mustrain;i'] = 1.e-6*refl[4+im]*parmDict[hfx+'difC']*parmDict[phfx+'Mustrain;mx'] #OK
sigDict[phfx+'Mustrain;i'] = 2.*Mgam**2*(1.-parmDict[phfx+'Mustrain;mx'])**2/(ateln2*parmDict[phfx+'Mustrain;i'])
elif calcControls[phfx+'MustrainType'] == 'uniaxial':
H = np.array(refl[:3])
P = np.array(calcControls[phfx+'MustrainAxis'])
cosP,sinP = G2lat.CosSinAngle(H,P,G)
Si = parmDict[phfx+'Mustrain;i']
Sa = parmDict[phfx+'Mustrain;a']
gami = 1.e-6*parmDict[hfx+'difC']*refl[4+im]*Si*Sa
sqtrm = np.sqrt((Si*cosP)**2+(Sa*sinP)**2)
Mgam = gami/sqtrm
dsi = -gami*Si*cosP**2/sqtrm**3
dsa = -gami*Sa*sinP**2/sqtrm**3
gamDict[phfx+'Mustrain;i'] = (Mgam/Si+dsi)*parmDict[phfx+'Mustrain;mx']
gamDict[phfx+'Mustrain;a'] = (Mgam/Sa+dsa)*parmDict[phfx+'Mustrain;mx']
sigDict[phfx+'Mustrain;i'] = 2*(Mgam/Si+dsi)*Mgam*(1.-parmDict[phfx+'Mustrain;mx'])**2/ateln2
sigDict[phfx+'Mustrain;a'] = 2*(Mgam/Sa+dsa)*Mgam*(1.-parmDict[phfx+'Mustrain;mx'])**2/ateln2
else: #generalized - P.W. Stephens model OK
Strms = G2spc.MustrainCoeff(refl[:3],SGData)
const = 1.e-6*parmDict[hfx+'difC']*refl[4+im]**3
Sum = 0
for i,strm in enumerate(Strms):
Sum += parmDict[phfx+'Mustrain;'+str(i)]*strm
gamDict[phfx+'Mustrain;'+str(i)] = strm*parmDict[phfx+'Mustrain;mx']/2.
sigDict[phfx+'Mustrain;'+str(i)] = strm*(1.-parmDict[phfx+'Mustrain;mx'])**2
Mgam = const*np.sqrt(Sum)
for i in range(len(Strms)):
gamDict[phfx+'Mustrain;'+str(i)] *= Mgam/Sum
sigDict[phfx+'Mustrain;'+str(i)] *= const**2/ateln2
gamDict[phfx+'Mustrain;mx'] = Mgam
sigDict[phfx+'Mustrain;mx'] = -2.*Mgam**2*(1.-parmDict[phfx+'Mustrain;mx'])/ateln2
return sigDict,gamDict
def GetReflPos(refl,im,wave,A,pfx,hfx,calcControls,parmDict):
    '''Compute the corrected peak position for one reflection.

    Stores the computed d-spacing in refl[4+im] as a side effect and returns
    the peak position: 2-theta (deg) for CW histograms (Bragg's law + zero
    offset + sample displacement/transparency shifts) or TOF for 'T'
    histograms (difC/difA/difB polynomial in d + zero offset).
    '''
    if im:      # modulated structure: 4-index reflection (h,k,l,m) + mod. vector
        hklm = np.array(refl[:4])
        modVec = np.array([parmDict[pfx+'mV0'],parmDict[pfx+'mV1'],parmDict[pfx+'mV2']])
        dsp = 1./np.sqrt(G2lat.calc_rDsqSS(hklm,A,modVec))
    else:
        dsp = 1./np.sqrt(G2lat.calc_rDsq(np.array(refl[:3]),A))
    refl[4+im] = dsp    # save d-spacing back into the reflection record
    hType = calcControls[hfx+'histType']
    if 'C' in hType:
        pkPos = 2.0*asind(wave/(2.0*dsp))+parmDict[hfx+'Zero']
        shftC = 9.e-2/(np.pi*parmDict[hfx+'Gonio. radius'])    #shifts in microns
        if 'Bragg' in calcControls[hfx+'instType']:
            corr = 4.*parmDict[hfx+'Shift']*cosd(pkPos/2.0)
            corr += parmDict[hfx+'Transparency']*sind(pkPos)*100.0  #trans(=1/mueff) in cm
            pkPos -= shftC*corr
        else:               #Debye-Scherrer - simple but maybe not right
            pkPos -= shftC*(parmDict[hfx+'DisplaceX']*cosd(pkPos)+
                parmDict[hfx+'DisplaceY']*sind(pkPos))
    elif 'T' in hType:
        pkPos = (parmDict[hfx+'difC']*dsp+parmDict[hfx+'difA']*dsp**2+
            parmDict[hfx+'difB']/dsp+parmDict[hfx+'Zero'])
        #do I need sample position effects - maybe?
    return pkPos
def GetReflPosDerv(refl,im,wave,A,pfx,hfx,calcControls,parmDict):
    '''Compute derivatives of a reflection's peak position with respect to the
    reciprocal cell terms (A0..A5), instrument calibration parameters and (for
    modulated phases) the modulation vector components.

    :returns: for CW ('C') histograms the 8-tuple
      (dpdA,dpdw,dpdZ,dpdSh,dpdTr,dpdX,dpdY,dpdV) where the Bragg-Brentano
      branch fills Shift/Transparency terms and the Debye-Scherrer branch
      fills DisplaceX/DisplaceY terms (unused slots are 0.);
      for TOF ('T') histograms the 6-tuple (dpdA,dpdZ,dpdDC,dpdDA,dpdDB,dpdV).
    '''
    dpr = 180./np.pi    # radians -> degrees
    if im:
        h,k,l,m = refl[:4]
        vec = np.array([parmDict[pfx+'mV0'],parmDict[pfx+'mV1'],parmDict[pfx+'mV2']])
        dstsq = G2lat.calc_rDsqSS(np.array([h,k,l,m]),A,vec)
        h,k,l = [h+m*vec[0],k+m*vec[1],l+m*vec[2]]          #do proj of hklm to hkl so dPdA & dPdV come out right
    else:
        m = 0   # no modulation: dpdV terms vanish (multiplied by m below)
        h,k,l = refl[:3]
        dstsq = G2lat.calc_rDsq(np.array([h,k,l]),A)
    dst = np.sqrt(dstsq)    # 1/d
    dsp = 1./dst            # d-spacing
    if 'C' in calcControls[hfx+'histType']:
        pos = refl[5+im]-parmDict[hfx+'Zero']
        const = dpr/np.sqrt(1.0-wave**2*dstsq/4.0)  # d(2theta)/d(sin theta) factor
        dpdw = const*dst
        dpdA = np.array([h**2,k**2,l**2,h*k,h*l,k*l])*const*wave/(2.0*dst)
        dpdZ = 1.0
        dpdV = np.array([2.*h*A[0]+k*A[3]+l*A[4],2*k*A[1]+h*A[3]+l*A[5],
            2*l*A[2]+h*A[4]+k*A[5]])*m*const*wave/(2.0*dst)
        shft = 9.e-2/(np.pi*parmDict[hfx+'Gonio. radius'])     #shifts in microns
        if 'Bragg' in calcControls[hfx+'instType']:
            dpdSh = -4.*shft*cosd(pos/2.0)
            dpdTr = -shft*sind(pos)*100.0
            return dpdA,dpdw,dpdZ,dpdSh,dpdTr,0.,0.,dpdV
        else:               #Debye-Scherrer - simple but maybe not right
            dpdXd = -shft*cosd(pos)
            dpdYd = -shft*sind(pos)
            return dpdA,dpdw,dpdZ,0.,0.,dpdXd,dpdYd,dpdV
    elif 'T' in calcControls[hfx+'histType']:
        dpdA = -np.array([h**2,k**2,l**2,h*k,h*l,k*l])*parmDict[hfx+'difC']*dsp**3/2.
        dpdZ = 1.0
        dpdDC = dsp         # TOF = difC*d + difA*d^2 + difB/d + Zero
        dpdDA = dsp**2
        dpdDB = 1./dsp
        dpdV = np.array([2.*h*A[0]+k*A[3]+l*A[4],2*k*A[1]+h*A[3]+l*A[5],
            2*l*A[2]+h*A[4]+k*A[5]])*m*parmDict[hfx+'difC']*dsp**3/2.
        return dpdA,dpdZ,dpdDC,dpdDA,dpdDB,dpdV
def GetHStrainShift(refl,im,SGData,phfx,hfx,calcControls,parmDict):
    '''Compute the hydrostatic strain (Dij) contribution to a reflection's
    peak position shift; the symmetry-allowed Dij terms depend on the Laue
    class (and the unique axis for 2/m).

    :returns: position shift (negative sign convention; 2-theta units for CW
      histograms, TOF units otherwise).
    '''
    laue = SGData['SGLaue']
    uniq = SGData['SGUniq']
    h,k,l = refl[:3]
    D = lambda term: parmDict[phfx+term]    # shorthand for this phase/histogram's Dij values
    if laue in ['m3','m3m']:
        Dij = D('D11')*(h**2+k**2+l**2) + \
            refl[4+im]**2*D('eA')*((h*k)**2+(h*l)**2+(k*l)**2)/(h**2+k**2+l**2)**2
    elif laue in ['6/m','6/mmm','3m1','31m','3']:
        Dij = D('D11')*(h**2+k**2+h*k)+D('D33')*l**2
    elif laue in ['3R','3mR']:
        Dij = D('D11')*(h**2+k**2+l**2)+D('D12')*(h*k+h*l+k*l)
    elif laue in ['4/m','4/mmm']:
        Dij = D('D11')*(h**2+k**2)+D('D33')*l**2
    elif laue in ['mmm']:
        Dij = D('D11')*h**2+D('D22')*k**2+D('D33')*l**2
    elif laue in ['2/m']:
        Dij = D('D11')*h**2+D('D22')*k**2+D('D33')*l**2
        if uniq == 'a':         # one off-diagonal term, chosen by unique axis
            Dij += D('D23')*k*l
        elif uniq == 'b':
            Dij += D('D13')*h*l
        elif uniq == 'c':
            Dij += D('D12')*h*k
    else:   # triclinic: all six terms allowed
        Dij = (D('D11')*h**2+D('D22')*k**2+D('D33')*l**2+
            D('D12')*h*k+D('D13')*h*l+D('D23')*k*l)
    if 'C' in calcControls[hfx+'histType']:
        return -180.*Dij*refl[4+im]**2*tand(refl[5+im]/2.0)/np.pi
    return -Dij*parmDict[hfx+'difC']*refl[4+im]**2/2.
def GetHStrainShiftDerv(refl,im,SGData,phfx,hfx,calcControls,parmDict):
    '''Derivatives of the hydrostatic strain peak-position shift with respect
    to the symmetry-allowed Dij (and eA) terms for this Laue class.

    :returns: dict mapping parameter name (phfx+'Dij' etc.) to its derivative.
    '''
    laue = SGData['SGLaue']
    uniq = SGData['SGUniq']
    h,k,l = refl[:3]
    # collect the hkl-dependent factor for each allowed term, then scale all at once
    if laue in ['m3','m3m']:
        terms = [('D11',h**2+k**2+l**2),
            ('eA',refl[4+im]**2*((h*k)**2+(h*l)**2+(k*l)**2)/(h**2+k**2+l**2)**2)]
    elif laue in ['6/m','6/mmm','3m1','31m','3']:
        terms = [('D11',h**2+k**2+h*k),('D33',l**2)]
    elif laue in ['3R','3mR']:
        terms = [('D11',h**2+k**2+l**2),('D12',h*k+h*l+k*l)]
    elif laue in ['4/m','4/mmm']:
        terms = [('D11',h**2+k**2),('D33',l**2)]
    elif laue in ['mmm']:
        terms = [('D11',h**2),('D22',k**2),('D33',l**2)]
    elif laue in ['2/m']:
        terms = [('D11',h**2),('D22',k**2),('D33',l**2)]
        if uniq == 'a':
            terms.append(('D23',k*l))
        elif uniq == 'b':
            terms.append(('D13',h*l))
        elif uniq == 'c':
            terms.append(('D12',h*k))
    else:   # triclinic
        terms = [('D11',h**2),('D22',k**2),('D33',l**2),
            ('D12',h*k),('D13',h*l),('D23',k*l)]
    dDijDict = {phfx+name:val for name,val in terms}
    if 'C' in calcControls[hfx+'histType']:
        scale = 180.0*refl[4+im]**2*tand(refl[5+im]/2.0)/np.pi
    else:
        scale = -parmDict[hfx+'difC']*refl[4+im]**3/2.
    for item in dDijDict:
        dDijDict[item] *= scale
    return dDijDict
def GetDij(phfx,SGData,parmDict):
    '''Collect this phase/histogram's hydrostatic strain values from parmDict
    (in the symmetry-allowed name order) and expand them to the full 6-term
    Dij list via the space group symmetry.
    '''
    names = G2spc.HStrainNames(SGData)
    vals = [parmDict[phfx+nam] for nam in names]
    return G2spc.HStrainVals(vals,SGData)
def GetFobsSq(Histograms,Phases,parmDict,calcControls):
    '''Compute the observed structure factors for Powder histograms and store in reflection array
    Multiprocessing support added

    For each PWDR histogram the apportioned Fo^2 is stored in each phase's
    reflection list (index 8+im) and the Rf, Rf^2, sumInt & Nref residuals are
    stored in Histogram['Residuals']; HKLF histograms only get their hId
    recorded. Bug fix: Fc was computed from ``refl[9]+im`` (offset added to the
    *value*) rather than ``refl[9+im]``, corrupting Rf for modulated phases.
    '''
    if GSASIIpath.GetConfigValue('Show_timing',False):
        starttime = time.time() #; print 'start GetFobsSq'
    histoList = list(Histograms.keys())
    histoList.sort()
    Ka2 = shl = lamRatio = kRatio = None
    for histogram in histoList:
        if 'PWDR' in histogram[:4]:
            Histogram = Histograms[histogram]
            hId = Histogram['hId']
            hfx = ':%d:'%(hId)
            Limits = calcControls[hfx+'Limits']
            if 'C' in calcControls[hfx+'histType']:
                shl = max(parmDict[hfx+'SH/L'],0.0005)
                Ka2 = False
                kRatio = 0.0
                if hfx+'Lam1' in list(parmDict.keys()):     # Ka1/Ka2 doublet present?
                    Ka2 = True
                    lamRatio = 360*(parmDict[hfx+'Lam2']-parmDict[hfx+'Lam1'])/(np.pi*parmDict[hfx+'Lam1'])
                    kRatio = parmDict[hfx+'I(L2)/I(L1)']
            x,y,w,yc,yb,yd = Histogram['Data']
            xMask = ma.getmaskarray(x)
            xB = np.searchsorted(x,Limits[0])
            xF = np.searchsorted(x,Limits[1])
            ymb = np.array(y-yb)
            ymb = np.where(ymb,ymb,1.0)     # avoid 0/0 in the obs/calc ratio below
            ycmb = np.array(yc-yb)
            ratio = 1./np.where(ycmb,ycmb/ymb,1.e10)
            refLists = Histogram['Reflection Lists']
            for phase in refLists:
                if phase not in Phases:     #skips deleted or renamed phases silently!
                    continue
                Phase = Phases[phase]
                if histogram not in Phase['Histograms']:
                    continue
                im = 0
                if Phase['General'].get('Modulated',False):
                    im = 1      # offset in superspace (SS) reflection list
                pId = Phase['pId']
                phfx = '%d:%d:'%(pId,hId)
                refDict = refLists[phase]
                sumInt = 0.0
                nExcl = 0
                # test to see if we are using multiprocessing below
                useMP,ncores = G2mp.InitMP()
                if len(refDict['RefList']) < 100: useMP = False     # not worth the pool overhead
                if useMP: # multiprocessing: create a set of initialized Python processes
                    MPpool = mp.Pool(G2mp.ncores,G2mp.InitFobsSqGlobals,
                                    [x,ratio,shl,xB,xF,im,lamRatio,kRatio,xMask,Ka2])
                    profArgs = [[] for i in range(G2mp.ncores)]
                else:
                    G2mp.InitFobsSqGlobals(x,ratio,shl,xB,xF,im,lamRatio,kRatio,xMask,Ka2)
                if 'C' in calcControls[hfx+'histType']:
                    for iref,refl in enumerate(refDict['RefList']):
                        if useMP:
                            profArgs[iref%G2mp.ncores].append((refl,iref))
                        else:
                            icod = G2mp.ComputeFobsSqCW(refl,iref)
                            if type(icod) is tuple:     # success: (Fo^2, intensity)
                                refl[8+im] = icod[0]
                                sumInt += icod[1]
                                if parmDict[phfx+'LeBail']: refl[9+im] = refl[8+im]
                            elif icod == -1:    # reflection outside limits: mark excluded
                                refl[3+im] *= -1
                                nExcl += 1
                            elif icod == -2:    # above the data range: done with this list
                                break
                    if useMP:
                        for sInt,resList in MPpool.imap_unordered(G2mp.ComputeFobsSqCWbatch,profArgs):
                            sumInt += sInt
                            for refl8im,irefl in resList:
                                if refl8im is None:     # excluded reflection
                                    refDict['RefList'][irefl][3+im] *= -1
                                    nExcl += 1
                                else:
                                    refDict['RefList'][irefl][8+im] = refl8im
                                    if parmDict[phfx+'LeBail']:
                                        refDict['RefList'][irefl][9+im] = refDict['RefList'][irefl][8+im]
                elif 'T' in calcControls[hfx+'histType']:
                    for iref,refl in enumerate(refDict['RefList']):
                        if useMP:
                            profArgs[iref%G2mp.ncores].append((refl,iref))
                        else:
                            icod = G2mp.ComputeFobsSqTOF(refl,iref)
                            if type(icod) is tuple:
                                refl[8+im] = icod[0]
                                sumInt += icod[1]
                                if parmDict[phfx+'LeBail']: refl[9+im] = refl[8+im]
                            elif icod == -1:
                                refl[3+im] *= -1
                                nExcl += 1
                            elif icod == -2:
                                break
                    if useMP:
                        for sInt,resList in MPpool.imap_unordered(G2mp.ComputeFobsSqTOFbatch,profArgs):
                            sumInt += sInt
                            for refl8im,irefl in resList:
                                if refl8im is None:
                                    refDict['RefList'][irefl][3+im] *= -1
                                    nExcl += 1
                                else:
                                    refDict['RefList'][irefl][8+im] = refl8im
                                    if parmDict[phfx+'LeBail']:
                                        refDict['RefList'][irefl][9+im] = refDict['RefList'][irefl][8+im]
                if useMP: MPpool.terminate()
                # accumulate the R-factor sums over the (now updated) reflection list
                sumFo = 0.0
                sumdF = 0.0
                sumFosq = 0.0
                sumdFsq = 0.0
                for iref,refl in enumerate(refDict['RefList']):
                    Fo = np.sqrt(np.abs(refl[8+im]))
                    Fc = np.sqrt(np.abs(refl[9+im]))    # was np.abs(refl[9]+im): wrong index arithmetic
                    sumFo += Fo
                    sumFosq += refl[8+im]**2
                    sumdF += np.abs(Fo-Fc)
                    sumdFsq += (refl[8+im]-refl[9+im])**2
                if sumFo:
                    Histogram['Residuals'][phfx+'Rf'] = min(100.,(sumdF/sumFo)*100.)
                    Histogram['Residuals'][phfx+'Rf^2'] = min(100.,np.sqrt(sumdFsq/sumFosq)*100.)
                else:
                    Histogram['Residuals'][phfx+'Rf'] = 100.
                    Histogram['Residuals'][phfx+'Rf^2'] = 100.
                Histogram['Residuals'][phfx+'sumInt'] = sumInt
                Histogram['Residuals'][phfx+'Nref'] = len(refDict['RefList'])-nExcl
                Histogram['Residuals']['hId'] = hId
        elif 'HKLF' in histogram[:4]:
            Histogram = Histograms[histogram]
            Histogram['Residuals']['hId'] = Histograms[histogram]['hId']
    if GSASIIpath.GetConfigValue('Show_timing',False):
        print ('GetFobsSq t=',time.time()-starttime)
def getPowderProfile(parmDict,x,varylist,Histogram,Phases,calcControls,pawleyLookup,histogram=None):
    '''Computes the powder pattern for a histogram based on contributions from all used phases.

    Returns (yc,yb): the calculated reflection-profile intensities and the
    background, each over the points in x. Also updates each reflection record
    in place (position, sig/gam, intensity corrections) and sets
    Histogram['sumBk'] as a side effect.
    '''
    if GSASIIpath.GetConfigValue('Show_timing',False): starttime = time.time()
    def GetReflSigGamCW(refl,im,wave,G,GB,phfx,calcControls,parmDict):
        '''Instrument (Caglioti U,V,W + X,Y,Z) plus sample (size/mustrain)
        contributions to peak sigma & gamma for a CW reflection; clamped to
        a small positive minimum.'''
        U = parmDict[hfx+'U']
        V = parmDict[hfx+'V']
        W = parmDict[hfx+'W']
        X = parmDict[hfx+'X']
        Y = parmDict[hfx+'Y']
        Z = parmDict[hfx+'Z']
        tanPos = tand(refl[5+im]/2.0)
        Ssig,Sgam = GetSampleSigGam(refl,im,wave,G,GB,SGData,hfx,phfx,calcControls,parmDict)
        sig = U*tanPos**2+V*tanPos+W+Ssig     #save peak sigma
        sig = max(0.001,sig)
        gam = X/cosd(refl[5+im]/2.0)+Y*tanPos+Sgam+Z     #save peak gamma
        gam = max(0.001,gam)
        return sig,gam
    def GetReflSigGamTOF(refl,im,G,GB,phfx,calcControls,parmDict):
        '''Instrument (sig-0/1/2/q, X,Y,Z polynomials in d) plus sample
        contributions to peak sigma & gamma for a TOF reflection.'''
        sig = parmDict[hfx+'sig-0']+parmDict[hfx+'sig-1']*refl[4+im]**2+   \
            parmDict[hfx+'sig-2']*refl[4+im]**4+parmDict[hfx+'sig-q']*refl[4+im]
        gam = parmDict[hfx+'X']*refl[4+im]+parmDict[hfx+'Y']*refl[4+im]**2+parmDict[hfx+'Z']
        Ssig,Sgam = GetSampleSigGam(refl,im,0.0,G,GB,SGData,hfx,phfx,calcControls,parmDict)
        sig += Ssig
        gam += Sgam
        return sig,gam
    def GetReflAlpBet(refl,im,hfx,parmDict):
        '''TOF rise (alpha) & decay (beta) exponential profile coefficients
        from the instrument parameters, as functions of d-spacing.'''
        alp = parmDict[hfx+'alpha']/refl[4+im]
        bet = parmDict[hfx+'beta-0']+parmDict[hfx+'beta-1']/refl[4+im]**4+parmDict[hfx+'beta-q']/refl[4+im]**2
        return alp,bet
    hId = Histogram['hId']
    hfx = ':%d:'%(hId)
    bakType = calcControls[hfx+'bakType']
    # fixed background contributions are stashed under keys starting with "_"
    fixedBkg = {i:Histogram['Background'][1][i] for i in Histogram['Background'][1] if i.startswith("_")}
    yb,Histogram['sumBk'] = G2pwd.getBackground(hfx,parmDict,bakType,calcControls[hfx+'histType'],x,fixedBkg)
    yc = np.zeros_like(yb)
    cw = np.diff(ma.getdata(x))
    cw = np.append(cw,cw[-1])   # step widths; repeat last so len(cw) == len(x)
    if 'C' in calcControls[hfx+'histType']:
        shl = max(parmDict[hfx+'SH/L'],0.002)
        Ka2 = False
        if hfx+'Lam1' in (parmDict.keys()):     # Ka1/Ka2 doublet source
            wave = parmDict[hfx+'Lam1']
            Ka2 = True
            lamRatio = 360*(parmDict[hfx+'Lam2']-parmDict[hfx+'Lam1'])/(np.pi*parmDict[hfx+'Lam1'])
            kRatio = parmDict[hfx+'I(L2)/I(L1)']
        else:
            wave = parmDict[hfx+'Lam']
    else:
        shl = 0.
    for phase in Histogram['Reflection Lists']:
        refDict = Histogram['Reflection Lists'][phase]
        if phase not in Phases:     #skips deleted or renamed phases silently!
            continue
        Phase = Phases[phase]
        if histogram and not histogram in Phase['Histograms']:
            continue
        pId = Phase['pId']
        pfx = '%d::'%(pId)
        phfx = '%d:%d:'%(pId,hId)
        hfx = ':%d:'%(hId)
        SGData = Phase['General']['SGData']
        SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
        im = 0
        if Phase['General'].get('Modulated',False):
            SSGData = Phase['General']['SSGData']
            im = 1  #offset in SS reflection list
            #??
        Dij = GetDij(phfx,SGData,parmDict)
        A = [parmDict[pfx+'A%d'%(i)]+Dij[i] for i in range(6)]  #TODO: need to do someting if Dij << 0.
        G,g = G2lat.A2Gmat(A)       #recip & real metric tensors
        if np.any(np.diag(G)<0.) or np.any(np.isnan(A)):
            raise G2obj.G2Exception('invalid metric tensor \n cell/Dij refinement not advised')
        GA,GB = G2lat.Gmat2AB(G)    #Orthogonalization matricies
        Vst = np.sqrt(nl.det(G))    #V*
        # structure factors only needed when intensities come from the model
        if not Phase['General'].get('doPawley') and not parmDict[phfx+'LeBail']:
            if im:
                SStructureFactor(refDict,G,hfx,pfx,SGData,SSGData,calcControls,parmDict)
            elif parmDict[pfx+'isMag'] and 'N' in calcControls[hfx+'histType']:
                MagStructureFactor2(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
            else:
                StructureFactor2(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
        badPeak = False
        # test to see if we are using multiprocessing here
        useMP,ncores = G2mp.InitMP()
        if len(refDict['RefList']) < 100: useMP = False
        if useMP: # multiprocessing: create a set of initialized Python processes
            MPpool = mp.Pool(ncores,G2mp.InitPwdrProfGlobals,[im,shl,x])
            profArgs = [[] for i in range(ncores)]
        if 'C' in calcControls[hfx+'histType']:
            for iref,refl in enumerate(refDict['RefList']):
                if im:
                    h,k,l,m = refl[:4]
                else:
                    h,k,l = refl[:3]
                Uniq = np.inner(refl[:3],SGMT)
                refl[5+im] = GetReflPos(refl,im,wave,A,pfx,hfx,calcControls,parmDict)         #corrected reflection position
                Lorenz = 1./(2.*sind(refl[5+im]/2.)**2*cosd(refl[5+im]/2.))     #Lorentz correction
                refl[6+im:8+im] = GetReflSigGamCW(refl,im,wave,G,GB,phfx,calcControls,parmDict)    #peak sig & gam
                refl[11+im:15+im] = GetIntensityCorr(refl,im,Uniq,G,g,pfx,phfx,hfx,SGData,calcControls,parmDict)
                refl[11+im] *= Vst*Lorenz
                if Phase['General'].get('doPawley'):
                    try:
                        if im:
                            pInd = pfx+'PWLref:%d'%(pawleyLookup[pfx+'%d,%d,%d,%d'%(h,k,l,m)])
                        else:
                            pInd = pfx+'PWLref:%d'%(pawleyLookup[pfx+'%d,%d,%d'%(h,k,l)])
                        refl[9+im] = parmDict[pInd]
                    except KeyError:
#                        print ' ***Error %d,%d,%d missing from Pawley reflection list ***'%(h,k,l)
                        continue
                Wd,fmin,fmax = G2pwd.getWidthsCW(refl[5+im],refl[6+im],refl[7+im],shl)
                iBeg = np.searchsorted(x,refl[5+im]-fmin)
                iFin = np.searchsorted(x,refl[5+im]+fmax)
                if not iBeg+iFin:       #peak below low limit - skip peak
                    continue
                elif not iBeg-iFin:     #peak above high limit - done
                    break
                elif iBeg > iFin:   #bad peak coeff - skip
                    badPeak = True
                    continue
                if useMP:
                    profArgs[iref%ncores].append((refl[5+im],refl,iBeg,iFin,1.))
                else:
                    yc[iBeg:iFin] += refl[11+im]*refl[9+im]*G2pwd.getFCJVoigt3(refl[5+im],refl[6+im],refl[7+im],shl,ma.getdata(x[iBeg:iFin]))    #>90% of time spent here
                if Ka2:
                    pos2 = refl[5+im]+lamRatio*tand(refl[5+im]/2.0)       # + 360/pi * Dlam/lam * tan(th)
                    Wd,fmin,fmax = G2pwd.getWidthsCW(pos2,refl[6+im],refl[7+im],shl)
                    iBeg = np.searchsorted(x,pos2-fmin)
                    iFin = np.searchsorted(x,pos2+fmax)
                    if not iBeg+iFin:       #peak below low limit - skip peak
                        continue
                    elif not iBeg-iFin:     #peak above high limit - done
                        #NOTE(review): early return here skips any remaining reflections/phases
                        # and bypasses the MP pool collection below - confirm intended vs 'break'
                        return yc,yb
                    elif iBeg > iFin:   #bad peak coeff - skip
                        continue
                    if useMP:
                        profArgs[iref%ncores].append((pos2,refl,iBeg,iFin,kRatio))
                    else:
                        yc[iBeg:iFin] += refl[11+im]*refl[9+im]*kRatio*G2pwd.getFCJVoigt3(pos2,refl[6+im],refl[7+im],shl,ma.getdata(x[iBeg:iFin]))        #and here
        elif 'T' in calcControls[hfx+'histType']:
            for iref,refl in enumerate(refDict['RefList']):
                if im:
                    h,k,l,m = refl[:4]
                else:
                    h,k,l = refl[:3]
                Uniq = np.inner(refl[:3],SGMT)
                refl[5+im] = GetReflPos(refl,im,0.0,A,pfx,hfx,calcControls,parmDict)         #corrected reflection position - #TODO - what about tabluated offset?
                Lorenz = sind(abs(parmDict[hfx+'2-theta'])/2)*refl[4+im]**4                                                #TOF Lorentz correction
#                refl[5+im] += GetHStrainShift(refl,im,SGData,phfx,hfx,calcControls,parmDict)               #apply hydrostatic strain shift
                refl[6+im:8+im] = GetReflSigGamTOF(refl,im,G,GB,phfx,calcControls,parmDict)    #peak sig & gam
                refl[12+im:14+im] = GetReflAlpBet(refl,im,hfx,parmDict)             #TODO - skip if alp, bet tabulated?
                refl[11+im],refl[15+im],refl[16+im],refl[17+im] = GetIntensityCorr(refl,im,Uniq,G,g,pfx,phfx,hfx,SGData,calcControls,parmDict)
                refl[11+im] *= Vst*Lorenz
                if Phase['General'].get('doPawley'):
                    try:
                        if im:
                            pInd =pfx+'PWLref:%d'%(pawleyLookup[pfx+'%d,%d,%d,%d'%(h,k,l,m)])
                        else:
                            pInd =pfx+'PWLref:%d'%(pawleyLookup[pfx+'%d,%d,%d'%(h,k,l)])
                        refl[9+im] = parmDict[pInd]
                    except KeyError:
#                        print ' ***Error %d,%d,%d missing from Pawley reflection list ***'%(h,k,l)
                        continue
                Wd,fmin,fmax = G2pwd.getWidthsTOF(refl[5+im],refl[12+im],refl[13+im],refl[6+im],refl[7+im])
                iBeg = np.searchsorted(x,refl[5+im]-fmin)
                iFin = np.searchsorted(x,refl[5+im]+fmax)
                if not iBeg+iFin:       #peak below low limit - skip peak
                    continue
                elif not iBeg-iFin:     #peak above high limit - done
                    break
                elif iBeg > iFin:   #bad peak coeff - skip
                    badPeak = True
                    continue
                if useMP:
                    profArgs[iref%ncores].append((refl[5+im],refl,iBeg,iFin))
                else:
                    yc[iBeg:iFin] += refl[11+im]*refl[9+im]*G2pwd.getEpsVoigt(refl[5+im],refl[12+im],refl[13+im],refl[6+im],refl[7+im],ma.getdata(x[iBeg:iFin]))/cw[iBeg:iFin]
#        print 'profile calc time: %.3fs'%(time.time()-time0)
        # collect the per-process partial profiles computed above
        if useMP and 'C' in calcControls[hfx+'histType']:
            for y in MPpool.imap_unordered(G2mp.ComputePwdrProfCW,profArgs):
                yc += y
            MPpool.terminate()
        elif useMP:
            for y in MPpool.imap_unordered(G2mp.ComputePwdrProfTOF,profArgs):
                yc += y
            MPpool.terminate()
    if badPeak:
        print ('ouch #4 bad profile coefficients yield negative peak width; some reflections skipped')
    if GSASIIpath.GetConfigValue('Show_timing',False):
        print ('getPowderProfile t=%.3f'%(time.time()-starttime))
    return yc,yb
def getPowderProfileDervMP(args):
    '''Computes the derivatives of the computed powder pattern with respect to all
    refined parameters.
    Multiprocessing version.

    :param list args: (parmDict,x,varylist,Histogram,Phases,rigidbodyDict,
      calcControls,pawleyLookup,dependentVars) optionally followed by
      prc (this process' number), tprc (total number of processes) and the
      histogram name; each process handles reflections iref = prc, prc+tprc,...

    :returns: (dMdv,depDerivDict): a (len(varylist),len(x)) array of profile
      derivatives wrt each varied parameter, and a dict mapping each dependent
      (constrained) variable name to its derivative array.
    '''
    import pytexture as ptx
    ptx.pyqlmninit()            #initialize fortran arrays for spherical harmonics for each processor
    parmDict,x,varylist,Histogram,Phases,rigidbodyDict,calcControls,pawleyLookup,dependentVars = args[:9]
    prc,tprc,histogram = 0,1,None   # defaults: single process does everything
    if len(args) >= 10: prc=args[9]
    if len(args) >= 11: tprc=args[10]
    if len(args) >= 12: histogram=args[11]
    def cellVaryDerv(pfx,SGData,dpdA):
        # map the six dp/dA derivatives onto the cell terms actually varied for this Laue class
        if SGData['SGLaue'] in ['-1',]:
            return [[pfx+'A0',dpdA[0]],[pfx+'A1',dpdA[1]],[pfx+'A2',dpdA[2]],
                [pfx+'A3',dpdA[3]],[pfx+'A4',dpdA[4]],[pfx+'A5',dpdA[5]]]
        elif SGData['SGLaue'] in ['2/m',]:
            if SGData['SGUniq'] == 'a':
                return [[pfx+'A0',dpdA[0]],[pfx+'A1',dpdA[1]],[pfx+'A2',dpdA[2]],[pfx+'A5',dpdA[5]]]
            elif SGData['SGUniq'] == 'b':
                return [[pfx+'A0',dpdA[0]],[pfx+'A1',dpdA[1]],[pfx+'A2',dpdA[2]],[pfx+'A4',dpdA[4]]]
            else:
                return [[pfx+'A0',dpdA[0]],[pfx+'A1',dpdA[1]],[pfx+'A2',dpdA[2]],[pfx+'A3',dpdA[3]]]
        elif SGData['SGLaue'] in ['mmm',]:
            return [[pfx+'A0',dpdA[0]],[pfx+'A1',dpdA[1]],[pfx+'A2',dpdA[2]]]
        elif SGData['SGLaue'] in ['4/m','4/mmm']:
            return [[pfx+'A0',dpdA[0]],[pfx+'A2',dpdA[2]]]
        elif SGData['SGLaue'] in ['6/m','6/mmm','3m1', '31m', '3']:
            return [[pfx+'A0',dpdA[0]],[pfx+'A2',dpdA[2]]]
        elif SGData['SGLaue'] in ['3R', '3mR']:
            return [[pfx+'A0',dpdA[0]+dpdA[1]+dpdA[2]],[pfx+'A3',dpdA[3]+dpdA[4]+dpdA[5]]]
        elif SGData['SGLaue'] in ['m3m','m3']:
            return [[pfx+'A0',dpdA[0]]]
    # create a list of dependent variables and set up a dictionary to hold their derivatives
#    dependentVars = G2mv.GetDependentVars()
    depDerivDict = {}
    for j in dependentVars:
        depDerivDict[j] = np.zeros(shape=(len(x)))
#    print 'dependent vars',dependentVars
    hId = Histogram['hId']
    hfx = ':%d:'%(hId)
    bakType = calcControls[hfx+'bakType']
    dMdv = np.zeros(shape=(len(varylist),len(x)))
    dMdb,dMddb,dMdpk = G2pwd.getBackgroundDerv(hfx,parmDict,bakType,calcControls[hfx+'histType'],x)
    # background derivatives are handled only by process 0 to avoid double counting
    if prc == 0 and hfx+'Back;0' in varylist: # for now assume that Back;x vars to not appear in constraints
        bBpos = varylist.index(hfx+'Back;0')
        dMdv[bBpos:bBpos+len(dMdb)] += dMdb     #TODO crash if bck parms tossed
    names = [hfx+'DebyeA',hfx+'DebyeR',hfx+'DebyeU']
    for name in varylist:
        if prc == 0 and 'Debye' in name:
            Id = int(name.split(';')[-1])
            parm = name[:int(name.rindex(';'))]
            ip = names.index(parm)
            dMdv[varylist.index(name)] += dMddb[3*Id+ip]
    names = [hfx+'BkPkpos',hfx+'BkPkint',hfx+'BkPksig',hfx+'BkPkgam']
    for name in varylist:
        if prc == 0 and 'BkPk' in name:
            parm,Id = name.split(';')
            Id = int(Id)
            if parm in names:
                ip = names.index(parm)
                dMdv[varylist.index(name)] += dMdpk[4*Id+ip]
    cw = np.diff(ma.getdata(x))
    cw = np.append(cw,cw[-1])   # step widths; repeat last so len(cw) == len(x)
    Ka2 = False #also for TOF!
    if 'C' in calcControls[hfx+'histType']:
        shl = max(parmDict[hfx+'SH/L'],0.002)
        if hfx+'Lam1' in (parmDict.keys()):
            wave = parmDict[hfx+'Lam1']
            Ka2 = True
            lamRatio = 360*(parmDict[hfx+'Lam2']-parmDict[hfx+'Lam1'])/(np.pi*parmDict[hfx+'Lam1'])
            kRatio = parmDict[hfx+'I(L2)/I(L1)']
        else:
            wave = parmDict[hfx+'Lam']
    for phase in Histogram['Reflection Lists']:
        refDict = Histogram['Reflection Lists'][phase]
        if phase not in Phases:     #skips deleted or renamed phases silently!
            continue
        Phase = Phases[phase]
        if histogram and histogram not in Phase['Histograms']:
            continue
        SGData = Phase['General']['SGData']
        SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
        im = 0
        if Phase['General'].get('Modulated',False):
            SSGData = Phase['General']['SSGData']
            im = 1  #offset in SS reflection list
            #??
        pId = Phase['pId']
        pfx = '%d::'%(pId)
        phfx = '%d:%d:'%(pId,hId)
        Dij = GetDij(phfx,SGData,parmDict)
        A = [parmDict[pfx+'A%d'%(i)]+Dij[i] for i in range(6)]
        G,g = G2lat.A2Gmat(A)       #recip & real metric tensors
        GA,GB = G2lat.Gmat2AB(G)    #Orthogonalization matricies
        # structure factor derivatives only needed when intensities come from the model
        if not Phase['General'].get('doPawley') and not parmDict[phfx+'LeBail']:
            if im:
                dFdvDict = SStructureFactorDerv(refDict,im,G,hfx,pfx,SGData,SSGData,calcControls,parmDict)
                dFdvDict.update(SStructureFactorDerv2(refDict,im,G,hfx,pfx,SGData,SSGData,calcControls,parmDict))
            else:
                if Phase['General']['Type'] == 'magnetic':
                    dFdvDict = MagStructureFactorDerv(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
                    dFdvDict.update(MagStructureFactorDerv2(refDict,G,hfx,pfx,SGData,calcControls,parmDict))
                else:
                    dFdvDict = StructureFactorDerv2(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
            ApplyRBModelDervs(dFdvDict,parmDict,rigidbodyDict,Phase)
        # determine the parameters that will have derivatives computed only at end
        nonatomvarylist = []
        for name in varylist:
            if '::RBV;' not in name:
                try:
                    aname = name.split(pfx)[1][:2]
                    if aname not in ['Af','dA','AU','RB','AM','Xs','Xc','Ys','Yc','Zs','Zc', \
                        'Tm','Xm','Ym','Zm','U1','U2','U3','MX','MY','MZ']: continue # skip anything not an atom or rigid body param
                except IndexError:
                    continue
            nonatomvarylist.append(name)
        nonatomdependentVars = []
        for name in dependentVars:
            if '::RBV;' not in name:
                try:
                    aname = name.split(pfx)[1][:2]
                    if aname not in ['Af','dA','AU','RB','AM','Xs','Xc','Ys','Yc','Zs','Zc', \
                        'Tm','Xm','Ym','Zm','U1','U2','U3','MX','MY','MZ']: continue # skip anything not an atom or rigid body param
                except IndexError:
                    continue
            nonatomdependentVars.append(name)
        #==========================================================================================
        #==========================================================================================
        # each process handles an interleaved slice of the reflection list
        for iref in range(prc,len(refDict['RefList']),tprc):
            refl = refDict['RefList'][iref]
            if im:
                h,k,l,m = refl[:4]
            else:
                h,k,l = refl[:3]
            Uniq = np.inner(refl[:3],SGMT)
            if 'T' in calcControls[hfx+'histType']:
                wave = refl[14+im]
            dIdsh,dIdsp,dIdpola,dIdPO,dFdODF,dFdSA,dFdAb,dFdEx = GetIntensityDerv(refl,im,wave,Uniq,G,g,pfx,phfx,hfx,SGData,calcControls,parmDict)
            if 'C' in calcControls[hfx+'histType']:        #CW powder
                Wd,fmin,fmax = G2pwd.getWidthsCW(refl[5+im],refl[6+im],refl[7+im],shl)
            else: #'T'OF
                Wd,fmin,fmax = G2pwd.getWidthsTOF(refl[5+im],refl[12+im],refl[13+im],refl[6+im],refl[7+im])
            iBeg = np.searchsorted(x,refl[5+im]-fmin)
            iFin = np.searchsorted(x,refl[5+im]+fmax)
            if not iBeg+iFin:       #peak below low limit - skip peak
                continue
            elif not iBeg-iFin:     #peak above high limit - done
                break
            pos = refl[5+im]
            if 'C' in calcControls[hfx+'histType']:
                tanth = tand(pos/2.0)
                costh = cosd(pos/2.0)
                lenBF = iFin-iBeg
                dMdpk = np.zeros(shape=(6,lenBF))
                dMdipk = G2pwd.getdFCJVoigt3(refl[5+im],refl[6+im],refl[7+im],shl,ma.getdata(x[iBeg:iFin]))
                for i in range(5):
                    dMdpk[i] += 100.*cw[iBeg:iFin]*refl[11+im]*refl[9+im]*dMdipk[i]
                dervDict = {'int':dMdpk[0],'pos':dMdpk[1],'sig':dMdpk[2],'gam':dMdpk[3],'shl':dMdpk[4],'L1/L2':np.zeros_like(dMdpk[0])}
                if Ka2:
                    pos2 = refl[5+im]+lamRatio*tanth       # + 360/pi * Dlam/lam * tan(th)
                    iBeg2 = np.searchsorted(x,pos2-fmin)
                    iFin2 = np.searchsorted(x,pos2+fmax)
                    if iBeg2-iFin2:     # any points under the Ka2 peak?
                        lenBF2 = iFin2-iBeg2
                        dMdpk2 = np.zeros(shape=(6,lenBF2))
                        dMdipk2 = G2pwd.getdFCJVoigt3(pos2,refl[6+im],refl[7+im],shl,ma.getdata(x[iBeg2:iFin2]))
                        for i in range(5):
                            dMdpk2[i] = 100.*cw[iBeg2:iFin2]*refl[11+im]*refl[9+im]*kRatio*dMdipk2[i]
                        dMdpk2[5] = 100.*cw[iBeg2:iFin2]*refl[11+im]*dMdipk2[0]
                        #NOTE(review): refl[9] (not refl[9+im]) below - confirm for modulated data
                        dervDict2 = {'int':dMdpk2[0],'pos':dMdpk2[1],'sig':dMdpk2[2],'gam':dMdpk2[3],'shl':dMdpk2[4],'L1/L2':dMdpk2[5]*refl[9]}
            else:   #'T'OF
                lenBF = iFin-iBeg
                if lenBF < 0:   #bad peak coeff
                    break
                dMdpk = np.zeros(shape=(6,lenBF))
                dMdipk = G2pwd.getdEpsVoigt(refl[5+im],refl[12+im],refl[13+im],refl[6+im],refl[7+im],ma.getdata(x[iBeg:iFin]))
                for i in range(6):
                    dMdpk[i] += refl[11+im]*refl[9+im]*dMdipk[i] #cw[iBeg:iFin]*
                dervDict = {'int':dMdpk[0],'pos':dMdpk[1],'alp':dMdpk[2],'bet':dMdpk[3],'sig':dMdpk[4],'gam':dMdpk[5]}
            if Phase['General'].get('doPawley'):
                dMdpw = np.zeros(len(x))
                try:
                    if im:
                        pIdx = pfx+'PWLref:'+str(pawleyLookup[pfx+'%d,%d,%d,%d'%(h,k,l,m)])
                    else:
                        pIdx = pfx+'PWLref:'+str(pawleyLookup[pfx+'%d,%d,%d'%(h,k,l)])
                    idx = varylist.index(pIdx)
                    dMdpw[iBeg:iFin] = dervDict['int']/refl[9+im]
                    if Ka2: #not for TOF either
                        dMdpw[iBeg2:iFin2] += dervDict2['int']/refl[9+im]
                    dMdv[idx] = dMdpw
                except: # ValueError:
                    pass
            if 'C' in calcControls[hfx+'histType']:
                dpdA,dpdw,dpdZ,dpdSh,dpdTr,dpdX,dpdY,dpdV = GetReflPosDerv(refl,im,wave,A,pfx,hfx,calcControls,parmDict)
                # map each instrument/sample parameter to (derivative value, profile-derivative key)
                names = {hfx+'Scale':[dIdsh,'int'],hfx+'Polariz.':[dIdpola,'int'],phfx+'Scale':[dIdsp,'int'],
                    hfx+'U':[tanth**2,'sig'],hfx+'V':[tanth,'sig'],hfx+'W':[1.0,'sig'],
                    hfx+'X':[1.0/costh,'gam'],hfx+'Y':[tanth,'gam'],hfx+'Z':[1.0,'gam'],hfx+'SH/L':[1.0,'shl'],
                    hfx+'I(L2)/I(L1)':[1.0,'L1/L2'],hfx+'Zero':[dpdZ,'pos'],hfx+'Lam':[dpdw,'pos'],
                    hfx+'Shift':[dpdSh,'pos'],hfx+'Transparency':[dpdTr,'pos'],hfx+'DisplaceX':[dpdX,'pos'],
                    hfx+'DisplaceY':[dpdY,'pos'],phfx+'Extinction':[dFdEx,'int'],}
                if 'Bragg' in calcControls[hfx+'instType']:
                    names.update({hfx+'SurfRoughA':[dFdAb[0],'int'],
                        hfx+'SurfRoughB':[dFdAb[1],'int'],})
                else:
                    names.update({hfx+'Absorption':[dFdAb,'int'],})
            else:   #'T'OF
                dpdA,dpdZ,dpdDC,dpdDA,dpdDB,dpdV = GetReflPosDerv(refl,im,0.0,A,pfx,hfx,calcControls,parmDict)
                names = {hfx+'Scale':[dIdsh,'int'],phfx+'Scale':[dIdsp,'int'],
                    hfx+'difC':[dpdDC,'pos'],hfx+'difA':[dpdDA,'pos'],hfx+'difB':[dpdDB,'pos'],
                    hfx+'Zero':[dpdZ,'pos'],hfx+'X':[refl[4+im],'gam'],hfx+'Y':[refl[4+im]**2,'gam'],hfx+'Z':[1.0,'gam'],
                    hfx+'alpha':[1./refl[4+im],'alp'],hfx+'beta-0':[1.0,'bet'],hfx+'beta-1':[1./refl[4+im]**4,'bet'],
                    hfx+'beta-q':[1./refl[4+im]**2,'bet'],hfx+'sig-0':[1.0,'sig'],hfx+'sig-1':[refl[4+im]**2,'sig'],
                    hfx+'sig-2':[refl[4+im]**4,'sig'],hfx+'sig-q':[refl[4+im],'sig'],
                    hfx+'Absorption':[dFdAb,'int'],phfx+'Extinction':[dFdEx,'int'],}
            for name in names:
                item = names[name]
                if name in varylist:
                    dMdv[varylist.index(name)][iBeg:iFin] += item[0]*dervDict[item[1]]
                    if Ka2 and iFin2-iBeg2:
                        dMdv[varylist.index(name)][iBeg2:iFin2] += item[0]*dervDict2[item[1]]
                elif name in dependentVars:
                    depDerivDict[name][iBeg:iFin] += item[0]*dervDict[item[1]]
                    if Ka2 and iFin2-iBeg2:
                        depDerivDict[name][iBeg2:iFin2] += item[0]*dervDict2[item[1]]
            for iPO in dIdPO:   # preferred orientation derivatives
                if iPO in varylist:
                    dMdv[varylist.index(iPO)][iBeg:iFin] += dIdPO[iPO]*dervDict['int']
                    if Ka2 and iFin2-iBeg2:
                        dMdv[varylist.index(iPO)][iBeg2:iFin2] += dIdPO[iPO]*dervDict2['int']
                elif iPO in dependentVars:
                    depDerivDict[iPO][iBeg:iFin] += dIdPO[iPO]*dervDict['int']
                    if Ka2 and iFin2-iBeg2:
                        depDerivDict[iPO][iBeg2:iFin2] += dIdPO[iPO]*dervDict2['int']
            for i,name in enumerate(['omega','chi','phi']):     # sample orientation (texture) derivatives
                aname = pfx+'SH '+name
                if aname in varylist:
                    dMdv[varylist.index(aname)][iBeg:iFin] += dFdSA[i]*dervDict['int']
                    if Ka2 and iFin2-iBeg2:
                        dMdv[varylist.index(aname)][iBeg2:iFin2] += dFdSA[i]*dervDict2['int']
                elif aname in dependentVars:
                    depDerivDict[aname][iBeg:iFin] += dFdSA[i]*dervDict['int']
                    if Ka2 and iFin2-iBeg2:
                        depDerivDict[aname][iBeg2:iFin2] += dFdSA[i]*dervDict2['int']
            for iSH in dFdODF:  # spherical harmonics (ODF) derivatives
                if iSH in varylist:
                    dMdv[varylist.index(iSH)][iBeg:iFin] += dFdODF[iSH]*dervDict['int']
                    if Ka2 and iFin2-iBeg2:
                        dMdv[varylist.index(iSH)][iBeg2:iFin2] += dFdODF[iSH]*dervDict2['int']
                elif iSH in dependentVars:
                    depDerivDict[iSH][iBeg:iFin] += dFdODF[iSH]*dervDict['int']
                    if Ka2 and iFin2-iBeg2:
                        depDerivDict[iSH][iBeg2:iFin2] += dFdODF[iSH]*dervDict2['int']
            cellDervNames = cellVaryDerv(pfx,SGData,dpdA)
            for name,dpdA in cellDervNames:
                if name in varylist:
                    dMdv[varylist.index(name)][iBeg:iFin] += dpdA*dervDict['pos']
                    if Ka2 and iFin2-iBeg2:
                        dMdv[varylist.index(name)][iBeg2:iFin2] += dpdA*dervDict2['pos']
                elif name in dependentVars: #need to scale for mixed phase constraints?
                    depDerivDict[name][iBeg:iFin] += dpdA*dervDict['pos']
                    if Ka2 and iFin2-iBeg2:
                        depDerivDict[name][iBeg2:iFin2] += dpdA*dervDict2['pos']
            dDijDict = GetHStrainShiftDerv(refl,im,SGData,phfx,hfx,calcControls,parmDict)
            for name in dDijDict:   # hydrostatic strain derivatives
                if name in varylist:
                    dMdv[varylist.index(name)][iBeg:iFin] += dDijDict[name]*dervDict['pos']
                    if Ka2 and iFin2-iBeg2:
                        dMdv[varylist.index(name)][iBeg2:iFin2] += dDijDict[name]*dervDict2['pos']
                elif name in dependentVars:
                    depDerivDict[name][iBeg:iFin] += dDijDict[name]*dervDict['pos']
                    if Ka2 and iFin2-iBeg2:
                        depDerivDict[name][iBeg2:iFin2] += dDijDict[name]*dervDict2['pos']
            for i,name in enumerate([pfx+'mV0',pfx+'mV1',pfx+'mV2']):   # modulation vector derivatives
                if name in varylist:
                    dMdv[varylist.index(name)][iBeg:iFin] += dpdV[i]*dervDict['pos']
                    if Ka2 and iFin2-iBeg2:
                        dMdv[varylist.index(name)][iBeg2:iFin2] += dpdV[i]*dervDict2['pos']
                elif name in dependentVars:
                    depDerivDict[name][iBeg:iFin] += dpdV[i]*dervDict['pos']
                    if Ka2 and iFin2-iBeg2:
                        depDerivDict[name][iBeg2:iFin2] += dpdV[i]*dervDict2['pos']
            if 'C' in calcControls[hfx+'histType']:
                sigDict,gamDict = GetSampleSigGamDerv(refl,im,wave,G,GB,SGData,hfx,phfx,calcControls,parmDict)
            else:   #'T'OF
                sigDict,gamDict = GetSampleSigGamDerv(refl,im,0.0,G,GB,SGData,hfx,phfx,calcControls,parmDict)
            for name in gamDict:
                if name in varylist:
                    dMdv[varylist.index(name)][iBeg:iFin] += gamDict[name]*dervDict['gam']
                    if Ka2 and iFin2-iBeg2:
                        dMdv[varylist.index(name)][iBeg2:iFin2] += gamDict[name]*dervDict2['gam']
                elif name in dependentVars:
                    depDerivDict[name][iBeg:iFin] += gamDict[name]*dervDict['gam']
                    if Ka2 and iFin2-iBeg2:
                        depDerivDict[name][iBeg2:iFin2] += gamDict[name]*dervDict2['gam']
            for name in sigDict:
                if name in varylist:
                    dMdv[varylist.index(name)][iBeg:iFin] += sigDict[name]*dervDict['sig']
                    if Ka2 and iFin2-iBeg2:
                        dMdv[varylist.index(name)][iBeg2:iFin2] += sigDict[name]*dervDict2['sig']
                elif name in dependentVars:
                    depDerivDict[name][iBeg:iFin] += sigDict[name]*dervDict['sig']
                    if Ka2 and iFin2-iBeg2:
                        depDerivDict[name][iBeg2:iFin2] += sigDict[name]*dervDict2['sig']
            for name in ['BabA','BabU']:    # Babinet solvent scattering derivatives
                if refl[9+im]:
                    if phfx+name in varylist:
                        dMdv[varylist.index(phfx+name)][iBeg:iFin] += parmDict[phfx+'Scale']*dFdvDict[phfx+name][iref]*dervDict['int']/refl[9+im]
                        if Ka2 and iFin2-iBeg2:
                            dMdv[varylist.index(phfx+name)][iBeg2:iFin2] += parmDict[phfx+'Scale']*dFdvDict[phfx+name][iref]*dervDict2['int']/refl[9+im]
                    elif phfx+name in dependentVars:
                        depDerivDict[phfx+name][iBeg:iFin] += parmDict[phfx+'Scale']*dFdvDict[phfx+name][iref]*dervDict['int']/refl[9+im]
                        if Ka2 and iFin2-iBeg2:
                            depDerivDict[phfx+name][iBeg2:iFin2] += parmDict[phfx+'Scale']*dFdvDict[phfx+name][iref]*dervDict2['int']/refl[9+im]
            if not Phase['General'].get('doPawley') and not parmDict[phfx+'LeBail']:
                #do atom derivatives - for RB,F,X & U so far - how do I scale mixed phase constraints?
                corr = 0.
                corr2 = 0.
                if refl[9+im]:
                    corr = dervDict['int']/refl[9+im]
                    if Ka2 and iFin2-iBeg2:
                        corr2 = dervDict2['int']/refl[9+im]
                for name in nonatomvarylist:
                    dMdv[varylist.index(name)][iBeg:iFin] += dFdvDict[name][iref]*corr
                    if Ka2 and iFin2-iBeg2:
                        dMdv[varylist.index(name)][iBeg2:iFin2] += dFdvDict[name][iref]*corr2
                for name in nonatomdependentVars:
                    depDerivDict[name][iBeg:iFin] += dFdvDict[name][iref]*corr
                    if Ka2 and iFin2-iBeg2:
                        depDerivDict[name][iBeg2:iFin2] += dFdvDict[name][iref]*corr2
#        now process derivatives in constraints
    dMdv[:,ma.getmaskarray(x)] = 0.  # instead of masking, zero out masked values
    #G2mv.Dict2Deriv(varylist,depDerivDict,dMdv)
    return dMdv,depDerivDict
def UserRejectHKL(ref,im,userReject):
    '''Apply the user-defined rejection criteria to a single HKLF reflection.

    :param list ref: one row of the reflection list; uses the d-spacing
      (ref[4+im]), Fo^2 (ref[5+im]), sig(Fo^2) (ref[6+im]), Fc^2 (ref[7+im])
      and the extinction correction (ref[11+im])
    :param int im: offset into the reflection row; 1 for super-space
      (modulated) reflection lists, otherwise 0
    :param dict userReject: rejection limits with keys 'minF/sig', 'MinD',
      'MaxD', 'MinExt' and 'MaxDF/F'
    :returns: True if the reflection passes all tests (use it), False if it
      should be rejected
    '''
    if ref[5+im]/ref[6+im] < userReject['minF/sig']:
        return False
    # Reject d-spacings outside the user limits [MinD,MaxD]. The previous
    # chained comparison (MaxD < d > MinD) was only true when d exceeded
    # *both* limits, so reflections with d below MinD were never rejected.
    if not (userReject['MinD'] <= ref[4+im] <= userReject['MaxD']):
        return False
    if ref[11+im] < userReject['MinExt']:
        return False
    if abs(ref[5+im]-ref[7+im])/ref[6+im] > userReject['MaxDF/F']:
        return False
    return True
def dervHKLF(Histogram,Phase,calcControls,varylist,parmDict,rigidbodyDict):
    '''Loop over reflections in a HKLF histogram and compute derivatives of the fitting
    model (M) with respect to all parameters. Independent and dependant dM/dp arrays
    are returned to either dervRefine or HessRefine.

    :returns: (dMdvh,depDerivDict,wdf) where dMdvh is the (nvary x nrefl)
      derivative array for the varied parameters, depDerivDict maps each
      dependent (constrained) parameter name to its derivative vector, and
      wdf holds the weighted residual for each reflection.
    '''
    hId = Histogram['hId']
    hfx = ':%d:'%(hId)
    pfx = '%d::'%(Phase['pId'])
    phfx = '%d:%d:'%(Phase['pId'],hId)
    SGData = Phase['General']['SGData']
    im = 0
    if Phase['General'].get('Modulated',False):
        SSGData = Phase['General']['SSGData']
        im = 1  #offset in SS reflection list
    A = [parmDict[pfx+'A%d'%(i)] for i in range(6)]
    G,g = G2lat.A2Gmat(A)       #recip & real metric tensors
    TwinLaw = calcControls[phfx+'TwinLaw']
    refDict = Histogram['Data']
    if parmDict[phfx+'Scale'] < 0.:     #keep phase scale positive
        parmDict[phfx+'Scale'] = .001
    # select the structure-factor derivative routine: modulated (im) vs not,
    # twinned vs single, magnetic vs nuclear
    if im: # split to nontwin/twin versions
        if len(TwinLaw) > 1:
            dFdvDict = SStructureFactorDervTw(refDict,im,G,hfx,pfx,SGData,SSGData,calcControls,parmDict)    #not ok
        else:
            dFdvDict = SStructureFactorDerv(refDict,im,G,hfx,pfx,SGData,SSGData,calcControls,parmDict)  #OK
            dFdvDict.update(SStructureFactorDerv2(refDict,im,G,hfx,pfx,SGData,SSGData,calcControls,parmDict))
    else:
        if len(TwinLaw) > 1:
            dFdvDict = StructureFactorDervTw2(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
        else:   #correct!!
            if Phase['General']['Type'] == 'magnetic':
                dFdvDict = MagStructureFactorDerv(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
                dFdvDict.update(MagStructureFactorDerv2(refDict,G,hfx,pfx,SGData,calcControls,parmDict))
            else:
                dFdvDict = StructureFactorDerv2(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
    ApplyRBModelDervs(dFdvDict,parmDict,rigidbodyDict,Phase)
    dMdvh = np.zeros((len(varylist),len(refDict['RefList'])))
    dependentVars = G2mv.GetDependentVars()
    depDerivDict = {}
    for j in dependentVars:
        depDerivDict[j] = np.zeros(shape=(len(refDict['RefList'])))
    wdf = np.zeros(len(refDict['RefList']))
    if calcControls['F**2']:
        # refinement against F^2; only reflections with sig(F^2) > 0 contribute
        for iref,ref in enumerate(refDict['RefList']):
            if ref[6+im] > 0:
                dervDict,dervCor = SCExtinction(ref,im,phfx,hfx,pfx,calcControls,parmDict,varylist+dependentVars)[1:]
                w = 1.0/ref[6+im]   # weight = 1/sig(F^2)
                if ref[3+im] > 0:   # only allowed (non-rejected) reflections enter the residual
                    wdf[iref] = w*(ref[5+im]-ref[7+im])
                    for j,var in enumerate(varylist):
                        if var in dFdvDict:
                            dMdvh[j][iref] = w*dFdvDict[var][iref]*parmDict[phfx+'Scale']*ref[11+im]
                    for var in dependentVars:
                        if var in dFdvDict:
                            depDerivDict[var][iref] = w*dFdvDict[var][iref]*parmDict[phfx+'Scale']*ref[11+im]
                    if phfx+'Scale' in varylist:
                        dMdvh[varylist.index(phfx+'Scale')][iref] = w*ref[7+im]*ref[11+im]/parmDict[phfx+'Scale']  #OK
                    elif phfx+'Scale' in dependentVars:
                        depDerivDict[phfx+'Scale'][iref] = w*ref[7+im]*ref[11+im]/parmDict[phfx+'Scale']   #OK
                    for item in ['Ep','Es','Eg']:   # extinction parameter derivatives
                        if phfx+item in varylist and phfx+item in dervDict:
                            dMdvh[varylist.index(phfx+item)][iref] = w*dervDict[phfx+item]/ref[11+im]  #OK
                        elif phfx+item in dependentVars and phfx+item in dervDict:
                            depDerivDict[phfx+item][iref] = w*dervDict[phfx+item]/ref[11+im]   #OK
                    for item in ['BabA','BabU']:    # Babinet solvent-correction derivatives
                        if phfx+item in varylist:
                            dMdvh[varylist.index(phfx+item)][iref] = w*dFdvDict[phfx+item][iref]*parmDict[phfx+'Scale']*ref[11+im]
                        elif phfx+item in dependentVars:
                            depDerivDict[phfx+item][iref] = w*dFdvDict[phfx+item][iref]*parmDict[phfx+'Scale']*ref[11+im]
    else:   #F refinement
        for iref,ref in enumerate(refDict['RefList']):
            if ref[5+im] > 0.:
                dervDict,dervCor = SCExtinction(ref,im,phfx,hfx,pfx,calcControls,parmDict,varylist+dependentVars)[1:]
                Fo = np.sqrt(ref[5+im])
                Fc = np.sqrt(ref[7+im])
                w = 1.0/ref[6+im]
                if ref[3+im] > 0:
                    wdf[iref] = 2.0*Fc*w*(Fo-Fc)    # chain rule: d(F^2) = 2F dF
                    for j,var in enumerate(varylist):
                        if var in dFdvDict:
                            dMdvh[j][iref] = w*dFdvDict[var][iref]*parmDict[phfx+'Scale']*ref[11+im]
                    for var in dependentVars:
                        if var in dFdvDict:
                            depDerivDict[var][iref] = w*dFdvDict[var][iref]*parmDict[phfx+'Scale']*ref[11+im]
                    if phfx+'Scale' in varylist:
                        dMdvh[varylist.index(phfx+'Scale')][iref] = w*ref[7+im]*ref[11+im]/parmDict[phfx+'Scale']  #OK
                    elif phfx+'Scale' in dependentVars:
                        depDerivDict[phfx+'Scale'][iref] = w*ref[7+im]*ref[11+im]/parmDict[phfx+'Scale']   #OK
                    for item in ['Ep','Es','Eg']:   #OK!
                        if phfx+item in varylist and phfx+item in dervDict:
                            dMdvh[varylist.index(phfx+item)][iref] = w*dervDict[phfx+item]/ref[11+im]
                        elif phfx+item in dependentVars and phfx+item in dervDict:
                            depDerivDict[phfx+item][iref] = w*dervDict[phfx+item]/ref[11+im]
                    for item in ['BabA','BabU']:
                        if phfx+item in varylist:
                            dMdvh[varylist.index(phfx+item)][iref] = w*dFdvDict[phfx+item][iref]*parmDict[phfx+'Scale']*ref[11+im]
                        elif phfx+item in dependentVars:
                            depDerivDict[phfx+item][iref] = w*dFdvDict[phfx+item][iref]*parmDict[phfx+'Scale']*ref[11+im]
    return dMdvh,depDerivDict,wdf
def dervRefine(values,HistoPhases,parmDict,varylist,calcControls,pawleyLookup,dlg):
    '''Loop over histograms and compute derivatives of the fitting
    model (M) with respect to all parameters. Results are returned in
    a Jacobian matrix (aka design matrix) of dimensions (n by m) where
    n is the number of parameters and m is the number of data
    points. This can exceed memory when m gets large. This routine is
    used when refinement derivatives are selected as "analytic
    Jacobian" in Controls.

    :returns: Jacobian numpy.array dMdV for all histograms concatenated
    '''
    parmDict.update(zip(varylist,values))
    G2mv.Dict2Map(parmDict,varylist)    # apply constraints to the updated parameters
    Histograms,Phases,restraintDict,rigidbodyDict = HistoPhases
    dependentVars = G2mv.GetDependentVars()
    histoList = list(Histograms.keys())
    histoList.sort()
    First = True
    for histogram in histoList:
        if 'PWDR' in histogram[:4]:
            Histogram = Histograms[histogram]
            hId = Histogram['hId']
            hfx = ':%d:'%(hId)
            wtFactor = calcControls[hfx+'wtFactor']
            Limits = calcControls[hfx+'Limits']
            x,y,w,yc,yb,yd = Histogram['Data']
            xB = np.searchsorted(x,Limits[0])
            xF = np.searchsorted(x,Limits[1])+1
            dMdv,depDerivDict = getPowderProfileDervMP([parmDict,x[xB:xF],
                varylist,Histogram,Phases,rigidbodyDict,calcControls,pawleyLookup,dependentVars])
            # fold derivatives of dependent (constrained) parameters into dMdv
            G2mv.Dict2Deriv(varylist,depDerivDict,dMdv)
            dMdvh = np.sqrt(w[xB:xF])*dMdv      # weight each data point by 1/sigma
        elif 'HKLF' in histogram[:4]:
            Histogram = Histograms[histogram]
            phase = Histogram['Reflection Lists']
            Phase = Phases[phase]
            dMdvh,depDerivDict,wdf = dervHKLF(Histogram,Phase,calcControls,varylist,parmDict,rigidbodyDict)
            hfx = ':%d:'%(Histogram['hId'])
            wtFactor = calcControls[hfx+'wtFactor']
            # now process derivatives in constraints
            G2mv.Dict2Deriv(varylist,depDerivDict,dMdvh)
        else:
            continue        #skip non-histogram entries
        # concatenate this histogram's (weighted) Jacobian onto the total
        if First:
            dMdV = np.sqrt(wtFactor)*dMdvh
            First = False
        else:
            dMdV = np.concatenate((dMdV.T,np.sqrt(wtFactor)*dMdvh.T)).T
    GetFobsSq(Histograms,Phases,parmDict,calcControls)
    # append restraint (penalty-function) derivatives, if any
    pNames,pVals,pWt,pWsum,pWnum = penaltyFxn(HistoPhases,calcControls,parmDict,varylist)
    if np.any(pVals):
        dpdv = penaltyDeriv(pNames,pVals,HistoPhases,calcControls,parmDict,varylist)
        dMdV = np.concatenate((dMdV.T,(np.sqrt(pWt)*dpdv).T)).T
    return dMdV
def HessRefine(values,HistoPhases,parmDict,varylist,calcControls,pawleyLookup,dlg):
    '''Loop over histograms and compute derivatives of the fitting
    model (M) with respect to all parameters. For each histogram, the
    Jacobian matrix, dMdv, with dimensions (n by m) where n is the
    number of parameters and m is the number of data points *in the
    histogram*. The (n by n) Hessian is computed from each Jacobian
    and it is returned. This routine is used when refinement
    derivatives are selected as "analytic Hessian" in Controls.

    :returns: Vec,Hess where Vec is the least-squares vector and Hess is the Hessian
    '''
    parmDict.update(zip(varylist,values))
    G2mv.Dict2Map(parmDict,varylist)    # apply constraints to the updated parameters
    Histograms,Phases,restraintDict,rigidbodyDict = HistoPhases
    dependentVars = G2mv.GetDependentVars()
    #fixup H atom positions here?
    ApplyRBModels(parmDict,Phases,rigidbodyDict)        #,Update=True??
    Hess = np.empty(0)
    Vec = np.empty(0)
    histoList = list(Histograms.keys())
    histoList.sort()
    for histogram in histoList:
        if 'PWDR' in histogram[:4]:
            Histogram = Histograms[histogram]
            hId = Histogram['hId']
            hfx = ':%d:'%(hId)
            wtFactor = calcControls[hfx+'wtFactor']
            Limits = calcControls[hfx+'Limits']
            x,y,w,yc,yb,yd = Histogram['Data']
            W = wtFactor*w
            dy = y-yc
            xB = np.searchsorted(x,Limits[0])
            xF = np.searchsorted(x,Limits[1])+1
            useMP,ncores = G2mp.InitMP()
            if GSASIIpath.GetConfigValue('Show_timing',False): starttime = time.time()
            if useMP:
                # compute the profile derivatives in parallel; each worker
                # returns a partial dMdv that is summed into the total
                MPpool = mp.Pool(ncores)
                dMdvh = None
                depDerivDict = None
                # old approach, create all args prior to use
#                profArgs = [
#                    (parmDict,x[xB:xF],varylist,Histogram,Phases,rigidbodyDict,calcControls,pawleyLookup,dependentVars,
#                     i,ncores,histogram) for i in range(ncores)]
#                for dmdv,depDerivs in MPpool.imap_unordered(getPowderProfileDervMP,profArgs):
                # better, use a generator so arg is created as used
                profGenArgs = (
                    (parmDict,x[xB:xF],varylist,Histogram,Phases,rigidbodyDict,calcControls,pawleyLookup,dependentVars,
                     i,ncores,histogram) for i in range(ncores))
                for dmdv,depDerivs in MPpool.imap_unordered(getPowderProfileDervMP,profGenArgs):
                    if dMdvh is None:
                        dMdvh = dmdv
                        depDerivDict = depDerivs
                    else:
                        dMdvh += dmdv
                        for key in depDerivs.keys(): depDerivDict[key] += depDerivs[key]
                MPpool.terminate()
            else:
                dMdvh,depDerivDict = getPowderProfileDervMP([parmDict,x[xB:xF],
                    varylist,Histogram,Phases,rigidbodyDict,calcControls,pawleyLookup,dependentVars,0,1,histogram])
                #dMdvh = getPowderProfileDerv(parmDict,x[xB:xF],
                #    varylist,Histogram,Phases,rigidbodyDict,calcControls,pawleyLookup,dependentVars)
            G2mv.Dict2Deriv(varylist,depDerivDict,dMdvh)    # fold constrained-parameter derivs in
            if GSASIIpath.GetConfigValue('Show_timing',False): print ('getPowderProfileDerv t=%.3f'%(time.time()-starttime))
            Wt = ma.sqrt(W[xB:xF])[nxs,:]
            Dy = dy[xB:xF][nxs,:]
            dMdvh *= Wt     # weighted Jacobian; second *= Wt*Dy below gives W*J*dy
            if dlg:
                GoOn = dlg.Update(Histogram['Residuals']['wR'],newmsg='Hessian for histogram %d\nAll data Rw=%8.3f%s'%(hId,Histogram['Residuals']['wR'],'%'))
                if type(GoOn) is tuple:
                    if not GoOn[0]:
                        raise G2obj.G2RefineCancel('Cancel pressed')
                elif not GoOn:
                    raise G2obj.G2RefineCancel('Cancel pressed')
                dlg.Raise()
            if len(Hess):
                Hess += np.inner(dMdvh,dMdvh)
                dMdvh *= Wt*Dy
                Vec += np.sum(dMdvh,axis=1)
            else:
                Hess = np.inner(dMdvh,dMdvh)
                dMdvh *= Wt*Dy
                Vec = np.sum(dMdvh,axis=1)
        elif 'HKLF' in histogram[:4]:
            Histogram = Histograms[histogram]
            phase = Histogram['Reflection Lists']
            Phase = Phases[phase]
            dMdvh,depDerivDict,wdf = dervHKLF(Histogram,Phase,calcControls,varylist,parmDict,rigidbodyDict)
            hId = Histogram['hId']
            hfx = ':%d:'%(Histogram['hId'])
            wtFactor = calcControls[hfx+'wtFactor']
            # now process derivatives in constraints
            G2mv.Dict2Deriv(varylist,depDerivDict,dMdvh)
#            print 'matrix build time: %.3f'%(time.time()-time0)
            if dlg:
                GoOn = dlg.Update(Histogram['Residuals']['wR'],newmsg='Hessian for histogram %d Rw=%8.3f%s'%(hId,Histogram['Residuals']['wR'],'%'))
                if type(GoOn) is tuple:
                    if not GoOn[0]:
                        raise G2obj.G2RefineCancel('Cancel pressed')
                elif not GoOn:
                    raise G2obj.G2RefineCancel('Cancel pressed')
                dlg.Raise()
            if len(Hess):
                Vec += wtFactor*np.sum(dMdvh*wdf,axis=1)
                Hess += wtFactor*np.inner(dMdvh,dMdvh)
            else:
                Vec = wtFactor*np.sum(dMdvh*wdf,axis=1)
                Hess = wtFactor*np.inner(dMdvh,dMdvh)
        else:
            continue        #skip non-histogram entries
    GetFobsSq(Histograms,Phases,parmDict,calcControls)
    # add restraint (penalty-function) contributions to Vec & Hess
    pNames,pVals,pWt,pWsum,pWnum = penaltyFxn(HistoPhases,calcControls,parmDict,varylist)
    if np.any(pVals):
        dpdv = penaltyDeriv(pNames,pVals,HistoPhases,calcControls,parmDict,varylist)
        Vec -= np.sum(dpdv*pWt*pVals,axis=1)
        Hess += np.inner(dpdv*pWt,dpdv)
    return Vec,Hess
def errRefine(values,HistoPhases,parmDict,varylist,calcControls,pawleyLookup,dlg=None):
    '''Computes the point-by-point discrepancies between every data point in every histogram
    and the observed value. Used in the Jacobian, Hessian & numeric least-squares to compute function.
    Also updates each histogram's 'Residuals' dict (R factors, Nobs, etc.)
    as a side effect and accumulates totals into the Histograms dict.

    :returns: an np array of differences between observed and computed diffraction values.
    '''
    Values2Dict(parmDict, varylist, values)
    G2mv.Dict2Map(parmDict,varylist)    # apply constraints to the updated parameters
    Histograms,Phases,restraintDict,rigidbodyDict = HistoPhases
    M = np.empty(0)
    SumwYo = 0
    Nobs = 0
    Nrej = 0
    Next = 0    # NOTE(review): also shadows builtin next() via lowercase 'next' below
    ApplyRBModels(parmDict,Phases,rigidbodyDict)
    #fixup Hatom positions here....
    histoList = list(Histograms.keys())
    histoList.sort()
    for histogram in histoList:
        if 'PWDR' in histogram[:4]:
            Histogram = Histograms[histogram]
            hId = Histogram['hId']
            hfx = ':%d:'%(hId)
            wtFactor = calcControls[hfx+'wtFactor']
            Limits = calcControls[hfx+'Limits']
            x,y,w,yc,yb,yd = Histogram['Data']
            yc *= 0.0                           #zero full calcd profiles
            yb *= 0.0
            yd *= 0.0
            xB = np.searchsorted(x,Limits[0])
            xF = np.searchsorted(x,Limits[1])+1
            yc[xB:xF],yb[xB:xF] = getPowderProfile(parmDict,x[xB:xF],
                varylist,Histogram,Phases,calcControls,pawleyLookup,histogram)
            yc[xB:xF] += yb[xB:xF]
            if not np.any(y):                   #fill dummy data with simulated counts
                try:
                    rv = st.poisson(yc[xB:xF])
                    y[xB:xF] = rv.rvs()
                except ValueError:
                    # poisson fails for out-of-range means; fake noise instead
                    y[xB:xF] = yc[xB:xF]
                    Z = np.ones_like(yc[xB:xF])
                    Z[1::2] *= -1
                    y[xB:xF] = yc[xB:xF]+np.abs(y[xB:xF]-yc[xB:xF])*Z
                w[xB:xF] = np.where(y[xB:xF]>0.,1./y[xB:xF],1.0)
            yd[xB:xF] = y[xB:xF]-yc[xB:xF]
            W = wtFactor*w
            wdy = -ma.sqrt(w[xB:xF])*(yd[xB:xF])
            Histogram['Residuals']['Durbin-Watson'] = ma.sum(ma.diff(wdy)**2)/ma.sum(wdy**2)
            wdy *= wtFactor
            Histogram['Residuals']['Nobs'] = ma.count(x[xB:xF])
            Nobs += Histogram['Residuals']['Nobs']
            Histogram['Residuals']['sumwYo'] = ma.sum(W[xB:xF]*y[xB:xF]**2)
            SumwYo += Histogram['Residuals']['sumwYo']
            Histogram['Residuals']['R'] = min(100.,ma.sum(ma.abs(yd[xB:xF]))/ma.sum(y[xB:xF])*100.)
            Histogram['Residuals']['wR'] = min(100.,ma.sqrt(ma.sum(wdy**2)/Histogram['Residuals']['sumwYo'])*100.)
            # background-subtracted R factors; points where yc == yb are excluded
            sumYmB = ma.sum(ma.where(yc[xB:xF]!=yb[xB:xF],ma.abs(y[xB:xF]-yb[xB:xF]),0.))
            sumwYmB2 = ma.sum(ma.where(yc[xB:xF]!=yb[xB:xF],W[xB:xF]*(y[xB:xF]-yb[xB:xF])**2,0.))
            sumYB = ma.sum(ma.where(yc[xB:xF]!=yb[xB:xF],ma.abs(y[xB:xF]-yc[xB:xF])*ma.abs(y[xB:xF]-yb[xB:xF])/y[xB:xF],0.))
            sumwYB2 = ma.sum(ma.where(yc[xB:xF]!=yb[xB:xF],W[xB:xF]*(ma.abs(y[xB:xF]-yc[xB:xF])*ma.abs(y[xB:xF]-yb[xB:xF])/y[xB:xF])**2,0.))
            Histogram['Residuals']['Rb'] = min(100.,100.*sumYB/sumYmB)
            Histogram['Residuals']['wRb'] = min(100.,100.*ma.sqrt(sumwYB2/sumwYmB2))
            Histogram['Residuals']['wRmin'] = min(100.,100.*ma.sqrt(Histogram['Residuals']['Nobs']/Histogram['Residuals']['sumwYo']))
            if dlg:
                GoOn = dlg.Update(Histogram['Residuals']['wR'],newmsg='For histogram %d Rw=%8.3f%s'%(hId,Histogram['Residuals']['wR'],'%'))
                if type(GoOn) is tuple:
                    if not GoOn[0]:
                        raise G2obj.G2RefineCancel('Cancel pressed')
                elif not GoOn:
                    raise G2obj.G2RefineCancel('Cancel pressed')
                dlg.Raise()
            M = np.concatenate((M,wdy))
#end of PWDR processing
        elif 'HKLF' in histogram[:4]:
            Histogram = Histograms[histogram]
            Histogram['Residuals'] = {}
            phase = Histogram['Reflection Lists']
            Phase = Phases[phase]
            hId = Histogram['hId']
            hfx = ':%d:'%(hId)
            wtFactor = calcControls[hfx+'wtFactor']
            pfx = '%d::'%(Phase['pId'])
            phfx = '%d:%d:'%(Phase['pId'],hId)
            SGData = Phase['General']['SGData']
            TwinLaw = calcControls[phfx+'TwinLaw']
            im = 0
            if parmDict[phfx+'Scale'] < 0.:     #keep phase scale positive
                parmDict[phfx+'Scale'] = .001
            if Phase['General'].get('Modulated',False):
                SSGData = Phase['General']['SSGData']
                im = 1  #offset in SS reflection list
            A = [parmDict[pfx+'A%d'%(i)] for i in range(6)]
            G,g = G2lat.A2Gmat(A)       #recip & real metric tensors
            refDict = Histogram['Data']
            # compute Fc^2 in place for all reflections in the list
            if im:
                if len(TwinLaw) > 1:
                    SStructureFactorTw(refDict,G,hfx,pfx,SGData,SSGData,calcControls,parmDict)
                else:
                    SStructureFactor(refDict,G,hfx,pfx,SGData,SSGData,calcControls,parmDict)
            else:
                StructureFactor2(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
#            print 'sf-calc time: %.3f'%(time.time()-time0)
            df = np.zeros(len(refDict['RefList']))
            sumwYo = 0
            sumFo = 0
            sumFo2 = 0
            sumFc2 = 0
            sumdF = 0
            sumdF2 = 0
            if im:
                # per-superstructure-order accumulators, indexed by |m| (<=9)
                sumSSFo = np.zeros(10)
                sumSSFo2 = np.zeros(10)
                sumSSdF = np.zeros(10)
                sumSSdF2 = np.zeros(10)
                sumSSwYo = np.zeros(10)
                sumSSwdf2 = np.zeros(10)
                SSnobs = np.zeros(10)
            nobs = 0
            nrej = 0
            next = 0
            maxH = 0
            if calcControls['F**2']:
                # refinement against F^2
                for i,ref in enumerate(refDict['RefList']):
                    if ref[6+im] > 0:
                        ref[11+im] = SCExtinction(ref,im,phfx,hfx,pfx,calcControls,parmDict,varylist)[0]
                        w = 1.0/ref[6+im]   # 1/sig(F^2)
                        ref[7+im] *= parmDict[phfx+'Scale']*ref[11+im]  #correct Fc^2 for extinction
                        ref[8+im] = ref[5+im]/(parmDict[phfx+'Scale']*ref[11+im])
                        if UserRejectHKL(ref,im,calcControls['UsrReject']) and ref[3+im]:    #skip sp.gp. absences (mul=0)
                            ref[3+im] = abs(ref[3+im])      #mark as allowed
                            Fo = np.sqrt(ref[5+im])
                            sumFo += Fo
                            sumFo2 += ref[5+im]
                            sumFc2 += ref[7+im]
                            sumdF += abs(Fo-np.sqrt(ref[7+im]))
                            sumdF2 += abs(ref[5+im]-ref[7+im])
                            nobs += 1
                            df[i] = -w*(ref[5+im]-ref[7+im])
                            sumwYo += (w*ref[5+im])**2      #w*Fo^2
                            if im:  #accumulate super lattice sums
                                ind = int(abs(ref[3]))
                                sumSSFo[ind] += Fo
                                sumSSFo2[ind] += ref[5+im]
                                sumSSdF[ind] += abs(Fo-np.sqrt(ref[7+im]))
                                sumSSdF2[ind] += abs(ref[5+im]-ref[7+im])
                                sumSSwYo[ind] += (w*ref[5+im])**2   #w*Fo^2
                                sumSSwdf2[ind] += df[i]**2
                                SSnobs[ind] += 1
                                maxH = max(maxH,ind)
                        else:
                            if ref[3+im]:
                                ref[3+im] = -abs(ref[3+im])     #mark as rejected
                                nrej += 1
                            else:   #sp.gp.extinct
                                next += 1
            else:
                # refinement against F
                for i,ref in enumerate(refDict['RefList']):
                    if ref[5+im] > 0.:
                        ref[11+im] = SCExtinction(ref,im,phfx,hfx,pfx,calcControls,parmDict,varylist)[0]
                        ref[7+im] *= parmDict[phfx+'Scale']*ref[11+im]  #correct Fc^2 for extinction
                        ref[8+im] = ref[5+im]/(parmDict[phfx+'Scale']*ref[11+im])
                        Fo = np.sqrt(ref[5+im])
                        Fc = np.sqrt(ref[7+im])
                        w = 2.0*Fo/ref[6+im]    # 1/sig(F)?
                        if UserRejectHKL(ref,im,calcControls['UsrReject']) and ref[3+im]:    #skip sp.gp. absences (mul=0)
                            ref[3+im] = abs(ref[3+im])      #mark as allowed
                            sumFo += Fo
                            sumFo2 += ref[5+im]
                            sumFc2 += ref[7+im]
                            sumdF += abs(Fo-Fc)
                            sumdF2 += abs(ref[5+im]-ref[7+im])
                            nobs += 1
                            df[i] = -w*(Fo-Fc)
                            sumwYo += (w*Fo)**2
                            if im:
                                ind = int(abs(ref[3]))
                                sumSSFo[ind] += Fo
                                sumSSFo2[ind] += ref[5+im]
                                sumSSdF[ind] += abs(Fo-Fc)
                                sumSSdF2[ind] += abs(ref[5+im]-ref[7+im])
                                sumSSwYo[ind] += (w*Fo)**2
                                sumSSwdf2[ind] += df[i]**2
                                SSnobs[ind] += 1
                                maxH = max(maxH,ind)
                        else:
                            if ref[3+im]:
                                ref[3+im] = -abs(ref[3+im])     #mark as rejected
                                nrej += 1
                            else:   #sp.gp.extinct
                                next += 1
            # if the overall scale is badly off, reseat it so the next cycle converges
            Scale = sumFo2/sumFc2
            if (Scale < 0.8 or Scale > 1.2) and phfx+'Scale' in varylist:
                print ('New scale: %.4f'%(Scale*parmDict[phfx+'Scale']))
                indx = varylist.index(phfx+'Scale')
                values[indx] = Scale*parmDict[phfx+'Scale']
            Histogram['Residuals']['Nobs'] = nobs
            Histogram['Residuals']['sumwYo'] = sumwYo
            SumwYo += sumwYo
            Histogram['Residuals']['wR'] = min(100.,np.sqrt(np.sum(df**2)/sumwYo)*100.)
            Histogram['Residuals'][phfx+'Rf'] = 100.*sumdF/sumFo
            Histogram['Residuals'][phfx+'Rf^2'] = 100.*sumdF2/sumFo2
            Histogram['Residuals'][phfx+'Nref'] = nobs
            Histogram['Residuals'][phfx+'Nrej'] = nrej
            Histogram['Residuals'][phfx+'Next'] = next
            if im:
                Histogram['Residuals'][phfx+'SSRf'] = 100.*sumSSdF[:maxH+1]/sumSSFo[:maxH+1]
                Histogram['Residuals'][phfx+'SSRf^2'] = 100.*sumSSdF2[:maxH+1]/sumSSFo2[:maxH+1]
                Histogram['Residuals'][phfx+'SSNref'] = SSnobs[:maxH+1]
                Histogram['Residuals']['SSwR'] = np.sqrt(sumSSwdf2[:maxH+1]/sumSSwYo[:maxH+1])*100.
            Nobs += nobs
            Nrej += nrej
            Next += next
            if dlg:
                GoOn = dlg.Update(Histogram['Residuals']['wR'],newmsg='For histogram %d Rw=%8.3f%s'%(hId,Histogram['Residuals']['wR'],'%'))
                if type(GoOn) is tuple:
                    if not GoOn[0]:
                        raise G2obj.G2RefineCancel('Cancel pressed')
                elif not GoOn:
                    raise G2obj.G2RefineCancel('Cancel pressed')
                dlg.Raise()
            M = np.concatenate((M,wtFactor*df))
# end of HKLF processing
#    GetFobsSq(Histograms,Phases,parmDict,calcControls)
    Histograms['sumwYo'] = SumwYo
    Histograms['Nobs'] = Nobs
    Histograms['Nrej'] = Nrej
    Histograms['Next'] = Next
    Rw = min(100.,np.sqrt(np.sum(M**2)/SumwYo)*100.)
    if dlg:
        GoOn = dlg.Update(Rw,newmsg='%s%8.3f%s'%('All data Rw =',Rw,'%'))
        if type(GoOn) is tuple:
            if not GoOn[0]:
                parmDict['saved values'] = values
                raise G2obj.G2RefineCancel('Cancel pressed')
        elif not GoOn:
            parmDict['saved values'] = values
            raise G2obj.G2RefineCancel('Cancel pressed')
        dlg.Raise()
    # append restraint (penalty-function) residuals, if any
    pDict,pVals,pWt,pWsum,pWnum = penaltyFxn(HistoPhases,calcControls,parmDict,varylist)
    if len(pVals):
        pSum = np.sum(pWt*pVals**2)
        for name in pWsum:
            if pWsum[name]:
                print (' Penalty function for %5d %8ss = %12.5g'%(pWnum[name],name,pWsum[name]))
        print ('Total penalty function: %12.5g on %d terms'%(pSum,len(pVals)))
        Nobs += len(pVals)
        M = np.concatenate((M,np.sqrt(pWt)*pVals))
    return M
/OBP_reliability_pillar_u1-0.2.1-py3-none-any.whl/OBP_reliability_pillar_u1/dynamodb/dynamodb_pitr_enabled.py | import logging
from botocore.exceptions import ClientError
from OBP_reliability_pillar_u1.dynamodb.utils import list_dynamodb_tables
# NOTE(review): basicConfig at import time configures the *root* logger for the
# whole process as a side effect — presumably intentional here; confirm.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()  # module-wide (root) logger
# Checks compliance for dynamodb-pitr-enabled
# Checks compliance for dynamodb-pitr-enabled
def dynamodb_pitr_enabled(self, regions) -> dict:
    """
    Checks that point in time recovery (PITR) is enabled for every DynamoDB
    table in the given regions.

    :param self: object exposing ``refresh_session()`` and a boto3 ``session``
    :param regions: iterable of AWS region names to scan
    :return: compliance-report dict with Result, failReason, resource_type,
        ControlId, Offenders, Compliance_type, Description and Risk Level
    """
    logger.info(" ---Inside dynamodb :: dynamodb_pitr_enabled()")
    self.refresh_session()

    result = True
    failReason = ''
    offenders = []
    control_id = 'Id5.5'

    compliance_type = "Dynamodb pitr enabled"
    description = "Checks that point in time recovery (PITR) is enabled for Amazon DynamoDB tables"
    resource_type = "Dynamodb"
    risk_level = 'Medium'

    for region in regions:
        try:
            client = self.session.client('dynamodb', region_name=region)
            for table in list_dynamodb_tables(client):
                response = client.describe_continuous_backups(TableName=table)
                # A missing key in the response means PITR was never
                # configured; treat that the same as an explicit DISABLED
                # status (this replaces the duplicated try/except KeyError
                # failure branches of the original).
                status = response.get(
                    'ContinuousBackupsDescription', {}).get(
                    'PointInTimeRecoveryDescription', {}).get(
                    'PointInTimeRecoveryStatus', 'DISABLED')
                if status == 'DISABLED':
                    result = False
                    failReason = "PITR is disabled"
                    offenders.append(table)
        except ClientError as e:
            # region may be disabled or inaccessible; record and continue
            logger.error("Something went wrong with region {}: {}".format(region, e))

    return {
        'Result': result,
        'failReason': failReason,
        'resource_type': resource_type,
        'ControlId': control_id,
        'Offenders': offenders,
        'Compliance_type': compliance_type,
        'Description': description,
        'Risk Level': risk_level
    }
/Dabo-0.9.16.tar.gz/Dabo-0.9.16/dabo/dReportWriter.py | import dabo
from dabo.dLocalize import _
from dabo.lib.reportWriter import ReportWriter
from dabo.dObject import dObject
import dabo.dEvents as dEvents
# dReportWriter is simply a raw ReportWriter/dObject mixin:
class dReportWriter(dObject, ReportWriter):
    """The Dabo Report Writer Engine, which mixes a data cursor and a report
    format file (.rfxml) to output a PDF.

    For each row in the Cursor, a detail band is printed. For each page in the
    report, the pageBackground, pageHeader, pageFooter, and pageForeground
    bands are printed. For each defined grouping, the groupHeader and groupFooter
    bands are printed.

    Report variables can be defined as accumulators, or for any purpose you
    need. All properties of the report form are evaluated at runtime, so that
    you can achieve full flexibility and ultimate control.

    There is also a pure-python interface available.
    """
    def _onReportCancel(self):
        """Raise the ReportCancel event and hide the progress display."""
        super(dReportWriter, self)._onReportCancel()
        self.raiseEvent(dEvents.ReportCancel)
        self._hideProgress()

    def _onReportBegin(self):
        """Raise the ReportBegin event and show the progress display."""
        super(dReportWriter, self)._onReportBegin()
        self.raiseEvent(dEvents.ReportBegin)
        self._showProgress()

    def _onReportEnd(self):
        """Raise the ReportEnd event and force a final progress update."""
        super(dReportWriter, self)._onReportEnd()
        self.raiseEvent(dEvents.ReportEnd)
        self._updateProgress(force=True)
        #self._hideProgress() ## Let the form controlling the progress gauge do this (less blinky)

    def _onReportIteration(self):
        """Raise the ReportIteration event and refresh the progress display."""
        super(dReportWriter, self)._onReportIteration()
        self.raiseEvent(dEvents.ReportIteration)
        self._updateProgress()

    def _showProgress(self):
        """Initialize and show the progress control, if one is set."""
        win = self.ProgressControl
        if win:
            win.Caption = "Processing %s..." % self.ReportForm.getProp("Title")
            win.updateProgress(0, len(self.Cursor))
            win.show()
            win.Form.fitToSizer()

    def _updateProgress(self, force=False):
        # Only update every 10th record (unless forced) to limit UI overhead.
        if force or self.RecordNumber % 10 == 0:
            win = self.ProgressControl
            if win:
                win.updateProgress(self.RecordNumber, len(self.Cursor))
                dabo.ui.yieldUI(_safe=True)

    def _hideProgress(self):
        """Hide the progress control, if one is set."""
        win = self.ProgressControl
        if win:
            win.hide()
            win.Form.fitToSizer()
            dabo.ui.yieldUI(_safe=True)

    # property getters/setters; values are created lazily on first access
    def _getEncoding(self):
        try:
            v = self._encoding
        except AttributeError:
            # default to the application-wide encoding
            v = dabo.getEncoding()
            self._encoding = v
        return v

    def _setEncoding(self, val):
        self._encoding = val

    def _getHomeDirectory(self):
        try:
            v = self._homeDirectory
        except AttributeError:
            v = self._homeDirectory = self.Application.HomeDirectory
        return v

    def _setHomeDirectory(self, val):
        self._homeDirectory = val

    def _getProgressControl(self):
        try:
            v = self._progressControl
        except AttributeError:
            v = self._progressControl = None
        return v

    def _setProgressControl(self, val):
        self._progressControl = val

    Encoding = property(_getEncoding, _setEncoding, None,
        _("Specifies the encoding for unicode strings. (str)"))

    HomeDirectory = property(_getHomeDirectory, _setHomeDirectory, None,
        _("""Specifies the home directory for the report.

        Resources on disk (image files, etc.) will be looked for relative to the
        HomeDirectory if specified with relative pathing. The HomeDirectory should
        be the directory that contains the report form file. If you set
        self.ReportFormFile, HomeDirectory will be set for you automatically.
        Otherwise, it will get set to self.Application.HomeDirectory."""))

    ProgressControl = property(_getProgressControl, _setProgressControl, None,
        _("""Specifies the control to receive progress updates.

        The specified control will be updated with every record processed. It must have
        a updateProgress(current_row, num_rows) method.

        For the default control, use dabo.ui.dReportProgress.
        """))
if __name__ == "__main__":
    ## run a test: build an in-memory report form and render it to PDF
    rw = dReportWriter(Name="dReportWriter1", OutputFile="./dRW-test.pdf")
    print rw.Name, rw.Application
    xml = """
<report>
	<title>Test Report from dReportWriter</title>

	<testcursor iid="int" cArtist="str">
		<record iid="1" cArtist='"The Clash"' />
		<record iid="2" cArtist='"Queen"' />
		<record iid="3" cArtist='"Metallica"' />
		<record iid="3" cArtist='"The Boomtown Rats"' />
	</testcursor>

	<page>
		<size>"letter"</size>
		<orientation>"portrait"</orientation>
		<marginLeft>".5 in"</marginLeft>
		<marginRight>".5 in"</marginRight>
		<marginTop>".5 in"</marginTop>
		<marginBottom>".5 in"</marginBottom>
	</page>

	<pageHeader>
		<height>"0.5 in"</height>
		<objects>
			<string>
				<expr>self.ReportForm["title"]</expr>
				<align>"center"</align>
				<x>"3.75 in"</x>
				<y>".3 in"</y>
				<hAnchor>"center"</hAnchor>
				<width>"6 in"</width>
				<height>".25 in"</height>
				<borderWidth>"0 pt"</borderWidth>
				<fontName>"Helvetica"</fontName>
				<fontSize>14</fontSize>
			</string>
		</objects>
	</pageHeader>

	<pageFooter>
		<height>"0.75 in"</height>
		<objects>
			<string>
				<expr>"(also see the test in dabo/lib/reporting)"</expr>
				<align>"right"</align>
				<hAnchor>"right"</hAnchor>
				<x>self.Bands["pageFooter"]["width"]-1</x>
				<y>"0 in"</y>
				<width>"6 in"</width>
			</string>
		</objects>
	</pageFooter>

	<detail>
		<height>".25 in"</height>
		<objects>
			<string>
				<expr>self.Record['cArtist']</expr>
				<width>"6 in"</width>
				<x>"1.25 in"</x>
			</string>
		</objects>
	</detail>

	<pageBackground></pageBackground>

</report>
"""
    rw.ReportFormXML = xml
    rw.UseTestCursor = True
    rw.write()
/ConStrain-0.3.0.tar.gz/ConStrain-0.3.0/versioneer.py | # pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring
# pylint:disable=missing-class-docstring,too-many-branches,too-many-statements
# pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error
# pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with
# pylint:disable=attribute-defined-outside-init,too-many-arguments
import configparser
import errno
import json
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Instances are populated attribute-by-attribute in get_config_from_root():
    VCS, style, versionfile_source, versionfile_build, tag_prefix,
    parentdir_prefix and verbose.
    """
def get_root():
    """Get the project root directory.

    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .

    Raises VersioneerBadRootError when neither the current directory nor
    the directory of sys.argv[0] contains setup.py/versioneer.py.
    """
    root = os.path.realpath(os.path.abspath(os.getcwd()))
    setup_py = os.path.join(root, "setup.py")
    versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        setup_py = os.path.join(root, "setup.py")
        versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        err = ("Versioneer was unable to run the project root directory. "
               "Versioneer requires setup.py to be executed from "
               "its immediate directory (like 'python setup.py COMMAND'), "
               "or in a way that lets it use sys.argv[0] to find the root "
               "(like 'python path/to/setup.py COMMAND').")
        raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        my_path = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(my_path)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(my_path), versioneer_py))
    except NameError:
        # __file__ is undefined in some embedded/frozen contexts
        pass
    return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config.

    Returns a populated VersioneerConfig instance.
    """
    # This might raise OSError (if setup.cfg is missing), or
    # configparser.NoSectionError (if it lacks a [versioneer] section), or
    # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
    setup_cfg = os.path.join(root, "setup.cfg")
    parser = configparser.ConfigParser()
    with open(setup_cfg, "r") as cfg_file:
        parser.read_file(cfg_file)
    VCS = parser.get("versioneer", "VCS")  # mandatory
    # Dict-like interface for non-mandatory entries
    section = parser["versioneer"]
    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = section.get("style", "")
    cfg.versionfile_source = section.get("versionfile_source")
    cfg.versionfile_build = section.get("versionfile_build")
    cfg.tag_prefix = section.get("tag_prefix")
    # a quoted empty string in setup.cfg means "no tag prefix"
    if cfg.tag_prefix in ("''", '""'):
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = section.get("parentdir_prefix")
    cfg.verbose = section.get("verbose")
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY: Dict[str, str] = {}  # per-VCS template for the generated _version.py
HANDLERS: Dict[str, Dict[str, Callable]] = {}  # vcs -> {method name -> handler fn}
def register_vcs_handler(vcs, method):  # decorator
    """Create decorator to mark a method as the handler of a VCS."""
    def decorate(func):
        """Store func in HANDLERS[vcs][method]."""
        if vcs not in HANDLERS:
            HANDLERS[vcs] = {}
        HANDLERS[vcs][method] = func
        return func
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Try each executable name in *commands* in turn (a list of alternatives,
    e.g. ["git.cmd", "git.exe"] on Windows), launching the first one that
    exists with *args* appended.  Returns a (stdout, returncode) tuple:
    (None, None) when no command could be launched at all, and
    (None, returncode) when the process ran but exited non-zero.
    """
    assert isinstance(commands, list)
    process = None

    popen_kwargs = {}
    if sys.platform == "win32":
        # This hides the console window if pythonw.exe is used
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        popen_kwargs["startupinfo"] = startupinfo

    for command in commands:
        # Hoisted out of the try block: it is also used by the non-zero-exit
        # error path after the loop.
        dispcmd = str([command] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen([command] + args, cwd=cwd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=(subprocess.PIPE if hide_stderr
                                               else None), **popen_kwargs)
            break
        except OSError as e:
            # "No such file or directory": try the next alternative name.
            if e.errno == errno.ENOENT:
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
LONG_VERSION_PY['git'] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.22 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
popen_kwargs = {}
if sys.platform == "win32":
# This hides the console window if pythonw.exe is used
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
popen_kwargs["startupinfo"] = startupinfo
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen([command] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None), **popen_kwargs)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# GIT_DIR can interfere with correct operation of Versioneer.
# It may be intended to be passed to the Versioneer-versioned project,
# but that should not change where we get our version from.
env = os.environ.copy()
env.pop("GIT_DIR", None)
runner = functools.partial(runner, env=env)
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
MATCH_ARGS = ["--match", "%%s*" %% tag_prefix] if tag_prefix else []
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
"--always", "--long", *MATCH_ARGS],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver):
"""Split pep440 version string at the post-release segment.
Returns the release segments before the post-release and the
post-release version number (or -1 if no post-release segment is present).
"""
vc = str.split(ver, ".post")
return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%%d.dev%%d" %% (post_version+1, pieces["distance"])
else:
rendered += ".post0.dev%%d" %% (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    # (line prefix to look for, key to store the quoted value under)
    markers = (("git_refnames =", "refnames"),
               ("git_full =", "full"),
               ("git_date =", "date"))
    try:
        with open(versionfile_abs, "r") as fobj:
            for line in fobj:
                stripped = line.strip()
                for prefix, key in markers:
                    if stripped.startswith(prefix):
                        mo = re.search(r'=\s*"(.*)"', line)
                        if mo:
                            keywords[key] = mo.group(1)
    except OSError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if "refnames" not in keywords:
        raise NotThisMethod("Short version file found")
    date = keywords.get("date")
    if date is not None:
        # Use only the last line. Previous lines may contain GPG signature
        # information.
        date = date.splitlines()[-1]
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {ref.strip() for ref in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {ref[len(TAG):] for ref in refs if ref.startswith(TAG)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = {ref for ref in refs if re.search(r'\d', ref)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        r = ref[len(tag_prefix):]
        # Filter out refs that exactly match prefix or that don't start
        # with a number once the prefix is stripped (mostly a concern
        # when prefix is '')
        if not re.match(r'\d', r):
            continue
        if verbose:
            print("picking %s" % r)
        return {"version": r,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False, "error": None,
                "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys long, short, branch, dirty, error,
    date, and either closest-tag + distance (tag found) or closest-tag=None
    with distance as the total commit count.  Raises NotThisMethod when the
    tree is not under git control or required git commands fail.
    """
    # Candidate executable names; shell=False on Windows needs .cmd/.exe.
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # GIT_DIR can interfere with correct operation of Versioneer.
    # It may be intended to be passed to the Versioneer-versioned project,
    # but that should not change where we get our version from.
    env = os.environ.copy()
    env.pop("GIT_DIR", None)
    runner = functools.partial(runner, env=env)
    # Bail out early if `root` is not inside a git work tree at all.
    _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
                   hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    MATCH_ARGS = ["--match", "%s*" % tag_prefix] if tag_prefix else []
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
                                     "--always", "--long", *MATCH_ARGS],
                              cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
                             cwd=root)
    # --abbrev-ref was added in git-1.6.3
    if rc != 0 or branch_name is None:
        raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
    branch_name = branch_name.strip()
    if branch_name == "HEAD":
        # If we aren't exactly on a branch, pick a branch which represents
        # the current commit. If all else fails, we are on a branchless
        # commit.
        branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
        # --contains was added in git-1.5.4
        if rc != 0 or branches is None:
            raise NotThisMethod("'git branch --contains' returned error")
        branches = branches.split("\n")
        # Remove the first line if we're running detached
        if "(" in branches[0]:
            branches.pop(0)
        # Strip off the leading "* " from the list of branches.
        branches = [branch[2:] for branch in branches]
        if "master" in branches:
            branch_name = "master"
        elif not branches:
            branch_name = None
        else:
            # Pick the first branch that is returned. Good or bad.
            branch_name = branches[0]
    pieces["branch"] = branch_name
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparsable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    # NOTE(review): assumes "git show" succeeds here; a failure would make
    # runner return (None, rc) and .strip() would raise — confirm acceptable.
    date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
    # Use only the last line. Previous lines may contain GPG signature
    # information.
    date = date.splitlines()[-1]
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-specific installation logic for Versioneer.

    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-subst keyword substitution.

    Side effects: may append a line to .gitattributes in the current
    directory, and runs "git add" on the touched files.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # Files that must end up tracked by git for versioneer to work.
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        my_path = __file__
        # Map a compiled module path back to its .py source.
        if my_path.endswith(".pyc") or my_path.endswith(".pyo"):
            my_path = os.path.splitext(my_path)[0] + ".py"
        versioneer_file = os.path.relpath(my_path)
    except NameError:
        # Frozen/embedded interpreters may not define __file__.
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    # Check whether .gitattributes already marks the version file with
    # export-subst, so repeated installs don't append duplicate lines.
    present = False
    try:
        with open(".gitattributes", "r") as fobj:
            for line in fobj:
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
                        break
    except OSError:
        pass
    if not present:
        with open(".gitattributes", "a+") as fobj:
            fobj.write(f"{versionfile_source} export-subst\n")
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            # Whatever follows the prefix is taken as the version string.
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.22) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
    """Try to determine the version from _version.py if present."""
    try:
        with open(filename) as f:
            contents = f.read()
    except OSError:
        raise NotThisMethod("unable to read _version.py")
    # The payload is delimited exactly as SHORT_VERSION_PY writes it; accept
    # both Unix and Windows line endings after the opening quotes.
    for newline in ("\n", "\r\n"):
        mo = re.search(r"version_json = '''" + newline
                       + r"(.*)''' # END VERSION_JSON",
                       contents, re.M | re.S)
        if mo:
            return json.loads(mo.group(1))
    raise NotThisMethod("no version_json in _version.py")
def write_to_version_file(filename, versions):
    """Write the given version number to the given _version.py file.

    Renders SHORT_VERSION_PY with the JSON-encoded *versions* dict and
    replaces *filename* with the result, printing a confirmation line.
    """
    # Remove any stale copy first; a missing file is fine (first write).
    # The previous unconditional os.unlink() crashed with FileNotFoundError
    # when the version file did not exist yet.
    try:
        os.unlink(filename)
    except FileNotFoundError:
        pass
    contents = json.dumps(versions, sort_keys=True,
                          indent=1, separators=(",", ": "))
    with open(filename, "w") as f:
        f.write(SHORT_VERSION_PY % contents)
    print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    Treats a missing or None "closest-tag" like an empty tag, so the
    separator is computable for untagged pieces too.  (dict.get's default
    only applies when the key is absent; the no-tags code paths store an
    explicit None, which previously made `"+" in None` raise TypeError.)
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    if not pieces["closest-tag"]:
        # exception #1: no tag anywhere in history
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered

    rendered = pieces["closest-tag"]
    if pieces["distance"] or pieces["dirty"]:
        rendered += plus_or_dot(pieces)
        rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def render_pep440_branch(pieces):
    """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .

    The ".dev0" means not master branch. Note that .dev0 sorts backwards
    (a feature branch will appear "older" than the master branch).

    Exceptions:
    1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
    """
    on_master = pieces["branch"] == "master"
    if not pieces["closest-tag"]:
        # exception #1: no tag anywhere in history
        rendered = "0"
        if not on_master:
            rendered += ".dev0"
        rendered += "+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered

    rendered = pieces["closest-tag"]
    if pieces["distance"] or pieces["dirty"]:
        if not on_master:
            rendered += ".dev0"
        rendered += plus_or_dot(pieces)
        rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def pep440_split_post(ver):
    """Split a PEP 440 version string at its post-release segment.

    Returns a (release, post) pair where *release* is everything before
    ".post" and *post* is the post-release number (0 for a bare ".post",
    None when no post-release segment is present).
    """
    parts = ver.split(".post")
    if len(parts) == 2:
        return parts[0], int(parts[1] or 0)
    return parts[0], None
def render_pep440_pre(pieces):
    """TAG[.postN.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post0.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in history
        return "0.post0.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        # no commits past the tag: the tag itself is the version
        return tag
    # update (or introduce) the post-release segment of the tag
    tag_version, post_version = pep440_split_post(tag)
    if post_version is None:
        return tag_version + ".post0.dev%d" % (pieces["distance"])
    return tag_version + ".post%d.dev%d" % (post_version + 1,
                                            pieces["distance"])
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if not pieces["closest-tag"]:
        # exception #1: no tag anywhere in history
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
        return rendered

    rendered = pieces["closest-tag"]
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += plus_or_dot(pieces)
        rendered += "g%s" % pieces["short"]
    return rendered
def render_pep440_post_branch(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .

    The ".dev0" means not master branch.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
    """
    on_master = pieces["branch"] == "master"
    if not pieces["closest-tag"]:
        # exception #1: no tag anywhere in history
        rendered = "0.post%d" % pieces["distance"]
        if not on_master:
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered

    rendered = pieces["closest-tag"]
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if not on_master:
            rendered += ".dev0"
        rendered += plus_or_dot(pieces)
        rendered += "g%s" % pieces["short"]
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        # a clean, exactly-tagged build keeps the bare tag
        needs_post = bool(pieces["distance"] or pieces["dirty"])
    else:
        # exception #1: no tag anywhere in history
        rendered = "0"
        needs_post = True
    if needs_post:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    if not pieces["closest-tag"]:
        # exception #1: fall back to the bare short hash
        rendered = pieces["short"]
    else:
        rendered = pieces["closest-tag"]
        # distance/hash suffix only when we are past the tag
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    if not pieces["closest-tag"]:
        # exception #1: fall back to the bare short hash
        rendered = pieces["short"]
    else:
        # unlike render_git_describe, always append the distance and hash
        rendered = pieces["closest-tag"]
        rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render(pieces, style):
    """Render the given version pieces into the requested style.

    Returns a dict with 'version', 'full-revisionid', 'dirty', 'error' and
    'date' keys.  An error recorded in *pieces* short-circuits rendering.
    """
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    # Dispatch table mapping each supported style to its renderer.
    renderers = {
        "pep440": render_pep440,
        "pep440-branch": render_pep440_branch,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-post-branch": render_pep440_post_branch,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    try:
        renderer = renderers[style]
    except KeyError:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderer(pieces)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
class VersioneerBadRootError(Exception):
    """The project root directory is unknown or missing key files.

    NOTE(review): appears intended to be raised when the project root cannot
    be located (e.g. setup.py is missing) -- confirm against get_root().
    """
def get_versions(verbose=False):
    """Get the project version from whatever source is available.

    Returns dict with two keys: 'version' and 'full'.

    Tries, in order: expanded VCS keywords, an existing _version.py, a live
    VCS query ('git describe'), and finally the parent-directory name.  Each
    strategy signals "not applicable" by raising NotThisMethod; if all fail,
    a "0+unknown" placeholder dict is returned.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]

    root = get_root()
    cfg = get_config_from_root(root)

    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"

    versionfile_abs = os.path.join(root, cfg.versionfile_source)

    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.

    # Strategy 1: expanded VCS keywords (e.g. from 'git archive' export-subst)
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass

    # Strategy 2: a previously generated _version.py (sdist tarballs)
    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass

    # Strategy 3: query the VCS directly (developer checkouts)
    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass

    # Strategy 4: infer from an appropriately named parent directory
    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass

    if verbose:
        print("unable to compute version")

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version",
            "date": None}
def get_version():
    """Return just the short version string for this project.

    Convenience wrapper around get_versions().
    """
    info = get_versions()
    return info["version"]
def get_cmdclass(cmdclass=None):
    """Get the custom setuptools/distutils subclasses used by Versioneer.

    If the package uses a different cmdclass (e.g. one from numpy), it
    should be provided as an argument.  Returns a dict mapping command
    names ('version', 'build_py', 'build_ext', 'sdist', and optionally
    'build_exe'/'py2exe') to command classes that inject the computed
    version into built/distributed artifacts.
    """
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to it's pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/python-versioneer/python-versioneer/issues/52

    cmds = {} if cmdclass is None else cmdclass.copy()

    # we add "version" to both distutils and setuptools
    try:
        from setuptools import Command
    except ImportError:
        from distutils.core import Command

    class cmd_version(Command):
        # 'setup.py version': print the computed version info and any error.
        description = "report generated version string"
        user_options = []
        boolean_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            print(" date: %s" % vers.get("date"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version

    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    # distutils/build -> build_py
    # distutils/install -> distutils/build ->..
    # setuptools/bdist_wheel -> distutils/install ->..
    # setuptools/bdist_egg -> distutils/install_lib -> build_py
    # setuptools/install -> bdist_egg ->..
    # setuptools/develop -> ?
    # pip install:
    # copies source tree to a tempdir before running egg_info/etc
    # if .git isn't copied too, 'git describe' will fail
    # then does setup.py bdist_wheel, or sometimes setup.py install
    # setup.py egg_info -> ?

    # we override different "build_py" commands for both environments
    if 'build_py' in cmds:
        _build_py = cmds['build_py']
    elif "setuptools" in sys.modules:
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py

    class cmd_build_py(_build_py):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            # compute the version BEFORE building, then rewrite the copy of
            # _version.py placed in the build tree
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib,
                                                  cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
    cmds["build_py"] = cmd_build_py

    if 'build_ext' in cmds:
        _build_ext = cmds['build_ext']
    elif "setuptools" in sys.modules:
        from setuptools.command.build_ext import build_ext as _build_ext
    else:
        from distutils.command.build_ext import build_ext as _build_ext

    class cmd_build_ext(_build_ext):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_ext.run(self)
            if self.inplace:
                # build_ext --inplace will only build extensions in
                # build/lib<..> dir with no _version.py to write to.
                # As in place builds will already have a _version.py
                # in the module dir, we do not need to write one.
                return
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            target_versionfile = os.path.join(self.build_lib,
                                              cfg.versionfile_build)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile, versions)
    cmds["build_ext"] = cmd_build_ext

    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe
        # nczeczulin reports that py2exe won't like the pep440-style string
        # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
        # setup(console=[{
        # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
        # "product_version": versioneer.get_version(),
        # ...

        class cmd_build_exe(_build_exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                # temporarily replace the source _version.py with the short,
                # frozen form, then restore the long template afterwards
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _build_exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["build_exe"] = cmd_build_exe
        del cmds["build_py"]

    if 'py2exe' in sys.modules:  # py2exe enabled?
        from py2exe.distutils_buildexe import py2exe as _py2exe

        class cmd_py2exe(_py2exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                # same temporary-replacement dance as cmd_build_exe above
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _py2exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["py2exe"] = cmd_py2exe

    # we override different "sdist" commands for both environments
    if 'sdist' in cmds:
        _sdist = cmds['sdist']
    elif "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist

    class cmd_sdist(_sdist):
        def run(self):
            versions = get_versions()
            # stash for make_release_tree, which runs later in the same command
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)

        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile,
                                  self._versioneer_generated_versions)
    cmds["sdist"] = cmd_sdist

    return cmds
# Error message printed (by do_setup) when setup.cfg lacks the required
# [versioneer] section.
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
# Commented-out configuration template appended to setup.cfg when no
# [versioneer] section exists yet.
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
# Legacy __init__.py boilerplate installed by older Versioneer releases;
# do_setup() replaces it with INIT_PY_SNIPPET when found.
OLD_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
# Current __init__.py boilerplate; the {0} placeholder is the name of the
# generated version module (usually "_version").
INIT_PY_SNIPPET = """
from . import {0}
__version__ = {0}.get_versions()['version']
"""
def do_setup():
    """Do main VCS-independent setup function for installing Versioneer.

    Writes the long _version.py from the VCS template, installs the
    __version__ boilerplate into the package __init__.py, and ensures
    MANIFEST.in covers both versioneer.py and the version file.  Returns 0
    on success, 1 when setup.cfg configuration is missing/invalid.
    """
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (OSError, configparser.NoSectionError,
            configparser.NoOptionError) as e:
        if isinstance(e, (OSError, configparser.NoSectionError)):
            # setup.cfg is missing or has no [versioneer] section: append a
            # commented-out sample so the user has a starting point.
            print("Adding sample versioneer config to setup.cfg",
                  file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1

    # Generate the long _version.py from the VCS-specific template.
    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(LONG % {"DOLLAR": "$",
                        "STYLE": cfg.style,
                        "TAG_PREFIX": cfg.tag_prefix,
                        "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                        "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        })

    # Install (or upgrade) the __version__ boilerplate in the package's
    # __init__.py, if one exists alongside the version file.
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
                       "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except OSError:
            old = ""
        module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0]
        snippet = INIT_PY_SNIPPET.format(module)
        if OLD_SNIPPET in old:
            print(" replacing boilerplate in %s" % ipy)
            with open(ipy, "w") as f:
                f.write(old.replace(OLD_SNIPPET, snippet))
        elif snippet not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(snippet)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None

    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except OSError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" %
              cfg.versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")

    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-subst keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Scans ./setup.py for the required versioneer usage markers and for
    obsolete module-level setters, printing advice for anything amiss.
    Returns the number of problem categories detected (0 when clean).
    """
    found = set()
    setters = False
    errors = 0
    # Each marker string maps to the token recorded when it is seen.
    markers = {
        "import versioneer": "import",
        "versioneer.get_cmdclass()": "cmdclass",
        "versioneer.get_version()": "get_version",
    }
    with open("setup.py", "r") as f:
        for line in f.readlines():
            for text, token in markers.items():
                if text in line:
                    found.add(token)
            if "versioneer.VCS" in line:
                setters = True
            if "versioneer.versionfile_source" in line:
                setters = True
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print(" cmdclass=versioneer.get_cmdclass(), ...)")
        print("")
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors
if __name__ == "__main__":
    # Command-line interface: 'python versioneer.py setup' installs the
    # versioneer boilerplate, then sanity-checks setup.py; a non-zero error
    # count exits with status 1.
    cmd = sys.argv[1]
    if cmd == "setup":
        errors = do_setup()
        errors += scan_setup_py()
        if errors:
            sys.exit(1)
/ABR-0.0.10.tar.gz/ABR-0.0.10/abr/app.py | import logging
logging.basicConfig(level=logging.INFO)
import argparse
from collections import Counter
from pathlib import Path
from matplotlib import pylab as pl
from numpy import random
import pandas as pd
from scipy import stats
from atom.api import Bool, Typed, Str
import enaml
from enaml.application import deferred_call
from enaml.core.api import d_, Declarative
from enaml.qt.qt_application import QtApplication
with enaml.imports():
from abr.launch_window import LaunchWindow
from abr.main_window import (CompareWindow, DNDWindow, load_files,
SerialWindow)
from abr.presenter import SerialWaveformPresenter, WaveformPresenter
from abr.parsers import Parser
# Prior (normal) distributions for the expected latency of ABR waves 1-5,
# keyed by wave number.  NOTE(review): units are presumably milliseconds --
# confirm against the presenter/parser code that consumes these.
P_LATENCIES = {
    1: stats.norm(1.5, 0.5),
    2: stats.norm(2.5, 1),
    3: stats.norm(3.0, 1),
    4: stats.norm(4.0, 1),
    5: stats.norm(5.0, 2),
}
def add_default_arguments(parser, waves=True):
    """Install the command-line options shared by the abr entry points.

    Mutates *parser* in place.  When *waves* is True, a required
    mutually-exclusive group is added for choosing which waves to analyze.
    """
    parser.add_argument('--nofilter', action='store_false', dest='filter',
                        default=True, help='Do not filter waveform')
    parser.add_argument('--lowpass', type=float, default=3000,
                        help='Lowpass cutoff (Hz), default 3000 Hz')
    parser.add_argument('--highpass', type=float, default=300,
                        help='Highpass cutoff (Hz), default 300 Hz')
    parser.add_argument('--order', type=int, default=1,
                        help='Filter order, default 1st order')
    parser.add_argument('--parser', default='EPL', help='Parser to use')
    parser.add_argument('--user', help='Name of person analyzing data')
    if not waves:
        return
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--threshold-only', action='store_true')
    group.add_argument('--all-waves', action='store_true')
    group.add_argument('--waves', type=int, nargs='+')
def parse_args(parser, waves=True):
    """Convert parsed command-line options into a keyword-argument dict.

    The individual filter options are collapsed into a single ``Parser``
    instance under the 'parser' key; the wave selection becomes a
    'latencies' dict mapping wave number to its prior latency distribution.
    All other options pass through unchanged.
    """
    options = parser.parse_args()
    exclude = ('filter', 'lowpass', 'highpass', 'order', 'parser', 'user',
               'waves', 'all_waves', 'threshold_only')
    new_options = {}
    for key, value in vars(options).items():
        if key not in exclude:
            new_options[key] = value

    # Collapse the filter options into one settings dict (None disables
    # filtering entirely).
    filter_settings = None
    if options.filter:
        filter_settings = {
            'lowpass': options.lowpass,
            'highpass': options.highpass,
            'order': options.order,
        }
    new_options['parser'] = Parser(options.parser, filter_settings,
                                   options.user)
    if not waves:
        return new_options

    if options.all_waves:
        selected = [1, 2, 3, 4, 5]
    elif options.threshold_only:
        selected = []
    else:
        selected = list(options.waves)
    new_options['latencies'] = {w: P_LATENCIES[w] for w in selected}
    return new_options
def main_launcher():
    """Entry point: show the launch window and run the Qt event loop."""
    app = QtApplication()
    window = LaunchWindow()
    window.show()
    app.start()
    app.stop()
def main_gui():
    """Entry point for the interactive drag-and-drop analysis GUI."""
    parser = argparse.ArgumentParser('abr_gui')
    add_default_arguments(parser)
    parser.add_argument('--demo', action='store_true', dest='demo',
                        default=False, help='Load demo data')
    parser.add_argument('filenames', nargs='*')
    options = parse_args(parser)
    app = QtApplication()
    view = DNDWindow(parser=options['parser'], latencies=options['latencies'])
    # Defer file loading until the Qt event loop is running.
    deferred_call(load_files, options['parser'], options['latencies'],
                  options['filenames'], view.find('dock_area'))
    view.show()
    app.start()
    app.stop()
def main_batch():
    """Entry point for serial (batch) analysis of multiple directories."""
    parser = argparse.ArgumentParser("abr_batch")
    add_default_arguments(parser)
    parser.add_argument('dirnames', nargs='*')
    parser.add_argument('--skip-errors', action='store_true')
    options = parse_args(parser)
    # NOTE: rebinds 'parser' from the argparse parser to the data-file parser
    # returned by parse_args.
    parser = options['parser']
    app = QtApplication()
    presenter = SerialWaveformPresenter(parser=parser,
                                        latencies=options['latencies'],
                                        paths=options['dirnames'])
    view = SerialWindow(presenter=presenter)
    view.show()
    app.start()
    app.stop()
class Compare(Declarative):
    """Interactive scatter plot comparing two analysts' results.

    ``data`` holds one column per user (see ``main_compare``); the plot shows
    ``x_column`` against ``y_column``, optionally as a difference (y - x) and
    with uniform jitter so overlapping points stay visible.  Picking points
    on the plot records their (raw_file, frequency) index pairs in
    ``selected``.
    """

    # Comparison table; one column per user (built by main_compare).
    data = Typed(pd.DataFrame)
    x_column = d_(Str())
    y_column = d_(Str())
    # Plot y - x instead of raw y values.
    as_difference = d_(Bool(True))
    # Add uniform jitter in [-1, 1] to both axes to separate coincident points.
    jitter = d_(Bool(True))
    axes = Typed(pl.Axes)
    figure = Typed(pl.Figure)
    # (raw_file, frequency) pairs most recently picked on the plot.
    selected = Typed(list)

    def _default_figure(self):
        return pl.Figure()

    def _default_axes(self):
        return self.figure.add_subplot(111)

    # Redraw whenever any plot-affecting member changes.
    def _observe_data(self, event):
        self._update_plot()

    def _observe_x_column(self, event):
        self._update_plot()

    def _observe_y_column(self, event):
        self._update_plot()

    def _observe_as_difference(self, event):
        self._update_plot()

    def _observe_jitter(self, event):
        self._update_plot()

    def _default_x_column(self):
        return self.data.columns[0]

    def _default_y_column(self):
        i = 1 if (len(self.data.columns) > 1) else 0
        return self.data.columns[i]

    def _update_plot(self):
        """Redraw the scatter plot from the current member values."""
        x = self.data[self.x_column].copy()
        y = self.data[self.y_column].copy()
        if self.as_difference:
            y -= x
        if self.jitter:
            # Bug fix: this module imports ``random`` from numpy
            # ("from numpy import random"); the previous code referenced the
            # undefined name ``np`` and raised NameError whenever jitter was
            # enabled.
            x += random.uniform(-1, 1, len(x))
            y += random.uniform(-1, 1, len(x))
        self.axes.clear()
        self.axes.plot(x, y, 'ko', picker=4, mec='w', mew=1)
        if self.figure.canvas is not None:
            self.figure.canvas.draw()

    def pick_handler(self, event):
        """Record the (raw_file, frequency) pairs under a matplotlib pick event."""
        rows = self.data.iloc[event.ind]
        files = list(rows.index.get_level_values('raw_file'))
        frequencies = list(rows.index.get_level_values('frequency'))
        self.selected = list(zip(files, frequencies))
def main_compare():
    """Entry point comparing analyses of the same data across users."""
    parser = argparse.ArgumentParser("abr_compare")
    add_default_arguments(parser, waves=False)
    parser.add_argument('directory')
    options = parse_args(parser, waves=False)
    data = options['parser'].load_analyses(options['directory'])
    # Pivot to one column per user, indexed by the remaining levels
    # (raw_file, frequency).
    data = data.reset_index(['analyzed_file'], drop=True).unstack('user')
    data = data.sort_index()
    figure, axes = pl.subplots(1, 1)
    compare = Compare(data=data)
    app = QtApplication()
    view = CompareWindow(parser=options['parser'], compare=compare)
    view.show()
    app.start()
    app.stop()
/GenomeTreeTk-0.1.6.tar.gz/GenomeTreeTk-0.1.6/genometreetk/infer_workflow.py |
import os
import logging
from biolib.common import make_sure_path_exists
from biolib.misc.time_keeper import TimeKeeper
from biolib.external.hmmer import HmmModelParser
from biolib.external.fasttree import FastTree
from genometreetk.common import (read_genome_id_file,
read_genome_dir_file,
read_marker_id_file,
create_concatenated_alignment)
from genometreetk.markers.align_markers import AlignMarkers
class InferWorkflow(object):
    """Infer phylogenetic tree from concatenated marker genes."""

    def __init__(self, genome_dir_file, pfam_model_file, tigrfams_model_dir, cpus):
        """Initialization.

        Parameters
        ----------
        genome_dir_file : str
            File specifying directory for each genome.
        pfam_model_file : str
            File containing Pfam HMMs.
        tigrfams_model_dir : str
            Directory containing TIGRFAMs HMMs.
        cpus : int
            Number of cpus to use.
        """
        self.logger = logging.getLogger()
        self.genome_dir_file = genome_dir_file
        self.pfam_model_file = pfam_model_file
        self.tigrfams_model_dir = tigrfams_model_dir
        self.cpus = cpus

    def _fetch_marker_models(self, marker_genes, hmm_model_out, hmm_info_out, output_model_dir):
        """Save PFAM and TIGRFAM marker genes into a single HMM model file.

        Parameters
        ----------
        marker_genes : iterable
            Marker genes to fetch.
        hmm_model_out : str
            File to containing phylogenetically informative HMMs.
        hmm_info_out : str
            File to contain information about HMMs.
        output_model_dir : str
            Directory to write individual HMM model files.
        """
        # Map Pfam accessions to model names: hmmfetch on the Pfam database
        # is keyed by model NAME, while our marker ids are accessions.
        marker_id_to_name = {}
        for line in open(self.pfam_model_file):
            if 'NAME' in line:
                name = line.split()[1].rstrip()
            elif 'ACC' in line:
                acc = line.split()[1].rstrip()
                marker_id_to_name[acc] = name

        # NOTE(review): paths and ids are interpolated into shell commands
        # unquoted; paths containing spaces or shell metacharacters will
        # break (or worse) -- consider subprocess.run with an argument list.
        fout_model = open(hmm_model_out, 'w')
        for marker_id in marker_genes:
            output_model_file = os.path.join(output_model_dir, marker_id + '.hmm')
            if 'PF' in marker_id:
                # Pfam markers live in one combined model file.
                os.system('hmmfetch ' + self.pfam_model_file + ' ' + marker_id_to_name[marker_id] + ' > ' + output_model_file)
            else:
                # TIGRFAM markers are stored one model per file.
                input_model_file = os.path.join(self.tigrfams_model_dir, marker_id + '.HMM')
                os.system('hmmfetch ' + input_model_file + ' ' + marker_id + ' > ' + output_model_file)

            # write model to file
            for line in open(output_model_file):
                fout_model.write(line)
        fout_model.close()
        self.logger.info(' HMM models written to: ' + hmm_model_out)

        # read HMM model metadata and summarize it in a TSV file
        hmm_model_parse = HmmModelParser(hmm_model_out)
        hmm_models = hmm_model_parse.models()
        fout_info = open(hmm_info_out, 'w')
        fout_info.write('Model Accession\tName\tDescription\tLength\n')
        for model in hmm_models.values():
            fout_info.write('%s\t%s\t%s\t%s\n' % (model.acc, model.name, model.desc, model.leng))
        fout_info.close()
        self.logger.info(' HMM information written to: ' + hmm_info_out)

    def run(self, genome_id_file,
            marker_id_file,
            model,
            output_dir):
        """Identify phylogenetic tree.

        Parameters
        ----------
        genome_id_file : str
            File specifying unique ids of genomes to include in tree.
        marker_id_file : str
            File specifying unique ids of marker genes to use for inference.
        model : str ['wag' or 'jtt']
            Model of evolution to use.
        output_dir : str
            Directory to store results.
        """
        time_keeper = TimeKeeper()

        output_alignment_dir = os.path.join(output_dir, 'alignments')
        make_sure_path_exists(output_alignment_dir)
        output_model_dir = os.path.join(output_dir, 'hmm_models')
        make_sure_path_exists(output_model_dir)

        # read directory for each genome
        genome_dirs = read_genome_dir_file(self.genome_dir_file)

        # read genomes within the ingroup
        ncbi_genome_ids, user_genome_ids = read_genome_id_file(genome_id_file)
        genome_ids = ncbi_genome_ids.union(user_genome_ids)
        self.logger.info('Inferring tree for %d genomes.' % len(genome_ids))
        self.logger.info('NCBI genomes: %d' % len(ncbi_genome_ids))
        self.logger.info('User genomes: %d' % len(user_genome_ids))

        # get marker genes
        self.logger.info('Reading marker genes.')
        marker_genes = read_marker_id_file(marker_id_file)
        self.logger.info('Read %d marker genes.' % len(marker_genes))

        # gather all single-copy HMMs into a single model file
        hmm_model_out = os.path.join(output_dir, 'phylo.hmm')
        hmm_info_out = os.path.join(output_dir, 'phylo.tsv')
        self.logger.info('Generating marker gene HMM model files.')
        self._fetch_marker_models(marker_genes, hmm_model_out, hmm_info_out, output_model_dir)

        # align gene sequences
        align_markers = AlignMarkers(self.cpus)
        align_markers.run(genome_ids, genome_dirs, marker_genes, True, output_alignment_dir, output_model_dir)

        # create concatenated alignment file
        self.logger.info('Concatenating alignments.')
        concatenated_alignment_file = os.path.join(output_dir, 'concatenated_alignment.faa')
        marker_file = os.path.join(output_dir, 'concatenated_markers.tsv')
        create_concatenated_alignment(genome_ids, marker_genes, output_alignment_dir, concatenated_alignment_file, marker_file)

        # create concatenated genome tree with FastTree
        self.logger.info('Inferring concatenated genome tree.')
        concatenated_tree = os.path.join(output_dir, 'concatenated.tree')
        concatenated_tree_log = os.path.join(output_dir, 'concatenated.tree.log')
        log_file = os.path.join(output_dir, 'fasttree.log')
        fast_tree = FastTree(multithreaded=True)
        fast_tree.run(concatenated_alignment_file, 'prot', model, concatenated_tree, concatenated_tree_log, log_file)

        # generate summary report of the run parameters and timing
        report_out = os.path.join(output_dir, 'infer_workflow.log')
        fout = open(report_out, 'w')
        fout.write('[infer]\n')
        fout.write('Genome Id file: %s\n' % genome_id_file)
        fout.write('Marker Id file: %s\n' % marker_id_file)
        fout.write('Model of evolution: %s\n' % model)
        fout.write(time_keeper.get_time_stamp())
        fout.close()
'use strict';
// Auto-generated AngularJS (1.x) locale file for the "wal" (Wolaytta) locale.
// Registers an ngLocale module whose $locale value supplies localized
// date/time names, number-formatting patterns, and the plural-category rule.
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};

// Number of digits after the decimal point in the string form of n.
function getDecimals(n) {
  n = n + '';
  var i = n.indexOf('.');
  return (i == -1) ? 0 : n.length - i - 1;
}

// CLDR plural operands: v = number of visible fraction digits (capped at 3
// unless a precision is given), f = the visible fraction digits as an integer.
function getVF(n, opt_precision) {
  var v = opt_precision;
  if (undefined === v) {
    v = Math.min(getDecimals(n), 3);
  }
  var base = Math.pow(10, v);
  var f = ((n * base) | 0) % base;
  return {v: v, f: f};
}

$provide.value("$locale", {
  "DATETIME_FORMATS": {
    "AMPMS": [
      "\u121b\u1208\u12f6",
      "\u1243\u121b"
    ],
    "DAY": [
      "\u12c8\u130b",
      "\u1233\u12ed\u1296",
      "\u121b\u1246\u1233\u129b",
      "\u12a0\u1229\u12cb",
      "\u1203\u1219\u1233",
      "\u12a0\u122d\u1263",
      "\u1244\u122b"
    ],
    "MONTH": [
      "\u1303\u1295\u12e9\u12c8\u122a",
      "\u134c\u1265\u1229\u12c8\u122a",
      "\u121b\u122d\u127d",
      "\u12a4\u1355\u1228\u120d",
      "\u121c\u12ed",
      "\u1301\u1295",
      "\u1301\u120b\u12ed",
      "\u12a6\u1308\u1235\u1275",
      "\u1234\u1355\u1274\u121d\u1260\u122d",
      "\u12a6\u12ad\u1270\u12cd\u1260\u122d",
      "\u1296\u126c\u121d\u1260\u122d",
      "\u12f2\u1234\u121d\u1260\u122d"
    ],
    "SHORTDAY": [
      "\u12c8\u130b",
      "\u1233\u12ed\u1296",
      "\u121b\u1246\u1233\u129b",
      "\u12a0\u1229\u12cb",
      "\u1203\u1219\u1233",
      "\u12a0\u122d\u1263",
      "\u1244\u122b"
    ],
    "SHORTMONTH": [
      "\u1303\u1295\u12e9",
      "\u134c\u1265\u1229",
      "\u121b\u122d\u127d",
      "\u12a4\u1355\u1228",
      "\u121c\u12ed",
      "\u1301\u1295",
      "\u1301\u120b\u12ed",
      "\u12a6\u1308\u1235",
      "\u1234\u1355\u1274",
      "\u12a6\u12ad\u1270",
      "\u1296\u126c\u121d",
      "\u12f2\u1234\u121d"
    ],
    "fullDate": "EEEE\u1365 dd MMMM \u130b\u120b\u1233 y G",
    "longDate": "dd MMMM y",
    "medium": "dd-MMM-y h:mm:ss a",
    "mediumDate": "dd-MMM-y",
    "mediumTime": "h:mm:ss a",
    "short": "dd/MM/yy h:mm a",
    "shortDate": "dd/MM/yy",
    "shortTime": "h:mm a"
  },
  "NUMBER_FORMATS": {
    "CURRENCY_SYM": "Birr",
    "DECIMAL_SEP": ".",
    "GROUP_SEP": "\u2019",
    // PATTERNS[0] is the plain decimal pattern, PATTERNS[1] the currency one.
    "PATTERNS": [
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 3,
        "minFrac": 0,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "",
        "posPre": "",
        "posSuf": ""
      },
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 2,
        "minFrac": 2,
        "minInt": 1,
        "negPre": "\u00a4-",
        "negSuf": "",
        "posPre": "\u00a4",
        "posSuf": ""
      }
    ]
  },
  "id": "wal",
  // CLDR plural rule: "one" for integer 1 with no visible fraction digits.
  "pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
/INGInious-0.8.7.tar.gz/INGInious-0.8.7/inginious/frontend/static/js/codemirror/mode/livescript/livescript.js | (function(mod) {
  // UMD wrapper: load under CommonJS, AMD, or a plain browser global.
  if (typeof exports == "object" && typeof module == "object") // CommonJS
    mod(require("../../lib/codemirror"));
  else if (typeof define == "function" && define.amd) // AMD
    define(["../../lib/codemirror"], mod);
  else // Plain browser env
    mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
// CodeMirror mode for LiveScript.  The tokenizer is a small state machine:
// state.next names the active rule set in the Rules table below; each rule
// is {token, regex, next} and the first regex that matches wins.
CodeMirror.defineMode('livescript', function(){
  var tokenBase = function(stream, state) {
    var next_rule = state.next || "start";
    if (next_rule) {
      state.next = state.next; // no-op self-assignment, kept as in upstream
      var nr = Rules[next_rule];
      // Array-valued rule set: try each rule in order.
      if (nr.splice) {
        for (var i$ = 0; i$ < nr.length; ++i$) {
          var r = nr[i$];
          if (r.regex && stream.match(r.regex)) {
            state.next = r.next || state.next;
            return r.token;
          }
        }
        stream.next();
        return 'error';
      }
      // NOTE(review): fallback for a non-array rule set; every entry in the
      // Rules table below is an array, so this branch appears unreachable —
      // confirm before relying on or changing it.
      if (stream.match(r = Rules[next_rule])) {
        if (r.regex && stream.match(r.regex)) {
          state.next = r.next;
          return r.token;
        } else {
          stream.next();
          return 'error';
        }
      }
    }
    stream.next();
    return 'error';
  };
  var external = {
    startState: function(){
      return {
        next: 'start',
        lastToken: {style: null, indent: 0, content: ""}
      };
    },
    token: function(stream, state){
      // Re-run tokenBase until the stream actually advances (guards against
      // zero-width matches), then remember the token for indent().
      while (stream.pos == stream.start)
        var style = tokenBase(stream, state);
      state.lastToken = {
        style: style,
        indent: stream.indentation(),
        content: stream.current()
      };
      // Rule tokens use dots (e.g. 'comment.doc'); CodeMirror wants spaces.
      return style.replace(/\./g, ' ');
    },
    indent: function(state){
      // Indent two extra columns after tokens matching `indenter` below.
      var indentation = state.lastToken.indent;
      if (state.lastToken.content.match(indenter)) {
        indentation += 2;
      }
      return indentation;
    }
  };
  return external;
});
// LiveScript identifier: no leading digit/space, allows dashed-names.
var identifier = '(?![\\d\\s])[$\\w\\xAA-\\uFFDC](?:(?!\\s)[$\\w\\xAA-\\uFFDC]|-[A-Za-z])*';
// Line endings that should trigger extra indentation on the next line.
var indenter = RegExp('(?:[({[=:]|[-~]>|\\b(?:e(?:lse|xport)|d(?:o|efault)|t(?:ry|hen)|finally|import(?:\\s*all)?|const|var|let|new|catch(?:\\s*' + identifier + ')?))\\s*$');
var keywordend = '(?![$\\w]|-[A-Za-z]|\\s*:(?![:=]))';
// Catch-all rule used as the tail of every string-like rule set.
var stringfill = {
  token: 'string',
  regex: '.+'
};
// Rule table: keys are tokenizer states; values are ordered rule arrays.
// String regexes are compiled (anchored with '^') by the loop at the bottom.
var Rules = {
  start: [
    {
      token: 'comment.doc',
      regex: '/\\*',
      next: 'comment'
    }, {
      token: 'comment',
      regex: '#.*'
    }, {
      token: 'keyword',
      regex: '(?:t(?:h(?:is|row|en)|ry|ypeof!?)|c(?:on(?:tinue|st)|a(?:se|tch)|lass)|i(?:n(?:stanceof)?|mp(?:ort(?:\\s+all)?|lements)|[fs])|d(?:e(?:fault|lete|bugger)|o)|f(?:or(?:\\s+own)?|inally|unction)|s(?:uper|witch)|e(?:lse|x(?:tends|port)|val)|a(?:nd|rguments)|n(?:ew|ot)|un(?:less|til)|w(?:hile|ith)|o[fr]|return|break|let|var|loop)' + keywordend
    }, {
      token: 'constant.language',
      regex: '(?:true|false|yes|no|on|off|null|void|undefined)' + keywordend
    }, {
      token: 'invalid.illegal',
      regex: '(?:p(?:ackage|r(?:ivate|otected)|ublic)|i(?:mplements|nterface)|enum|static|yield)' + keywordend
    }, {
      token: 'language.support.class',
      regex: '(?:R(?:e(?:gExp|ferenceError)|angeError)|S(?:tring|yntaxError)|E(?:rror|valError)|Array|Boolean|Date|Function|Number|Object|TypeError|URIError)' + keywordend
    }, {
      token: 'language.support.function',
      regex: '(?:is(?:NaN|Finite)|parse(?:Int|Float)|Math|JSON|(?:en|de)codeURI(?:Component)?)' + keywordend
    }, {
      token: 'variable.language',
      regex: '(?:t(?:hat|il|o)|f(?:rom|allthrough)|it|by|e)' + keywordend
    }, {
      token: 'identifier',
      regex: identifier + '\\s*:(?![:=])'
    }, {
      token: 'variable',
      regex: identifier
    }, {
      token: 'keyword.operator',
      regex: '(?:\\.{3}|\\s+\\?)'
    }, {
      token: 'keyword.variable',
      regex: '(?:@+|::|\\.\\.)',
      next: 'key'
    }, {
      token: 'keyword.operator',
      regex: '\\.\\s*',
      next: 'key'
    }, {
      token: 'string',
      regex: '\\\\\\S[^\\s,;)}\\]]*'
    }, {
      token: 'string.doc',
      regex: '\'\'\'',
      next: 'qdoc'
    }, {
      token: 'string.doc',
      regex: '"""',
      next: 'qqdoc'
    }, {
      token: 'string',
      regex: '\'',
      next: 'qstring'
    }, {
      token: 'string',
      regex: '"',
      next: 'qqstring'
    }, {
      token: 'string',
      regex: '`',
      next: 'js'
    }, {
      token: 'string',
      regex: '<\\[',
      next: 'words'
    }, {
      token: 'string.regex',
      regex: '//',
      next: 'heregex'
    }, {
      token: 'string.regex',
      regex: '\\/(?:[^[\\/\\n\\\\]*(?:(?:\\\\.|\\[[^\\]\\n\\\\]*(?:\\\\.[^\\]\\n\\\\]*)*\\])[^[\\/\\n\\\\]*)*)\\/[gimy$]{0,4}',
      next: 'key'
    }, {
      token: 'constant.numeric',
      regex: '(?:0x[\\da-fA-F][\\da-fA-F_]*|(?:[2-9]|[12]\\d|3[0-6])r[\\da-zA-Z][\\da-zA-Z_]*|(?:\\d[\\d_]*(?:\\.\\d[\\d_]*)?|\\.\\d[\\d_]*)(?:e[+-]?\\d[\\d_]*)?[\\w$]*)'
    }, {
      token: 'lparen',
      regex: '[({[]'
    }, {
      token: 'rparen',
      regex: '[)}\\]]',
      next: 'key'
    }, {
      token: 'keyword.operator',
      regex: '\\S+'
    }, {
      token: 'text',
      regex: '\\s+'
    }
  ],
  heregex: [
    {
      token: 'string.regex',
      regex: '.*?//[gimy$?]{0,4}',
      next: 'start'
    }, {
      token: 'string.regex',
      regex: '\\s*#{'
    }, {
      token: 'comment.regex',
      regex: '\\s+(?:#.*)?'
    }, {
      token: 'string.regex',
      regex: '\\S+'
    }
  ],
  key: [
    {
      token: 'keyword.operator',
      regex: '[.?@!]+'
    }, {
      token: 'identifier',
      regex: identifier,
      next: 'start'
    }, {
      token: 'text',
      regex: '',
      next: 'start'
    }
  ],
  comment: [
    {
      token: 'comment.doc',
      regex: '.*?\\*/',
      next: 'start'
    }, {
      token: 'comment.doc',
      regex: '.+'
    }
  ],
  qdoc: [
    {
      token: 'string',
      regex: ".*?'''",
      next: 'key'
    }, stringfill
  ],
  qqdoc: [
    {
      token: 'string',
      regex: '.*?"""',
      next: 'key'
    }, stringfill
  ],
  qstring: [
    {
      token: 'string',
      regex: '[^\\\\\']*(?:\\\\.[^\\\\\']*)*\'',
      next: 'key'
    }, stringfill
  ],
  qqstring: [
    {
      token: 'string',
      regex: '[^\\\\"]*(?:\\\\.[^\\\\"]*)*"',
      next: 'key'
    }, stringfill
  ],
  js: [
    {
      token: 'string',
      regex: '[^\\\\`]*(?:\\\\.[^\\\\`]*)*`',
      next: 'key'
    }, stringfill
  ],
  words: [
    {
      token: 'string',
      regex: '.*?\\]>',
      next: 'key'
    }, stringfill
  ]
};
// One-time compile pass: anchor every string regex in Rules with '^'.
for (var idx in Rules) {
  var r = Rules[idx];
  if (r.splice) {
    for (var i = 0, len = r.length; i < len; ++i) {
      var rr = r[i];
      if (typeof rr.regex === 'string') {
        Rules[idx][i].regex = new RegExp('^' + rr.regex);
      }
    }
  // NOTE(review): 'rr' here leaks from the inner loop above (var is
  // function-scoped); 'typeof rr.regex' was probably meant to be 'r.regex'.
  // Dead in practice while every Rules value is an array.
  } else if (typeof rr.regex === 'string') {
    Rules[idx].regex = new RegExp('^' + r.regex);
  }
}
CodeMirror.defineMIME('text/x-livescript', 'livescript');
});
/Electrum-Zcash-Random-Fork-3.1.3b5.tar.gz/Electrum-Zcash-Random-Fork-3.1.3b5/lib/contacts.py | import re
import dns
from dns.exception import DNSException
import json
import traceback
import sys
from . import bitcoin
from . import dnssec
from .util import export_meta, import_meta, print_error, to_string
class Contacts(dict):
    """Address book persisted in the wallet storage.

    Maps a Zcash address (key) to a ``(type, label)`` tuple; the only type
    currently produced is ``'address'``.  Every mutation is written back to
    the ``'contacts'`` entry of *storage* immediately via :meth:`save`.
    """

    def __init__(self, storage):
        self.storage = storage
        d = self.storage.get('contacts', {})
        try:
            self.update(d)
        except Exception:
            # Corrupt/unreadable contacts data: start with an empty book
            # rather than preventing the wallet from loading.
            # (Narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.)
            return
        # backward compatibility: the old format stored label -> (type, address);
        # re-key those entries so the address itself is the dictionary key.
        for k, v in self.items():
            _type, n = v
            if _type == 'address' and bitcoin.is_address(n):
                self.pop(k)
                self[n] = ('address', k)

    def save(self):
        """Persist the whole mapping into the wallet storage."""
        self.storage.put('contacts', dict(self))

    def import_file(self, path):
        """Merge contacts from the JSON file at *path* (validated first)."""
        import_meta(path, self._validate, self.load_meta)

    def load_meta(self, data):
        """Merge already-validated *data* and persist."""
        self.update(data)
        self.save()

    def export_file(self, filename):
        """Write all contacts to *filename* as JSON."""
        export_meta(self, filename)

    def __setitem__(self, key, value):
        dict.__setitem__(self, key, value)
        self.save()  # keep storage in sync on every insert/update

    def pop(self, key):
        # NOTE: unlike dict.pop this returns None and silently ignores
        # missing keys; callers rely on that, so the signature is kept.
        if key in self:
            dict.pop(self, key)
            self.save()

    def resolve(self, k):
        """Resolve *k* (address, contact label or OpenAlias) to a dict.

        Raises a generic Exception when *k* cannot be resolved.
        """
        if bitcoin.is_address(k):
            return {
                'address': k,
                'type': 'address'
            }
        if k in self:
            _type, addr = self[k]
            if _type == 'address':
                return {
                    'address': addr,
                    'type': 'contact'
                }
        out = self.resolve_openalias(k)
        if out:
            address, name, validated = out
            return {
                'address': address,
                'name': name,
                'type': 'openalias',
                'validated': validated
            }
        raise Exception("Invalid Zcash address or alias", k)

    def resolve_openalias(self, url):
        """Resolve an OpenAlias name via DNSSEC-validated TXT records.

        Returns ``(address, name, validated)`` or None when resolution
        fails or no matching ``oa1:zcash`` record is found.
        """
        # support email-style addresses, per the OA standard
        url = url.replace('@', '.')
        try:
            records, validated = dnssec.query(url, dns.rdatatype.TXT)
        except DNSException as e:
            print_error('Error resolving openalias: ', str(e))
            return None
        prefix = 'zcash'
        for record in records:
            string = to_string(record.strings[0], 'utf8')
            if string.startswith('oa1:' + prefix):
                address = self.find_regex(string, r'recipient_address=([A-Za-z0-9]+)')
                name = self.find_regex(string, r'recipient_name=([^;]+)')
                if not name:
                    name = address
                if not address:
                    continue
                return address, name, validated
        return None

    def find_regex(self, haystack, needle):
        """Return the first capture group of *needle* in *haystack*, or None."""
        match = re.search(needle, haystack)
        return match.group(1) if match else None

    def _validate(self, data):
        """Drop entries whose key is not a valid address or whose type
        is not 'address'; recurses into a wrapping 'contacts' key."""
        for k, v in list(data.items()):
            if k == 'contacts':
                return self._validate(v)
            if not bitcoin.is_address(k):
                data.pop(k)
            else:
                _type, _ = v
                if _type != 'address':
                    data.pop(k)
        return data
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas/geometry/shapes/torus.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import cos
from math import pi
from math import sin
from compas.geometry import matrix_from_frame
from compas.geometry import transform_points
from compas.geometry import Frame
from compas.geometry import Plane
from ._shape import Shape
class Torus(Shape):
    """A torus is defined by a plane and two radii.

    Parameters
    ----------
    plane : [point, normal] | :class:`~compas.geometry.Plane`
        The plane of the torus.
    radius_axis: float
        The radius of the axis.
    radius_pipe: float
        The radius of the pipe.

    Attributes
    ----------
    plane : :class:`~compas.geometry.Plane`
        The torus' plane.
    radius_axis : float
        The radius of the axis.
    radius_pipe : float
        The radius of the pipe.
    center : :class:`~compas.geometry.Point`, read-only
        The centre of the torus.
    area : float, read-only
        The surface area of the torus.
    volume : float, read-only
        The volume of the torus.

    Examples
    --------
    >>> from compas.geometry import Plane
    >>> from compas.geometry import Torus
    >>> torus = Torus(Plane.worldXY(), 5., 2.)
    >>> sdict = {'plane': Plane.worldXY().data, 'radius_axis': 5., 'radius_pipe': 2.}
    >>> sdict == torus.data
    True

    """

    __slots__ = ["_plane", "_radius_axis", "_radius_pipe"]

    def __init__(self, plane, radius_axis, radius_pipe, **kwargs):
        super(Torus, self).__init__(**kwargs)
        self._plane = None
        self._radius_axis = None
        self._radius_pipe = None
        self.plane = plane
        self.radius_axis = radius_axis
        self.radius_pipe = radius_pipe

    # ==========================================================================
    # data
    # ==========================================================================

    @property
    def DATASCHEMA(self):
        """:class:`schema.Schema` : Schema of the data representation."""
        import schema

        return schema.Schema(
            {
                "plane": Plane.DATASCHEMA.fget(None),
                "radius_axis": schema.And(float, lambda x: x > 0),
                "radius_pipe": schema.And(float, lambda x: x > 0),
            }
        )

    @property
    def JSONSCHEMANAME(self):
        """str : Name of the schema of the data representation in JSON format."""
        return "torus"

    @property
    def data(self):
        """dict : Returns the data dictionary that represents the torus."""
        return {
            "plane": self.plane.data,
            "radius_axis": self.radius_axis,
            "radius_pipe": self.radius_pipe,
        }

    @data.setter
    def data(self, data):
        self.plane = Plane.from_data(data["plane"])
        self.radius_axis = data["radius_axis"]
        self.radius_pipe = data["radius_pipe"]

    @classmethod
    def from_data(cls, data):
        """Construct a torus from its data representation.

        Parameters
        ----------
        data : dict
            The data dictionary.

        Returns
        -------
        :class:`~compas.geometry.Torus`
            The constructed torus.

        Examples
        --------
        >>> from compas.geometry import Torus
        >>> data = {'plane': Plane.worldXY().data, 'radius_axis': 4., 'radius_pipe': 1.}
        >>> torus = Torus.from_data(data)

        """
        torus = cls(Plane.from_data(data["plane"]), data["radius_axis"], data["radius_pipe"])
        return torus

    # ==========================================================================
    # properties
    # ==========================================================================

    @property
    def plane(self):
        return self._plane

    @plane.setter
    def plane(self, plane):
        self._plane = Plane(*plane)

    @property
    def radius_axis(self):
        return self._radius_axis

    @radius_axis.setter
    def radius_axis(self, radius):
        self._radius_axis = float(radius)

    @property
    def radius_pipe(self):
        return self._radius_pipe

    @radius_pipe.setter
    def radius_pipe(self, radius):
        self._radius_pipe = float(radius)

    @property
    def center(self):
        return self.plane.point

    @property
    def area(self):
        # surface area: circumference of the pipe section times the
        # circumference travelled by its centre.
        return (2 * pi * self.radius_pipe) * (2 * pi * self.radius_axis)

    @property
    def volume(self):
        # pipe cross-section area swept along the axis circle.
        return (pi * self.radius_pipe**2) * (2 * pi * self.radius_axis)

    # ==========================================================================
    # customisation
    # ==========================================================================

    def __repr__(self):
        return "Torus({0!r}, {1!r}, {2!r})".format(self.plane, self.radius_axis, self.radius_pipe)

    def __len__(self):
        return 3

    def __getitem__(self, key):
        if key == 0:
            return self.plane
        elif key == 1:
            return self.radius_axis
        elif key == 2:
            return self.radius_pipe
        else:
            raise KeyError

    def __setitem__(self, key, value):
        if key == 0:
            self.plane = value
        elif key == 1:
            self.radius_axis = value
        elif key == 2:
            self.radius_pipe = value
        else:
            raise KeyError

    def __iter__(self):
        return iter([self.plane, self.radius_axis, self.radius_pipe])

    # ==========================================================================
    # constructors
    # ==========================================================================

    # ==========================================================================
    # methods
    # ==========================================================================

    def to_vertices_and_faces(self, u=16, v=16, triangulated=False):
        """Returns a list of vertices and faces

        Parameters
        ----------
        u : int, optional
            Number of faces in the "u" direction.
        v : int, optional
            Number of faces in the "v" direction.
        triangulated: bool, optional
            If True, triangulate the faces.

        Returns
        -------
        list[list[float]]
            A list of vertex locations.
        list[list[int]]
            And a list of faces,
            with each face defined as a list of indices into the list of vertices.

        """
        # fixed: the guard rejects u < 3 / v < 3, so 3 is valid; the old
        # messages claimed "should be u > 3" / "v > 3".
        if u < 3:
            raise ValueError("The value for u should be at least 3.")
        if v < 3:
            raise ValueError("The value for v should be at least 3.")

        theta = pi * 2 / u
        phi = pi * 2 / v
        vertices = []
        for i in range(u):
            for j in range(v):
                x = cos(i * theta) * (self.radius_axis + self.radius_pipe * cos(j * phi))
                y = sin(i * theta) * (self.radius_axis + self.radius_pipe * cos(j * phi))
                z = self.radius_pipe * sin(j * phi)
                vertices.append([x, y, z])

        # transform vertices to torus' plane
        frame = Frame.from_plane(self.plane)
        M = matrix_from_frame(frame)
        vertices = transform_points(vertices, M)

        # quad faces, wrapping around in both directions
        faces = []
        for i in range(u):
            ii = (i + 1) % u
            for j in range(v):
                jj = (j + 1) % v
                a = i * v + j
                b = ii * v + j
                c = ii * v + jj
                d = i * v + jj
                faces.append([a, b, c, d])

        if triangulated:
            triangles = []
            for face in faces:
                if len(face) == 4:
                    triangles.append(face[0:3])
                    triangles.append([face[0], face[2], face[3]])
                else:
                    triangles.append(face)
            faces = triangles

        return vertices, faces

    def transform(self, transformation):
        """Transform the torus.

        Parameters
        ----------
        transformation : :class:`~compas.geometry.Transformation`
            The transformation used to transform the Torus.

        Returns
        -------
        None

        Examples
        --------
        >>> from compas.geometry import Frame
        >>> from compas.geometry import Plane
        >>> from compas.geometry import Transformation
        >>> from compas.geometry import Torus
        >>> torus = Torus(Plane.worldXY(), 5, 2)
        >>> frame = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
        >>> T = Transformation.from_frame(frame)
        >>> torus.transform(T)

        """
        self.plane.transform(transformation)
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas_blender/uninstall.py | import os
import sys
import compas
from compas._os import remove
from compas._os import remove_symlink
from compas._os import rename
import compas_blender
__all__ = ["uninstall"]
def uninstall(blender_path, version=None):
    """Uninstall COMPAS from Blender.

    Parameters
    ----------
    blender_path : str
        The path to the folder with the version number of Blender.
        For example, on Mac: ``'/Applications/Blender.app/Contents/Resources/2.83'``.
        On Windows: ``'%PROGRAMFILES%/Blender Foundation/Blender 2.83/2.83'``.
    version : {'2.83', '2.93', '3.1'}, optional
        The version number of Blender.
        Default is ``'2.93'``.

    Examples
    --------
    .. code-block:: bash

        $ python -m compas_blender.uninstall /Applications/blender.app/Contents/Resources/2.80

    """
    # The (un)install machinery requires an active conda environment.
    if not os.environ.get("CONDA_PREFIX"):
        print(
            "Conda environment not found. The installation into Blender requires an active conda environment with a matching Python version to continue."
        )
        sys.exit(-1)

    # Exactly one of `blender_path` / `version` must be given; with neither,
    # fall back to the default supported version.
    if not version and not blender_path:
        version = "2.93"

    if version and blender_path:
        print(
            "Both options cannot be provided simultaneously. Provide the full installation path, or the version with flag -v."
        )
        sys.exit(-1)

    if version:
        if compas.LINUX:
            print(
                "Version-based installs are currently not supported for Linux. Please provide the full installation path with the -p option."
            )
            sys.exit(-1)

        blender_path = compas_blender._get_default_blender_installation_path(version)

    if not os.path.exists(blender_path):
        raise FileNotFoundError("Blender version folder not found.")

    # The last path component is the version-numbered folder name.
    path, version = os.path.split(blender_path)

    print("Uninstalling COMPAS for Blender {}".format(version))

    startup = os.path.join(blender_path, "scripts", "startup")
    blenderpython_src = os.path.join(blender_path, "python")
    blenderpython_dst = os.path.join(blender_path, "original_python")
    compas_bootstrapper = os.path.join(startup, "compas_bootstrapper.py")

    # Installation replaced Blender's bundled python with a symlink and kept
    # the original under 'original_python'; undo both steps here.
    if os.path.exists(blenderpython_dst):
        print("Found existing installation, restoring bundled python installation...")
        if os.path.exists(blenderpython_src):
            remove_symlink(blenderpython_src)

        print(
            "  Renaming original_python back to bundled python folder: {} => {}".format(
                blenderpython_dst, blenderpython_src
            )
        )
        rename(blenderpython_dst, blenderpython_src)

    if os.path.exists(compas_bootstrapper):
        print("  Removing compas bootstrapper...")
        remove(compas_bootstrapper)

    print()
    print("COMPAS has been uninstalled from Blender {}.".format(version))
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":

    import argparse

    # CLI: accept either an explicit Blender installation folder (positional)
    # or a known version number (-v/--version); `uninstall` rejects both.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "blenderpath",
        nargs="?",
        help="The path to the folder with the version number of Blender.",
    )
    parser.add_argument(
        "-v",
        "--version",
        choices=["2.83", "2.93", "3.1"],
        help="The version of Blender to install COMPAS in.",
    )

    args = parser.parse_args()

    uninstall(args.blenderpath, version=args.version)
/Flask-PicoCMS-0.0.6.tar.gz/Flask-PicoCMS-0.0.6/README.md | Flask-PicoCMS
=============
Lightweight CMS backend for Flask apps.
Installation
------------
The easiest way to install this is through pip.
```
pip install Flask-PicoCMS
```
Basic example
-------------
Example Flask application, `app.py`:
```python
import os
from flask import Flask
from flask_picocms import CMS
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config["PICOCMS_DATABASE"] = os.path.join(basedir, "cms.sqlite")
app.config["PICOCMS_SOURCE_PAGES"] = os.path.join(basedir, "cms", "pages")
app.config["PICOCMS_SOURCE_DATA"] = os.path.join(basedir, "cms", "data")
pico = CMS(app)
# to make development easier
@app.before_request
def rebuild_cms():
pico.rebuild()
@app.route("/")
def index():
page = pico.get_content("/welcome")
return page.json["content"]["message"]
```
CMS content file (*.toml* or *.json*), e.g. `cms/pages/welcome.toml`:
```toml
title="welcome"
draft=false
date="2017-10-15"
[content]
title="welcome to picocms"
message="Hello World!"
``` | PypiClean |
/Fanery-0.2.5.tar.gz/Fanery-0.2.5/fanery/_service.py | __all__ = ['service', 'static']
from _term import (
Hict, is_str, to_str,
parse_term, parse_json,
to_simple, to_json,
)
from _crypto import (
nacl_box, nacl_sign, nacl_verify,
nacl_nonce, nacl_random, nacl_sha256,
PrivateKey, PublicKey, Box, SigningKey
)
from _autoreload import observe_module
from _auth import hooks as auth
from _state import get_state
import _config as conf
import _exc as exc
from os.path import splitext, normpath, realpath, isdir, isfile, join
from base64 import b64encode, b64decode
from time import time as timestamp
from functools import wraps
from sys import exc_info
from cxor import xor
import logging
logger = logging.getLogger('fanery.service')
try:
import ipdb as pdb
except ImportError:
import pdb
try:
import xtraceback
traceback = xtraceback.compat
traceback.install_traceback()
traceback.install_sys_excepthook()
except ImportError:
import traceback
# services registry -> see service() and lookup()
# maps a normalized url path (no surrounding slashes) to its wrapper function
registry = dict()
def is_abusive(domain, origin):
    """Tell whether *origin* has reached the abuse threshold of *domain*.

    Returns a bool when abuse tracking is enabled for the domain;
    falls through (None, falsy) when the configured maximum is zero
    or negative, i.e. tracking is disabled.
    """
    threshold = auth.max_origin_abuse_level(domain)
    if threshold <= 0:
        return None
    current = auth.abuse_level_by_origin(origin, domain)
    return bool(current >= threshold)
def decrypt(keys, sign, pad=None):
    """Verify *sign* with keys.vkey, optionally XOR-unpad the payload,
    decrypt it with a NaCl box built from keys.ckey/keys.skey, and
    return ``(box, parsed_term)``."""
    verified = nacl_verify(sign, keys.vkey)
    if pad:
        verified = xor(verified, pad)
    box = nacl_box(keys.ckey, keys.skey)[0]
    plaintext = box.decrypt(verified)
    return box, parse_json(plaintext)
def encrypt(state, box, term, pad=None):
    """Serialize *term*, encrypt it with *box*, optionally XOR-pad the
    ciphertext, sign it with state.sseed and return ``Hict(sign=...)``
    (base64-encoded)."""
    payload = box.encrypt(to_json(to_simple(term)), nacl_nonce())
    if pad:
        # the one-time pad must cover the whole ciphertext
        assert len(payload) <= len(pad), 'short-pad'
        payload = xor(payload, pad)
    signed = nacl_sign(payload, state.sseed)[0]
    return Hict(sign=b64encode(signed))
def service(urlpath=None, auto_parse=True, static=False, accel=False,
            cache=False, force_download=False, content_disposition=False,
            auto_close_file=True, check_abusive=True, check_perm=True,
            postmortem_debug=False, force_define=False, log_exc=True,
            use_one_time_pad=True, safe=True, ssl=None, csp=None,
            **output):
    """Decorator factory: register the wrapped function in ``registry``
    under *urlpath* (default: the function name) and wrap it with the
    SSL/abuse/session/permission/crypto pipeline below.

    ``**output`` maps extension names (alnum) to formatter callables;
    'json', 'txt' and 'raw' formatters are provided by default.
    With ``safe=True`` the call payload arrives encrypted/signed in the
    'sid'/'sign' (and, with one-time pads, 'pid') request fields.
    """
    # validate custom output formatters, then install the defaults
    # (for/else: only runs when no formatter was rejected).
    for k, v in output.iteritems():
        if not (k and k.isalnum() and callable(v)):
            raise exc.InvalidOutputFormatter(k, v)
    else:
        output.setdefault('json', lambda t: to_json(to_simple(t)))
        output.setdefault('txt', lambda t: to_str(to_simple(t)))
        output.setdefault('raw', repr)

    # one-time pads only make sense for safe (session-bound) services
    if not safe:
        use_one_time_pad = False

    # content_disposition and force_download are mutually exclusive
    if content_disposition is True:
        force_download = False
    elif force_download is True:
        content_disposition = False

    def decorator(f):
        srv_path = (urlpath or f.__name__).strip('/')
        if srv_path in registry:
            # redefinition is only tolerated explicitly or in development
            if force_define is True or conf.IS_DEVELOPMENT:
                logger.debug("redefine <%s> for %s() in %s line %d" % (
                    srv_path,
                    f.func_code.co_name,
                    f.func_code.co_filename,
                    f.func_code.co_firstlineno))
            else:
                raise exc.MultipleDefine(srv_path)
        elif conf.IS_DEVELOPMENT:
            observe_module(f)

        @wraps(f)
        def wrapper(*args, **argd):
            _state_ = get_state()

            # require SSL unless explicitly disabled (relaxed in development)
            if ssl is not False and (not conf.IS_DEVELOPMENT or ssl) and not _state_.ssl: # noqa
                raise exc.RequireSSL(srv_path)

            domain = _state_.domain
            origin = _state_.origin
            if check_abusive and is_abusive(domain, origin):
                raise exc.Abusive(origin)

            if safe is True:
                # session lookup: 'sid'/'sign'/'pid' are popped from argd
                # here, so they never reach f or the auto-parse step.
                sid = b64decode(argd.pop('sid', ''))
                if not sid:
                    auth.incr_abuse(origin, domain,
                                    abuse=('sid', None, srv_path))
                    raise exc.Unauthorized

                stored = auth.load_state(domain, sid)
                if not stored:
                    auth.incr_abuse(origin, domain,
                                    abuse=('sid', sid, srv_path))
                    raise exc.UnknownSession
                elif stored.expires <= timestamp():
                    raise exc.ExpiredSession
                else:
                    _state_.update(stored)
                    _state_.origin = origin

                if check_perm and not auth.has_permission(_state_, srv_path):
                    auth.incr_abuse(origin, domain, incr=3,
                                    abuse=('auth', sid, srv_path))
                    raise exc.Unauthorized

                sign = b64decode(argd.pop('sign', ''))
                if not sign:
                    auth.incr_abuse(origin, domain, incr=3,
                                    abuse=('sign', sid, srv_path))
                    raise exc.InvalidCall('sign')

                if use_one_time_pad is True:
                    # consume the pre-generated one-time pad for this call
                    # (see gen_pad); each pad is usable exactly once.
                    pid, pads = b64decode(argd.pop('pid', '')), stored.pads
                    cpads = pads.get(srv_path, None)
                    if not pid or not cpads or pid not in cpads:
                        auth.incr_abuse(origin, domain, incr=3,
                                        abuse=('pid', sid, srv_path))
                        raise exc.InvalidCall('pid')
                    keys = cpads.pop(pid)
                    pad = keys.pad
                    if not cpads:
                        del pads[srv_path]
                else:
                    keys = _state_
                    pad = None

                try:
                    box, term = decrypt(keys, sign, pad)
                # NOTE(review): bare except — also swallows
                # KeyboardInterrupt/SystemExit during decrypt.
                except:
                    auth.incr_abuse(origin, domain, incr=3,
                                    abuse=('pad', sid, srv_path))
                    raise exc.InvalidCall('pad')
                argd.update(term)

            if auto_parse is True:
                args = parse_term(args) if args else args
                argd = parse_term(argd) if argd else argd

            try:
                _state_.service = srv_path
                # safe services without one-time pads receive the session
                # crypto box as their first positional argument (see gen_pad)
                if not use_one_time_pad and safe is True:
                    ret = f(box, *args, **argd)
                else:
                    ret = f(*args, **argd)
                if safe is True:
                    if _state_.expires > 0:
                        # sliding expiry: refresh the session on success
                        _state_.expires = timestamp() + auth.session_timeout(domain) # noqa
                        auth.store_state(_state_)
                    else:
                        # expires forced to 0 (e.g. by logout): drop session
                        auth.destroy_state(_state_)
            except Exception:
                exc_type, exc_value, exc_traceback = exc_info()

                if postmortem_debug is True:
                    traceback.print_exc()
                    pdb.post_mortem(exc_traceback)
                elif log_exc is True:
                    logger.error(srv_path, exc_info=True)

                if safe is True:
                    # safe services report failures as encrypted payloads
                    # instead of propagating the exception
                    ret = dict(low=isinstance(exc_type, (AssertionError, exc.FaneryException)), # noqa
                               exc=exc_type.__name__, err=to_simple(exc_value.args)) # noqa
                    _state_.error.update(ret)
                else:
                    raise

            if use_one_time_pad is True:
                return encrypt(keys, box, ret)
            else:
                return ret

        # metadata consumed by lookup()/consume() and the HTTP front-end
        wrapper.csp = csp
        wrapper.ssl = ssl
        wrapper.safe = safe
        wrapper.cache = cache
        wrapper.static = static
        wrapper.output = output
        wrapper.urlpath = srv_path
        wrapper.auto_parse = auto_parse
        wrapper.force_download = force_download
        wrapper.auto_close_file = auto_close_file
        wrapper.content_disposition = content_disposition
        wrapper.accel = accel.lower() if is_str(accel) else None

        registry[srv_path] = wrapper
        return wrapper

    return decorator
def lookup(urlpath, full=None, max_recursion=5, max_urlpath_args=5):
    """Resolve *urlpath* to ``(fun, args, ext, out)``.

    Strips the extension, then peels trailing path segments (up to
    *max_urlpath_args*) into positional args until a registered service
    path matches; failing that, drops leading prefixes recursively (up
    to *max_recursion*) and finally falls back to the '' catch-all
    service.  For static services the extension is re-attached to the
    last arg; *out* is the matching output formatter (or False when the
    extension has none, None when there is no extension/function).
    """
    path, ext = splitext(urlpath.strip().strip('/'))
    args = []
    # peel trailing segments into positional arguments
    while path not in registry and len(args) <= max_urlpath_args:
        try:
            path, arg = path.strip().rsplit('/', 1)
            args.append(arg.strip())
        except ValueError:
            break
    fun = registry.get(path, None)
    if not fun and max_recursion > 0:
        try:
            # drop the leading prefix and retry; remember the original
            # path in `full` for the catch-all fallback
            prefix, urlpath = urlpath.split('/', 1)
            return lookup(urlpath, full or urlpath, max_recursion - 1)
        except ValueError:
            # no '/' left: hand the remembered full path to the '' service
            # NOTE(review): `full` may still be None here on a first-level
            # miss — confirm the catch-all service tolerates [None].
            fun = registry.get('', None)
            args = [full]
    elif args:
        # segments were collected right-to-left
        args.reverse()
        if fun and fun.static is True:
            args[-1] += ext
    out = fun.output.get(ext[1:], False) if ext and fun else None
    return fun, args, ext, out
def consume(_state_, service_call, *args, **argd):
    """Execute a service call and log a one-line access record.

    *service_call* is either a url path string (resolved via lookup)
    or an already-resolved ``(urlpath, fun, argv, ext, out)`` tuple.
    Returns ``(fun, ext, result)`` where the result is formatted by the
    extension's output formatter when one applies.
    """
    start = timestamp()
    try:
        fun = error = exception = None
        if isinstance(service_call, basestring):
            urlpath = service_call
            fun, argv, ext, out = lookup(normpath(service_call))
        else:
            urlpath, fun, argv, ext, out = service_call
        if not callable(fun):
            raise exc.NotFound(urlpath)
        # an extension requires a formatter, except for static file services
        if ext and not (fun.static or callable(out)):
            raise exc.UndefinedOutputFormatter(urlpath)
        argv.extend(args)
        ret = fun(*argv, **argd)
        return fun, ext, out(ret) if ext and out else ret
    # Python 2 syntax: bind and re-raise so the finally block can log
    # the failure class without swallowing it.
    except exc.FaneryException, error:
        raise
    except Exception, exception:
        raise
    finally:
        # access log: ssl/auto_parse/static flags, origin, domain, session,
        # user, role, elapsed seconds, path
        # NOTE(review): `urlpath` is unbound here if unpacking the tuple form
        # of service_call raised — confirm callers always pass 5-tuples.
        sid, profile = _state_.sid, _state_.profile
        msg = "%s %s %s %s %s %s %s %s %0.6f %s" % (
            'S' if _state_.ssl else '-',
            'A' if fun and fun.auto_parse else '-',
            'S' if fun and fun.static else '-',
            _state_.origin, _state_.domain,
            sid.encode('hex') if sid else '-',
            profile.username if profile else '-',
            _state_.role or '-',
            timestamp() - start,
            urlpath)
        if _state_.error:
            logger.error("%s [%s]" % (msg, _state_.error.exc))
        elif error:
            logger.warning("%s [%s]" % (msg, error.__class__.__name__))
        elif exception:
            logger.error("%s [%s]" % (msg, exception.__class__.__name__))
        else:
            logger.info(msg)
def static(urlpath, root, index='index.html', **argd):
    """Register an unsafe (no-session) service that serves files from *root*.

    Returns the registered `serve_file` service.  Resolved paths are
    normalized and must stay inside *root* (realpath prefix check), which
    blocks '..' traversal; directories are served via *index*.
    """
    # trailing separator so the startswith() containment check below
    # cannot match sibling directories sharing the prefix
    root = join(realpath(normpath(root)), '')
    if not isdir(root):
        raise exc.NotFound(root)
    argd.setdefault('cache', True)
    argd.setdefault('log_exc', False)
    argd.setdefault('csp', "default-src 'self' 'unsafe-eval'")

    @service(urlpath, auto_parse=False, static=True, safe=False, **argd)
    def serve_file(*args, **argd):
        filepath = realpath(normpath(join(root, *args)))
        if isdir(filepath):
            if index:
                filepath = join(filepath, index)
            elif filepath.startswith(root):
                return filepath
        # NOTE(review): this else pairs with `if isdir(filepath)`, so a path
        # naming a plain file raises NotFound here and never reaches the
        # isfile() check below — looks like it was meant to pair with the
        # inner elif; confirm against upstream before changing.
        else:
            raise exc.NotFound(join(*args or [index]))
        if isfile(filepath) and filepath.startswith(root):
            return filepath
        else:
            raise exc.NotFound(join(*args or [index]))

    return serve_file
static(conf.JFANERY_URLPATH, conf.JFANERY_DIRPATH)
@service('fanery/prelogin', ssl=False, safe=False, auto_parse=False, log_exc=False) # noqa
def prelogin(identity, **extra):
    """First half of the login handshake.

    Derives a NaCl box from the profile's password hash, stores a
    short-lived prelogin record (session id, seeds, keys, XOR-masked
    signature) and returns the server public key, password salt and an
    encrypted blob (cseed + verify key + sid + pad) for the client.
    On failure it returns random bytes of the same shape, so a caller
    cannot distinguish an unknown identity without trying to decrypt.
    """
    state = get_state()
    origin = state.origin
    domain = state.domain
    # rate-limit prelogin attempts per origin (0 or less disables the limit)
    max_count = auth.max_prelogin_count(domain)
    prelogin_count = auth.prelogin_count(domain, origin)
    if max_count <= 0 or prelogin_count < max_count:
        profile = auth.fetch_profile(domain, identity, **extra)
        if profile is not None:
            sid = auth.gen_session_id()
            hash = profile.password_hash
            # client key derived from the password hash: only a client that
            # knows the password can open the box below
            ckey = PrivateKey(nacl_sha256(hash))
            skey = PrivateKey.generate()
            sseed = nacl_random(32)
            cseed = nacl_random(32)
            box = Box(skey, ckey.public_key)
            csign = SigningKey(cseed)
            # expected login proof: signature of the hash, masked with a pad
            sign = csign.sign(hash)[:64]
            pad = nacl_random(len(sign))
            sign = xor(sign, pad)
            auth.store_prelogin(expires=timestamp() + auth.prelogin_grace_time(domain), # noqa
                                sid=sid, domain=domain, origin=origin,
                                identity=identity, sign=sign,
                                profile=profile._uuid,
                                ckey=ckey.public_key.encode(),
                                vkey=csign.verify_key.encode(),
                                skey=skey.encode(), sseed=sseed)
            vkey = SigningKey(sseed).verify_key.encode()
            enc = box.encrypt(cseed + vkey + sid + pad, nacl_nonce())
            return Hict(pkey=b64encode(skey.public_key.encode()),
                        salt=b64encode(profile.password_salt),
                        enc=b64encode(enc))
        else:
            auth.incr_abuse(origin, domain, abuse=('prelogin', identity))
    # do not tell directly prelogin attempt was unsuccessful
    # let the client waste time trying to decrypt some garbage
    return Hict(pkey=b64encode(PrivateKey.generate().public_key.encode()),
                salt=b64encode(nacl_random(8)),
                enc=b64encode(nacl_random(184)))
@service('fanery/login', ssl=False, safe=False, auto_parse=False, log_exc=False) # noqa
def login(identity, sid, sign, force=False):
    """Second half of the login handshake.

    Validates the prelogin record and the client's unmasked signature,
    enforces the per-user active-session limit (``force=True`` evicts
    existing sessions), promotes the prelogin into a stored session
    state, and returns fresh session keys signed with the server seed.
    Raises InvalidCredential on any validation failure.
    """
    state = get_state()
    origin = state.origin
    domain = state.domain
    sign = b64decode(sign)
    sid = b64decode(sid)
    prelogin = auth.fetch_prelogin(origin, domain, sid)
    profile = auth.fetch_profile(domain, identity) if prelogin else None
    if not profile or not prelogin or timestamp() >= prelogin.expires:
        auth.incr_abuse(origin, domain, abuse=('login', identity))
    elif prelogin.profile != profile._uuid or prelogin.sign != sign:
        # wrong proof-of-password: penalize harder than a stale prelogin
        auth.incr_abuse(origin, domain, incr=3, abuse=('sign', identity))
    else:
        max_sessions = auth.max_active_user_sessions(domain, identity)
        if max_sessions > 0:
            active_sessions = auth.active_user_sessions(domain, identity)
            if not force and active_sessions >= max_sessions:
                raise exc.MaxActiveSessions
            elif force:
                auth.destroy_user_sessions(domain, identity)
        # fresh per-session key pairs; the response is encrypted with the
        # prelogin box and signed with the prelogin server seed
        box = Box(PrivateKey(prelogin.skey), PublicKey(prelogin.ckey))
        skey = PrivateKey.generate()
        ckey = PrivateKey.generate()
        expires = timestamp() + auth.session_timeout(domain)
        state.update(sid=sid, profile=profile, expires=expires,
                     ckey=ckey.public_key.encode(), skey=skey.encode(),
                     sseed=prelogin.sseed, vkey=prelogin.vkey)
        auth.store_state(state)
        tstamp = str(int(timestamp()))
        msg = ckey.encode() + skey.public_key.encode() + tstamp
        enc = box.encrypt(msg, nacl_nonce())
        sign = SigningKey(prelogin.sseed).sign(enc)
        return Hict(sign=b64encode(sign))
    raise exc.InvalidCredential
@service('fanery/gen_pad', ssl=False, use_one_time_pad=False, auto_parse=False, log_exc=False) # noqa
def gen_pad(box, call, pad, tstamp):
    """Generate a one-time pad plus one-time key pairs for a service call.

    :param box: crypto box used to encrypt the reply for the client
    :param call: name of the service call the pad is generated for
    :param pad: base64-encoded client pad used to encrypt the reply
    :param tstamp: client timestamp, checked against the pad grace time
    :raises exc.Unauthorized: no authenticated profile / no permission
    :raises exc.InvalidCall: tstamp is not a parseable number
    :raises exc.CallPadTimeout: client clock outside the grace window
    :raises exc.CallPadsLimitExceeded: too many outstanding pads for call
    """
    state = get_state()
    if not state.profile:
        raise exc.Unauthorized
    try:
        tsdiff = timestamp() - float(tstamp)
    except (TypeError, ValueError):
        # tstamp was missing or not numeric: malformed request.
        raise exc.InvalidCall
    domain = state.domain
    pads = state.pads
    if abs(tsdiff) > auth.pad_grace_time(domain, call):
        auth.incr_abuse(state.origin, domain, abuse=('timeout', call))
        raise exc.CallPadTimeout(dict(tsdiff=tsdiff))
    elif not auth.has_permission(state, call):
        auth.incr_abuse(state.origin, domain, abuse=('denied', call))
        raise exc.Unauthorized
    elif call in pads and len(pads[call]) >= auth.max_pads_count(domain, call):
        auth.incr_abuse(state.origin, domain, abuse=('pads', call))
        raise exc.CallPadsLimitExceeded
    # gen one-time-pad and one-time-keys
    cid = nacl_random(16)
    cseed = nacl_random(32)
    sseed = nacl_random(32)
    skey = PrivateKey.generate()
    ckey = PrivateKey.generate()
    # server call pad data: the server pad mirrors the client pad size
    # minus the fixed protocol overhead (410 bytes).
    pad = b64decode(pad)
    size = len(pad) - 410
    cvkey = SigningKey(cseed).verify_key
    cpad = pads[call][cid] = Hict(pad=nacl_random(size), call=call,
                                  skey=skey.encode(), sseed=sseed,
                                  ckey=ckey.public_key.encode(),
                                  vkey=cvkey.encode())
    # client pad data (all values base64-encoded for transport)
    svkey = SigningKey(sseed).verify_key
    data = Hict(pad=b64encode(cpad.pad),
                pid=b64encode(cid),
                cseed=b64encode(cseed),
                ckey=b64encode(ckey.encode()),
                skey=b64encode(skey.public_key.encode()),
                vkey=b64encode(svkey.encode()))
    return encrypt(state, box, data, pad)
@service('fanery/logout', ssl=False, auto_parse=False, log_exc=False)
def logout(*args, **argd):
    """Terminate the current session by expiring its server-side state.

    Always returns True; arguments are accepted and ignored.
    """
    _state_ = get_state()
    _state_.expires = 0
    return True
/Manager-2.0.5.tar.gz/Manager-2.0.5/manager/globals/constants.py | #**********************************************************************************************************************
#*** Future imports.
#**********************************************************************************************************************
from __future__ import unicode_literals
#**********************************************************************************************************************
#*** External imports.
#**********************************************************************************************************************
import os
import platform
#**********************************************************************************************************************
#*** Internal imports.
#**********************************************************************************************************************
import manager
#**********************************************************************************************************************
#*** Module attributes.
#**********************************************************************************************************************
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2014 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "thomas.mansencal@gmail.com"
__status__ = "Production"
__all__ = ["Constants"]
#**********************************************************************************************************************
#*** Module classes and definitions.
#**********************************************************************************************************************
class Constants():
    """
    Defines **Manager** package default constants.
    """
    applicationName = "Manager"
    """
    :param applicationName: Package Application name.
    :type applicationName: unicode
    """
    majorVersion = "2"
    """
    :param majorVersion: Package major version.
    :type majorVersion: unicode
    """
    minorVersion = "0"
    """
    :param minorVersion: Package minor version.
    :type minorVersion: unicode
    """
    changeVersion = "5"
    """
    :param changeVersion: Package change version.
    :type changeVersion: unicode
    """
    version = ".".join((majorVersion, minorVersion, changeVersion))
    """
    :param version: Package version.
    :type version: unicode
    """
    logger = "Manager_Logger"
    """
    :param logger: Package logger name.
    :type logger: unicode
    """
    verbosityLevel = 3
    """
    :param verbosityLevel: Default logging verbosity level.
    :type verbosityLevel: int
    """
    verbosityLabels = ("Critical", "Error", "Warning", "Info", "Debug")
    """
    :param verbosityLabels: Logging verbosity labels.
    :type verbosityLabels: tuple
    """
    loggingDefaultFormatter = "Default"
    """
    :param loggingDefaultFormatter: Default logging formatter name.
    :type loggingDefaultFormatter: unicode
    """
    loggingSeparators = "*" * 96
    """
    :param loggingSeparators: Logging separators.
    :type loggingSeparators: unicode
    """
    defaultCodec = manager.DEFAULT_CODEC
    """
    :param defaultCodec: Default codec.
    :type defaultCodec: unicode
    """
    codecError = manager.CODEC_ERROR
    """
    :param codecError: Default codec error behavior.
    :type codecError: unicode
    """
    applicationDirectory = os.sep.join(("Manager", ".".join((majorVersion, minorVersion))))
    """
    :param applicationDirectory: Package Application directory.
    :type applicationDirectory: unicode
    """
    # Platform-dependent provider directory (dotted/hidden on Linux).
    if platform.system() == "Windows" or platform.system() == "Microsoft" or platform.system() == "Darwin":
        providerDirectory = "HDRLabs"
        """
        :param providerDirectory: Package provider directory.
        :type providerDirectory: unicode
        """
    elif platform.system() == "Linux":
        providerDirectory = ".HDRLabs"
        """
        :param providerDirectory: Package provider directory.
        :type providerDirectory: unicode
        """
    # NOTE(review): there is no fallback branch — on any other platform
    # "providerDirectory" is left undefined and attribute access raises;
    # confirm this is intended.
    nullObject = "None"
    """
    :param nullObject: Default null object string.
    :type nullObject: unicode
    """
/kiramibot-0.2.0.tar.gz/kiramibot-0.2.0/kirami/utils/jsondata.py |
import json
from pathlib import Path
from typing import Any, ClassVar
from pydantic import BaseModel, PrivateAttr, root_validator
from typing_extensions import Self
from kirami.config import DATA_DIR
from kirami.exception import FileNotExistError, ReadFileError
class JsonDict(dict[str, Any]):
    """A ``dict`` whose content can be persisted to and restored from a
    JSON file on disk."""

    _file_path: Path  # path of the backing JSON file
    _auto_load: bool  # whether the file was read eagerly in __init__
    _initial_data: dict[str, Any]  # pristine copy used by reset()

    def __init__(
        self,
        data: dict[str, Any] | None = None,
        /,
        *,
        path: str | Path = DATA_DIR,
        auto_load: bool = False,
    ) -> None:
        """Create a JSON data dict.

        Args:
            data: initial JSON data.
            path: file path of the backing JSON file.
            auto_load: whether to read the file eagerly on creation.
        """
        self._file_path = Path(path)
        self._auto_load = auto_load
        # Keep a pristine copy so reset() can restore the original content.
        self._initial_data = data.copy() if data else {}
        if auto_load and self._file_path.is_file():
            json_data = json.loads(self._file_path.read_text("utf-8"))
        else:
            json_data = self._initial_data
        self.file_path.parent.mkdir(parents=True, exist_ok=True)
        super().__init__(**json_data)

    @property
    def file_path(self) -> Path:
        """Path of the backing JSON file."""
        return self._file_path

    def load(self) -> None:
        """Reload data from the backing file.

        Raises:
            RuntimeError: if auto-loading is enabled.
            FileNotFoundError: if the backing file does not exist.
        """
        if self._auto_load:
            raise RuntimeError("Auto load is enabled, cannot load manually.")
        if not self._file_path.is_file():
            raise FileNotFoundError(self._file_path)
        self.update(json.loads(self._file_path.read_text("utf-8")))

    def save(self) -> None:
        """Write the current content to the backing file."""
        # The read side decodes UTF-8 explicitly, so encode it explicitly
        # too instead of relying on the locale-dependent default.
        self.file_path.write_text(json.dumps(self), encoding="utf-8")

    def clear(self) -> None:
        """Remove all entries and persist the now-empty dict."""
        super().clear()
        self.save()

    def delete(self) -> None:
        """Clear the in-memory data and remove the backing file."""
        super().clear()
        self.file_path.unlink(missing_ok=True)

    def reset(self) -> None:
        """Restore the initial data and persist it."""
        super().clear()
        self.update(self._initial_data)
        self.save()
class JsonModel(BaseModel):
    """A pydantic model persisted as a JSON file.

    Subclass keyword arguments (``path``, ``auto_load``) configure where
    and how the model is stored; fields declared with ``scatter=True``
    split the storage into one file per scatter key.
    """

    _file_path: ClassVar[Path]  # backing file for the subclass
    _auto_load: ClassVar[bool]  # read the file automatically on init
    _scatter_fields: ClassVar[list[str]]  # fields forming the scatter key
    _initial_data: dict[str, Any] = PrivateAttr()  # snapshot for reset()

    def __init_subclass__(
        cls,
        path: str | Path = DATA_DIR,
        auto_load: bool = False,
    ) -> None:
        cls._file_path = Path(path) / f"{cls.__name__.lower()}.json"
        cls._auto_load = auto_load
        # Fields flagged with ``scatter=True`` select the per-instance
        # file name; they become immutable so the key cannot drift.
        scatter_fields = []
        for field in cls.__fields__.values():
            if field.field_info.extra.get("scatter", False):
                scatter_fields.append(field.name)
                field.field_info.allow_mutation = False
        cls._scatter_fields = scatter_fields
        if cls._auto_load and cls._scatter_fields:
            raise ValueError("auto_load and scatter fields cannot be used together.")
        return super().__init_subclass__()

    def __init__(self, **data: Any) -> None:
        super().__init__(**data)
        self.file_path.parent.mkdir(parents=True, exist_ok=True)
        self._initial_data = self.dict()

    @root_validator(pre=True)
    def _load_file(cls, values: dict[str, Any]) -> dict[str, Any]:
        # When auto-loading, the file content takes precedence over the
        # values passed to the constructor.
        if cls._auto_load and cls._file_path.is_file():
            return json.loads(cls._file_path.read_text("utf-8"))
        return values

    @property
    def file_path(self) -> Path:
        """Path of the backing JSON file for this instance."""
        file_path = self.__class__._file_path
        if self.__class__._scatter_fields:
            return file_path.with_suffix("") / f"{self.scatter_key}.json"
        return file_path

    @property
    def scatter_key(self) -> str:
        """Key (joined scatter field values) naming this instance's file."""
        return "_".join(
            str(getattr(self, field)) for field in self.__class__._scatter_fields
        )

    @classmethod
    def load(cls, scatter_key: str | None = None) -> Self:
        """Load data from file.

        Args:
            scatter_key: scatter key (without the ``.json`` suffix).

        Raises:
            ReadFileError: if auto-loading is enabled.
            FileNotExistError: if the backing file does not exist.
        """
        if cls._auto_load:
            raise ReadFileError("Auto load is enabled, cannot load manually.")
        if scatter_key:
            file_path = cls._file_path.with_suffix("") / f"{scatter_key}.json"
        else:
            file_path = cls._file_path
        if file_path.is_file():
            return cls(**json.loads(file_path.read_text("utf-8")))
        raise FileNotExistError

    @classmethod
    def load_all(cls) -> list[Self]:
        """Load every scattered instance."""
        if cls._auto_load:
            raise ReadFileError("Auto load is enabled, cannot load manually.")
        if not cls._scatter_fields:
            raise ReadFileError("No scatter fields.")
        # ``load`` expects the bare scatter key; ``file.name`` would keep
        # the ".json" suffix and make load() look up "<key>.json.json".
        return [
            cls.load(file.stem)
            for file in cls._file_path.with_suffix("").glob("*.json")
        ]

    def save(self) -> None:
        """Write the current model data to the backing file."""
        self.file_path.write_text(json.dumps(self.dict()), encoding="utf-8")

    def delete(self) -> None:
        """Remove the backing file."""
        self.file_path.unlink(missing_ok=True)

    def reset(self) -> None:
        """Restore all fields to their initial values."""
        for field, value in self._initial_data.items():
            setattr(self, field, value)

    class Config:
        validate_assignment = True
/Kivy-2.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl/kivy/loader.py | __all__ = ('Loader', 'LoaderBase', 'ProxyImage')
from kivy import kivy_data_dir
from kivy.logger import Logger
from kivy.clock import Clock
from kivy.cache import Cache
from kivy.core.image import ImageLoader, Image
from kivy.config import Config
from kivy.utils import platform
from collections import deque
from time import sleep
from os.path import join
from os import write, close, unlink, environ
import threading
import mimetypes
# Register the shared image cache used by the loader: at most 500 entries,
# each expiring after 60 seconds (see kivy.cache.Cache).
Cache.register('kv.loader', limit=500, timeout=60)
class ProxyImage(Image):
    """Placeholder image returned by :meth:`LoaderBase.image`.

    :Properties:
        `loaded`: bool, defaults to False
            True when the requested image was already available in cache.

    :Events:
        `on_load`
            Fired once the real image has been loaded or changed.
        `on_error`
            Fired when the image cannot be loaded.
            `error`: the exception that occurred
    """

    __events__ = ('on_load', 'on_error')

    def __init__(self, arg, **kwargs):
        # Pull our own keyword out before delegating to Image.
        is_loaded = kwargs.pop('loaded', False)
        super().__init__(arg, **kwargs)
        self.loaded = is_loaded

    def on_load(self):
        # Default event handler: clients bind to this event.
        pass

    def on_error(self, error):
        # Default event handler: clients bind to this event.
        pass
class LoaderBase(object):
    '''Common base for the Loader and specific implementations.
    By default, the Loader will be the best available loader implementation.
    The _update() function is called every 1 / 25.s or each frame if we have
    less than 25 FPS.
    '''
    # Clock trigger driving _update(); replaced by a real trigger per
    # instance in __init__.
    _trigger_update = None
    # NOTE(review): the docstring below documents EXT_ALIAS but is placed
    # before it (after _trigger_update); confirm intended placement.
    '''Alias for mimetype extensions.
    If you have trouble to have the right extension to be detected,
    you can either add #.EXT at the end of the url, or use this array
    to correct the detection.
    For example, a zip-file on Windows can be detected as pyz.
    By default, '.pyz' is translated to '.zip'
    .. versionadded:: 1.11.0
    '''
    EXT_ALIAS = {
        '.pyz': '.zip'
    }
    def __init__(self):
        self._loading_image = None
        self._error_image = None
        self._num_workers = 2
        self._max_upload_per_frame = 2
        self._paused = False
        self._resume_cond = threading.Condition()
        # Pending load requests (dicts), consumed by worker threads.
        self._q_load = deque()
        # Finished (filename, data) pairs, drained by _update() on the
        # main thread.
        self._q_done = deque()
        # (filename, ProxyImage) pairs still waiting for their image.
        self._client = []
        self._running = False
        self._start_wanted = False
        self._trigger_update = Clock.create_trigger(self._update)
    def __del__(self):
        if self._trigger_update is not None:
            self._trigger_update.cancel()
    def _set_num_workers(self, num):
        if num < 2:
            raise Exception('Must have at least 2 workers')
        self._num_workers = num
    def _get_num_workers(self):
        return self._num_workers
    num_workers = property(_get_num_workers, _set_num_workers)
    '''Number of workers to use while loading (used only if the loader
    implementation supports it). This setting impacts the loader only on
    initialization. Once the loader is started, the setting has no impact::
        from kivy.loader import Loader
        Loader.num_workers = 4
    The default value is 2 for giving a smooth user experience. You could
    increase the number of workers, then all the images will be loaded faster,
    but the user will not been able to use the application while loading.
    Prior to 1.6.0, the default number was 20, and loading many full-hd images
    was completely blocking the application.
    .. versionadded:: 1.6.0
    '''
    def _set_max_upload_per_frame(self, num):
        if num is not None and num < 1:
            raise Exception('Must have at least 1 image processing per image')
        self._max_upload_per_frame = num
    def _get_max_upload_per_frame(self):
        return self._max_upload_per_frame
    max_upload_per_frame = property(_get_max_upload_per_frame,
                                    _set_max_upload_per_frame)
    '''The number of images to upload per frame. By default, we'll
    upload only 2 images to the GPU per frame. If you are uploading many
    small images, you can easily increase this parameter to 10 or more.
    If you are loading multiple full HD images, the upload time may have
    consequences and block the application. If you want a
    smooth experience, use the default.
    As a matter of fact, a Full-HD RGB image will take ~6MB in memory,
    so it may take time. If you have activated mipmap=True too, then the
    GPU must calculate the mipmap of these big images too, in real time.
    Then it may be best to reduce the :attr:`max_upload_per_frame` to 1
    or 2. If you want to get rid of that (or reduce it a lot), take a
    look at the DDS format.
    .. versionadded:: 1.6.0
    '''
    def _get_loading_image(self):
        # Lazily load the default placeholder on first access.
        if not self._loading_image:
            loading_png_fn = join(kivy_data_dir, 'images', 'image-loading.zip')
            self._loading_image = ImageLoader.load(filename=loading_png_fn)
        return self._loading_image
    def _set_loading_image(self, image):
        if isinstance(image, str):
            self._loading_image = ImageLoader.load(filename=image)
        else:
            self._loading_image = image
    loading_image = property(_get_loading_image, _set_loading_image)
    '''Image used for loading.
    You can change it by doing::
        Loader.loading_image = 'loading.png'
    .. versionchanged:: 1.6.0
        Not readonly anymore.
    '''
    def _get_error_image(self):
        # Lazily load the default error placeholder on first access.
        if not self._error_image:
            error_png_fn = join(
                'atlas://data/images/defaulttheme/image-missing')
            self._error_image = ImageLoader.load(filename=error_png_fn)
        return self._error_image
    def _set_error_image(self, image):
        if isinstance(image, str):
            self._error_image = ImageLoader.load(filename=image)
        else:
            self._error_image = image
    error_image = property(_get_error_image, _set_error_image)
    '''Image used for error.
    You can change it by doing::
        Loader.error_image = 'error.png'
    .. versionchanged:: 1.6.0
        Not readonly anymore.
    '''
    def start(self):
        '''Start the loader thread/process.'''
        self._running = True
    def run(self, *largs):
        '''Main loop for the loader.'''
        pass
    def stop(self):
        '''Stop the loader thread/process.'''
        self._running = False
    def pause(self):
        '''Pause the loader, can be useful during interactions.
        .. versionadded:: 1.6.0
        '''
        self._paused = True
    def resume(self):
        '''Resume the loader, after a :meth:`pause`.
        .. versionadded:: 1.6.0
        '''
        self._paused = False
        self._resume_cond.acquire()
        self._resume_cond.notify_all()
        self._resume_cond.release()
    def _wait_for_resume(self):
        # Block the calling worker until resume() is signalled (polled
        # every 0.25s so a stop() is also noticed).
        while self._running and self._paused:
            self._resume_cond.acquire()
            self._resume_cond.wait(0.25)
            self._resume_cond.release()
    def _load(self, kwargs):
        '''(internal) Loading function, called by the thread.
        Will call _load_local() if the file is local,
        or _load_urllib() if the file is on Internet.
        '''
        # Back-pressure: don't let the done queue outgrow what _update()
        # can drain, otherwise memory would balloon with decoded images.
        while len(self._q_done) >= (
                self.max_upload_per_frame * self._num_workers):
            sleep(0.1)
        self._wait_for_resume()
        filename = kwargs['filename']
        load_callback = kwargs['load_callback']
        post_callback = kwargs['post_callback']
        try:
            proto = filename.split(':', 1)[0]
        except:
            # if blank filename then return
            return
        if load_callback is not None:
            data = load_callback(filename)
        elif proto in ('http', 'https', 'ftp', 'smb'):
            data = self._load_urllib(filename, kwargs['kwargs'])
        else:
            data = self._load_local(filename, kwargs['kwargs'])
        if post_callback:
            data = post_callback(data)
        self._q_done.appendleft((filename, data))
        self._trigger_update()
    def _load_local(self, filename, kwargs):
        '''(internal) Loading a local file'''
        # With recent changes to CoreImage, we must keep data otherwise,
        # we might be unable to recreate the texture afterwise.
        return ImageLoader.load(filename, keep_data=True, **kwargs)
    def _load_urllib(self, filename, kwargs):
        '''(internal) Loading a network file. First download it, save it to a
        temporary file, and pass it to _load_local().'''
        import urllib.request
        import tempfile
        proto = filename.split(':', 1)[0]
        if proto == 'smb':
            try:
                # note: it's important to load SMBHandler every time
                # otherwise the data is occasionally not loaded
                from smb.SMBHandler import SMBHandler
            except ImportError:
                Logger.warning(
                    'Loader: can not load PySMB: make sure it is installed')
                return
        data = fd = _out_osfd = None
        try:
            _out_filename = ''
            if proto == 'smb':
                # read from samba shares
                fd = urllib.request.build_opener(SMBHandler).open(filename)
            else:
                # read from internet
                request = urllib.request.Request(filename)
                if Config.has_option('network', 'useragent'):
                    useragent = Config.get('network', 'useragent')
                    if useragent:
                        request.add_header('User-Agent', useragent)
                # A custom context is only needed on Android and iOS
                # as we need to use the certs provided via certifi.
                ssl_ctx = None
                if platform in ['android', 'ios']:
                    import certifi
                    import ssl
                    ssl_ctx = ssl.create_default_context(cafile=certifi.where())
                    ssl_ctx.verify_mode = ssl.CERT_REQUIRED
                fd = urllib.request.urlopen(request, context=ssl_ctx)
            if '#.' in filename:
                # allow extension override from URL fragment
                suffix = '.' + filename.split('#.')[-1]
            else:
                ctype = fd.info().get_content_type()
                suffix = mimetypes.guess_extension(ctype)
                suffix = LoaderBase.EXT_ALIAS.get(suffix, suffix)
                if not suffix:
                    # strip query string and split on path
                    parts = filename.split('?')[0].split('/')[1:]
                    while len(parts) > 1 and not parts[0]:
                        # strip out blanks from '//'
                        parts = parts[1:]
                    if len(parts) > 1 and '.' in parts[-1]:
                        # we don't want '.com', '.net', etc. as the extension
                        suffix = '.' + parts[-1].split('.')[-1]
            _out_osfd, _out_filename = tempfile.mkstemp(
                prefix='kivyloader', suffix=suffix)
            idata = fd.read()
            fd.close()
            fd = None
            # write to local filename
            write(_out_osfd, idata)
            close(_out_osfd)
            _out_osfd = None
            # load data
            data = self._load_local(_out_filename, kwargs)
            # FIXME create a clean API for that
            for imdata in data._data:
                imdata.source = filename
        except Exception as ex:
            Logger.exception('Loader: Failed to load image <%s>' % filename)
            # close file when remote file not found or download error
            try:
                if _out_osfd:
                    close(_out_osfd)
            except OSError:
                pass
            # update client
            for c_filename, client in self._client[:]:
                if filename != c_filename:
                    continue
                # got one client to update
                client.image = self.error_image
                client.dispatch('on_error', error=ex)
                self._client.remove((c_filename, client))
            return self.error_image
        finally:
            # Always clean up descriptor and temporary file.
            if fd:
                fd.close()
            if _out_osfd:
                close(_out_osfd)
            if _out_filename != '':
                unlink(_out_filename)
        return data
    def _update(self, *largs):
        '''(internal) Check if a data is loaded, and pass to the client.'''
        # want to start it ?
        if self._start_wanted:
            if not self._running:
                self.start()
            self._start_wanted = False
        # in pause mode, don't unqueue anything.
        if self._paused:
            self._trigger_update()
            return
        # Drain at most max_upload_per_frame results per call to keep the
        # frame time bounded.
        for x in range(self.max_upload_per_frame):
            try:
                filename, data = self._q_done.pop()
            except IndexError:
                return
            # create the image
            image = data  # ProxyImage(data)
            if not image.nocache:
                Cache.append('kv.loader', filename, image)
            # update client
            for c_filename, client in self._client[:]:
                if filename != c_filename:
                    continue
                # got one client to update
                client.image = image
                client.loaded = True
                client.dispatch('on_load')
                self._client.remove((c_filename, client))
        self._trigger_update()
    def image(self, filename, load_callback=None, post_callback=None,
              **kwargs):
        '''Load a image using the Loader. A ProxyImage is returned with a
        loading image. You can use it as follows::
            from kivy.app import App
            from kivy.uix.image import Image
            from kivy.loader import Loader
            class TestApp(App):
                def _image_loaded(self, proxyImage):
                    if proxyImage.image.texture:
                        self.image.texture = proxyImage.image.texture
                def build(self):
                    proxyImage = Loader.image("myPic.jpg")
                    proxyImage.bind(on_load=self._image_loaded)
                    self.image = Image()
                    return self.image
            TestApp().run()
        In order to cancel all background loading, call *Loader.stop()*.
        '''
        # Cache protocol: the cached value is False while a load is queued,
        # the decoded image once done, or None when not requested yet.
        data = Cache.get('kv.loader', filename)
        if data not in (None, False):
            # found image, if data is not here, need to reload.
            return ProxyImage(data,
                              loading_image=self.loading_image,
                              loaded=True, **kwargs)
        client = ProxyImage(self.loading_image,
                            loading_image=self.loading_image, **kwargs)
        self._client.append((filename, client))
        if data is None:
            # if data is None, this is really the first time
            self._q_load.appendleft({
                'filename': filename,
                'load_callback': load_callback,
                'post_callback': post_callback,
                'kwargs': kwargs})
            if not kwargs.get('nocache', False):
                Cache.append('kv.loader', filename, False)
            self._start_wanted = True
            self._trigger_update()
        else:
            # already queued for loading
            pass
        return client
    def remove_from_cache(self, filename):
        # Drop a cached image so the next image() call reloads it.
        Cache.remove('kv.loader', filename)
#
# Loader implementation
#
if 'KIVY_DOC' in environ:
    # Building the documentation: don't spin up any worker machinery.
    Loader = None
else:
    #
    # Thread-pool based loader implementation.
    #
    from kivy.compat import queue
    from threading import Thread

    class _Worker(Thread):
        '''Thread executing tasks from a given tasks queue
        '''
        def __init__(self, pool, tasks):
            Thread.__init__(self)
            self.tasks = tasks
            self.daemon = True
            self.pool = pool
            self.start()

        def run(self):
            # Consume tasks until the owning pool is stopped.
            while self.pool.running:
                func, args, kargs = self.tasks.get()
                try:
                    func(*args, **kargs)
                except Exception as e:
                    # NOTE(review): errors are printed rather than logged;
                    # consider Logger.exception here.
                    print(e)
                self.tasks.task_done()

    class _ThreadPool(object):
        '''Pool of threads consuming tasks from a queue
        '''
        def __init__(self, num_threads):
            super(_ThreadPool, self).__init__()
            self.running = True
            self.tasks = queue.Queue()
            for _ in range(num_threads):
                _Worker(self, self.tasks)

        def add_task(self, func, *args, **kargs):
            '''Add a task to the queue
            '''
            self.tasks.put((func, args, kargs))

        def stop(self):
            self.running = False
            self.tasks.join()

    class LoaderThreadPool(LoaderBase):
        '''Loader implementation backed by a :class:`_ThreadPool`.'''
        def __init__(self):
            super(LoaderThreadPool, self).__init__()
            self.pool = None

        def start(self):
            super(LoaderThreadPool, self).start()
            self.pool = _ThreadPool(self._num_workers)
            Clock.schedule_interval(self.run, 0)

        def stop(self):
            super(LoaderThreadPool, self).stop()
            Clock.unschedule(self.run)
            self.pool.stop()

        def run(self, *largs):
            # Dispatch every queued load request to the pool.
            while self._running:
                try:
                    parameters = self._q_load.pop()
                except IndexError:
                    # Queue drained: nothing left to dispatch.
                    return
                self.pool.add_task(self._load, parameters)

    Loader = LoaderThreadPool()
    Logger.info('Loader: using a thread pool of {} workers'.format(
        Loader.num_workers))
/Caroline-presentation-0.2.4.tar.gz/Caroline-presentation-0.2.4/caroline/html_dist/js/mathjax/sre/mathmaps/fr/symbols/greek-mathfonts-sans-serif-bold-italic.js | [{"locale":"fr"},{"key":"1D790","mappings":{"default":{"default":"Alpha majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D791","mappings":{"default":{"default":"Bêta majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D792","mappings":{"default":{"default":"Gamma majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D793","mappings":{"default":{"default":"Delta majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D794","mappings":{"default":{"default":"Epsilon majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D795","mappings":{"default":{"default":"Dzêta majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D796","mappings":{"default":{"default":"Êta majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D797","mappings":{"default":{"default":"Thêta majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D798","mappings":{"default":{"default":"Iota majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D799","mappings":{"default":{"default":"Kappa majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D79A","mappings":{"default":{"default":"Lambda majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D79B","mappings":{"default":{"default":"Mu majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D79C","mappings":{"default":{"default":"Nu majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D79D","mappings":{"default":{"default":"Xi majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D79E","mappings":{"default":{"default":"Omicron majuscule en sans empattement italique 
gras"}},"category":"Lu"},{"key":"1D79F","mappings":{"default":{"default":"Pi majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D7A0","mappings":{"default":{"default":"Rhô majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D7A2","mappings":{"default":{"default":"Sigma majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D7A3","mappings":{"default":{"default":"Tau majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D7A4","mappings":{"default":{"default":"Upsilon majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D7A5","mappings":{"default":{"default":"Phi majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D7A6","mappings":{"default":{"default":"Chi majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D7A7","mappings":{"default":{"default":"Psi majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D7A8","mappings":{"default":{"default":"Oméga majuscule en sans empattement italique gras"}},"category":"Lu"},{"key":"1D7AA","mappings":{"default":{"default":"alpha en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7AB","mappings":{"default":{"default":"bêta en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7AC","mappings":{"default":{"default":"gamma en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7AD","mappings":{"default":{"default":"delta en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7AE","mappings":{"default":{"default":"epsilon en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7AF","mappings":{"default":{"default":"dzêta en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7B0","mappings":{"default":{"default":"êta en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7B1","mappings":{"default":{"default":"thêta en sans empattement italique 
gras"}},"category":"Ll"},{"key":"1D7B2","mappings":{"default":{"default":"iota en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7B3","mappings":{"default":{"default":"kappa en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7B4","mappings":{"default":{"default":"lambda en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7B5","mappings":{"default":{"default":"mu en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7B6","mappings":{"default":{"default":"nu en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7B7","mappings":{"default":{"default":"xi en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7B8","mappings":{"default":{"default":"omicron en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7B9","mappings":{"default":{"default":"pi en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7BA","mappings":{"default":{"default":"rhô en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7BB","mappings":{"default":{"default":"mathématique italique grasse sans empattement sigma final"}},"category":"Ll"},{"key":"1D7BC","mappings":{"default":{"default":"sigma en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7BD","mappings":{"default":{"default":"tau en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7BE","mappings":{"default":{"default":"upsilon en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7BF","mappings":{"default":{"default":"phi en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7C0","mappings":{"default":{"default":"chi en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7C1","mappings":{"default":{"default":"psi en sans empattement italique gras"}},"category":"Ll"},{"key":"1D7C2","mappings":{"default":{"default":"oméga en sans empattement italique gras"}},"category":"Ll"}] | PypiClean |
/CondConfigParser-1.0.5.tar.gz/CondConfigParser-1.0.5/condconfigparser/parser.py |
# parser.py --- Parser module of CondConfigParser
#
# Copyright (c) 2014, Florent Rougon
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the CondConfigParser Project.
"""Parser module of CondConfigParser.
This module defines a :class:`Node` class, one subclass for every node
type that can occur in the abstract syntax tree, and a :class:`Parser`
class which implements the configuration file parsing using the output
of the lexer (:class:`condconfigparser.lexer.Lexer`).
"""
import re
import collections
import textwrap # minor use, could be easily replaced
from .exceptions import ParseError, UndefinedVariable, InTestTypeError
from .lexer import TokenType
class Node:
    """Node class for the abstract syntax tree.

    This class defines all the usual :class:`list` methods; they act on
    the list referenced to by the :attr:`children` attribute.

    Data belonging to the node that is not in the form of a
    :class:`Node` instance (or instance of a subclass) must be stored in
    other attributes than :attr:`children`.

    This class implements :meth:`object.__eq__` and
    :meth:`object.__ne__` for equality testing. This allows one to
    compare two abstract syntax trees by using ``==`` or ``!=`` with the
    two root nodes.
    """
    def __init__(self, children=None):
        #: List of children nodes (:class:`Node` instances)
        self.children = [] if children is None else children

    def __eq__(self, other):
        return type(self) == type(other) and \
            self.children == other.children

    def __ne__(self, other):
        return not self.__eq__(other)

    def __getattr__(self, name):
        # Delegate the list-growing methods to the underlying children
        # list so that node.append(...)/node.extend(...) work directly.
        if name in ("append", "extend"):
            return getattr(self.children, name)
        # BUGFIX: previously, any other failed attribute lookup fell off
        # the end of this method and silently returned None, which masked
        # typos and confused protocols that probe for optional attributes
        # (e.g. copy/pickle). Raise AttributeError as object.__getattr__
        # is expected to.
        raise AttributeError(
            "{!r} object has no attribute {!r}".format(
                type(self).__name__, name))

    def __len__(self):
        return self.children.__len__()

    def __getitem__(self, key):
        return self.children.__getitem__(key)

    def __delitem__(self, key):
        return self.children.__delitem__(key)

    def __setitem__(self, key, value):
        return self.children.__setitem__(key, value)

    def __iter__(self):
        return self.children.__iter__()

    def __reversed__(self):
        return self.children.__reversed__()

    def __contains__(self, item):
        return self.children.__contains__(item)

    def __str__(self):
        return "<{}>".format(type(self).__name__)

    def __repr__(self):
        return "{0}.{1}({2!r})".format(__name__, type(self).__name__,
                                       self.children)

    def __format__(self, format_spec):
        """Support for :samp:`format({node}, "tree")` and \
:samp:`"{:tree}".format({node})`."""
        if not format_spec:
            return str(self)
        else:
            assert format_spec == "tree", format_spec
            if self.children:
                indent = 2
                # Recursively format each child as a tree and indent it
                # under the current node.
                childLines = '\n'.join(
                    [ textwrap.indent("{:tree}".format(child), " "*indent)
                      for child in self.children ])
                return "{}\n{}".format(self, childLines)
            else:
                return str(self)

    @classmethod
    def simplify(cls, node, parent, childNum):
        """Recursively replace useless :class:`AndTestNode` and \
:class:`OrTestNode` instances.

        Every :class:`AndTestNode` or :class:`OrTestNode` that only has
        one child is replaced with this only child. This process is
        recursively done on all children of *node*.

        Return the possibly-new root node of the tree.
        """
        res = node

        if isinstance(node, (AndTestNode, OrTestNode)) and \
           len(node.children) == 1:
            if parent is None:
                # *node* is the root: its only child becomes the new root.
                res = cls.simplify(node.children[0], None, None)
            else:
                # Splice the only child into the parent in node's place,
                # then keep simplifying from that position.
                parent.children[childNum] = node.children[0]
                cls.simplify(node.children[0], parent, childNum)
        else:
            for i, child in enumerate(node.children):
                cls.simplify(child, node, i)

        return res

    def undefVariables(self, varNames):
        """Return the set of undefined variables for the tree rooted at *self*.

        This method recursively explores the subtree rooted at *self*.
        If any node encountered refers to a variable whose name is not
        in *varNames*, it remembers that variable as undefined.

        The return value is a :class:`frozenset` containing a
        :class:`condconfigparser.lexer.VariableToken` instance for every
        instance of an undefined variable in the tree rooted at *self*.
        """
        return frozenset().union(
            *[ child.undefVariables(varNames) for child in self.children ])
class RootNode(Node):
    """Root of the abstract syntax tree built for a config file/stream.

    Corresponds to the :token:`root` grammar production. The two
    children are the variable-assignments subtree and the config
    subtree, exposed through the :attr:`assignments` and :attr:`config`
    properties.
    """
    def __init__(self, assignments, config):
        super().__init__([assignments, config])

    def __repr__(self):
        return f"{__name__}.{type(self).__name__}" \
               f"({self.assignments!r}, {self.config!r})"

    # Named accessors for the two fixed child slots
    @property
    def assignments(self):
        """:class:`VarAssignmentsNode` instance."""
        return self.children[0]

    @assignments.setter
    def assignments(self, value):
        self.children[0] = value

    @property
    def config(self):
        """:class:`ConfigNode` instance."""
        return self.children[1]

    @config.setter
    def config(self, value):
        self.children[1] = value
class VarAssignmentsNode(Node):
    """Node representing zero or more variable assignments.

    Corresponds to the :token:`varAssignments` grammar production.
    The assignments themselves are this node's children; the subclass
    adds no behavior of its own beyond what :class:`Node` provides.
    """
class AssignmentNode(Node):
    """Node representing a single variable assignment (lhs = rhs).

    Corresponds to the :token:`variableAssignment` grammar production.
    """
    def __init__(self, lhs, rhs):
        super().__init__([lhs, rhs])

    def __repr__(self):
        return f"{__name__}.{type(self).__name__}({self.lhs!r}, {self.rhs!r})"

    # Named accessors for the two fixed child slots, plus a shortcut for
    # the assigned-to variable's name.
    @property
    def lhs(self):
        """:class:`VariableNode` instance for the assigned-to variable."""
        return self.children[0]

    @lhs.setter
    def lhs(self, value):
        self.children[0] = value

    @property
    def name(self):
        """Name of the assigned-to variable."""
        return self.lhs.name

    @property
    def rhs(self):
        """:class:`Node` instance for the expression defining the variable."""
        return self.children[1]

    @rhs.setter
    def rhs(self, value):
        self.children[1] = value
def _truncateConfigLines(cfgLines):
l = []
for line in cfgLines:
# Truncate the config lines since they will all be printed on a
# single line...
if len(line) >= 15:
# 10 + len(" ...") = 15 - 1
l.append("{} ...".format(line[:10]))
else:
l.append(line)
return l
class ConfigNode(Node):
    """Node for the default, unconditional part of the config file plus
    the list of its conditional sections.

    Corresponds to the :token:`config` grammar production. The
    conditional sections are this node's children; the unconditional
    lines are kept separately in :attr:`defaultConfigLines`.
    """
    def __init__(self, defaultConfigLines=None, sections=None):
        super().__init__([] if sections is None else sections)
        #: List of unconditional configuration lines (:class:`str` instances)
        self.defaultConfigLines = \
            [] if defaultConfigLines is None else defaultConfigLines

    def __eq__(self, other):
        # Equality also takes the unconditional lines into account.
        return (type(self) == type(other) and
                self.children == other.children and
                self.defaultConfigLines == other.defaultConfigLines)

    def __repr__(self):
        return f"{__name__}.{type(self).__name__}" \
               f"({self.defaultConfigLines!r}, {self.sections!r})"

    def __str__(self):
        shortened = _truncateConfigLines(self.defaultConfigLines)
        return f"<{type(self).__name__} {shortened!r}>"

    @property
    def sections(self):
        """List of :class:`SectionNode` instances."""
        return self.children

    @sections.setter
    def sections(self, value):
        self.children = value
class SectionNode(Node):
    """Node representing a conditional section of the config file.

    Corresponds to the :token:`section` grammar production.
    """
    def __init__(self, startToken, predicate, rawConfigLines):
        """Initialize a :class:`SectionNode`.

        *startToken* is the token that opened the section (useful for
        error positions), *predicate* is the syntax tree of the
        section's condition, and *rawConfigLines* is the list of
        configuration lines (:class:`str`) the section contains.
        """
        Node.__init__(self, [predicate])
        # Plain attribute assignments; the previous implementation used
        # setattr() over locals(), which hid these attributes from
        # readers and static-analysis tools for no benefit.
        self.startToken = startToken
        self.rawConfigLines = rawConfigLines

    def __eq__(self, other):
        return type(self) == type(other) and \
            self.children == other.children and \
            self.startToken == other.startToken and \
            self.rawConfigLines == other.rawConfigLines

    def __repr__(self):
        return "{}.{}({!r}, {!r}, {!r})".format(
            __name__, type(self).__name__,
            self.startToken, self.predicate, self.rawConfigLines)

    def __str__(self):
        return "<{} {!r}>".format(type(self).__name__,
                                  _truncateConfigLines(self.rawConfigLines))

    # Convenience property
    @property
    def predicate(self):
        """Abstract syntax tree representing the predicate.

        This may be an :class:`OrTestNode`, an :class:`AndTestNode`, a
        :class:`ListLiteralNode`, etc. In any case, it is an instance of
        a :class:`Node` subclass.
        """
        return self.children[0]

    @predicate.setter
    def predicate(self, value):
        self.children[0] = value
class OrTestNode(Node):
    """Node representing a short-circuit, logical ``or`` test.

    The semantics are the same as Python's ``or`` operator, including
    the value the evaluation yields (the first truthy operand, or the
    last operand if none is truthy). cf.
    https://docs.python.org/3/reference/expressions.html#boolean-operations

    Corresponds to the :token:`orTest` grammar production.
    """
    def eval(self, context):
        """Return the node value, evaluating children left to right and
        stopping at the first truthy result."""
        for operand in self.children:
            value = operand.eval(context)
            if value:
                return value
        return value
class AndTestNode(Node):
    """Node representing a short-circuit, logical ``and`` test.

    The semantics are the same as Python's ``and`` operator, including
    the value the evaluation yields (the first falsy operand, or the
    last operand if all are truthy). cf.
    https://docs.python.org/3/reference/expressions.html#boolean-operations

    Corresponds to the :token:`andTest` grammar production.
    """
    def eval(self, context):
        """Return the node value, evaluating children left to right and
        stopping at the first falsy result."""
        for operand in self.children:
            value = operand.eval(context)
            if not value:
                return value
        return value
class NotTestNode(Node):
    """Node representing a logical ``not`` test.

    Related to the :token:`notTest` grammar production.
    """
    def __init__(self, child):
        super().__init__([child])

    def __repr__(self):
        return f"{__name__}.{type(self).__name__}({self.child!r})"

    def eval(self, context):
        """Return the boolean negation of the child's value."""
        return not self.child.eval(context)

    # Named accessor for the single child slot
    @property
    def child(self):
        """The single operand node (instance of a :class:`Node` subclass)."""
        return self.children[0]

    @child.setter
    def child(self, value):
        self.children[0] = value
class BinOpNodeBase(Node):
    """Base class for binary-operator nodes exposing ``lOp`` and ``rOp``."""
    def __init__(self, opToken, lOp, rOp):
        """Initialize a :class:`BinOpNodeBase` instance.

        *opToken* is the binary operator token instance (lexeme); it may
        be used to obtain the start/end line and column numbers of the
        operator, for instance. *lOp* and *rOp* are the left and right
        operands; both must be instances of a :class:`Node` subclass.
        """
        super().__init__([lOp, rOp])
        self.opToken = opToken

    def __repr__(self):
        return f"{__name__}.{type(self).__name__}" \
               f"({self.opToken!r}, {self.lOp!r}, {self.rOp!r})"

    # Named accessors for the operand slots
    @property
    def lOp(self):
        """Left operand (instance of a :class:`Node` subclass)."""
        return self.children[0]

    @lOp.setter
    def lOp(self, value):
        self.children[0] = value

    @property
    def rOp(self):
        """Right operand (instance of a :class:`Node` subclass)."""
        return self.children[1]

    @rOp.setter
    def rOp(self, value):
        self.children[1] = value
class EqualsTestNode(BinOpNodeBase):
    """Node representing an ``==`` test.

    Corresponds to the :token:`equalsTest` grammar production.
    """
    def eval(self, context):
        """Evaluate both operands (left first) and compare for equality."""
        left = self.lOp.eval(context)
        right = self.rOp.eval(context)
        return left == right
class NotEqualsTestNode(BinOpNodeBase):
    """Node representing a ``!=`` test.

    Corresponds to the :token:`notEqualsTest` grammar production.
    """
    def eval(self, context):
        """Evaluate both operands (left first) and compare for inequality."""
        left = self.lOp.eval(context)
        right = self.rOp.eval(context)
        return left != right
class InTestNode(BinOpNodeBase):
    """Node representing an ``in`` (membership) test.

    Corresponds to the :token:`inTest` grammar production.
    """
    def eval(self, context):
        """Return the node value, after recursive evaluation of its children.

        A :exc:`TypeError` raised while evaluating the test (typically
        because the right operand is not a container) is re-raised as an
        :exc:`InTestTypeError` carrying the operator token's position.
        """
        try:
            result = self.lOp.eval(context) in self.rOp.eval(context)
        except TypeError as exc:
            raise InTestTypeError(self.opToken, exc) from exc
        return result
class VariableNode(Node):
    """Node representing a reference to a variable.

    Corresponds to the :token:`variable` grammar symbol.
    """
    def __init__(self, variableToken):
        super().__init__()
        #: :class:`condconfigparser.lexer.VariableToken` instance
        self.token = variableToken
        #: Variable name (:class:`str`)
        self.name = variableToken.string

    def __eq__(self, other):
        # Two variable nodes are equal iff their tokens are equal.
        return type(self) == type(other) and self.token == other.token

    def __repr__(self):
        return f"{__name__}.{type(self).__name__}({self.token!r})"

    def __str__(self):
        return f"<{type(self).__name__} {self.name!r}>"

    def eval(self, context):
        """Return the variable's value, looked up by name in *context*.

        Raises :exc:`UndefinedVariable` if *context* has no entry for
        the variable's name.
        """
        try:
            return context[self.name]
        except KeyError as exc:
            raise UndefinedVariable(self.name) from exc

    def undefVariables(self, varNames):
        """Return the set of undefined variable tokens for this node.

        The variable is considered defined iff its name appears in
        *varNames*; otherwise its token is reported in a one-element
        frozenset.
        """
        return frozenset() if self.name in varNames \
            else frozenset((self.token,))
class StringLiteralNode(Node):
    """Node representing a string literal.

    Corresponds to the :token:`stringLiteral` grammar symbol.
    """
    def __init__(self, value):
        super().__init__()
        #: The actual string represented by the node (:class:`str` instance)
        self.value = value

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def __repr__(self):
        return f"{__name__}.{type(self).__name__}({self.value!r})"

    def __str__(self):
        return f"<{type(self).__name__} {self.value!r}>"

    def eval(self, context):
        """Return the literal string; *context* is ignored."""
        return self.value
class BoolLiteralNode(Node):
    """Node representing a boolean literal (``True`` or ``False``).

    Corresponds to the :token:`boolLiteral` grammar symbol.
    """
    def __init__(self, value):
        super().__init__()
        #: The actual boolean represented by the node (:class:`bool` instance)
        self.value = value

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def __repr__(self):
        return f"{__name__}.{type(self).__name__}({self.value!r})"

    def __str__(self):
        return f"<{type(self).__name__} {self.value!r}>"

    def eval(self, context):
        """Return the literal boolean; *context* is ignored."""
        return self.value
class ListLiteralNode(Node):
    """Node representing a list literal.

    The list elements are this node's children (:class:`Node`
    instances, in the same order as in the literal).

    Corresponds to the :token:`listLiteral` grammar symbol.
    """
    def __init__(self, items):
        super().__init__(items)

    def eval(self, context):
        """Return the Python list obtained by evaluating every element."""
        return [element.eval(context) for element in self.children]
class Parser:
    """Parser class of CondConfigParser.

    This class implements a recursive descent parser that performs
    look-ahead in order to avoid any need for backtracking. The
    algorithm is typical of a LL(1) parser that does not use parse
    tables.

    For more information, you may refer to:
      http://dickgrune.com/Books/PTAPG_1st_Edition/
    """
    def __init__(self, lexer):
        #: :class:`condconfigparser.lexer.Lexer` instance
        self.lexer = lexer
        #: Queue holding the tokens generated by the :attr:`lexer`
        self.queue = collections.deque()
        #: Token generator from :attr:`lexer`
        self.tokenGenerator = lexer.tokenGenerator()
        #: Last token popped from :attr:`queue`, i.e., consumed by the parser
        self.lastToken = None

    def enqueue(self, num):
        """Pull tokens from :attr:`tokenGenerator` until :attr:`queue` \
holds *num* tokens.

        Stop early if the lexer is exhausted (EOF). Return the resulting
        length of :attr:`queue`, which may be smaller than *num*.
        """
        while len(self.queue) < num:
            try:
                self.queue.append(next(self.tokenGenerator))
            except StopIteration:
                break           # EOF: no more tokens available
        return len(self.queue)

    def peekAt(self, index):
        """Look at an element of :attr:`queue` without consuming it.

        If *index* is 0, look at the first unprocessed token in
        :attr:`queue`. If *index* is 1, look at the next unprocessed
        token, etc.

        Return the looked-at token, or ``None`` if it does not exist.
        """
        queueLen = self.enqueue(index+1)
        if index < queueLen:
            return self.queue[index]
        else:
            return None

    def peekSeveral(self, num):
        """Return the topmost *num* tokens without consuming them.

        Return a list of length at most *num*. If EOF is reached before
        enough tokens can be read, the returned list will have less than
        *num* elements.

        If the returned list *l* has at least one element,
        :samp:`{l}[0]` is the first unprocessed token; if it has at
        least two elements, :samp:`{l}[1]` is the second unprocessed
        token, etc.
        """
        # Fill the queue as far as possible; the slice below naturally
        # handles a queue that is shorter than *num*, so the length
        # returned by enqueue() is not needed (it was previously stored
        # in an unused local variable).
        self.enqueue(num)
        return list(self.queue)[:num]

    def readToken(self):
        """Read a token from :attr:`queue`.

        If there are no tokens in :attr:`queue`, return ``None``.
        Otherwise, consume one token from :attr:`queue`, save it in
        :attr:`lastToken` for later reference and return it.
        """
        if not self.enqueue(1):
            return None         # No more token (EOF)
        else:
            self.lastToken = self.queue.popleft()
            return self.lastToken

    def match1(self, tokenType):
        """Match one token of type *tokenType*.

        Read a token from :attr:`queue`. If none is available or if the
        token is not of type *tokenType*, raise :exc:`ParseError`.
        Otherwise, return the token that was read.
        """
        t = self.readToken()
        if t is None:           # EOF
            raise ParseError((self.lexer.line, self.lexer.col),
                             "EOF reached while trying to read a <{}> token"
                             .format(tokenType.name))
        elif t.type != tokenType:
            raise ParseError(self.lastToken.pos(),
                             "expected a <{}> token, but found a <{}> instead"
                             .format(tokenType.name, t.type.name))
        return t

    def matchZeroOrMore(self, tokenType):
        """Match zero or more tokens of type *tokenType*.

        Return the list of matched tokens (the match is greedy).
        """
        l = []
        while True:
            t = self.peekAt(0)
            if t is None:
                break           # EOF
            elif t.type == tokenType:
                l.append(self.match1(tokenType))
            else:
                break
        return l

    def matchOneOrMore(self, tokenType):
        """Match one or more tokens of type *tokenType*.

        Return the list of matched tokens (the match is greedy). Raise
        :exc:`ParseError` if the first unprocessed token is not of type
        *tokenType*.
        """
        return [self.match1(tokenType)] + self.matchZeroOrMore(tokenType)

    def root(self):
        """Match a :token:`root` production."""
        t = self.peekAt(0)
        if t is None:           # EOF: completely empty input
            return RootNode(VarAssignmentsNode(), ConfigNode())  # empty
        elif t.type == TokenType.varAssignmentsStart:
            assignments = self.varAssignments()
        else:
            assignments = VarAssignmentsNode()  # empty
        return RootNode(assignments, self.config())

    def varAssignments(self):
        """Match a :token:`varAssignments` production."""
        self.match1(TokenType.varAssignmentsStart)
        a = self.varAssigs()
        self.match1(TokenType.varAssignmentsEnd)
        self.matchOneOrMore(TokenType.newline)
        return a

    def varAssigs(self):
        """Match a :token:`varAssigs` production."""
        self.matchZeroOrMore(TokenType.newline)
        assignments = VarAssignmentsNode()
        while True:
            t = self.peekAt(0)
            if t is None:
                break
            elif t.type == TokenType.variable:
                assignments.append(self.variableAssignment())
                nextToken = self.peekAt(0)
                if nextToken is not None and \
                   nextToken.type != TokenType.varAssignmentsEnd:
                    self.matchOneOrMore(TokenType.newline)
            elif t.type == TokenType.varAssignmentsEnd:
                break
            else:
                # BUGFIX: report the offending (peeked) token itself; the
                # previous implementation reported self.lastToken, i.e.,
                # the last token already *consumed* before it.
                raise ParseError(t.pos(),
                                 "unexpected token while parsing a "
                                 "<varAssigs>: {}".format(t))
        return assignments

    def variableAssignment(self):
        """Match a :token:`variableAssignment` production."""
        variable = VariableNode(self.match1(TokenType.variable))
        self.match1(TokenType.assignOp)
        return AssignmentNode(variable, self.orTest())

    def config(self):
        """Match a :token:`config` production."""
        defaultConfigLines = [ t.string for t in
                               self.matchZeroOrMore(TokenType.rawConfigLine) ]
        sections = []
        while True:
            t = self.peekAt(0)
            if t is None:
                break
            else:
                sections.append(self.section())
        return ConfigNode(defaultConfigLines, sections)

    def section(self):
        """Match a :token:`section` production."""
        startToken, predicate = self.predicate()
        rawConfigLines = [ t.string for t in
                           self.matchZeroOrMore(TokenType.rawConfigLine) ]
        return SectionNode(startToken, predicate, rawConfigLines)

    def predicate(self):
        """Match a :token:`predicate` production."""
        startToken = self.match1(TokenType.predicateStart)
        ot = self.orTest()
        self.match1(TokenType.predicateEnd)
        self.matchOneOrMore(TokenType.newline)
        return (startToken, ot)

    def orTest(self):
        """Match an :token:`orTest` production."""
        l = [self.andTest()]
        while True:
            t = self.peekAt(0)
            if t is None or t.type != TokenType.orOp:
                break
            else:
                self.match1(TokenType.orOp)
                l.append(self.andTest())
        return OrTestNode(l)

    def andTest(self):
        """Match an :token:`andTest` production."""
        l = [self.notTest()]
        while True:
            t = self.peekAt(0)
            if t is None or t.type != TokenType.andOp:
                break
            else:
                self.match1(TokenType.andOp)
                l.append(self.notTest())
        return AndTestNode(l)

    def notTest(self):
        """Match a :token:`notTest` production."""
        t = self.peekAt(0)
        if t is None:
            raise ParseError((self.lexer.line, self.lexer.col),
                             "EOF reached while trying to read a <notTest>")
        elif t.type == TokenType.notOp:
            self.match1(TokenType.notOp)
            node = NotTestNode(self.notTest())
        else:
            node = self.atomicBool()
        return node

    def atomicBool(self):
        """Match an :token:`atomicBool` production."""
        # All derivations of <atomicBool> start with an <expr>. In order to
        # avoid any backtracking, we have to factor it out: eat an <expr> now
        # and maybe pass it as parameter to equalsTest(), notEqualsTest() or
        # inTest(), depending on the token following the <expr>.
        expr = self.expr()
        t = self.peekAt(0)
        if t is None:
            return expr
        elif t.type == TokenType.equalsOp:
            return self.equalsTest(expr)
        elif t.type == TokenType.notEqualsOp:
            return self.notEqualsTest(expr)
        elif t.type == TokenType.inOp:
            return self.inTest(expr)
        else:
            return expr

    # cf. comments in atomicBool() concerning the lOp parameter
    def equalsTest(self, lOp):
        """Match an :token:`equalsTest` production."""
        opToken = self.match1(TokenType.equalsOp)
        rOp = self.expr()
        return EqualsTestNode(opToken, lOp, rOp)

    # cf. comments in atomicBool() concerning the lOp parameter
    def notEqualsTest(self, lOp):
        """Match a :token:`notEqualsTest` production."""
        opToken = self.match1(TokenType.notEqualsOp)
        rOp = self.expr()
        return NotEqualsTestNode(opToken, lOp, rOp)

    # cf. comments in atomicBool() concerning the lOp parameter
    def inTest(self, lOp):
        """Match an :token:`inTest` production."""
        opToken = self.match1(TokenType.inOp)
        rOp = self.expr()
        return InTestNode(opToken, lOp, rOp)

    def expr(self):
        """Match an :token:`expr` production."""
        t = self.peekAt(0)
        if t is None:
            raise ParseError((self.lexer.line, self.lexer.col),
                             "EOF reached while trying to read an <expr>")
        elif t.type == TokenType.variable:
            node = VariableNode(self.match1(TokenType.variable))
        elif t.type == TokenType.openParen:
            # Parenthesized sub-expression
            self.match1(TokenType.openParen)
            node = self.orTest()
            self.match1(TokenType.closeParen)
        else:
            node = self.literal()
        return node

    def literal(self):
        """Match a :token:`literal` production."""
        t = self.peekAt(0)
        if t is None:
            raise ParseError((self.lexer.line, self.lexer.col),
                             "EOF reached while trying to read a <literal>")
        elif t.type == TokenType.stringLiteral:
            return StringLiteralNode(
                self.match1(TokenType.stringLiteral).value)
        elif t.type == TokenType.listStart:
            return self.listLiteral()
        else:
            return self.boolLiteral()

    def boolLiteral(self):
        """Match a :token:`boolLiteral` production."""
        t = self.peekAt(0)
        if t is None:
            raise ParseError((self.lexer.line, self.lexer.col),
                             "EOF reached while trying to read a <boolLiteral>")
        elif t.type == TokenType.true:
            self.match1(TokenType.true)
            return BoolLiteralNode(True)
        else:
            self.match1(TokenType.false)
            return BoolLiteralNode(False)

    def listLiteral(self):
        """Match a :token:`listLiteral` production."""
        self.match1(TokenType.listStart)
        items = []
        # Set to False after the first element has been read; subsequent
        # elements must be preceded by a comma.
        first = True
        while True:
            t = self.peekAt(0)
            if t is None:
                raise ParseError((self.lexer.line, self.lexer.col),
                                 "EOF reached while trying to read a "
                                 "<listLiteral>")
            elif t.type == TokenType.listEnd:
                self.match1(TokenType.listEnd)
                break
            elif first:
                items.append(self.orTest())
                first = False
            else:
                self.match1(TokenType.comma)
                items.append(self.orTest())
        return ListLiteralNode(items)

    def buildTree(self):
        """Parse a complete configuration file.

        Return the root node of the corresponding abstract syntax tree,
        after simplification (cf. :meth:`Node.simplify`).
        """
        rootNode = self.root()
        return Node.simplify(rootNode, None, None)
/EOxServer-1.2.12-py3-none-any.whl/eoxserver/resources/coverages/management/commands/grid.py |
from django.core.management.base import CommandError, BaseCommand
from django.db import transaction
from eoxserver.resources.coverages import models
from eoxserver.resources.coverages.management.commands import (
CommandOutputMixIn, SubParserMixIn
)
class Command(CommandOutputMixIn, SubParserMixIn, BaseCommand):
    """ Command to manage grids. This command uses sub-commands for the
        specific tasks: create, delete
    """
    def add_arguments(self, parser):
        """ Register the 'create' and 'delete' sub-commands and their
            command-line arguments.
        """
        create_parser = self.add_subparser(parser, 'create')
        delete_parser = self.add_subparser(parser, 'delete')

        # Both sub-commands take the grid name as a positional argument.
        # NOTE: use a distinct loop variable; the previous implementation
        # reused (and shadowed) the 'parser' function argument here.
        for sub_parser in [create_parser, delete_parser]:
            sub_parser.add_argument(
                'name', nargs=1, help='The grid name'
            )

        create_parser.add_argument(
            'coordinate_reference_system', nargs=1,
            help=(
                'The definition of the coordinate reference system. Either '
                'an integer (the EPSG code), or the URL, WKT or XML '
                'definition.'
            )
        )
        create_parser.add_argument(
            '--name', '--axis-name', '-n', dest='axis_names', default=[],
            action='append',
            help=(
                'The name of one axis. Must be passed at least once and up to '
                'four times.'
            )
        )
        create_parser.add_argument(
            '--type', '--axis-type', '-t', dest='axis_types', default=[],
            action='append',
            choices=[choice[1] for choice in models.Grid.AXIS_TYPES],
            help=(
                'The type of one axis. Must be passed at least once and up to '
                'four times.'
            )
        )
        create_parser.add_argument(
            '--offset', '--axis-offset', '-o', dest='axis_offsets', default=[],
            action='append',
            help=(
                'The offset for one axis. Must be passed at least once and up '
                'to four times.'
            )
        )

    @transaction.atomic
    def handle(self, subcommand, name, *args, **kwargs):
        """ Dispatch sub-commands: create, delete.
        """
        name = name[0]
        if subcommand == "create":
            self.handle_create(name, *args, **kwargs)
        elif subcommand == "delete":
            self.handle_delete(name, *args, **kwargs)

    def handle_create(self, name, coordinate_reference_system, **kwargs):
        """ Handle the creation of a new grid.

            Validates that axis names, types and offsets were supplied in
            equal numbers (between one and four) before creating the
            ``Grid`` model instance. Raises :exc:`CommandError` on any
            validation failure.
        """
        axis_names = kwargs['axis_names']
        axis_types = kwargs['axis_types']
        axis_offsets = kwargs['axis_offsets']

        if not axis_names:
            raise CommandError('Must supply at least one axis definition.')

        if len(axis_types) != len(axis_names):
            raise CommandError(
                'Invalid number of axis-types supplied. Expected %d, got %d.'
                % (len(axis_names), len(axis_types))
            )
        if len(axis_offsets) != len(axis_names):
            raise CommandError(
                'Invalid number of axis-offsets supplied. Expected %d, got %d.'
                % (len(axis_names), len(axis_offsets))
            )
        if len(axis_names) > 4:
            raise CommandError('Currently only at most four axes are supported.')

        # Map human-readable axis type names to their model identifiers.
        type_name_to_id = dict(
            (type_name, id_) for id_, type_name in models.Grid.AXIS_TYPES
        )

        definition = {
            'name': name,
            'coordinate_reference_system': coordinate_reference_system[0]
        }
        # NOTE: the loop variable was previously also called 'name',
        # shadowing the grid-name parameter; use distinct names instead.
        axes = enumerate(zip(axis_names, axis_types, axis_offsets), start=1)
        for i, (axis_name, axis_type, axis_offset) in axes:
            definition['axis_%d_name' % i] = axis_name
            definition['axis_%d_type' % i] = type_name_to_id[axis_type]
            definition['axis_%d_offset' % i] = axis_offset

        models.Grid.objects.create(**definition)

    def handle_delete(self, name, **kwargs):
        """ Handle the deletion of a grid.

            Raises :exc:`CommandError` if no grid with the given name
            exists.
        """
        try:
            models.Grid.objects.get(name=name).delete()
        except models.Grid.DoesNotExist:
            raise CommandError('No such Grid %r' % name)
/NewsLookout-2.1.0-py3-none-any.whl/newslookout/plugins/mod_in_gdelt.py |
##########################################################################################################
# File name: mod_in_gdelt.py #
# Application: The NewsLookout Web Scraping Application #
# Date: 2021-06-23 #
# Purpose: Plugin for GDELT news aggregator #
# Copyright 2021, The NewsLookout Web Scraping Application, Sandeep Singh Sandhu, sandeep.sandhu@gmx.com #
# #
# #
# Notice: #
# This software is intended for demonstration and educational purposes only. This software is #
# experimental and a work in progress. Under no circumstances should these files be used in #
# relation to any critical system(s). Use of these files is at your own risk. #
# #
# Before using it for web scraping any website, always consult that website's terms of use. #
# Do not use this software to fetch any data from any website that has forbidden use of web #
# scraping or similar mechanisms, or violates its terms of use in any other way. The author is #
# not liable for such kind of inappropriate use of this software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, #
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR #
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE #
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR #
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
##########################################################################################################
# import standard python libraries:
import logging
# import web retrieval and text processing python libraries:
import os
from datetime import datetime
import pandas as pd
import zipfile
from io import BytesIO
import scraper_utils
from data_structs import PluginTypes
from base_plugin import BasePlugin
from scraper_utils import deDupeList
##########
from session_hist import SessionHistory
logger = logging.getLogger(__name__)
class mod_in_gdelt(BasePlugin):
""" Web Scraping plugin: mod_in_gdelt
Description: GDELt news aggregator
Language: English
Country: India
"""
# define a minimum count of characters for text body, article content below this limit will be ignored
minArticleLengthInChars = 400
# implies web-scraper for news content, see data_structs.py for other types
pluginType = PluginTypes.MODULE_NEWS_AGGREGATOR
# main webpage URL
mainURL = "http://data.gdeltproject.org/events/YYYYMMDD.export.CSV.zip"
mainURLDateFormatted = "http://data.gdeltproject.org/events/%Y%m%d.export.CSV.zip"
# RSS feeds to pick up latest news article links
all_rss_feeds = ["http://data.gdeltproject.org/events/index.html"]
# fetch only URLs containing the following substrings:
validURLStringsToCheck = []
# this list of URLs will be visited to get links for articles,
# but their content will not be scraped to pick up news content
nonContentURLs = [
mainURL
]
nonContentStrings = []
# never fetch URLs containing these strings:
invalidURLSubStrings = []
# write regexps in three groups ()()() so that the third group
# gives a unique identifier such as a long integer at the end of a URL
# this third group will be selected as the unique identifier:
urlUniqueRegexps = [r"(^http\://data.gdeltproject.org\/)(events\/)([0-9]+)(\.export\.CSV\.zip$)"]
# write the following regexps dict with each key as regexp to match the required date text,
# group 2 of this regular expression should match the date string
# in this dict, put the key will be the date format expression
articleDateRegexps = {}
invalidTextStrings = []
allowedDomains = ['data.gdeltproject.org']
articleIndustryRegexps = []
authorRegexps = []
# members used by functions of the class:
authorMatchPatterns = []
urlMatchPatterns = []
dateMatchPatterns = dict()
listOfURLS = []
# --- Methods to be implemented ---
    def __init__(self):
        """ Initialize the plugin object.

        Merges the base class's date-regexp dict and URL-uniqueness
        regexp list into this plugin's own, so the shared base-class
        helpers can use both sets when searching for unique URLs and
        published dates, then runs the base class initialization.
        """
        self.articleDateRegexps.update(super().articleDateRegexps)
        self.urlUniqueRegexps = self.urlUniqueRegexps + super().urlUniqueRegexps
        super().__init__()
    def getURLsListForDate(self, runDate: datetime, sessionHistoryDB: SessionHistory) -> list:
        """ Extract article list from the main URL.

        Downloads the GDELT event-export zip archive for the relevant
        date, expands it, and collects the article URLs (filtered to
        country code 'IN') from the contained CSV file(s).

        Since this is only a news aggregator, sets the plugin state to
        PluginTypes.STATE_STOPPED at the end of this method.

        :param sessionHistoryDB: Not used in this function
        :param runDate: Given query date for which news article URLs are to be retrieved
        :type runDate: datetime.datetime
        :return: List of URLs identified from this news source
        :rtype: list
        """
        urlList = []
        try:
            # The URL is built for two days before runDate; see
            # prepare_url_datadir_for_date(). It is None if no
            # date-formatted URL template is configured.
            searchResultsURLForDate, dataDirForDate = self.prepare_url_datadir_for_date(runDate)
            if searchResultsURLForDate is not None:
                logger.debug('Downloading file from URL: %s', searchResultsURLForDate)
                csv_zip = self.downloadDataArchive(searchResultsURLForDate, self.pluginName)
                csv_files = mod_in_gdelt.extract_csvlist_from_archive(csv_zip, dataDirForDate)
                for csv_filename in csv_files:
                    logger.debug("Expanded the fetched Zip archive to: %s", csv_filename)
                    # Keep only articles tagged with country code 'IN' (India):
                    url_items = mod_in_gdelt.extract_urls_from_csv(csv_filename, country_code='IN')
                    urlList = urlList + url_items
                urlList = deDupeList(urlList)
                logger.info("Added %s URLs from aggregated news from %s", len(urlList), searchResultsURLForDate)
        except Exception as e:
            # Best-effort: log and fall through with whatever URLs were
            # collected so far (possibly an empty list).
            logger.error("%s: When Extracting URL list from main URL, error was: %s",
                         self.pluginName, e)
        # Aggregator plugins stop after producing the URL list; they do
        # not scrape article content themselves.
        self.pluginState = PluginTypes.STATE_STOPPED
        return(urlList)
def prepare_url_datadir_for_date(self, rundate_obj: datetime) -> tuple:
""" Prepare URL from given Date.
:param rundate_obj: Date for the URL
:return: A tuple with the URL and the destination directory to save the data for this date.
"""
url_prepared_for_date = None
prevDay = scraper_utils.getPreviousDaysDate(rundate_obj)
prevToPrevDay = scraper_utils.getPreviousDaysDate(prevDay)
if 'mainURLDateFormatted' in dir(self) and self.mainURLDateFormatted is not None:
url_prepared_for_date = prevToPrevDay.strftime(self.mainURLDateFormatted)
dataDirForDate = BasePlugin.identifyDataPathForRunDate(self.app_config.data_dir,
prevToPrevDay)
return(url_prepared_for_date, dataDirForDate)
@staticmethod
def extract_csvlist_from_archive(archive_bytes: bytes, dataDirForDate: str) -> list:
""" Extract CSV file from compressed archive file
:param archive_bytes: bytes from the compressed archive downloaded from the website
:param dataDirForDate: Data directory where archive would be expanded into
:return: a list of CSV filenames extracted from the archive
"""
list_of_files = []
filebytes = BytesIO(archive_bytes)
zipDatafile = zipfile.ZipFile(filebytes, mode='r')
# unzip csv data, write to file:
for memberZipInfo in zipDatafile.infolist():
zipDatafile.extract(memberZipInfo, path=dataDirForDate)
csv_filename = os.path.join(dataDirForDate, memberZipInfo.filename)
logger.debug(f"Expanded the Zip archive to file: {csv_filename}")
list_of_files.append(csv_filename)
zipDatafile.close()
return(list_of_files)
@staticmethod
def extract_urls_from_csv(csv_filename: str, country_code='IN') -> list:
""" Extract URL list from CSV file
:param csv_filename: file to read from
:param country_code: ISO 2-character country code to filter news
:return: List of relevant URLs extracted from CSV file
"""
urlList = []
# load csv file in pandas:
# Columns (14,24) have mixed types. Specify dtype option on import or set low_memory=False.
urlDF = pd.read_csv(csv_filename, delimiter='\t', header=None, low_memory=False)
# filter and identify URLs for india:
# column 51 is country, column 57 is URL
for item in urlDF[urlDF.iloc[:, 51] == country_code].iloc[:, 57].values:
# put urls into list:
urlList.append(item.strip())
# delete csv file:
os.remove(csv_filename)
return(deDupeList(urlList))
# # end of file ## | PypiClean |
/DeepSpectrumLite-1.0.2.tar.gz/DeepSpectrumLite-1.0.2/README.md | 

[](https://badge.fury.io/py/DeepSpectrumLite)

**DeepSpectrumLite** is a Python toolkit to design and train light-weight Deep Neural Networks (DNNs) for classification tasks from raw audio data.
The trained models run on embedded devices.
DeepSpectrumLite features an extraction pipeline that first creates visual representations for audio data - plots of spectrograms.
The image plots are then fed into a DNN. This could be a pre-trained Image Convolutional Neural Network (CNN).
Activations of a specific layer then form the final feature vectors which are used for the final classification.
The trained models can be easily converted to a TensorFlow Lite model. During the converting process, the model becomes smaller and faster optimised for inference on embedded devices.
**(c) 2020-2021 Shahin Amiriparian, Tobias Hübner, Maurice Gerczuk, Sandra Ottl, Björn Schuller: Universität Augsburg**
Published under GPLv3, please see the `LICENSE` file for details.
Please direct any questions or requests to Shahin Amiriparian (shahin.amiriparian at informatik.uni-augsburg.de) or Tobias Hübner (tobias.huebner at informatik.uni-augsburg.de).
# Citing
If you use DeepSpectrumLite or any code from DeepSpectrumLite in your research work, you are kindly asked to acknowledge the use of DeepSpectrumLite in your publications.
> S. Amiriparian, T. Hübner, M. Gerczuk and S. Ottl and B. W. Schuller, “DeepSpectrumLite: A Power-Efficient Transfer Learning Framework for Embedded Speech and Audio Processing from Decentralised Data,” 2021. [https://arxiv.org/abs/2104.11629](https://arxiv.org/abs/2104.11629)
```
@misc{amiriparian2021deepspectrumlite,
title={DeepSpectrumLite: A Power-Efficient Transfer Learning Framework for Embedded Speech and Audio Processing from Decentralised Data},
author={Shahin Amiriparian and Tobias Hübner and Maurice Gerczuk and Sandra Ottl and Björn W. Schuller},
year={2021},
eprint={2104.11629},
archivePrefix={arXiv},
primaryClass={cs.SD}
}
```
# Why DeepSpectrumLite?
DeepSpectrumLite is built upon TensorFlow Lite which is a specialised version of TensorFlow that supports embedded devices.
However, TensorFlow Lite does not support all basic TensorFlow functions for audio signal processing and plot image generation. DeepSpectrumLite offers implementations for unsupported functions.
# Installation
You can install DeepSpectrumLite from PiPy.
```bash
pip install deepspectrumlite
```
Alternatively, you can clone this repository and install it from there:
```bash
git clone https://github.com/DeepSpectrum/DeepSpectrumLite.git
cd DeepSpectrumLite
```
## Virtual environment
We highly recommend you to create a virtual environment:
```bash
python -m venv ./venv
source ./venv/bin/activate
pip install .
```
## Conda environment
If you have Conda installed, you can create and install an environment from the included "environment.yml".
```bash
conda env create -f environment.yml
conda activate ./env
```
## GPU support
DeepSpectrumLite uses TensorFlow 2.4.0. GPU support should be automatically available, as long as you have CUDA version 11.0. If you cannot install cuda 11.0 globally, you can use Anaconda to install it in a virtual environment along DeepSpectrumLite.
# Getting started
## Training
To train a model use the following command and pass the path to your data directory (structured as above):
```bash
deepspectrumlite train -d [path/to/data] -md [save/models/to] -hc [path/to/hyperparameter_config.json] -cc [path/to/class_config.json] -l [path/to/labels.csv]
```
For a full rundown of all commandline arguments, call `deepspectrumlite train --help`.
Other training parameters including label parser file, problem definition, audio signal preprocessing, model configuration are defined in the hyperparameter config json file.
## Test
If you want to test your .h5 model against a specific audio .wav file, you can call `cli.predict`:
```bash
deepspectrumlite predict -md [path/to/model.h5] -d [path/to/*.wav] -hc [path/to/hyperparameter_config.json] -cc [path/to/class_config.json]
```
### Slurm Job Array
DeepSpectrumLite supports training over a slurm job array. This is helpful when applying a grid search.
When you call the train process within your slurm job array, our system retrieves the `SLURM_ARRAY_TASK_ID` environment variable that is automatically set by slurm.
Each job array `i` trains the i'th combination of your grid. If you have a grid containing 24 combinations of parameters, you can define your slurm job as follows:
```bash
#SBATCH --partition=dfl
#SBATCH --mem=8000
#SBATCH --ntasks=1
#SBATCH --get-user-env
#SBATCH --export=ALL
#SBATCH -o slurm-%A_%a.txt
#SBATCH -J myjob
#SBATCH --cpus-per-task=2
#SBATCH --array=0-23
deepspectrumlite train
```
This script will create 24 independent training instances where job 0 trains the first combination, job 1 trains the second combination etc.
## Statistics about a model
If you want to check out the parameter quantity and FLOPS your .h5 model you can call `cli.stats`:
```bash
deepspectrumlite stats -d [path/to/model.h5] -hc [path/to/hyperparameter_config.json]
```
## Convert .h5 to .tflite
If you want to convert your trained model to TensorFlow Lite, use `cli.convert`:
```bash
deepspectrumlite convert -s [path/to/model.h5] -d [path/to/target/model.tflite]
```
## Configuration
Example configuration files are in the `config` directory.
## Hyper Parameter Configuration Grid
The hyper parameter configuration grid is defined in a json file. You can add more than one configuration value for a variable. DeepSpectrumLite creates a grid of all possible combinations of variable values.
### Model Configurations
| Variable | Type | Description | Required | Default value |
|-----------------|------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|------------------|
| label_parser | string | The parser python file for the labels. The python file path is followed by a `:` and the class name. For example: `../lib/data/parser/ComParEParser.py:ComParEParser` | true | |
| prediction_type | enum ['categorical', 'regression'] | Define whether you have a categorical or a regression problem to solve. | false | categorical |
| model_name | string | The model class that is used for training. For Transfer Learning use 'TransferBaseModel' | false | TransferBaseModel |
| basemodel_name | string | The base model name that is used for training. Available models: `vgg16`, `vgg19`, `resnet50`, `xception`, `inception_v3`, `densenet121`, `densenet169`, `densenet201`, `mobilenet`, `mobilenet_v2`, `nasnet_large`, `nasnet_mobile`, `inception_resnet_v2`, `squeezenet_v1` | true | |
| weights | enum ['imagenet', ''] | If set to 'imagenet', the base model defined in basemodel_name uses weights from pre-trained on imagenet. Otherwise, the model defined in basemodel_name has random weights. | false | imagenet |
| tb_experiment | string | The name of the tensorboard dashboard. The name is used as a directory. | true | |
| tb_run_id | string | The name of this experiment setting for the tensorboard dashboard. The name is used as a subdirectory. You can define a generic tb_experiment which uses different runs with different configuration settings. When having more than one configuration within a grid, the tb_run_id is automatically extended by `_config_[NUMBER]`.| true | |
| num_unit| int | The number of units that are used for dense layer in the final MLP classifier. | true | |
| dropout | float | The rate of dropout applied after the dense layer and the final prediction layer. | true | |
| optimizer | string | The optimizer for the training process. Supported optimizers: `adam`, `adadelta`, `sgd`, `adagrad`, `ftrl`, `nadam`, `rmsprop`, `sgd` | true | |
| learning_rate | float | The initial learning rate of the optimizer. | true | |
| fine_learning_rate | float | The learning rate that is set after `pre_epochs`. This is only supported when model_name='TransferBaseModel' and pre_epochs>0 and weighted='imagenet' and fine_learning_rate>0.0 | false | |
| loss | string | The loss function that is used for training. All TensorFlow loss functions are supported. | true | |
| activation | string | The activation function that is used in the dense layers in the final MLP classifier. All activation functions from TensorFlow and "arelu" are supported. | true | |
| pre_epochs | int | The number of epochs of training. When the model_name is 'TransferBaseModel', the pre_epochs defines how long the base model is trained in a frozen state. After reaching the pre_epochs, the model the last layers (share defined in 'finetune_layer') are unfrozen and trained again for 'epochs' epochs. | true | |
| finetune_layer | float | The amount of layers (share in percent) of the last layers of the base model that are unfrozen after pre_epochs | true | |
| batch_size | int | The batch size of the training | true | |
### Preprocessing Configurations
| Variable | Type | Description | Required | Default value |
|-----------------|------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|------------------|
| sample_rate | int | The sample rate of the audio files (in samples per seconds). | true | |
| chunk_size | float | The desired chunk size of the audio files (in seconds). All audio files are split into chunks before spectrograms are calculated. | true | |
| chunk_hop_size | float | The hop size of the audio files (in seconds). | true | |
| stft_window_size | float | The window size used for the Short-Time Fourier Transform (STFT) (in seconds). | true | |
| stft_hop_size | float | The hop size used for the STFT (in seconds). | true | |
| stft_fft_length | float | The FFT length used for the STFT (in seconds). | true | |
| mel_scale | bool | If enable, mel spectrograms are calculated. Otherwise, power spectrograms are used. | true | |
| lower_edge_hertz | float | The lower bound of the frequency range. (in Hz) | true | |
| upper_edge_hertz | float | The upper bound of the frequency range. (in Hz) | true | |
| num_mel_bins | int | The number of mel bins used for the mel spectrogram generation | false | |
| num_mfccs | int | The number of mfcc bins. When num_mfccs is set to 0, no mfcc bins are generated and the pure mel spectrograms are used instead. | false | |
| cep_lifter | int | The number of frequencs for the cepstral lift. If set to 0, no cepstral lift is applied. | true | |
| db_scale | bool | When enabled, the spectrogram is scaled to the dB scale instead of the power scale. | true | |
| use_plot_images | bool | When enabled, the spectrogram values are plotted using the color map defined in color_map. These plot images are then used for training. When set to false, the pure spectrogram values are fed to the network. | true | |
| color_map | string | The color map used for plotting spectrograms. Available color maps: `viridis`, `cividis`, `inferno`, `magma`, `plasma`. | true | |
| image_width | int | The width of the spectrogram plot images (in px). | true | |
| image_height | int | The height of the spectrogram plot images (in px). | true | |
| anti_alias | bool | Enable anti alias when resizing the image plots. | false | true |
| resize_method | string | The resize method when resizing image plots. Supported methods: `bilinear`, `lanczos3`, `bicubic`, `gaussian`, `nearest`, `area`, `mitchellcubic` | false | bilinear |
### Data Augmentation Configuration
| Variable | Type | Description | Required | Default value |
|-----------------|------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|------------------|
| sap_aug_a | float | The a value of the SapAugment policy. Please check the SapAugment paper for details. | true | |
| sap_aug_s | float | The s value of the SapAugment policy. Please check the SapAugment paper for details. | true | |
| augment_cutmix | bool | Enable CutMix data augmentation. | true | |
| augment_specaug | bool | Enable SpecAugment data augmentation. | true | |
| da_prob_min | float | The minimum probability that data augmentation is applied at all. The probability depends on the lambda value from SapAugment. | false | |
| da_prob_max | float | The maximum probability that data augmentation is applied at all. The probability depends on the lambda value from SapAugment. | false | |
| cutmix_min | float | The minimum width of a squared frame that is used from another sample of the same batch. The concrete size depends on the lambda value from SapAugment. | false | |
| cutmix_max | float | The maximum width of a squared frame that is used from another sample of the same batch. The concrete size depends on the lambda value from SapAugment. | false | |
| specaug_freq_min | float | The minimum width of **all** SpecAugment frequency masks. If you have more than one mask, the single mask width is divided by the number of masks. Therefore, this variable defines the overall share of augmentation that is applied by SpecAugment. The concrete size depends on the lambda value from SapAugment. | false | |
| specaug_freq_max | float | The maximum width of **all** SpecAugment frequency masks. If you have more than one mask, the single mask width is divided by the number of masks. Therefore, this variable defines the overall share of augmentation that is applied by SpecAugment. The concrete size depends on the lambda value from SapAugment. | false | |
| specaug_time_min | float | The minimum width of **all** SpecAugment time masks. If you have more than one mask, the single mask width is divided by the number of masks. Therefore, this variable defines the overall share of augmentation that is applied by SpecAugment. The concrete size depends on the lambda value from SapAugment. | false | |
| specaug_time_max | float | The maximum width of **all** SpecAugment time masks. If you have more than one mask, the single mask width is divided by the number of masks. Therefore, this variable defines the overall share of augmentation that is applied by SpecAugment. The concrete size depends on the lambda value from SapAugment. | false | |
| specaug_freq_mask_num | int | The number of SpecAugment frequency masks that are added. | false | |
| specaug_time_mask_num | int | The number of SpecAugment time masks that are added. | false | |
Example file:
```json
{
"label_parser": ["../lib/data/parser/ComParEParser.py:ComParEParser"],
"model_name": ["TransferBaseModel"],
"prediction_type": ["categorical"],
"basemodel_name": ["densenet121"],
"weights": ["imagenet"],
"tb_experiment": ["densenet_iemocap"],
"tb_run_id": ["densenet121_run_0"],
"num_units": [512],
"dropout": [0.25],
"optimizer": ["adadelta"],
"learning_rate": [0.001],
"fine_learning_rate": [0.0001],
"finetune_layer": [0.7],
"loss": ["categorical_crossentropy"],
"activation": ["arelu"],
"pre_epochs": [40],
"epochs": [100],
"batch_size": [32],
"sample_rate": [16000],
"normalize_audio": [false],
"chunk_size": [4.0],
"chunk_hop_size": [2.0],
"stft_window_size": [0.128],
"stft_hop_size": [0.064],
"stft_fft_length": [0.128],
"mel_scale": [true],
"lower_edge_hertz": [0.0],
"upper_edge_hertz": [8000.0],
"num_mel_bins": [128],
"num_mfccs": [0],
"cep_lifter": [0],
"db_scale": [true],
"use_plot_images": [true],
"color_map": ["viridis"],
"image_width": [224],
"image_height": [224],
"resize_method": ["nearest"],
"anti_alias": [false],
"sap_aug_a": [0.5],
"sap_aug_s": [10],
"augment_cutmix": [true],
"augment_specaug": [true],
"da_prob_min": [0.1],
"da_prob_max": [0.5],
"cutmix_min": [0.075],
"cutmix_max": [0.25],
"specaug_freq_min": [0.1],
"specaug_freq_max": [0.3],
"specaug_time_min": [0.1],
"specaug_time_max": [0.3],
"specaug_freq_mask_num": [4],
"specaug_time_mask_num": [4]
}
```
## Class Configuration
The class configuration is defined in a json file. The indices define the internal variable of the class and the value defined the output name.
Example file for categorical problems:
```json
{
"negative": "Negative",
"positive": "Positive"
}
```
Example file for regression problems:
```json
{
"a": "Arousal"
}
```
| PypiClean |
/NBT-1.5.1.tar.gz/NBT-1.5.1/doc/changelog.rst | Changelog
=========
NBT currently only uses major and minor releases. Patch versions exist as
commits in the master trunk, but are not enumerated.
NBT Trunk
---------
Git trunk can be found at https://github.com/twoolie/NBT/tree/master
Bug Fixes since 1.5.1
~~~~~~~~~~~~~~~~~~~~~
* None
Known Bugs
~~~~~~~~~~
See https://github.com/twoolie/NBT/issues
* It is possible to access the NBT structure of any world folder, including
McRegion and Anvil worlds. However, chunk specifics (such as the location
of blocks in the NBT structure) are only available for McRegion, not for
Anvil.
* The name of a variable generally only supports 2-byte Unicode characters (the
Basic Multilingual Plane). For Full Unicode support, use Python 3.3 or higher,
or compile Python --with-wide-unicode.
NBT 1.5.1 (22 December 2021)
---------------------------
New Features since 1.5.0
~~~~~~~~~~~~~~~~~~~~~~~~
* Generic block class in chunck.py (courtesy mfld-fr).
* chunk.py supports data version 1343 (Minecraft 1.12) (courtesy Steffen Kieß).
* chunk.py supports data version 1631 (Minecraft 1.16) (courtesy Steffen Kieß).
* chunk.py supports data version 2566 (Minecraft 1.16) (courtesy Marius Steffens).
Bug Fixes since 1.5.0
~~~~~~~~~~~~~~~~~~~~~~~~
* Support for Python version 3.10 (courtesy einekratzekatze).
* Bug fix in getting BlockStates in old Anvil format (courtesy mkuw).
* Bug fix in reloading closed region files (courtesy Sylvain Leroux).
* Allow naming of Compund TAGs (courtesy Sylvain Leroux).
Changes in Auxiliary Scripts since 1.5.0
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Add examples/scoreboard.py script (courtesy mfld-fr).
* Add examples/block_finder.py script (courtesy jlsajfj).
NBT 1.5.0 (14 May 2018)
---------------------------
New Features since 1.4.1
~~~~~~~~~~~~~~~~~~~~~~~~
* Support for Long Arrays (used in Minecraft 1.13 and higher) (#95)
Bug Fixes since 1.4.1
~~~~~~~~~~~~~~~~~~~~~~~~
* Faster reading chunks with corrupt header. (#76)
Changes in Auxiliary Scripts since 1.4.1
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Add examples/player_print.py script (courtesy k1988).
* Automatic testing now also runs Python 3.4, 3.5, 3.6 and pypy3.
* Review and minor improvements of tests.
* PEP8-compatibility improvements of the code (courtesy suresttexas00)
NBT 1.4.1 (27 October 2013)
---------------------------
New Features since 1.4.0
~~~~~~~~~~~~~~~~~~~~~~~~
* Change indentation from tabs to spaces.
NBT 1.4.0 (27 October 2013)
---------------------------
New Features since 1.3.0
~~~~~~~~~~~~~~~~~~~~~~~~
* Added documentation.
* WorldFolder.iter_chunks() returns Chunk subclass (McRegionChunk / AnvilChunk)
* Add exception when opening files too small to be a region file.
* Examples/map.py example now works with Python 3 as well.
The recommended library is Pillow, a fork of PIL that supports Python 3.
* Rewrote chunk writing algorithm in nbt.region, and added lots of code checks
to verify that it never overwrite chunks.
* Support writing to corrupt region files, working around corrupt parts.
* Support reading uncompressed chunks in region files.
* Added detection for overlapping chunks in region files.
* Added RegionFileFormatError exception.
* Allow easy iteration over chunks in a RegionFile:
`for chunk in RegionFile(filename)`
* RegionFile.iter_chunks() now silently ignores unreadable chunks.
* Better display of filenames in NBTFile and RegionFiles when initialised with
a fileobject.
* Truncate region file size when possible.
* Add RegionFile.get_chunk_metadata() method.
* Expose more detailed read and write methods in RegionFile: get_blockdata(),
get_nbt(), get_chunk(), write_blockdata(), write_chunk().
Bug Fixes since 1.3.0
~~~~~~~~~~~~~~~~~~~~~
* generate_heightmap now ignores non-solid blocks (such as tall grass).
* Fix behavior of `__delitem__` in TAG_list and TAG_Compound.
* Fix infinite loop while writing a chunk changing the way in which free
space is searched in the region file.
* Fixed a bug that sometimes made write chunks in the region file header.
* Fixed a bug that corrupted the file when writing a chunk that was between
4093 and 4096 bytes after compression.
* Now possible to write and immediately read chunks in region files.
* Allow increase in region file size.
* Allow trailing slash in world folder in example scripts
* Replace all `import *` with specific imports.
* Support for (empty) TAG_Lists with TAG_End objects.
Changes in Auxiliary Scripts since 1.3.0
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Automatic testing now also runs example scripts
* Automatic testing now also runs Python 3.3
* Code for automatic documentation generation forked in a seperate package,
sphinxcontrib-restbuilder.
* Automatic testing for Python 2.6 now requires unittest2 package.
* Documented automatic code generation and simplified Makefile.
Backward Incompatible Changes since 1.3.0
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* WorldFolder is no longer a class, but a factory function that returns
(a subclass of) a _BaseWorldFolder instance.
* The fileobj parameter in `RegionFile(fileobj)` is no longer closed
(similar to the behaviour of e.g. GZipFile). It is the callers
responsibility to close these files.
* RegionFile.get_chunk() raises InconceivedChunk when a chunk does not exist
instead of returning None.
* Exceptions raised while reading chunks are always a RegionFileFormatError or
subclass thereof. GZip, zlib and nbt.MalformedFileError are no longer raised.
* init_header(), parse_header() and parse_chunk_headers() in RegionFile are no
longer public methods.
Deprecated features since 1.3.0
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Constants in nbt.region moved to module level. They are still available in
the class for backwards compatibility.
* `nbt.region.RegionFile.header` and `nbt.region.RegionFile.chunk_headers` are
deprecated in favour of `nbt.region.RegionFile.metadata`. They are still
available for backward compatibility.
* Deprecate `RegionFile.get_chunks()` and `RegionFile.get_chunk_coords()` in
favour of `RegionFile.get_metadata()`.
* RegionFile.get_chunk() method may later be changed to return a Chunk() object.
Use RegionFile.get_nbt() to retain the current behaviour.
NBT 1.3.0 (19 March 2012)
-------------------------
New Features since 1.2.0
~~~~~~~~~~~~~~~~~~~~~~~~
* Python 3 support
* NBT_Tag objects behave like native Python objects
- TAG_Byte_Array, TAG_Int_Array and TAG_List are now a MutableSequence
- TAG_Compound is now a MutableMapping
- TAG_String is now a Sequence
* Improved printing of TAGs (`__str__` and `__repr__`) for easier debugging
* Added examples script for listing mobs, listing chest content, display
world seed, and counting Biome data
* Block analysis example takes data value of blocks into account
* Subclass of Worldfolder: McRegionWorldfolder and AnvilWorldFolder
* Added iterator functions: iter_chunks, iter_nbt, iter_regions in
WorlFolder and iter_nbt in RegionFile
* Move unit tests and sample file to tests directory
Bug Fixes since 1.2.0
~~~~~~~~~~~~~~~~~~~~~
* Travis (automatic testing) support
* Test file is no longer overwritten.
* Consistent Unix line-endings and tabs for indentation
* raise InconceivedChunk if a requested chunk was not yet generated
* Can instantiate a RegionFile without associating it with an existing file
* Use sysexit error codes instead of syserror codes in example scripts
Backward Incompatible Changes since 1.2.0
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Dropped support for Python 2.4 and 2.5
* Use native Python bytearray() to store TAG_Byte_Array().value, instead of
string, list or array.array
* NBT now expects Unicode instances for strings (e.g. for name in TAGs and
keys in TAG_Compound), while it expects bytes (or BytesIO) for byte
arrays and buffers.
* Instantiating a WorldFolder now returns either a McRegionWorldfolder or
AnvilWorldFolder
NBT 1.2.0 (7 March 2012)
------------------------
New Features since 1.1.0
~~~~~~~~~~~~~~~~~~~~~~~~
* Support for TAG_Int_Array (required for Minecraft Anvil worlds)
* 15x Speed improvement of `BlockArray.__init__` in nbt.chunk
* Initial support for world folders: world.py
* Examples can be executed in-place, without installing NBT
* Map example prints entire world (only works for McRegion worlds)
Bug Fixes since 1.1.0
~~~~~~~~~~~~~~~~~~~~~
* Support for data bits (this was previously broken)
* Region file checks for inconsistent chunk lengths (this may detect
truncated region files)
* TAG_List behave like a Python list (is iterable and has a length)
Backward Incompatible Changes since 1.1.0
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Method `RegionFile.get_chunks()` is deprecated in favour of
`RegionFile.get_chunk_coords()`
NBT 1.1.0 (23 September 2011)
-----------------------------
New Features since 1.0.0
~~~~~~~~~~~~~~~~~~~~~~~~
* Region file support
* Chunk convenience class
* Example scripts for block analysis and level metadata generation
Bug Fixes since 1.0.0
~~~~~~~~~~~~~~~~~~~~~
* Allow reading and writing on the same NBTFile object
* Same init function for TAG_Byte_Array as other classes
* Unit tests for NBT class
Backward Incompatible Changes since 1.0.0
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Change order of `TAG_Byte_Array.__init__()` parameters
NBT 1.0.0 (28 February 2011)
----------------------------
* First stable release
* Reads and Parses NBT files
* Generates and Writes NBT files
* Reads and writes GZipped NBT files or uncompressed File objects
NBT 0.9.0 (15 December 2010)
----------------------------
See https://github.com/twoolie/NBT/tree/fe3467fec6d18a6445bc850e9386e1be9e4e1299
NBT 0.8.0 (27 November 2010)
----------------------------
See https://github.com/twoolie/NBT/tree/67e5f0acdad838e4652d68e7342c362d786411a0
NBT 0.7.0 (2 November 2010)
----------------------------
See https://github.com/twoolie/NBT/tree/8302ab1040fca8aabd4cf0ab1f40105889c24464
NBT 0.6.0 (29 October 2010)
----------------------------
See https://github.com/twoolie/NBT/tree/0f0cae968f1fc2d5e5f2cabb37f79bb7910ca7e3
NBT 0.5.0 (8 August 2010)
----------------------------
See https://github.com/twoolie/NBT/tree/7d289f0cc4cf91197108569ba361cff934ebaf38
* First public release
* Pre-release (not stable yet)
| PypiClean |
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/bower_components/bootstrap-fileinput/themes/explorer-fas/theme.js | (function ($) {
'use strict';
$.fn.fileinput.defaults.theme = 'explorer-fas';
$.fn.fileinputThemes['explorer-fas'] = {
layoutTemplates: {
footer: '<div class="file-details-cell">' +
'<div class="explorer-caption" title="{caption}">{caption}</div> ' + '{size}{progress}' +
'</div>' +
'<div class="file-actions-cell">{indicator} {actions}</div>',
actions: '{drag}\n' +
'<div class="file-actions">\n' +
' <div class="file-footer-buttons">\n' +
' {upload} {download} {delete} {zoom} {other} ' +
' </div>\n' +
'</div>',
fileIcon: '<i class="fas fa-file kv-caption-icon"></i> '
},
previewSettings: {
html: {width: '100px', height: '60px'},
text: {width: '100px', height: '60px'},
video: {width: 'auto', height: '60px'},
audio: {width: 'auto', height: '60px'},
flash: {width: '100%', height: '60px'},
object: {width: '100%', height: '60px'},
pdf: {width: '100px', height: '60px'},
other: {width: '100%', height: '60px'}
},
frameClass: 'explorer-frame',
fileActionSettings: {
removeIcon: '<i class="fas fa-trash-alt"></i>',
uploadIcon: '<i class="fas fa-upload"></i>',
uploadRetryIcon: '<i class="fas fa-redo-alt"></i>',
downloadIcon: '<i class="fas fa-download"></i>',
zoomIcon: '<i class="fas fa-search-plus"></i>',
dragIcon: '<i class="fas fa-arrows-alt"></i>',
indicatorNew: '<i class="fas fa-plus-circle text-warning"></i>',
indicatorSuccess: '<i class="fas fa-check-circle text-success"></i>',
indicatorError: '<i class="fas fa-exclamation-circle text-danger"></i>',
indicatorLoading: '<i class="fas fa-hourglass text-muted"></i>',
indicatorPaused: '<i class="fa fa-pause text-info"></i>'
},
previewZoomButtonIcons: {
prev: '<i class="fas fa-caret-left fa-lg"></i>',
next: '<i class="fas fa-caret-right fa-lg"></i>',
toggleheader: '<i class="fas fa-fw fa-arrows-alt-v"></i>',
fullscreen: '<i class="fas fa-fw fa-arrows-alt"></i>',
borderless: '<i class="fas fa-fw fa-external-link-alt"></i>',
close: '<i class="fas fa-fw fa-times"></i>'
},
previewFileIcon: '<i class="fas fa-file"></i>',
browseIcon: '<i class="fas fa-folder-open"></i>',
removeIcon: '<i class="fas fa-trash-alt"></i>',
cancelIcon: '<i class="fas fa-ban"></i>',
pauseIcon: '<i class="fas fa-pause"></i>',
uploadIcon: '<i class="fas fa-upload"></i>',
msgValidationErrorIcon: '<i class="fas fa-exclamation-circle"></i> '
};
})(window.jQuery); | PypiClean |
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/bower_components/bootstrap-fileinput/js/locales/ja.js | (function ($) {
"use strict";
$.fn.fileinputLocales['ja'] = {
fileSingle: 'ファイル',
filePlural: 'ファイル',
browseLabel: 'ファイルを選択 …',
removeLabel: '削除',
removeTitle: '選択したファイルを削除',
cancelLabel: 'キャンセル',
cancelTitle: 'アップロードをキャンセル',
pauseLabel: 'Pause',
pauseTitle: 'Pause ongoing upload',
uploadLabel: 'アップロード',
uploadTitle: '選択したファイルをアップロード',
msgNo: 'いいえ',
msgNoFilesSelected: 'ファイルが選択されていません',
msgPaused: 'Paused',
msgCancelled: 'キャンセル',
msgPlaceholder: 'Select {files} ...',
msgZoomModalHeading: 'プレビュー',
msgFileRequired: 'ファイルを選択してください',
msgSizeTooSmall: 'ファイル"{name}" (<b>{size} KB</b>)はアップロード可能な下限容量<b>{minSize} KB</b>より小さいです',
msgSizeTooLarge: 'ファイル"{name}" (<b>{size} KB</b>)はアップロード可能な上限容量<b>{maxSize} KB</b>を超えています',
msgFilesTooLess: '最低<b>{n}</b>個の{files}を選択してください',
msgFilesTooMany: '選択したファイルの数<b>({n}個)</b>はアップロード可能な上限数<b>({m}個)</b>を超えています',
msgTotalFilesTooMany: 'You can upload a maximum of <b>{m}</b> files (<b>{n}</b> files detected).',
msgFileNotFound: 'ファイル"{name}"はありませんでした',
msgFileSecured: 'ファイル"{name}"は読み取り権限がないため取得できません',
msgFileNotReadable: 'ファイル"{name}"は読み込めません',
msgFilePreviewAborted: 'ファイル"{name}"のプレビューを中止しました',
msgFilePreviewError: 'ファイル"{name}"の読み込み中にエラーが発生しました',
msgInvalidFileName: 'ファイル名に無効な文字が含まれています "{name}".',
msgInvalidFileType: '"{name}"は無効なファイル形式です。"{types}"形式のファイルのみサポートしています',
msgInvalidFileExtension: '"{name}"は無効な拡張子です。拡張子が"{extensions}"のファイルのみサポートしています',
msgFileTypes: {
'image': 'image',
'html': 'HTML',
'text': 'text',
'video': 'video',
'audio': 'audio',
'flash': 'flash',
'pdf': 'PDF',
'object': 'object'
},
msgUploadAborted: 'ファイルのアップロードが中止されました',
msgUploadThreshold: '処理中 …',
msgUploadBegin: '初期化中 …',
msgUploadEnd: '完了',
msgUploadResume: 'Resuming upload …',
msgUploadEmpty: 'アップロードに有効なデータがありません',
msgUploadError: 'Upload Error',
msgDeleteError: 'Delete Error',
msgProgressError: 'エラー',
msgValidationError: '検証エラー',
msgLoading: '{files}個中{index}個目のファイルを読み込み中 …',
msgProgress: '{files}個中{index}個のファイルを読み込み中 - {name} - {percent}% 完了',
msgSelected: '{n}個の{files}を選択',
msgFoldersNotAllowed: 'ドラッグ&ドロップが可能なのはファイルのみです。{n}個のフォルダ-は無視されました',
msgImageWidthSmall: '画像ファイル"{name}"の幅が小さすぎます。画像サイズの幅は少なくとも{size}px必要です',
msgImageHeightSmall: '画像ファイル"{name}"の高さが小さすぎます。画像サイズの高さは少なくとも{size}px必要です',
msgImageWidthLarge: '画像ファイル"{name}"の幅がアップロード可能な画像サイズ({size}px)を超えています',
msgImageHeightLarge: '画像ファイル"{name}"の高さがアップロード可能な画像サイズ({size}px)を超えています',
msgImageResizeError: 'リサイズ時に画像サイズが取得できませんでした',
msgImageResizeException: '画像のリサイズ時にエラーが発生しました。<pre>{errors}</pre>',
msgAjaxError: '{operation}実行中にエラーが発生しました。時間をおいてもう一度お試しください。',
msgAjaxProgressError: '{operation} failed',
msgDuplicateFile: 'File "{name}" of same size "{size} KB" has already been selected earlier. Skipping duplicate selection.',
msgResumableUploadRetriesExceeded: 'Upload aborted beyond <b>{max}</b> retries for file <b>{file}</b>! Error Details: <pre>{error}</pre>',
msgPendingTime: '{time} remaining',
msgCalculatingTime: 'calculating time remaining',
ajaxOperations: {
deleteThumb: 'ファイル削除',
uploadThumb: 'ファイルアップロード',
uploadBatch: '一括ファイルアップロード',
uploadExtra: 'フォームデータアップロード'
},
dropZoneTitle: 'ファイルをドラッグ&ドロップ …',
dropZoneClickTitle: '<br>(または クリックして{files}を選択 )',
slugCallback: function(text) {
return text ? text.split(/(\\|\/)/g).pop().replace(/[^\w\u4e00-\u9fa5\u3040-\u309f\u30a0-\u30ff\u31f0-\u31ff\u3200-\u32ff\uff00-\uffef\-.\\\/ ]+/g, '') : '';
},
fileActionSettings: {
removeTitle: 'ファイルを削除',
uploadTitle: 'ファイルをアップロード',
uploadRetryTitle: '再アップロード',
zoomTitle: 'プレビュー',
dragTitle: '移動 / 再配置',
indicatorNewTitle: 'まだアップロードされていません',
indicatorSuccessTitle: 'アップロード済み',
indicatorErrorTitle: 'アップロード失敗',
indicatorPausedTitle: 'Upload Paused',
indicatorLoadingTitle: 'アップロード中 …'
},
previewZoomButtonTitles: {
prev: '前のファイルを表示',
next: '次のファイルを表示',
toggleheader: 'ファイル情報の表示/非表示',
fullscreen: 'フルスクリーン表示の開始/終了',
borderless: 'フルウィンドウ表示の開始/終了',
close: 'プレビューを閉じる'
}
};
})(window.jQuery); | PypiClean |
/Graph_RL-0.1.2.tar.gz/Graph_RL-0.1.2/graph_rl/subtasks/return_maxim_subtask.py | from copy import copy
import numpy as np
from . import Subtask
class ReturnMaximSubtask(Subtask):
    """Subtask whose objective is to maximize the expected return.

    Rewards come from the subtask specification; this class accumulates
    them, adds auxiliary rewards where applicable, and ends the subtask
    once the spec's action budget is exhausted.
    """

    def __init__(self, name, subtask_spec):
        """
        Args:
            name (str): Name of the subtask.
            subtask_spec (ReturnMaximSubtaskSpec): Specification of the return
                maximization subtask (observation mapping, reward function,
                action budget, ...).
        """
        super().__init__(name, subtask_spec)
        self.task_spec = subtask_spec
        # number of actions taken since the last reset/episode end
        self._n_actions_taken = 0
        # undiscounted return accumulated since the last reset/episode end
        self._return = 0

    @property
    def observation_space(self):
        """Observation space as defined by the subtask specification."""
        return self.task_spec.obs_space

    @property
    def parent_action_space(self):
        """Space of actions the parent node passes down to this subtask."""
        return self.task_spec.parent_action_space

    def reset(self):
        """Reset action counter, accumulated return and the subtask spec."""
        self._n_actions_taken = 0
        self._return = 0
        self.task_spec.reset()

    def get_observation(self, env_obs, parent_info, sess_info):
        """Map the environment observation (plus parent info) to the subtask observation."""
        return self.task_spec.map_to_obs(env_obs, parent_info, sess_info.ep_step)

    def check_interruption(self, env_info, new_subtask_obs, parent_info, sess_info):
        """Update the spec's internal state each time step; never interrupt children."""
        # update internal state of subtask spec in each time step
        self.task_spec.step_update(env_info, parent_info, sess_info)
        # return maximization subtask does not interrupt child nodes
        return False

    def evaluate_transition(self, env_obs, env_info, subtask_trans, parent_info, algo_info, sess_info):
        """Compute the reward for a transition and decide whether the subtask ended.

        Returns:
            tuple: ``(complete_subtask_trans, feedback)`` -- a copy of
            ``subtask_trans`` augmented with ``reward``, ``ended`` and an
            empty ``info`` dict, plus an (empty) feedback dict.
        """
        new_obs = self.task_spec.map_to_obs(
            env_info.new_obs, parent_info, sess_info.ep_step
        )
        reward = self.task_spec.get_reward(
            subtask_trans.obs, subtask_trans.action, new_obs, parent_info,
            sess_info.ep_step, env_info
        )
        # add auxiliary rewards (only meaningful when an action was taken
        # and an observation is available)
        if new_obs is not None and subtask_trans.action is not None:
            reward += self.get_aux_rewards(new_obs, subtask_trans.action)

        self._n_actions_taken += 1
        self._return += reward

        # subtask ends when the action budget is exhausted
        if (self.task_spec.max_n_actions is not None and
                self._n_actions_taken >= self.task_spec.max_n_actions):
            # tensorboard logging of the completed return
            if self._tb_writer is not None:
                mode = "test" if sess_info.testing else "train"
                self._tb_writer.add_scalar(
                    f"{self.name}/{mode}/return", self._return, sess_info.total_step)
            self._return = 0
            self._n_actions_taken = 0
            ended = True
        else:
            ended = False

        info = {}
        feedback = {}
        complete_subtask_trans = copy(subtask_trans)
        complete_subtask_trans.reward = reward
        complete_subtask_trans.ended = ended
        complete_subtask_trans.info = info
        return complete_subtask_trans, feedback
/IsPycharmRun-1.0.tar.gz/IsPycharmRun-1.0/airtest/utils/apkparser/apk.py |
from .axmlprinter import AXMLPrinter
import zipfile
from xml.dom import minidom
class APK:
    """APK manages the Android apk (zip) file format.

    On construction the archive is opened, its binary ``AndroidManifest.xml``
    is decoded via :class:`AXMLPrinter` and parsed with minidom, and the
    package name, version info and requested permissions are extracted.
    """

    def __init__(self, filename):
        """
        @param filename: path of the apk file on disk
        """
        self.filename = filename
        self.xml = {}
        self.package = ""
        self.androidversion = {}
        self._permissions = []
        self.valid_apk = False

        with open(filename, "rb") as fd:
            self.__raw = fd.read()

        # the ZipFile is kept open so get_file()/get_dex() can read
        # members lazily later on
        self.zip = zipfile.ZipFile(filename)
        for name in self.zip.namelist():
            if name == "AndroidManifest.xml":
                self.xml[name] = minidom.parseString(
                    AXMLPrinter(self.zip.read(name)).getBuff())
                manifest = self.xml[name].documentElement
                self.package = manifest.getAttribute("package")
                self.androidversion["Code"] = manifest.getAttribute("android:versionCode")
                self.androidversion["Name"] = manifest.getAttribute("android:versionName")
                for item in self.xml[name].getElementsByTagName("uses-permission"):
                    self._permissions.append(str(item.getAttribute("android:name")))
                self.valid_apk = True

    def is_valid_apk(self):
        """Return True if an AndroidManifest.xml was found and parsed."""
        return self.valid_apk

    def get_filename(self):
        """
        Return the filename of the APK
        """
        return self.filename

    def get_package(self):
        """
        Return the name of the package
        """
        return self.package

    def get_androidversion_code(self):
        """
        Return the android version code
        """
        return self.androidversion["Code"]
    androidversion_code = property(get_androidversion_code)

    def get_androidversion_name(self):
        """
        Return the android version name
        """
        return self.androidversion["Name"]
    androidversion_name = property(get_androidversion_name)

    def get_files(self):
        """
        Return the files inside the APK
        """
        return self.zip.namelist()
    files = property(get_files)

    def get_files_types(self):
        """
        Return the files inside the APK with their types (by using python-magic).

        Returns an empty dict when python-magic is not installed.
        """
        try:
            import magic
        except ImportError:
            return {}

        file_types = {}
        # the legacy "file" bindings expose magic.open(); the python-magic
        # package exposes a Magic class -- support both APIs
        try:
            getattr(magic, "Magic")
            builtin_magic = False
        except AttributeError:
            builtin_magic = True

        if builtin_magic:
            ms = magic.open(magic.MAGIC_NONE)
            ms.load()
            for name in self.get_files():
                file_types[name] = ms.buffer(self.zip.read(name))
        else:
            m = magic.Magic()
            for name in self.get_files():
                file_types[name] = m.from_buffer(self.zip.read(name))
        return file_types
    files_types = property(get_files_types)

    def get_raw(self):
        """
        Return raw bytes of the APK
        """
        return self.__raw
    raw = property(get_raw)

    def get_file(self, filename):
        """
        Return the raw data of the specified filename.

        Returns empty bytes when the member does not exist.
        """
        try:
            return self.zip.read(filename)
        except KeyError:
            # bug fix: zip.read() returns bytes, so the not-found sentinel
            # should be bytes as well (previously the str "")
            return b""

    def get_dex(self):
        """
        Return the raw data of the classes dex file
        """
        return self.get_file("classes.dex")
    dex = property(get_dex)

    def get_elements(self, tag_name, attribute):
        """
        Return elements in xml files which match the tag name and the specific
        attribute, with component names fully qualified by the package name.

        @param tag_name: a string which specifies the tag name
        @param attribute: a string which specifies the attribute
        """
        results = []
        for xml_name in self.xml:
            for item in self.xml[xml_name].getElementsByTagName(tag_name):
                value = item.getAttribute(attribute)
                if len(value) > 0:
                    if value[0] == ".":
                        # relative component name, e.g. ".MainActivity"
                        value = self.package + value
                    elif "." not in value:
                        # bare name without any package qualifier
                        # (the original v_dot == 0 branch was unreachable:
                        # a leading dot is handled above)
                        value = self.package + "." + value
                    results.append(str(value))
        return results

    def get_element(self, tag_name, attribute):
        """
        Return the first non-empty matching attribute value, or None.

        @param tag_name: a string which specifies the tag name
        @param attribute: a string which specifies the attribute
        """
        for xml_name in self.xml:
            for item in self.xml[xml_name].getElementsByTagName(tag_name):
                value = item.getAttribute(attribute)
                if len(value) > 0:
                    return value
        return None

    def get_activities(self):
        """
        Return the android:name attribute of all activities
        """
        return self.get_elements("activity", "android:name")
    activities = property(get_activities)

    def get_services(self):
        """
        Return the android:name attribute of all services
        """
        return self.get_elements("service", "android:name")
    services = property(get_services)

    def get_receivers(self):
        """
        Return the android:name attribute of all receivers
        """
        return self.get_elements("receiver", "android:name")
    receivers = property(get_receivers)

    def get_providers(self):
        """
        Return the android:name attribute of all providers
        """
        return self.get_elements("provider", "android:name")
    providers = property(get_providers)

    def get_permissions(self):
        """
        Return the list of requested permissions
        """
        return self._permissions
    permissions = property(get_permissions)

    def get_min_sdk_version(self):
        """
        Return the android:minSdkVersion attribute
        """
        return self.get_element("uses-sdk", "android:minSdkVersion")
    min_sdk_version = property(get_min_sdk_version)

    def get_target_sdk_version(self):
        """
        Return the android:targetSdkVersion attribute
        """
        return self.get_element("uses-sdk", "android:targetSdkVersion")
    target_sdk_version = property(get_target_sdk_version)

    def get_libraries(self):
        """
        Return the android:name attributes for libraries
        """
        return self.get_elements("uses-library", "android:name")
    libraries = property(get_libraries)

    def show(self):
        """Print a summary of the APK contents."""
        print("FILES: ", self.get_files_types())
        print("ACTIVITIES: ", self.get_activities())
        print("SERVICES: ", self.get_services())
        print("RECEIVERS: ", self.get_receivers())
        print("PROVIDERS: ", self.get_providers())
/Hanalytics_python_package-0.3.1.tar.gz/Hanalytics_python_package-0.3.1/hanalytics/hanalytics_keywords_generator/googleAds_keywords.py | import openai
import pandas as pd
"""" use this to test
#Your context and prompt must be as precise as possible. Give every detail possible.
# To get "wow" results, you can tell GPT to consider itself a person with 20 or 30 years of experience in the field you want.
import googleAds_keywords
context = 'YOUR CONTEXT HERE'
prompt = 'YOUR PROMPT HERE'
api_key = "your_API_key"
temperature = 0.7 #standard used by OPENAI (you can change it)
keywords, headlines, descriptions, headline_counts, description_counts = mymodule.run_model(prompt, context, api_key, temperature)
"""
"""
1. temperature: In the context of a language model, the temperature is a parameter that controls the randomness of the model's output.
It determines the sharpness of the probability distribution used when sampling the next word to generate in a sentence.
If the temperature is low (e.g., close to 0), the model is more deterministic and more likely to select the most probable word at each step.
If the temperature is high (e.g., closer to 1), the model becomes more random and likely to select less probable words(becomes more creative).
This parameter can help balance between repetitive but accurate output (low temperature) and diverse but potentially inaccurate output (high temperature).
2. frequency_penalty: This parameter, which is sometimes used in models like GPT-3 and similar architectures, is used to penalize the selection of tokens that have been used more frequently.
It's a way of controlling the repetitiveness of the output.
A higher frequency penalty encourages more unique responses, while a lower frequency penalty may result in more repetitive responses.
3. presence_penalty: The presence penalty, another configuration parameter in some language models, penalizes the selection of new tokens (words) that have not appeared in the output so far.
A higher presence penalty would encourage the model to reuse words that have already appeared in the output,
while a lower presence penalty would encourage more varied language usage and the introduction of new words.
Please note that these parameters are very specific to certain language models and might not be used in other types of machine learning models.
"""
def count_characters(headlines, descriptions):
    """Return per-item character counts for headlines and descriptions.

    Spaces are included in the counts.

    Args:
        headlines (list[str]): headlines generated by the OpenAI API.
        descriptions (list[str]): descriptions generated by the OpenAI API.

    Returns:
        tuple[list[int], list[int]]: character counts for the headlines and
        for the descriptions, in input order.
    """
    headline_lengths = list(map(len, headlines))
    description_lengths = list(map(len, descriptions))
    return headline_lengths, description_lengths
def run_model(prompt, context, api_key, temperature, output_csv="mdpparis.csv"):
    """Generate ad keywords, headlines and French descriptions with OpenAI.

    The "text-davinci-003" model is first prompted (with *context* prepended)
    to produce keywords.  For each keyword a short ad headline and a French
    description are generated, character counts are computed, everything is
    printed, and the results are written to *output_csv*.

    Args:
        prompt (str): your prompt.
        context (str): context of your prompt.
        api_key (str): your OpenAI API key.
        temperature (float): sampling temperature for the keyword generation.
        output_csv (str): path of the CSV file to write.  Defaults to
            "mdpparis.csv" for backward compatibility.

    Returns:
        tuple: (keywords, headlines, descriptions, headline_counts,
        description_counts).
    """
    openai.api_key = api_key

    def _complete(text, max_tokens, temp):
        # One-shot completion helper: all calls share engine and decoding
        # parameters, only the prompt, token budget and temperature vary.
        response = openai.Completion.create(
            engine="text-davinci-003",
            prompt=text,
            temperature=temp,
            max_tokens=max_tokens,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )
        return response['choices'][0]['text']

    # Keywords come back one per line; drop empty lines.
    raw_keywords = _complete(f'{context}\n{prompt}', 150, temperature)
    keywords = [keyword for keyword in raw_keywords.split('\n')
                if keyword.strip() != '']

    # Generate a headline and a French description for each keyword.
    headlines = []
    descriptions = []
    for keyword in keywords:
        headline_prompt = (
            f'Generate a unique expert ad headline for the keyword "{keyword}"'
            '(alternate french and english). The goal is to attract many clients.'
            'Limit to 20 characters.'
        )
        headline = _complete(headline_prompt, 20, 0.7).strip()
        headlines.append(headline)

        description_prompt = (
            f"Generate a unique description in French for the title '{headline}'."
            f" Limit of 40 characters."
        )
        descriptions.append(_complete(description_prompt, 30, 0.7).strip())

    # Character counts for each headline and description.
    headline_counts, description_counts = count_characters(headlines, descriptions)

    # Print the keywords with their headlines, descriptions and counts.
    for keyword, headline, description, headline_count, description_count in zip(
            keywords, headlines, descriptions, headline_counts, description_counts):
        print(f"Keyword: {keyword}\nHeadline: {headline} (Length: {headline_count} characters)\nDescription: {description} (Length: {description_count} characters)\n")

    # Persist everything as a CSV via pandas.
    data = {
        "Keywords": keywords,
        "Headline": headlines,
        "Headline Length": headline_counts,
        "Description": descriptions,
        "Description Length": description_counts
    }
    df = pd.DataFrame(data)
    df.to_csv(output_csv, index=False)

    return keywords, headlines, descriptions, headline_counts, description_counts
/MindsDB-23.8.3.0.tar.gz/MindsDB-23.8.3.0/mindsdb/migrations/versions/2022-10-14_43c52d23845a_projects.py | from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import text
import mindsdb.interfaces.storage.db as db
# revision identifiers, used by Alembic.
revision = '43c52d23845a'  # unique id of this migration
down_revision = 'cada7d2be947'  # migration this one builds on
branch_labels = None  # no named branches for this revision
depends_on = None  # no cross-branch dependencies
def upgrade():
    """Introduce the ``project`` table.

    Creates the table, adds a default 'mindsdb' project, attaches every
    existing predictor and view to it through a new non-nullable
    ``project_id`` foreign-key column, and renames any view whose name
    collides with a predictor name (``_view`` suffix).
    """
    op.create_table(
        'project',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('deleted_at', sa.DateTime(), nullable=True),
        sa.Column('name', sa.String(), nullable=False),
        sa.Column('company_id', sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name', 'company_id', name='unique_project_name_company_id')
    )

    conn = op.get_bind()
    session = sa.orm.Session(bind=conn)

    # default project every pre-existing record gets assigned to
    project_record = db.Project(name='mindsdb')
    session.add(project_record)
    session.commit()

    # predictor.project_id: add as nullable, backfill, then tighten to NOT NULL
    # (batch_alter_table so the schema change also works on SQLite)
    with op.batch_alter_table('predictor', schema=None) as batch_op:
        batch_op.add_column(sa.Column('project_id', sa.Integer()))
        batch_op.create_foreign_key('fk_project_id', 'project', ['project_id'], ['id'])
    conn.execute(sa.sql.text('''
        update predictor set project_id = :project_id
    '''), {'project_id': project_record.id})
    with op.batch_alter_table('predictor', schema=None) as batch_op:
        batch_op.alter_column(
            'project_id',
            existing_type=sa.INTEGER(),
            nullable=False
        )

    # same add/backfill/tighten sequence for the view table
    with op.batch_alter_table('view', schema=None) as batch_op:
        batch_op.add_column(sa.Column('project_id', sa.Integer()))
        batch_op.create_foreign_key('fk_project_id', 'project', ['project_id'], ['id'])
    conn.execute(sa.sql.text('''
        update view set project_id = :project_id
    '''), {'project_id': project_record.id})
    with op.batch_alter_table('view', schema=None) as batch_op:
        batch_op.alter_column(
            'project_id',
            existing_type=sa.INTEGER(),
            nullable=False
        )

    # views now share a namespace with predictors; rename clashing views
    views = conn.execute(sa.text('''
        select id, name from view
        where exists (select 1 from predictor where view.name = predictor.name)
    ''')).fetchall()
    for row in views:
        conn.execute(
            text("""
                update view
                set name = :name
                where id = :view_id
            """), {
                'name': f"{row['name']}_view",
                'view_id': row['id']
            }
        )
    session.commit()
def downgrade():
    """Undo the project migration.

    Restores the legacy 'views' integration if it is missing, drops the
    ``project_id`` columns (and their foreign keys) from ``view`` and
    ``predictor``, and finally drops the ``project`` table.
    """
    bind = op.get_bind()
    session = sa.orm.Session(bind=bind)

    # recreate the 'views' integration unless one already exists
    existing_integration = db.Integration.query.filter_by(name='views').first()
    if existing_integration is None:
        session.add(db.Integration(
            name='views',
            data={},
            engine='views',
            company_id=None
        ))
        session.commit()

    # drop project_id from both tables (view first, then predictor)
    for table_name in ('view', 'predictor'):
        with op.batch_alter_table(table_name, schema=None) as batch_op:
            batch_op.drop_constraint('fk_project_id', type_='foreignkey')
            batch_op.drop_column('project_id')

    op.drop_table('project')
/BYOSED-0.0.3-py3-none-any.whl/byosed/perturber_io.py | import numpy as np
import pandas,glob,sncosmo,os,sys,scipy
from scipy.interpolate import interpn,interp1d
from scipy.signal import savgol_filter
from sncosmo.constants import HC_ERG_AA, MODEL_BANDFLUX_SPACING
from sncosmo.utils import integration_grid
from astropy.io import ascii
from astropy.table import Table
import matplotlib.pyplot as plt
__all__ = ['generate_ND_grids','read_ND_grids','kaepora_to_sed']
def kaepora_to_sed(data_folder, perturber_keys, base_sed='hsiao', minWave=0, maxWave=np.inf, minPhase=-np.inf,
                   maxPhase=np.inf, waveStep=10, scale_band='bessellb'):
    """Build time-series SEDs from kaepora composite spectrum files.

    For every key in *perturber_keys*, all files in *data_folder* whose names
    contain that key are read.  The phase of each spectrum is parsed from the
    file name (the token after ``phase=``; a leading ``m`` means a negative
    phase -- assumes kaepora's file naming convention, TODO confirm).  The
    composite spectra are scaled to match the baseline SED's BVR band fluxes
    and stitched onto the baseline SED outside their wavelength coverage.

    Args:
        data_folder (str): directory containing the kaepora spectrum files.
        perturber_keys (list): substrings identifying one file set per key.
        base_sed (str or sncosmo.Model): baseline SED model ('hsiao' default).
        minWave, maxWave (float): wavelength cuts applied to each spectrum.
        minPhase, maxPhase (float): phase cuts applied to each file set.
        waveStep (float): wavelength step of the output grid.
        scale_band (str): NOTE(review): currently unused -- scaling always
            uses the Bessell B, V and R bands.

    Returns:
        dict: key -> [sorted phases, wavelength grid, sncosmo.TimeSeriesSource].
    """
    # per-band integration grids and transmissions for synthetic photometry
    waves = {}
    trans = {}
    dwaves = {}
    for b in ['bessellb', 'bessellv', 'bessellr']:
        band = sncosmo.get_bandpass(b)
        wave, dwave = integration_grid(band.minwave(), band.maxwave(),
                                       MODEL_BANDFLUX_SPACING)
        waves[b] = wave
        dwaves[b] = dwave
        trans[b] = band(wave)
    if isinstance(base_sed, str):
        base_sed = sncosmo.Model(base_sed)
    # NOTE(review): reaches into sncosmo private attributes for the native
    # wavelength grid of the baseline model
    base_sed_wave = base_sed._source._wave
    # one glob result list per perturber key ('*key*' inside data_folder)
    filelists = []
    for k in perturber_keys:
        filelists.append(glob.glob(os.path.join(data_folder, '*%s*' % k)))
    seds = {}
    phase_pairs = {}    # rounded phase -> one spectrum dict per key (in key order)
    phase_lists = {}    # key -> list of phases found for that key
    scale_factors = {}  # rounded phase -> BVR-matching flux scale factor
    for i in range(len(filelists)):
        filelist = filelists[i]
        temp_key = perturber_keys[i]
        temp_phase = []
        j = 0  # NOTE(review): unused
        for f in filelist:
            dat = ascii.read(f)
            # parse the phase token following 'phase=' in the file name;
            # a leading 'm' encodes a negative phase
            phase = f[f.find('phase=') + 6:f.find('phase=') + 6 + (f[f.find('phase=') + 6:]).find('_')]
            phase = -1 * float(phase[1:]) if phase[0] == 'm' else float(phase[1:])
            temp_phase.append(phase)
            # apply the wavelength cuts
            dat = dat[dat['Wavelength'] >= minWave]
            dat = dat[dat['Wavelength'] <= maxWave]
            if i == 0:
                # the first key defines the reference phases
                key_phase = np.round(phase, 2)
                phase_pairs[key_phase] = [{'wave': dat['Wavelength'],
                                           'flux': dat['Flux'], 'interp': interp1d(dat['Wavelength'], dat['Flux'], kind='cubic')}]
            else:
                # later keys are grouped with the nearest reference phase
                pair_list = np.array(list(phase_pairs.keys()))
                match = pair_list[np.abs(pair_list - phase).argmin()]
                key_phase = np.round(match, 2)
                phase_pairs[key_phase].append({'wave': dat['Wavelength'],
                                               'flux': dat['Flux'], 'interp': interp1d(dat['Wavelength'], dat['Flux'], kind='cubic')})
            # presumably a flux value of exactly 10 marks the normalization
            # anchor in these files; derive the scale factor from such
            # spectra -- TODO confirm
            if np.min(np.abs(dat['Flux'] - 10)) < .001:
                scale_factors[key_phase] = _get_kaepora_scale(phase_pairs[key_phase][i]['interp'],
                                                              base_sed, waves, trans, dwaves, key_phase)
        if len(np.unique(temp_phase)) != len(temp_phase):
            print('You have more than one file for at least one phase.')
            sys.exit(1)
        phase_lists[temp_key] = temp_phase
    # common output wavelength grid spanning the baseline SED's native range
    final_wavelength = np.arange(base_sed_wave[0], base_sed_wave[-1] + waveStep / 10, waveStep)
    to_save = {}  # NOTE(review): populated below but never used
    for j in range(len(perturber_keys)):
        # apply the phase cuts for this key
        temp_phase = np.array(phase_lists[perturber_keys[j]])
        bound_inds = np.where(np.logical_and(temp_phase >= minPhase, temp_phase <= maxPhase))[0]
        final_phase = np.sort(temp_phase[bound_inds])
        pair_list = np.array(list(phase_pairs.keys()))
        match = np.round(np.array([pair_list[np.abs(pair_list - phase).argmin()] for phase in final_phase]), 2)
        final_flux = []
        for i in range(len(match)):
            # scaled cubic interpolant of this key's spectrum at this phase
            scaled_flux = interp1d(phase_pairs[match[i]][j]['wave'],
                                   phase_pairs[match[i]][j]['flux'] * scale_factors[match[i]], kind='cubic')
            to_save[final_phase[i]] = scale_factors[match[i]]
            temp = np.zeros(len(final_wavelength))
            for w in range(len(final_wavelength)):
                # outside the spectrum's coverage, fall back to the baseline SED
                if final_wavelength[w] < np.min(phase_pairs[match[i]][j]['wave']) or final_wavelength[w] > np.max(phase_pairs[match[i]][j]['wave']):
                    temp[w] = base_sed._source._flux(final_phase[i], final_wavelength[w])
                else:
                    temp[w] = scaled_flux(final_wavelength[w])
            final_flux.append(temp)
        seds[perturber_keys[j]] = [np.sort(final_phase), final_wavelength, sncosmo.TimeSeriesSource(final_phase, final_wavelength,
                                                                                                    np.array(final_flux))]
    return (seds)
def _band_min(scale,args):
"""
Minimization function for _get_kaepora_scale
"""
ktotal,btotal=args
return(np.abs(scale*ktotal-btotal))
def _get_kaepora_scale(kinterp, bsed, waves, trans, dwaves, phase):
    """
    Minimizes difference between BVR filters for baseline SED and
    kaepora composite spectra.

    Args:
        kinterp (callable): wavelength -> flux interpolant of the kaepora spectrum.
        bsed (sncosmo.Model): baseline SED model; its 'amplitude' parameter is
            temporarily modified and reset to 1 before returning.
        waves, trans, dwaves (dict): per-band integration wavelengths,
            transmissions and wavelength steps keyed by band name.
        phase (float): phase at which the baseline SED is evaluated.

    Returns:
        scale factor to apply to the kaepora spectrum (result of the
        bounded 1-D minimization, divided by the temporary amplitude).
    """
    kflux = 0
    bflux = 0
    # normalize the baseline SED so its peak B-band flux at this phase is 1
    bsed['amplitude'] = 1 / np.max(bsed.flux(phase, waves['bessellb']))
    for b in ['bessellb', 'bessellv', 'bessellr']:
        # synthetic photometry of the kaepora spectrum (photon-counting integral)
        kflux += (np.sum(waves[b] * trans[b] * kinterp(waves[b]).flatten()) * dwaves[b] / HC_ERG_AA)
        bflux += bsed.bandflux(b, phase)
    # find the scale (bounded to [0, 10]) matching the summed BVR fluxes
    x = scipy.optimize.minimize(_band_min,
                                [.1],
                                [kflux, bflux], bounds=[[0, 10]])
    # undo the temporary amplitude normalization in the returned scale
    final_scale = x.x / bsed['amplitude']
    bsed['amplitude'] = 1
    return final_scale
def _meshgrid2(*arrs):
arrs = tuple(arrs) #edit
lens = list(map(len, arrs))
dim = len(arrs)
sz = 1
for s in lens:
sz*=s
ans = []
for i, arr in enumerate(arrs):
slc = [1]*dim
slc[i] = lens[i]
arr2 = np.asarray(arr).reshape(slc)
for j, sz in enumerate(lens):
if j!=i:
arr2 = arr2.repeat(sz, axis=j)
ans.append(arr2)
return tuple(ans)
def line_prepender(filename, line):
    """Insert *line* as the first line of *filename*.

    Any trailing CR/LF on *line* is stripped before a single newline is
    appended, so the existing content always starts on the next line.
    """
    with open(filename) as handle:
        existing = handle.read()
    with open(filename, 'w') as handle:
        handle.write(line.rstrip('\r\n') + '\n' + existing)
def generate_ND_grids(func, filename=None, colnames=None, *arrs):
    """Evaluate *func* on the cartesian product of the input arrays.

    Args:
        func (callable): vectorized function of N array arguments.
        filename (str): if given, the grid is also written there via
            ``np.savetxt`` (one row per grid point, value in the last column;
            the header line is prefixed with '# ' by savetxt).
        colnames (list): optional column names for the file header.
        *arrs: one 1-D array of sample values per function argument.

    Returns:
        numpy.ndarray: array of shape (n_points, N + 1) -- the N coordinates
        of every grid point followed by the function value.
    """
    grids = _meshgrid2(*arrs)
    # flatten the N-dim grids into an (n_points, N) matrix of coordinates
    positions = np.vstack(list(map(np.ravel, grids))).T
    res = func(*(positions[:, i] for i in range(positions.shape[1]))).reshape((positions.shape[0], 1))
    gridded = np.hstack([positions, res])
    if filename is not None:
        header = ' '.join(colnames) if colnames is not None else ''
        np.savetxt(filename, gridded, fmt='%f', header=header)
    return gridded
def read_ND_grids(filename, scale_factor=1.):
    """Load an N-dimensional grid written by :func:`generate_ND_grids`.

    The file contains whitespace-separated columns: N parameter columns
    followed by one value column, optionally preceded by a
    ``# name1 name2 ...`` header line.

    Args:
        filename (str): path of the grid file.
        scale_factor (float): multiplied into the gridded values.

    Returns:
        tuple: (upper-cased parameter-column names, interpolation function).
        The function maps an (n_points, N) array to linearly-interpolated
        values; points outside the grid evaluate to 0.
    """
    with open(filename, 'r') as f:
        first_line = f.readline()

    if first_line.startswith('#'):
        # the header line carries the column names
        names = first_line.strip('#').split()
        gridded = pandas.read_csv(filename, sep=' ', names=names,
                                  comment='#', header=None)
    else:
        gridded = pandas.read_csv(filename, sep=' ', comment='#', header=None)

    # the unique sorted values of each parameter column define the grid axes
    arrs = tuple(np.unique(gridded.values[:, i])
                 for i in range(len(gridded.columns) - 1))
    dim = [len(x) for x in arrs]
    # the last column holds the gridded quantity, reshaped onto the axes
    theta = np.array(gridded[gridded.columns[-1]]).reshape(dim) * scale_factor

    # bug fix: str() guards the headerless case, where pandas assigns integer
    # column labels and the original code crashed on int.upper()
    param_names = [str(x).upper() for x in gridded.columns][:-1]
    return (param_names,
            lambda interp_array: interpn(arrs, theta, xi=interp_array,
                                         method='linear', bounds_error=False,
                                         fill_value=0))
/ML_APIs-0.0.3.tar.gz/ML_APIs-0.0.3/ML_APIs/local/vision_local.py | from ML_APIs.utils import *
class vision_classifier_local:
def __init__(self, path, model, credentials):
self.path = path
self.model = model
self.credentials = credentials
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = f"{self.credentials}"
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_logo_detection]
with io.open(self.path, "rb") as image_file:
content = image_file.read()
self.image = vision.types.Image(content=content)
if self.model == 'labels':
"""Detects labels in the file."""
response = client.label_detection(image=self.image)
labels = response.label_annotations
df = pd.DataFrame(
columns=["media", "media_type", "description", "score", "topicality"])
for label in labels:
df = df.append(
dict(
media=self.path,
media_type='photo',
description=label.description,
score=label.score,
topicality=label.topicality
), ignore_index=True
)
self.labels = df
elif self.model == 'logos':
"""Detects logos in the file."""
response = client.logo_detection(image=self.image)
logos = response.logo_annotations
df = pd.DataFrame(
columns=["media", "media_type", "description", "score", "bounding_poly"])
for logo in logos:
df = df.append(
dict(
media=self.path,
media_type='photo',
description=logo.description,
score=logo.score,
bounding_poly=logo.bounding_poly
), ignore_index=True
)
self.logos = df
elif self.model == 'objects':
"""Detects logos in the file."""
response = client.logo_detection(image=self.image)
logos = response.logo_annotations
df = pd.DataFrame(
columns=["media", "media_type", "description", "score", "bounding_poly"])
for logo in logos:
df = df.append(
dict(
media=self.path,
media_type='photo',
description=logo.description,
score=logo.score,
bounding_poly=logo.bounding_poly
), ignore_index=True
)
self.localize_objects = df
elif self.model == 'landmarks':
"""Detects landmarks in the file."""
response = client.landmark_detection(image=self.image)
landmarks = response.landmark_annotations
df = pd.DataFrame(
columns=["media", "media_type", "description", "score", "bounding_poly", "locations"])
for landmark in landmarks:
df = df.append(
dict(
media=self.path,
media_type='photo',
description=landmark.description,
score=landmark.score,
bounding_poly=landmark.bounding_poly,
locations=landmark.locations
), ignore_index=True
)
self.landmarks = df
elif self.model == 'faces':
"""Detects faces in an image."""
response = client.face_detection(image=self.image)
faces = response.face_annotations
# Names of likelihood from google.cloud.vision.enums
likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
'LIKELY', 'VERY_LIKELY')
df = pd.DataFrame(columns=["media", "media_type", "bounding_poly", "joy", "sorrow",
"anger", "surprise", "under_exposed", "blurred", "headwear"])
for face in faces:
df = df.append(
dict(
media=self.path,
media_type='photo',
bounding_poly=face.bounding_poly,
joy=likelihood_name[face.joy_likelihood],
sorrow=likelihood_name[face.sorrow_likelihood],
anger=likelihood_name[face.anger_likelihood],
surprise=likelihood_name[face.surprise_likelihood],
under_exposed=likelihood_name[face.under_exposed_likelihood],
blurred=likelihood_name[face.blurred_likelihood],
headwear=likelihood_name[face.headwear_likelihood]
), ignore_index=True
)
self.faces = df
else:
print(
"Couldn't find the model. Are you sure it's in the below following" + "\n\n" +
"1. labels" + "\n" +
"2. logos" + "\n" +
"3. objects" + "\n" +
"4. landmarks" + "\n" +
"5. faces"
) | PypiClean |
/Flask_Unchained-0.9.0-py3-none-any.whl/flask_unchained/bundles/__init__.py | import importlib
import os
from types import FunctionType
from typing import *
from ..flask_unchained import FlaskUnchained
from ..string_utils import right_replace, slugify, snake_case
from ..unchained import unchained
def _normalize_module_name(module_name):
if module_name.endswith('.bundle'):
return right_replace(module_name, '.bundle', '')
return module_name
class BundleMetaclass(type):
def __new__(mcs, name, bases, clsdict):
# check if the user explicitly set module_name
module_name = clsdict.get('module_name')
if isinstance(module_name, str):
clsdict['module_name'] = _normalize_module_name(module_name)
return super().__new__(mcs, name, bases, clsdict)
class _BundleModuleNameDescriptor:
def __get__(self, instance, cls):
return _normalize_module_name(cls.__module__)
def __set__(self, instance, value):
raise AttributeError
class _BundleIsSingleModuleDescriptor:
def __get__(self, instance, cls):
return not importlib.util.find_spec(cls.module_name).submodule_search_locations
def __set__(self, instance, value):
raise AttributeError
class _BundleRootPathDescriptor:
def __get__(self, instance, cls):
module = importlib.import_module(cls.module_name)
return os.path.dirname(module.__file__)
def __set__(self, instance, value):
raise AttributeError
class _BundleNameDescriptor:
def __init__(self, *, strip_bundle_suffix: bool = False):
self.strip_bundle_suffix = strip_bundle_suffix
def __get__(self, instance, cls):
if self.strip_bundle_suffix:
return snake_case(right_replace(cls.__name__, 'Bundle', ''))
return snake_case(cls.__name__)
class _BundleStaticFolderDescriptor:
def __get__(self, instance, cls):
if cls.is_single_module and issubclass(cls, AppBundle):
return None # this would be the same as the top-level static folder registered with Flask
if not hasattr(instance, '_static_folder'):
instance._static_folder = os.path.join(instance.root_path, 'static')
if not os.path.exists(instance._static_folder):
instance._static_folder = None
return instance._static_folder
class _BundleStaticUrlPathDescriptor:
def __get__(self, instance, cls):
if instance._static_folders:
return f'/{slugify(cls.name)}/static'
class _BundleTemplateFolderDescriptor:
def __get__(self, instance, cls):
if not hasattr(instance, '_template_folder'):
instance._template_folder = os.path.join(instance.root_path, 'templates')
if not os.path.exists(instance._template_folder):
instance._template_folder = None
return instance._template_folder
class Bundle(metaclass=BundleMetaclass):
"""
Base class for bundles.
Should be placed in your package's root or its ``bundle`` module::
# your_bundle_package/__init__.py or your_bundle_package/bundle.py
class YourBundle(Bundle):
pass
"""
name: str = _BundleNameDescriptor(strip_bundle_suffix=False)
"""
Name of the bundle. Defaults to the snake_cased class name.
"""
module_name: str = _BundleModuleNameDescriptor()
"""
Top-level module name of the bundle (dot notation).
Automatically determined; read-only.
"""
root_path: str = _BundleRootPathDescriptor()
"""
Root directory path of the bundle's package.
Automatically determined; read-only.
"""
template_folder: Optional[str] = _BundleTemplateFolderDescriptor()
"""
Root directory path of the bundle's template folder. By default, if there exists
a folder named ``templates`` in the bundle package
:attr:`~flask_unchained.Bundle.root_path`, it will be used, otherwise ``None``.
"""
static_folder: Optional[str] = _BundleStaticFolderDescriptor()
"""
Root directory path of the bundle's static assets folder. By default, if there exists
a folder named ``static`` in the bundle package
:attr:`~flask_unchained.Bundle.root_path`, it will be used, otherwise ``None``.
"""
static_url_path: Optional[str] = _BundleStaticUrlPathDescriptor()
"""
Url path where this bundle's static assets will be served from. If
:attr:`~flask_unchained.Bundle.static_folder` is set, this will default to
``/<bundle.name>/static``, otherwise ``None``.
"""
is_single_module: bool = _BundleIsSingleModuleDescriptor()
"""
Whether or not the bundle is a single module (Python file).
Automatically determined; read-only.
"""
default_load_from_module_name: Optional[str] = None
"""
The default module name for hooks to load from. Set hooks' bundle modules override
attributes for the modules you want in separate files.
.. admonition:: WARNING - EXPERIMENTAL
:class: danger
Using this feature may cause mysterious exceptions to be thrown!!
Best practice is to organize your code in separate modules.
"""
_deferred_functions: List[FunctionType] = []
"""
Deferred functions to be registered with the
:class:`~flask_unchained.bundles.controller.bundle_blueprint.BundleBlueprint`
that gets created for this bundle.
The :class:`~flask_unchained.Unchained` extension copies these values from the
:class:`DeferredBundleBlueprintFunctions` instance it created for this bundle.
"""
def before_init_app(self, app: FlaskUnchained) -> None:
"""
Override this method to perform actions on the
:class:`~flask_unchained.FlaskUnchained` app instance *before* the
``unchained`` extension has initialized the application.
"""
pass
def after_init_app(self, app: FlaskUnchained) -> None:
"""
Override this method to perform actions on the
:class:`~flask_unchained.FlaskUnchained` app instance *after* the
``unchained`` extension has initialized the application.
"""
pass
def _iter_class_hierarchy(self, include_self: bool = True, mro: bool = False):
"""
Iterate over the bundle classes in the hierarchy. Yields base-most
instances first (aka opposite of Method Resolution Order).
For internal use only.
:param include_self: Whether or not to yield the top-level bundle.
:param mro: Pass True to yield bundles in Method Resolution Order.
"""
supers = self.__class__.__mro__[(0 if include_self else 1):]
for bundle_cls in (supers if mro else reversed(supers)):
if bundle_cls not in {object, AppBundle, Bundle}:
if bundle_cls == self.__class__:
yield self
else:
yield bundle_cls()
@property
def _has_views(self) -> bool:
"""
Returns True if any of the bundles in the hierarchy has a views module.
For internal use only.
"""
if self.is_single_module and isinstance(self, AppBundle):
return True
from ..hooks.views_hook import ViewsHook
for bundle in self._iter_class_hierarchy():
if ViewsHook.import_bundle_modules(bundle):
return True
return False
@property
def _blueprint_name(self) -> str:
"""
Get the name to use for the blueprint for this bundle.
For internal use only.
"""
if self._is_top_bundle or not self._has_hierarchy_name_conflicts:
return self.name
for i, bundle in enumerate(self._iter_class_hierarchy()):
if bundle.__class__ == self.__class__:
return f'{self.name}_{i}'
@property
def _static_folders(self) -> List[str]:
"""
Get the list of static folders for this bundle.
For internal use only.
"""
if not self._has_hierarchy_name_conflicts:
return [self.static_folder] if self.static_folder else []
elif not self._is_top_bundle:
return []
return [b.static_folder for b in self._iter_class_hierarchy(mro=True)
if b.static_folder and b.name == self.name]
@property
def _is_top_bundle(self) -> bool:
"""
Whether or not this bundle is the top-most bundle in the hierarchy.
For internal use only.
"""
return not self.__class__.__subclasses__()
@property
def _has_hierarchy_name_conflicts(self) -> bool:
"""
Whether or not there are any name conflicts between bundles in the hierarchy.
For internal use only.
"""
top_bundle = self.__class__
subclasses = top_bundle.__subclasses__()
while subclasses:
top_bundle = subclasses[0]
subclasses = top_bundle.__subclasses__()
return any(b.name == self.name and b.__class__ != self.__class__
for b in top_bundle()._iter_class_hierarchy())
def __getattr__(self, name):
if name in {'before_request', 'after_request', 'teardown_request',
'context_processor', 'url_defaults', 'url_value_preprocessor',
'errorhandler'}:
from warnings import warn
warn('The app has already been initialized. Please register '
f'{name} sooner.')
return
raise AttributeError(name)
def __repr__(self) -> str:
return (f'<{self.__class__.__name__} '
f'name={self.name!r} '
f'module={self.module_name!r}>')
class AppBundleMetaclass(BundleMetaclass):
    """
    Metaclass for :class:`~flask_unchained.AppBundle` to automatically set the
    user's subclass on the :class:`~flask_unchained.Unchained` extension instance.
    """

    def __init__(cls, name, bases, clsdict):
        super().__init__(name, bases, clsdict)
        # Record the most recently defined class using this metaclass on the
        # global ``unchained`` extension. Defining AppBundle itself also runs
        # this; the attribute is simply overwritten when the user's own
        # subclass is later defined.
        unchained._app_bundle_cls = cls
class AppBundle(Bundle, metaclass=AppBundleMetaclass):
    """
    Like :class:`~flask_unchained.Bundle`, except used for the top-most
    application bundle.
    """

    # Descriptor that derives the bundle name from the class name; for the
    # app bundle, any trailing "Bundle" suffix is stripped.
    name: str = _BundleNameDescriptor(strip_bundle_suffix=True)
    """
    Name of the bundle. Defaults to the snake_cased class name, excluding any
    "Bundle" suffix.
    """
# Public names exported by this module.
__all__ = [
    'AppBundle',
    'AppBundleMetaclass',
    'Bundle',
    'BundleMetaclass',
]
# /Brian2-2.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl/brian2/core/clocks.py
__docformat__ = "restructuredtext en"
import numpy as np
from brian2.core.names import Nameable
from brian2.core.variables import Variables
from brian2.groups.group import VariableOwner
from brian2.units.allunits import second
from brian2.units.fundamentalunits import Quantity, check_units
from brian2.utils.logger import get_logger
__all__ = ["Clock", "defaultclock"]
logger = get_logger(__name__)
def check_dt(new_dt, old_dt, target_t):
    """
    Check that the target time can be represented equally well with the new
    dt.

    Parameters
    ----------
    new_dt : float
        The new dt value
    old_dt : float
        The old dt value
    target_t : float
        The target time

    Raises
    ------
    ValueError
        If using the new dt value would lead to a difference in the target
        time of more than `Clock.epsilon_dt` times ``new_dt`` (by default,
        0.01% of the new dt).

    Examples
    --------
    >>> from brian2 import *
    >>> check_dt(float(17*ms), float(0.1*ms), float(0*ms)) # For t=0s, every dt is fine
    >>> check_dt(float(0.05*ms), float(0.1*ms), float(10*ms)) # t=10*ms can be represented with the new dt
    >>> check_dt(float(0.2*ms), float(0.1*ms), float(10.1*ms)) # t=10.1ms cannot be represented with dt=0.2ms # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: Cannot set dt from 100. us to 200. us, the time 10.1 ms is not a multiple of 200. us.
    """
    # Snap the target time onto both grids; if the two rounded times disagree
    # by more than the tolerance (relative to the new dt), the target time is
    # not a multiple of the new dt.
    on_old_grid = np.int64(np.round(target_t / old_dt)) * old_dt
    on_new_grid = np.int64(np.round(target_t / new_dt)) * new_dt
    if abs(on_new_grid - on_old_grid) / new_dt > Clock.epsilon_dt:
        raise ValueError(
            "Cannot set dt from {old} to {new}, the "
            "time {t} is not a multiple of {new}.".format(
                old=str(old_dt * second),
                new=str(new_dt * second),
                t=str(target_t * second),
            )
        )
class Clock(VariableOwner):
    """
    An object that holds the simulation time and the time step.

    Parameters
    ----------
    dt : float
        The time step of the simulation as a float
    name : str, optional
        An explicit name, if not specified gives an automatically generated name

    Notes
    -----
    Clocks are run in the same `Network.run` iteration if `~Clock.t` is the
    same. The condition for two
    clocks to be considered as having the same time is
    ``abs(t1-t2)<epsilon*abs(t1)``, a standard test for equality of floating
    point values. The value of ``epsilon`` is ``1e-14``.
    """

    def __init__(self, dt, name="clock*"):
        # We need a name right away because some devices (e.g. cpp_standalone)
        # need a name for the object when creating the variables
        Nameable.__init__(self, name=name)
        # Holds the previous dt after a dt change until the next call to
        # _set_t_update_dt validates the change (see check_dt).
        self._old_dt = None
        # The clock's state (timestep, t, dt, N) is stored in brian2
        # Variables rather than plain attributes so that generated code
        # (e.g. standalone devices) can access it.
        self.variables = Variables(self)
        self.variables.add_array(
            "timestep", size=1, dtype=np.int64, read_only=True, scalar=True
        )
        self.variables.add_array(
            "t",
            dimensions=second.dim,
            size=1,
            dtype=np.float64,
            read_only=True,
            scalar=True,
        )
        self.variables.add_array(
            "dt",
            dimensions=second.dim,
            size=1,
            values=float(dt),
            dtype=np.float64,
            read_only=True,
            constant=True,
            scalar=True,
        )
        self.variables.add_constant("N", value=1)
        self._enable_group_attributes()
        self.dt = dt
        logger.diagnostic(f"Created clock {self.name} with dt={self.dt}")

    @check_units(t=second)
    def _set_t_update_dt(self, target_t=0 * second):
        """
        Set the clock's time to ``target_t``, first verifying that a pending
        dt change (if any) is compatible with that time, i.e. ``target_t``
        still lies on the new dt grid (see `check_dt`).
        """
        new_dt = self.dt_
        old_dt = self._old_dt
        target_t = float(target_t)
        if old_dt is not None and new_dt != old_dt:
            self._old_dt = None
            # Only allow a new dt which allows to correctly set the new time step
            check_dt(new_dt, old_dt, target_t)
        new_timestep = self._calc_timestep(target_t)
        # Since these attributes are read-only for normal users, we have to
        # update them via the variables object directly
        self.variables["timestep"].set_value(new_timestep)
        self.variables["t"].set_value(new_timestep * new_dt)
        logger.diagnostic(f"Setting Clock {self.name} to t={self.t}, dt={self.dt}")

    def _calc_timestep(self, target_t):
        """
        Calculate the integer time step for the target time. If it cannot be
        exactly represented (up to 0.01% of dt), round up.

        Parameters
        ----------
        target_t : float
            The target time in seconds

        Returns
        -------
        timestep : int
            The target time in integers (based on dt)
        """
        new_i = np.int64(np.round(target_t / self.dt_))
        new_t = new_i * self.dt_
        if new_t == target_t or np.abs(new_t - target_t) / self.dt_ <= Clock.epsilon_dt:
            new_timestep = new_i
        else:
            # Not representable within tolerance: round up, so that repeated
            # runs never re-execute a time step that was already simulated.
            new_timestep = np.int64(np.ceil(target_t / self.dt_))
        return new_timestep

    def __repr__(self):
        return f"Clock(dt={self.dt!r}, name={self.name!r})"

    def _get_dt_(self):
        # Unitless float value of dt, read from the underlying Variable.
        return self.variables["dt"].get_value().item()

    @check_units(dt_=1)
    def _set_dt_(self, dt_):
        # Remember the old dt so that the next _set_t_update_dt call can
        # validate the change against the target time.
        self._old_dt = self._get_dt_()
        self.variables["dt"].set_value(dt_)

    @check_units(dt=second)
    def _set_dt(self, dt):
        # Quantity-typed setter; delegates to the unitless setter.
        self._set_dt_(float(dt))

    dt = property(
        fget=lambda self: Quantity(self.dt_, dim=second.dim),
        fset=_set_dt,
        doc="""The time step of the simulation in seconds.""",
    )

    dt_ = property(
        fget=_get_dt_,
        fset=_set_dt_,
        doc="""The time step of the simulation as a float (in seconds)""",
    )

    @check_units(start=second, end=second)
    def set_interval(self, start, end):
        """
        set_interval(self, start, end)

        Set the start and end time of the simulation.

        Sets the start and end value of the clock precisely if
        possible (using epsilon) or rounding up if not. This assures that
        multiple calls to `Network.run` will not re-run the same time step.
        """
        self._set_t_update_dt(target_t=start)
        end = float(end)
        self._i_end = self._calc_timestep(end)
        if self._i_end > 2**40:
            # Beyond ~1e12 steps, a float64 time can no longer represent
            # every multiple of dt exactly, hence the warning below.
            logger.warn(
                "The end time of the simulation has been set to "
                f"{str(end*second)}, which based on the dt value of "
                f"{str(self.dt)} means that {self._i_end} "
                "time steps will be simulated. This can lead to "
                "numerical problems, e.g. the times t will not "
                "correspond to exact multiples of "
                "dt.",
                "many_timesteps",
            )

    #: The relative difference for times (in terms of dt) so that they are
    #: considered identical.
    epsilon_dt = 1e-4
class DefaultClockProxy:
    """
    Method proxy to access the defaultclock of the currently active device
    """

    def __getattr__(self, name):
        # Answer the proxy-detection attribute before touching any device
        # machinery (this also avoids importing during interpreter teardown
        # or module setup when the attribute is merely probed).
        if name == "_is_proxy":
            return True
        # Imported lazily: brian2.devices.device imports this module, so a
        # top-level import would create a circular import.
        from brian2.devices.device import active_device

        return getattr(active_device.defaultclock, name)

    def __setattr__(self, key, value):
        # All attribute writes are forwarded to the active device's
        # defaultclock; the proxy itself stores no state.
        from brian2.devices.device import active_device

        setattr(active_device.defaultclock, key, value)
#: The standard clock, used for objects that do not specify any clock or dt
defaultclock = DefaultClockProxy()
/Cubane-1.0.11.tar.gz/Cubane-1.0.11/cubane/backend/static/cubane/backend/tinymce/js/tinymce/plugins/toc/plugin.min.js | !function(){"use strict";var t,e,n=tinymce.util.Tools.resolve("tinymce.PluginManager"),o=tinymce.util.Tools.resolve("tinymce.dom.DOMUtils"),r=tinymce.util.Tools.resolve("tinymce.util.I18n"),i=tinymce.util.Tools.resolve("tinymce.util.Tools"),c=function(t){return t.getParam("toc_class","mce-toc")},l=function(t){var e=t.getParam("toc_header","h2");return/^h[1-6]$/.test(e)?e:"h2"},a=function(t){var e=parseInt(t.getParam("toc_depth","3"),10);return e>=1&&e<=9?e:3},d=(t="mcetoc_",e=0,function(){var n=(new Date).getTime().toString(32);return t+n+(e++).toString(32)}),u=function(t){var e=c(t),n=l(t),o=function(t){var e,n=[];for(e=1;e<=t;e++)n.push("h"+e);return n.join(",")}(a(t)),r=t.$(o);return r.length&&/^h[1-9]$/i.test(n)&&(r=r.filter(function(n,o){return!t.dom.hasClass(o.parentNode,e)})),i.map(r,function(e){return{id:e.id?e.id:d(),level:parseInt(e.nodeName.replace(/^H/i,""),10),title:t.$.text(e),element:e}})},s=function(t){var e,n,i,c,a,d,s,f="",m=u(t),v=function(t){var e,n=9;for(e=0;e<t.length;e++)if(t[e].level<n&&(n=t[e].level),1===n)return n;return n}(m)-1;if(!m.length)return"";for(f+=(a=l(t),d=r.translate("Table of Contents"),s="</"+a+">","<"+a+' contenteditable="true">'+o.DOM.encode(d)+s),e=0;e<m.length;e++){if((i=m[e]).element.id=i.id,c=m[e+1]&&m[e+1].level,v===i.level)f+="<li>";else for(n=v;n<i.level;n++)f+="<ul><li>";if(f+='<a href="#'+i.id+'">'+i.title+"</a>",c!==i.level&&c)for(n=i.level;n>c;n--)f+="</li></ul><li>";else f+="</li>",c||(f+="</ul>");v=i.level}return f},f=function(t){var e=c(t),n=t.$("."+e);n.length&&t.undoManager.transact(function(){n.html(s(t))})},m={hasHeaders:function(t){return u(t).length>0},insertToc:function(t){var e,n,o,r,i=c(t),l=t.$("."+i);o=t,!(r=l).length||o.dom.getParents(r[0],".mce-offscreen-selection").length>0?t.insertContent((n=s(e=t),'<div class="'+e.dom.encode(c(e))+'" 
contenteditable="false">'+n+"</div>")):f(t)},updateToc:f},v=function(t){t.addCommand("mceInsertToc",function(){m.insertToc(t)}),t.addCommand("mceUpdateToc",function(){m.updateToc(t)})},h=function(t){var e=t.$,n=c(t);t.on("PreProcess",function(t){var o=e("."+n,t.node);o.length&&(o.removeAttr("contentEditable"),o.find("[contenteditable]").removeAttr("contentEditable"))}),t.on("SetContent",function(){var t=e("."+n);t.length&&(t.attr("contentEditable",!1),t.children(":first-child").attr("contentEditable",!0))})},g=function(t){return function(e){var n=e.control;t.on("LoadContent SetContent change",function(){n.disabled(t.readonly||!m.hasHeaders(t))})}},T=function(t){var e;t.addButton("toc",{tooltip:"Table of Contents",cmd:"mceInsertToc",icon:"toc",onPostRender:g(t)}),t.addButton("tocupdate",{tooltip:"Update",cmd:"mceUpdateToc",icon:"reload"}),t.addMenuItem("toc",{text:"Table of Contents",context:"insert",cmd:"mceInsertToc",onPostRender:g(t)}),t.addContextToolbar((e=t,function(t){return t&&e.dom.is(t,"."+c(e))&&e.getBody().contains(t)}),"tocupdate")};n.add("toc",function(t){v(t),T(t),h(t)})}(); | PypiClean |
# /FreeClimb-4.5.0-py3-none-any.whl/freeclimb/model/redirect.py
import re  # noqa: F401
import sys # noqa: F401
from freeclimb.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from freeclimb.exceptions import ApiAttributeError
def lazy_import():
    # Imports are deferred to call time because the generated model modules
    # reference each other through allOf/oneOf composition, which would cause
    # circular imports at module load time. Each class is published into this
    # module's globals so the model machinery can resolve it by name.
    from freeclimb.model.add_to_conference import AddToConference
    from freeclimb.model.create_conference import CreateConference
    from freeclimb.model.dequeue import Dequeue
    from freeclimb.model.enqueue import Enqueue
    from freeclimb.model.get_digits import GetDigits
    from freeclimb.model.get_speech import GetSpeech
    from freeclimb.model.hangup import Hangup
    from freeclimb.model.out_dial import OutDial
    from freeclimb.model.park import Park
    from freeclimb.model.pause import Pause
    from freeclimb.model.percl_command import PerclCommand
    from freeclimb.model.play import Play
    from freeclimb.model.play_early_media import PlayEarlyMedia
    from freeclimb.model.record_utterance import RecordUtterance
    from freeclimb.model.redirect import Redirect
    from freeclimb.model.redirect_all_of import RedirectAllOf
    from freeclimb.model.reject import Reject
    from freeclimb.model.remove_from_conference import RemoveFromConference
    from freeclimb.model.say import Say
    from freeclimb.model.send_digits import SendDigits
    from freeclimb.model.set_listen import SetListen
    from freeclimb.model.set_talk import SetTalk
    from freeclimb.model.sms import Sms
    from freeclimb.model.start_record_call import StartRecordCall
    from freeclimb.model.terminate_conference import TerminateConference
    from freeclimb.model.unpark import Unpark
    globals()['AddToConference'] = AddToConference
    globals()['CreateConference'] = CreateConference
    globals()['Dequeue'] = Dequeue
    globals()['Enqueue'] = Enqueue
    globals()['GetDigits'] = GetDigits
    globals()['GetSpeech'] = GetSpeech
    globals()['Hangup'] = Hangup
    globals()['OutDial'] = OutDial
    globals()['Park'] = Park
    globals()['Pause'] = Pause
    globals()['PerclCommand'] = PerclCommand
    globals()['Play'] = Play
    globals()['PlayEarlyMedia'] = PlayEarlyMedia
    globals()['RecordUtterance'] = RecordUtterance
    globals()['Redirect'] = Redirect
    globals()['RedirectAllOf'] = RedirectAllOf
    globals()['Reject'] = Reject
    globals()['RemoveFromConference'] = RemoveFromConference
    globals()['Say'] = Say
    globals()['SendDigits'] = SendDigits
    globals()['SetListen'] = SetListen
    globals()['SetTalk'] = SetTalk
    globals()['Sms'] = Sms
    globals()['StartRecordCall'] = StartRecordCall
    globals()['TerminateConference'] = TerminateConference
    globals()['Unpark'] = Unpark
class Redirect(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # This model has no enum-restricted attributes.
    allowed_values = {
    }

    # This model has no validated (length/range/regex) attributes.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'action_url': (str,),  # noqa: E501
            'command': (str,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This composed model does not itself discriminate onto subclasses.
        return None

    # Maps pythonic attribute names to the JSON keys used in API payloads.
    attribute_map = {
        'action_url': 'actionUrl',  # noqa: E501
        'command': 'command',  # noqa: E501
    }

    read_only_vars = {
    }

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """Redirect - a model defined in OpenAPI

        Keyword Args:
            action_url (str): URL to request a new PerCL script to continue with the current Call's processing. When `Redirect` invokes the `actionUrl`, an `inbound` Webhook is sent. This request therefore looks identical to the initial request (made to the `voiceUrl` of the number that was called) for an inbound Call.
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            command (str): Name of PerCL Command (this is automatically derived from mapping configuration and should not be manually supplied in any arguments). [optional]  # noqa: E501
        """

        # Pop the framework-internal keyword arguments before validation.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        # Distribute the remaining kwargs over the composed (allOf) schemas.
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]

        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)

        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """Redirect - a model defined in OpenAPI

        Keyword Args:
            action_url (str): URL to request a new PerCL script to continue with the current Call's processing. When `Redirect` invokes the `actionUrl`, an `inbound` Webhook is sent. This request therefore looks identical to the initial request (made to the `voiceUrl` of the number that was called) for an inbound Call.
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            command (str): Name of PerCL Command (this is automatically derived from mapping configuration and should not be manually supplied in any arguments). [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]

        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, direct construction rejects writes
            # to read-only attributes.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")

    @cached_property
    def command():
        # Derive this model's discriminator value ('Redirect' -> command name)
        # from the PerclCommand mapping configuration.
        # NOTE(review): generated code — 'mapping == None' (PEP 8 prefers
        # 'is None') and 'Redirect.__class__.name' on the error path look
        # suspicious, but are left untouched as regenerating would revert
        # any manual edits.
        mappings = PerclCommand.discriminator['command']
        mapping = next((mapping for mapping,schema in mappings.items() if schema == Redirect), None)
        if mapping == None:
            raise ApiAttributeError("{0} has no mapping '{1}'".format(Redirect.__class__.name, 'command'))
        return mapping

    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
            'anyOf': [
            ],
            'allOf': [
                PerclCommand,
                RedirectAllOf,
            ],
            'oneOf': [
            ],
        }
# /HEBO-0.3.2-py3-none-any.whl/hebo/models/boosting/catboost.py
# This program is free software; you can redistribute it and/or modify it under
# the terms of the MIT license.
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the MIT License for more details.
import torch
from catboost import CatBoostRegressor, Pool, FeaturesData
from torch import FloatTensor, LongTensor
import numpy as np
from ..base_model import BaseModel
from ..util import filter_nan
class CatBoost(BaseModel):
    """
    CatBoost gradient-boosted regression wrapped in the HEBO ``BaseModel``
    interface, using virtual ensembles for predictive uncertainty.
    """

    def __init__(self, num_cont, num_enum, num_out, **conf):
        super().__init__(num_cont, num_enum, num_out, **conf)
        get = self.conf.get
        self.num_epochs = get('num_epochs', 100)  # maximum number of trees
        self.lr = get('lr', 0.2)
        self.depth = get('depth', 10)  # recommended [1, 10]
        self.loss_function = get('loss_function', 'RMSEWithUncertainty')
        self.posterior_sampling = get('posterior_sampling', True)
        self.verbose = get('verbose', False)
        self.random_seed = get('random_seed', 42)
        self.num_ensembles = get('num_ensembles', 10)
        # Each virtual ensemble member needs at least two trees.
        self.num_epochs = max(self.num_epochs, 2 * self.num_ensembles)
        self.model = CatBoostRegressor(
            iterations=self.num_epochs,
            learning_rate=self.lr,
            depth=self.depth,
            loss_function=self.loss_function,
            posterior_sampling=self.posterior_sampling,
            verbose=self.verbose,
            random_seed=self.random_seed,
            allow_writing_files=False,
        )

    def xtrans(self, Xc: FloatTensor, Xe: LongTensor) -> FeaturesData:
        """Pack continuous and enum tensors into a CatBoost ``FeaturesData``."""
        if self.num_cont != 0:
            numeric = Xc.numpy().astype(np.float32)
        else:
            numeric = None
        if self.num_enum != 0:
            # CatBoost expects categorical features as object-dtype strings.
            categorical = Xe.numpy().astype(str).astype(object)
        else:
            categorical = None
        return FeaturesData(num_feature_data=numeric,
                            cat_feature_data=categorical)

    def fit(self, Xc: FloatTensor, Xe: LongTensor, y: FloatTensor):
        """Train the model, dropping rows whose targets are all NaN."""
        Xc, Xe, y = filter_nan(Xc, Xe, y, 'all')
        labels = y.numpy().reshape(-1)
        self.model.fit(Pool(data=self.xtrans(Xc=Xc, Xe=Xe), label=labels))

    def predict(self, Xc: FloatTensor, Xe: LongTensor) -> (FloatTensor, FloatTensor):
        """
        Return per-row predictive mean and variance, each of shape (n, 1).
        """
        pool = Pool(data=self.xtrans(Xc=Xc, Xe=Xe))
        ens = self.model.virtual_ensembles_predict(
            data=pool,
            prediction_type='TotalUncertainty',
            virtual_ensembles_count=self.num_ensembles)
        mu = ens[:, 0]
        # Columns 1 and 2 are the two uncertainty components reported by
        # CatBoost (presumably knowledge + data uncertainty — see the
        # CatBoost uncertainty docs); their sum is the total variance.
        sigma2 = ens[:, 1] + ens[:, 2]
        return torch.FloatTensor(mu.reshape([-1, 1])), \
               torch.FloatTensor(sigma2.reshape([-1, 1]))
# /Mathics-1.0.tar.gz/Mathics-1.0/mathics/core/definitions.py
from __future__ import unicode_literals
from __future__ import absolute_import
import six
import six.moves.cPickle as pickle
import os
import base64
import re
import bisect
from collections import defaultdict
from mathics.core.expression import Expression, Symbol, String, fully_qualified_symbol_name, strip_context
from mathics.core.characters import letters, letterlikes
# Wildcard characters allowed in symbol-name patterns: '*' matches any run of
# symbol characters, '@' a non-empty run (see get_matching_names below).
names_wildcards = "@*"
# A context-less symbol name: may not start with a digit, then one or more
# digits, '$', letters, letter-like characters, or the wildcards above.
base_names_pattern = r'((?![0-9])([0-9${0}{1}{2}])+)'.format(letters, letterlikes, names_wildcards)
# A fully qualified name: an optional leading context mark '`', then one or
# more base names separated by context marks.
full_names_pattern = r'(`?{0}(`{0})*)'.format(base_names_pattern)
def get_file_time(file):
    """Return the modification time of ``file``, or 0 if it cannot be read."""
    try:
        return os.path.getmtime(file)
    except OSError:
        return 0
def valuesname(name):
    """Map a fully qualified values-name to its short form: 'NValues' -> 'n'."""
    assert name.startswith('System`'), name
    # 'Messages' is the one values attribute without a 'Values' suffix.
    if name == 'System`Messages':
        return 'messages'
    # Strip the 'System`' prefix and the 'Values' suffix, then lowercase.
    return name[len('System`'):-len('Values')].lower()
class Definitions(object):
def __init__(self, add_builtin=False, builtin_filename=None):
super(Definitions, self).__init__()
self.builtin = {}
self.user = {}
self.definitions_cache = {}
self.lookup_cache = {}
self.proxy = defaultdict(set)
self.now = 0 # increments whenever something is updated
if add_builtin:
from mathics.builtin import modules, contribute
from mathics.core.evaluation import Evaluation
from mathics.settings import ROOT_DIR
loaded = False
if builtin_filename is not None:
builtin_dates = [get_file_time(module.__file__)
for module in modules]
builtin_time = max(builtin_dates)
if get_file_time(builtin_filename) > builtin_time:
builtin_file = open(builtin_filename, 'rb')
self.builtin = pickle.load(builtin_file)
loaded = True
if not loaded:
contribute(self)
if builtin_filename is not None:
builtin_file = open(builtin_filename, 'wb')
pickle.dump(self.builtin, builtin_file, -1)
for root, dirs, files in os.walk(os.path.join(ROOT_DIR, 'autoload')):
for path in [os.path.join(root, f) for f in files if f.endswith('.m')]:
Expression('Get', String(path)).evaluate(Evaluation(self))
# Move any user definitions created by autoloaded files to
# builtins, and clear out the user definitions list. This
# means that any autoloaded definitions become shared
# between users and no longer disappear after a Quit[].
#
# Autoloads that accidentally define a name in Global`
# could cause confusion, so check for this.
#
for name in self.user:
if name.startswith('Global`'):
raise ValueError("autoload defined %s." % name)
self.builtin.update(self.user)
self.user = {}
self.clear_cache()
def clear_cache(self, name=None):
# the definitions cache (self.definitions_cache) caches (incomplete and complete) names -> Definition(),
# e.g. "xy" -> d and "MyContext`xy" -> d. we need to clear this cache if a Definition() changes (which
# would happen if a Definition is combined from a builtin and a user definition and some content in the
# user definition is updated) or if the lookup rules change and we could end up at a completely different
# Definition.
# the lookup cache (self.lookup_cache) caches what lookup_name() does. we only need to update this if some
# change happens that might change the result lookup_name() calculates. we do not need to change it if a
# Definition() changes.
# self.proxy keeps track of all the names we cache. if we need to clear the caches for only one name, e.g.
# 'MySymbol', then we need to be able to look up all the entries that might be related to it, e.g. 'MySymbol',
# 'A`MySymbol', 'C`A`MySymbol', and so on. proxy identifies symbols using their stripped name and thus might
# give us symbols in other contexts that are actually not affected. still, this is a safe solution.
if name is None:
self.definitions_cache = {}
self.lookup_cache = {}
self.proxy = defaultdict(set)
else:
definitions_cache = self.definitions_cache
lookup_cache = self.lookup_cache
tail = strip_context(name)
for k in self.proxy.pop(tail, []):
definitions_cache.pop(k, None)
lookup_cache.pop(k, None)
def clear_definitions_cache(self, name):
definitions_cache = self.definitions_cache
tail = strip_context(name)
for k in self.proxy.pop(tail, []):
definitions_cache.pop(k, None)
def last_changed(self, expr):
# timestamp for the most recently changed part of a given expression.
if isinstance(expr, Symbol):
symb = self.get_definition(expr.get_name(), only_if_exists=True)
if symb is None:
# symbol doesn't exist so it was never changed
return 0
try:
return symb.changed
except AttributeError:
# must be system symbol
symb.changed = 0
return 0
result = 0
head = expr.get_head()
head_changed = self.last_changed(head)
result = max(result, head_changed)
for leaf in expr.get_leaves():
leaf_changed = self.last_changed(leaf)
result = max(result, leaf_changed)
return result
def get_current_context(self):
    """Return the current context, i.e. the value of System`$Context.

    The fully qualified name 'System`$Context' is essential in this
    get_ownvalue() call: looking up a short name would re-enter this
    function and recurse forever.
    """
    ownvalue = self.get_ownvalue('System`$Context')
    context = ownvalue.replace.get_string_value()
    assert context is not None, "$Context somehow set to an invalid value"
    return context
def get_context_path(self):
    """Return $ContextPath as a plain list of context name strings."""
    ownvalue = self.get_ownvalue('System`$ContextPath')
    expr = ownvalue.replace
    assert expr.has_form('System`List', None)
    path = [leaf.get_string_value() for leaf in expr.leaves]
    assert all(c is not None for c in path)
    return path
def set_current_context(self, context):
    """Set System`$Context to *context* and invalidate all caches."""
    assert isinstance(context, six.string_types)
    new_value = String(context)
    self.set_ownvalue('System`$Context', new_value)
    self.clear_cache()
def set_context_path(self, context_path):
    """Set System`$ContextPath to the given list of context strings."""
    assert isinstance(context_path, list)
    assert all(isinstance(c, six.string_types) for c in context_path)
    leaves = [String(c) for c in context_path]
    self.set_ownvalue('System`$ContextPath',
                      Expression('System`List', *leaves))
    self.clear_cache()
def get_builtin_names(self):
    """Return the set of names that have builtin definitions."""
    return set(self.builtin.keys())
def get_user_names(self):
    """Return the set of names that have user definitions."""
    return set(self.user.keys())
def get_names(self):
    """Return all known symbol names, builtin and user-defined alike."""
    return self.get_builtin_names().union(self.get_user_names())
def get_accessible_contexts(self):
    """Return the contexts reachable through $Context or $ContextPath."""
    return set(self.get_context_path()) | {self.get_current_context()}
def get_matching_names(self, pattern):
    """
    Return a list of the symbol names matching a string pattern.

    A pattern containing a context mark (of the form
    "ctx_pattern`short_pattern") matches symbols whose context and
    short name individually match the two patterns. A pattern
    without a context mark matches symbols accessible through
    $Context and $ContextPath whose short names match the pattern.
    '*' matches any sequence of symbol characters or an empty
    string. '@' matches a non-empty sequence of symbol characters
    which aren't uppercase letters. In the context pattern, both
    '*' and '@' match context marks.
    """
    if re.match(full_names_pattern, pattern) is None:
        # The pattern contained characters which weren't allowed
        # in symbols and aren't valid wildcards. Hence, the
        # pattern can't match any symbols.
        return []
    # If we get here, there aren't any regexp metacharacters in
    # the pattern.
    if '`' in pattern:
        # Translate the context part separately: here '@' and '*'
        # are allowed to cross context marks.
        ctx_pattern, short_pattern = pattern.rsplit('`', 1)
        ctx_pattern = ((ctx_pattern + '`')
                       .replace('@', '[^A-Z`]+')
                       .replace('*', '.*')
                       .replace('$', r'\$'))
    else:
        short_pattern = pattern
        # start with a group matching the accessible contexts
        ctx_pattern = "(?:%s)" % "|".join(
            re.escape(c) for c in self.get_accessible_contexts())
    # Translate the short-name part: neither wildcard may cross a
    # context mark here ('*' -> [^`]*, '@' excludes '`' implicitly).
    short_pattern = (short_pattern
                     .replace('@', '[^A-Z]+')
                     .replace('*', '[^`]*')
                     .replace('$', r'\$'))
    regex = re.compile('^' + ctx_pattern + short_pattern + '$')
    return [name for name in self.get_names() if regex.match(name)]
def lookup_name(self, name):
    """
    Determine the full name (including context) for a symbol name.
    - If the name begins with a context mark, it's in the context
      given by $Context.
    - Otherwise, if it contains a context mark, it's already fully
      specified.
    - Otherwise, it doesn't contain a context mark: try $Context,
      then each element of $ContextPath, taking the first existing
      symbol.
    - Otherwise, it's a new symbol in $Context.
    """
    # NOTE: this cache is populated by get_definition(), not here.
    cached = self.lookup_cache.get(name, None)
    if cached is not None:
        return cached
    assert isinstance(name, six.string_types)
    # Bail out if the name we're being asked to look up is already
    # fully qualified.
    if fully_qualified_symbol_name(name):
        return name
    current_context = self.get_current_context()
    if '`' in name:
        if name.startswith('`'):
            # leading context mark: relative to the current context
            return current_context + name.lstrip('`')
        return name
    # No context mark at all: prefer $Context, then fall back to the
    # first context on $ContextPath that already defines the symbol.
    with_context = current_context + name
    if not self.have_definition(with_context):
        for ctx in self.get_context_path():
            n = ctx + name
            if self.have_definition(n):
                return n
    # Nothing found anywhere: treat it as a new symbol in $Context.
    return with_context
def shorten_name(self, name_with_ctx):
    """Return the shortest spelling of *name_with_ctx* valid in scope.

    If the symbol lives in the current context or in a context on
    $ContextPath, the context prefix is dropped; otherwise the fully
    qualified name is returned unchanged.
    """
    if '`' not in name_with_ctx:
        return name_with_ctx

    def in_ctx(name, ctx):
        # name is directly inside ctx (no further context marks)
        return name.startswith(ctx) and '`' not in name[len(ctx):]

    # fix: the original called self.get_current_context() twice; hoist
    # the call so the value is computed once.
    current_context = self.get_current_context()
    if in_ctx(name_with_ctx, current_context):
        return name_with_ctx[len(current_context):]
    for ctx in self.get_context_path():
        if in_ctx(name_with_ctx, ctx):
            return name_with_ctx[len(ctx):]
    return name_with_ctx
def have_definition(self, name):
    """Return True when *name* resolves to an existing definition."""
    definition = self.get_definition(name, only_if_exists=True)
    return definition is not None
def get_definition(self, name, only_if_exists=False):
    """Return the (merged) Definition for *name*.

    A symbol may have a user definition, a builtin definition, or
    both; when both exist they are merged into a fresh Definition
    with the user's rules placed first in each rule list so they
    take precedence. Results are memoized in self.definitions_cache
    under the caller's spelling of the name, and the resolved full
    name is recorded in self.lookup_cache and self.proxy so the
    caches can later be invalidated per-symbol.

    When the symbol does not exist: return None if *only_if_exists*,
    otherwise a fresh empty Definition (which is NOT cached).
    """
    definition = self.definitions_cache.get(name, None)
    if definition is not None:
        return definition
    original_name = name
    name = self.lookup_name(name)
    user = self.user.get(name, None)
    builtin = self.builtin.get(name, None)
    if user is None and builtin is None:
        definition = None
    elif builtin is None:
        definition = user
    elif user is None:
        definition = builtin
    else:
        # Both exist: merge. The user's attributes win.
        if user:
            attributes = user.attributes
        elif builtin:
            # NOTE(review): unreachable — Definition defines no
            # __bool__/__len__, so instances are always truthy and the
            # branch above always wins. Kept verbatim.
            attributes = builtin.attributes
        else:
            attributes = set()
        if not user:
            user = Definition(name=name)
        if not builtin:
            builtin = Definition(name=name)
        # user options override builtin options of the same name
        options = builtin.options.copy()
        options.update(user.options)
        # format rules: user rules are appended after builtin ones
        formatvalues = builtin.formatvalues.copy()
        for form, rules in six.iteritems(user.formatvalues):
            if form in formatvalues:
                formatvalues[form].extend(rules)
            else:
                formatvalues[form] = rules
        definition = Definition(name=name,
                                ownvalues=user.ownvalues + builtin.ownvalues,
                                downvalues=user.downvalues + builtin.downvalues,
                                subvalues=user.subvalues + builtin.subvalues,
                                upvalues=user.upvalues + builtin.upvalues,
                                formatvalues=formatvalues,
                                messages=user.messages + builtin.messages,
                                attributes=attributes,
                                options=options,
                                nvalues=user.nvalues + builtin.nvalues,
                                defaultvalues=user.defaultvalues +
                                builtin.defaultvalues,
                                )
    if definition is not None:
        # cache under the caller's spelling, and remember it in proxy
        # so clear_cache(name) can find and drop these entries later
        self.proxy[strip_context(original_name)].add(original_name)
        self.definitions_cache[original_name] = definition
        self.lookup_cache[original_name] = name
    elif not only_if_exists:
        definition = Definition(name=name)
    return definition
def get_attributes(self, name):
    """Return the attribute set of *name*'s definition."""
    definition = self.get_definition(name)
    return definition.attributes
def get_ownvalues(self, name):
    """Return the ownvalue rule list of *name*'s definition."""
    definition = self.get_definition(name)
    return definition.ownvalues
def get_downvalues(self, name):
    """Return the downvalue rule list of *name*'s definition."""
    definition = self.get_definition(name)
    return definition.downvalues
def get_subvalues(self, name):
    """Return the subvalue rule list of *name*'s definition."""
    definition = self.get_definition(name)
    return definition.subvalues
def get_upvalues(self, name):
    """Return the upvalue rule list of *name*'s definition."""
    definition = self.get_definition(name)
    return definition.upvalues
def get_formats(self, name, format=''):
    """Return format rules for *name*: those registered under *format*
    plus the form-independent ones (key ''), in sorted order."""
    formatvalues = self.get_definition(name).formatvalues
    return sorted(formatvalues.get(format, []) + formatvalues.get('', []))
def get_nvalues(self, name):
    """Return the N-value rule list of *name*'s definition."""
    definition = self.get_definition(name)
    return definition.nvalues
def get_defaultvalues(self, name):
    """Return the default-value rule list of *name*'s definition."""
    definition = self.get_definition(name)
    return definition.defaultvalues
def get_value(self, name, pos, pattern, evaluation):
    """Apply the rules at position *pos* (e.g. 'own', 'down') of *name*
    to *pattern*, returning the first non-None result (or None)."""
    assert isinstance(name, six.string_types)
    assert '`' in name
    for rule in self.get_definition(name).get_values_list(valuesname(pos)):
        result = rule.apply(pattern, evaluation)
        if result is not None:
            return result
def get_user_definition(self, name, create=True):
    """Return the user Definition for *name*.

    If none exists yet, create one seeded with the builtin
    attributes (if any), unless *create* is false, in which case
    None is returned.
    """
    assert not isinstance(name, Symbol)
    existing = self.user.get(name)
    if existing:
        return existing
    if not create:
        return None
    builtin = self.builtin.get(name)
    attributes = builtin.attributes if builtin else set()
    self.user[name] = Definition(name=name, attributes=attributes)
    self.clear_cache(name)
    return self.user[name]
def mark_changed(self, definition):
    # Advance the global modification counter and stamp *definition*
    # with it; last_changed() compares these stamps.
    self.now += 1
    definition.changed = self.now
def reset_user_definition(self, name):
    # Drop the user-level definition of *name*; any builtin definition
    # of the same symbol remains reachable via get_definition().
    # Raises KeyError if no user definition exists.
    assert not isinstance(name, Symbol)
    fullname = self.lookup_name(name)
    del self.user[fullname]
    self.clear_cache(fullname)
    # TODO fix changed
def add_user_definition(self, name, definition):
    # Install *definition* as the user definition of *name*, replacing
    # any previous one, and timestamp it as freshly changed.
    assert not isinstance(name, Symbol)
    self.mark_changed(definition)
    fullname = self.lookup_name(name)
    self.user[fullname] = definition
    self.clear_cache(fullname)
def set_attribute(self, name, attribute):
    """Add *attribute* (fully qualified) to *name*'s user definition."""
    defn = self.get_user_definition(self.lookup_name(name))
    defn.attributes.add(attribute)
    self.mark_changed(defn)
    self.clear_definitions_cache(name)
def set_attributes(self, name, attributes):
    """Replace *name*'s whole attribute set with *attributes*."""
    defn = self.get_user_definition(self.lookup_name(name))
    defn.attributes = set(attributes)
    self.mark_changed(defn)
    self.clear_definitions_cache(name)
def clear_attribute(self, name, attribute):
    """Remove *attribute* from *name*'s attributes, if present."""
    defn = self.get_user_definition(self.lookup_name(name))
    # discard() is a no-op when the attribute is absent, matching the
    # original's guarded remove()
    defn.attributes.discard(attribute)
    self.mark_changed(defn)
    self.clear_definitions_cache(name)
def add_rule(self, name, rule, position=None):
    """Insert *rule* into *name*'s user definition.

    With *position* given, the rule goes into that explicit rule
    list; otherwise the definition decides from the rule's pattern.
    Returns whatever the underlying Definition call returns.
    """
    defn = self.get_user_definition(self.lookup_name(name))
    if position is None:
        outcome = defn.add_rule(rule)
    else:
        outcome = defn.add_rule_at(rule, position)
    self.mark_changed(defn)
    self.clear_definitions_cache(name)
    return outcome
def add_format(self, name, rule, form=''):
    """Register a format rule for *name* under one form, or several
    when *form* is a list/tuple of form names."""
    defn = self.get_user_definition(self.lookup_name(name))
    forms = form if isinstance(form, (tuple, list)) else [form]
    for f in forms:
        # setdefault creates the per-form rule list on first use
        insert_rule(defn.formatvalues.setdefault(f, []), rule)
    self.mark_changed(defn)
    self.clear_definitions_cache(name)
def add_nvalue(self, name, rule):
    """Attach a numeric-evaluation (N) rule to *name*."""
    defn = self.get_user_definition(self.lookup_name(name))
    defn.add_rule_at(rule, 'n')
    self.mark_changed(defn)
    self.clear_definitions_cache(name)
def add_default(self, name, rule):
    """Attach a Default[...] rule to *name*."""
    defn = self.get_user_definition(self.lookup_name(name))
    defn.add_rule_at(rule, 'default')
    self.mark_changed(defn)
    self.clear_definitions_cache(name)
def add_message(self, name, rule):
    """Attach a message rule to *name*."""
    defn = self.get_user_definition(self.lookup_name(name))
    defn.add_rule_at(rule, 'messages')
    self.mark_changed(defn)
    self.clear_definitions_cache(name)
def set_values(self, name, values, rules):
    """Replace the whole rule list named *values* (e.g. 'OwnValues')
    on *name*'s user definition with *rules*."""
    pos = valuesname(values)
    defn = self.get_user_definition(self.lookup_name(name))
    defn.set_values_list(pos, rules)
    self.mark_changed(defn)
    self.clear_definitions_cache(name)
def get_options(self, name):
    """Return the merged options dict of *name*'s definition."""
    full_name = self.lookup_name(name)
    return self.get_definition(full_name).options
def reset_user_definitions(self):
    # Forget every user definition; builtin definitions are untouched.
    self.user = {}
    self.clear_cache()
    # TODO changed
def get_user_definitions(self):
    """Serialize all user definitions to an ASCII string
    (base64-encoded pickle, protocol 2 for Py2/Py3 compatibility)."""
    pickled = pickle.dumps(self.user, protocol=2)
    if six.PY2:
        return base64.encodestring(pickled).decode('ascii')
    return base64.encodebytes(pickled).decode('ascii')
def set_user_definitions(self, definitions):
    """Restore user definitions from get_user_definitions() output.

    NOTE(review): pickle.loads() is only safe on trusted input; do not
    feed this externally supplied data.
    """
    if not definitions:
        self.user = {}
    else:
        decode = base64.decodestring if six.PY2 else base64.decodebytes
        self.user = pickle.loads(decode(definitions.encode('ascii')))
    self.clear_cache()
def get_ownvalue(self, name):
    """Return the first ownvalue rule of *name*, or None if it has none."""
    ownvalues = self.get_definition(self.lookup_name(name)).ownvalues
    return ownvalues[0] if ownvalues else None
def set_ownvalue(self, name, value):
    """Install OwnValues[name] = value and invalidate caches for it."""
    from .expression import Symbol
    from .rules import Rule
    full_name = self.lookup_name(name)
    self.add_rule(full_name, Rule(Symbol(full_name), value))
    self.clear_cache(full_name)
def set_options(self, name, options):
    """Replace the options dict on *name*'s user definition."""
    defn = self.get_user_definition(self.lookup_name(name))
    defn.options = options
    self.mark_changed(defn)
    self.clear_definitions_cache(name)
def unset(self, name, expr):
    """Remove the rule whose left-hand side is *expr* from *name*'s
    user definition; return True when a rule was actually removed."""
    defn = self.get_user_definition(self.lookup_name(name))
    removed = defn.remove_rule(expr)
    self.mark_changed(defn)
    self.clear_definitions_cache(name)
    return removed
def get_config_value(self, name, default=None):
    'Infinity -> None, otherwise returns integer.'
    ownvalues = self.get_definition(name).ownvalues
    if not ownvalues:
        return default
    try:
        value = ownvalues[0].replace
    except AttributeError:
        # rule has no replacement part: treat as unset/unbounded
        return None
    if value.get_name() == 'System`Infinity' or value.has_form(
            'DirectedInfinity', 1):
        return None
    return int(value.get_int_value())
def set_config_value(self, name, new_value):
    # Store an integer configuration value as the ownvalue of *name*.
    from mathics.core.expression import Integer
    self.set_ownvalue(name, Integer(new_value))
def set_line_no(self, line_no):
    # Set $Line, the current input line counter.
    self.set_config_value('$Line', line_no)
def get_line_no(self):
    # Current value of $Line, defaulting to 0 when unset.
    return self.get_config_value('$Line', 0)
def get_history_length(self):
    """Return $HistoryLength clamped to at most 100.

    None (meaning Infinity) is also clamped to 100 so history cannot
    grow without bound.
    """
    history_length = self.get_config_value('$HistoryLength', 100)
    if history_length is None or history_length > 100:
        return 100
    return history_length
def get_tag_position(pattern, name):
    """Classify where a rule whose LHS is *pattern* belongs for the
    symbol *name*: 'own', 'down', 'sub', 'up', or None when *name*
    is not a tag of *pattern*."""
    if pattern.get_name() == name:
        return 'own'
    if pattern.is_atom():
        return None
    head_name = pattern.get_head_name()
    if head_name == name:
        return 'down'
    if head_name == 'System`Condition' and len(pattern.leaves) > 0:
        # a Condition wraps the real pattern; classify that instead
        return get_tag_position(pattern.leaves[0], name)
    if pattern.get_lookup_name() == name:
        return 'sub'
    for leaf in pattern.leaves:
        if leaf.get_lookup_name() == name:
            return 'up'
    return None
def insert_rule(values, rule):
    """Insert *rule* into the sorted rule list *values* in place,
    first removing any existing rule with the same pattern.

    insort_left guarantees that among equal-sorting rules the newest
    one comes first and therefore takes precedence. See DownValues[].
    """
    match = next((i for i, existing in enumerate(values)
                  if existing.pattern.same(rule.pattern)), None)
    if match is not None:
        del values[match]
    bisect.insort_left(values, rule)
class Definition(object):
    """Everything attached to one symbol: its rule lists (ownvalues,
    downvalues, subvalues, upvalues, nvalues, defaultvalues), format
    rules keyed by form name, messages, a set of fully qualified
    attributes, and an options dict.
    """

    def __init__(self, name, rules=None, ownvalues=None, downvalues=None,
                 subvalues=None, upvalues=None, formatvalues=None,
                 messages=None, attributes=(), options=None, nvalues=None,
                 defaultvalues=None, builtin=None):
        super(Definition, self).__init__()
        self.name = name
        if rules is None:
            rules = []
        if ownvalues is None:
            ownvalues = []
        if downvalues is None:
            downvalues = []
        if subvalues is None:
            subvalues = []
        if upvalues is None:
            upvalues = []
        if formatvalues is None:
            formatvalues = {}
        if options is None:
            options = {}
        if nvalues is None:
            nvalues = []
        if defaultvalues is None:
            defaultvalues = []
        if messages is None:
            messages = []
        self.ownvalues = ownvalues
        self.downvalues = downvalues
        self.subvalues = subvalues
        self.upvalues = upvalues
        # uncategorized rules are routed by their pattern's tag position
        for rule in rules:
            self.add_rule(rule)
        self.formatvalues = dict((name, list)
                                 for name, list in formatvalues.items())
        self.messages = messages
        self.attributes = set(attributes)
        for a in self.attributes:
            assert '`' in a, "%s attribute %s has no context" % (name, a)
        self.options = options
        self.nvalues = nvalues
        self.defaultvalues = defaultvalues
        self.builtin = builtin

    def get_values_list(self, pos):
        """Return the rule list stored at position *pos* ('own', 'down',
        'sub', 'up', 'n', 'default', or 'messages')."""
        assert pos.isalpha()
        if pos == 'messages':
            return self.messages
        return getattr(self, '%svalues' % pos)

    def set_values_list(self, pos, rules):
        """Replace the rule list at position *pos* with *rules*."""
        assert pos.isalpha()
        if pos == 'messages':
            self.messages = rules
        else:
            setattr(self, '%svalues' % pos, rules)

    def add_rule_at(self, rule, position):
        """Insert *rule* into the rule list at *position*."""
        values = self.get_values_list(position)
        insert_rule(values, rule)
        return True

    def add_rule(self, rule):
        """Insert *rule* into the list its pattern's tag position implies;
        return False when no position could be determined."""
        pos = get_tag_position(rule.pattern, self.name)
        if pos:
            return self.add_rule_at(rule, pos)
        return False

    def remove_rule(self, lhs):
        """Remove the rule whose pattern expression equals *lhs*; return
        True when something was removed."""
        position = get_tag_position(lhs, self.name)
        if position:
            values = self.get_values_list(position)
            for index, existing in enumerate(values):
                if existing.pattern.expr.same(lhs):
                    del values[index]
                    return True
        return False

    def __repr__(self):
        s = '<Definition: name: {}, downvalues: {}, formats: {}, attributes: {}>'.format(
            self.name, self.downvalues, self.formatvalues, self.attributes)
        # Fix: the source line was corrupted with stray text, and
        # str.encode() returns bytes on Python 3, which makes repr()
        # raise TypeError; decode back to str (unicode_escape output is
        # pure ASCII, so this is lossless).
        return s.encode('unicode_escape').decode('ascii')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.