id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7 values |
|---|---|---|
/MnemoPwd-1.2.1-py3-none-any.whl/mnemopwd/client/uilayer/uicomponents/BaseWindow.py |
# Copyright (c) 2016, Thierry Lemeunier <thierry at lemeunier dot net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import curses
import curses.ascii
from .Component import Component
class BaseWindow(Component):
    """
    A window without border and without title. It can contain other components.

    Key handling:
    - KEY_TAB, KEY_LEFT, KEY_RIGHT, KEY_UP, KEY_DOWN: navigate between components
    - ENTER: exit an editable component or execute an actionable component
    - shortcuts (Ctrl + key): execute an actionable component
    - ESC: close the window
    - other keys: start edition of an editable component

    Attribute(s):
    - h: the window height
    - w: the window width
    - window: the curses window
    - items: the ordered list of inner components
    - shortcuts: the ordered list of shortcut keys
    - index: the actual inner component that gets focus
    - menu: the window works like a menu (KEY_LEFT and KEY_RIGHT close the menu)
    - modal: the window is modal window or not
    """

    def __init__(self, parent, h, w, y, x, menu=False, modal=False):
        """Create base window"""
        Component.__init__(self, parent, h, w, y, x, modal=modal)
        self.items = []        # inner components, in navigation order
        self.shortcuts = []    # shortcut letters, parallel to self.items
        self.index = 0         # index of the component currently holding focus
        self.window.keypad(1)  # let curses translate escape sequences (arrow keys, ...)
        self.menu = menu

    def start(self, timeout=-1):
        """Start interaction loop of the window.

        Returns one of:
        - 'timeout' / 'reset': getch timed out (without / with prior key press)
        - 1 / -1: menu mode only, navigation moved past the last / first item
        - False: the window was cancelled (ESC on a modal) or a menu was left
          via KEY_LEFT / KEY_RIGHT
        - a component: the item validated with ENTER or a Ctrl shortcut
        """
        self.window.timeout(timeout)  # timeout for getch function
        nbitems = len(self.items)
        if nbitems > 0:
            self.items[self.index].focus_on()  # Focus on component at index
        reset = False  # Reset timeout
        while True:
            c = self.window.getch()
            # Timeout ?
            if c == -1 and reset is False:
                return 'timeout'
            elif c == -1 and reset is True:
                return 'reset'
            elif c != -1:
                reset = True
            # Next component
            if c in [curses.KEY_DOWN, curses.ascii.TAB]:
                self.items[self.index].focus_off()
                if self.menu and (self.index + 1) >= nbitems:
                    return 1
                self.index = (self.index + 1) % nbitems
                if self.items[self.index].is_editable() or \
                   self.items[self.index].is_actionable():
                    self.items[self.index].focus_on()
                else:
                    # Component cannot take focus: re-inject the key to skip it
                    curses.ungetch(curses.KEY_DOWN)
            # Previous component
            elif c in [curses.KEY_UP]:
                self.items[self.index].focus_off()
                if self.menu and (self.index - 1) < 0:
                    return -1
                self.index = (self.index - 1) % nbitems
                if self.items[self.index].is_editable() or \
                   self.items[self.index].is_actionable():
                    self.items[self.index].focus_on()
                else:
                    curses.ungetch(curses.KEY_UP)
            # Previous actionable component (or close the menu to the left)
            elif c in [curses.KEY_LEFT] and self.items[self.index].is_actionable():
                if self.menu:
                    curses.ungetch(curses.KEY_LEFT)
                    return False
                else:
                    curses.ungetch(curses.KEY_UP)
            # Next actionable component (or close the menu to the right)
            elif c in [curses.KEY_RIGHT] and \
                    self.items[self.index].is_actionable():
                if self.menu:
                    curses.ungetch(curses.KEY_RIGHT)
                    return False
                else:
                    curses.ungetch(curses.KEY_DOWN)
            # Validation
            elif c in [curses.ascii.CR]:
                return self.items[self.index]
            # Cancel
            elif c in [curses.ascii.ESC] and self.modal:
                self.items[self.index].focus_off()
                return False
            # Shortcut keys
            elif curses.ascii.isctrl(c):
                c += 64  # Add 64 to get upper key
                for number, shortcut in enumerate(self.shortcuts):
                    if shortcut == chr(c) and \
                            self.items[number].is_actionable():
                        self.items[self.index].focus_off()
                        self.index = number
                        self.items[self.index].focus_on()
                        return self.items[self.index]
            # Other case : start edition of editable components
            else:
                if self.items[self.index].is_editable():
                    self.items[self.index].focus_off()
                    curses.ungetch(c)  # re-inject the key so edit() sees it
                    self.items[self.index].edit()

    def redraw(self):
        """See the mother class"""
        for item in self.items:
            item.redraw()
/observations-0.1.4.tar.gz/observations-0.1.4/observations/r/bomregions2011.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def bomregions2011(path):
    """Australian and Related Historical Annual Climate Data, by region.

    Loads the DAAG `bomregions2011` dataset: area-weighted regional
    temperature and rainfall averages for Australia, the annual Southern
    Oscillation Index (SOI), CO2 concentration series and sunspot counts,
    for the years from 1900. The CSV is downloaded on first use and cached
    under `path`.

    Args:
      path: str.
        Path to directory which either stores file or otherwise file will
        be downloaded and extracted there.
        Filename is `bomregions2011.csv`.

    Returns:
      Tuple of np.ndarray `x_train` with 112 rows and 22 columns and
      dictionary `metadata` of column headers (feature names).
    """
    import pandas as pd

    path = os.path.expanduser(path)
    filename = 'bomregions2011.csv'
    csv_path = os.path.join(path, filename)

    # Download only when the CSV is not already cached locally.
    if not os.path.exists(csv_path):
        url = 'http://dustintran.com/data/r/DAAG/bomregions2011.csv'
        maybe_download_and_extract(path, url,
                                   save_file_name='bomregions2011.csv',
                                   resume=False)

    data = pd.read_csv(csv_path, index_col=0, parse_dates=True)
    return data.values, {'columns': data.columns}
/Flask-AppBuilder-redirect-2.1.13.tar.gz/Flask-AppBuilder-redirect-2.1.13/flask_appbuilder/api/convert.py | from marshmallow import fields
from marshmallow_enum import EnumField
from marshmallow_sqlalchemy import field_for
from marshmallow_sqlalchemy.schema import ModelSchema
class TreeNode:
    """A node of the one-level column tree: a payload plus an ordered
    list of child nodes."""

    def __init__(self, data):
        self.data = data
        self.childs = []

    def __repr__(self):
        # "<data>.<children>", e.g. "name.[]" for a leaf.
        return f"{self.data}.{self.childs}"
class Tree:
    """
    Simplistic one level Tree
    """

    def __init__(self):
        # Sentinel root; real data lives in root.childs.
        self.root = TreeNode('+')

    def add(self, data):
        """Append a new top-level node holding ``data``."""
        self.root.childs.append(TreeNode(data))

    def add_child(self, parent, data):
        """Append ``data`` under the top-level node ``parent``; the parent
        node is created first when it does not exist yet."""
        child = TreeNode(data)
        for candidate in self.root.childs:
            if candidate.data == parent:
                candidate.childs.append(child)
                return
        branch = TreeNode(parent)
        self.root.childs.append(branch)
        branch.childs.append(child)

    def __repr__(self):
        return "".join(str(node) for node in self.root.childs)
def columns2Tree(columns):
    """Build a one-level Tree from column names; a dotted name such as
    ``rel.attr`` becomes a child ``attr`` under the top-level node ``rel``."""
    tree = Tree()
    for column in columns:
        if '.' not in column:
            tree.add(column)
        else:
            parts = column.split('.')
            tree.add_child(parts[0], parts[1])
    return tree
class BaseModel2SchemaConverter(object):
    """Base interface for converters that turn ORM models into
    marshmallow schemas."""

    def __init__(self, datamodel, validators_columns):
        """
        :param datamodel: SQLAInterface
        :param validators_columns: mapping of column name to validator
        """
        self.datamodel = datamodel
        self.validators_columns = validators_columns

    def convert(self, columns, **kwargs):
        """Produce a schema for ``columns``; no-op in the base class."""
        pass
class Model2SchemaConverter(BaseModel2SchemaConverter):
    """
    Class that converts Models to marshmallow Schemas
    """
    def __init__(self, datamodel, validators_columns):
        """
        :param datamodel: SQLAInterface
        :param validators_columns: mapping of column name to validator,
            appended to the generated field's validators
        """
        super(Model2SchemaConverter, self).__init__(datamodel, validators_columns)

    @staticmethod
    def _debug_schema(schema):
        # Development helper: dump the schema's declared fields to stdout.
        for k, v in schema._declared_fields.items():
            print(k, v)

    def _meta_schema_factory(self, columns, model, class_mixin):
        """
        Creates ModelSchema marshmallow-sqlalchemy
        :param columns: a list of columns to mix
        :param model: Model
        :param class_mixin: a marshmallow Schema to mix
        :return: ModelSchema
        """
        _model = model
        # Two class definitions because Meta.fields must be absent (not
        # empty) when no explicit column list is given.
        if columns:
            class MetaSchema(ModelSchema, class_mixin):
                class Meta:
                    model = _model
                    fields = columns
                    strict = True
                    sqla_session = self.datamodel.session
        else:
            class MetaSchema(ModelSchema, class_mixin):
                class Meta:
                    model = _model
                    strict = True
                    sqla_session = self.datamodel.session
        return MetaSchema

    def _column2field(self, datamodel, column, nested=True, enum_dump_by_name=False):
        """
        :param datamodel: SQLAInterface
        :param column: TreeNode column (childs are dotted column)
        :param nested: Boolean if will create nested fields
        :param enum_dump_by_name: serialise enums by name instead of value
        :return: Schema.field
        """
        _model = datamodel.obj
        # Handle relations
        if datamodel.is_relation(column.data) and nested:
            required = not datamodel.is_nullable(column.data)
            nested_model = datamodel.get_related_model(column.data)
            lst = [item.data for item in column.childs]
            # Recurse one level: build the related model's schema with
            # nested=False so relations do not expand indefinitely.
            nested_schema = self.convert(
                lst,
                nested_model,
                nested=False
            )
            if datamodel.is_relation_many_to_one(column.data):
                many = False
            elif datamodel.is_relation_many_to_many(column.data):
                many = True
                required = False
            else:
                many = False
            field = fields.Nested(nested_schema, many=many, required=required)
            field.unique = datamodel.is_unique(column.data)
            return field
        # Handle bug on marshmallow-sqlalchemy #163
        elif datamodel.is_relation(column.data):
            if (datamodel.is_relation_many_to_many(column.data) or
                    datamodel.is_relation_one_to_many(column.data)):
                if datamodel.get_info(column.data).get('required', False):
                    required = True
                else:
                    required = False
            else:
                required = not datamodel.is_nullable(column.data)
            field = field_for(_model, column.data)
            field.required = required
            field.unique = datamodel.is_unique(column.data)
            return field
        # Handle Enums
        elif datamodel.is_enum(column.data):
            required = not datamodel.is_nullable(column.data)
            # Prefer an explicit enum_class from column info; fall back to
            # the SQLAlchemy column type.
            enum_class = datamodel.list_columns[column.data].info.get(
                'enum_class',
                datamodel.list_columns[column.data].type
            )
            if enum_dump_by_name:
                enum_dump_by = EnumField.NAME
            else:
                enum_dump_by = EnumField.VALUE
            field = EnumField(enum_class, dump_by=enum_dump_by, required=required)
            field.unique = datamodel.is_unique(column.data)
            return field
        # is custom property method field?
        if hasattr(getattr(_model, column.data), 'fget'):
            return fields.Raw(dump_only=True)
        # is a normal model field not a function?
        if not hasattr(getattr(_model, column.data), '__call__'):
            field = field_for(_model, column.data)
            field.unique = datamodel.is_unique(column.data)
            if column.data in self.validators_columns:
                field.validate.append(self.validators_columns[column.data])
            return field
        # NOTE(review): callables that are not properties fall through and
        # return None implicitly — presumably intentional; confirm.

    @staticmethod
    def get_column_child_model(column):
        # For "rel.attr" return "rel"; plain names are returned unchanged.
        if '.' in column:
            return column.split('.')[0]
        return column

    @staticmethod
    def is_column_dotted(column):
        return '.' in column

    def convert(self, columns, model=None, nested=True, enum_dump_by_name=False):
        """
        Creates a Marshmallow ModelSchema class


        :param columns: List with columns to include, if empty converts all on model
        :param model: Override Model to convert
        :param nested: Generate relation with nested schemas
        :return: ModelSchema object
        """
        super(Model2SchemaConverter, self).convert(
            columns,
            model=model,
            nested=nested
        )

        # Mixin class that will carry the generated field overrides.
        class SchemaMixin:
            pass

        _model = model or self.datamodel.obj
        _datamodel = self.datamodel.__class__(_model)
        ma_sqla_fields_override = {}

        _columns = list()
        tree_columns = columns2Tree(columns)
        for column in tree_columns.root.childs:
            # Get child model is column is dotted notation
            ma_sqla_fields_override[column.data] = self._column2field(
                _datamodel,
                column,
                nested,
                enum_dump_by_name
            )
            _columns.append(column.data)
        for k, v in ma_sqla_fields_override.items():
            setattr(SchemaMixin, k, v)
        # Return an *instance* of the generated schema class.
        return self._meta_schema_factory(_columns, _model, SchemaMixin)()
/OASYS1_HALF_SRW-0.0.3-py3-none-any.whl/orangecontrib/srw/widgets/tools/ow_dabam_height_profile.py | import os, sys
from PyQt5.QtWidgets import QApplication
import orangecanvas.resources as resources
from oasys.widgets.abstract.error_profile.abstract_dabam_height_profile import OWAbstractDabamHeightProfile
from orangecontrib.srw.util.srw_objects import SRWPreProcessorData, SRWErrorProfileData
import orangecontrib.srw.util.srw_util as SU
class OWdabam_height_profile(OWAbstractDabamHeightProfile):
    # Orange widget metadata
    name = "DABAM Height Profile"
    id = "dabam_height_profile"
    description = "Calculation of mirror surface error profile"
    icon = "icons/dabam.png"
    author = "Luca Rebuffi"
    maintainer_email = "srio@esrf.eu; luca.rebuffi@elettra.eu"
    priority = 2
    category = ""
    keywords = ["dabam_height_profile"]

    # Widget output channels: the generic DABAM output plus the
    # SRW-specific preprocessor data.
    outputs = [OWAbstractDabamHeightProfile.get_dabam_output(),
               {"name": "PreProcessor_Data",
                "type": SRWPreProcessorData,
                "doc": "PreProcessor Data",
                "id": "PreProcessor_Data"}]

    usage_path = os.path.join(resources.package_dirname("orangecontrib.srw.widgets.gui"), "misc", "dabam_height_profile_usage.png")

    def __init__(self):
        super().__init__()
        # Rewrite a stored "*.hdf5" profile file name to "*.dat"
        # (presumably migrating settings saved by other add-ons that use
        # HDF5 profiles — TODO confirm).
        if not self.heigth_profile_file_name is None:
            if self.heigth_profile_file_name.endswith("hdf5"):
                self.heigth_profile_file_name = self.heigth_profile_file_name[:-4] + "dat"

    def get_usage_path(self):
        # Image shown in the widget's "usage" help area.
        return self.usage_path

    def write_error_profile_file(self):
        # Persist the computed profile (zz over xx/yy grid) to disk.
        SU.write_error_profile_file(self.zz, self.xx, self.yy, self.heigth_profile_file_name)

    def send_data(self, dimension_x, dimension_y):
        # Emit the profile file reference and its dimensions downstream.
        self.send("PreProcessor_Data", SRWPreProcessorData(error_profile_data=SRWErrorProfileData(error_profile_data_file=self.heigth_profile_file_name,
                                                                                                  error_profile_x_dim=dimension_x,
                                                                                                  error_profile_y_dim=dimension_y)))
if __name__ == "__main__":
    # Manual smoke test: run the widget standalone in a Qt event loop.
    application = QApplication(sys.argv)
    widget = OWdabam_height_profile()
    widget.si_to_user_units = 100
    widget.show()
    application.exec()
    widget.saveSettings()
/BactInspectorMax-0.1.3-py3-none-any.whl/bactinspector/mash_functions.py | import os, glob, sys
import pandas
from bactinspector.utility_functions import add_new_file_extension, get_base_name, run_command
import pandas as pd
from io import StringIO
def run_mash_sketch(file, filetype, output_dir = None, mash_path = ''):
    """
    run mash sketch on a fasta file and return the path to the resulting sketch file
    """
    # Decide where the sketch lives: in output_dir or alongside the input.
    if output_dir:
        sketch_file = os.path.join(output_dir, '{0}.msh'.format(get_base_name(file)))
    else:
        sketch_file = add_new_file_extension(file, 'msh')

    # Skip the expensive sketch step when a non-empty sketch already exists.
    already_sketched = os.path.exists(sketch_file) and os.path.getsize(sketch_file) > 0
    if not already_sketched:
        sys.stderr.write('Sketching {0}\n'.format(get_base_name(file)))
        mash_exe = os.path.join(mash_path, 'mash')
        if filetype == 'fasta':
            command_and_arguments = [mash_exe, 'sketch', file, '-o', sketch_file]
        else:
            # Reads: require each k-mer to be seen at least 3 times (-m 3).
            command_and_arguments = [mash_exe, 'sketch', '-m', '3', file, '-o', sketch_file]
        ret_code, out, err = run_command(command_and_arguments)
        if ret_code != 0:
            sys.stderr.write('Error whilst performing mash sketch: {0}\n'.format(err))
            sys.exit(ret_code)
    return sketch_file
def get_best_mash_matches(sample_sketch, ref_seq_sketch, refseq_species_info, output_dir = None, mash_path = '', number_of_best_matches = 10):
    """
    Run ``mash dist`` for the sample sketch against the refseq sketches and
    return the best matches.

    :param sample_sketch: path to the sample's .msh sketch file
    :param ref_seq_sketch: path to the refseq sketch collection
    :param refseq_species_info: DataFrame with a 'filename' column used to
        restrict results to known refseq entries
    :param output_dir: unused here; kept for interface compatibility
    :param mash_path: directory containing the mash executable ('' = PATH)
    :param number_of_best_matches: how many closest matches to keep
    :return: tuple of (sample base name, DataFrame with columns
        ['filename', 'distance', 'p-value', 'shared-hashes'])
    """
    match_file = add_new_file_extension(sample_sketch, 'best_matches.txt')
    # NOTE(review): nothing in this function writes match_file, so in
    # practice this guard is always taken; if a non-empty match file did
    # exist the function would return None — confirm against callers.
    if not os.path.exists(match_file) or os.path.getsize(match_file) == 0:
        sys.stderr.write('Getting best match for {0}\n'.format(get_base_name(sample_sketch)))
        command_and_arguments = [os.path.join(mash_path, 'mash'), 'dist', sample_sketch, ref_seq_sketch]
        ret_code, out, err = run_command(command_and_arguments)
        if ret_code != 0:
            # Report on stderr for consistency with the rest of the module
            # (was print(), which wrongly mixed errors into stdout).
            sys.stderr.write('Error whilst performing mash dist: {0}\n'.format(err))
            sys.exit(ret_code)

        distances_fh = StringIO(out.decode("utf-8"))
        mash_dists = pd.read_csv(distances_fh, sep = "\t", names = ['query', 'subject', 'distance', 'p-value', 'shared-hashes'])
        # merge with refseq matches for potential filtering
        mash_dists = mash_dists.merge(refseq_species_info, left_on = 'subject', right_on = 'filename', how = 'right')
        mash_dists = mash_dists.filter(['query', 'subject', 'distance', 'p-value', 'shared-hashes'])
        # sort by distance and output the subjects (match in refseq)
        matches = mash_dists.sort_values('distance', ascending=True).head(number_of_best_matches)
        matches = matches.rename(columns = {'subject' : 'filename'}).filter(items = ['filename', 'distance', 'p-value', 'shared-hashes'])
        return (get_base_name(sample_sketch), matches)
def get_species_match_details(matches, refseq_species_info):
    """
    Merge the best-match DataFrame with the refseq species info on the
    shared 'filename' column and return the merged DataFrame.
    """
    best_match_species_df = matches.merge(
        refseq_species_info,
        on = ['filename']
    )
    return best_match_species_df


def get_most_frequent_species_match(matches, refseq_species_info, distance_cutoff = 0.05):
    """
    Merge best matches with refseq species info and report the most
    frequent species among matches closer than ``distance_cutoff``.

    :return: tuple (species name, count of that species, total close
        matches, and the top hit's distance, p-value and shared-hashes).
        When no match is within the cutoff, returns
        ('No significant matches', None, None, None, None, None).
    """
    best_match_species_df = get_species_match_details(matches, refseq_species_info)
    # filter for close matches
    best_match_species_df = best_match_species_df.loc[best_match_species_df['distance'] <= distance_cutoff]
    if len(best_match_species_df) == 0:
        return 'No significant matches', None, None, None, None, None

    species_counts = best_match_species_df['curated_organism_name'].value_counts()
    most_frequent_species_name = species_counts.index[0]
    # .iloc[0] — value_counts() is labelled by species name, so the old
    # positional `[0]` lookup is deprecated/removed in modern pandas.
    most_frequent_species_count = species_counts.iloc[0]
    # get top hit of the most frequent species as measured by distance
    top_hit = best_match_species_df.loc[best_match_species_df['curated_organism_name'] == most_frequent_species_name].sort_values('distance').iloc[0, :]
    return (most_frequent_species_name,
            most_frequent_species_count,
            len(best_match_species_df),
            top_hit['distance'],
            top_hit['p-value'],
            top_hit['shared-hashes']
            )
/GraphLab_Create-2.1-cp27-none-macosx_10_5_x86_64.macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.macosx_10_11_intel.macosx_10_11_x86_64.whl/graphlab/toolkits/regression/linear_regression.py | import graphlab.connect as _mt
import graphlab.toolkits._supervised_learning as _sl
from graphlab.toolkits._supervised_learning import SupervisedLearningModel as \
_SupervisedLearningModel
from graphlab.toolkits._internal_utils import _toolkit_repr_print, \
_toolkit_get_topk_bottomk, \
_summarize_coefficients, \
_raise_error_evaluation_metric_is_valid
from graphlab.toolkits._model_workflow import _collect_model_workflow
from graphlab.toolkits._model import _get_default_options_wrapper
# Default optimization parameters shared by all solvers; each value can be
# overridden via the matching keyword argument of create().
_DEFAULT_SOLVER_OPTIONS = {
    'convergence_threshold': 1e-2,
    'step_size': 1.0,
    'lbfgs_memory_level': 11,
    'max_iterations': 10}

# Standard toolkit entry point listing the default options for this model.
get_default_options = _get_default_options_wrapper(
    'regression_linear_regression',
    'linear_regression',
    'LinearRegression')
def create(dataset, target, features=None, l2_penalty=1e-2, l1_penalty=0.0,
           solver='auto', feature_rescaling=True,
           convergence_threshold = _DEFAULT_SOLVER_OPTIONS['convergence_threshold'],
           step_size = _DEFAULT_SOLVER_OPTIONS['step_size'],
           lbfgs_memory_level = _DEFAULT_SOLVER_OPTIONS['lbfgs_memory_level'],
           max_iterations = _DEFAULT_SOLVER_OPTIONS['max_iterations'],
           validation_set = "auto",
           verbose=True):
    """
    Create a :class:`~graphlab.linear_regression.LinearRegression` to
    predict a scalar target variable as a linear function of one or more
    features. In addition to standard numeric and categorical types, features
    can also be extracted automatically from list- or dictionary-type SFrame
    columns.

    The linear regression module can be used for ridge regression, Lasso, and
    elastic net regression (see References for more detail on these methods). By
    default, this model has an l2 regularization weight of 0.01.

    Parameters
    ----------
    dataset : SFrame
        The dataset to use for training the model.

    target : string
        Name of the column containing the target variable.

    features : list[string], optional
        Names of the columns containing features. 'None' (the default) indicates
        that all columns except the target variable should be used as features.

        The features are columns in the input SFrame that can be of the
        following types:

        - *Numeric*: values of numeric type integer or float.

        - *Categorical*: values of type string.

        - *Array*: list of numeric (integer or float) values. Each list element
          is treated as a separate feature in the model.

        - *Dictionary*: key-value pairs with numeric (integer or float) values
          Each key of a dictionary is treated as a separate feature and the
          value in the dictionary corresponds to the value of the feature.
          Dictionaries are ideal for representing sparse data.

        Columns of type *list* are not supported. Convert such feature
        columns to type array if all entries in the list are of numeric
        types. If the lists contain data of mixed types, separate
        them out into different columns.

    l2_penalty : float, optional
        Weight on the l2-regularizer of the model. The larger this weight, the
        more the model coefficients shrink toward 0. This introduces bias into
        the model but decreases variance, potentially leading to better
        predictions. The default value is 0.01; setting this parameter to 0
        corresponds to unregularized linear regression. See the ridge
        regression reference for more detail.

    l1_penalty : float, optional
        Weight on l1 regularization of the model. Like the l2 penalty, the
        higher the l1 penalty, the more the estimated coefficients shrink toward
        0. The l1 penalty, however, completely zeros out sufficiently small
        coefficients, automatically indicating features that are not useful for
        the model. The default weight of 0 prevents any features from being
        discarded. See the LASSO regression reference for more detail.

    solver : string, optional
        Solver to use for training the model. See the references for more detail
        on each solver.

        - *auto (default)*: automatically chooses the best solver for the data
          and model parameters.
        - *newton*: Newton-Raphson
        - *lbfgs*: limited memory BFGS
        - *fista*: accelerated gradient descent

        The model is trained using a carefully engineered collection of methods
        that are automatically picked based on the input data. The ``newton``
        method works best for datasets with plenty of examples and few features
        (long datasets). Limited memory BFGS (``lbfgs``) is a robust solver for
        wide datasets (i.e datasets with many coefficients). ``fista`` is the
        default solver for l1-regularized linear regression. The solvers are
        all automatically tuned and the default options should function well.
        See the solver options guide for setting additional parameters for each
        of the solvers.

        See the user guide for additional details on how the solver is chosen.

    feature_rescaling : boolean, optional
        Feature rescaling is an important pre-processing step that ensures that
        all features are on the same scale. An l2-norm rescaling is performed
        to make sure that all features are of the same norm. Categorical
        features are also rescaled by rescaling the dummy variables that are
        used to represent them. The coefficients are returned in original scale
        of the problem. This process is particularly useful when features
        vary widely in their ranges.

    validation_set : SFrame, optional
        A dataset for monitoring the model's generalization performance.
        For each row of the progress table, the chosen metrics are computed
        for both the provided training dataset and the validation_set. The
        format of this SFrame must be the same as the training set.
        By default this argument is set to 'auto' and a validation set is
        automatically sampled and used for progress printing. If
        validation_set is set to None, then no additional metrics
        are computed. The default value is 'auto'.

    convergence_threshold : float, optional
        Convergence is tested using variation in the training objective. The
        variation in the training objective is calculated using the difference
        between the objective values between two steps. Consider reducing this
        below the default value (0.01) for a more accurately trained model.
        Beware of overfitting (i.e a model that works well only on the training
        data) if this parameter is set to a very low value.

    lbfgs_memory_level : int, optional
        The L-BFGS algorithm keeps track of gradient information from the
        previous ``lbfgs_memory_level`` iterations. The storage requirement for
        each of these gradients is the ``num_coefficients`` in the problem.
        Increasing the ``lbfgs_memory_level`` can help improve the quality of
        the model trained. Setting this to more than ``max_iterations`` has the
        same effect as setting it to ``max_iterations``.

    max_iterations : int, optional
        The maximum number of allowed passes through the data. More passes over
        the data can result in a more accurately trained model. Consider
        increasing this (the default value is 10) if the training accuracy is
        low and the *Grad-Norm* in the display is large.

    step_size : float, optional (fista only)
        The starting step size to use for the ``fista`` and ``gd`` solvers. The
        default is set to 1.0, this is an aggressive setting. If the first
        iteration takes a considerable amount of time, reducing this parameter
        may speed up model training.

    verbose : bool, optional
        If True, print progress updates.

    Returns
    -------
    out : LinearRegression
        A trained model of type
        :class:`~graphlab.linear_regression.LinearRegression`.

    See Also
    --------
    LinearRegression, graphlab.boosted_trees_regression.BoostedTreesRegression, graphlab.regression.create

    Notes
    -----
    - Categorical variables are encoded by creating dummy variables. For a
      variable with :math:`K` categories, the encoding creates :math:`K-1` dummy
      variables, while the first category encountered in the data is used as the
      baseline.

    - For prediction and evaluation of linear regression models with sparse
      dictionary inputs, new keys/columns that were not seen during training
      are silently ignored.

    - Any 'None' values in the data will result in an error being thrown.

    - A constant term is automatically added for the model intercept. This term
      is not regularized.

    - Standard errors on coefficients are only available when `solver=newton`
      or when the default `auto` solver option chooses the newton method and if
      the number of examples in the training data is more than the number of
      coefficients. If standard errors cannot be estimated, a column of `None`
      values are returned.

    References
    ----------
    - Hoerl, A.E. and Kennard, R.W. (1970) `Ridge regression: Biased Estimation
      for Nonorthogonal Problems
      <http://amstat.tandfonline.com/doi/abs/10.1080/00401706.1970.10488634>`_.
      Technometrics 12(1) pp.55-67

    - Tibshirani, R. (1996) `Regression Shrinkage and Selection via the Lasso
      <http://www.jstor.org/discover/10.2307/2346178?uid=3739256&uid=2&uid=4&sid=21104169934983>`_.
      Journal of the Royal Statistical Society. Series B
      (Methodological) 58(1) pp.267-288.

    - Zhu, C., et al. (1997) `Algorithm 778: L-BFGS-B: Fortran subroutines for
      large-scale bound-constrained optimization
      <http://dl.acm.org/citation.cfm?id=279236>`_. ACM Transactions on
      Mathematical Software 23(4) pp.550-560.

    - Barzilai, J. and Borwein, J. `Two-Point Step Size Gradient Methods
      <http://imajna.oxfordjournals.org/content/8/1/141.short>`_. IMA Journal of
      Numerical Analysis 8(1) pp.141-148.

    - Beck, A. and Teboulle, M. (2009) `A Fast Iterative Shrinkage-Thresholding
      Algorithm for Linear Inverse Problems
      <http://epubs.siam.org/doi/abs/10.1137/080716542>`_. SIAM Journal on
      Imaging Sciences 2(1) pp.183-202.

    - Zhang, T. (2004) `Solving large scale linear prediction problems using
      stochastic gradient descent algorithms
      <http://dl.acm.org/citation.cfm?id=1015332>`_. ICML '04: Proceedings of
      the twenty-first international conference on Machine learning p.116.

    Examples
    --------

    Given an :class:`~graphlab.SFrame` ``sf`` with a list of columns
    [``feature_1`` ... ``feature_K``] denoting features and a target column
    ``target``, we can create a
    :class:`~graphlab.linear_regression.LinearRegression` as follows:

    >>> data = graphlab.SFrame('https://static.turi.com/datasets/regression/houses.csv')

    >>> model = graphlab.linear_regression.create(data, target='price',
    ...                                  features=['bath', 'bedroom', 'size'])

    For ridge regression, we can set the ``l2_penalty`` parameter higher (the
    default is 0.01). For Lasso regression, we set the l1_penalty higher, and
    for elastic net, we set both to be higher.

    .. sourcecode:: python

      # Ridge regression
      >>> model_ridge = graphlab.linear_regression.create(data, 'price', l2_penalty=0.1)

      # Lasso
      >>> model_lasso = graphlab.linear_regression.create(data, 'price', l2_penalty=0.,
                                                                   l1_penalty=1.0)

      # Elastic net regression
      >>> model_enet  = graphlab.linear_regression.create(data, 'price', l2_penalty=0.5,
                                                                 l1_penalty=0.5)

    """
    # Usage telemetry.
    _mt._get_metric_tracker().track('toolkit.regression.linear_regression.create')

    # Regression model names.
    model_name = "regression_linear_regression"
    solver = solver.lower()

    # Delegate training to the shared supervised-learning backend.
    model = _sl.create(dataset, target, model_name, features=features,
                       validation_set = validation_set,
                       solver = solver, verbose = verbose,
                       l2_penalty=l2_penalty, l1_penalty = l1_penalty,
                       feature_rescaling = feature_rescaling,
                       convergence_threshold = convergence_threshold,
                       step_size = step_size,
                       lbfgs_memory_level = lbfgs_memory_level,
                       max_iterations = max_iterations)

    # Wrap the backend proxy in the public model class.
    return LinearRegression(model.__proxy__)
class LinearRegression(_SupervisedLearningModel):
    """
    Linear regression is an approach for modeling a scalar target :math:`y` as
    a linear function of one or more explanatory variables denoted :math:`X`.
    Given a set of features :math:`x_i`, and a label :math:`y_i`, linear
    regression interprets the probability that the label is in one class as
    a linear function of a linear combination of the features.
    .. math::
          f_i(\\theta) =  \\theta^T x + \epsilon_i
    where :math:`\epsilon_i` is noise.  An intercept term is added by appending
    a column of 1's to the features.  Regularization is often required to
    prevent overfitting by penalizing models with extreme parameter values. The
    linear regression module supports l1 and l2 regularization, which are added
    to the loss function.
    The composite objective being optimized for is the following:
    .. math::
       \min_{\\theta} \sum_{i = 1}^{n} (\\theta^Tx - y_i)^2 + \lambda_1 ||\\theta||_1 + \lambda_2 ||\\theta||^{2}_{2}
    where :math:`\lambda_1` is the ``l1_penalty`` and :math:`\lambda_2` is the
    ``l2_penalty``.
    This model cannot be constructed directly.  Instead, use
    :func:`graphlab.linear_regression.create` to create an instance
    of this model. A detailed list of parameter options and code samples
    are available in the documentation for the create function.
    Examples
    --------
    .. sourcecode:: python
        # Load the data (From an S3 bucket)
        >>> data =  graphlab.SFrame('https://static.turi.com/datasets/regression/houses.csv')
        # Make a linear regression model
        >>> model = graphlab.linear_regression.create(data, target='price', features=['bath', 'bedroom', 'size'])
        # Extract the coefficients
        >>> coefficients = model['coefficients']
        # Make predictions
        >>> predictions = model.predict(data)
        # Evaluate the model
        >>> results = model.evaluate(data)
    See Also
    --------
    create
    """
    def __init__(self, model_proxy):
        '''__init__(self)'''
        # Wrap the C++/unity model proxy; all heavy lifting is delegated to it
        # through the _SupervisedLearningModel base class.
        self.__proxy__ = model_proxy
        # Internal toolkit identifier used by the unity backend.
        self.__name__ = "regression_linear_regression"
    def _get_wrapper(self):
        # Return a callable that re-wraps a raw model proxy in this Python
        # class (used by the toolkit machinery when models are deserialized).
        def model_wrapper(model_proxy):
            return LinearRegression(model_proxy)
        return model_wrapper
    def __str__(self):
        """
        Return a string description of the model, including a description of
        the training data, training statistics, and model hyper-parameters.
        Returns
        -------
        out : string
            A description of the model.
        """
        # Delegate to __repr__ so str() and repr() render identically.
        return self.__repr__()
    def _get_summary_struct(self):
        """
        Returns a structured description of the model, including (where relevant)
        the schema of the training data, description of the training data,
        training statistics, and model hyperparameters.
        Returns
        -------
        sections : list (of list of tuples)
            A list of summary sections.
              Each section is a list.
                Each item in a section list is a tuple of the form:
                  ('<label>','<field>')
        section_titles: list
            A list of section titles.
              The order matches that of the 'sections' object.
        """
        model_fields = [
            ('Number of coefficients', 'num_coefficients'),
            ('Number of examples', 'num_examples'),
            ('Number of feature columns', 'num_features'),
            ('Number of unpacked features', 'num_unpacked_features')]
        hyperparam_fields = [
            ("L1 penalty", 'l1_penalty'),
            ("L2 penalty", 'l2_penalty')]
        solver_fields = [
            ("Solver", 'solver'),
            ("Solver iterations", 'training_iterations'),
            ("Solver status", 'training_solver_status'),
            ("Training time (sec)", 'training_time')]
        training_fields = [
            ("Residual sum of squares", 'training_loss'),
            ("Training RMSE", 'training_rmse')]
        # Append the top/bottom-5 coefficient tables to the summary sections.
        coefs = self.get('coefficients')
        top_coefs, bottom_coefs = _toolkit_get_topk_bottomk(coefs,k=5)
        (coefs_list, titles_list) = _summarize_coefficients(top_coefs, \
                                                            bottom_coefs)
        # NOTE(review): solver_fields is paired with the title
        # 'Training Summary' and training_fields with 'Settings' — the
        # pairing looks swapped relative to the field contents; confirm
        # against the intended report layout before changing.
        return ([model_fields, hyperparam_fields,
                 solver_fields, training_fields] + coefs_list, \
                [ 'Schema', 'Hyperparameters', \
                  'Training Summary', 'Settings' ] + titles_list )
    def __repr__(self):
        """
        Return a string description of the model, including a description of
        the training data, training statistics, and model hyper-parameters.
        Returns
        -------
        out : string
            A description of the model.
        """
        (sections, section_titles) = self._get_summary_struct()
        # width=30 keeps the label column aligned across all sections.
        return _toolkit_repr_print(self, sections, section_titles, width=30)
    def get(self, field):
        """
        Get the value of a given field. The list of all queryable fields is
        detailed below, and can be obtained programmatically using the
        :func:`~graphlab.linear_regression.LinearRegression.list_fields`
        method.
        +------------------------+-------------------------------------------------------------+
        |      Field             | Description                                                 |
        +========================+=============================================================+
        | coefficients           | Regression coefficients                                     |
        +------------------------+-------------------------------------------------------------+
        | convergence_threshold  | Desired solver accuracy                                     |
        +------------------------+-------------------------------------------------------------+
        | feature_rescaling      | Bool indicating if features were rescaled during training   |
        +------------------------+-------------------------------------------------------------+
        | features               | Feature column names                                        |
        +------------------------+-------------------------------------------------------------+
        | l1_penalty             | l1 regularization weight                                    |
        +------------------------+-------------------------------------------------------------+
        | l2_penalty             | l2 regularization weight                                    |
        +------------------------+-------------------------------------------------------------+
        | lbfgs_memory_level     | LBFGS memory level ('lbfgs only')                           |
        +------------------------+-------------------------------------------------------------+
        | max_iterations         | Maximum number of solver iterations                         |
        +------------------------+-------------------------------------------------------------+
        | num_coefficients       | Number of coefficients in the model                         |
        +------------------------+-------------------------------------------------------------+
        | num_examples           | Number of examples used for training                        |
        +------------------------+-------------------------------------------------------------+
        | num_features           | Number of dataset columns used for training                 |
        +------------------------+-------------------------------------------------------------+
        | num_unpacked_features  | Number of features (including expanded list/dict features)  |
        +------------------------+-------------------------------------------------------------+
        | solver                 | Type of solver                                              |
        +------------------------+-------------------------------------------------------------+
        | step_size              | Initial step size for the solver                            |
        +------------------------+-------------------------------------------------------------+
        | target                 | Target column name                                          |
        +------------------------+-------------------------------------------------------------+
        | training_iterations    | Number of solver iterations                                 |
        +------------------------+-------------------------------------------------------------+
        | training_loss          | Residual sum-of-squares training loss                       |
        +------------------------+-------------------------------------------------------------+
        | training_rmse          | Training root-mean-squared-error (RMSE)                     |
        +------------------------+-------------------------------------------------------------+
        | training_solver_status | Solver status after training                                |
        +------------------------+-------------------------------------------------------------+
        | training_time          | Training time (excludes preprocessing)                      |
        +------------------------+-------------------------------------------------------------+
        | unpacked_features      | Feature names (including expanded list/dict features)       |
        +------------------------+-------------------------------------------------------------+
        Parameters
        ----------
        field : string
            Name of the field to be retrieved.
        Returns
        -------
        out : [various]
            The current value of the requested field.
        See Also
        --------
        list_fields
        Examples
        --------
        >>> data = graphlab.SFrame('https://static.turi.com/datasets/regression/houses.csv')
        >>> model = graphlab.linear_regression.create(data,
                                 target='price',
                                 features=['bath', 'bedroom', 'size'])
        >>> print model['num_features']
        3
        >>> print model.get('num_features')      # equivalent to previous line
        3
        """
        # Record usage metrics, then delegate the actual lookup to the base.
        _mt._get_metric_tracker().track('toolkit.regression.linear_regression.get')
        return super(LinearRegression, self).get(field)
    def get_current_options(self):
        """
        A dictionary describing the options requested during training.
        Returns
        -------
        out : dict
            A dictionary with option (name, value) pairs requested during
            train time.
        see also
        --------
        get_current_options, list_fields, get
        Examples
        --------
        >>> data = graphlab.SFrame('https://static.turi.com/datasets/regression/houses.csv')
        >>> model = graphlab.linear_regression.create(data,
                                 target='price',
                                 features=['bath', 'bedroom', 'size'])
        >>> current_options = model.get_current_options()
        """
        # Metric tracking only; the option dictionary comes from the base class.
        _mt._get_metric_tracker().track('toolkit.regression.linear_regression.get_options')
        return super(LinearRegression, self).get_current_options()
    def predict(self, dataset, missing_value_action='auto'):
        """
        Return target value predictions for ``dataset``, using the trained
        linear regression model. This method can be used to get fitted values
        for the model by inputting the training dataset.
        Parameters
        ----------
        dataset : SFrame | pandas.Dataframe
            Dataset of new observations. Must include columns with the same
            names as the features used for model training, but does not require
            a target column. Additional columns are ignored.
        missing_value_action : str, optional
            Action to perform when missing values are encountered. This can be
            one of:
            - 'auto': Default to 'impute'
            - 'impute': Proceed with evaluation by filling in the missing
              values with the mean of the training data. Missing
              values are also imputed if an entire column of data is
              missing during evaluation.
            - 'error': Do not proceed with prediction and terminate with
              an error message.
        Returns
        -------
        out : SArray
            Predicted target value for each example (i.e. row) in the dataset.
        See Also
        ----------
        create, evaluate
        Examples
        ----------
        >>> data =  graphlab.SFrame('https://static.turi.com/datasets/regression/houses.csv')
        >>> model = graphlab.linear_regression.create(data,
                                 target='price',
                                 features=['bath', 'bedroom', 'size'])
        >>> results = model.predict(data)
        """
        # Metric tracking only; prediction is delegated to the base class.
        _mt._get_metric_tracker().track('toolkit.regression.linear_regression.predict')
        return super(LinearRegression, self).predict(dataset, missing_value_action=missing_value_action)
    @_collect_model_workflow
    def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
        r"""Evaluate the model by making target value predictions and comparing
        to actual values.
        Two metrics are used to evaluate linear regression models.  The first
        is root-mean-squared error (RMSE) while the second is the absolute
        value of the maximum error between the actual and predicted values.
        Let :math:`y` and :math:`\hat{y}` denote vectors of length :math:`N`
        (number of examples) with actual and predicted values. The RMSE is
        defined as:
        .. math::
            RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2}
        while the max-error is defined as
        .. math::
            max-error = \max_{i=1}^N \|\widehat{y}_i - y_i\|
        Parameters
        ----------
        dataset : SFrame
            Dataset of new observations. Must include columns with the same
            names as the target and features used for model training. Additional
            columns are ignored.
        metric : str, optional
            Name of the evaluation metric.  Possible values are:
            - 'auto': Compute all metrics.
            - 'rmse': Rooted mean squared error.
            - 'max_error': Maximum error.
        missing_value_action : str, optional
            Action to perform when missing values are encountered. This can be
            one of:
            - 'auto': Default to 'impute'
            - 'impute': Proceed with evaluation by filling in the missing
              values with the mean of the training data. Missing
              values are also imputed if an entire column of data is
              missing during evaluation.
            - 'error': Do not proceed with evaluation and terminate with
              an error message.
        Returns
        -------
        out : dict
            Results from  model evaluation procedure.
        See Also
        ----------
        create, predict
        Examples
        ----------
        >>> data =  graphlab.SFrame('https://static.turi.com/datasets/regression/houses.csv')
        >>> model = graphlab.linear_regression.create(data,
                                 target='price',
                                 features=['bath', 'bedroom', 'size'])
        >>> results = model.evaluate(data)
        """
        _mt._get_metric_tracker().track(
            'toolkit.regression.linear_regression.evaluate')
        # Reject unknown metric names before handing off to the backend.
        _raise_error_evaluation_metric_is_valid(metric,
                ['auto', 'rmse', 'max_error'])
        return super(LinearRegression, self).evaluate(dataset, missing_value_action=missing_value_action,
                                                      metric=metric)
    def list_fields(self):
        """
        List of fields stored in the model. Each of these fields can be queried
        using the ``get`` function.
        Returns
        -------
        out : list
            A list of fields that can be queried using the ``get`` method.
        See Also
        --------
        get
        Examples
        --------
        >>> data =  graphlab.SFrame('https://static.turi.com/datasets/regression/houses.csv')
        >>> model = graphlab.linear_regression.create(data,
                                 target='price',
                                 features=['bath', 'bedroom', 'size'])
        >>> fields = model.list_fields()
        """
        # Metric tracking only; the field list comes from the base class.
        _mt._get_metric_tracker().track(
            'toolkit.regression.linear_regression.list_fields')
        return super(LinearRegression, self).list_fields()
/CrossTex-0.7.0.tar.gz/CrossTex-0.7.0/crosstex/style/common.py | from crosstex.objects import *
# Tokenizer for words: dash runs, whitespace, TeX commands, inline math,
# and braces all act as word boundaries.
_wordre = re.compile('(-+|\s+|\\\w+|\\\W+|\$[^\$]*\$|[{}])', re.IGNORECASE)
# Matches a string consisting entirely of whitespace.
_spacere = re.compile(r'^\s*$')
# Matches a "special" token: a TeX command or an inline math segment.
_specialre = re.compile(r'^(\\.*|\$[^\$]*\$)$')
# Detects terminal punctuation (or a long dash run), possibly followed by
# closing quotes/braces/newlines.
_punctuationre = re.compile('([:!.?]|-{2,})[\'}\n]*$')
# Recognizes URIs (scheme://...) embedded in formatted field values.
_linkre = re.compile("[a-zA-Z][-+.a-zA-Z0-9]*://([:/?#[\]@!$&'()*+,;=a-zA-Z0-9_\-.~]|%[0-9a-fA-F][0-9a-fA-F]|\\-|\s)*")
# Hyphen-plus-whitespace artifacts left inside URIs by line wrapping.
_linksub = re.compile('\\-\s')
# Characters that carry TeX structure: backslash and braces.
_protectre = re.compile(r'[\\{}]')
# Trailing material that punctuation should be inserted before:
# \end{...} blocks, quotes, whitespace, and closing braces.
_endre = re.compile(r"(\\end\{[^}]*\}|['\s}])*$")
# Entry kinds BibTeX understands natively; anything else becomes 'misc'.
_bibtexkinds = ['article', 'book', 'booklet', 'conference', 'inbook', \
    'incollection', 'inproceedings', 'manual', 'mastersthesis', 'phdthesis', \
    'proceedings', 'techreport', 'unpublished']
# Clean up a string to be a search for the literal
def _sanitize(r):
    """Return a regex pattern that matches the string ``r`` literally.

    The previous implementation wrapped each character in its own character
    class, which produced invalid patterns for backslashes (``[\]`` is an
    unterminated set) and escaped ``^`` inconsistently.  ``re.escape``
    handles every metacharacter correctly.
    """
    return re.escape(r)
# Piece an entry together.
def _punctuate(string, punctuation='', tail=' '):
    """Append ``punctuation`` and ``tail`` to ``string`` if appropriate.

    Trailing quotes, whitespace, closing braces, and ``\\end{...}`` blocks
    are split off first so the punctuation lands before them.  No extra
    punctuation is added after '?', '!', ':', '--', or an existing copy of
    ``punctuation``.  ``None`` is treated as the empty string.
    """
    if string is None:  # was '== None'; identity test is the correct idiom
        string = ''
    i = _endre.search(string).start()
    end = string[i:]
    string = string[:i]
    if string and not (string.endswith('?') or string.endswith('!') or string.endswith(':') or string.endswith('--') or string.endswith(punctuation)):
        string += punctuation
    if string or end:
        end += tail
    return string + end
def _names(name, short=False, plain=False):
    # Split a single BibTeX-style name string into its component parts,
    # returning a tuple (first names, von/middle particles, last names,
    # suffixes).  short=True abbreviates first names to initials joined
    # by '~'; plain=True strips TeX braces and command backslashes.
    value = ''
    lastchar = ' '
    names = []
    nesting = 0
    # A Formatter (crosstex object) is rendered to its plain value first.
    if isinstance(name, Formatter):
        name = name._format('value') or ''
    # Tokenize on unescaped spaces at brace-nesting level 0; braces group
    # multi-word chunks into a single token, commas are dropped.
    for i in range(0, len(name)):
        charc = name[i]
        if nesting == 0 and lastchar != '\\' and lastchar != ' ' and charc == ' ':
            names.append(value)
            value = ''
        elif lastchar != '\\' and charc == '}':
            if not plain:
                value += charc
            if nesting == 0:
                names.append(value)
                value = ''
            else:
                nesting -= 1
        elif lastchar != '\\' and charc == '{':
            if not plain:
                value += charc
            nesting += 1
        elif nesting == 0 and lastchar != '\\' and charc == ',':
            pass
        else:
            if not plain or (charc != '\\' and lastchar != '\\'):
                value += charc
        lastchar = charc
    names.append(value)
    # extract lastname, check suffixes and last name modifiers
    # extract also a list of first names
    snames = ['Jr.', 'Sr.', 'Jr', 'Sr', 'III', 'IV']
    mnames = ['van', 'von', 'de', 'bin', 'ibn']
    sname = ''
    # Suffixes are peeled off the end of the token list.
    snameoffset = len(names)
    while snameoffset > 0 and names[snameoffset - 1] in snames:
        snameoffset -= 1
    # The first von-particle marks the start of the middle-name region.
    mnameoffset = 0
    while mnameoffset < snameoffset and names[mnameoffset] not in mnames:
        mnameoffset += 1
    lnameoffset = snameoffset
    while lnameoffset > 0 and names[lnameoffset - 1] not in mnames:
        lnameoffset -= 1
    # No von particles found: the last token alone is the last name.
    if lnameoffset <= mnameoffset:
        lnameoffset = mnameoffset = snameoffset - 1
    # return the person info as a tuple
    (fnames, mnames, lnames, snames) = (names[:mnameoffset], names[mnameoffset:lnameoffset], names[lnameoffset:snameoffset], names[snameoffset:])
    if short:
        # Abbreviate each first name to dotted initials, keeping '~' ties
        # and hyphen separators, while skipping TeX escapes and braces.
        fnamesabbr = []
        for n in fnames:
            abbr = ''
            initial = 0
            sep = ''
            while initial < len(n):
                if n[initial] == '\\':
                    initial += 1
                elif n[initial] in '{}':
                    pass
                elif n[initial] == '~':
                    abbr += n[initial]
                elif n[initial] in '-.':
                    sep = n[initial]
                elif sep != None:
                    if sep != '.':
                        abbr += sep
                    abbr += n[initial] + '.'
                    sep = None
                initial += 1
            if abbr:
                fnamesabbr.append(abbr)
        return (['~'.join(fnamesabbr)], mnames, lnames, snames)
    else:
        return (fnames, mnames, lnames, snames)
def _last_initials(name, size):
    # Return up to ``size`` initial letters drawn from the von particles
    # and last names of ``name`` (used to build alpha-style citation keys).
    (fnames, mnames, lnames, snames) = _names(name)
    # Take the first printable character of each von particle, skipping
    # TeX braces and backslash escapes.
    mnamestr = ''
    for mname in mnames:
        first = 0
        while first < len(mname):
            if mname[first] not in '{}\\':
                mnamestr += mname[first]
                break
            elif mname[first] == '\\':
                first += 2
            else:
                first += 1
    # Collect letters from the last names until the requested size is
    # reached (a single letter per last name once a particle was found).
    lnamestr = ''
    for lname in lnames:
        if len(lnamestr) >= size:
            break
        first = 0
        while first < len(lname):
            if lname[first] not in '{}\\':
                lnamestr += lname[first]
                if mnamestr != '' or len(lnamestr) >= size:
                    break
                else:
                    first += 1
            elif lname[first] == '\\':
                first += 2
            else:
                first += 1
    return mnamestr + lnamestr
def _fieldval(field, value):
    """Render one ``field = value`` pair for a BibTeX/CrossTeX entry.

    Object references are emitted by citation key, integers bare, values
    containing braces wrapped in braces, and everything else in quotes.
    """
    if isinstance(value, Object):
        return '%s = %s' % (field, value.citation)
    else:
        value = str(value)
        try:
            return '%s = %d' % (field, int(value))
        # Narrowed from a bare 'except:' — only conversion failures should
        # fall through to the quoted/braced forms.
        except (ValueError, TypeError):
            if '{' in value or '}' in value:
                return '%s = {%s}' % (field, value)
            else:
                return '%s = "%s"' % (field, value)
def makegetterproducer(field):
    """Build a producer that simply formats ``field`` of the object."""
    def getterproducer(obj, value, context):
        # Extend the formatting context with the target field and delegate.
        extended = context + (field,)
        return obj._format(*extended)
    return getterproducer
def makejoinproducer(punctuation, space, final, finalspace, *fields):
    """Build a producer joining several fields with the given punctuation.

    Non-empty field values are glued together with ``punctuation`` and
    ``space``; the result is terminated with ``final`` and ``finalspace``.
    """
    def joinproducer(obj, value, context):
        joined = ''
        for name in fields:
            piece = obj._format(*(context + (name,)))
            if piece:
                joined = _punctuate(joined, punctuation, space) + piece
        return _punctuate(joined, final, finalspace)
    return joinproducer
def bibtexproducer(obj, value, context):
    """Render ``obj`` as a BibTeX entry string."""
    # Kinds BibTeX does not know are downgraded to 'misc'.
    kind = obj.kind if obj.kind in _bibtexkinds else 'misc'
    entry = '@%s{%s' % (kind, obj.citation)
    for field in obj.fields:
        formatted = obj._format(*(context + (field,)))
        if formatted:
            entry += ',\n\t' + _fieldval(field, formatted)
    return entry + '}\n\n'
def crosstexproducer(obj, value, context):
    """Render ``obj`` back into CrossTeX source form (raw field values)."""
    if not obj.keys:
        return ''
    entry = '@%s{%s' % (obj.kind, ' = '.join(obj.keys))
    for field in obj.fields:
        entry += ',\n\t' + _fieldval(field, obj.fields[field])
    return entry + '}\n\n'
def makelinksproducer(fields):
    """Build a producer emitting a \\href link for each URI in ``fields``."""
    def linksproducer(obj, value, context):
        links = ''
        for field in fields:
            myfield = field.lower()
            fieldvalue = obj._format(*(context + (myfield,)))
            if fieldvalue:
                for m in _linkre.finditer(str(fieldvalue)):
                    # BUG FIX: the original called _linksub.sub(uri, '')
                    # (arguments swapped) and discarded the result, so
                    # hyphen-whitespace wrapping artifacts were never
                    # removed from the URI.
                    uri = _linksub.sub('', m.group())
                    links = _punctuate(links) + '\\href{%s}{\\small\\textsc{%s}}' % (uri, field)
        return links
    return linksproducer
def extrasproducer(obj, value, context):
    """Produce the abstract/keywords quotation block for an entry, if any."""
    parts = ''
    abstract = obj._format(*(context + ('abstract',)))
    keywords = obj._format(*(context + ('keywords',)))
    if abstract:
        parts = _punctuate(parts, '\n', tail='') + '\\noindent\\begin{small}%s\\end{small}' % abstract
    if keywords:
        parts = _punctuate(parts, '\n\n', tail='') + '\\noindent\\begin{small}\\textsc{Keywords:} %s\\end{small}' % keywords
    if not parts:
        return parts
    return '\\begin{quotation}' + parts + '\\end{quotation}'
def authoryearproducer(obj, value, context):
    """Concatenate author and year (e.g. 'Smith1999').

    Returns None when both pieces are missing or empty.
    """
    authorvalue = obj._format(*(context + ('author',)))
    yearvalue = obj._format(*(context + ('year',)))
    # Identity comparison with None (PEP 8) instead of '=='.
    if yearvalue is None:
        yearvalue = ''
    else:
        yearvalue = str(yearvalue)
    if authorvalue is None:
        authorvalue = ''
    else:
        authorvalue = str(authorvalue)
    # '(a + b) or None' — an empty concatenation collapses to None.
    return authorvalue + yearvalue or None
def longauthoryearproducer(obj, value, context):
    """Produce 'Author Year' with a separating space; None if both absent."""
    authorvalue = obj._format(*(context + ('author',)))
    yearvalue = obj._format(*(context + ('year',)))
    # Identity comparison with None (PEP 8) instead of '=='.
    if yearvalue is None:
        yearvalue = ''
    else:
        yearvalue = str(yearvalue)
    if authorvalue is None:
        authorvalue = ''
    else:
        authorvalue = str(authorvalue)
    # _punctuate supplies the trailing space after the author part.
    return _punctuate(authorvalue) + yearvalue or None
def authoreditorproducer(obj, value, context):
    """Produce the author string, falling back to editors.

    When editors are used, ', ed.' or ', eds.' is appended (plural if the
    value contains ' and ').  Returns None when neither field is present.
    """
    authorvalue = obj._format(*(context + ('author',)))
    # Identity comparison with None (PEP 8) instead of '=='.
    if authorvalue is None:
        authorvalue = obj._format(*(context + ('editor',)))
        if authorvalue is None:
            return None
        if ' and ' in authorvalue:
            authorvalue = str(authorvalue) + ', eds.'
        else:
            authorvalue = str(authorvalue) + ', ed.'
    else:
        authorvalue = str(authorvalue)
    return authorvalue
def dateproducer(obj, value, context):
    """Format 'month, year' from whichever of the two fields is present."""
    pieces = ''
    for fieldname in ('month', 'year'):
        fieldvalue = obj._format(*(context + (fieldname,)))
        if fieldvalue:
            pieces = _punctuate(pieces, ',') + str(fieldvalue)
    return pieces
def fullpublicationproducer(obj, value, context):
    # Assemble the full publication description for a reference: book or
    # journal info, volume/number/pages, institution/school, editors,
    # publisher, address, note, and date, joined with _punctuate.
    value = obj._format(*(context + ('publication',)))
    booktitlevalue = obj._format(*(context + ('booktitle',)))
    journalvalue = obj._format(*(context + ('journal',)))
    if booktitlevalue:
        # Book-style: "Title, volume V of Series, chapter C".
        value = _punctuate(value, '.') + str(booktitlevalue)
        volumevalue = obj._format(*(context + ('volume',)))
        if volumevalue:
            value += ', volume %s' % volumevalue
            seriesvalue = obj._format(*(context + ('series',)))
            if seriesvalue :
                value += ' of \\emph{%s}' % seriesvalue
        chaptervalue = obj._format(*(context + ('chapter',)))
        if chaptervalue:
            value += ', chapter %s' % chaptervalue
    elif journalvalue:
        # Journal-style: "Journal, V(N):pages".
        value = _punctuate(value, ',') + str(journalvalue)
        numbervalue = obj._format(*(context + ('number',)))
        volumevalue = obj._format(*(context + ('volume',)))
        pagesvalue = obj._format(*(context + ('pages',)))
        if numbervalue or volumevalue or pagesvalue:
            value = _punctuate(value, ',')
            if volumevalue:
                value += str(volumevalue)
            if numbervalue:
                value += '(%s)' % numbervalue
            if pagesvalue:
                if volumevalue or numbervalue:
                    value += ':%s' % pagesvalue
                # NOTE(review): this 'elif pagesvalue' guard is always true
                # when reached (the enclosing 'if pagesvalue' already
                # holds), so the try/except branch below is unreachable —
                # confirm whether 'page %d' for numeric pages was intended.
                elif pagesvalue :
                    value += 'page %s' % pagesvalue
                else:
                    try:
                        pagenum = int(pagesvalue)
                        value += 'page %d' % pagenum
                    except ValueError:
                        value += 'pages %s' % pagesvalue
    institutionvalue = obj._format(*(context + ('institution',)))
    if institutionvalue:
        value = _punctuate(value, ',') + str(institutionvalue)
    schoolvalue = obj._format(*(context + ('school',)))
    if schoolvalue:
        value = _punctuate(value, ',') + str(schoolvalue)
    # Non-journal entries carry their number (with optional numbertype,
    # e.g. "Technical Report 42") and page ranges here instead.
    if not journalvalue:
        numbervalue = obj._format(*(context + ('number',)))
        if numbervalue:
            value = _punctuate(value, ',')
            numbertypevalue = obj._format(*(context + ('numbertype',)))
            if numbertypevalue:
                value = _punctuate(value + str(numbertypevalue))
            value += str(numbervalue)
        pagesvalue = obj._format(*(context + ('pages',)))
        if pagesvalue:
            try:
                pagenum = int(pagesvalue)
                value = _punctuate(value, ',') + ('page %d' % pagenum)
            except ValueError:
                value = _punctuate(value, ',') + ('pages %s' % pagesvalue)
    # Editors are only listed when distinct authors also exist.
    authorvalue = obj._format(*(context + ('author',)))
    editorvalue = obj._format(*(context + ('editor',)))
    if authorvalue and editorvalue:
        value = _punctuate(value, ',') + str(editorvalue)
    publishervalue = obj._format(*(context + ('publisher',)))
    if publishervalue:
        value = _punctuate(value, ',') + str(publishervalue)
    addressvalue = obj._format(*(context + ('address',)))
    if addressvalue:
        value = _punctuate(value, ',') + str(addressvalue)
    notevalue = obj._format(*(context + ('note',)))
    if notevalue:
        value = _punctuate(value, ',') + str(notevalue)
    # Date goes last: "month year".
    yearvalue = obj._format(*(context + ('year',)))
    if yearvalue:
        value = _punctuate(value, ',')
        monthvalue = obj._format(*(context + ('month',)))
        if monthvalue:
            value = _punctuate(value + str(monthvalue))
        value += str(yearvalue)
    return value
def acmfullpublicationproducer(obj, value, context):
    """Assemble the ACM-style publication description for an entry.

    Like fullpublicationproducer but with ACM abbreviations (vol., chap.,
    p./pp.) and the '(Address, Month Year)' parenthetical form.
    """
    value = obj._format(*(context + ('publication',)))
    booktitlevalue = obj._format(*(context + ('booktitle',)))
    journalvalue = obj._format(*(context + ('journal',)))
    if booktitlevalue:
        value = _punctuate(value, '.') + str(booktitlevalue)
        volumevalue = obj._format(*(context + ('volume',)))
        if volumevalue:
            value += ', vol. %s' % volumevalue
            seriesvalue = obj._format(*(context + ('series',)))
            if seriesvalue :
                value += ' of \\emph{%s}' % seriesvalue
        chaptervalue = obj._format(*(context + ('chapter',)))
        if chaptervalue:
            value += ', chap. %s' % chaptervalue
    elif journalvalue:
        value = _punctuate(value, ',') + str(journalvalue)
        volumevalue = obj._format(*(context + ('volume',)))
        if volumevalue:
            value = _punctuate(value) + '\\emph{%s}' % str(volumevalue)
        numbervalue = obj._format(*(context + ('number',)))
        if numbervalue:
            value = _punctuate(value, ',') + str(numbervalue)
    # Address and date are combined into one parenthesized group.
    addryearvalue = ''
    addressvalue = obj._format(*(context + ('address',)))
    if addressvalue:
        addryearvalue = _punctuate(addryearvalue, ',') + str(addressvalue)
    datevalue = ''
    monthvalue = obj._format(*(context + ('month',)))
    if monthvalue:
        datevalue = _punctuate(datevalue) + str(monthvalue)
    yearvalue = obj._format(*(context + ('year',)))
    if yearvalue:
        datevalue = _punctuate(datevalue) + str(yearvalue)
    if datevalue:
        addryearvalue = _punctuate(addryearvalue, ',') + str(datevalue)
    if addryearvalue:
        value = _punctuate(value) + '(%s)' % addryearvalue
    institutionvalue = obj._format(*(context + ('institution',)))
    if institutionvalue:
        value = _punctuate(value, ',') + str(institutionvalue)
    schoolvalue = obj._format(*(context + ('school',)))
    if schoolvalue:
        value = _punctuate(value, ',') + str(schoolvalue)
    publishervalue = obj._format(*(context + ('publisher',)))
    if publishervalue:
        value = _punctuate(value, ',') + str(publishervalue)
    # Editors are only listed when distinct authors also exist.
    authorvalue = obj._format(*(context + ('author',)))
    editorvalue = obj._format(*(context + ('editor',)))
    if authorvalue and editorvalue:
        value = _punctuate(value, ',') + str(editorvalue)
    pagesvalue = obj._format(*(context + ('pages',)))
    if pagesvalue:
        if not journalvalue:
            try:
                pagenum = int(pagesvalue)
                # BUG FIX: the original built 'p. ' + pagenum, which
                # raises TypeError (str + int) for every numeric page.
                pagesvalue = 'p. %d' % pagenum
            except ValueError:
                pagesvalue = 'pp. ' + pagesvalue
        value = _punctuate(value, ',') + str(pagesvalue)
    notevalue = obj._format(*(context + ('note',)))
    if notevalue:
        value = _punctuate(value, ',') + str(notevalue)
    return value
def accessedproducer(obj, value, context):
    """Append an 'Accessed <month> <day>, <year>' note to the URL field."""
    result = str(obj._format(*(context + ('url',))))
    day = obj._format(*(context + ('accessday',)))
    year = obj._format(*(context + ('accessyear',)))
    month = obj._format(*(context + ('accessmonth',)))
    if not (year or month):
        return result
    result = _punctuate(result, ',') + 'Accessed'
    if month:
        result = _punctuate(result) + month
        if day:
            result = _punctuate(result) + day
        if year:
            result = _punctuate(result, ',') + year
    elif year:
        result = _punctuate(result) + year
    return result
def citystatecountryproducer(obj, value, context):
    """Join city, state, and country with commas, skipping absent parts."""
    location = ''
    for part in ('city', 'state', 'country'):
        partvalue = obj._format(*(context + (part,)))
        if partvalue:
            location = _punctuate(location, ',') + str(partvalue)
    return location
def thesistypeproducer(obj, value, context):
    """Prefer the explicit 'type' field; else build '<thesistype> Thesis'."""
    explicit = obj._format(*(context + ('type',)))
    if explicit:
        return str(explicit)
    thesis = obj._format(*(context + ('thesistype',)))
    if thesis:
        return _punctuate(thesis) + 'Thesis'
    return None
def emptyproducer(obj, value, context):
    """Producer that suppresses its field entirely (always empty)."""
    return ''
def lastfirstfilter(obj, objvalue, context):
    """Reorder a single name to 'von Last, First Suffix' form."""
    (fnames, mnames, lnames, snames) = _names(objvalue)
    result = ''
    # Von particles and last names come first.
    for part in mnames + lnames:
        result = _punctuate(result) + part
    # A comma separates them from the first names, when any exist.
    if fnames:
        result = _punctuate(result, ',')
    for part in fnames + snames:
        result = _punctuate(result) + part
    return result
def shortnamesfilter(obj, objvalue, context):
    """Abbreviate first names to initials: 'J. Q. von Last, Jr.'."""
    (fnames, mnames, lnames, snames) = _names(objvalue, short=True)
    result = ''
    for part in fnames + mnames + lnames:
        result = _punctuate(result) + part
    # Suffixes are set off by a comma.
    if snames:
        result = _punctuate(result, ',')
    for part in snames:
        result = _punctuate(result) + part
    return result
def shortnameslistfilter(obj, objvalue, context):
    """Apply shortnamesfilter to every name in the list, in place."""
    for index, name in enumerate(objvalue):
        objvalue[index] = shortnamesfilter(obj, name, context)
    return objvalue
def lastfirstlistfilter(obj, objvalue, context):
    """Flip only the first name of the list into 'Last, First' order."""
    if not objvalue:
        return objvalue
    # Work on a copy so the caller's list is never mutated.
    names = copy(objvalue)
    names[0] = lastfirstfilter(obj, names[0], context)
    return names
def alllastfirstlistfilter(obj, objvalue, context):
    """Flip every name of the list into 'Last, First' order."""
    if not objvalue:
        return objvalue
    # Copy first so the caller's list is never mutated.
    names = copy(objvalue)
    for index, name in enumerate(names):
        names[index] = lastfirstfilter(obj, name, context)
    return names
def alllastlistfilter(obj, objvalue, context):
    """Reduce every name in the list to just its last name(s)."""
    if not objvalue:
        return objvalue
    # Copy first so the caller's list is never mutated.
    names = copy(objvalue)
    for index in range(len(names)):
        (fnames, mnames, lnames, snames) = _names(names[index])
        joined = ''
        for lname in lnames:
            joined = _punctuate(joined) + lname
        names[index] = joined
    return names
def plainlistformatter(obj, objvalue, context):
    """Join list elements with comma separators."""
    result = ''
    for element in objvalue:
        result = _punctuate(result, ',') + str(element)
    return result
def commalistformatter(obj, objvalue, context):
    """Join elements with commas and a final 'and' (serial comma for 3+)."""
    pieces = ''
    total = len(objvalue)
    for index, element in enumerate(objvalue):
        if pieces:
            # Serial comma only when there are more than two items.
            if total > 2:
                pieces += ','
            pieces += ' '
            if index == total - 1:
                pieces += 'and '
        pieces += str(element)
    return pieces
def andlistformatter(obj, objvalue, context):
    """Join list elements with the literal ' and ' separator."""
    parts = [str(element) for element in objvalue]
    return ' and '.join(parts)
def andcrosstexlistformatter(obj, objvalue, context):
    """Join elements with ' and ', preferring an Object's primary key."""
    parts = []
    for element in objvalue:
        if isinstance(element, Object) and element._primarykey:
            parts.append(element._primarykey)
        else:
            parts.append(str(element))
    return ' and '.join(parts)
def initialslistformatter(obj, objvalue, context):
    """Build an alpha-style key from last-name initials.

    A single author contributes up to three letters; up to four authors
    contribute one letter each; more than four get an '+' marker.
    """
    if len(objvalue) == 1:
        return _last_initials(objvalue[0], 3)
    result = ''
    if len(objvalue) <= 4:
        for name in objvalue:
            result += _last_initials(name, 1)
    else:
        for name in objvalue[:4]:
            result += _last_initials(name, 1)
        result += '{\etalchar{+}}'
    return result
def fullnameslistformatter(obj, objvalue, context):
    """Format authors by last name: 'A', 'A \\& B', or 'A et al.'."""
    if not objvalue:
        return ''
    (fnames1, mnames1, lnames1, snames1) = _names(objvalue[0])
    first = ' '.join(mnames1 + lnames1)
    count = len(objvalue)
    if count == 2:
        (fnames2, mnames2, lnames2, snames2) = _names(objvalue[1])
        return first + ' \& ' + ' '.join(mnames2 + lnames2)
    if count > 2:
        return first + ' et al.'
    return first
def makebracketfilter(left, right):
    """Return a filter wrapping non-empty values in left/right delimiters."""
    def bracketfilter(obj, objvalue, context):
        if not objvalue:
            return objvalue
        return '%s%s%s' % (left, objvalue.strip(), right)
    return bracketfilter
def makesuffixfilter(suffix):
    """Return a filter appending ``suffix`` to non-empty values."""
    def suffixfilter(obj, objvalue, context):
        if not objvalue:
            return objvalue
        return '%s%s' % (objvalue.strip(), suffix)
    return suffixfilter
def edfilter(obj, objvalue, context):
    """Append ', ed.' or ', eds.' (plural when the value contains ' and ')."""
    if not objvalue:
        return objvalue
    suffix = ', eds.' if ' and ' in objvalue else ', ed.'
    return objvalue + suffix
def makeprefixfilter(prefix):
    """Return a filter prepending ``prefix`` to non-empty values."""
    def prefixfilter(obj, objvalue, context):
        if not objvalue:
            return objvalue
        return '%s%s' % (prefix, objvalue.strip())
    return prefixfilter
def bibitemfilter(obj, objvalue, context):
    """Wrap a formatted reference in a LaTeX \\bibitem entry."""
    if not objvalue:
        return objvalue
    # An optional alpha-style label becomes the bracketed \bibitem argument.
    label = obj._format(*(context + ('label',)))
    if label:
        label = '[%s]' % label
    return '\\bibitem%s{%s}\n%s\n\n' % (label, obj.citation, objvalue.strip())
def emptyfilter(obj, objvalue, context):
    """Filter that discards its value entirely (always empty)."""
    return ''
def makeuniquefilter():
    """Return a filter that disambiguates repeated citation keys.

    A first occurrence passes through unchanged; a repeat gets the first
    unused letter 'a'..'z' appended.  The used-key list lives in the
    closure, so one filter instance tracks keys across all calls.
    """
    used = []
    def uniquefilter(obj, objvalue, context):
        if objvalue != '':
            if objvalue in used:
                # Strings are directly iterable; no list() wrapper needed.
                for char in 'abcdefghijklmnopqrstuvwxyz':
                    if objvalue + char not in used:
                        objvalue += char
                        break
                else:
                    # BUG FIX: the old 'raise ValueError, msg' statement is
                    # Python-2-only syntax (a SyntaxError on Python 3);
                    # the call form works on both.
                    raise ValueError('too many citations with key %s' % objvalue)
            used.append(objvalue)
        return objvalue
    return uniquefilter
def twodigitfilter(obj, objvalue, context):
    """Keep only the last two characters (e.g. a two-digit year)."""
    last_two = objvalue[-2:]
    return last_two
# Pre-built filters shared by the concrete styles.
infilter = makeprefixfilter('In ')                    # "In <booktitle>"
procfilter = makeprefixfilter('Proc. of ')            # abbreviated proceedings
proceedingsfilter = makeprefixfilter('Proceedings of the ')  # long form
emphfilter = makebracketfilter('\\emph{', '}')        # italics
boldfilter = makebracketfilter('\\textbf{', '}')      # bold
scfilter = makebracketfilter('\\textsc{', '}')        # small caps
bracesfilter = makebracketfilter('{', '}')            # TeX grouping
quotefilter = makebracketfilter("``", "''")           # TeX-style quotes
def conferencetrackfilter(obj, objvalue, context):
    """Prefix a track name with its conference: '<conference>, <track>'."""
    conference = obj._format(*(context + ('conference',)))
    return _punctuate(conference, ',') + objvalue
def killfilter(obj, objvalue, context):
    """Keep the value only when the current field is required by the object."""
    return objvalue if context[-1] in obj._required else ''
def titlecasefilter(obj, objvalue, context):
    # Title-case a TeX string: uppercase the first letter of each word,
    # lowercase the rest, while leaving brace-protected literals, inline
    # math ($...$), and TeX commands untouched.
    newtitle = ''
    dollars = 0        # consecutive '$' seen (math delimiters)
    dashlen = 0        # length of the current dash run ('--' ends a sentence)
    inmath = False     # inside $...$
    inliteral = False  # inside a {...} group with no nested specials
    incommand = False  # inside a \command
    wordbreak = True
    sentencebreak = True
    for i, char in enumerate(objvalue):
        if char == '{':
            # A '{' starts a protected literal only if the next special
            # character is its matching '}' (no commands/braces inside).
            close = _protectre.search(objvalue[i+1:])
            inliteral = not incommand and (close is not None and close.group() == '}')
        if char == '}':
            inliteral = False
        if char == '\\':
            incommand = True
        elif char.isspace():
            incommand = False
        if char == '-':
            dashlen += 1
        else:
            dashlen = 0
        if char == '$':
            dollars += 1
        elif dollars > 0:
            # Toggle math mode once the run of '$'s ends.
            inmath = not inmath
            dollars = 0
        if not (inliteral or inmath or incommand):
            if wordbreak:
                newtitle += char.upper()
            else:
                newtitle += char.lower()
        else:
            newtitle += char
        # Track sentence and word boundaries for the next character.
        sentencebreak = (not inliteral and not inmath and not incommand and (char in '!?:.' or dashlen > 1)) or (sentencebreak and (char.isspace() or incommand or inmath or char == '{'))
        wordbreak = sentencebreak or (not inliteral and not inmath and not incommand and (char.isspace() or char in ',-')) or (wordbreak and (incommand or inmath or char == '{'))
        if not char.isalnum() and char not in '_\\':
            incommand = False
    return newtitle
def lowertitlecasefilter(obj, objvalue, context):
    # Sentence-case a TeX string: uppercase only the first letter of each
    # sentence, lowercase everything else, leaving brace-protected
    # literals, inline math, and TeX commands untouched.  Same state
    # machine as titlecasefilter but without per-word capitalization.
    newtitle = ''
    dollars = 0        # consecutive '$' seen (math delimiters)
    dashlen = 0        # length of the current dash run
    inmath = False
    inliteral = False
    incommand = False
    sentencebreak = True
    for i, char in enumerate(objvalue):
        if char == '{':
            # '{' protects a literal only when immediately closed by '}'.
            close = _protectre.search(objvalue[i+1:])
            inliteral = not incommand and (close is not None and close.group() == '}')
        if char == '}':
            inliteral = False
        if char == '\\':
            incommand = True
        elif char.isspace():
            incommand = False
        if char == '-':
            dashlen += 1
        else:
            dashlen = 0
        if char == '$':
            dollars += 1
        elif dollars > 0:
            inmath = not inmath
            dollars = 0
        if not (inliteral or inmath or incommand):
            if sentencebreak:
                newtitle += char.upper()
            else:
                newtitle += char.lower()
        else:
            newtitle += char
        sentencebreak = (not inliteral and not inmath and not incommand and (char in '!?:.' or dashlen > 1)) or (sentencebreak and (char.isspace() or incommand or inmath or char == '{'))
        if not char.isalnum() and char not in '_\\':
            incommand = False
    return newtitle
def uppercasefilter(obj, objvalue, context):
    """Upper-case every character of `objvalue` except those inside
    brace-protected literals ({...}), math mode ($...$), or backslash
    commands, which are passed through unchanged.

    `obj` and `context` are accepted for the filter interface but unused.
    """
    pieces = []
    dollar_run = 0   # consecutive '$' characters seen
    dash_run = 0     # consecutive '-' seen; tracked for parity with the
                     # other case filters, it does not affect the output
    in_math = False
    in_literal = False
    in_cmd = False
    for i, char in enumerate(objvalue):
        if char == '{':
            # Protect the braced group only if a plain '}' closes it.
            closing = _protectre.search(objvalue[i+1:])
            in_literal = not in_cmd and (closing is not None and closing.group() == '}')
        if char == '}':
            in_literal = False
        if char == '\\':
            in_cmd = True
        elif char.isspace():
            in_cmd = False
        dash_run = dash_run + 1 if char == '-' else 0
        if char == '$':
            dollar_run += 1
        elif dollar_run > 0:
            in_math = not in_math
            dollar_run = 0
        pieces.append(char if (in_literal or in_math or in_cmd) else char.upper())
        if not char.isalnum() and char not in '_\\':
            in_cmd = False
    return ''.join(pieces)
def maketitlephrasefilter(titlephrases):
    """Build a filter that, at every word boundary, replaces a matching
    phrase from `titlephrases` (longest match wins, case-insensitive)
    with the phrase's original casing, leaving all other text unchanged.
    """
    def titlephrasefilter(obj, objvalue, context):
        """Apply the phrase replacement to `objvalue`.

        `obj` and `context` are accepted for the filter interface but
        are not used.
        """
        newtitle = ''
        ignoreuntil = 0        # index up to which chars were consumed by a matched phrase
        dollars = 0            # consecutive '$' seen (handles $$ display math)
        dashlen = 0            # consecutive '-' seen ('--' acts as a sentence break)
        inmath = False         # inside a $...$ region
        inliteral = False      # inside a brace-protected {...} region
        incommand = False      # inside a \command token
        wordbreak = True       # next char starts a word
        sentencebreak = True   # next char starts a sentence
        for i, char in enumerate(objvalue):
            if char == '{':
                # Protect the braced group only if a plain '}' closes it.
                close = _protectre.search(objvalue[i+1:])
                inliteral = not incommand and (close is not None and close.group() == '}')
            if char == '}':
                inliteral = False
            if char == '\\':
                incommand = True
            elif char.isspace():
                incommand = False
            if char == '-':
                dashlen += 1
            else:
                dashlen = 0
            if char == '$':
                dollars += 1
            elif dollars > 0:
                inmath = not inmath
                dollars = 0
            if i >= ignoreuntil:
                if wordbreak and not (inliteral or inmath or incommand):
                    # Look for the longest phrase starting here; a match must
                    # end at a non-alphanumeric boundary (or near end of string).
                    match = ''
                    for phrase in titlephrases:
                        if objvalue.lower().startswith(phrase.lower(), i) and len(phrase) > len(match) and (i + len(phrase) >= len(objvalue) - 1 or not objvalue[i + len(phrase)].isalnum()):
                            match = phrase
                    if len(match) > 0:
                        # Emit the phrase with its canonical casing and skip past it.
                        ignoreuntil = i + len(match)
                        newtitle += match
                    else:
                        newtitle += char
            else:
                newtitle += char
            # Sentence/word break tracking mirrors the other case filters.
            sentencebreak = (not inliteral and not inmath and not incommand and (char in '!?:.' or dashlen > 1)) or (sentencebreak and (char.isspace() or incommand or inmath or char == '{'))
            wordbreak = sentencebreak or (not inliteral and not inmath and not incommand and (char.isspace() or char in ',-')) or (wordbreak and (incommand or inmath or char == '{'))
            if not char.isalnum() and char not in '_\\':
                incommand = False
        return newtitle
    return titlephrasefilter
def makelowerphrasefilter(lowerphrases):
    """Build a filter that lower-cases occurrences of the phrases in
    `lowerphrases` at word boundaries, except at the start of a sentence
    (longest match wins, case-insensitive). Other text is unchanged.
    """
    def lowerphrasefilter(obj, objvalue, context):
        """Apply the lower-casing phrase replacement to `objvalue`.

        `obj` and `context` are accepted for the filter interface but
        are not used.
        """
        newtitle = ''
        ignoreuntil = 0        # index up to which chars were consumed by a matched phrase
        dollars = 0            # consecutive '$' seen (handles $$ display math)
        dashlen = 0            # consecutive '-' seen ('--' acts as a sentence break)
        inmath = False         # inside a $...$ region
        inliteral = False      # inside a brace-protected {...} region
        incommand = False      # inside a \command token
        wordbreak = True       # next char starts a word
        sentencebreak = True   # next char starts a sentence
        for i, char in enumerate(objvalue):
            if char == '{':
                # Protect the braced group only if a plain '}' closes it.
                close = _protectre.search(objvalue[i+1:])
                inliteral = not incommand and (close is not None and close.group() == '}')
            if char == '}':
                inliteral = False
            if char == '\\':
                incommand = True
            elif char.isspace():
                incommand = False
            if char == '-':
                dashlen += 1
            else:
                dashlen = 0
            if char == '$':
                dollars += 1
            elif dollars > 0:
                inmath = not inmath
                dollars = 0
            if i >= ignoreuntil:
                # Unlike the title-phrase filter, a sentence start is excluded
                # so the first word of a sentence keeps its capital.
                if wordbreak and not (sentencebreak or inliteral or inmath or incommand):
                    match = ''
                    for phrase in lowerphrases:
                        if objvalue.lower().startswith(phrase.lower(), i) and len(phrase) > len(match) and (i + len(phrase) >= len(objvalue) - 1 or not objvalue[i + len(phrase)].isalnum()):
                            match = phrase.lower()
                    if len(match) > 0:
                        # Emit the phrase lower-cased and skip past it.
                        ignoreuntil = i + len(match)
                        newtitle += match
                    else:
                        newtitle += char
            else:
                newtitle += char
            # Sentence/word break tracking mirrors the other case filters.
            sentencebreak = (not inliteral and not inmath and not incommand and (char in '!?:.' or dashlen > 1)) or (sentencebreak and (char.isspace() or incommand or inmath or char == '{'))
            wordbreak = sentencebreak or (not inliteral and not inmath and not incommand and (char.isspace() or char in ',-')) or (wordbreak and (incommand or inmath or char == '{'))
            if not char.isalnum() and char not in '_\\':
                incommand = False
        return newtitle
    return lowerphrasefilter
def listproducer(obj, value, context):
    """Return a shallow copy of `obj` when it is a list, otherwise None.

    `value` and `context` are accepted for the producer interface but
    are not used.
    """
    return list(obj) if isinstance(obj, list) else None
# Register the default producer and sort filters with the core classes.
# NOTE: the original last line carried stray corrupted text ("| PypiClean")
# after the call, which would raise a NameError at import time; removed.
ObjectList._addproducer(listproducer, 'value')
Object._addlistfilter(alllastfirstlistfilter, 'sort', 'author')
Object._addfilter(titlecasefilter, 'sort', 'author')
/GCN4LP-0.1-py3-none-any.whl/src/graph_att_gae/train.py | import scipy.sparse as sp
import numpy as np
import torch
import time
import os
from configparser import ConfigParser
import sys
sys.path.append('/home/shiyan/project/gcn_for_prediction_of_protein_interactions/')
from src.util.load_data import load_data, sparse_to_tuple, mask_test_edges, preprocess_graph
from src.util.loss import gae_loss_function, vgae_loss_function
from src.util.metrics import get_roc_score
from src.util import define_optimizer
from src.graph_att_gae.model import GATModelVAE
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Train():
    """Driver that trains the graph-attention (V)GAE link-prediction model.

    All hyper-parameters are read from a ``config.cfg`` file; the best
    validation embedding is saved to disk and final ROC/AP scores are
    printed for the held-out test edges.
    """

    def __init__(self):
        pass

    def train_model(self, config_path):
        """Train the model using the settings found at ``config_path``.

        :param config_path: path to a ``config.cfg`` file (must literally be
            named ``config.cfg``; anything else raises FileNotFoundError).
        :raises FileNotFoundError: if ``config_path`` does not exist or is
            not named ``config.cfg``.
        """
        if os.path.exists(config_path) and (os.path.split(config_path)[1].split('.')[0] == 'config') and (
                os.path.splitext(config_path)[1].split('.')[1] == 'cfg'):
            # load config file
            config = ConfigParser()
            config.read(config_path)
            # only the first section of the ini file is used
            section = config.sections()[0]

            # data catalog path
            data_catalog = config.get(section, "data_catalog")

            # train file path
            train_file_name = config.get(section, "train_file_name")

            # model save/load path
            model_path = config.get(section, "model_path")

            # model param config
            hidden_dim1 = config.getint(section, "hidden_dim1")
            hidden_dim2 = config.getint(section, "hidden_dim2")
            hidden_dim3 = config.getint(section, 'hidden_dim3')
            dropout = config.getfloat(section, "dropout")
            vae_bool = config.getboolean(section, 'vae_bool')
            alpha = config.getfloat(section, 'alpha')
            lr = config.getfloat(section, "lr")
            lr_decay = config.getfloat(section, 'lr_decay')
            weight_decay = config.getfloat(section, "weight_decay")
            gamma = config.getfloat(section, "gamma")
            momentum = config.getfloat(section, "momentum")
            # NOTE(review): gamma and eps are read but never used below.
            eps = config.getfloat(section, "eps")
            clip = config.getfloat(section, "clip")
            epochs = config.getint(section, "epochs")
            optimizer_name = config.get(section, "optimizer")

            # Load the graph data (adjacency matrix) for training.
            adj = load_data(os.path.join(data_catalog, train_file_name))
            num_nodes = adj.shape[0]
            num_edges = adj.sum()
            # Node features are just the identity matrix (featureless GAE).
            features = sparse_to_tuple(sp.identity(num_nodes))
            num_features = features[2][1]

            # Remove the diagonal elements of the adjacency matrix.
            # The right-hand part below takes adj's diagonal (1-D), adds an
            # axis, and builds a diagonal matrix holding only those values.
            adj_orig = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
            adj_orig.eliminate_zeros()
            adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj_orig)
            adj = adj_train

            # Returns coords, data, shape of D^{-0.5} S D^{-0.5}, where S = A + I.
            adj_norm = preprocess_graph(adj)
            adj_label = adj_train + sp.eye(adj_train.shape[0])
            # adj_label = sparse_to_tuple(adj_label)
            adj_label = torch.FloatTensor(adj_label.toarray()).to(DEVICE)

            # The block comment below (in Chinese) explains pos_weight: since
            # positive (existing) and negative (absent) edges are imbalanced,
            # the binary cross-entropy loss weights positive edges by
            # pos_weight (negatives keep weight 1) to rebalance the loss.
            '''
            注意,adj的每个元素非1即0。pos_weight是用于训练的邻接矩阵中负样本边(既不存在的边)和正样本边的倍数(即比值),这个数值在二分类交叉熵损失函数中用到,
            如果正样本边所占的比例和负样本边所占比例失衡,比如正样本边很多,负样本边很少,那么在求loss的时候可以提供weight参数,将正样本边的weight设置小一点,负样本边的weight设置大一点,
            此时能够很好的平衡两类在loss中的占比,任务效果可以得到进一步提升。参考:https://www.zhihu.com/question/383567632
            负样本边的weight都为1,正样本边的weight都为pos_weight
            '''
            pos_weight = float(adj.shape[0] * adj.shape[0] - num_edges) / num_edges
            norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)

            # create model
            print('create model ...')
            model = GATModelVAE(num_features, hidden_dim1=hidden_dim1, hidden_dim2=hidden_dim2, hidden_dim3=hidden_dim3, dropout=dropout, alpha=alpha, vae_bool=vae_bool)

            # define optimizer
            if optimizer_name == 'adam':
                optimizer = define_optimizer.define_optimizer_adam(model, lr=lr, weight_decay=weight_decay)
            elif optimizer_name == 'adamw':
                optimizer = define_optimizer.define_optimizer_adamw(model, lr=lr, weight_decay=weight_decay)
            elif optimizer_name == 'sgd':
                optimizer = define_optimizer.define_optimizer_sgd(model, lr=lr, momentum=momentum,
                                                                  weight_decay=weight_decay)
            elif optimizer_name == 'adagrad':
                optimizer = define_optimizer.define_optimizer_adagrad(model, lr=lr, lr_decay=lr_decay,
                                                                      weight_decay=weight_decay)
            elif optimizer_name == 'rmsprop':
                optimizer = define_optimizer.define_optimizer_rmsprop(model, lr=lr, weight_decay=weight_decay,
                                                                      momentum=momentum)
            elif optimizer_name == 'adadelta':
                optimizer = define_optimizer.define_optimizer_adadelta(model, lr=lr, weight_decay=weight_decay)
            else:
                raise NameError('No define optimization function name!')

            model = model.to(DEVICE)
            # A sparse tensor is represented as a pair of dense tensors: a
            # 1-D value tensor and a 2-D index tensor; it is constructed by
            # supplying both (legacy torch.sparse.FloatTensor API).
            adj_norm = torch.sparse.FloatTensor(torch.LongTensor(adj_norm[0].T),
                                                torch.FloatTensor(adj_norm[1]),
                                                torch.Size(adj_norm[2]))
            features = torch.sparse.FloatTensor(torch.LongTensor(features[0].T),
                                                torch.FloatTensor(features[1]),
                                                torch.Size(features[2])).to_dense()
            adj_norm = adj_norm.to(DEVICE)
            features = features.to(DEVICE)
            norm = torch.FloatTensor(np.array(norm)).to(DEVICE)
            pos_weight = torch.tensor(pos_weight).to(DEVICE)
            num_nodes = torch.tensor(num_nodes).to(DEVICE)

            print('start training...')
            best_valid_roc_score = float('-inf')
            hidden_emb = None
            model.train()
            for epoch in range(epochs):
                t = time.time()
                optimizer.zero_grad()
                recovered, mu, logvar = model(features, adj_norm)
                if vae_bool:
                    loss = vgae_loss_function(preds=recovered, labels=adj_label,
                                              mu=mu, logvar=logvar, n_nodes=num_nodes,
                                              norm=norm, pos_weight=pos_weight)
                else:
                    loss = gae_loss_function(preds=recovered, labels=adj_label, norm=norm, pos_weight=pos_weight)
                loss.backward()
                # gradient clipping to stabilise training
                torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
                cur_loss = loss.item()
                optimizer.step()

                hidden_emb = mu.data.cpu().numpy()
                # Evaluate on the validation set.
                roc_score, ap_score = get_roc_score(hidden_emb, adj_orig, val_edges, val_edges_false)
                # Keep the best validation ROC score.
                if roc_score > best_valid_roc_score:
                    best_valid_roc_score = roc_score
                    # No need to save the whole model; saving hidden_emb is
                    # enough because decoding later is done via inner
                    # products of hidden_emb.
                    np.save(model_path, hidden_emb)

                print("Epoch:", '%04d' % (epoch + 1), "train_loss = ", "{:.5f}".format(cur_loss),
                      "val_roc_score = ", "{:.5f}".format(roc_score),
                      "average_precision_score = ", "{:.5f}".format(ap_score),
                      "time=", "{:.5f}".format(time.time() - t)
                      )

            print("Optimization Finished!")

            # Evaluate on the test set.
            roc_score, ap_score = get_roc_score(hidden_emb, adj_orig, test_edges, test_edges_false)
            print('test roc score: {}'.format(roc_score))
            print('test ap score: {}'.format(ap_score))
        else:
            raise FileNotFoundError('File config.cfg not found : ' + config_path)
if __name__ == '__main__':
    # Expect a config.cfg file in the current working directory.
    config_path = os.path.join(os.getcwd(), 'config.cfg')
    train = Train()
    train.train_model(config_path)
/BittyTax-0.5.1.tar.gz/BittyTax-0.5.1/src/bittytax/conv/parsers/nexo.py |
import re
from decimal import Decimal
from ...config import config
from ..dataparser import DataParser
from ..exceptions import UnexpectedTypeError
from ..out_record import TransactionOutRecord
# Wallet name attached to every record produced by this parser.
WALLET = "Nexo"

# Map Nexo's internal/network-specific ticker variants to canonical symbols.
ASSET_NORMALISE = {
    "NEXONEXO": "NEXO",
    "NEXOBNB": "NEXO",
    "NEXOBEP2": "NEXO",
    "USDTERC": "USDT",
}
def parse_nexo(data_row, parser, **_kwargs):
    """Convert one row of a Nexo transactions export into a BittyTax record.

    Handles the several CSV layouts Nexo has used over time (a single
    combined "Currency"/"Amount" pair vs. separate input/output columns)
    and maps each Nexo transaction type onto the matching BittyTax record
    type. Rejected rows and purely informational rows are skipped.

    :param data_row: the row being parsed; its ``t_record`` is set on success
    :param parser: the DataParser matching this export layout
    :raises UnexpectedTypeError: for an unrecognised "Type" value
    """
    row_dict = data_row.row_dict
    # Nexo exports timestamps in Europe/Zurich local time.
    data_row.timestamp = DataParser.parse_timestamp(row_dict["Date / Time"], tz="Europe/Zurich")

    if "rejected" in row_dict["Details"]:
        # Skip failed transactions
        return

    if "Currency" in row_dict:
        # Older layout: one combined currency column ("XXX" or "XXX/YYY").
        if row_dict["Type"] != "Exchange":
            buy_asset = row_dict["Currency"]
            sell_asset = row_dict["Currency"]
        else:
            buy_asset = row_dict["Currency"].split("/")[1]
            sell_asset = row_dict["Currency"].split("/")[0]
    else:
        # Newer layout: separate input/output currency columns.
        buy_asset = row_dict["Output Currency"]
        sell_asset = row_dict["Input Currency"]

    # Normalise Nexo's internal ticker variants to canonical symbols.
    for local_asset in ASSET_NORMALISE:
        buy_asset = buy_asset.replace(local_asset, ASSET_NORMALISE[local_asset])
        sell_asset = sell_asset.replace(local_asset, ASSET_NORMALISE[local_asset])

    if "Amount" in row_dict:
        if row_dict["Type"] != "Exchange":
            buy_quantity = row_dict["Amount"]
            sell_quantity = abs(Decimal(row_dict["Amount"]))
        else:
            # Exchange rows encode both legs as "-<sold> / +<bought>".
            match = re.match(r"^-(\d+|\d+\.\d+) / \+(\d+|\d+\.\d+)$", row_dict["Amount"])

            buy_quantity = None
            sell_quantity = None
            if match:
                buy_quantity = match.group(2)
                sell_quantity = match.group(1)
    else:
        buy_quantity = row_dict["Output Amount"]
        sell_quantity = abs(Decimal(row_dict["Input Amount"]))

    # Convert the USD value unless the asset is already the local currency.
    if row_dict.get("USD Equivalent") and buy_asset != config.ccy:
        value = DataParser.convert_currency(
            row_dict["USD Equivalent"].strip("$"), "USD", data_row.timestamp
        )
    else:
        value = None

    if row_dict["Type"] in ("Deposit", "ExchangeDepositedOn"):
        data_row.t_record = TransactionOutRecord(
            TransactionOutRecord.TYPE_DEPOSIT,
            data_row.timestamp,
            buy_quantity=buy_quantity,
            buy_asset=buy_asset,
            buy_value=value,
            wallet=WALLET,
        )
    elif row_dict["Type"] in ("Interest", "FixedTermInterest", "InterestAdditional"):
        # Only positive amounts are interest received by the user.
        if ("Amount" in row_dict and Decimal(row_dict["Amount"]) > 0) or (
            "Input Amount" in row_dict and Decimal(row_dict["Input Amount"]) > 0
        ):
            data_row.t_record = TransactionOutRecord(
                TransactionOutRecord.TYPE_INTEREST,
                data_row.timestamp,
                buy_quantity=buy_quantity,
                buy_asset=buy_asset,
                buy_value=value,
                wallet=WALLET,
            )
        else:
            # Interest on loan is just informational
            return
    elif row_dict["Type"] == "Dividend":
        data_row.t_record = TransactionOutRecord(
            TransactionOutRecord.TYPE_DIVIDEND,
            data_row.timestamp,
            buy_quantity=buy_quantity,
            buy_asset=buy_asset,
            buy_value=value,
            wallet=WALLET,
        )
    elif row_dict["Type"] in (
        "Bonus",
        "Cashback",
        "Exchange Cashback",
        "ReferralBonus",
    ):
        data_row.t_record = TransactionOutRecord(
            TransactionOutRecord.TYPE_GIFT_RECEIVED,
            data_row.timestamp,
            buy_quantity=buy_quantity,
            buy_asset=buy_asset,
            buy_value=value,
            wallet=WALLET,
        )
    elif row_dict["Type"] in ("Exchange", "CreditCardStatus"):
        data_row.t_record = TransactionOutRecord(
            TransactionOutRecord.TYPE_TRADE,
            data_row.timestamp,
            buy_quantity=buy_quantity,
            buy_asset=buy_asset,
            buy_value=value,
            sell_quantity=sell_quantity,
            sell_asset=sell_asset,
            sell_value=value,
            wallet=WALLET,
        )
    elif row_dict["Type"] in ("Withdrawal", "WithdrawExchanged"):
        data_row.t_record = TransactionOutRecord(
            TransactionOutRecord.TYPE_WITHDRAWAL,
            data_row.timestamp,
            sell_quantity=sell_quantity,
            sell_asset=sell_asset,
            sell_value=value,
            wallet=WALLET,
        )
    elif row_dict["Type"] == "Liquidation":
        # Repayment of loan
        data_row.t_record = TransactionOutRecord(
            TransactionOutRecord.TYPE_SPEND,
            data_row.timestamp,
            sell_quantity=sell_quantity,
            sell_asset=sell_asset,
            sell_value=value,
            wallet=WALLET,
        )
    elif row_dict["Type"] in (
        "WithdrawalCredit",
        "UnlockingTermDeposit",
        "LockingTermDeposit",
        "Repayment",
    ):
        # Skip loan operations which are not disposals or are just informational
        return
    elif row_dict["Type"] in (
        "DepositToExchange",
        "ExchangeToWithdraw",
        "TransferIn",
        "TransferOut",
    ):
        # Skip internal operations
        return
    else:
        raise UnexpectedTypeError(parser.in_header.index("Type"), "Type", row_dict["Type"])
# Register each historical Nexo export layout with the same row handler.

# Layout 1: combined Currency/Amount columns, with USD Equivalent.
DataParser(
    DataParser.TYPE_SAVINGS,
    "Nexo",
    [
        "Transaction",
        "Type",
        "Currency",
        "Amount",
        "USD Equivalent",
        "Details",
        "Outstanding Loan",
        "Date / Time",
    ],
    worksheet_name="Nexo",
    row_handler=parse_nexo,
)

# Layout 2: combined Currency/Amount columns, without USD Equivalent.
DataParser(
    DataParser.TYPE_SAVINGS,
    "Nexo",
    [
        "Transaction",
        "Type",
        "Currency",
        "Amount",
        "Details",
        "Outstanding Loan",
        "Date / Time",
    ],
    worksheet_name="Nexo",
    row_handler=parse_nexo,
)

# Layout 3: separate input/output columns, with Outstanding Loan.
DataParser(
    DataParser.TYPE_SAVINGS,
    "Nexo",
    [
        "Transaction",
        "Type",
        "Input Currency",
        "Input Amount",
        "Output Currency",
        "Output Amount",
        "USD Equivalent",
        "Details",
        "Outstanding Loan",
        "Date / Time",
    ],
    worksheet_name="Nexo",
    row_handler=parse_nexo,
)

# Layout 4: separate input/output columns, without Outstanding Loan.
DataParser(
    DataParser.TYPE_SAVINGS,
    "Nexo",
    [
        "Transaction",
        "Type",
        "Input Currency",
        "Input Amount",
        "Output Currency",
        "Output Amount",
        "USD Equivalent",
        "Details",
        "Date / Time",
    ],
    worksheet_name="Nexo",
    row_handler=parse_nexo,
)
/MambuPy-2.0.0b22.tar.gz/MambuPy-2.0.0b22/mambupy/rest/mambutask.py | from ..mambugeturl import gettasksurl
from .mambustruct import MambuStruct
from .mamburestutils import MambuStructIterator
mod_urlfunc = gettasksurl
class MambuTask(MambuStruct):
    """A Task from Mambu.

    Don't instantiate this class directly. It's mostly used by
    MambuTasks to configure each of its elements as MambuTask
    objects. There's no suitable urlfunc to use to retrieve just a
    specific transaction from a loan account. In fact, you can look at
    the code of MambuTasks.convert_dict_to_attrs(), it uses urlfunc
    and entid = None , so no connection to Mambu will be made, never,
    for any particular MambuTask object.
    """

    def __init__(self, urlfunc=mod_urlfunc, entid="", *args, **kwargs):
        """Tasks done here:

        Just initializes the MambuStruct.
        """
        MambuStruct.__init__(self, urlfunc, entid, *args, **kwargs)

    def __repr__(self):
        """Instead of the default id given by the parent class, shows
        the id, title, dueDate, status of the task.
        """
        # Attrs may be nested under a "task" key or flat, depending on how
        # the object was built; fall back progressively on KeyError.
        try:
            return self.__class__.__name__ + " - taskid: '%s', %s, %s" % (
                self["task"]["id"],
                self["task"]["dueDate"],
                self["task"]["status"],
            )
        except KeyError:
            try:
                return self.__class__.__name__ + " - taskid: '%s'" % self["task"]["id"]
            except KeyError:
                try:
                    return self.__class__.__name__ + " - taskid: '%s', %s, %s" % (
                        self["id"],
                        self["dueDate"],
                        self["status"],
                    )
                except KeyError:
                    return self.__class__.__name__ + " - taskid: '%s'" % self["id"]

    def close(self, *args, **kwargs):
        """Mark this task as COMPLETED in Mambu.

        POSTs the task with status COMPLETED and today's date as the
        completion date, then refreshes this object's attrs from the
        posted payload (with dates parsed back into datetimes).
        """
        from datetime import datetime

        # Build the POST payload expected by the Mambu tasks endpoint.
        data = {
            "task": {
                "status": "COMPLETED",
                "completionDate": datetime.now().strftime("%Y-%m-%d"),
                "encodedKey": self["encodedKey"],
                "dueDate": self["dueDate"].strftime("%Y-%m-%d"),
                "title": self["title"],
                "description": self["description"],
                "assignedUserKey": self["assignedUserKey"],
            }
        }
        # Link fields are optional on the task; copy them only if present.
        try:
            data["task"]["taskLinkType"] = self["taskLinkType"]
        except KeyError:
            pass
        try:
            data["task"]["taskLinkKey"] = self["taskLinkKey"]
        except KeyError:
            pass

        # Temporarily switch the underlying MambuStruct (name-mangled
        # attributes) to POST mode and send the update.
        self._MambuStruct__method = "POST"
        self._MambuStruct__urlfunc = gettasksurl
        self._MambuStruct__data = data
        self.connect(*args, **kwargs)

        # Rebuild local attrs from the payload, converting the date strings
        # back into datetime objects.
        data["task"]["id"] = self["task"]["id"]
        data["task"]["completionDate"] = datetime.strptime(
            data["task"]["completionDate"], "%Y-%m-%d"
        )
        data["task"]["dueDate"] = datetime.strptime(data["task"]["dueDate"], "%Y-%m-%d")
        self.attrs = data["task"]
        self.convert_dict_to_attrs()

        # Restore the default GET mode.
        self._MambuStruct__method = "GET"
        self._MambuStruct__urlfunc = None
        self._MambuStruct__data = None
class MambuTasks(MambuStruct):
    """A list of Tasks from Mambu."""

    def __init__(self, urlfunc=mod_urlfunc, entid="", *args, **kwargs):
        MambuStruct.__init__(self, urlfunc, entid, *args, **kwargs)

    def __iter__(self):
        # Iterate over the wrapped MambuTask elements.
        return MambuStructIterator(self.attrs)

    def convert_dict_to_attrs(self, *args, **kwargs):
        """The trick for iterable Mambu Objects comes here:

        You iterate over each element of the responded List from Mambu,
        and create a Mambu Task object for each one, initializing
        them one at a time, and changing the attrs attribute (which just
        holds a list of plain dictionaries) with a MambuTask just
        created.
        """
        for n, a in enumerate(self.attrs):
            # ok ok, I'm modifying elements of a list while iterating it. BAD PRACTICE!
            try:
                params = self.params
            except AttributeError:
                params = {}
            kwargs.update(params)
            # Allow subclasses to swap in their own element class.
            try:
                self.mambutaskclass
            except AttributeError:
                self.mambutaskclass = MambuTask

            # urlfunc/entid are None so no request is made per element.
            task = self.mambutaskclass(urlfunc=None, entid=None, *args, **kwargs)
            task.init(a, *args, **kwargs)
            task._MambuStruct__urlfunc = gettasksurl
            self.attrs[n] = task
/Another_One_Messenger_Server-0.9.tar.gz/Another_One_Messenger_Server-0.9/src/server/config_window.py | from PyQt5.QtWidgets import QDialog, QLabel, QLineEdit, QPushButton, QFileDialog, QMessageBox
from PyQt5.QtCore import Qt
import os
class ConfigWindow(QDialog):
    '''Server settings dialog window.'''

    def __init__(self, config):
        super().__init__()
        # config is a configparser-style object with a SETTINGS section.
        self.config = config
        self.initUI()

    def initUI(self):
        '''Create and lay out the window widgets.'''
        self.setFixedSize(365, 260)
        self.setWindowTitle('Настройки сервера')
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setModal(True)

        # Label for the database file path:
        self.db_path_label = QLabel('Путь до файла базы данных: ', self)
        self.db_path_label.move(10, 10)
        self.db_path_label.setFixedSize(240, 15)

        # Read-only line showing the database path
        self.db_path = QLineEdit(self)
        self.db_path.setFixedSize(250, 20)
        self.db_path.move(10, 30)
        self.db_path.setReadOnly(True)

        # Button to browse for the path.
        self.db_path_select = QPushButton('Обзор...', self)
        self.db_path_select.move(275, 28)

        # Label for the database file name
        self.db_file_label = QLabel('Имя файла базы данных: ', self)
        self.db_file_label.move(10, 68)
        self.db_file_label.setFixedSize(180, 15)

        # Input field for the file name
        self.db_file = QLineEdit(self)
        self.db_file.move(200, 66)
        self.db_file.setFixedSize(150, 20)

        # Label for the port number
        self.port_label = QLabel('Номер порта для соединений:', self)
        self.port_label.move(10, 108)
        self.port_label.setFixedSize(180, 15)

        # Input field for the port number
        self.port = QLineEdit(self)
        self.port.move(200, 108)
        self.port.setFixedSize(150, 20)

        # Label for the listen address
        self.ip_label = QLabel('С какого IP принимаем соединения:', self)
        self.ip_label.move(10, 148)
        self.ip_label.setFixedSize(180, 15)

        # Note reminding that an empty field means all addresses.
        self.ip_label_note = QLabel(
            ' оставьте это поле пустым, чтобы\n принимать соединения с любых адресов.',
            self)
        self.ip_label_note.move(10, 168)
        self.ip_label_note.setFixedSize(500, 30)

        # Input field for the IP address
        self.ip = QLineEdit(self)
        self.ip.move(200, 148)
        self.ip.setFixedSize(150, 20)

        # Button to save the settings
        self.save_btn = QPushButton('Сохранить', self)
        self.save_btn.move(190, 220)

        # Button to close the window
        self.close_button = QPushButton('Закрыть', self)
        self.close_button.move(275, 220)
        self.close_button.clicked.connect(self.close)

        self.db_path_select.clicked.connect(self.open_file_dialog)

        self.show()

        # Pre-fill the fields from the current configuration.
        self.db_path.insert(self.config['SETTINGS']['Database_path'])
        self.db_file.insert(self.config['SETTINGS']['Database_file'])
        self.port.insert(self.config['SETTINGS']['Default_port'])
        self.ip.insert(self.config['SETTINGS']['Listen_Address'])
        self.save_btn.clicked.connect(self.save_server_config)

    def open_file_dialog(self):
        '''Handler that opens a directory-selection dialog.'''
        global dialog
        dialog = QFileDialog(self)
        path = dialog.getExistingDirectory()
        # Normalise to Windows-style separators.
        path = path.replace('/', '\\')
        self.db_path.clear()
        self.db_path.insert(path)

    def save_server_config(self):
        '''
        Save the settings.

        Validates the entered values and, if everything is correct,
        writes them out to the ini file.
        '''
        global config_window
        message = QMessageBox()
        self.config['SETTINGS']['Database_path'] = self.db_path.text()
        self.config['SETTINGS']['Database_file'] = self.db_file.text()
        try:
            port = int(self.port.text())
        except ValueError:
            message.warning(self, 'Ошибка', 'Порт должен быть числом')
        else:
            self.config['SETTINGS']['Listen_Address'] = self.ip.text()
            # Only non-privileged port numbers are accepted.
            if 1023 < port < 65536:
                self.config['SETTINGS']['Default_port'] = str(port)
                dir_path = os.path.dirname(os.path.realpath(__file__))
                dir_path = os.path.join(dir_path, '..')
                with open(f"{dir_path}/{'server.ini'}", 'w') as conf:
                    self.config.write(conf)
                    message.information(
                        self, 'OK', 'Настройки успешно сохранены!')
            else:
                message.warning(
                    self, 'Ошибка', 'Порт должен быть от 1024 до 65536')
/NERO-nlp-0.0.8.tar.gz/NERO-nlp-0.0.8/NERO/Core.py | import pandas as pd
import matplotlib.pyplot as plt
class Corpus:
    """
    The Corpus class containing functions to extract data from the corpus.
    """

    def __init__(self, *args, **kwargs):
        # NOTE(review): despite the name, kwargs['df'] is a file path passed
        # to pandas.read_csv, not a DataFrame -- confirm with callers.
        self._data = self._load_data(kwargs['df'])

        # Main corpus/dataframe properties
        self.columns = self._data.columns
        self.shape = self._data.shape
        self.size = self._data.size
        self.values = self._data.values

    def __getattribute__(self, attr):
        """
        If a given attribute is a column name we return the respective
        column. Otherwise the attribute will be called directly.
        """
        try:
            return super(Corpus, self).__getattribute__(attr)
        except AttributeError:
            if attr in self.columns:
                return self._data[attr]
        return super(Corpus, self).__getattribute__(attr)

    def _load_data(self, _path):
        """
        Reading the raw corpus and returning a pandas dataframe.
        """
        # Corpus files are tab-separated and latin1-encoded.
        df = pd.read_csv(
            _path,
            delimiter='\t',
            encoding='latin1'
        )
        return df

    def procset_bind(self):
        """Returns the rows with their procset column set to bind."""
        return self._data[self._data['procset'] == 'bind']

    def procset_gene_phenotype(self):
        """Returns the rows with their procset column set to gene_phenotype."""
        return self._data[self._data['procset'] == 'gene_phenotype']

    def procset_topic_bd(self, topic):
        """
        Returns the rows that have a procset column set to the topic argument.
        topic could be any of following variables: bd, ccc, aut, sch, bc, mrs,
        ll.
        """
        # The procset values for topics are stored as "topic_<name>".
        col = f"topic_{topic}"
        return self._data[self._data['procset'] == col]

    def procset_transcription(self):
        """
        Returns the rows with their procset column set to transcription.
        """
        return self._data[self._data['procset'] == 'transcription']

    def plot_semantic_class(self):
        """
        Plot a bar chart of semantic_class column based on the frequency
        of semantic classes.
        """
        # NOTE(review): this counts 'semantic_class_entity1', not a plain
        # 'semantic_class' column -- confirm against the corpus schema.
        gg = self._data['semantic_class_entity1'].value_counts()
        gg.plot.bar()
        plt.show()

    def plot_protein_domain_entity(self, num=20):
        """
        Plot a bar chart of top `num` protein_domain_entity fields
        based on thir frequencies.
        """
        # Sum the value counts across all protein_domain_entity* columns.
        col_names = [i for i in self.columns if i.startswith('protein_domain_entity')]
        out = pd.Series()
        for col in col_names:
            out = out.add(self._data[col].value_counts(), fill_value=0)
        out[:num].plot.bar()
        plt.show()
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/node_modules/tar/lib/pack.js | 'use strict'
// A readable tar stream creator
// Technically, this is a transform stream that you write paths into,
// and tar format comes out of.
// The `add()` method is like `write()` but returns this,
// and end() return `this` as well, so you can
// do `new Pack(opt).add('files').add('dir').end().pipe(output)
// You could also do something like:
// streamOfPaths().pipe(new Pack()).pipe(new fs.WriteStream('out.tar'))
class PackJob {
constructor (path, absolute) {
this.path = path || './'
this.absolute = absolute
this.entry = null
this.stat = null
this.readdir = null
this.pending = false
this.ignore = false
this.piped = false
}
}
const { Minipass } = require('minipass')
const zlib = require('minizlib')
const ReadEntry = require('./read-entry.js')
const WriteEntry = require('./write-entry.js')
const WriteEntrySync = WriteEntry.Sync
const WriteEntryTar = WriteEntry.Tar
const Yallist = require('yallist')

// A full zero-filled block pair marks the end of a tar archive.
const EOF = Buffer.alloc(1024)

// Symbols used as private method/state keys, hidden from consumers.
const ONSTAT = Symbol('onStat')
const ENDED = Symbol('ended')
const QUEUE = Symbol('queue')
const CURRENT = Symbol('current')
const PROCESS = Symbol('process')
const PROCESSING = Symbol('processing')
const PROCESSJOB = Symbol('processJob')
const JOBS = Symbol('jobs')
const JOBDONE = Symbol('jobDone')
const ADDFSENTRY = Symbol('addFSEntry')
const ADDTARENTRY = Symbol('addTarEntry')
const STAT = Symbol('stat')
const READDIR = Symbol('readdir')
const ONREADDIR = Symbol('onreaddir')
const PIPE = Symbol('pipe')
const ENTRY = Symbol('entry')
const ENTRYOPT = Symbol('entryOpt')
const WRITEENTRYCLASS = Symbol('writeEntryClass')
const WRITE = Symbol('write')
const ONDRAIN = Symbol('ondrain')

const fs = require('fs')
const path = require('path')
const warner = require('./warn-mixin.js')
const normPath = require('./normalize-windows-path.js')
const Pack = warner(class Pack extends Minipass {
constructor (opt) {
super(opt)
opt = opt || Object.create(null)
this.opt = opt
this.file = opt.file || ''
this.cwd = opt.cwd || process.cwd()
this.maxReadSize = opt.maxReadSize
this.preservePaths = !!opt.preservePaths
this.strict = !!opt.strict
this.noPax = !!opt.noPax
this.prefix = normPath(opt.prefix || '')
this.linkCache = opt.linkCache || new Map()
this.statCache = opt.statCache || new Map()
this.readdirCache = opt.readdirCache || new Map()
this[WRITEENTRYCLASS] = WriteEntry
if (typeof opt.onwarn === 'function') {
this.on('warn', opt.onwarn)
}
this.portable = !!opt.portable
this.zip = null
if (opt.gzip) {
if (typeof opt.gzip !== 'object') {
opt.gzip = {}
}
if (this.portable) {
opt.gzip.portable = true
}
this.zip = new zlib.Gzip(opt.gzip)
this.zip.on('data', chunk => super.write(chunk))
this.zip.on('end', _ => super.end())
this.zip.on('drain', _ => this[ONDRAIN]())
this.on('resume', _ => this.zip.resume())
} else {
this.on('drain', this[ONDRAIN])
}
this.noDirRecurse = !!opt.noDirRecurse
this.follow = !!opt.follow
this.noMtime = !!opt.noMtime
this.mtime = opt.mtime || null
this.filter = typeof opt.filter === 'function' ? opt.filter : _ => true
this[QUEUE] = new Yallist()
this[JOBS] = 0
this.jobs = +opt.jobs || 4
this[PROCESSING] = false
this[ENDED] = false
}
[WRITE] (chunk) {
return super.write(chunk)
}
add (path) {
this.write(path)
return this
}
end (path) {
if (path) {
this.write(path)
}
this[ENDED] = true
this[PROCESS]()
return this
}
write (path) {
if (this[ENDED]) {
throw new Error('write after end')
}
if (path instanceof ReadEntry) {
this[ADDTARENTRY](path)
} else {
this[ADDFSENTRY](path)
}
return this.flowing
}
[ADDTARENTRY] (p) {
const absolute = normPath(path.resolve(this.cwd, p.path))
// in this case, we don't have to wait for the stat
if (!this.filter(p.path, p)) {
p.resume()
} else {
const job = new PackJob(p.path, absolute, false)
job.entry = new WriteEntryTar(p, this[ENTRYOPT](job))
job.entry.on('end', _ => this[JOBDONE](job))
this[JOBS] += 1
this[QUEUE].push(job)
}
this[PROCESS]()
}
[ADDFSENTRY] (p) {
const absolute = normPath(path.resolve(this.cwd, p))
this[QUEUE].push(new PackJob(p, absolute))
this[PROCESS]()
}
[STAT] (job) {
job.pending = true
this[JOBS] += 1
const stat = this.follow ? 'stat' : 'lstat'
fs[stat](job.absolute, (er, stat) => {
job.pending = false
this[JOBS] -= 1
if (er) {
this.emit('error', er)
} else {
this[ONSTAT](job, stat)
}
})
}
[ONSTAT] (job, stat) {
this.statCache.set(job.absolute, stat)
job.stat = stat
// now we have the stat, we can filter it.
if (!this.filter(job.path, stat)) {
job.ignore = true
}
this[PROCESS]()
}
[READDIR] (job) {
job.pending = true
this[JOBS] += 1
fs.readdir(job.absolute, (er, entries) => {
job.pending = false
this[JOBS] -= 1
if (er) {
return this.emit('error', er)
}
this[ONREADDIR](job, entries)
})
}
[ONREADDIR] (job, entries) {
this.readdirCache.set(job.absolute, entries)
job.readdir = entries
this[PROCESS]()
}
[PROCESS] () {
if (this[PROCESSING]) {
return
}
this[PROCESSING] = true
for (let w = this[QUEUE].head;
w !== null && this[JOBS] < this.jobs;
w = w.next) {
this[PROCESSJOB](w.value)
if (w.value.ignore) {
const p = w.next
this[QUEUE].removeNode(w)
w.next = p
}
}
this[PROCESSING] = false
if (this[ENDED] && !this[QUEUE].length && this[JOBS] === 0) {
if (this.zip) {
this.zip.end(EOF)
} else {
super.write(EOF)
super.end()
}
}
}
get [CURRENT] () {
return this[QUEUE] && this[QUEUE].head && this[QUEUE].head.value
}
[JOBDONE] (job) {
this[QUEUE].shift()
this[JOBS] -= 1
this[PROCESS]()
}
[PROCESSJOB] (job) {
if (job.pending) {
return
}
if (job.entry) {
if (job === this[CURRENT] && !job.piped) {
this[PIPE](job)
}
return
}
if (!job.stat) {
if (this.statCache.has(job.absolute)) {
this[ONSTAT](job, this.statCache.get(job.absolute))
} else {
this[STAT](job)
}
}
if (!job.stat) {
return
}
// filtered out!
if (job.ignore) {
return
}
if (!this.noDirRecurse && job.stat.isDirectory() && !job.readdir) {
if (this.readdirCache.has(job.absolute)) {
this[ONREADDIR](job, this.readdirCache.get(job.absolute))
} else {
this[READDIR](job)
}
if (!job.readdir) {
return
}
}
// we know it doesn't have an entry, because that got checked above
job.entry = this[ENTRY](job)
if (!job.entry) {
job.ignore = true
return
}
if (job === this[CURRENT] && !job.piped) {
this[PIPE](job)
}
}
[ENTRYOPT] (job) {
return {
onwarn: (code, msg, data) => this.warn(code, msg, data),
noPax: this.noPax,
cwd: this.cwd,
absolute: job.absolute,
preservePaths: this.preservePaths,
maxReadSize: this.maxReadSize,
strict: this.strict,
portable: this.portable,
linkCache: this.linkCache,
statCache: this.statCache,
noMtime: this.noMtime,
mtime: this.mtime,
prefix: this.prefix,
}
}
[ENTRY] (job) {
this[JOBS] += 1
try {
return new this[WRITEENTRYCLASS](job.path, this[ENTRYOPT](job))
.on('end', () => this[JOBDONE](job))
.on('error', er => this.emit('error', er))
} catch (er) {
this.emit('error', er)
}
}
[ONDRAIN] () {
if (this[CURRENT] && this[CURRENT].entry) {
this[CURRENT].entry.resume()
}
}
// like .pipe() but using super, because our write() is special
[PIPE] (job) {
  job.piped = true

  if (job.readdir) {
    // queue each directory child as its own filesystem entry
    job.readdir.forEach(entry => {
      const p = job.path
      const base = p === './' ? '' : p.replace(/\/*$/, '/')
      this[ADDFSENTRY](base + entry)
    })
  }

  const source = job.entry
  const zip = this.zip

  if (zip) {
    source.on('data', chunk => {
      // pause the source when the gzip stream signals backpressure
      if (!zip.write(chunk)) {
        source.pause()
      }
    })
  } else {
    source.on('data', chunk => {
      // pause the source when the downstream write buffer is full
      if (!super.write(chunk)) {
        source.pause()
      }
    })
  }
}
// Pause output; also pauses the gzip stream when compressing.
pause () {
  if (this.zip) {
    this.zip.pause()
  }
  return super.pause()
}
})
// Synchronous variant of Pack: uses the synchronous write-entry class
// and synchronous fs calls, so all work happens in the current tick.
class PackSync extends Pack {
  constructor (opt) {
    super(opt)
    this[WRITEENTRYCLASS] = WriteEntrySync
  }

  // pause/resume are no-ops in sync streams.
  pause () {}
  resume () {}

  [STAT] (job) {
    // honor the follow option: statSync resolves symlinks, lstatSync
    // does not
    const stat = this.follow ? 'statSync' : 'lstatSync'
    this[ONSTAT](job, fs[stat](job.absolute))
  }

  [READDIR] (job, stat) {
    this[ONREADDIR](job, fs.readdirSync(job.absolute))
  }

  // gotta get it all in this tick
  [PIPE] (job) {
    const source = job.entry
    const zip = this.zip

    if (job.readdir) {
      // queue each directory child as its own filesystem entry
      job.readdir.forEach(entry => {
        const p = job.path
        const base = p === './' ? '' : p.replace(/\/*$/, '/')
        this[ADDFSENTRY](base + entry)
      })
    }

    if (zip) {
      source.on('data', chunk => {
        zip.write(chunk)
      })
    } else {
      source.on('data', chunk => {
        super[WRITE](chunk)
      })
    }
  }
}
// Expose the synchronous variant on the async class, and export Pack.
Pack.Sync = PackSync
module.exports = Pack
/DecisionTree-3.4.3.tar.gz/DecisionTree-3.4.3/ExamplesRandomizedTrees/README_for_using_randomized_trees | README for using randomized trees
=================================
This README is specifically for the code in the ExamplesRandomizedTrees subdirectory
of the installation directory.
See the README in the Examples subdirectory of the installation directory for the
main examples for using the DecisionTree module.
You can solve two different kinds of problems through the RandomizedTreesForBigData
class:
--- The problems that involve training a decision tree classifier with a dataset that
suffers from serious imbalances in the populations available for the different
classes. If what you are facing is a two-class problem, you can directly use the
RandomizedTreesForBigData class as currently programmed. This class will create
multiple decision trees for you, each trained with a mixture of training samples
that consists of all the minority class samples and randomly drawn majority class
samples. The classification decisions for new data samples are based on majority
voting by the individual decision trees.
--- If you are faced with a "big data" problem, in the sense that you have access to
a very large training database of samples, you can use the
RandomizedTreesForBigData class to create multiple decision trees by drawing
training datasets randomly from your database. Subsequently, the final
classification decisions can be made by majority voting by the individual trees.
This directory contains the following scripts that you can use to become more
familiar with the RandomizedTreesForBigData class:
(1) randomized_trees_for_classifying_one_test_sample_1.py
This illustrates the "looking for a needle in a haystack" function of the
RandomizedTreesForBigData class.
(2) randomized_trees_for_classifying_one_test_sample_2.py
This illustrates how you may address a big data problem by constructing a set
of decision trees, each based on random draw from your large training database.
(3) classify_database_records.py
This illustrates evaluating the quality of the ensemble of decision trees
constructed by RandomizedTreesForBigData by aggregating all of the classification
information for a reasonably large number of samples drawn randomly from a
database. See the comment block at the beginning of this script for what sort
of diagnostic information the script puts out. | PypiClean |
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/node_modules/@types/node/v8.d.ts | declare module 'v8' {
import { Readable } from 'node:stream';
interface HeapSpaceInfo {
space_name: string;
space_size: number;
space_used_size: number;
space_available_size: number;
physical_space_size: number;
}
// ** Signifies if the --zap_code_space option is enabled or not. 1 == enabled, 0 == disabled. */
type DoesZapCodeSpaceFlag = 0 | 1;
interface HeapInfo {
total_heap_size: number;
total_heap_size_executable: number;
total_physical_size: number;
total_available_size: number;
used_heap_size: number;
heap_size_limit: number;
malloced_memory: number;
peak_malloced_memory: number;
does_zap_garbage: DoesZapCodeSpaceFlag;
number_of_native_contexts: number;
number_of_detached_contexts: number;
}
interface HeapCodeStatistics {
code_and_metadata_size: number;
bytecode_and_metadata_size: number;
external_script_source_size: number;
}
/**
* Returns an integer representing a version tag derived from the V8 version,
* command-line flags, and detected CPU features. This is useful for determining
* whether a `vm.Script` `cachedData` buffer is compatible with this instance
* of V8.
*
* ```js
* console.log(v8.cachedDataVersionTag()); // 3947234607
* // The value returned by v8.cachedDataVersionTag() is derived from the V8
* // version, command-line flags, and detected CPU features. Test that the value
* // does indeed update when flags are toggled.
* v8.setFlagsFromString('--allow_natives_syntax');
* console.log(v8.cachedDataVersionTag()); // 183726201
* ```
* @since v8.0.0
*/
function cachedDataVersionTag(): number;
/**
* Returns an object with the following properties:
*
* `does_zap_garbage` is a 0/1 boolean, which signifies whether the`--zap_code_space` option is enabled or not. This makes V8 overwrite heap
* garbage with a bit pattern. The RSS footprint (resident set size) gets bigger
* because it continuously touches all heap pages and that makes them less likely
* to get swapped out by the operating system.
*
* `number_of_native_contexts` The value of native\_context is the number of the
* top-level contexts currently active. Increase of this number over time indicates
* a memory leak.
*
* `number_of_detached_contexts` The value of detached\_context is the number
* of contexts that were detached and not yet garbage collected. This number
* being non-zero indicates a potential memory leak.
*
* ```js
* {
* total_heap_size: 7326976,
* total_heap_size_executable: 4194304,
* total_physical_size: 7326976,
* total_available_size: 1152656,
* used_heap_size: 3476208,
* heap_size_limit: 1535115264,
* malloced_memory: 16384,
* peak_malloced_memory: 1127496,
* does_zap_garbage: 0,
* number_of_native_contexts: 1,
* number_of_detached_contexts: 0
* }
* ```
* @since v1.0.0
*/
function getHeapStatistics(): HeapInfo;
/**
* Returns statistics about the V8 heap spaces, i.e. the segments which make up
* the V8 heap. Neither the ordering of heap spaces, nor the availability of a
* heap space can be guaranteed as the statistics are provided via the
* V8[`GetHeapSpaceStatistics`](https://v8docs.nodesource.com/node-13.2/d5/dda/classv8_1_1_isolate.html#ac673576f24fdc7a33378f8f57e1d13a4) function and may change from one V8 version to the
* next.
*
* The value returned is an array of objects containing the following properties:
*
* ```json
* [
* {
* "space_name": "new_space",
* "space_size": 2063872,
* "space_used_size": 951112,
* "space_available_size": 80824,
* "physical_space_size": 2063872
* },
* {
* "space_name": "old_space",
* "space_size": 3090560,
* "space_used_size": 2493792,
* "space_available_size": 0,
* "physical_space_size": 3090560
* },
* {
* "space_name": "code_space",
* "space_size": 1260160,
* "space_used_size": 644256,
* "space_available_size": 960,
* "physical_space_size": 1260160
* },
* {
* "space_name": "map_space",
* "space_size": 1094160,
* "space_used_size": 201608,
* "space_available_size": 0,
* "physical_space_size": 1094160
* },
* {
* "space_name": "large_object_space",
* "space_size": 0,
* "space_used_size": 0,
* "space_available_size": 1490980608,
* "physical_space_size": 0
* }
* ]
* ```
* @since v6.0.0
*/
function getHeapSpaceStatistics(): HeapSpaceInfo[];
/**
* The `v8.setFlagsFromString()` method can be used to programmatically set
* V8 command-line flags. This method should be used with care. Changing settings
* after the VM has started may result in unpredictable behavior, including
* crashes and data loss; or it may simply do nothing.
*
* The V8 options available for a version of Node.js may be determined by running`node --v8-options`.
*
* Usage:
*
* ```js
* // Print GC events to stdout for one minute.
* const v8 = require('v8');
* v8.setFlagsFromString('--trace_gc');
* setTimeout(() => { v8.setFlagsFromString('--notrace_gc'); }, 60e3);
* ```
* @since v1.0.0
*/
function setFlagsFromString(flags: string): void;
/**
* Generates a snapshot of the current V8 heap and returns a Readable
* Stream that may be used to read the JSON serialized representation.
* This JSON stream format is intended to be used with tools such as
* Chrome DevTools. The JSON schema is undocumented and specific to the
* V8 engine. Therefore, the schema may change from one version of V8 to the next.
*
* Creating a heap snapshot requires memory about twice the size of the heap at
* the time the snapshot is created. This results in the risk of OOM killers
* terminating the process.
*
* Generating a snapshot is a synchronous operation which blocks the event loop
* for a duration depending on the heap size.
*
* ```js
* // Print heap snapshot to the console
* const v8 = require('v8');
* const stream = v8.getHeapSnapshot();
* stream.pipe(process.stdout);
* ```
* @since v11.13.0
* @return A Readable Stream containing the V8 heap snapshot
*/
function getHeapSnapshot(): Readable;
/**
 * Generates a snapshot of the current V8 heap and writes it to a JSON
 * file. This file is intended to be used with tools such as Chrome
 * DevTools. The JSON schema is undocumented and specific to the V8
 * engine, and may change from one version of V8 to the next.
 *
 * A heap snapshot is specific to a single V8 isolate. When using `worker threads`, a heap snapshot generated from the main thread will
 * not contain any information about the workers, and vice versa.
 *
 * Creating a heap snapshot requires memory about twice the size of the heap at
 * the time the snapshot is created. This results in the risk of OOM killers
 * terminating the process.
 *
 * Generating a snapshot is a synchronous operation which blocks the event loop
 * for a duration depending on the heap size.
 *
 * ```js
 * const { writeHeapSnapshot } = require('v8');
 * const {
 *   Worker,
 *   isMainThread,
 *   parentPort
 * } = require('worker_threads');
 *
 * if (isMainThread) {
 *   const worker = new Worker(__filename);
 *
 *   worker.once('message', (filename) => {
 *     console.log(`worker heapdump: ${filename}`);
 *     // Now get a heapdump for the main thread.
 *     console.log(`main thread heapdump: ${writeHeapSnapshot()}`);
 *   });
 *
 *   // Tell the worker to create a heapdump.
 *   worker.postMessage('heapdump');
 * } else {
 *   parentPort.once('message', (message) => {
 *     if (message === 'heapdump') {
 *       // Generate a heapdump for the worker
 *       // and return the filename to the parent.
 *       parentPort.postMessage(writeHeapSnapshot());
 *     }
 *   });
 * }
 * ```
 * @since v11.13.0
 * @param filename The file path where the V8 heap snapshot is to be saved. If not specified, a file name with the pattern `'Heap-${yyyymmdd}-${hhmmss}-${pid}-${thread_id}.heapsnapshot'` will be
 * generated, where `{pid}` will be the PID of the Node.js process, `{thread_id}` will be `0` when `writeHeapSnapshot()` is called from the main Node.js thread or the id of a
 * worker thread.
 * @return The filename where the snapshot was saved.
 */
function writeHeapSnapshot(filename?: string): string;
/**
* Returns an object with the following properties:
*
* ```js
* {
* code_and_metadata_size: 212208,
* bytecode_and_metadata_size: 161368,
* external_script_source_size: 1410794
* }
* ```
* @since v12.8.0
*/
function getHeapCodeStatistics(): HeapCodeStatistics;
/**
* @since v8.0.0
*/
class Serializer {
/**
* Writes out a header, which includes the serialization format version.
*/
writeHeader(): void;
/**
* Serializes a JavaScript value and adds the serialized representation to the
* internal buffer.
*
* This throws an error if `value` cannot be serialized.
*/
writeValue(val: any): boolean;
/**
* Returns the stored internal buffer. This serializer should not be used once
* the buffer is released. Calling this method results in undefined behavior
* if a previous write has failed.
*/
releaseBuffer(): Buffer;
/**
* Marks an `ArrayBuffer` as having its contents transferred out of band.
* Pass the corresponding `ArrayBuffer` in the deserializing context to `deserializer.transferArrayBuffer()`.
* @param id A 32-bit unsigned integer.
* @param arrayBuffer An `ArrayBuffer` instance.
*/
transferArrayBuffer(id: number, arrayBuffer: ArrayBuffer): void;
/**
* Write a raw 32-bit unsigned integer.
* For use inside of a custom `serializer._writeHostObject()`.
*/
writeUint32(value: number): void;
/**
* Write a raw 64-bit unsigned integer, split into high and low 32-bit parts.
* For use inside of a custom `serializer._writeHostObject()`.
*/
writeUint64(hi: number, lo: number): void;
/**
* Write a JS `number` value.
* For use inside of a custom `serializer._writeHostObject()`.
*/
writeDouble(value: number): void;
/**
* Write raw bytes into the serializer’s internal buffer. The deserializer
* will require a way to compute the length of the buffer.
* For use inside of a custom `serializer._writeHostObject()`.
*/
writeRawBytes(buffer: NodeJS.TypedArray): void;
}
/**
* A subclass of `Serializer` that serializes `TypedArray`(in particular `Buffer`) and `DataView` objects as host objects, and only
* stores the part of their underlying `ArrayBuffer`s that they are referring to.
* @since v8.0.0
*/
class DefaultSerializer extends Serializer {}
/**
* @since v8.0.0
*/
class Deserializer {
constructor(data: NodeJS.TypedArray);
/**
* Reads and validates a header (including the format version).
* May, for example, reject an invalid or unsupported wire format. In that case,
* an `Error` is thrown.
*/
readHeader(): boolean;
/**
* Deserializes a JavaScript value from the buffer and returns it.
*/
readValue(): any;
/**
* Marks an `ArrayBuffer` as having its contents transferred out of band.
* Pass the corresponding `ArrayBuffer` in the serializing context to `serializer.transferArrayBuffer()` (or return the `id` from `serializer._getSharedArrayBufferId()` in the case of
* `SharedArrayBuffer`s).
* @param id A 32-bit unsigned integer.
* @param arrayBuffer An `ArrayBuffer` instance.
*/
transferArrayBuffer(id: number, arrayBuffer: ArrayBuffer): void;
/**
* Reads the underlying wire format version. Likely mostly to be useful to
* legacy code reading old wire format versions. May not be called before`.readHeader()`.
*/
getWireFormatVersion(): number;
/**
* Read a raw 32-bit unsigned integer and return it.
* For use inside of a custom `deserializer._readHostObject()`.
*/
readUint32(): number;
/**
* Read a raw 64-bit unsigned integer and return it as an array `[hi, lo]`with two 32-bit unsigned integer entries.
* For use inside of a custom `deserializer._readHostObject()`.
*/
readUint64(): [number, number];
/**
* Read a JS `number` value.
* For use inside of a custom `deserializer._readHostObject()`.
*/
readDouble(): number;
/**
* Read raw bytes from the deserializer’s internal buffer. The `length` parameter
* must correspond to the length of the buffer that was passed to `serializer.writeRawBytes()`.
* For use inside of a custom `deserializer._readHostObject()`.
*/
readRawBytes(length: number): Buffer;
}
/**
* A subclass of `Deserializer` corresponding to the format written by `DefaultSerializer`.
* @since v8.0.0
*/
class DefaultDeserializer extends Deserializer {}
/**
* Uses a `DefaultSerializer` to serialize `value` into a buffer.
*
* `ERR_BUFFER_TOO_LARGE` will be thrown when trying to
* serialize a huge object which requires buffer
* larger than `buffer.constants.MAX_LENGTH`.
* @since v8.0.0
*/
function serialize(value: any): Buffer;
/**
* Uses a `DefaultDeserializer` with default options to read a JS value
* from a buffer.
* @since v8.0.0
* @param buffer A buffer returned by {@link serialize}.
*/
function deserialize(buffer: NodeJS.TypedArray): any;
/**
* The `v8.takeCoverage()` method allows the user to write the coverage started by `NODE_V8_COVERAGE` to disk on demand. This method can be invoked multiple
* times during the lifetime of the process. Each time the execution counter will
* be reset and a new coverage report will be written to the directory specified
* by `NODE_V8_COVERAGE`.
*
* When the process is about to exit, one last coverage will still be written to
* disk unless {@link stopCoverage} is invoked before the process exits.
* @since v15.1.0, v14.18.0, v12.22.0
*/
function takeCoverage(): void;
/**
* The `v8.stopCoverage()` method allows the user to stop the coverage collection
* started by `NODE_V8_COVERAGE`, so that V8 can release the execution count
* records and optimize code. This can be used in conjunction with {@link takeCoverage} if the user wants to collect the coverage on demand.
* @since v15.1.0, v14.18.0, v12.22.0
*/
function stopCoverage(): void;
/**
* This API collects GC data in current thread.
*/
class GCProfiler {
/**
* Start collecting GC data.
*/
start(): void;
/**
* Stop collecting GC data and return a object.
*/
stop(): GCProfilerResult;
}
interface GCProfilerResult {
version: number;
startTime: number;
endTime: number;
statistics: Array<{
gcType: string;
cost: number;
beforeGC: {
heapStatistics: HeapStatistics;
heapSpaceStatistics: HeapSpaceStatistics[];
};
afterGC: {
heapStatistics: HeapStatistics;
heapSpaceStatistics: HeapSpaceStatistics[];
};
}>;
}
interface HeapStatistics {
totalHeapSize: number;
totalHeapSizeExecutable: number;
totalPhysicalSize: number;
totalAvailableSize: number;
totalGlobalHandlesSize: number;
usedGlobalHandlesSize: number;
usedHeapSize: number;
heapSizeLimit: number;
mallocedMemory: number;
externalMemory: number;
peakMallocedMemory: number;
}
interface HeapSpaceStatistics {
spaceName: string;
spaceSize: number;
spaceUsedSize: number;
spaceAvailableSize: number;
physicalSpaceSize: number;
}
/**
* Called when a promise is constructed. This does not mean that corresponding before/after events will occur, only that the possibility exists. This will
* happen if a promise is created without ever getting a continuation.
* @since v17.1.0, v16.14.0
* @param promise The promise being created.
* @param parent The promise continued from, if applicable.
*/
interface Init {
(promise: Promise<unknown>, parent: Promise<unknown>): void;
}
/**
* Called before a promise continuation executes. This can be in the form of `then()`, `catch()`, or `finally()` handlers or an await resuming.
*
* The before callback will be called 0 to N times. The before callback will typically be called 0 times if no continuation was ever made for the promise.
* The before callback may be called many times in the case where many continuations have been made from the same promise.
* @since v17.1.0, v16.14.0
*/
interface Before {
(promise: Promise<unknown>): void;
}
/**
* Called immediately after a promise continuation executes. This may be after a `then()`, `catch()`, or `finally()` handler or before an await after another await.
* @since v17.1.0, v16.14.0
*/
interface After {
(promise: Promise<unknown>): void;
}
/**
* Called when the promise receives a resolution or rejection value. This may occur synchronously in the case of {@link Promise.resolve()} or
* {@link Promise.reject()}.
* @since v17.1.0, v16.14.0
*/
interface Settled {
(promise: Promise<unknown>): void;
}
/**
* Key events in the lifetime of a promise have been categorized into four areas: creation of a promise, before/after a continuation handler is called or
* around an await, and when the promise resolves or rejects.
*
* Because promises are asynchronous resources whose lifecycle is tracked via the promise hooks mechanism, the `init()`, `before()`, `after()`, and
* `settled()` callbacks must not be async functions as they create more promises which would produce an infinite loop.
* @since v17.1.0, v16.14.0
*/
interface HookCallbacks {
init?: Init;
before?: Before;
after?: After;
settled?: Settled;
}
interface PromiseHooks {
/**
* The `init` hook must be a plain function. Providing an async function will throw as it would produce an infinite microtask loop.
* @since v17.1.0, v16.14.0
* @param init The {@link Init | `init` callback} to call when a promise is created.
* @return Call to stop the hook.
*/
onInit: (init: Init) => Function;
/**
* The `settled` hook must be a plain function. Providing an async function will throw as it would produce an infinite microtask loop.
* @since v17.1.0, v16.14.0
* @param settled The {@link Settled | `settled` callback} to call when a promise is created.
* @return Call to stop the hook.
*/
onSettled: (settled: Settled) => Function;
/**
* The `before` hook must be a plain function. Providing an async function will throw as it would produce an infinite microtask loop.
* @since v17.1.0, v16.14.0
* @param before The {@link Before | `before` callback} to call before a promise continuation executes.
* @return Call to stop the hook.
*/
onBefore: (before: Before) => Function;
/**
* The `after` hook must be a plain function. Providing an async function will throw as it would produce an infinite microtask loop.
* @since v17.1.0, v16.14.0
* @param after The {@link After | `after` callback} to call after a promise continuation executes.
* @return Call to stop the hook.
*/
onAfter: (after: After) => Function;
/**
* Registers functions to be called for different lifetime events of each promise.
* The callbacks `init()`/`before()`/`after()`/`settled()` are called for the respective events during a promise's lifetime.
* All callbacks are optional. For example, if only promise creation needs to be tracked, then only the init callback needs to be passed.
* The hook callbacks must be plain functions. Providing async functions will throw as it would produce an infinite microtask loop.
* @since v17.1.0, v16.14.0
* @param callbacks The {@link HookCallbacks | Hook Callbacks} to register
* @return Used for disabling hooks
*/
createHook: (callbacks: HookCallbacks) => Function;
}
/**
* The `promiseHooks` interface can be used to track promise lifecycle events.
* @since v17.1.0, v16.14.0
*/
const promiseHooks: PromiseHooks;
}
declare module 'node:v8' {
export * from 'v8';
} | PypiClean |
/LinOTP-2.11.1.tar.gz/LinOTP-2.11.1/linotp/lib/audit/SQLAudit.py | import datetime
from sqlalchemy import schema, types, orm, and_, or_, asc, desc
from M2Crypto import EVP, RSA
from binascii import hexlify
from binascii import unhexlify
from sqlalchemy import create_engine
from linotp.lib.audit.base import AuditBase
from pylons import config
import logging.config
import traceback
import linotp
from linotp.lib.text_utils import utf8_slice
# Create the logging object from the linotp.ini config file
ini_file = config.get("__file__")
if ini_file is not None:
    # When importing the module with Sphinx to generate documentation
    # 'ini_file' is None. In other cases this should not be the case.
    logging.config.fileConfig(ini_file, disable_existing_loggers=False)

log = logging.getLogger(__name__)

# shared metadata object the audit table definition below is bound to
metadata = schema.MetaData()
def now():
    """Return the current local time as a unicode timestamp string.

    Format: ``YYYY-MM-DD HH:MM:SS.ffffff`` (microsecond precision) -
    the representation stored in the audit table's timestamp column.
    """
    current = datetime.datetime.now()
    return u"%s" % current.strftime("%Y-%m-%d %H:%M:%S.%f")
######################## MODEL ################################################

# optional table name prefix, so several installations can share one database
table_prefix = config.get("linotpAudit.sql.table_prefix", "")
audit_table_name = '%saudit' % table_prefix

audit_table = schema.Table(audit_table_name, metadata,
    schema.Column('id', types.Integer, schema.Sequence('audit_seq_id',
                                                       optional=True),
                  primary_key=True),
    schema.Column('timestamp', types.Unicode(30), default=now, index=True),
    schema.Column('signature', types.Unicode(512), default=u''),
    schema.Column('action', types.Unicode(30), index=True),
    schema.Column('success', types.Unicode(30), default=u"False"),
    schema.Column('serial', types.Unicode(30), index=True),
    schema.Column('tokentype', types.Unicode(40)),
    schema.Column('user', types.Unicode(255), index=True),
    schema.Column('realm', types.Unicode(255), index=True),
    schema.Column('administrator', types.Unicode(255)),
    schema.Column('action_detail', types.Unicode(512), default=u''),
    schema.Column('info', types.Unicode(512), default=u''),
    schema.Column('linotp_server', types.Unicode(80)),
    schema.Column('client', types.Unicode(80)),
    schema.Column('log_level', types.Unicode(20), default=u"INFO", index=True),
    schema.Column('clearance_level', types.Integer, default=0)
)

# columns whose values are stored encoded - see AuditTable.__setattr__
# and AuditTable.__getattribute__
AUDIT_ENCODE = ["action", "serial", "success", "user", "realm", "tokentype",
                "administrator", "action_detail", "info", "linotp_server",
                "client", "log_level"]
class AuditTable(object):
    """
    ORM class mapped onto the audit table (see the orm.mapper call below).

    String values are transparently encoded/decoded with
    linotp.lib.crypto.uencode / udecode in __setattr__ and
    __getattribute__ for the columns listed in AUDIT_ENCODE, and
    over-long values are truncated to the column size (or raise, if
    'linotpAudit.error_on_truncation' is configured).
    """

    def __init__(self, serial=u"", action=u"", success=u"False",
                 tokentype=u"", user=u"",
                 realm=u"", administrator=u"",
                 action_detail=u"", info=u"",
                 linotp_server=u"",
                 client=u"",
                 log_level=u"INFO",
                 clearance_level=0,
                 config_param=None):
        """
        build an audit db entry

        *parameters are required to be compliant to the table definition,
        which implies that type unicode is recommended where appropriate

        :param serial: token serial number
        :type serial: unicode
        :param action: the scope of the audit entry, eg. admin/show
        :type action: unicode
        :param success: the result of the action
        :type success: unicode
        :param tokentype: which token type was involved
        :type tokentype: unicode
        :param user: user login
        :type user: unicode
        :param realm: the involved realm
        :type realm: unicode
        :param administrator: the admin involved
        :type administrator: unicode
        :param action_detail: the additional action details
        :type action_detail: unicode
        :param info: additional info for failures
        :type info: unicode
        :param linotp_server: the server name
        :type linotp_server: unicode
        :param client: info about the requesting client
        :type client: unicode
        :param log_level: the loglevel of the action
        :type log_level: unicode
        :param clearance_level: clearance level of the audit entry
        :type clearance_level: integer
        :param config_param: alternative config dict (mainly for tests);
                             defaults to the pylons config
        """
        log.debug("[__init__] creating AuditTable object, action = %s"
                  % action)

        if config_param:
            self.config = config_param
        else:
            self.config = config

        # if configured, truncation of over-long values raises instead
        # of silently cutting the data (see __setattr__)
        self.trunc_as_err = self.config.get('linotpAudit.error_on_truncation',
                                            'False') == 'True'

        self.serial = unicode(serial or '')
        self.action = unicode(action or '')
        self.success = unicode(success or '0')
        self.tokentype = unicode(tokentype or '')
        self.user = unicode(user or '')
        self.realm = unicode(realm or '')
        self.administrator = unicode(administrator or '')

        #
        # we have to truncate the 'action_detail' and the 'info' data
        # in an utf-8 compliant way
        #
        self.action_detail = utf8_slice(unicode(action_detail or ''),
                                        512).next()
        self.info = utf8_slice(unicode(info or ''), 512).next()

        self.linotp_server = unicode(linotp_server or '')
        self.client = unicode(client or '')
        self.log_level = unicode(log_level or '')
        self.clearance_level = clearance_level
        self.timestamp = now()

        # placeholder only - the real signature can first be computed
        # once the row has its primary key (see Audit.log_entry).
        # bug fix: this formerly wrote to a misspelled 'siganture'
        # attribute, so the column default was used instead
        self.signature = ' '

    def _get_field_len(self, col_name):
        """
        Return the declared max length of an audit table column.

        :param col_name: name of the column
        :return: the column length, or -1 if unknown / unrestricted
        """
        try:
            leng = audit_table.columns[col_name].type.length
        except Exception:
            leng = -1
        return leng

    def __setattr__(self, name, value):
        """
        attribute setter which encodes string values and enforces the
        column length restrictions

        to support unicode on all backends, string values of the columns
        listed in AUDIT_ENCODE are stored encoded via
        linotp.lib.crypto.uencode

        :param name: db column name or class member
        :param value: the corresponding value
        :return: - nothing -
        """
        if type(value) in [str, unicode]:
            field_len = self._get_field_len(name)
            encoded_value = linotp.lib.crypto.uencode(value)

            if field_len != -1 and len(encoded_value) > field_len:
                log.warning("truncating audit data: [audit.%s] %s",
                            name, value)
                if self.trunc_as_err is not False:
                    raise Exception("truncating audit data: [audit.%s] %s" %
                                    (name, value))

                # during the encoding the value might expand -
                # so we take this additional length into account
                add_len = len(encoded_value) - len(value)
                value = value[:field_len - add_len]

        if name in AUDIT_ENCODE:
            # encode data
            if value:
                value = linotp.lib.crypto.uencode(value)

        super(AuditTable, self).__setattr__(name, value)

    def __getattribute__(self, name):
        """
        attribute getter which decodes the encoded column values

        :param name: db column name or class member
        :return: the corresponding (decoded) value
        """
        # Default behaviour
        value = object.__getattribute__(self, name)

        if name in AUDIT_ENCODE:
            if value:
                value = linotp.lib.crypto.udecode(value)
            else:
                value = ""

        return value
orm.mapper(AuditTable, audit_table)
# replace sqlalchemy-migrate by the ability to add a column
def add_column(engine, table, column):
    """
    small helper to add a column by calling a native 'ALTER TABLE' to
    replace the need for sqlalchemy-migrate

    from:
    http://stackoverflow.com/questions/7300948/add-column-to-sqlalchemy-table

    :param engine: the running sqlalchemy engine
    :param table: in which table should this column be added
    :param column: the sqlalchemy definition of a column
    :return: True if the column was added, False otherwise
    """
    table_name = table.description
    column_name = column.compile(dialect=engine.dialect)
    column_type = column.type.compile(engine.dialect)

    try:
        engine.execute('ALTER TABLE %s ADD COLUMN %s %s'
                       % (table_name, column_name, column_type))
        return True
    except Exception:
        # most likely the column already exists (the database was
        # migrated before) - treat this as a no-op
        return False
###############################################################################
class Audit(AuditBase):
"""
Audit Implementation to the generic audit interface
"""
def __init__(self, config):
    """
    Set up the SQL audit backend: create the database engine and the
    audit table (if missing), set up the session factory, and load the
    RSA keys used for signing and verifying audit entries.

    :param config: the linotp configuration dict (pylons config)
    """
    self.name = "SQlAudit"
    self.config = config

    connect_string = config.get("linotpAudit.sql.url")
    # NOTE(review): the config key 'pool_recyle' is misspelled, but it
    # is a runtime key that deployments may rely on - do not rename it
    # without a migration path
    pool_recycle = config.get("linotpAudit.sql.pool_recyle", 3600)
    implicit_returning = config.get("linotpSQL.implicit_returning", True)
    self.engine = None

    ########################## SESSION ##################################

    # Create an engine and create all the tables we need
    if implicit_returning:
        # If implicit_returning is explicitly set to True, we
        # get lots of mysql errors
        # AttributeError: 'MySQLCompiler_mysqldb' object has no
        # attribute 'returning_clause'
        # So we do not mention explicit_returning at all
        self.engine = create_engine(connect_string,
                                    pool_recycle=pool_recycle)
    else:
        self.engine = create_engine(connect_string,
                                    pool_recycle=pool_recycle,
                                    implicit_returning=False)

    metadata.bind = self.engine
    metadata.create_all()

    # Set up the session
    self.sm = orm.sessionmaker(bind=self.engine, autoflush=True,
                               autocommit=True, expire_on_commit=True)
    self.session = orm.scoped_session(self.sm)

    # initialize signing keys
    self.readKeys()

    # build the verification key object once (EVP wrapper around the
    # public RSA key with sha256), so _verify() can reuse it for every
    # audit line
    self.PublicKey = RSA.load_pub_key(
        self.config.get("linotpAudit.key.public"))
    self.VerifyEVP = EVP.PKey()
    self.VerifyEVP.reset_context(md='sha256')
    self.VerifyEVP.assign_rsa(self.PublicKey)

    return
def _attr_to_dict(self, audit_line):
    """
    Convert an AuditTable row into a plain dict, as used for signing
    and for returning audit lines to the caller.

    Note: 'action_detail' was previously assigned twice; the duplicate
    assignment has been removed (same value, no behavior change).

    :param audit_line: an AuditTable instance
    :return: dict with one entry per audit attribute
    """
    return {
        'number': audit_line.id,
        'id': audit_line.id,
        'date': str(audit_line.timestamp),
        'timestamp': str(audit_line.timestamp),
        'missing_line': "",
        'serial': audit_line.serial,
        'action': audit_line.action,
        'action_detail': audit_line.action_detail,
        'success': audit_line.success,
        'token_type': audit_line.tokentype,
        'tokentype': audit_line.tokentype,
        'user': audit_line.user,
        'realm': audit_line.realm,
        'administrator': audit_line.administrator,
        'info': audit_line.info,
        'linotp_server': audit_line.linotp_server,
        'client': audit_line.client,
        'log_level': audit_line.log_level,
        'clearance_level': audit_line.clearance_level,
    }
def _sign(self, audit_line):
    '''
    Create a signature of the audit object

    The audit line is converted to a dict, serialized with getAsString
    and signed with the private RSA key using sha256; the signature is
    returned hex encoded.

    :param audit_line: the AuditTable row to sign
    :return: hex encoded signature as unicode
    '''
    line = self._attr_to_dict(audit_line)
    s_audit = getAsString(line)

    # self.private is presumably loaded by self.readKeys() in
    # __init__ - TODO confirm against the AuditBase base class
    key = EVP.load_key_string(self.private)
    key.reset_context(md='sha256')
    key.sign_init()
    key.sign_update(s_audit)
    signature = key.sign_final()

    return u'' + hexlify(signature)
def _verify(self, auditline, signature):
    '''
    Verify the signature of the audit line

    Uses the pre-built VerifyEVP key (public RSA key, sha256) from
    __init__ against the hex encoded signature.

    :param auditline: the audit line dict to verify
    :param signature: hex encoded signature string
    :return: verification result (falsy if no signature was given)
    '''
    res = False
    if not signature:
        log.debug("[_verify] missing signature %r" % auditline)
        return res

    s_audit = getAsString(auditline)

    self.VerifyEVP.verify_init()
    self.VerifyEVP.verify_update(s_audit)
    res = self.VerifyEVP.verify_final(unhexlify(signature))

    return res
def log(self, param):
'''
This method is used to log the data. It splits information of
multiple tokens (e.g from import) in multiple audit log entries
'''
try:
serial = param.get('serial', '') or ''
if not serial:
# if no serial, do as before
self.log_entry(param)
else:
# look if we have multiple serials inside
serials = serial.split(',')
for serial in serials:
p = {}
p.update(param)
p['serial'] = serial
self.log_entry(p)
except Exception as exx:
log.exception("[log] error writing log message: %r" % exx)
self.session.rollback()
raise exx
return
def log_entry(self, param):
'''
This method is used to log the data.
It should hash the data and do a hash chain and sign the data
'''
at = AuditTable(
serial=param.get('serial'),
action=param.get('action'),
success=1 if param.get('success') else 0,
tokentype=param.get('token_type'),
user=param.get('user'),
realm=param.get('realm'),
administrator=param.get('administrator'),
action_detail=param.get('action_detail'),
info=param.get('info'),
linotp_server=param.get('linotp_server'),
client=param.get('client'),
log_level=param.get('log_level'),
clearance_level=param.get('clearance_level'),
config_param=self.config,
)
self.session.add(at)
self.session.flush()
# At this point "at" contains the primary key id
at.signature = self._sign(at)
self.session.merge(at)
self.session.flush()
    def initialize_log(self, param):
        '''
        This method initializes the log state.

        The fact, that the log state was initialized, also needs to be
        logged. Therefore the same params are passed as in the log method.

        This backend keeps no separate log state, so nothing is done here.
        '''
        pass
    def set(self):
        '''
        This function could be used to set certain things like the signing key.
        But maybe it should only be read from linotp.ini?

        Currently a no-op.
        '''
        pass
def _buildCondition(self, param, AND):
'''
create the sqlalchemy condition from the params
'''
conditions = []
boolCheck = and_
if not AND:
boolCheck = or_
for k, v in param.items():
if "" != v:
if "serial" == k:
conditions.append(AuditTable.serial.like(v))
elif "user" == k:
conditions.append(AuditTable.user.like(v))
elif "realm" == k:
conditions.append(AuditTable.realm.like(v))
elif "action" == k:
conditions.append(AuditTable.action.like(v))
elif "action_detail" == k:
conditions.append(AuditTable.action_detail.like(v))
elif "date" == k:
conditions.append(AuditTable.timestamp.like(v))
elif "number" == k:
conditions.append(AuditTable.id.like(v))
elif "success" == k:
conditions.append(AuditTable.success.like(v))
elif "tokentype" == k:
conditions.append(AuditTable.tokentype.like(v))
elif "administrator" == k:
conditions.append(AuditTable.administrator.like(v))
elif "info" == k:
conditions.append(AuditTable.info.like(v))
elif "linotp_server" == k:
conditions.append(AuditTable.linotp_server.like(v))
elif "client" == k:
conditions.append(AuditTable.client.like(v))
all_conditions = None
if conditions:
all_conditions = boolCheck(*conditions)
return all_conditions
def row2dict(self, audit_line):
"""
convert an SQL audit db to a audit dict
:param audit_line: audit db row
:return: audit entry dict
"""
line = self._attr_to_dict(audit_line)
## if we have an \uencoded data, we extract the unicode back
for key, value in line.items():
if value and type(value) in [str, unicode]:
value = linotp.lib.crypto.udecode(value)
line[key] = value
elif value is None:
line[key] = ''
# Signature check
# TODO: use instead the verify_init
res = self._verify(line, audit_line.signature)
if res == 1:
line['sig_check'] = "OK"
else:
line['sig_check'] = "FAIL"
return line
def searchQuery(self, param, AND=True, display_error=True, rp_dict=None):
'''
This function is used to search audit events.
param:
Search parameters can be passed.
return:
a result object which has to be converted with iter() to an
iterator
'''
if rp_dict is None:
rp_dict = {}
if 'or' in param:
if "true" == param['or'].lower():
AND = False
# build the condition / WHERE clause
condition = self._buildCondition(param, AND)
order = AuditTable.id
if rp_dict.get("sortname"):
sortn = rp_dict.get('sortname').lower()
if "serial" == sortn:
order = AuditTable.serial
elif "number" == sortn:
order = AuditTable.id
elif "user" == sortn:
order = AuditTable.user
elif "action" == sortn:
order = AuditTable.action
elif "action_detail" == sortn:
order = AuditTable.action_detail
elif "realm" == sortn:
order = AuditTable.realm
elif "date" == sortn:
order = AuditTable.timestamp
elif "administrator" == sortn:
order = AuditTable.administrator
elif "success" == sortn:
order = AuditTable.success
elif "tokentype" == sortn:
order = AuditTable.tokentype
elif "info" == sortn:
order = AuditTable.info
elif "linotp_server" == sortn:
order = AuditTable.linotp_server
elif "client" == sortn:
order = AuditTable.client
elif "log_level" == sortn:
order = AuditTable.log_level
elif "clearance_level" == sortn:
order = AuditTable.clearance_level
# build the ordering
order_dir = asc(order)
if rp_dict.get("sortorder"):
sorto = rp_dict.get('sortorder').lower()
if "desc" == sorto:
order_dir = desc(order)
if type(condition).__name__ == 'NoneType':
audit_q = self.session.query(AuditTable)\
.order_by(order_dir)
else:
audit_q = self.session.query(AuditTable)\
.filter(condition)\
.order_by(order_dir)
# FIXME? BUT THIS IS SO MUCH SLOWER!
# FIXME: Here desc() ordering also does not work! :/
if 'rp' in rp_dict or 'page' in rp_dict:
# build the LIMIT and OFFSET
page = 1
offset = 0
limit = 15
if 'rp' in rp_dict:
limit = int(rp_dict.get('rp'))
if 'page' in rp_dict:
page = int(rp_dict.get('page'))
offset = limit * (page - 1)
start = offset
stop = offset + limit
audit_q = audit_q.slice(start, stop)
## we drop here the ORM due to memory consumption
## and return a resultproxy for row iteration
result = self.session.execute(audit_q.statement)
return result
def getTotal(self, param, AND=True, display_error=True):
'''
This method returns the total number of audit entries in
the audit store
'''
condition = self._buildCondition(param, AND)
if type(condition).__name__ == 'NoneType':
c = self.session.query(AuditTable).count()
else:
c = self.session.query(AuditTable).filter(condition).count()
return c
def getAsString(data):
    '''
    Build the canonical string serialization of an audit entry, which is
    the input for signing and verification.

    We need to distinguish, if this is an entry after the adding the
    client entry or before. Otherwise the old signatures will break!

    :param data: audit entry as a dict
    :return: the serialized entry as one string
    '''
    # each segment is rendered independently, then joined with ", ";
    # the one-element tuple form keeps %s semantics for any value type
    segments = [
        "number=%s" % (data.get('id'),),
        "date=%s" % (data.get('timestamp'),),
        "action=%s" % (data.get('action'),),
        "%s" % (data.get('success'),),
        "serial=%s" % (data.get('serial'),),
        "%s" % (data.get('tokentype'),),
        "user=%s" % (data.get('user'),),
        "%s" % (data.get('realm'),),
        "admin=%s" % (data.get('administrator'),),
        "%s" % (data.get('action_detail'),),
        "%s" % (data.get('info'),),
        "server=%s" % (data.get('linotp_server'),),
        "%s" % (data.get('log_level'),),
        "%s" % (data.get('clearance_level'),),
    ]
    s = ", ".join(segments)

    # entries written after the client column was introduced also cover
    # the client value; older entries must keep the original string
    if 'client' in data:
        s += ", client=%s" % (data.get('client'),)
    return s
###eof######################################################################### | PypiClean |
/CANberry-0.4.tar.gz/CANberry-0.4/canberry/bower_components/Flot/API.md | # Flot Reference #
Consider a call to the plot function:
```js
var plot = $.plot(placeholder, data, options)
```
The placeholder is a jQuery object or DOM element or jQuery expression
that the plot will be put into. This placeholder needs to have its
width and height set as explained in the [README](README.md) (go read that now if
you haven't, it's short). The plot will modify some properties of the
placeholder so it's recommended you simply pass in a div that you
don't use for anything else. Make sure you check any fancy styling
you apply to the div, e.g. background images have been reported to be a
problem on IE 7.
The plot function can also be used as a jQuery chainable property. This form
naturally can't return the plot object directly, but you can still access it
via the 'plot' data key, like this:
```js
var plot = $("#placeholder").plot(data, options).data("plot");
```
The format of the data is documented below, as is the available
options. The plot object returned from the call has some methods you
can call. These are documented separately below.
Note that in general Flot gives no guarantees if you change any of the
objects you pass in to the plot function or get out of it since
they're not necessarily deep-copied.
## Data Format ##
The data is an array of data series:
```js
[ series1, series2, ... ]
```
A series can either be raw data or an object with properties. The raw
data format is an array of points:
```js
[ [x1, y1], [x2, y2], ... ]
```
E.g.
```js
[ [1, 3], [2, 14.01], [3.5, 3.14] ]
```
Note that to simplify the internal logic in Flot both the x and y
values must be numbers (even if specifying time series, see below for
how to do this). This is a common problem because you might retrieve
data from the database and serialize them directly to JSON without
noticing the wrong type. If you're getting mysterious errors, double
check that you're inputting numbers and not strings.
If a null is specified as a point or if one of the coordinates is null
or couldn't be converted to a number, the point is ignored when
drawing. As a special case, a null value for lines is interpreted as a
line segment end, i.e. the points before and after the null value are
not connected.
Lines and points take two coordinates. For filled lines and bars, you
can specify a third coordinate which is the bottom of the filled
area/bar (defaults to 0).
The format of a single series object is as follows:
```js
{
color: color or number
data: rawdata
label: string
lines: specific lines options
bars: specific bars options
points: specific points options
xaxis: number
yaxis: number
clickable: boolean
hoverable: boolean
shadowSize: number
highlightColor: color or number
}
```
You don't have to specify any of them except the data, the rest are
options that will get default values. Typically you'd only specify
label and data, like this:
```js
{
label: "y = 3",
data: [[0, 3], [10, 3]]
}
```
The label is used for the legend, if you don't specify one, the series
will not show up in the legend.
If you don't specify color, the series will get a color from the
auto-generated colors. The color is either a CSS color specification
(like "rgb(255, 100, 123)") or an integer that specifies which of
auto-generated colors to select, e.g. 0 will get color no. 0, etc.
The latter is mostly useful if you let the user add and remove series,
in which case you can hard-code the color index to prevent the colors
from jumping around between the series.
The "xaxis" and "yaxis" options specify which axis to use. The axes
are numbered from 1 (default), so { yaxis: 2} means that the series
should be plotted against the second y axis.
"clickable" and "hoverable" can be set to false to disable
interactivity for specific series if interactivity is turned on in
the plot, see below.
The rest of the options are all documented below as they are the same
as the default options passed in via the options parameter in the plot
command. When you specify them for a specific data series, they will
override the default options for the plot for that data series.
Here's a complete example of a simple data specification:
```js
[ { label: "Foo", data: [ [10, 1], [17, -14], [30, 5] ] },
{ label: "Bar", data: [ [11, 13], [19, 11], [30, -7] ] }
]
```
## Plot Options ##
All options are completely optional. They are documented individually
below, to change them you just specify them in an object, e.g.
```js
var options = {
series: {
lines: { show: true },
points: { show: true }
}
};
$.plot(placeholder, data, options);
```
## Customizing the legend ##
```js
legend: {
show: boolean
labelFormatter: null or (fn: string, series object -> string)
labelBoxBorderColor: color
noColumns: number
position: "ne" or "nw" or "se" or "sw"
margin: number of pixels or [x margin, y margin]
backgroundColor: null or color
backgroundOpacity: number between 0 and 1
container: null or jQuery object/DOM element/jQuery expression
sorted: null/false, true, "ascending", "descending", "reverse", or a comparator
}
```
The legend is generated as a table with the data series labels and
small label boxes with the color of the series. If you want to format
the labels in some way, e.g. make them to links, you can pass in a
function for "labelFormatter". Here's an example that makes them
clickable:
```js
labelFormatter: function(label, series) {
// series is the series object for the label
return '<a href="#' + label + '">' + label + '</a>';
}
```
To prevent a series from showing up in the legend, simply have the function
return null.
"noColumns" is the number of columns to divide the legend table into.
"position" specifies the overall placement of the legend within the
plot (top-right, top-left, etc.) and margin the distance to the plot
edge (this can be either a number or an array of two numbers like [x,
y]). "backgroundColor" and "backgroundOpacity" specifies the
background. The default is a partly transparent auto-detected
background.
If you want the legend to appear somewhere else in the DOM, you can
specify "container" as a jQuery object/expression to put the legend
table into. The "position" and "margin" etc. options will then be
ignored. Note that Flot will overwrite the contents of the container.
Legend entries appear in the same order as their series by default. If "sorted"
is "reverse" then they appear in the opposite order from their series. To sort
them alphabetically, you can specify true, "ascending" or "descending", where
true and "ascending" are equivalent.
You can also provide your own comparator function that accepts two
objects with "label" and "color" properties, and returns zero if they
are equal, a positive value if the first is greater than the second,
and a negative value if the first is less than the second.
```js
sorted: function(a, b) {
// sort alphabetically in ascending order
return a.label == b.label ? 0 : (
a.label > b.label ? 1 : -1
)
}
```
## Customizing the axes ##
```js
xaxis, yaxis: {
show: null or true/false
position: "bottom" or "top" or "left" or "right"
mode: null or "time" ("time" requires jquery.flot.time.js plugin)
timezone: null, "browser" or timezone (only makes sense for mode: "time")
color: null or color spec
tickColor: null or color spec
font: null or font spec object
min: null or number
max: null or number
autoscaleMargin: null or number
transform: null or fn: number -> number
inverseTransform: null or fn: number -> number
ticks: null or number or ticks array or (fn: axis -> ticks array)
tickSize: number or array
minTickSize: number or array
tickFormatter: (fn: number, object -> string) or string
tickDecimals: null or number
labelWidth: null or number
labelHeight: null or number
reserveSpace: null or true
tickLength: null or number
alignTicksWithAxis: null or number
}
```
All axes have the same kind of options. The following describes how to
configure one axis, see below for what to do if you've got more than
one x axis or y axis.
If you don't set the "show" option (i.e. it is null), visibility is
auto-detected, i.e. the axis will show up if there's data associated
with it. You can override this by setting the "show" option to true or
false.
The "position" option specifies where the axis is placed, bottom or
top for x axes, left or right for y axes. The "mode" option determines
how the data is interpreted, the default of null means as decimal
numbers. Use "time" for time series data; see the time series data
section. The time plugin (jquery.flot.time.js) is required for time
series support.
The "color" option determines the color of the line and ticks for the axis, and
defaults to the grid color with transparency. For more fine-grained control you
can also set the color of the ticks separately with "tickColor".
You can customize the font and color used to draw the axis tick labels with CSS
or directly via the "font" option. When "font" is null - the default - each
tick label is given the 'flot-tick-label' class. For compatibility with Flot
0.7 and earlier the labels are also given the 'tickLabel' class, but this is
deprecated and scheduled to be removed with the release of version 1.0.0.
To enable more granular control over styles, labels are divided between a set
of text containers, with each holding the labels for one axis. These containers
are given the classes 'flot-[x|y]-axis', and 'flot-[x|y]#-axis', where '#' is
the number of the axis when there are multiple axes. For example, the x-axis
labels for a simple plot with only a single x-axis might look like this:
```html
<div class='flot-x-axis flot-x1-axis'>
<div class='flot-tick-label'>January 2013</div>
...
</div>
```
For direct control over label styles you can also provide "font" as an object
with this format:
```js
{
size: 11,
lineHeight: 13,
style: "italic",
weight: "bold",
family: "sans-serif",
variant: "small-caps",
color: "#545454"
}
```
The size and lineHeight must be expressed in pixels; CSS units such as 'em'
or 'smaller' are not allowed.
The options "min"/"max" are the precise minimum/maximum value on the
scale. If you don't specify either of them, a value will automatically
be chosen based on the minimum/maximum data values. Note that Flot
always examines all the data values you feed to it, even if a
restriction on another axis may make some of them invisible (this
makes interactive use more stable).
The "autoscaleMargin" is a bit esoteric: it's the fraction of margin
that the scaling algorithm will add to avoid that the outermost points
ends up on the grid border. Note that this margin is only applied when
a min or max value is not explicitly set. If a margin is specified,
the plot will furthermore extend the axis end-point to the nearest
whole tick. The default value is "null" for the x axes and 0.02 for y
axes which seems appropriate for most cases.
"transform" and "inverseTransform" are callbacks you can put in to
change the way the data is drawn. You can design a function to
compress or expand certain parts of the axis non-linearly, e.g.
suppress weekends or compress far away points with a logarithm or some
other means. When Flot draws the plot, each value is first put through
the transform function. Here's an example, the x axis can be turned
into a natural logarithm axis with the following code:
```js
xaxis: {
transform: function (v) { return Math.log(v); },
inverseTransform: function (v) { return Math.exp(v); }
}
```
Similarly, for reversing the y axis so the values appear in inverse
order:
```js
yaxis: {
transform: function (v) { return -v; },
inverseTransform: function (v) { return -v; }
}
```
Note that for finding extrema, Flot assumes that the transform
function does not reorder values (it should be monotone).
The inverseTransform is simply the inverse of the transform function
(so v == inverseTransform(transform(v)) for all relevant v). It is
required for converting from canvas coordinates to data coordinates,
e.g. for a mouse interaction where a certain pixel is clicked. If you
don't use any interactive features of Flot, you may not need it.
The rest of the options deal with the ticks.
If you don't specify any ticks, a tick generator algorithm will make
some for you. The algorithm has two passes. It first estimates how
many ticks would be reasonable and uses this number to compute a nice
round tick interval size. Then it generates the ticks.
You can specify how many ticks the algorithm aims for by setting
"ticks" to a number. The algorithm always tries to generate reasonably
round tick values so even if you ask for three ticks, you might get
five if that fits better with the rounding. If you don't want any
ticks at all, set "ticks" to 0 or an empty array.
Another option is to skip the rounding part and directly set the tick
interval size with "tickSize". If you set it to 2, you'll get ticks at
2, 4, 6, etc. Alternatively, you can specify that you just don't want
ticks at a size less than a specific tick size with "minTickSize".
Note that for time series, the format is an array like [2, "month"],
see the next section.
If you want to completely override the tick algorithm, you can specify
an array for "ticks", either like this:
```js
ticks: [0, 1.2, 2.4]
```
Or like this where the labels are also customized:
```js
ticks: [[0, "zero"], [1.2, "one mark"], [2.4, "two marks"]]
```
You can mix the two if you like.
For extra flexibility you can specify a function as the "ticks"
parameter. The function will be called with an object with the axis
min and max and should return a ticks array. Here's a simplistic tick
generator that spits out intervals of pi, suitable for use on the x
axis for trigonometric functions:
```js
function piTickGenerator(axis) {
var res = [], i = Math.floor(axis.min / Math.PI);
do {
var v = i * Math.PI;
res.push([v, i + "\u03c0"]);
++i;
} while (v < axis.max);
return res;
}
```
You can control how the ticks look like with "tickDecimals", the
number of decimals to display (default is auto-detected).
Alternatively, for ultimate control over how ticks are formatted you can
provide a function to "tickFormatter". The function is passed two
parameters, the tick value and an axis object with information, and
should return a string. The default formatter looks like this:
```js
function formatter(val, axis) {
return val.toFixed(axis.tickDecimals);
}
```
The axis object has "min" and "max" with the range of the axis,
"tickDecimals" with the number of decimals to round the value to and
"tickSize" with the size of the interval between ticks as calculated
by the automatic axis scaling algorithm (or specified by you). Here's
an example of a custom formatter:
```js
function suffixFormatter(val, axis) {
if (val > 1000000)
return (val / 1000000).toFixed(axis.tickDecimals) + " MB";
else if (val > 1000)
return (val / 1000).toFixed(axis.tickDecimals) + " kB";
else
return val.toFixed(axis.tickDecimals) + " B";
}
```
"labelWidth" and "labelHeight" specifies a fixed size of the tick
labels in pixels. They're useful in case you need to align several
plots. "reserveSpace" means that even if an axis isn't shown, Flot
should reserve space for it - it is useful in combination with
labelWidth and labelHeight for aligning multi-axis charts.
"tickLength" is the length of the tick lines in pixels. By default, the
innermost axes will have ticks that extend all across the plot, while
any extra axes use small ticks. A value of null means use the default,
while a number means small ticks of that length - set it to 0 to hide
the lines completely.
If you set "alignTicksWithAxis" to the number of another axis, e.g.
alignTicksWithAxis: 1, Flot will ensure that the autogenerated ticks
of this axis are aligned with the ticks of the other axis. This may
improve the looks, e.g. if you have one y axis to the left and one to
the right, because the grid lines will then match the ticks in both
ends. The trade-off is that the forced ticks won't necessarily be at
natural places.
## Multiple axes ##
If you need more than one x axis or y axis, you need to specify for
each data series which axis they are to use, as described under the
format of the data series, e.g. { data: [...], yaxis: 2 } specifies
that a series should be plotted against the second y axis.
To actually configure that axis, you can't use the xaxis/yaxis options
directly - instead there are two arrays in the options:
```js
xaxes: []
yaxes: []
```
Here's an example of configuring a single x axis and two y axes (we
can leave options of the first y axis empty as the defaults are fine):
```js
{
xaxes: [ { position: "top" } ],
yaxes: [ { }, { position: "right", min: 20 } ]
}
```
The arrays get their default values from the xaxis/yaxis settings, so
say you want to have all y axes start at zero, you can simply specify
yaxis: { min: 0 } instead of adding a min parameter to all the axes.
Generally, the various interfaces in Flot dealing with data points
either accept an xaxis/yaxis parameter to specify which axis number to
use (starting from 1), or lets you specify the coordinate directly as
x2/x3/... or x2axis/x3axis/... instead of "x" or "xaxis".
## Time series data ##
Please note that it is now required to include the time plugin,
jquery.flot.time.js, for time series support.
Time series are a bit more difficult than scalar data because
calendars don't follow a simple base 10 system. For many cases, Flot
abstracts most of this away, but it can still be a bit difficult to
get the data into Flot. So we'll first discuss the data format.
The time series support in Flot is based on Javascript timestamps,
i.e. everywhere a time value is expected or handed over, a Javascript
timestamp number is used. This is a number, not a Date object. A
Javascript timestamp is the number of milliseconds since January 1,
1970 00:00:00 UTC. This is almost the same as Unix timestamps, except it's
in milliseconds, so remember to multiply by 1000!
You can see a timestamp like this
```js
alert((new Date()).getTime())
```
There are different schools of thought when it comes to display of
timestamps. Many will want the timestamps to be displayed according to
a certain time zone, usually the time zone in which the data has been
produced. Some want the localized experience, where the timestamps are
displayed according to the local time of the visitor. Flot supports
both. Optionally you can include a third-party library to get
additional timezone support.
Default behavior is that Flot always displays timestamps according to
UTC. The reason being that the core Javascript Date object does not
support other fixed time zones. Often your data is at another time
zone, so it may take a little bit of tweaking to work around this
limitation.
The easiest way to think about it is to pretend that the data
production time zone is UTC, even if it isn't. So if you have a
datapoint at 2002-02-20 08:00, you can generate a timestamp for eight
o'clock UTC even if it really happened eight o'clock UTC+0200.
In PHP you can get an appropriate timestamp with:
```php
strtotime("2002-02-20 UTC") * 1000
```
In Python you can get it with something like:
```python
calendar.timegm(datetime_object.timetuple()) * 1000
```
In .NET you can get it with something like:
```aspx
public static long GetJavascriptTimestamp(System.DateTime input)
{
System.TimeSpan span = new System.TimeSpan(System.DateTime.Parse("1/1/1970").Ticks);
System.DateTime time = input.Subtract(span);
return (long)(time.Ticks / 10000);
}
```
Javascript also has some support for parsing date strings, so it is
possible to generate the timestamps manually client-side.
If you've already got the real UTC timestamp, it's too late to use the
pretend trick described above. But you can fix up the timestamps by
adding the time zone offset, e.g. for UTC+0200 you would add 2 hours
to the UTC timestamp you got. Then it'll look right on the plot. Most
programming environments have some means of getting the timezone
offset for a specific date (note that you need to get the offset for
each individual timestamp to account for daylight savings).
The alternative with core Javascript is to interpret the timestamps
according to the time zone that the visitor is in, which means that
the ticks will shift with the time zone and daylight savings of each
visitor. This behavior is enabled by setting the axis option
"timezone" to the value "browser".
If you need more time zone functionality than this, there is still
another option. If you include the "timezone-js" library
<https://github.com/mde/timezone-js> in the page and set axis.timezone
to a value recognized by said library, Flot will use timezone-js to
interpret the timestamps according to that time zone.
Once you've gotten the timestamps into the data and specified "time"
as the axis mode, Flot will automatically generate relevant ticks and
format them. As always, you can tweak the ticks via the "ticks" option
- just remember that the values should be timestamps (numbers), not
Date objects.
Tick generation and formatting can also be controlled separately
through the following axis options:
```js
minTickSize: array
timeformat: null or format string
monthNames: null or array of size 12 of strings
dayNames: null or array of size 7 of strings
twelveHourClock: boolean
```
Here "timeformat" is a format string to use. You might use it like
this:
```js
xaxis: {
mode: "time",
timeformat: "%Y/%m/%d"
}
```
This will result in tick labels like "2000/12/24". A subset of the
standard strftime specifiers are supported (plus the nonstandard %q):
```js
%a: weekday name (customizable)
%b: month name (customizable)
%d: day of month, zero-padded (01-31)
%e: day of month, space-padded ( 1-31)
%H: hours, 24-hour time, zero-padded (00-23)
%I: hours, 12-hour time, zero-padded (01-12)
%m: month, zero-padded (01-12)
%M: minutes, zero-padded (00-59)
%q: quarter (1-4)
%S: seconds, zero-padded (00-59)
%y: year (two digits)
%Y: year (four digits)
%p: am/pm
%P: AM/PM (uppercase version of %p)
%w: weekday as number (0-6, 0 being Sunday)
```
Flot 0.8 switched from %h to the standard %H hours specifier. The %h specifier
is still available, for backwards-compatibility, but is deprecated and
scheduled to be removed permanently with the release of version 1.0.
You can customize the month names with the "monthNames" option. For
instance, for Danish you might specify:
```js
monthNames: ["jan", "feb", "mar", "apr", "maj", "jun", "jul", "aug", "sep", "okt", "nov", "dec"]
```
Similarly you can customize the weekday names with the "dayNames"
option. An example in French:
```js
dayNames: ["dim", "lun", "mar", "mer", "jeu", "ven", "sam"]
```
If you set "twelveHourClock" to true, the autogenerated timestamps
will use 12 hour AM/PM timestamps instead of 24 hour. This only
applies if you have not set "timeformat". Use the "%I" and "%p" or
"%P" options if you want to build your own format string with 12-hour
times.
If the Date object has a strftime property (and it is a function), it
will be used instead of the built-in formatter. Thus you can include
a strftime library such as http://hacks.bluesmoon.info/strftime/ for
more powerful date/time formatting.
If everything else fails, you can control the formatting by specifying
a custom tick formatter function as usual. Here's a simple example
which will format December 24 as 24/12:
```js
tickFormatter: function (val, axis) {
var d = new Date(val);
return d.getUTCDate() + "/" + (d.getUTCMonth() + 1);
}
```
Note that for the time mode "tickSize" and "minTickSize" are a bit
special in that they are arrays on the form "[value, unit]" where unit
is one of "second", "minute", "hour", "day", "month" and "year". So
you can specify
```js
minTickSize: [1, "month"]
```
to get a tick interval size of at least 1 month and correspondingly,
if axis.tickSize is [2, "day"] in the tick formatter, the ticks have
been produced with two days in-between.
## Customizing the data series ##
```js
series: {
lines, points, bars: {
show: boolean
lineWidth: number
fill: boolean or number
fillColor: null or color/gradient
}
lines, bars: {
zero: boolean
}
points: {
radius: number
symbol: "circle" or function
}
bars: {
barWidth: number
align: "left", "right" or "center"
horizontal: boolean
}
lines: {
steps: boolean
}
shadowSize: number
highlightColor: color or number
}
colors: [ color1, color2, ... ]
```
The options inside "series: {}" are copied to each of the series. So
you can specify that all series should have bars by putting it in the
global options, or override it for individual series by specifying
bars in a particular series object in the array of data.
The most important options are "lines", "points" and "bars" that
specify whether and how lines, points and bars should be shown for
each data series. In case you don't specify anything at all, Flot will
default to showing lines (you can turn this off with
lines: { show: false }). You can specify the various types
independently of each other, and Flot will happily draw each of them
in turn (this is probably only useful for lines and points), e.g.
```js
var options = {
series: {
lines: { show: true, fill: true, fillColor: "rgba(255, 255, 255, 0.8)" },
points: { show: true, fill: false }
}
};
```
"lineWidth" is the thickness of the line or outline in pixels. You can
set it to 0 to prevent a line or outline from being drawn; this will
also hide the shadow.
"fill" is whether the shape should be filled. For lines, this produces
area graphs. You can use "fillColor" to specify the color of the fill.
If "fillColor" evaluates to false (default for everything except
points which are filled with white), the fill color is auto-set to the
color of the data series. You can adjust the opacity of the fill by
setting fill to a number between 0 (fully transparent) and 1 (fully
opaque).
For bars, fillColor can be a gradient, see the gradient documentation
below. "barWidth" is the width of the bars in units of the x axis (or
the y axis if "horizontal" is true), contrary to most other measures
that are specified in pixels. For instance, for time series the unit
is milliseconds so 24 * 60 * 60 * 1000 produces bars with the width of
a day. "align" specifies whether a bar should be left-aligned
(default), right-aligned or centered on top of the value it represents.
When "horizontal" is on, the bars are drawn horizontally, i.e. from the
y axis instead of the x axis; note that the bar end points are still
defined in the same way so you'll probably want to swap the
coordinates if you've been plotting vertical bars first.
Area and bar charts normally start from zero, regardless of the data's range.
This is because they convey information through size, and starting from a
different value would distort their meaning. In cases where the fill is purely
for decorative purposes, however, "zero" allows you to override this behavior.
It defaults to true for filled lines and bars; setting it to false tells the
series to use the same automatic scaling as an un-filled line.
For lines, "steps" specifies whether two adjacent data points are
connected with a straight (possibly diagonal) line or with first a
horizontal and then a vertical line. Note that this transforms the
data by adding extra points.
For points, you can specify the radius and the symbol. The only
built-in symbol type is circles, for other types you can use a plugin
or define them yourself by specifying a callback:
```js
function cross(ctx, x, y, radius, shadow) {
var size = radius * Math.sqrt(Math.PI) / 2;
ctx.moveTo(x - size, y - size);
ctx.lineTo(x + size, y + size);
ctx.moveTo(x - size, y + size);
ctx.lineTo(x + size, y - size);
}
```
The parameters are the drawing context, x and y coordinates of the
center of the point, a radius which corresponds to what the circle
would have used and whether the call is to draw a shadow (due to
limited canvas support, shadows are currently faked through extra
draws). It's good practice to ensure that the area covered by the
symbol is the same as for the circle with the given radius, this
ensures that all symbols have approximately the same visual weight.
"shadowSize" is the default size of shadows in pixels. Set it to 0 to
remove shadows.
"highlightColor" is the default color of the translucent overlay used
to highlight the series when the mouse hovers over it.
The "colors" array specifies a default color theme to get colors for
the data series from. You can specify as many colors as you like, like
this:
```js
colors: ["#d18b2c", "#dba255", "#919733"]
```
If there are more data series than colors, Flot will try to generate
extra colors by lightening and darkening colors in the theme.
## Customizing the grid ##
```js
grid: {
show: boolean
aboveData: boolean
color: color
backgroundColor: color/gradient or null
margin: number or margin object
labelMargin: number
axisMargin: number
markings: array of markings or (fn: axes -> array of markings)
borderWidth: number or object with "top", "right", "bottom" and "left" properties with different widths
borderColor: color or null or object with "top", "right", "bottom" and "left" properties with different colors
minBorderMargin: number or null
clickable: boolean
hoverable: boolean
autoHighlight: boolean
mouseActiveRadius: number
}
interaction: {
redrawOverlayInterval: number or -1
}
```
The grid is the thing with the axes and a number of ticks. Many of the
things in the grid are configured under the individual axes, but not
all. "color" is the color of the grid itself whereas "backgroundColor"
specifies the background color inside the grid area, here null means
that the background is transparent. You can also set a gradient, see
the gradient documentation below.
You can turn off the whole grid including tick labels by setting
"show" to false. "aboveData" determines whether the grid is drawn
above the data or below (below is default).
"margin" is the space in pixels between the canvas edge and the grid,
which can be either a number or an object with individual margins for
each side, in the form:
```js
margin: {
top: top margin in pixels
left: left margin in pixels
bottom: bottom margin in pixels
right: right margin in pixels
}
```
"labelMargin" is the space in pixels between tick labels and axis
line, and "axisMargin" is the space in pixels between axes when there
are two next to each other.
"borderWidth" is the width of the border around the plot. Set it to 0
to disable the border. Set it to an object with "top", "right",
"bottom" and "left" properties to use different widths. You can
also set "borderColor" if you want the border to have a different color
than the grid lines. Set it to an object with "top", "right", "bottom"
and "left" properties to use different colors. "minBorderMargin" controls
the default minimum margin around the border - it's used to make sure
that points aren't accidentally clipped by the canvas edge so by default
the value is computed from the point radius.
"markings" is used to draw simple lines and rectangular areas in the
background of the plot. You can either specify an array of ranges on
the form { xaxis: { from, to }, yaxis: { from, to } } (with multiple
axes, you can specify coordinates for other axes instead, e.g. as
x2axis/x3axis/...) or with a function that returns such an array given
the axes for the plot in an object as the first parameter.
You can set the color of markings by specifying "color" in the ranges
object. Here's an example array:
```js
markings: [ { xaxis: { from: 0, to: 2 }, yaxis: { from: 10, to: 10 }, color: "#bb0000" }, ... ]
```
If you leave out one of the values, that value is assumed to go to the
border of the plot. So for example if you only specify { xaxis: {
from: 0, to: 2 } } it means an area that extends from the top to the
bottom of the plot in the x range 0-2.
A line is drawn if from and to are the same, e.g.
```js
markings: [ { yaxis: { from: 1, to: 1 } }, ... ]
```
would draw a line parallel to the x axis at y = 1. You can control the
line width with "lineWidth" in the range object.
An example function that makes vertical stripes might look like this:
```js
markings: function (axes) {
var markings = [];
for (var x = Math.floor(axes.xaxis.min); x < axes.xaxis.max; x += 2)
markings.push({ xaxis: { from: x, to: x + 1 } });
return markings;
}
```
If you set "clickable" to true, the plot will listen for click events
on the plot area and fire a "plotclick" event on the placeholder with
a position and a nearby data item object as parameters. The coordinates
are available both in the unit of the axes (not in pixels) and in
global screen coordinates.
Likewise, if you set "hoverable" to true, the plot will listen for
mouse move events on the plot area and fire a "plothover" event with
the same parameters as the "plotclick" event. If "autoHighlight" is
true (the default), nearby data items are highlighted automatically.
If needed, you can disable highlighting and control it yourself with
the highlight/unhighlight plot methods described elsewhere.
You can use "plotclick" and "plothover" events like this:
```js
$.plot($("#placeholder"), [ d ], { grid: { clickable: true } });
$("#placeholder").bind("plotclick", function (event, pos, item) {
alert("You clicked at " + pos.x + ", " + pos.y);
// axis coordinates for other axes, if present, are in pos.x2, pos.x3, ...
// if you need global screen coordinates, they are pos.pageX, pos.pageY
if (item) {
highlight(item.series, item.datapoint);
alert("You clicked a point!");
}
});
```
The item object in this example is either null or a nearby object on the form:
```js
item: {
datapoint: the point, e.g. [0, 2]
dataIndex: the index of the point in the data array
series: the series object
seriesIndex: the index of the series
pageX, pageY: the global screen coordinates of the point
}
```
For instance, if you have specified the data like this
```js
$.plot($("#placeholder"), [ { label: "Foo", data: [[0, 10], [7, 3]] } ], ...);
```
and the mouse is near the point (7, 3), "datapoint" is [7, 3],
"dataIndex" will be 1, "series" is a normalized series object with
among other things the "Foo" label in series.label and the color in
series.color, and "seriesIndex" is 0. Note that plugins and options
that transform the data can shift the indexes from what you specified
in the original data array.
If you use the above events to update some other information and want
to clear out that info in case the mouse goes away, you'll probably
also need to listen to "mouseout" events on the placeholder div.
"mouseActiveRadius" specifies how far the mouse can be from an item
and still activate it. If there are two or more points within this
radius, Flot chooses the closest item. For bars, the top-most bar
(from the latest specified data series) is chosen.
If you want to disable interactivity for a specific data series, you
can set "hoverable" and "clickable" to false in the options for that
series, like this:
```js
{ data: [...], label: "Foo", clickable: false }
```
"redrawOverlayInterval" specifies the maximum time to delay a redraw
of interactive things (this works as a rate limiting device). The
default is capped to 60 frames per second. You can set it to -1 to
disable the rate limiting.
## Specifying gradients ##
A gradient is specified like this:
```js
{ colors: [ color1, color2, ... ] }
```
For instance, you might specify a background on the grid going from
black to gray like this:
```js
grid: {
backgroundColor: { colors: ["#000", "#999"] }
}
```
For the series you can specify the gradient as an object that
specifies the scaling of the brightness and the opacity of the series
color, e.g.
```js
{ colors: [{ opacity: 0.8 }, { brightness: 0.6, opacity: 0.8 } ] }
```
where the first color simply has its alpha scaled, whereas the second
is also darkened. For instance, for bars the following makes the bars
gradually disappear, without outline:
```js
bars: {
show: true,
lineWidth: 0,
fill: true,
fillColor: { colors: [ { opacity: 0.8 }, { opacity: 0.1 } ] }
}
```
Flot currently only supports vertical gradients drawn from top to
bottom because that's what works with IE.
## Plot Methods ##
The Plot object returned from the plot function has some methods you
can call:
- highlight(series, datapoint)
Highlight a specific datapoint in the data series. You can either
specify the actual objects, e.g. if you got them from a
"plotclick" event, or you can specify the indices, e.g.
highlight(1, 3) to highlight the fourth point in the second series
(remember, zero-based indexing).
- unhighlight(series, datapoint) or unhighlight()
Remove the highlighting of the point, same parameters as
highlight.
If you call unhighlight with no parameters, e.g. as
plot.unhighlight(), all current highlights are removed.
- setData(data)
You can use this to reset the data used. Note that axis scaling,
ticks, legend etc. will not be recomputed (use setupGrid() to do
that). You'll probably want to call draw() afterwards.
You can use this function to speed up redrawing a small plot if
you know that the axes won't change. Put in the new data with
setData(newdata), call draw(), and you're good to go. Note that
for large datasets, almost all the time is consumed in draw()
plotting the data so in this case don't bother.
- setupGrid()
Recalculate and set axis scaling, ticks, legend etc.
Note that because of the drawing model of the canvas, this
function will immediately redraw (actually reinsert in the DOM)
the labels and the legend, but not the actual tick lines because
they're drawn on the canvas. You need to call draw() to get the
canvas redrawn.
- draw()
Redraws the plot canvas.
- triggerRedrawOverlay()
Schedules an update of an overlay canvas used for drawing
interactive things like a selection and point highlights. This
is mostly useful for writing plugins. The redraw doesn't happen
immediately, instead a timer is set to catch multiple successive
redraws (e.g. from a mousemove). You can get to the overlay by
setting up a drawOverlay hook.
- width()/height()
Gets the width and height of the plotting area inside the grid.
This is smaller than the canvas or placeholder dimensions as some
extra space is needed (e.g. for labels).
- offset()
Returns the offset of the plotting area inside the grid relative
to the document, useful for instance for calculating mouse
positions (event.pageX/Y minus this offset is the pixel position
inside the plot).
- pointOffset({ x: xpos, y: ypos })
Returns the calculated offset of the data point at (x, y) in data
space within the placeholder div. If you are working with multiple
axes, you can specify the x and y axis references, e.g.
```js
o = pointOffset({ x: xpos, y: ypos, xaxis: 2, yaxis: 3 })
// o.left and o.top now contains the offset within the div
```
- resize()
Tells Flot to resize the drawing canvas to the size of the
placeholder. You need to run setupGrid() and draw() afterwards as
canvas resizing is a destructive operation. This is used
internally by the resize plugin.
- shutdown()
Cleans up any event handlers Flot has currently registered. This
is used internally.
There are also some members that let you peek inside the internal
workings of Flot which is useful in some cases. Note that if you change
something in the objects returned, you're changing the objects used by
Flot to keep track of its state, so be careful.
- getData()
Returns an array of the data series currently used in normalized
form with missing settings filled in according to the global
options. So for instance to find out what color Flot has assigned
to the data series, you could do this:
```js
var series = plot.getData();
for (var i = 0; i < series.length; ++i)
alert(series[i].color);
```
A notable other interesting field besides color is datapoints
which has a field "points" with the normalized data points in a
flat array (the field "pointsize" is the increment in the flat
array to get to the next point so for a dataset consisting only of
(x,y) pairs it would be 2).
- getAxes()
Gets an object with the axes. The axes are returned as the
attributes of the object, so for instance getAxes().xaxis is the
x axis.
Various things are stuffed inside an axis object, e.g. you could
use getAxes().xaxis.ticks to find out what the ticks are for the
xaxis. Two other useful attributes are p2c and c2p, functions for
transforming from data point space to the canvas plot space and
back. Both returns values that are offset with the plot offset.
Check the Flot source code for the complete set of attributes (or
output an axis with console.log() and inspect it).
With multiple axes, the extra axes are returned as x2axis, x3axis,
etc., e.g. getAxes().y2axis is the second y axis. You can check
y2axis.used to see whether the axis is associated with any data
points and y2axis.show to see if it is currently shown.
- getPlaceholder()
Returns placeholder that the plot was put into. This can be useful
for plugins for adding DOM elements or firing events.
- getCanvas()
Returns the canvas used for drawing in case you need to hack on it
yourself. You'll probably need to get the plot offset too.
- getPlotOffset()
Gets the offset that the grid has within the canvas as an object
with distances from the canvas edges as "left", "right", "top",
"bottom". I.e., if you draw a circle on the canvas with the center
placed at (left, top), its center will be at the top-most, left
corner of the grid.
- getOptions()
Gets the options for the plot, normalized, with default values
filled in. You get a reference to actual values used by Flot, so
if you modify the values in here, Flot will use the new values.
If you change something, you probably have to call draw() or
setupGrid() or triggerRedrawOverlay() to see the change.
## Hooks ##
In addition to the public methods, the Plot object also has some hooks
that can be used to modify the plotting process. You can install a
callback function at various points in the process, the function then
gets access to the internal data structures in Flot.
Here's an overview of the phases Flot goes through:
1. Plugin initialization, parsing options
2. Constructing the canvases used for drawing
3. Set data: parsing data specification, calculating colors,
copying raw data points into internal format,
normalizing them, finding max/min for axis auto-scaling
4. Grid setup: calculating axis spacing, ticks, inserting tick
labels, the legend
5. Draw: drawing the grid, drawing each of the series in turn
6. Setting up event handling for interactive features
7. Responding to events, if any
8. Shutdown: this mostly happens in case a plot is overwritten
Each hook is simply a function which is put in the appropriate array.
You can add them through the "hooks" option, and they are also available
after the plot is constructed as the "hooks" attribute on the returned
plot object, e.g.
```js
// define a simple draw hook
function hellohook(plot, canvascontext) { alert("hello!"); };
// pass it in, in an array since we might want to specify several
var plot = $.plot(placeholder, data, { hooks: { draw: [hellohook] } });
// we can now find it again in plot.hooks.draw[0] unless a plugin
// has added other hooks
```
The available hooks are described below. All hook callbacks get the
plot object as first parameter. You can find some examples of defined
hooks in the plugins bundled with Flot.
- processOptions [phase 1]
```function(plot, options)```
Called after Flot has parsed and merged options. Useful in the
instance where customizations beyond simple merging of default
values is needed. A plugin might use it to detect that it has been
enabled and then turn on or off other options.
- processRawData [phase 3]
```function(plot, series, data, datapoints)```
Called before Flot copies and normalizes the raw data for the given
series. If the function fills in datapoints.points with normalized
points and sets datapoints.pointsize to the size of the points,
Flot will skip the copying/normalization step for this series.
In any case, you might be interested in setting datapoints.format,
an array of objects for specifying how a point is normalized and
how it interferes with axis scaling. It accepts the following options:
```js
{
x, y: boolean,
number: boolean,
required: boolean,
defaultValue: value,
autoscale: boolean
}
```
"x" and "y" specify whether the value is plotted against the x or y axis,
and is currently used only to calculate axis min-max ranges. The default
format array, for example, looks like this:
```js
[
{ x: true, number: true, required: true },
{ y: true, number: true, required: true }
]
```
This indicates that a point, i.e. [0, 25], consists of two values, with the
first being plotted on the x axis and the second on the y axis.
If "number" is true, then the value must be numeric, and is set to null if
it cannot be converted to a number.
"defaultValue" provides a fallback in case the original value is null. This
is for instance handy for bars, where one can omit the third coordinate
(the bottom of the bar), which then defaults to zero.
If "required" is true, then the value must exist (be non-null) for the
point as a whole to be valid. If no value is provided, then the entire
point is cleared out with nulls, turning it into a gap in the series.
"autoscale" determines whether the value is considered when calculating an
automatic min-max range for the axes that the value is plotted against.
- processDatapoints [phase 3]
```function(plot, series, datapoints)```
Called after normalization of the given series but before finding
min/max of the data points. This hook is useful for implementing data
transformations. "datapoints" contains the normalized data points in
a flat array as datapoints.points with the size of a single point
given in datapoints.pointsize. Here's a simple transform that
multiplies all y coordinates by 2:
```js
function multiply(plot, series, datapoints) {
var points = datapoints.points, ps = datapoints.pointsize;
for (var i = 0; i < points.length; i += ps)
points[i + 1] *= 2;
}
```
Note that you must leave datapoints in a good condition as Flot
doesn't check it or do any normalization on it afterwards.
- processOffset [phase 4]
```function(plot, offset)```
Called after Flot has initialized the plot's offset, but before it
draws any axes or plot elements. This hook is useful for customizing
the margins between the grid and the edge of the canvas. "offset" is
an object with attributes "top", "bottom", "left" and "right",
corresponding to the margins on the four sides of the plot.
- drawBackground [phase 5]
```function(plot, canvascontext)```
Called before all other drawing operations. Used to draw backgrounds
or other custom elements before the plot or axes have been drawn.
- drawSeries [phase 5]
```function(plot, canvascontext, series)```
Hook for custom drawing of a single series. Called just before the
standard drawing routine has been called in the loop that draws
each series.
- draw [phase 5]
```function(plot, canvascontext)```
Hook for drawing on the canvas. Called after the grid is drawn
(unless it's disabled or grid.aboveData is set) and the series have
been plotted (in case any points, lines or bars have been turned
on). For examples of how to draw things, look at the source code.
- bindEvents [phase 6]
```function(plot, eventHolder)```
Called after Flot has setup its event handlers. Should set any
necessary event handlers on eventHolder, a jQuery object with the
canvas, e.g.
```js
function (plot, eventHolder) {
eventHolder.mousedown(function (e) {
alert("You pressed the mouse at " + e.pageX + " " + e.pageY);
});
}
```
Interesting events include click, mousemove, mouseup/down. You can
use all jQuery events. Usually, the event handlers will update the
state by drawing something (add a drawOverlay hook and call
triggerRedrawOverlay) or firing an externally visible event for
user code. See the crosshair plugin for an example.
Currently, eventHolder actually contains both the static canvas
used for the plot itself and the overlay canvas used for
interactive features because some versions of IE get the stacking
order wrong. The hook only gets one event, though (either for the
overlay or for the static canvas).
Note that custom plot events generated by Flot are not generated on
eventHolder, but on the div placeholder supplied as the first
argument to the plot call. You can get that with
plot.getPlaceholder() - that's probably also the one you should use
if you need to fire a custom event.
- drawOverlay [phase 7]
```function (plot, canvascontext)```
The drawOverlay hook is used for interactive things that need a
canvas to draw on. The model currently used by Flot works the way
that an extra overlay canvas is positioned on top of the static
canvas. This overlay is cleared and then completely redrawn
whenever something interesting happens. This hook is called when
the overlay canvas is to be redrawn.
"canvascontext" is the 2D context of the overlay canvas. You can
use this to draw things. You'll most likely need some of the
metrics computed by Flot, e.g. plot.width()/plot.height(). See the
crosshair plugin for an example.
- shutdown [phase 8]
```function (plot, eventHolder)```
Run when plot.shutdown() is called, which usually only happens in
case a plot is overwritten by a new plot. If you're writing a
plugin that adds extra DOM elements or event handlers, you should
add a callback to clean up after you. Take a look at the section in
PLUGINS.txt for more info.
## Plugins ##
Plugins extend the functionality of Flot. To use a plugin, simply
include its Javascript file after Flot in the HTML page.
If you're worried about download size/latency, you can concatenate all
the plugins you use, and Flot itself for that matter, into one big file
(make sure you get the order right), then optionally run it through a
Javascript minifier such as YUI Compressor.
Here's a brief explanation of how the plugin plumbings work:
Each plugin registers itself in the global array $.plot.plugins. When
you make a new plot object with $.plot, Flot goes through this array
calling the "init" function of each plugin and merging default options
from the "option" attribute of the plugin. The init function gets a
reference to the plot object created and uses this to register hooks
and add new public methods if needed.
See the PLUGINS.txt file for details on how to write a plugin. As the
above description hints, it's actually pretty easy.
## Version number ##
The version number of Flot is available in ```$.plot.version```.
| PypiClean |
/Glances-3.4.0.3.tar.gz/Glances-3.4.0.3/docs/quickstart.rst | .. _quickstart:
Quickstart
==========
This page gives a good introduction in how to get started with Glances.
Glances offers 3 modes:
- Standalone
- Client/Server
- Web server
Standalone Mode
---------------
If you want to monitor your local machine, open a console/terminal
and simply run:
.. code-block:: console
$ glances
Glances should start (press 'q' or 'ESC' to exit):
.. image:: _static/screenshot-wide.png
It is also possible to display RAW (Python) stats directly to stdout using:
.. code-block:: console
$ glances --stdout cpu.user,mem.used,load
cpu.user: 30.7
mem.used: 3278204928
load: {'cpucore': 4, 'min1': 0.21, 'min5': 0.4, 'min15': 0.27}
cpu.user: 3.4
mem.used: 3275251712
load: {'cpucore': 4, 'min1': 0.19, 'min5': 0.39, 'min15': 0.27}
...
or in a CSV format thanks to the stdout-csv option:
.. code-block:: console
$ glances --stdout-csv now,cpu.user,mem.used,load
now,cpu.user,mem.used,load.cpucore,load.min1,load.min5,load.min15
2018-12-08 22:04:20 CEST,7.3,5948149760,4,1.04,0.99,1.04
2018-12-08 22:04:23 CEST,5.4,5949136896,4,1.04,0.99,1.04
...
or as a JSON format thanks to the stdout-json option (attribute not supported in this mode):
.. code-block:: console
$ glances --stdout-json cpu,mem
cpu: {"total": 29.0, "user": 24.7, "nice": 0.0, "system": 3.8, "idle": 71.4, "iowait": 0.0, "irq": 0.0, "softirq": 0.0, "steal": 0.0, "guest": 0.0, "guest_nice": 0.0, "time_since_update": 1, "cpucore": 4, "ctx_switches": 0, "interrupts": 0, "soft_interrupts": 0, "syscalls": 0}
mem: {"total": 7837949952, "available": 2919079936, "percent": 62.8, "used": 4918870016, "free": 2919079936, "active": 2841214976, "inactive": 3340550144, "buffers": 546799616, "cached": 3068141568, "shared": 788156416}
...
Note: It will display one line per stat per refresh.
Client/Server Mode
------------------
If you want to remotely monitor a machine, called ``server``, from
another one, called ``client``, just run on the server:
.. code-block:: console
server$ glances -s
and on the client:
.. code-block:: console
client$ glances -c @server
where ``@server`` is the IP address or hostname of the server.
In server mode, you can set the bind address with ``-B ADDRESS`` and
the listening TCP port with ``-p PORT``.
In client mode, you can set the TCP port of the server with ``-p PORT``.
Default binding address is ``0.0.0.0`` (Glances will listen on all the
available network interfaces) and TCP port is ``61209``.
In client/server mode, limits are set by the server side.
Central client
^^^^^^^^^^^^^^
.. image:: _static/browser.png
Glances can centralize available Glances servers using the ``--browser``
option. The server list can be statically defined via the configuration
file (section ``[serverlist]``).
Example:
.. code-block:: ini
[serverlist]
# Define the static servers list
server_1_name=xps
server_1_alias=xps
server_1_port=61209
server_2_name=win
server_2_port=61235
Glances can also detect and display all Glances servers available on
your network via the ``zeroconf`` protocol (not available on Windows):
To start the central client, use the following option:
.. code-block:: console
client$ glances --browser
.. note::
Use ``--disable-autodiscover`` to disable the auto discovery mode.
When the list is displayed, you can navigate through the Glances servers with
up/down keys. It is also possible to sort the server using:
- '1' is normal (do not sort)
- '2' is using sorting with ascending order (ONLINE > SNMP > PROTECTED > OFFLINE > UNKNOWN)
- '3' is using sorting with descending order (UNKNOWN > OFFLINE > PROTECTED > SNMP > ONLINE)
SNMP
^^^^
As an experimental feature, if Glances server is not detected by the
client, the latter will try to grab stats using the ``SNMP`` protocol:
.. code-block:: console
client$ glances -c @snmpserver
.. note::
Stats grabbed by SNMP request are limited and OS dependent.
A SNMP server should be installed and configured...
IPv6
^^^^
Glances is ``IPv6`` compatible. Just use the ``-B ::`` option to bind to
all IPv6 addresses.
Web Server Mode
---------------
.. image:: _static/screenshot-web.png
If you want to remotely monitor a machine, called ``server``, from any
device with a web browser, just run the server with the ``-w`` option:
.. code-block:: console
server$ glances -w
then on the client enter the following URL in your favorite web browser:
::
http://@server:61208
where ``@server`` is the IP address or hostname of the server.
To change the refresh rate of the page, just add the period in seconds
at the end of the URL. For example, to refresh the page every ``10``
seconds:
::
http://@server:61208/10
The Glances web interface follows responsive web design principles.
Here's a screenshot from Chrome on Android:
.. image:: _static/screenshot-web2.png
How to protect your server (or Web server) with a login/password ?
------------------------------------------------------------------
You can set a password to access to the server using the ``--password``.
By default, the login is ``glances`` but you can change it with
``--username``.
If you want, the SHA password will be stored in ``<login>.pwd`` file (in
the same folder where the Glances configuration file is stored, so
~/.config/glances/ on GNU/Linux operating systems).
Next time you run the server/client, the password will not be asked. To set a
specific username you can use the -u <username> option.
It is also possible to set the default password in the Glances configuration
file:
.. code-block:: ini
[passwords]
# Define the passwords list
# Syntax: host=password
# Where: host is the hostname
# password is the clear password
# Additionally (and optionally) a default password could be defined
localhost=mylocalhostpassword
default=mydefaultpassword
| PypiClean |
/Djblets-3.3.tar.gz/Djblets-3.3/djblets/cache/backend.py | import hashlib
import hmac
import io
import logging
import pickle
import re
import zlib
from django.conf import settings
from django.core.cache import cache
from django.contrib.sites.models import Site
from django.utils.encoding import force_bytes
from djblets.cache.errors import MissingChunkError
from djblets.secrets.crypto import (aes_decrypt,
aes_decrypt_iter,
aes_encrypt,
aes_encrypt_iter,
get_default_aes_encryption_key)
# Module-level logger for cache-related diagnostics.
logger = logging.getLogger(__name__)
# Fallback expiration used when settings.CACHE_EXPIRATION_TIME is unset.
DEFAULT_EXPIRATION_TIME = 60 * 60 * 24 * 30 # 1 month
# Size of each chunk when large payloads are split across cache keys.
CACHE_CHUNK_SIZE = 2 ** 20 - 1024 # almost 1M (memcached's slab limit)
# memcached key size constraint (typically 250, but leave a few bytes for the
# large data handling)
MAX_KEY_SIZE = 240
# Control characters, spaces, and DEL are not allowed in memcached keys;
# presumably keys matching this are sanitized/hashed by the key-building
# code -- confirm against make_key() further down in the file.
_INVALID_KEY_CHARS_RE = re.compile(r'[\x00-\x20\x7f]')
# Sentinel distinguishing "nothing in cache" from a cached None/falsy value.
_NO_RESULTS = object()
# Effective default expiration: site setting if provided, else one month.
_default_expiration = getattr(settings, 'CACHE_EXPIRATION_TIME',
                              DEFAULT_EXPIRATION_TIME)
class _CacheContext:
    """State and functions for performing a cache-related operation.

    This is created internally by :py:func:`cache_memoize` and
    :py:func:`cache_memoize_iter`, and ensures consistency in cache key
    names, timeouts, and state storage, simplifying the operations
    responsible for loading and saving complex cache data.

    Version Added:
        3.0
    """

    def __init__(self, cache, base_cache_key, expiration, compress_large_data,
                 use_encryption, encryption_key):
        """Initialize the context.

        Args:
            cache (object):
                The Django cache connection that all operations will work
                on. This is considered opaque.

            base_cache_key (str):
                The base cache key for caching operations.

                This is the value passed in to :py:func:`cache_memoize` or
                :py:func:`cache_memoize_iter`, and will be used when
                constructing related cache keys.

            expiration (int):
                The expiration time in seconds for all data cached using
                this context.

            compress_large_data (bool):
                Whether large data will be compressed.

            use_encryption (bool):
                Whether to use encryption when storing or reading data.

                This defaults to ``False``, but can be forced on for all
                cached data by setting
                ``settings.DJBLETS_CACHE_FORCE_ENCRYPTION=True``.

            encryption_key (bytes or str):
                An explicit AES encryption key to use when passing
                ``use_encryption=True``.

                This defaults to the value in
                ``settings.DJBLETS_CACHE_DEFAULT_ENCRYPTION_KEY``, if set, or
                to the default AES encryption key for the server as provided
                by :py:func:`djblets.secrets.crypto.
                get_default_aes_encryption_key`.
        """
        if use_encryption is None:
            use_encryption = _get_default_use_encryption()

        if use_encryption:
            if encryption_key:
                # This is documented as accepting bytes or str, so normalize
                # a str key to its UTF-8 byte form (matching the handling in
                # _get_default_encryption_key()) instead of asserting on it.
                if isinstance(encryption_key, str):
                    encryption_key = encryption_key.encode('utf-8')

                assert isinstance(encryption_key, bytes)
            else:
                encryption_key = _get_default_encryption_key()
        else:
            encryption_key = None

        self.cache = cache
        self.expiration = expiration

        # full_cache_key must exist (as None) before make_key() is called
        # below, since make_key() consults it.
        self.full_cache_key = None
        self.base_cache_key = base_cache_key
        self.use_encryption = use_encryption
        self.encryption_key = encryption_key
        self.compress_large_data = compress_large_data

        self.full_cache_key = self.make_key(base_cache_key)

    def make_key(self, key):
        """Return a full cache key from the provided base key.

        Args:
            key (str):
                The base cache key to make a full key from.

        Returns:
            str:
            The full cache key.
        """
        # Reuse the already-computed main key when possible, avoiding a
        # redundant (and possibly HMAC-based) key computation.
        if key == self.base_cache_key and self.full_cache_key:
            return self.full_cache_key

        return make_cache_key(key,
                              use_encryption=self.use_encryption,
                              encryption_key=self.encryption_key)

    def make_subkey(self, suffix):
        """Return a full cache key combining the main key and a suffix.

        The key will be built in the form of :samp:`{mainkey}-{suffix}`, and
        will then be converted into a full cache key.

        Args:
            suffix (int or str):
                The suffix to append to the cache key.

        Returns:
            str:
            The full cache key in the form of ``<mainkey>-<suffix>``.
        """
        return self.make_key('%s-%s' % (self.base_cache_key, suffix))

    def prepare_value(self, full_cache_key, value):
        """Prepare a value for storage in cache.

        If using encryption, this will serialize and encrypt the value.
        Otherwise this returns the value as-is.

        Args:
            full_cache_key (str):
                The full cache key where this value will be stored.

            value (object):
                The value to cache.

        Returns:
            object:
            The prepared value.

        Raises:
            Exception:
                An error occurred pickling or encrypting the value. The
                exception is logged and then raised as-is.
        """
        if self.use_encryption:
            # Normally Django's cache backends will handle pickling of data,
            # so what we set is what we get. That doesn't work when we encrypt
            # data, though, since the type information will be lost. Plus,
            # we need to be able to give a bytestring representation to
            # aes_encrypt().
            #
            # So we instead pickle the data before encrypting it, and then we
            # unpickle after we decrypt it later.
            try:
                value = pickle.dumps(value, protocol=0)
            except Exception as e:
                logger.error('Failed to serialize data for cache key "%s": %s',
                             full_cache_key, e)
                raise

            try:
                value = aes_encrypt(value, key=self.encryption_key)
            except Exception as e:
                logger.error('Failed to encrypt data for cache for key '
                             '%s: %s.',
                             full_cache_key, e)
                raise

        return value

    def load_value(self, key=None):
        """Load a value from cache.

        This will take care of converting the provided key to a full cache
        key, and decrypting and deserializing the value if needed.

        Args:
            key (str):
                The full cache key to load from cache.

                If not provided, the full main cache key will be used.

        Returns:
            object:
            The value from the cache, or :py:data:`_NO_RESULTS` if not found.

        Raises:
            Exception:
                An error occurred reading from cache or processing results.
                The exception is raised as-is.
        """
        if key is None:
            key = self.full_cache_key

        value = self.cache.get(key, _NO_RESULTS)

        if value is not _NO_RESULTS and self.use_encryption:
            # Reverse the transformations in prepare_value(): decrypt first,
            # then unpickle.
            try:
                value = aes_decrypt(value, key=self.encryption_key)
            except Exception as e:
                logger.error('Failed to decrypt data from cache for '
                             'key %s: %s.',
                             key, e)
                raise

            try:
                value = pickle.loads(value)
            except Exception as e:
                logger.warning('Failed to deserialize data from cache for '
                               'key %s: %s.',
                               key, e)
                raise

        return value

    def store_value(self, value, *, key=None, raw=False):
        """Store a value in cache.

        This will take care of preparing any values to be stored, if needed.

        Args:
            value (object):
                The value to store in cache.

            key (str, optional):
                The full cache key to save to cache.

                If not provided, the full main cache key will be used.

            raw (bool, optional):
                Whether to store the value directly without modifications.

        Raises:
            Exception:
                An error occurred preparing the value or writing to cache.
                The exception is raised as-is.
        """
        if key is None:
            key = self.full_cache_key

        if not raw:
            value = self.prepare_value(key, value)

        self.cache.set(key, value,
                       timeout=self.expiration)

    def store_many(self, items):
        """Store many items directly to cache.

        All keys should be full cache keys, and all values should be prepared
        already.

        Args:
            items (dict):
                A dictionary of base cache keys to raw values to store.

        Raises:
            Exception:
                An error occurred preparing the value or writing to cache.
                The exception is raised as-is.
        """
        self.cache.set_many(items,
                            timeout=self.expiration)
def _get_default_use_encryption():
    """Return whether encryption should be enabled by default.

    The answer is looked up dynamically from the
    ``settings.DJBLETS_CACHE_FORCE_ENCRYPTION`` setting each time this is
    called, defaulting to ``False`` when the setting is absent.

    Returns:
        bool:
        Whether encryption should be used by default.
    """
    force_encryption = getattr(settings, 'DJBLETS_CACHE_FORCE_ENCRYPTION',
                               False)

    return force_encryption
def _get_default_encryption_key():
    """Return the default AES encryption key for caching.

    This prefers ``settings.DJBLETS_CACHE_DEFAULT_ENCRYPTION_KEY`` when set
    (normalizing it to bytes), and falls back to
    :py:func:`djblets.secrets.crypto.get_default_aes_encryption_key`
    otherwise.

    Returns:
        bytes:
        The default encryption key.
    """
    configured = getattr(settings, 'DJBLETS_CACHE_DEFAULT_ENCRYPTION_KEY',
                         None)

    if not configured:
        return get_default_aes_encryption_key()

    if not isinstance(configured, bytes):
        configured = configured.encode('utf-8')

    # Write the bytes form back onto settings so future lookups skip the
    # conversion.
    settings.DJBLETS_CACHE_DEFAULT_ENCRYPTION_KEY = configured

    return configured
def _cache_fetch_large_data(cache_context, chunk_count):
"""Fetch large data from the cache.
The main cache key indicating the number of chunks will be read, followed
by each of the chunks. If any chunks are missing, a
:py:class:`~djblets.cache.errors.MissingChunkError` will be immediately
raised.
The data is then combined and optionally uncompressed, and returned to
the caller. The caller should iterate through the results using
:py:func:`_cache_iter_large_data`.
Version Changed:
3.0:
* Updated to take ``cache_context`` instead of additional arguments,
and ``chunk_count`` to save a cache hit.
* Added support for key/value encryption.
Args:
cache_context (_CacheContext):
The caching operation context.
chunk_count (int):
The number of chunks to fetch.
Returns:
bytes:
The complete fetched data, ready for deserializing.
Raises:
djblets.cache.errors.MissingChunkError:
A chunk of data was missing. All cached data for the key is
invalid.
Exception:
An error occurred reading from cache or processing data. The
exception is raised as-is.
"""
# Fetch all the chunks at once.
chunk_keys = [
cache_context.make_subkey(i)
for i in range(chunk_count)
]
chunks = cache_context.cache.get_many(chunk_keys)
# Check that we have all the keys we expect, before we begin generating
# values. We don't want to waste effort loading anything, and we want to
# pass an error about missing keys to the caller up-front before we
# generate anything.
if len(chunks) != chunk_count:
missing_keys = sorted(set(chunk_keys) - set(chunks.keys()))
missing_keys_str = ', '.join(missing_keys)
logger.debug('Cache miss for key(s): %s.',
missing_keys_str)
raise MissingChunkError
# Process all the chunks.
data = (
chunks[chunk_key][0]
for chunk_key in chunk_keys
)
if cache_context.use_encryption:
# We can iteratively decrypt these and build the results into a
# single string below for optional decompression.
data = aes_decrypt_iter(data, key=cache_context.encryption_key)
data = b''.join(data)
# Decompress them all at once, instead of streaming the results. It's
# faster for any reasonably-sized data in cache. We'll stream depickles
# instead.
if cache_context.compress_large_data:
data = zlib.decompress(data)
return data
def _cache_iter_large_data(cache_context, data):
"""Iterate through large data that was fetched from the cache.
This will unpickle the large data previously fetched through
:py:func:`_cache_fetch_large_data`, and yield each object to the caller.
Version Changed:
3.0:
* Updated to take ``cache_context`` instead of additional arguments.
* Added support for key/value encryption.
Args:
cache_context (_CacheContext):
The caching operation context.
data (bytes):
The combined data fetched from cache.
Yields:
object:
Each value from cache.
Raises:
Exception:
An error occurred processing data. The exception is logged and
then raised as-is.
"""
fp = io.BytesIO(data)
try:
# Unpickle all the items we're expecting from the cached data.
#
# There will only be one item in the case of old-style cache data.
while True:
try:
yield pickle.load(fp)
except EOFError:
return
except Exception as e:
logger.warning('Unpickle error for cache key "%s": %s.',
cache_context.full_cache_key, e)
raise
def _cache_compress_pickled_data(items):
"""Compress lists of items for storage in the cache.
This works with generators, and will take each item in the list or
generator of items, zlib-compress the data, and store it in a buffer.The
item and a blob of compressed data will be yielded to the caller.
Args:
items (generator of tuple):
The generator of item tuples prepared in
:py:func:`_cache_store_items`.
Each is in the form of:
1. Byte string to compress
2. Boolean indicating if the raw data represents valid data to
ultimately yield to the caller in :py:func:`cache_memoize_iter`
3. Raw data to yield back to the caller
Yields:
tuple:
An item tuple, but with the first entry containing compressed data.
Each is in the form of:
1. Byte string containing compressed data
2. Boolean indicating if the raw data represents valid data to
ultimately yield to the caller in :py:func:`cache_memoize_iter`
3. Raw data to yield back to the caller
"""
compressor = zlib.compressobj()
for data, has_item, item in items:
yield compressor.compress(data), has_item, item
remaining = compressor.flush()
if remaining:
yield remaining, False, None
def _cache_encrypt_data(items, encryption_key):
    """Iteratively encrypt data from items tuples.

    Version Added:
        3.0

    Args:
        items (generator of tuple):
            The generator of item tuples prepared in
            :py:func:`_cache_store_items`.

            Each is in the form of:

            1. Byte string to encrypt
            2. Boolean indicating if the raw data represents valid data to
               ultimately yield to the caller in :py:func:`cache_memoize_iter`
            3. Raw data to yield back to the caller

        encryption_key (bytes, optional):
            The AES encryption key to use.

    Yields:
        tuple:
        Each is in the form of:

        1. Byte string containing encrypted data
        2. Boolean indicating if the raw data represents valid data to
           ultimately yield to the caller in :py:func:`cache_memoize_iter`
        3. Raw data to yield back to the caller
    """
    # We're working with a generator yielding tuples containing items.
    # The first entry in each tuple is a piece of the data we want to
    # encrypt. We want to encrypt iteratively, and we don't want to have
    # to load the entire results of the generator into memory at once.
    #
    # So we have to feed the first index of each tuple iteratively into
    # aes_encrypt_iter as a generator, but we also want to retain the other
    # data in each tuple and yield it along with the encrypted results.
    #
    # There isn't a one-to-one mapping between what goes in and what comes
    # out, particularly when dealing with compression.
    #
    # So here's the plan of attack:
    #
    # 1. We feed data into aes_encrypt_iter(), and keep a queue of the item
    #    data (has_item and item).
    #
    # 2. As we receive results, we pull off the front of the queue and yield
    #    it along with the data.
    #
    #    We then go through anything else in the queue (one item coming
    #    in could result in multiple coming out). Those yield those
    #    values, but without any data scheduled for storage, just scheduled
    #    to yield as results.
    #
    # 3. If there was nothing in the queue when we get data, then yield the
    #    data but with has_item=False, so it'll be stored but not yielded as
    #    results.
    item_queue = []

    def _gen_data():
        # NOTE: This closure mutates item_queue as a side effect each time
        # aes_encrypt_iter() pulls a value from it. The consumption order in
        # the loop below depends on that, so the queue state is only
        # meaningful between iterations of the outer for loop.
        for data, has_item, item in items:
            item_queue.append((has_item, item))

            yield data

    for data in aes_encrypt_iter(_gen_data(),
                                 key=encryption_key):
        if item_queue:
            # There were items in the queue. Yield the first with the data,
            # and anything else without (those will be returned as results
            # but won't be stored in cache).
            yield (data,) + item_queue[0]

            for item in item_queue[1:]:
                yield (None,) + item
        else:
            # The queue is empty. Yield what data we have for caching, but
            # don't yield for results.
            yield data, False, None

        # Everything queued so far has been yielded; reset for the next
        # round of input consumed by aes_encrypt_iter().
        item_queue.clear()
def _cache_store_chunks(cache_context, items):
    """Store a list of items as chunks in the cache.

    The list of items will be combined into chunks and stored in the
    cache as efficiently as possible. Each item in the list will be
    yielded to the caller as it's fetched from the list or generator.

    Version Changed:
        3.0:
        * Updated to take ``cache_context`` instead of additional arguments.
        * Added support for encrypting keys and data through the
          ``use_encryption`` and ``encryption_key`` arguments.

    Args:
        cache_context (_CacheContext):
            The caching operation context.

        items (generator of tuple):
            The generator of item tuples prepared in
            :py:func:`_cache_store_items`.

            Each is in the form of:

            1. Byte string to store in cache
            2. Boolean indicating if the raw data represents valid data to
               ultimately yield to the caller in :py:func:`cache_memoize_iter`
            3. Raw data to yield back to the caller

    Yields:
        object:
        Each chunk of original, unmodified item data being cached.
    """
    # All incoming byte strings accumulate in this buffer. read_start tracks
    # how far into the buffer data has already been flushed to cache, and
    # chunks_data_len tracks how many unflushed bytes remain.
    chunks_data = io.BytesIO()
    chunks_data_len = 0
    read_start = 0
    i = 0  # Index of the next chunk subkey to write.

    for data, has_item, item in items:
        if has_item:
            # Pass the original item through to the caller immediately; the
            # caching below is a side effect of consuming the generator.
            yield item

        if not data:
            continue

        chunks_data.write(data)
        chunks_data_len += len(data)

        if chunks_data_len > CACHE_CHUNK_SIZE:
            # We have enough data to fill a chunk now. Start processing
            # what we've stored and create cache keys for each chunk.
            # Anything remaining will be stored for the next round.
            chunks_data.seek(read_start)
            cached_data = {}

            while chunks_data_len > CACHE_CHUNK_SIZE:
                chunk = chunks_data.read(CACHE_CHUNK_SIZE)
                chunk_len = len(chunk)
                chunks_data_len -= chunk_len
                read_start += chunk_len

                # Note that we wrap the chunk in a list so that the cache
                # backend won't try to perform any conversion on the string.
                cached_data[cache_context.make_subkey(i)] = [chunk]
                i += 1

            # Store the keys in the cache in a single request.
            cache_context.store_many(cached_data)

            # Reposition back at the end of the stream, so new writes append
            # rather than overwrite unread data.
            chunks_data.seek(0, io.SEEK_END)

    if chunks_data_len > 0:
        # There's one last bit of data to store. Note that this should be
        # no larger than the size of a chunk.
        assert chunks_data_len <= CACHE_CHUNK_SIZE

        chunks_data.seek(read_start)
        chunk = chunks_data.read()

        cache_context.store_value([chunk],
                                  key=cache_context.make_subkey(i),
                                  raw=True)
        i += 1

    # Store the final count under the main key, so readers know how many
    # chunk subkeys to fetch.
    cache_context.store_value('%d' % i)
def _cache_store_items(cache_context, items):
    """Store items in the cache.

    Each item is pickled, the results are concatenated into a binary blob,
    which may then be compressed and/or encrypted (in that order). The
    resulting data is cached over one or more keys, each holding a chunk of
    roughly 1MB, with a main cache key recording the chunk count.

    Version Changed:
        3.0:
        * Updated to take ``cache_context`` instead of additional arguments.
        * Added support for encrypting keys and data through the
          ``use_encryption`` and ``encryption_key`` arguments.

    Args:
        cache_context (_CacheContext):
            The caching operation context.

        items (generator):
            The generator of data to store.

    Yields:
        tuple:
        Each is in the form of:

        1. Byte string to store in the cache
        2. Boolean indicating if the raw data represents valid data to
           ultimately yield to the caller in :py:func:`cache_memoize_iter`
        3. Raw data to yield back to the caller
    """
    # Pickle each item lazily. Protocol 0 keeps the stream compatible with
    # data written by older (Python 2-era) versions of this module, where 0
    # was the default protocol.
    pipeline = (
        (pickle.dumps(item, protocol=0), True, item)
        for item in items
    )

    if cache_context.compress_large_data:
        pipeline = _cache_compress_pickled_data(pipeline)

    if cache_context.use_encryption:
        # The order of operations here is important: compress first, then
        # encrypt. Encrypted AES data cannot be compressed.
        pipeline = _cache_encrypt_data(
            pipeline,
            encryption_key=cache_context.encryption_key)

    yield from _cache_store_chunks(cache_context=cache_context,
                                   items=pipeline)
def cache_memoize_iter(key, items_or_callable,
                       expiration=_default_expiration,
                       force_overwrite=False,
                       compress_large_data=True,
                       use_encryption=None,
                       encryption_key=None):
    """Memoize an iterable list of items inside the configured cache.

    ``items_or_callable`` may be an iterable (such as a list or generator),
    or a function returning one. When a generator is involved, each item is
    yielded to the caller as soon as it's produced, with the cache entries
    built up alongside.

    The data is assumed to be big enough that it must be pickled, optionally
    compressed, and stored as chunks in the cache.

    Data can be encrypted using AES encryption, for safe storage of
    potentially sensitive information or state. This adds to the processing
    time slightly, but can be important for cache keys containing sensitive
    information or that impact access control, particularly in the event
    that the cache is compromised or shared between services.

    The result from this function is always a generator. It's important
    that the generator be allowed to continue until completion, or the data
    won't be retrievable from the cache.

    Version Changed:
        3.0:
        Added support for encrypting keys and data through the
        ``use_encryption`` and ``encryption_key`` arguments.

    Args:
        key (str):
            The key to use in the cache.

        items_or_callable (list or callable):
            A list of items, or a callable returning a list of items to
            cache, if the key is not already found in cache.

        expiration (int):
            The expiration time for the key, in seconds.

        force_overwrite (bool):
            If ``True``, the value will always be computed and stored
            regardless of whether it exists in the cache already.

        compress_large_data (bool):
            If ``True``, the data will be zlib-compressed.

        use_encryption (bool, optional):
            Whether to use encryption when storing or reading data.

            If reading data, and if the data cannot be decrypted with the
            given key, then the data will be considered to have fallen out
            of cache.

            This defaults to ``False``, but can be forced on for all cached
            data by setting ``settings.DJBLETS_CACHE_FORCE_ENCRYPTION=True``.

            Version Added:
                3.0

        encryption_key (bytes or str, optional):
            An explicit AES encryption key to use when passing
            ``use_encryption=True``.

            This defaults to the value in
            ``settings.DJBLETS_CACHE_DEFAULT_ENCRYPTION_KEY``, if set, or to
            the default AES encryption key for the server as provided by
            :py:func:`djblets.secrets.crypto.get_default_aes_encryption_key`.

            Version Added:
                3.0

    Yields:
        object:
        The list of items from the cache or from ``items_or_callable`` if
        uncached.
    """
    ctx = _CacheContext(
        cache=cache,
        base_cache_key=key,
        expiration=expiration,
        compress_large_data=compress_large_data,
        use_encryption=use_encryption,
        encryption_key=encryption_key)
    cache_key = ctx.full_cache_key
    results = _NO_RESULTS

    if not force_overwrite:
        # The main key stores the number of chunks holding the actual data.
        chunk_count = ctx.load_value()

        if chunk_count is _NO_RESULTS:
            logger.debug('Cache miss for key "%s"' % cache_key)
        else:
            try:
                data = _cache_fetch_large_data(
                    cache_context=ctx,
                    chunk_count=int(chunk_count))
                results = _cache_iter_large_data(cache_context=ctx,
                                                 data=data)
            except Exception as e:
                logger.warning('Failed to fetch large data from cache for '
                               'key "%s": %s',
                               cache_key, e)
                results = _NO_RESULTS

    if results is _NO_RESULTS:
        # Nothing usable was in cache. Produce the items and cache them as
        # they're yielded.
        if callable(items_or_callable):
            items = items_or_callable()
        else:
            items = items_or_callable

        results = _cache_store_items(cache_context=ctx,
                                     items=items)

    yield from results
def cache_memoize(key,
                  lookup_callable,
                  expiration=_default_expiration,
                  force_overwrite=False,
                  large_data=False,
                  compress_large_data=True,
                  use_generator=False,
                  use_encryption=None,
                  encryption_key=None):
    """Memoize the results of a callable inside the configured cache.

    Data can be encrypted using AES encryption, for safe storage of potentially
    sensitive information or state. This adds to the processing time slightly,
    but can be important for cache keys containing sensitive information or
    that impact access control, particularly in the event that the cache is
    compromised or shared between services.

    Version Changed:
        3.0:
        Added support for encrypting keys and data through the
        ``use_encryption`` and ``encryption_key`` arguments.

    Version Changed:
        2.2.4:
        Added support for non-iterable value types.

    Args:
        key (str):
            The key to use in the cache.

        lookup_callable (callable):
            A callable to execute in the case where the data did not exist in
            the cache.

        expiration (int):
            The expiration time for the key, in seconds.

        force_overwrite (bool):
            If ``True``, the value will always be computed and stored
            regardless of whether it exists in the cache already.

        large_data (bool):
            If ``True``, the resulting data will be pickled, gzipped, and
            (potentially) split up into megabyte-sized chunks. This is useful
            for very large, computationally intensive hunks of data which we
            don't want to store in a database due to the way things are
            accessed.

        compress_large_data (bool):
            Compresses the data with zlib compression when ``large_data``
            is ``True``.

        use_generator (bool, deprecated):
            This parameter is no longer used and will be removed in Djblets
            3.0.

        use_encryption (bool, optional):
            Whether to use encryption when storing or reading data.

            If reading data, and if the data cannot be decrypted with the
            given key, then the data will be considered to have fallen out
            of cache.

            This defaults to ``False``, but can be forced on for all cached
            data by setting ``settings.DJBLETS_CACHE_FORCE_ENCRYPTION=True``.

            Version Added:
                3.0

        encryption_key (bytes or str, optional):
            An explicit AES encryption key to use when passing
            ``use_encryption=True``.

            This defaults to the value in
            ``settings.DJBLETS_CACHE_DEFAULT_ENCRYPTION_KEY``, if set, or to
            the default AES encryption key for the server as provided by
            :py:func:`djblets.secrets.crypto.get_default_aes_encryption_key`.

            Version Added:
                3.0

    Returns:
        object:
        The cached data, or the result of ``lookup_callable`` if uncached.
    """
    if large_data:
        # Delegate to the chunked implementation, wrapping the single result
        # in a one-item list so it round-trips through the iterator API.
        results = list(cache_memoize_iter(
            key,
            lambda: [lookup_callable()],
            expiration=expiration,
            force_overwrite=force_overwrite,
            compress_large_data=compress_large_data,
            use_encryption=use_encryption,
            encryption_key=encryption_key))
        assert len(results) == 1

        return results[0]

    cache_context = _CacheContext(
        cache=cache,
        base_cache_key=key,
        expiration=expiration,
        compress_large_data=compress_large_data,
        use_encryption=use_encryption,
        encryption_key=encryption_key)
    full_cache_key = cache_context.full_cache_key

    if not force_overwrite:
        # Start with the sentinel so a failed read below can't leave this
        # unbound. (Previously, an exception from load_value() resulted in
        # an UnboundLocalError at the check that follows.)
        result = _NO_RESULTS

        try:
            result = cache_context.load_value(full_cache_key)
        except Exception:
            # We've already logged this. Proceed to generate new data.
            pass

        if result is not _NO_RESULTS:
            return result

        logger.debug('Cache miss for key "%s"', full_cache_key)

    data = lookup_callable()

    # Most people will be using memcached, and memcached has a limit of
    # 1MB. Data this big should be broken up somehow, so let's warn
    # about this. Users should hopefully be using large_data=True which
    # will handle this appropriately.
    #
    # If we do get here, we try to do some sanity checking.
    # python-memcached will return a result in the case where the data
    # exceeds the value size, which Django will then silently use to clear
    # out the key. We won't know at all whether we had success unless we
    # come back and try to verify the value.
    #
    # This check handles the common case of large string data being stored
    # in cache. It's still possible to attempt to store large data
    # structures (where len(data) might be something like '6' but the
    # serialized value is huge), where this can still fail.
    if (isinstance(data, str) and
        len(data) >= CACHE_CHUNK_SIZE):
        logger.warning('Cache data for key "%s" (length %s) may be too '
                       'big for the cache.',
                       full_cache_key, len(data))

    try:
        cache_context.store_value(data, key=full_cache_key)
    except Exception as e:
        logger.error('Unable to store cached data in key "%s": %s',
                     full_cache_key, e)

    return data
def make_cache_key(key, use_encryption=None, encryption_key=None):
    """Create a cache key guaranteed to avoid conflicts and size limits.

    The cache key will be prefixed by the site's domain, and will be
    changed to an SHA256 hash if it's larger than the maximum key size or
    contains characters not compatible with the cache backend.

    Version Changed:
        3.0:
        * Added support for encrypting keys through the
          ``use_encryption`` and ``encryption_key`` arguments.

        * Changed the hash format for keys to use SHA256 instead of MD5.
          This will invalidate all old keys in cache, but reduces chances
          of collision.

        * Keys will automatically use the hash format if they contain
          characters unsupported by the cache backend.

        * The return type is now :py:class:`str`, to generate keys that are
          more suitable for modern versions of Django.

    Args:
        key (str):
            The base key to generate a cache key from.

        use_encryption (bool, optional):
            Whether to generate an encrypted key.

            This will generate a HMAC digest of the key. There will be no
            identifying information in the resulting key.

            This defaults to ``False``, but can be forced on for all cached
            data by setting ``settings.DJBLETS_CACHE_FORCE_ENCRYPTION=True``.

            Version Added:
                3.0

        encryption_key (bytes or str, optional):
            An explicit AES encryption key to use when passing
            ``use_encryption=True``.

            This defaults to the value in
            ``settings.DJBLETS_CACHE_DEFAULT_ENCRYPTION_KEY``, if set, or to
            the default AES encryption key for the server as provided by
            :py:func:`djblets.secrets.crypto.get_default_aes_encryption_key`.

            Version Added:
                3.0

    Returns:
        str:
        A cache key suitable for use with the cache backend.
    """
    if use_encryption is None:
        use_encryption = _get_default_use_encryption()

    try:
        site = Site.objects.get_current()

        # The install has a Site app, so prefix the domain to the key.
        # If a SITE_ROOT is defined, also include that, to allow for multiple
        # instances on the same host.
        site_root = getattr(settings, 'SITE_ROOT', None)

        if site_root:
            key = '%s:%s:%s' % (site.domain, site_root, key)
        else:
            key = '%s:%s' % (site.domain, key)
    except Exception:
        # The install doesn't have a Site app, so use the key as-is.
        pass

    if use_encryption:
        if encryption_key:
            # Accept str keys (consistent with the rest of this module's
            # key handling) by normalizing them to UTF-8 bytes for hmac.
            if isinstance(encryption_key, str):
                encryption_key = encryption_key.encode('utf-8')

            assert isinstance(encryption_key, bytes)
        else:
            encryption_key = _get_default_encryption_key()

        # Construct a HMAC digest of the key using the encryption key.
        # The result will be a SHA256 hash.
        key = hmac.new(
            encryption_key,
            msg=force_bytes(key),
            digestmod=hashlib.sha256).hexdigest()
    else:
        # Normalize any invalid characters in the key.
        key = _INVALID_KEY_CHARS_RE.sub(lambda m: '\\x%02x' % ord(m.group(0)),
                                        key)

        if len(key) > MAX_KEY_SIZE:
            digest = hashlib.sha256(key.encode('utf-8')).hexdigest()

            # Replace the excess part of the key with a digest of the key.
            key = key[:MAX_KEY_SIZE - len(digest)] + digest

    return key
/Dandelion-0.17.26-py3-none-any.whl/dandelion/ctc_theano.py | __author__ = 'dawei.leng'
__version__ = '1.48'
"""
Another CTC implemented in theano.
The `cost()` function of `CTC_Timescale` and `CTC_Logscale` classes return the average NLL over a batch samples given query sequences and score matrices.
This implementation features:
1) batch / mask supported.
2) speed comparable with (~35% slower than) the numba implementation which is the fastest by now.
Created : 12, 10, 2015
Revised : 8, 3, 2016 ver 1.46
: 1, 10, 2017 ver 1.47 fix wrong 'static_method' decorators.
: 3, 20, 2017 ver 1.48 add support for alignment {'pre'/'post'} for `CTC_Logscale` class
Reference : [1] Alex Graves, etc., Connectionist temporal classification: labelling unsegmented sequence data with
recurrent neural networks, ICML, 2006
[2] Alex Graves, Supervised sequence labelling with recurrent neural networks, 2014
[3] Lawrence R. Rabiner, A tutorial on hidden Markov models and selected applications in speech recognition,
Proceedings of the IEEE, 1989
[4] Maas Andrew, etc., https://github.com/amaas/stanford-ctc/blob/master/ctc_fast/ctc-loss/ctc_fast.pyx
[5] Mohammad Pezeshki, https://github.com/mohammadpz/CTC-Connectionist-Temporal-Classification/blob/master/ctc_cost.py
[6] Shawn Tan, https://github.com/shawntan/rnn-experiment/blob/master/CTC.ipynb
"""
import theano
from theano import tensor
from theano.ifelse import ifelse
floatX = theano.config.floatX
class CTC_Timescale(object):
"""
Compute CTC cost using time normalization instead of log scale computation.
Batch supported.
To compute the batch cost, use .cost() function below.
Speed slower than the numba & cython version (~6min vs ~3.9min on word_correction_CTC experiment), much faster than
the following non-batch version ctc_path_probability().
B: BATCH_SIZE
L: query sequence length (maximum length of a batch)
C: class number
T: time length (maximum time length of a batch)
"""
@classmethod
def cost(self, queryseq, scorematrix, queryseq_mask=None, scorematrix_mask=None, blank_symbol=None, align='pre'):
"""
Compute CTC cost, using only the forward pass
:param queryseq: (L, B)
:param scorematrix: (T, C+1, B)
:param queryseq_mask: (L, B)
:param scorematrix_mask: (T, B)
:param blank_symbol: scalar, = C by default
:return: negative log likelihood averaged over a batch
"""
if blank_symbol is None:
# blank_symbol = scorematrix.shape[1] - 1
blank_symbol = tensor.cast(scorematrix.shape[1], floatX) - 1.0
queryseq_padded, queryseq_mask_padded = self._pad_blanks(queryseq, blank_symbol, queryseq_mask)
results = self.path_probability(queryseq_padded, scorematrix, queryseq_mask_padded, scorematrix_mask, blank_symbol, align)
NLL = -results[1][-1] # negative log likelihood
NLL_avg = tensor.mean(NLL) # batch averaged NLL, used as cost
# if scorematrix_mask is not None:
# sm = scorematrix * scorematrix_mask.dimshuffle(0, 'x', 1)
# else:
# sm = scorematrix
# penalty = tensor.log(sm[:, blank_symbol, :].sum())
return NLL_avg
    @classmethod
    def path_probability(self, queryseq_padded, scorematrix, queryseq_mask_padded=None, scorematrix_mask=None, blank_symbol=None):
        """
        Compute p(l|x) using only the forward variable (alpha recursion of
        [Graves 2006]), with per-timestep normalization for numerical
        stability instead of log-scale arithmetic.

        :param queryseq_padded: (2L+1, B), label sequence with blanks
                                interleaved (as produced by `_pad_blanks`)
        :param scorematrix: (T, C+1, B)
        :param queryseq_mask_padded: (2L+1, B)
        :param scorematrix_mask: (T, B)
        :param blank_symbol: = C by default
        :return: scan results; results[1][-1] holds the per-sample forward
                 log-likelihood accumulated over all T steps
        """
        if blank_symbol is None:
            # blank_symbol = scorematrix.shape[1] - 1
            blank_symbol = tensor.cast(scorematrix.shape[1], floatX) - 1.0
        if queryseq_mask_padded is None:
            queryseq_mask_padded = tensor.ones_like(queryseq_padded, dtype=floatX)
        pred_y = self._class_batch_to_labeling_batch(queryseq_padded, scorematrix, scorematrix_mask)  # (T, 2L+1, B), reshaped scorematrix
        r2, r3 = self._recurrence_relation(queryseq_padded, queryseq_mask_padded, blank_symbol)  # r2 (2L+1, 2L+1), r3 (2L+1, 2L+1, B)

        def step(p_curr, p_prev, LLForward, countdown, r2, r3, queryseq_mask_padded):
            """
            One forward-recursion step over time.

            [DV, 1-14-2016]: A very weird problem encountered when integrating this CTC implementation into Keras. Before this revision
            there were no input parameters (r2, r3, queryseq_mask_padded) specified, they just referred to the outer scope
            ones. However, this will cause the CTC integrated within Keras producing inaccurate loss value, meanwhile when
            compiled as a separate function, the returned ctc loss value is accurate anyway. But if with these 3 parameters
            added as input, the problem vanished. This took me two days to find this remedy. I suspect this'd be the bug of
            theano.

            :param p_curr: (2L+1, B), one column of scorematrix
            :param p_prev: (B, 2L+1)
            :param LLForward: (B, 1)
            :param countdown: scalar
            :param r2: (2L+1, 2L+1)
            :param r3: (2L+1, 2L+1, B)
            :param queryseq_mask_padded: (2L+1, B)
            :return:
            """
            dotproduct = (p_prev + tensor.dot(p_prev, r2) +  # tensor.dot(p_prev, r2) = alpha(t-1, u-1)
                          (p_prev.dimshuffle(1, 'x', 0) * r3).sum(axis=0).T)  # = alpha(t-1, u-2) conditionally
            p_curr = p_curr.T * dotproduct
            # queryseq_mask_padded is always non-None here (the caller above
            # substitutes an all-ones mask); the guard is kept defensively.
            if queryseq_mask_padded is not None:
                p_curr *= queryseq_mask_padded.T  # (B, 2L+1) * (B, 2L+1) * (B, 2L+1) = (B, 2L+1)
            # Zero out label positions that can't be reached this far from
            # the end of the sequence (countdown = remaining time steps).
            start = tensor.max([0, queryseq_padded.shape[0] - 2 * countdown])
            mask = tensor.concatenate([tensor.zeros([queryseq_padded.shape[1], start]),
                                       tensor.ones([queryseq_padded.shape[1], queryseq_padded.shape[0] - start])], axis=1)
            p_curr *= mask
            # Normalize per time step (Rabiner-style scaling) and accumulate
            # the log of the scaling factor into the running log-likelihood.
            c_batch = p_curr.sum(axis=1, keepdims=True)  # (B, 1)
            p_curr /= c_batch
            LLForward += tensor.log(c_batch)
            countdown -= 1
            return p_curr, LLForward, countdown  # (B, 2L+1), (B, 1), scalar

        results, _ = theano.scan(
            step,
            sequences=[pred_y],  # scan only work on the first dimension
            outputs_info=[tensor.eye(queryseq_padded.shape[0])[0] * tensor.ones(queryseq_padded.T.shape),
                          tensor.unbroadcast(tensor.zeros([queryseq_padded.shape[1], 1]), 1), scorematrix.shape[0]],
            non_sequences=[r2, r3, queryseq_mask_padded])
        return results
    @classmethod
    def best_path_decode(self, scorematrix, scorematrix_mask=None, blank_symbol=None):
        """
        Computes the best path by simply choosing most likely label at each timestep
        (greedy decoding), then collapsing repeats and removing blanks.
        :param scorematrix: (T, C+1, B)
        :param scorematrix_mask: (T, B)
        :param blank_symbol: = C by default
        :return: resultseq (T, B), resultseq_mask(T, B); unused tail positions of
                 resultseq are left at -1 and masked out in resultseq_mask
        Speed much slower than pure python version (normally ~40 times on HTR tasks)
        """
        bestlabels = tensor.argmax(scorematrix, axis=1)  # (T, B), most likely class per timestep
        T, Cp, B = scorematrix.shape
        # resultseq initialized to -1 (padding marker), resultseq_mask to 0
        resultseq, resultseq_mask = tensor.zeros([T, B], dtype=scorematrix.dtype)-1, tensor.zeros([T, B], dtype=scorematrix.dtype)
        if blank_symbol is None:
            # blank_symbol = Cp - 1.0
            blank_symbol = tensor.cast(Cp, floatX) - 1.0
        if scorematrix_mask is None:
            scorematrix_mask = tensor.ones([T, B], dtype=scorematrix.dtype)
        def step(labelseq, labelseq_mask, idx, resultseq, resultseq_mask, blank_symbol):
            # Collapse one sample's label sequence: trim padding, merge adjacent
            # duplicates, drop blanks, then write into column idx of the outputs.
            seqlen = tensor.cast(labelseq_mask.sum(), 'int32')
            labelseq = self._remove_adjdup(labelseq[0:seqlen])
            labelseq = self._remove_value(labelseq, blank_symbol)
            seqlen2 = labelseq.size
            resultseq = tensor.set_subtensor(resultseq[0:seqlen2, idx], labelseq)
            resultseq_mask = tensor.set_subtensor(resultseq_mask[0:seqlen2, idx], tensor.ones_like(labelseq))
            idx += 1
            return idx, resultseq, resultseq_mask
        # iterate over the batch dimension (scan walks the first axis, hence the .T)
        outputs, updates = theano.scan(fn = step,
                                       sequences=[bestlabels.T, scorematrix_mask.T],
                                       outputs_info=[0, resultseq, resultseq_mask],
                                       non_sequences=[blank_symbol],
                                       name='decode_scan')
        resultseq, resultseq_mask = outputs[1][-1], outputs[2][-1]
        return resultseq, resultseq_mask
    @classmethod
    def calc_CER(self, resultseq, targetseq, resultseq_mask=None, targetseq_mask=None):
        """
        Calculate the character error rate (CER) given ground truth 'targetseq' and CTC decoding output 'resultseq'.
        CER = (total edit distance over the batch) / (total ground-truth length).
        :param resultseq (T1, B)
        :param resultseq_mask (T1, B)
        :param targetseq (T2, B)
        :param targetseq_mask (T2, B)
        :return: (CER, TE, TG) — CER scalar, TE total edit distance, TG total target length
        """
        if resultseq_mask is None:
            resultseq_mask = tensor.ones_like(resultseq)
        if targetseq_mask is None:
            targetseq_mask = tensor.ones_like(targetseq)
        def step(result_seq, target_seq, result_seq_mask, target_seq_mask, TE, TG):
            # Accumulate edit distance (TE) and target length (TG) for one sample;
            # masks are used to trim padding before comparing.
            L1 = tensor.cast(result_seq_mask.sum(), 'int32')
            L2 = tensor.cast(target_seq_mask.sum(), 'int32')
            d = self._editdist(result_seq[0:L1], target_seq[0:L2])
            TE += d
            TG += target_seq_mask.sum()
            return TE, TG
        # iterate over the batch dimension (scan walks the first axis, hence the .T)
        outputs, updates = theano.scan(fn=step,
                                       sequences=[resultseq.T, targetseq.T, resultseq_mask.T, targetseq_mask.T],
                                       outputs_info=[tensor.zeros(1), tensor.zeros(1)],
                                       name='calc_CER')
        TE, TG = outputs[0][-1], outputs[1][-1]
        CER = TE/TG
        return CER, TE, TG
@staticmethod
def _remove_value(x, value):
"""
Remove certain valued elements from a vector
x: vector (must); value: scalar
return a vector with all elements = 'value' removed
"""
return (x - value).nonzero_values() + value
    @staticmethod
    def _remove_adjdup(x):
        """
        Remove adjacent duplicate items of a vector
        x: vector
        return a vector with adjacent duplicate items removed, for example [1,2,2,2,3,3,4] -> [1,2,3,4]
        """
        def update(x, nondup, idx):
            # If the incoming element equals the last kept element, keep state
            # unchanged; otherwise append it at position idx+1 and advance idx.
            nondup = tensor.switch(tensor.eq(nondup[idx], x), nondup, tensor.set_subtensor(nondup[idx + 1], x))  # tensor.switch is much faster than ifelse
            idx = tensor.switch(tensor.eq(nondup[idx], x), idx, idx + 1)
            return nondup, idx
        # nondup starts as a copy of x; the first element is trivially kept, and
        # idx always points at the last kept position.
        nondup = x
        idx = tensor.as_tensor_variable(0)
        idx = tensor.cast(idx, 'int32')
        result, updates = theano.scan(fn = update, sequences=x, outputs_info=[nondup, idx], name='remove_adjdup')
        nondup = result[0][-1]
        idx = result[1][-1]
        return nondup[0:idx+1]
    @staticmethod
    def _editdist(s, t):
        """
        Levenshtein's edit distance function, computed row-by-row with a theano scan
        :param s: vector, source string
        :param t: vector, target string
        :return: edit distance, scalar
        """
        def update(x, previous_row):
            # Standard DP row update: deletion, substitution, insertion.
            current_row = previous_row + 1
            current_row = tensor.set_subtensor(current_row[1:], tensor.minimum(current_row[1:], tensor.add(previous_row[:-1], tensor.neq(target,x))))
            current_row = tensor.set_subtensor(current_row[1:], tensor.minimum(current_row[1:], current_row[0:-1] + 1))
            return current_row
        # iterate over the longer string so the DP row spans the shorter one
        source, target = ifelse(tensor.lt(s.shape[0], t.shape[0]), (t, s), (s, t))
        previous_row = tensor.arange(target.size + 1, dtype=theano.config.floatX)
        result, updates = theano.scan(fn=update, sequences=source, outputs_info=previous_row, name='editdist')
        return result[-1,-1]
    @staticmethod
    def _pad_blanks(queryseq, blank_symbol, queryseq_mask=None):
        """
        Pad queryseq and corresponding queryseq_mask with blank symbol, producing
        the blank-interleaved sequence (blank, l1, blank, l2, ..., blank) required
        by the CTC forward recursion.
        :param queryseq (L, B)
        :param queryseq_mask (L, B)
        :param blank_symbol scalar, must be float type!
        :return queryseq_padded, queryseq_mask_padded, both with shape (2L+1, B)
        """
        # for queryseq
        queryseq_extended = queryseq.dimshuffle(1, 0, 'x')                        # (L, B) -> (B, L, 1)
        blanks = tensor.zeros_like(queryseq_extended) + blank_symbol              # (B, L, 1)
        concat = tensor.concatenate([queryseq_extended, blanks], axis=2)          # concat.shape = (B, L, 2)
        res = concat.reshape((concat.shape[0], concat.shape[1] * concat.shape[2])).T  # res.shape = (2L, B), the reshape will cause the last 2 dimensions interlace
        begining_blanks = tensor.zeros((1, res.shape[1])) + blank_symbol          # (1, B)
        queryseq_padded = tensor.concatenate([begining_blanks, res], axis=0)      # (1+2L, B)
        # for queryseq_mask: duplicate each mask entry for the trailing blank and
        # mark the leading blank as always valid
        if queryseq_mask is not None:
            queryseq_mask_extended = queryseq_mask.dimshuffle(1, 0, 'x')          # (L, B) -> (B, L, 1)
            concat = tensor.concatenate([queryseq_mask_extended, queryseq_mask_extended], axis=2)  # concat.shape = (B, L, 2)
            res = concat.reshape((concat.shape[0], concat.shape[1] * concat.shape[2])).T
            begining_blanks = tensor.ones((1, res.shape[1]), dtype=floatX)
            queryseq_mask_padded = tensor.concatenate([begining_blanks, res], axis=0)
        else:
            queryseq_mask_padded = None
        return queryseq_padded, queryseq_mask_padded
    @staticmethod
    def _class_batch_to_labeling_batch(queryseq_padded, scorematrix, scorematrix_mask=None):
        """
        Convert dimension 'class' of scorematrix to 'label', i.e. gather each
        sample's per-timestep scores for its own padded label sequence.
        :param queryseq_padded: (2L+1, B)
        :param scorematrix: (T, C+1, B)
        :param scorematrix_mask: (T, B)
        :return: (T, 2L+1, B)
        """
        if scorematrix_mask is not None:
            # zero out scores of padded timesteps before gathering
            scorematrix = scorematrix * scorematrix_mask.dimshuffle(0, 'x', 1)  # (T, C+1, B) * (T, 1, B)
        batch_size = scorematrix.shape[2]  # = B
        res = scorematrix[:, queryseq_padded.astype('int32'), tensor.arange(batch_size)]  # (T, 2L+1, B), indexing each row of scorematrix with queryseq_padded
        return res
    @staticmethod
    def _recurrence_relation(queryseq_padded, queryseq_mask_padded=None, blank_symbol=None):
        """
        Generate structured matrix r2 & r3 for dynamic programming recurrence.
        r2 shifts alpha by one state (u-1); r3 shifts by two states (u-2) but only
        where the skip transition is legal (the skipped state is a blank and the
        two labels around it differ).
        :param queryseq_padded: (2L+1, B)
        :param queryseq_mask_padded: (2L+1, B)
        :param blank_symbol: = C
        :return: r2 (2L+1, 2L+1), r3 (2L+1, 2L+1, B)
        """
        L2 = queryseq_padded.shape[0]  # = 2L+1
        blanks = tensor.zeros((2, queryseq_padded.shape[1])) + blank_symbol  # (2, B), sentinel tail
        ybb = tensor.concatenate((queryseq_padded, blanks), axis=0).T        # (2L+3, B) -> (B, 2L+3)
        # skip allowed where neighbors differ AND the in-between symbol is blank
        sec_diag = tensor.neq(ybb[:, :-2], ybb[:, 2:]) * tensor.eq(ybb[:, 1:-1], blank_symbol)  # (B, 2L+1)
        if queryseq_mask_padded is not None:
            sec_diag *= queryseq_mask_padded.T
        r2 = tensor.eye(L2, k=1)  # upper diagonal matrix (2L+1, 2L+1)
        r3 = tensor.eye(L2, k=2).dimshuffle(0, 1, 'x') * sec_diag.dimshuffle(1, 'x', 0)  # (2L+1, 2L+1, B)
        return r2, r3
class CTC_Logscale(CTC_Timescale):
    """
    This implementation uses log scale computation.
    Batch supported. Note the log scale computation used to produce imprecise CTC cost in Theano (path probability).
    [Credits to Mohammad Pezeshki, https://github.com/mohammadpz/CTC-Connectionist-Temporal-Classification]
    B: BATCH_SIZE
    L: query sequence length (maximum length of a batch)
    C: class number
    T: time length (maximum time length of a batch)
    """
    @classmethod
    def cost(self, queryseq, scorematrix, queryseq_mask=None, scorematrix_mask=None, blank_symbol=None, align='pre'):
        """
        Compute CTC cost, using only the forward pass
        :param queryseq: (L, B)
        :param scorematrix: (T, C+1, B)
        :param queryseq_mask: (L, B)
        :param scorematrix_mask: (T, B)
        :param blank_symbol: scalar, = C by default
        :param align: string, {'pre'/'post'}, indicating how input samples are aligned in one batch
        :return: negative log likelihood averaged over a batch
        """
        if blank_symbol is None:
            # blank_symbol = scorematrix.shape[1] - 1.0
            blank_symbol = tensor.cast(scorematrix.shape[1], floatX) - 1.0
        queryseq_padded, queryseq_mask_padded = self._pad_blanks(queryseq, blank_symbol, queryseq_mask)
        NLL, alphas = self.path_probability(queryseq_padded, scorematrix, queryseq_mask_padded, scorematrix_mask, blank_symbol, align)
        NLL_avg = tensor.mean(NLL)
        return NLL_avg
    @classmethod
    def path_probability(self, queryseq_padded, scorematrix, queryseq_mask_padded=None, scorematrix_mask=None, blank_symbol=None, align='pre'):
        """
        Compute p(l|x) using only the forward variable and log scale
        :param queryseq_padded: (2L+1, B)
        :param scorematrix: (T, C+1, B)
        :param queryseq_mask_padded: (2L+1, B)
        :param scorematrix_mask: (T, B)
        :param blank_symbol: = C by default
        :return: (NLL, alphas) — NLL (B,) negative log likelihoods, alphas the
                 full (T, B, 2L+1) log forward variable
        """
        if blank_symbol is None:
            # blank_symbol = scorematrix.shape[1] - 1.0
            blank_symbol = tensor.cast(scorematrix.shape[1], floatX) - 1.0
        if queryseq_mask_padded is None:
            queryseq_mask_padded = tensor.ones_like(queryseq_padded, dtype=floatX)
        if scorematrix_mask is None:
            scorematrix_mask = tensor.ones([scorematrix.shape[0], scorematrix.shape[2]])
        pred_y = self._class_batch_to_labeling_batch(queryseq_padded, scorematrix, scorematrix_mask)  # (T, 2L+1, B), reshaped scorematrix
        r2, r3 = self._recurrence_relation(queryseq_padded, queryseq_mask_padded, blank_symbol)        # r2 (2L+1, 2L+1), r3 (2L+1, 2L+1, B)
        def step(p_curr, p_prev):
            # log-domain forward update: stay (p1), advance one state (p2),
            # skip over a blank (p3), combined via log-add
            p1 = p_prev
            p2 = self._log_dot_matrix(p1, r2)
            p3 = self._log_dot_tensor(p1, r3)
            p123 = self._log_add(p3, self._log_add(p1, p2))
            return p_curr.T + p123 + self._epslog(queryseq_mask_padded.T)
        alphas, _ = theano.scan(
            step,
            sequences=[self._epslog(pred_y)],
            outputs_info=[self._epslog(tensor.eye(queryseq_padded.shape[0])[0] * tensor.ones(queryseq_padded.T.shape))])
        B = alphas.shape[1]
        LL = tensor.sum(queryseq_mask_padded, axis=0, dtype='int32')  # per-sample padded length
        if align == 'pre':
            # pre-aligned batches: read alpha at each sample's own last valid timestep
            TL = tensor.sum(scorematrix_mask, axis=0, dtype='int32')
            NLL = -self._log_add(alphas[TL - 1, tensor.arange(B), LL - 1],
                                 alphas[TL - 1, tensor.arange(B), LL - 2])
        else:  # align == 'post': all samples end at the final timestep
            NLL = -self._log_add(alphas[-1, tensor.arange(B), LL - 1],
                                 alphas[-1, tensor.arange(B), LL - 2])
        return NLL, alphas
    @staticmethod
    def _epslog(x):
        # clipped log: avoids -inf for zero probabilities
        return tensor.cast(tensor.log(tensor.clip(x, 1E-12, 1E12)),
                           theano.config.floatX)
    @staticmethod
    def _log_add(a, b):
        # log(exp(a) + exp(b)); note a + b - 2*max_ equals min_ - max_
        max_ = tensor.maximum(a, b)
        return max_ + tensor.log1p(tensor.exp(a + b - 2 * max_))
    @staticmethod
    def _log_dot_matrix(x, z):
        inf = 1E12
        log_dot = tensor.dot(x, z)
        # push entries corresponding to all-zero columns of z toward -inf
        zeros_to_minus_inf = (z.max(axis=0) - 1) * inf
        return log_dot + zeros_to_minus_inf
    @staticmethod
    def _log_dot_tensor(x, z):
        inf = 1E12
        log_dot = (x.dimshuffle(1, 'x', 0) * z).sum(axis=0).T
        # push entries corresponding to all-zero columns of z toward -inf
        zeros_to_minus_inf = (z.max(axis=0) - 1) * inf
        return log_dot + zeros_to_minus_inf.T
def ctc_path_probability(scorematrix, queryseq, blank):
    """
    Compute path probability based on CTC algorithm, only forward pass is used.
    Batch not supported, for batch version, refer to the CTC class above
    Speed much slower than the numba & cython version (51.5min vs ~3.9min on word_correction_CTC experiment)
    :param scorematrix: (T, C+1)
    :param queryseq: (L, 1)
    :param blank: scalar, blank symbol
    :return: (NLL, alphas), NLL > 0 (smaller is better, = -log(p(l|x)); alphas is the forward variable)
    """
    def update_s(s, alphas, scorematrix, queryseq, blank, t):
        # Forward update for one padded state s at time t; even s are blanks,
        # odd s correspond to label queryseq[(s-1)//2].
        l = (s - 1) // 2
        alphas = ifelse(tensor.eq(s % 2, 0),
                        ifelse(tensor.eq(s, 0),
                               tensor.set_subtensor(alphas[s, t], alphas[s, t - 1] * scorematrix[blank, t]),
                               tensor.set_subtensor(alphas[s, t],
                                                    (alphas[s, t - 1] + alphas[s - 1, t - 1]) * scorematrix[blank, t]),
                               name='for_blank_symbol'),
                        ifelse(tensor.or_(tensor.eq(s, 1), tensor.eq(queryseq[l], queryseq[l - 1])),
                               tensor.set_subtensor(alphas[s, t],
                                                    (alphas[s, t - 1] + alphas[s - 1, t - 1]) * scorematrix[
                                                        queryseq[l], t]),
                               tensor.set_subtensor(alphas[s, t],
                                                    (alphas[s, t - 1] + alphas[s - 1, t - 1] + alphas[s - 2, t - 1]) *
                                                    scorematrix[queryseq[l], t]),
                               name='for_same_label_twice'))
        return alphas
    def update_t(t, LLForward, alphas, scorematrix, queryseq, blank, T, L2):
        # One timestep of the forward pass: update reachable states [start, end),
        # then rescale the column and accumulate the log normalizer.
        start = tensor.max([0, L2 - 2 * (T - t)])
        end = tensor.min([2 * t + 2, L2])
        s = tensor.arange(start, end)
        results, _ = theano.scan(fn=update_s, sequences=[s], non_sequences=[scorematrix, queryseq, blank, t],
                                 outputs_info=[alphas], name='scan_along_s')
        alphas = results[-1]
        c = tensor.sum(alphas[start:end, t])
        c = tensor.max([1e-15, c])  # guard against division by zero
        alphas = tensor.set_subtensor(alphas[start:end, t], alphas[start:end, t] / c)
        LLForward += tensor.log(c)
        return LLForward, alphas
    L = queryseq.shape[0]     # Length of label sequence
    L2 = 2 * L + 1            # Length of label sequence padded with blanks
    T = scorematrix.shape[1]  # time length
    alphas = tensor.zeros((L2, T))
    # Initialize alphas and forward pass
    alphas = tensor.set_subtensor(alphas[[0, 1], 0], scorematrix[[blank, queryseq[0]], 0])
    c = tensor.sum(alphas[:, 0])
    alphas = tensor.set_subtensor(alphas[:, 0], alphas[:, 0] / c)
    LLForward = tensor.log(c)
    t = tensor.arange(1, T)
    results, _ = theano.scan(fn=update_t, sequences=[t], non_sequences=[scorematrix, queryseq, blank, T, L2],
                             outputs_info=[LLForward, alphas], name='scan_along_t')
    # T == 1 means the scan above never ran; fall back to the initial column
    NLL, alphas = ifelse(tensor.gt(T, 1), (-results[0][-1], results[1][-1]), (-LLForward, alphas))
    return NLL, alphas
/Astropysics-1.0.tar.gz/Astropysics-1.0/astropysics/gui/spectarget.py | from __future__ import division,with_statement
import numpy as np
try:
import enthought
from enthought.traits.api import HasTraits
traitsflat = False
except ImportError:
import traits
traitsflat = True
if traitsflat:
from traits.api import HasTraits,on_trait_change,Instance,Float,\
String,ListStr,ListFloat,Button,Bool,\
Property,Event,Array,Enum,TraitError
from traitsui.api import View,Group,HGroup,HFlow,Item,Handler
from traitsui.menu import ModalButtons
from chaco.api import Plot,ArrayPlotData,HPlotContainer,ColorBar,\
LinearMapper,jet,LassoOverlay,ColormappedSelectionOverlay
from chaco.tools.api import PanTool, ZoomTool,LassoSelection
from enable.component_editor import ComponentEditor
else:
from enthought.traits.api import HasTraits,on_trait_change,Instance,Float,\
String,ListStr,ListFloat,Button,Bool,\
Property,Event,Array,Enum,TraitError
from enthought.traits.ui.api import View,Group,HGroup,HFlow,Item,Handler
from enthought.traits.ui.menu import ModalButtons
from enthought.chaco.api import Plot,ArrayPlotData,HPlotContainer,ColorBar,\
LinearMapper,jet,LassoOverlay,ColormappedSelectionOverlay
from enthought.chaco.tools.api import PanTool, ZoomTool,LassoSelection
from enthought.enable.component_editor import ComponentEditor
from ..phot import CMDAnalyzer
class MaskMaker(object):
    """
    Abstract base for mask-generating backends. Concrete subclasses are
    expected to:

    * replace `name` with a human-readable name for the mask-making operation
    * set `shortlen` to the maximum allowed length of a short mask name
    * set `size` to the approximate mask dimensions as (len, width) in degrees
    * override :meth:`makeMask`

    Special priority groups are interpreted as:

    * -1: alignment stars
    * -2: guide stars
    * -9: remove from list unless guide/align star
    """
    name = 'Default mask-maker'
    shortlen = None
    size = None

    def __init__(self, spectargobj):
        # keep a handle on the owning SpecTarget GUI object
        self.sto = spectargobj

    def makeMask(self, filename):
        """Write a mask to `filename`; must be overridden by subclasses."""
        raise NotImplementedError
class ODHandler(Handler):
    """
    Traits UI handler for OffsetDialog: on accept, pushes the dialog's
    offset settings back into the CMDAnalyzer (weights are the reciprocal
    of the displayed scales) and triggers a priority update on the GUI.
    """
    def apply(self, info):
        dlg = info.object
        cmda = dlg.cmda
        cmda.offsetbands = dlg.offsetbands
        cmda.offsetweights = 1.0 / dlg.offsetscales
        # a zero scale means "ignore location" rather than infinite weight
        if dlg.locscale != 0:
            cmda.locweight = 1.0 / dlg.locscale
        else:
            cmda.locweight = 0
        dlg.st.updatepri = True

    def closed(self, info, is_ok):
        # only commit the settings when the dialog was accepted
        if is_ok:
            self.apply(info)
        Handler.closed(self, info, is_ok)
class OffsetDialog(HasTraits):
    """
    Modal dialog for editing the CMDAnalyzer offset settings (bands, per-band
    scales and location scale). ODHandler writes the values back on OK.
    """
    # bands used to compute offsets
    offsetbands = ListStr
    # per-band scales; stored on the analyzer as weights = 1/scale
    offsetscales = Array(float)
    # location scale; stored on the analyzer as locweight = 1/scale (0 -> 0)
    locscale = Float
    view = View(Group(Item('offsetbands'),
                      Item('offsetscales'),
                      Item('locscale'),
                      layout='normal'),
                resizable=True,title='Offset Settings',handler=ODHandler(),
                buttons = ModalButtons,kind='modal')
    def __init__(self,cmda,st):
        """
        :param cmda: the CMDAnalyzer whose settings are edited
        :param st: the owning SpecTarget (used to fire priority updates)
        """
        self.cmda = cmda
        self.st = st
        # convert analyzer weights back to display scales (scale = 1/weight)
        self.offsetbands = cmda.offsetbands if cmda.offsetbands is not None else []
        self.offsetscales = 1.0/cmda.offsetweights
        self.locscale = 1.0/cmda.locweight if cmda.locweight != 0 else 0
class SpecTarget(HasTraits):
distance = Float(10)
distmod = Float(0)
cmda = Instance(CMDAnalyzer)
plotcontainer = HPlotContainer
locplot = Plot
cmplot = Plot
cb = ColorBar
data = ArrayPlotData
locs = ArrayPlotData
xax = String
yax = String
priband = String('')
pribandw = Float(0)
pripower = Float(2)
groupcutoff = Float(100)
agband = String
astarcut = Float(18.5)
astarcut2 = Float(20)
gstarcut = Float(16)
agigpri = Float(0.5)
hideg0 = Bool(True)
agigg1 = Bool(False)
offsetset = Button
priorities = Property(depends_on='distmod,offsetset')
masktype = Enum(['DEIMOS'])
maskmaker = Instance(MaskMaker)
maskfn = String('spectarg1')
maskshortname = String('Nonam1')
masklongname = String('')
makemaskbutton = Button
updatepri = Event
updatedata = Event
updatelocs = Event
updategastars = Event
view = View(Group(Item('plotcontainer',editor=ComponentEditor(size=(800,400)),show_label=False),
Group(Group(Item('distance',label='Distance (kpc)'),
Item('distmod',label='Distance Modulus'),
Item('xax',label='CMD x-axis'),
Item('yax',label='CMD y-axis'),
columns=2),
Group(Item('agband',label='Alignment/Guide Star Band'),
Item('gstarcut',label='Guide Star Cutoff'),
Item('astarcut',label='Alignment Star Cutoff'),
Item('astarcut2',label='Alignment Star Cutoff 2'),
Item('agigpri',label='A/G Ignore priority'),
Item('agigg1',label='A/G Ignore if group 1?'),
columns=6),
HGroup(Item('priband',label='Priority Band'),
Item('pribandw',label='Band Priority Weight'),
Item('offsetset',label='Offset Settings'),
Item('pripower',label='Power for converting offsets into priorities'),
Item('groupcutoff',label='Last Group Cutoff'),
Item('hideg0',label='Set Group 0 priority to 0?')
),
Group(Item('masktype',label='Mask Type'),
Item('maskfn',label='Mask File(base)'),
Item('makemaskbutton',label='Make Mask'),
Item('maskshortname',label='Mask Name(short)'),
Item('masklongname',label='Mask Name(long)'),
columns=3)),
layout='normal'),
resizable=True,title='Spectra Targeter')
def __init__(self,*args,**kwargs):
"""
generates the SpecTarget GUI - args can be either:
* SpecTarget(cmdanalyzer)
provide a ready-made CMDAnalyzer
* SpecTarget(fiducialdict,data)
provide a dictionary of fiducial data and a set of data (array or
dict)
kwargs get applied to the CMDAnalyzer
"""
if len(args) == 1:
cmda = args[0]
elif len(args) == 2:
cmda = CMDAnalyzer(args[0].values(),args[0].keys())
cmda.setData(args[1])
#elif len(args) == 3:
# cmda = CMDAnalyzer(args[0].values(),args[0].keys())
# cmda.setData(args[1])
# cmda.locs = args[2]
else:
raise TypeError('SpecTarget initializer takes 1 or 2 arguments')
for k,v in kwargs.iteritems():
setattr(cmda,k,v)
self.cmda = cmda
self._distinner = False
ob = cmda.offsetbands
if ob is not None and len(ob) > 1:
self.xax,self.yax = ob[0],ob[1]
else:
bi1,bi2 = cmda.validbandinds[0],cmda.validbandinds[1]
self.xax,self.yax = cmda.bandnames[bi1],cmda.bandnames[bi2]
self.agband = self.yax
self.data = ArrayPlotData()
self.locs = ArrayPlotData()
self._update_x_data()
self._update_y_data()
self.updatelocs = True
self.updatepri = True
self.updategastars = True
cmdplot = Plot(self.data,resizable='hv')
objplotcmd = cmdplot.plot(('x','y','pri'),name='target_mags',type='cmap_scatter',marker='dot',marker_size=1,color_mapper=jet)[0]
cmdplot.plot(('fidx','fidy'),name='fid_mags',type='line',color='black')
cmdplot.plot(('gx','gy'),name='guide_mags',type='scatter',color='gray',marker='inverted_triangle',outline_color='red',marker_size=5)
cmdplot.plot(('ax','ay'),name='align_mags',type='scatter',color='black',marker='diamond',outline_color='red',marker_size=4)
cmdplot.tools.append(PanTool(cmdplot,drag_button='right'))
cmdplot.tools.append(ZoomTool(cmdplot))
locplot = Plot(self.locs,resizable='hv')
objplotloc = locplot.plot(('ra','dec','pri'),name='target_locs',type='cmap_scatter',marker_size=2,color_mapper=jet)[0]
locplot.plot(('centerx','centery'),name='cen_locs',type='scatter',color='black',marker='cross',marker_size=10,line_width=4)
locplot.plot(('boxx','boxy'),name='box_locs',type='line',color='black',line_width=2)
locplot.plot(('gra','gdec'),name='guide_locs',type='scatter',color='gray',marker='inverted_triangle',outline_color='red',marker_size=5)
locplot.plot(('ara','adec'),name='align_locs',type='scatter',color='black',marker='diamond',outline_color='red',marker_size=5)
locplot.tools.append(PanTool(locplot,drag_button='right'))
locplot.tools.append(ZoomTool(locplot))
cb = ColorBar(index_mapper=LinearMapper(range=cmdplot.color_mapper.range),color_mapper=cmdplot.color_mapper,resizable='v',width=30,padding=5)
ls1 = LassoSelection(objplotcmd,selection_datasource=objplotcmd.color_data)
objplotcmd.tools.append(ls1)
objplotcmd.overlays.append(LassoOverlay(objplotcmd,lasso_selection=ls1))
objplotcmd.active_tool = ls1
ls1.on_trait_change(self._selection_changed,'selection_changed')
ls1.on_trait_change(self._selection_completed,'selection_completed')
objplotcmd.overlays.append(ColormappedSelectionOverlay(objplotcmd,selection_type='mask',selection_datasource=objplotcmd.color_data))
ls2 = LassoSelection(objplotloc,selection_datasource=objplotloc.index)
objplotloc.tools.append(ls2)
objplotloc.overlays.append(LassoOverlay(objplotloc,lasso_selection=ls2))
objplotloc.active_tool = ls2
ls2.on_trait_change(self._selection_changed,'selection_changed')
ls2.on_trait_change(self._selection_completed,'selection_completed')
self.plotcontainer = HPlotContainer(use_backbuffer=True)
self.locplot = locplot
self.cmplot = cmdplot
self.cb = cb
self.plotcontainer.add(locplot)
self.plotcontainer.add(cmdplot)
self.plotcontainer.add(cb)
self.on_trait_change(self._cmda_late_changed,'cmda')
self.on_trait_change(self._xax_late_changed,'xax')
self.on_trait_change(self._yax_late_changed,'yax')
self.on_trait_change(self._late_dogastars,'agband,gstarcut,astarcut,agigpri')
self.priband = self.yax #must be done after data is set
self._cmda_late_changed() #clean things up in case random stuff changes
self._masktype_changed(self.masktype) #call to intialize default mask maker
def _selection_changed(self,obj,name,new):
cds = (self.cmplot.plots['target_mags'][0].color_data,self.locplot.plots['target_locs'][0].color_data)
datasource = obj.selection_datasource
targds = cds[1] if cds[0] == datasource else cds[0]
mask = datasource.metadata['selection'].astype(bool)
targds.set_mask(mask)
def _selection_completed(self,obj,name,new):
cds = (self.cmplot.plots['target_mags'][0].color_data,self.locplot.plots['target_locs'][0].color_data)
datasource = obj.selection_datasource
targds = cds[1] if cds[0] == datasource else cds[0]
targds.set_mask(None)
def _maskfn_changed(self):
self.dsimready = False
def _distance_changed(self,old,new):
if self._distinner:
self._distinner = False
else:
self.cmda.distance = self.distance
self._distinner = True
self.distmod = self.cmda.distmod
self.updatedata = True
def _distmod_changed(self,old,new):
if self._distinner:
self._distinner = False
else:
self.cmda.distmod = self.distmod
self._distinner = True
self.distance = self.cmda.distance
self.updatedata = True
def _priband_changed(self,old,new):
if new not in self.cmda.validbandnames:
print 'band',new,'not valid'
self.priband = old if new != old else ''
else:
self.updatepri = True
def _pribandw_changed(self):
if self.priband:
self.updatepri = True
def _agigpri_changed(self):
self.updatepri = True
def _pripower_changed(self):
self.updatepri = True
def _hideg0_changed(self):
self.updatedata = True
#m = self._get_g0m()
#for v in self.locplot.plots.itervalues():
# v = v[0]
# v.index.set_mask(m)
# def _get_distance(self):
# return self.cmda.distance
# def _set_distance(self,val):
# self.cmda.distance = val
# self.updatedata = True
# self.test = 1
# def _get_distmod(self):
# return self.cmda.distmod
# def _set_distmod(self,val):
# self.cmda.distmod = val
# self.updatedata = True
def _cmda_late_changed(self):
self.distance = self.cmda.distance
self.updatedata = True
def _updatedata_fired(self):
try:
self._xax_late_changed()
self._yax_late_changed()
except ValueError:
bi1,bi2 = self.cmda.validbandinds[0],cmda.validbandinds[1]
self.xax = self.cmda.bandnames[bi1]
self.yax = self.cmda.bandnames[bi2]
self._xax_late_changed()
self._yax_late_changed()
self.updatepri = True
self.updategastars = True
def _xax_late_changed(self):
self._update_x_data()
self.cmplot.x_axis.title = self.xax
def _get_g0m(self):
if self.hideg0 and self.cmda.datagroups is not None:
return self.cmda.datagroups !=0
else:
return np.ones(self.cmda._nd,dtype=bool)
def _update_x_data(self):
try:
x = self.cmda.getData(self.xax)
fx = self.cmda.getFiducialData(self.xax)
except ValueError,e:
if e.message == ' is not a valid fiducial band' or e.message == ' is not a valid band':
return
self._fix_axes()
self.data.set_data('x',x)
self.data.set_data('fidx',fx)
self.updategastars = 'xd'
def _yax_late_changed(self):
self._update_y_data()
self.cmplot.y_axis.title = self.yax
def _update_y_data(self):
try:
y = self.cmda.getData(self.yax)
fy = self.cmda.getFiducialData(self.yax)
except ValueError,e:
if e.message == ' is not a valid fiducial band' or e.message == ' is not a valid band':
return
self._fix_axes()
self.data.set_data('y',y)
self.data.set_data('fidy',fy)
self.updategastars = 'yd'
def _fix_axes(self):
if self.cmplot is not Plot:
oristr=[]
if '-' not in self.yax:
oristr.append('top')
else:
oristr.append('bottom')
if '-' not in self.xax:
oristr.append('right')
else:
oristr.append('left')
oristr = ' '.join(oristr)
self.cmplot.default_origin = oristr
for pl in self.cmplot.plots.values():
pl[0].origin = oristr
def _updatelocs_fired(self):
if self.cmda.locs is None:
ra,dec = np.zeros(self.cmda._nd),np.zeros(self.cmda._nd)
else:
ra = self.cmda.locs[0]
dec = self.cmda.locs[1]
cen = self.cmda.center
self.locs.set_data('ra',ra)
self.locs.set_data('dec',dec)
self.locs.set_data('centerx',(cen[0],))
self.locs.set_data('centery',(cen[1],))
if self.maskmaker is not None:
l,w = self.maskmaker.size
l2,w2=l/2.0,w/2.0
self.locs.set_data('boxx',np.array([-w2,w2,w2,-w2,-w2])+cen[0])
self.locs.set_data('boxy',np.array([-l2,-l2,l2,l2,-l2])+cen[1])
else:
self.locs.set_data('boxx',np.array(tuple()))
self.locs.set_data('boxy',np.array(tuple()))
self.updategastars = 'loc'
def _late_dogastars(self):
self.updategastars = True
def _updategastars_fired(self,val):
if self.agband:
mg,ma = self._guidealignmasks()
if val is True or val == 'loc':
if self.cmda.locs is None:
ra,dec = np.zeros(self.cmda._nd),np.zeros(self.cmda._nd)
else:
ra = self.cmda.locs[0]
dec = self.cmda.locs[1]
self.locs.set_data('gra',ra[mg])
self.locs.set_data('gdec',dec[mg])
self.locs.set_data('ara',ra[ma])
self.locs.set_data('adec',dec[ma])
if val is True or val == 'xd':
x = self.cmda.getData(self.xax)
self.data.set_data('gx',x[mg])
self.data.set_data('ax',x[ma])
if val is True or val == 'yd':
y = self.cmda.getData(self.yax)
self.data.set_data('gy',y[mg])
self.data.set_data('ay',y[ma])
else:
self.locs.set_data('gra',[])
self.locs.set_data('gdec',[])
self.locs.set_data('ara',[])
self.locs.set_data('adec',[])
self.locs.set_data('gx',[])
self.locs.set_data('gy',[])
self.locs.set_data('ax',[])
self.locs.set_data('ay',[])
def _guidealignmasks(self):
mdat = self.cmda.getData(self.agband)
pricut = self.agigpri > self.priorities
mg = (mdat < self.gstarcut) & pricut
ma = (mdat < self.astarcut) & pricut & ~mg
return mg,ma
def _offsetset_fired(self):
od = OffsetDialog(self.cmda,self)
od.edit_traits()
def _masktype_changed(self,newval):
if newval == 'DEIMOS':
self.maskmaker = DEIMOSMaskMaker(self)
else:
raise TraitError('Unrecognized Mask Type')
self.updatelocs = True
def _maskshortname_changed(self,new,old):
maxn = self.maskmaker.shortlen
if len(new) > maxn:
self.maskshortname = new[:maxn]
def _makemaskbutton_fired(self):
self.maskmaker.makeMask(self.maskfn)
#increment number if there is one in the name
name = self.maskshortname
for cut in range(len(name)+1):
try:
num = int(name[cut:])
num+=1
self.maskshortname = name[:cut]+str(num)
self.masklongname = self.maskshortname
break
except ValueError:
pass
#do the same for the file name
name = self.maskfn
for cut in range(len(name)+1):
try:
num = int(name[cut:])
num+=1
self.maskfn = name[:cut]+str(num)
break
except ValueError:
pass
def _get_priorities(self):
off = self.cmda.getOffsets()
pri = 1-off/off.max()
if self.priband and self.pribandw:
data = self.cmda.getData(self.priband)
if self.pribandw < 0:
data = (data-data.min())/(data.max()-data.min())
else:
data = (data.max()-data)/(data.max()-data.min())
pri = pri+self.pribandw*data
pri = (pri - pri.min())/(pri.max()-pri.min())
pri = pri**self.pripower
if self.hideg0:
# if self.hideg0 and self.cmda.datagroups is not None:
# m = self.cmda.datagroups ==1
# else:
# m = slice(None)
# pri[m] = 1
pri[~self._get_g0m()] = 0
return pri
def _updatepri_fired(self):
#pri = np.arange(len(self.data.get_data('x')))
#pri = self.cmda.getOffsets()
self.data.set_data('pri',self.priorities)
self.locs.set_data('pri',self.priorities)
if self.agigpri < 1:
self.updategastars = True
class DEIMOSMaskMaker(MaskMaker):
#import pyraf #do this to make sure pyraf is installed
name = 'Dsim'
shortlen = 6
size = (5.0/60,16.0/60)
exporttoiraf = False
    def makeMask(self,fn):
        """
        Generate a DEIMOS mask by writing a dsimulator input file and running
        IRAF's dsimulator twice: once at the default PA, then again with the
        center/PA read back from the first run's output. Afterward, selected
        objects are moved to group 0 and the IRAF parameters are saved to
        '<basefn>.irafpars'. Existing output files are deleted first.
        :param fn: base name or '.in' file name for the dsimulator input
        """
        from pyraf import iraf
        import os
        from os import path
        from warnings import warn

        fn = fn if fn.endswith('.in') else fn+'.in'
        basefn = path.splitext(fn)[0]
        print 'making',fn
        if path.exists(fn):
            warn('path %s exists, overwriting'%fn)
            os.remove(fn)
        self.writeInFile(fn)

        # keyword arguments passed to iraf.dsimulator
        dskw={}
        dskw['output'] = basefn+'.dsim'
        dskw['mdf'] = basefn+'.fits'
        dskw['plotfile'] = basefn+'.plot'
        dskw['ra0'] = self.sto.cmda.center[0]*24.0/360.0  # degrees -> hours
        dskw['dec0'] = self.sto.cmda.center[1]
        dskw['equinox'] = 2000
        dskw['guiname'] = self.sto.maskshortname
        dskw['maskid'] = self.sto.masklongname.replace(' ','_') if self.sto.masklongname else self.sto.maskshortname
        print dskw['ra0'],dskw['dec0'],iraf.dsimulator.PA0
        # remove stale outputs so dsimulator doesn't refuse to overwrite
        for k in ('output','mdf','plotfile'):
            kfn = dskw[k]
            print 'deleteing',kfn
            if path.exists(kfn):
                os.remove(kfn)
        print 'running dsimulator'
        iraf.dsimulator(fn,**dskw)
        print 'dsimulator 1 complete!'

        # second pass: re-center/re-orient from the first run's output
        fn2 = fn.replace('.in','.in2')
        dskw['ra0'],dskw['dec0'],dskw['PA0'] = self.secondFile(dskw['output'],fn2)
        print 'deleting intermediate files'
        for k in ('output','mdf','plotfile'):
            kfn = dskw[k]
            print 'deleteing',kfn
            if path.exists(kfn):
                os.remove(kfn)
        print 'Running with offset pa'
        iraf.dsimulator(fn2,**dskw)

        print 'setting selected objects to 0 group'
        self.doneToG0(dskw['output'],self.sto.cmda)

        print 'saving iraf parameters'
        with open(basefn+'.irafpars','w') as f:
            f.write('input:%s\n'%fn2)
            for t in dskw.iteritems():
                f.write('%s\t=\t%s\n'%t)
        if self.exporttoiraf:
            # mirror the used parameters into the live IRAF parameter set
            print 'exporting settings to iraf'
            for k,v in dskw.iteritems():
                setattr(iraf.dsimulator,k,v)
            #iraf.dsimulator.output = ''
            #iraf.dsimulator.mdf = ''
            #iraf.dsimulator.plotfile = ''
            iraf.dsimulator.saveParList()
        print 'all done!'
def writeInFile(self,fn,pa=None):
from astropysics.coords import AngularCoordinate
pri = 1000*self.sto.priorities
ni = len(pri)
mg,ma = self.sto._guidealignmasks()
ra,dec = self.sto.cmda.locs
magband = self.sto.priband if self.sto.priband else self.sto.agband
mags = self.sto.cmda.getData(magband)
self.sto.groupcutoff
if self.sto.cmda.datagroups is None:
samp = np.ones(ni)
samp[mags > self.sto.groupcutoff] = 2
pri[mg] = -1
pri[ma] = -2
else:
samp = self.sto.cmda.datagroups.copy()
ssamp = set(samp)
keepbright = samp==1 if self.sto.agigg1 else np.zeros(samp.shape,dtype=bool)
pri[(samp==-1) & ~ma] = -1
pri[(samp==-2) & ~mg] = -2
#pri[samp==0] = 0 #do this automatically or not from the GUI
pri[mg & ~keepbright] = -1
pri[ma & ~keepbright] = -2
samp[(samp==-1) | (pri==-1)] = 6
samp[(samp==-2) | (pri==-2)] = 6
samp[(mags > self.sto.groupcutoff) & (samp>0)] = 5
#samp[pri==0] = 5
if pa is not None:
pa = np.ones(ni)*pa
#pri[pri==0]=1 #avoid priority-0 cases due to dsim weirdness
presel = np.zeros(ni)
with open(fn,'w') as f:
for i,n in enumerate(self.sto.cmda.datanames):
rao,deco = AngularCoordinate(ra[i]),AngularCoordinate(dec[i])
ras,decs = rao.getHmsStr(canonical = True),deco.getDmsStr(canonical = True)
if samp[i] != -9 or pri[i]==-1 or pri[i] == -2: #-9 is a code to not write at all
if pa is None:
t = (n[:16],ras,decs,2000,mags[i],magband,int(pri[i]),samp[i],presel[i])
f.write('%s\t%s %s %i %2.2f %s %4i %i %i INDEF\n'%t)
else:
t = (n[:16],ras,decs,2000.0,mags[i],magband,int(pri[i]),samp[i],presel[i],pa[i])
f.write('%s\t%s\t%s\t%06.1f\t%2.2f\t%s\t%i\t%i\t%i\t%i\n'%t)
# def fixPaFile(self,fn1,fn2,degoffset=5):
# #fn1 is th dsim output of the first dsim run, fn2 is the target file name
# from astropysics.coords import AngularPosition
# with open(fn1,'r') as fr:
# with open(fn2,'w') as fw:
# for l in fr:
# if 'PA' in l:
# ras,decs,epochs,pas = l.split()[-5:-1]
# panum = float(pas.replace('PA=',''))
# fw.write(l)
# else:
# ls = l.strip()
# if ls.startswith('#') or ls == '':
# fw.write(l)
# else:
# fw.write(ls)
# fw.write(' %i\n'%(panum+degoffset))
# ap = AngularPosition(ras,decs)
# return ap.ra.d,ap.dec.d,panum
def secondFile(self,fn1,fn2,degoffset=5):
#fn1 is th dsim output of the first dsim run, fn2 is the target file name
from astropysics.coords import AngularPosition,AngularCoordinate
with open(fn1,'r') as fr:
with open(fn2,'w') as fw:
for l in fr:
if 'PA' in l:
ls = l.split()
for i in range(len(ls)-1):
try:
raac = AngularCoordinate(ls[i],sghms=True)
decac = AngularCoordinate(ls[i+1],sghms=False)
break
except ValueError:
pass
pas = ls[-2]
panum = float(pas.replace('PA=',''))
fw.write(l)
else:
ls = l.strip()
if ls.startswith('#') or ls == '':
fw.write(l)
else:
#fw.write(ls)
#fw.write(' %i\n'%(panum+degoffset))
name,ra,dec,epoch,mag,band,pri,grp,presel = ls.split()[:9]
if int(presel):
pri = int(pri)
else:
pri = -2 if (float(mag) < self.sto.astarcut2) else int(pri)
t=(name,ra,dec,epoch,mag,band,pri,grp,presel,panum+degoffset)
fw.write('%s\t%s %s %s %s %s %4.1i %s %s %i\n'%t)
ap = AngularPosition(raac,decac)
return ap.ra.d,ap.dec.d,panum
@staticmethod
def doneToG0(fn,cmda,forceradec=False):
objs=[]
ras,decs=[],[]
with open(fn) as f:
watch = False
for l in f:
if watch and l.strip() and not l.lstrip().startswith('#'):
ls=l.split()
if int(ls[6]) >= 0 and int(ls[8]) >= 0:
objs.append(int(ls[0]))
ras.append(ls[1])
decs.append(ls[2])
if 'Selected Objects' in l:
watch = True
if 'Selected Guide' in l:
#watch = False
break
objs = np.array(objs)-1
if cmda.datagroups is not None:
g = cmda.datagroups
else:
g = np.ones(cmda._nd)
if forceradec or objs.max() >= len(g):
#if necessary, do ra/dec matching instead of direct indexing
from ..coords import match_coords,radec_str_to_decimal
rao,deco = cmda.locs
ras,decs = radec_str_to_decimal(ras,decs)
mask = match_coords(rao,deco,ras,decs,0.5/3600)[0]
g[mask] = 0
else:
if len(objs) > 0:
g[objs] = 0
if not all(g):
cmda.datagroups = g
def spec_target(*args):
    """Create and display a SpecTarget GUI.

    Two calling conventions are accepted:

    * ``spec_target(cmdanalyzer)`` -- use a ready-made CMDAnalyzer.
    * ``spec_target(fiducialdict, data)`` -- supply a dictionary of
      fiducial data plus a data set (array or dict).

    A GUI application instance must already exist (e.g. IPython's
    interactive mode).  Returns the new SpecTarget instance.
    """
    target = SpecTarget(*args)
    target.edit_traits()
    return target
/Djaloha-0.4.2.tar.gz/Djaloha-0.4.2/djaloha/static/aloha.0.20.20/plugins/extra/numerated-headers/lib/numerated-headers-plugin.js | * Aloha Editor
* Author & Copyright (c) 2010 Gentics Software GmbH
* aloha-sales@gentics.com
* Licensed unter the terms of http://www.aloha-editor.com/license.html
*/
define([
'aloha/jquery',
'aloha/plugin',
'aloha/floatingmenu',
'i18n!numerated-headers/nls/i18n',
'i18n!aloha/nls/i18n',
'css!numerated-headers/css/numerated-headers.css'
],
function ($, Plugin, FloatingMenu, i18n, i18nCore) {
var Aloha = window.Aloha;
return Plugin.create('numerated-headers', {
config: {
numeratedactive: true,
headingselector: 'h1, h2, h3, h4, h5, h6',
trailingdot: false
},
/**
* Initialize the plugin
*/
		init: function () {
			var that = this;
			// add button to toggle numerated-headers
			this.numeratedHeadersButton = new Aloha.ui.Button({
				'iconClass' : 'aloha-button aloha-button-numerated-headers',
				'size' : 'small',
				'onclick' : function () {
					// pressed state means numbers are currently shown
					if (that.numeratedHeadersButton.isPressed()) {
						that.removeNumerations();
					} else {
						that.createNumeratedHeaders();
					}
				},
				'tooltip' : i18n.t('button.numeratedHeaders.tooltip'),
				'toggle' : true /*,
				'pressed' : this.numeratedactive */
			});
			FloatingMenu.addButton(
				'Aloha.continuoustext',
				this.numeratedHeadersButton,
				i18nCore.t('floatingmenu.tab.format'),
				1
			);
			// We need to bind to smart-content-changed event to recognize
			// backspace and delete interactions.
			Aloha.bind('aloha-smart-content-changed', function (event) {
				// rebuild so removed/changed headings get renumbered
				that.cleanNumerations();
				if (that.showNumbers()) {
					that.createNumeratedHeaders();
				}
			});
			// We need to listen to that event, when a block is formatted to
			// header format. smart-content-changed would be not fired in
			// that case
			Aloha.bind('aloha-format-block', function () {
				that.cleanNumerations();
				if (that.showNumbers()) {
					that.createNumeratedHeaders();
				}
			});
			Aloha.bind('aloha-editable-activated', function (event) {
				// hide the button, when numerating is off
				if (that.numeratedHeadersButton) {
					if (that.isNumeratingOn()) {
						that.numeratedHeadersButton.show();
						that.initForEditable(Aloha.activeEditable.obj);
					} else {
						that.numeratedHeadersButton.hide();
					}
				}
			});
		},
/**
* Init the toggle button (and numerating) for the current editable,
* if not yet done.
* If numerating shall be on by default and was not turned on, numbers will be created.
*/
initForEditable: function ($editable) {
var flag = $editable.attr('aloha-numerated-headers');
if (flag !== 'true' && flag !== 'false') {
flag = (true === this.getCurrentConfig().numeratedactive) ? 'true' : 'false';
$editable.attr('aloha-numerated-headers', flag);
}
if (flag === 'true') {
this.createNumeratedHeaders();
this.numeratedHeadersButton.setPressed(true);
} else {
this.numeratedHeadersButton.setPressed(false);
}
},
/**
* Get the config for the current editable
*/
getCurrentConfig: function () {
var config = this.getEditableConfig(Aloha.activeEditable.obj);
// normalize config (set default values)
if (config.numeratedactive === true || config.numeratedactive === 'true' || config.numeratedactive === '1') {
config.numeratedactive = true;
} else {
config.numeratedactive = false;
}
if (typeof config.headingselector !== 'string') {
config.headingselector = 'h1, h2, h3, h4, h5, h6';
}
config.headingselector = $.trim(config.headingselector);
if (config.trailingdot === true || config.trailingdot === 'true' || config.trailingdot === '1') {
config.trailingdot = true;
} else {
config.trailingdot = false;
}
return config;
},
/**
* Check whether numerating shall be possible in the current editable
*/
isNumeratingOn: function () {
return this.getCurrentConfig().headingselector !== '';
},
/**
* Check whether numbers shall currently be shown in the current editable
*/
showNumbers: function () {
return (
Aloha.activeEditable &&
this.isNumeratingOn() &&
(Aloha.activeEditable.obj.attr('aloha-numerated-headers') === 'true')
);
},
/**
* Remove all annotations in the current editable.
*/
cleanNumerations: function () {
var active_editable_obj = this.getBaseElement();
if (!active_editable_obj) {
return;
}
$(active_editable_obj).find('span[role=annotation]').each(function () {
$(this).remove();
});
},
/**
* Removed and disables numeration for the current editable.
*/
removeNumerations : function () {
$(Aloha.activeEditable.obj).attr('aloha-numerated-headers', 'false');
this.cleanNumerations();
},
getBaseElement: function () {
if (typeof this.baseobjectSelector !== 'undefined') {
return ($(this.baseobjectSelector).length > 0) ?
$(this.baseobjectSelector) : null;
}
return Aloha.activeEditable ? Aloha.activeEditable.obj : null;
},
/*
* checks if the given Object contains a note Tag that looks like this:
* <span annotation=''>
*
* @param {Object} obj - The Object to check
*/
hasNote: function (obj) {
if (!obj || $(obj).length <= 0) {
return false;
}
return $(obj).find('span[role=annotation]').length > 0;
},
/*
* checks if the given Object has textual content.
* A possible "<span annotation=''>" tag will be ignored
*
* @param {Object} obj - The Object to check
*/
hasContent: function (obj) {
if (!obj || 0 === $(obj).length) {
return false;
}
// we have to check the content of this object without the annotation span
var $objCleaned = $(obj).clone()
.find('span[role=annotation]')
.remove()
.end();
// check for text, also in other possible sub tags
return $.trim($objCleaned.text()).length > 0;
},
		/**
		 * Walk every heading matched by the configured selector and prefix
		 * it with a hierarchical number ("1", "1.2", "1.2.3", ...) inside a
		 * <span role="annotation">.  Headings without textual content lose
		 * any existing annotation.  Counting is relative to the smallest
		 * heading rank actually present (the "base rank").
		 */
		createNumeratedHeaders: function () {
			var active_editable_obj = this.getBaseElement();
			if (!active_editable_obj) {
				return;
			}
			var config = this.getCurrentConfig();
			var headingselector = config.headingselector;
			var headers = active_editable_obj.find(headingselector);
			Aloha.activeEditable.obj.attr('aloha-numerated-headers', 'true');
			if (typeof headers === 'undefined' || headers.length === 0) {
				return;
			}
			// base rank is the lowest rank of all selected headers
			var base_rank = 7;
			var that = this;
			headers.each(function () {
				if (that.hasContent(this)) {
					// heading rank is the digit of the tag name (h1 -> 1)
					var current_rank = parseInt(this.nodeName.substr(1), 10);
					if (current_rank < base_rank) {
						base_rank = current_rank;
					}
				}
			});
			if (base_rank > 6) {
				return;
			}
			var prev_rank = null,
				current_annotation = [],
				annotation_pos = 0,
				i;
			// initialize the base annotations
			for (i = 0; i < (6 - base_rank) + 1; i++) {
				current_annotation[i] = 0;
			}
			headers.each(function () {
				// build and count annotation only if there is content in this header
				if (that.hasContent(this)) {
					var current_rank = parseInt(this.nodeName.substr(1), 10);
					if (prev_rank === null && current_rank !== base_rank) {
						// when the first found header has a rank
						// different from the base rank, we omit it
						$(this).find('span[role=annotation]').remove();
						return;
					} else if (prev_rank === null) {
						// increment the main annotation
						current_annotation[annotation_pos]++;
					} else if (current_rank > prev_rank) {
						// starts a sub title
						current_annotation[++annotation_pos]++;
					} else if (current_rank === prev_rank) {
						// continues subtitles
						current_annotation[annotation_pos]++;
					} else if (current_rank < prev_rank) {
						//goes back to a main title
						var current_pos = current_rank - base_rank;
						var j;
						for (j = annotation_pos; j > (current_pos); j--) {
							current_annotation[j] = 0; //reset current sub-annotation
						}
						annotation_pos = current_pos;
						current_annotation[annotation_pos]++;
					}
					prev_rank = current_rank;
					// assemble the visible number, with or without a
					// trailing dot depending on configuration
					var annotation_result = '', i;
					if (config.trailingdot === true) {
						annotation_result = '';
						for (i = 0; i < current_annotation.length; i++) {
							if (current_annotation[i] !== 0) {
								annotation_result += (current_annotation[i] + '.');
							}
						}
					} else {
						annotation_result = current_annotation[0];
						for (i = 1; i < current_annotation.length; i++) {
							if (current_annotation[i] !== 0) {
								annotation_result += ('.' + current_annotation[i]);
							}
						}
					}
					if (that.hasNote(this)) {
						$(this).find('span[role=annotation]').html(annotation_result);
					} else {
						$(this).prepend('<span role="annotation">' +
							annotation_result + '</span>');
					}
				} else {
					// no Content, so remove the Note, if there is one
					if (that.hasNote(this)) {
						$(this).find('span[role=annotation]').remove();
					}
				}
			});
		}
});
}); | PypiClean |
/OBP_reliability_pillar-0.2.0.tar.gz/OBP_reliability_pillar-0.2.0/OBP_reliability_pillar/ec2/ec2_instance_detailed_monitoring_enabled.py | import botocore
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
# checks compliance.py for ec2 instance detailed monitoring enabled
def ec2_instance_detailed_monitoring_enabled(self) -> dict:
    """
    Check whether detailed (1-minute) CloudWatch monitoring is enabled on
    every EC2 instance in every available region.

    :param self: object exposing ``session`` (a boto3 session) and
        ``refresh_session()``
    :return: compliance result dict with keys Result, failReason,
        resource_type, ControlId, Offenders, Compliance_type, Description
        and Risk Level
    """
    logger.info(" ---Inside ec2 :: ec2_instance_detailed_monitoring_enabled")
    self.refresh_session()

    result = True
    failReason = ''
    offenders = []
    control_id = 'Id3.30'

    compliance_type = "EC2 instance detailed monitoring enabled"
    description = "Checks if detailed monitoring is enabled for EC2 instances."
    resource_type = "EC2 Instance"
    risk_level = 'Low'

    regions = self.session.get_available_regions('ec2')

    for region in regions:
        try:
            client = self.session.client('ec2', region_name=region)
            # Paginate manually: the first request must NOT carry a
            # NextToken at all (the API rejects an empty-string token);
            # subsequent requests reuse the token from the last response.
            kwargs = {'MaxResults': 1000}
            while True:
                response = client.describe_instances(**kwargs)
                for reservation in response.get('Reservations', []):
                    for instance in reservation['Instances']:
                        # 'Monitoring.State' is 'enabled' only for detailed
                        # (1-minute) monitoring; 'disabled' means basic.
                        if instance['Monitoring']['State'] != 'enabled':
                            result = False
                            failReason = "Monitoring is not enabled in instances"
                            offenders.append(region + ': ' + instance['InstanceId'])
                token = response.get('NextToken')
                if not token:
                    break
                kwargs['NextToken'] = token
        except botocore.exceptions.ClientError as e:
            # keep scanning the remaining regions on per-region failures
            logger.error('Something went wrong with region {}: {}'.format(region, e))

    return {
        'Result': result,
        'failReason': failReason,
        'resource_type': resource_type,
        'ControlId': control_id,
        'Offenders': offenders,
        'Compliance_type': compliance_type,
        'Description': description,
        'Risk Level': risk_level
    }
/INGInious-0.8.7.tar.gz/INGInious-0.8.7/doc/api_doc/inginious.client.rst | inginious.client package
========================
.. automodule:: inginious.client
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
.. _inginious.client.client:
inginious.client.client module
------------------------------
.. automodule:: inginious.client.client
:members:
:undoc-members:
:show-inheritance:
inginious.client.client_buffer module
-------------------------------------
.. automodule:: inginious.client.client_buffer
:members:
:undoc-members:
:show-inheritance:
inginious.client.client_sync module
-----------------------------------
.. automodule:: inginious.client.client_sync
:members:
:undoc-members:
:show-inheritance:
| PypiClean |
/Faker-19.3.1.tar.gz/Faker-19.3.1/faker/providers/person/ta_IN/__init__.py | from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats_female = (
"{{first_name_female}}",
"{{first_name_female}}",
"{{first_name_female}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
)
formats_male = (
"{{first_name_male}}",
"{{first_name_male}}",
"{{first_name_male}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
)
formats = formats_male + formats_female
# Source: http://tamilcube.com/babynames/tamil-baby-names.aspx
first_names_male = (
"அகண்டலன்",
"அகத்தியன்",
"அகரன்",
"அகரமுதல்வன்",
"அகற்கண்ணன்",
"அகற்குறி",
"அகலன்",
"அகலறிவன்",
"அகலார்சடையன்",
"அகல்",
"அகழ்மேனி",
"அகவன்",
"அகாத்",
"அகிரா",
"அகிலங்கடந்தான்",
"அகிலன்",
"அகில்",
"அகுல்,",
"அகோரா",
"அக்தர்",
"அக்னி",
"அக்னிகுமாரன்",
"அக்மல்,",
"அக்ரூர்,",
"அக்ரோதனன்",
"அங்கணன்",
"அங்கதன்",
"அச்சுதானந்தன்",
"அஜய்",
"ஆகர்ணா,",
"ஆகாஷ்",
"ஆகேந்திரா",
"ஆக்னேயா",
"ஆசைத்தம்பி",
"ஆஞ்சனேயா",
"ஆடலரசன்",
"ஆடலரசு",
"ஆட்டனத்தி",
"ஆண்டர்ஸன்",
"ஆண்ட்ரு",
"ஆதர்ஷ்",
"ஆதர்ஷ்,ஆதேஷ்",
"ஆதவன்",
"ஆதி",
"ஆதிகுணா",
"ஆதிகேசவன்",
"ஆதிசங்கரா",
"ஆதிசேஷா",
"ஆதிதேவா",
"ஆதித்யவர்த்தன்",
"ஆதித்யா",
"ஆதிநாதன்",
"ஆதிநாராயணா",
"ஆதிமூர்த்தி",
"ஆத்மஜா,",
"ஆனந்த",
"ஆனந்தகிரி",
"ஆனந்தசாகரன்",
"ஆனந்ததேவன்",
"இக்பால்",
"இக்ஷூ,",
"இசக்கிமுத்து",
"இசைக்கலை",
"இசைக்கலைவாணன்",
"இசைக்கோ",
"இசைச்செல்வன்",
"இசைச்செல்வம்",
"இசைத்தமிழன்",
"இசைத்தம்பி",
"இசைமணி",
"இசைமாமணி",
"இசைமுதல்வன்",
"இசையரசன்",
"இசையரசு",
"இசையறிவன்",
"இசையழகன்",
"இசையாளன்",
"இசையேந்தல்",
"இசைவளன்",
"இசைவளவன்",
"இசைவாணன்",
"இசைவேந்தன்",
"இடைக்காடன்",
"இடைக்காடர்",
"இந்தரஜித்",
"இந்திகாப்",
"இந்திரகுமார்",
"இந்திரநீல்",
"இந்திவார்",
"உஜாகர்",
"உஜேஷ்",
"உட்கர்ஷ்",
"உதயகுமார்;",
"உதயச்சல்",
"உதயன்",
"உதயபரிதி",
"உதயமூர்த்தி",
"உதயவன்",
"உதயவானன்",
"உதயா",
"உதய்",
"உதர்",
"உதாங்கன்",
"உதித்",
"உதியஞ்சேரல்",
"உதியன்",
"உதீப்",
"உத்கர்ஷா",
"உத்சவ்",
"உத்தம்",
"உத்தர்",
"உத்தவ்",
"உத்தியா",
"உத்பல்",
"உன்னத்",
"உன்மேஷ்",
"உபதேஷ்",
"உபமன்யூ",
"உபேந்திரா",
"ஊர்ஜித்",
"எகாம்பரம்",
"எட்டப்பன்",
"எழினி",
"எழிற்கண்",
"எழிற்கதிர்",
"எழிற்குமரன்",
"எழிற்கோ",
"எழிற்கோமகன்",
"எழிற்பாவியன்",
"எழிலகன்",
"எழிலன்",
"எழிலன்பன்",
"எழிலரசன்",
"எழிலழகன்",
"எழிலாம்பல்",
"எழிலேந்தி",
"எழில்",
"எழில்குமரன்",
"எழில்மணி",
"எழில்மதி",
"எழில்மன்னன்",
"எழில்மலை",
"எழில்முகிலன்",
"ஏகசந்திரா",
"ஏகசிந்த்",
"ஏகராஜ்",
"ஏகலைவன்",
"ஏகா",
"ஏகாங்கா",
"ஏகாம்பரம்",
"ஏக்ராம்",
"ஏந்தல்",
"ஏழிசை",
"ஏழிசைக்கதிர்",
"ஏழிசைக்கனல்",
"ஏழிசைக்கனி",
"ஏழிசைக்கலை",
"ஏழிசைக்குமரன்",
"ஏழிசைக்குரிசில்",
"ஐயனாரப்பன்",
"ஐயன்",
"ஐயப்பன்",
"ஐயம்பெருமான்",
"ஐயா",
"ஐஸக்",
"ஒட்டக்கூத்தன்",
"ஒட்டக்கூத்தர்",
"ஒளி",
"ஒளிஒவியம்",
"ஒளிமதி",
"ஒளியன்",
"ஒளியழகன்",
"ஒளியவன்",
"ஒளிர்நிலவன்",
"ஒளிவேந்தன்",
"ஒள்ளறிவன்",
"கங்கைகொண்டான்",
"கசரா",
"கடம்பன்",
"கடம்பா",
"கடர்",
"கடற்கோ",
"கடலரசன்",
"கடலிறை",
"கடல்வேந்தன்",
"கணியன்",
"கணேஷ்",
"கணைக்கால்",
"கண்ணதாசன்",
"கண்ணன்",
"கண்ணப்பன்",
"கண்ணாயிரம்",
"கண்ணுக்கினியன்",
"கண்ணையன்",
"கண்மணி",
"கண்மதியன்",
"கண்மயா",
"கதிரழகன்",
"கதிரவன்",
"கதிரொளி",
"கதிரேசன்",
"கதிரோன்",
"கதிர்",
"சகுந்தன்",
"சக்கரவர்த்தி",
"சக்திவேல",
"சங்கன்",
"சங்கிலி",
"சசி",
"சசிகாந்த்",
"சசிகுமார்",
"சசிதரன்",
"சச்சிதாநந்தம்",
"சஜீத்",
"சஞ்சய்",
"சஞ்ஜோக்",
"சடகோபன்",
"சதிஷ்வரன்",
"சதீஷ்",
"சத்தியலிங்கம்",
"சத்யநாராயணன்",
"சத்யமூர்த்தி",
"சத்யராஐ;",
"சத்யவாணன்",
"சத்யவிரதன்",
"சத்யா",
"சத்யேந்திரா",
"சத்ருகணன்",
"சந்தனு",
"சந்தானம்",
"சந்திர",
"தக்ஷேஷ்",
"தங்கக்கதிர்",
"தங்கச்சுடர்",
"தங்கதுரை",
"தங்கத்தமிழன்",
"தங்கத்தமிழ்",
"தங்கபாண்டியன்",
"தங்கமகன்",
"தங்கமணி",
"தங்கமதி",
"தங்கமுத்து",
"தங்கம்",
"தங்கராஐ;",
"தங்கவேலன்",
"தங்கவேலு",
"தங்கவேல்",
"தங்கேஷ்வரன்",
"தசரதன்",
"தஞ்சைவாணன்",
"தணி;சேரன்",
"தணிகேவேலன்",
"தணிகைச்செல்வன்",
"தணிகைத்தம்பி",
"தணிகைநம்பி",
"தணிகைமணி",
"தணிகைமுருகன்",
"தணிகைமுருகு",
"தணிகையரசு",
"தணிகைவேலன்",
"தணிகைவேல்",
"நக்கீரத்தமிழன்",
"நக்கீரன்",
"நக்கீரர்",
"நச்சினார்க்கினியன்",
"நச்சினார்க்கினியர்",
"நடராஜன்",
"நடவரசன்",
"நடேஷ்",
"நட்புச்செல்வன்",
"நந்தன்",
"நந்தா",
"நன்னன்",
"நன்னாடன்",
"நன்னாயகம்",
"நன்னி",
"நன்னூலன்",
"நன்னெறியன்",
"நன்மணி",
"நன்மதி",
"நன்மாறன்",
"நன்மொழியன்",
"நம்பி",
"நம்பிகுட்டுவன்",
"நம்பியருள்",
"நம்பியூரான்",
"நம்பிள்ளை",
"நம்பெருமான்",
"நம்பெருமாள்",
"நம்மாழ்வார்",
"பகத்",
"பகலவன்",
"பகவந்த்",
"பகீரதன்",
"பகுகுனன்",
"பகுதானன்",
"பகுபலி",
"பகுபாலன்",
"பகுபுத்ரன்",
"பகுப்ரியன்",
"பகுமான்யன்",
"பகுமித்ரன்",
"பக்தவச்சலம்",
"பசந்த்",
"பசவராஜ்",
"பசுபதி",
"பச்சையப்பன்",
"பஜன்",
"பஜரங்கன்",
"பதிரன்",
"பதுமனார்",
"பத்மநாபன்",
"பத்ரநிதி",
"பத்ராகன்",
"பத்ராயணர்",
"பத்ரி",
"பத்ரிநாதன்",
"பத்ரிநாராயணன்",
"பத்ருஹரி",
"பந்துல்",
"மகிணன்",
"மகிழரசன்",
"மகிழரசு",
"மகிழ்கோ",
"மகிழ்ச்சிக்கு",
"மகிழ்நன்",
"மகிழ்ந்தன்",
"மணவழகன்",
"மணவாளன்",
"மணி",
"மணிகண்டன்",
"மணிக்கதிர்",
"மணிக்கொடி",
"மணிக்கோ",
"மணிக்கோவன்",
"மணிச்சுடர்",
"மணிநிலவன்",
"மணிப்பவளன்",
"மணிமன்றவாணன்",
"மணிமலை",
"மணிமார்பன்",
"மணிமாறன்",
"மணிமுடி",
"மணிமுத்து",
"மணிமொழியன்",
"மணியன்",
"மணியன்செல்வம்",
"மணியரசன்",
"மணிரத்ணம்",
"மணிவண்ணன்",
"யஷ்வந்த்",
"யாழரசன்",
"யாழ்பாடி",
"யாழ்ப்பாணன்",
"யாழ்வாணன்",
"யூகேந்திரன்",
"யூகேஷ்",
"யூசுப்",
"யூவராஐன்",
"யூவராஜ்",
"யேவான்",
"யோகலிங்கம்",
"யோகாநந்தன்",
"யோகேஷ்",
"ரஃபி",
"ரகு",
"ரகுபதி",
"ரகுராம்",
"ரகுவரன்",
"ரங்கசாமி",
"ரஜினி",
"ரத்தினம்",
"ரமணன்",
"ரமணி",
"ரமேஷ்",
"ரமேஷ்கண்ணா",
"ரவி",
"ரவின்",
"ரஷஷுத்",
"ராகவன்",
"ராகவ்",
"ராஜ",
"ராஜகுரு",
"ராஜகோபால்",
"ராஜசேகர்",
"ராஜதுரை",
"ராஜப்பிரியன்",
"ராஜவேலு",
"ராஜா",
"ராஜீவ்",
"ராஜேஷ்",
"ராஜ்குமார்",
"ராபர்ட்",
"ராமசாமி",
"வசந்த்",
"வசந்த்குமார்",
"வடிவேற்கரசன்",
"வடிவேலன்",
"வடிவேல்",
"வடிவேல்முருகன்",
"வணங்காமுடி",
"வண்டார்குழலி",
"வண்ணநிலவன்",
"வண்ணன்",
"வரதராஐன்",
"வரதராஜ்",
"வருண்குமாH",
"வருனேஷ்",
"வல்லரசு",
"வல்லவன்",
"வளவன்",
"வள்ளல்",
"வள்ளிமணாளன்",
"வள்ளுவன்",
"வள்ளுவர்",
"வழுதி",
"வஷிஷ்டர்",
"வாகீசன்",
"வாசு",
"வாசுதேவன்",
"வாஞ்சினாதன்",
"வாணன்",
"வானத்து",
"வானமாமலை",
"ஷகுந்த்",
"ஷசி",
"ஷத்ருஞ்ஜய்",
"ஷபீர்",
"ஷம்பு",
"ஷரண்",
"ஷலின்",
"ஷஷாங்க்",
"ஷஸ்வத்",
"ஷா",
"ஷானவாஸ்",
"ஷிங்",
"ஷியாம்",
"ஷிஷிர்",
"ஷைலேந்திரா",
"ஷைலேஷ்",
"ஷைல்",
"ஷோபன்",
"ஷ்னேகல்",
"ஷ்யாமல்",
"ஷ்யாம்",
"ஷ்ராவண்",
"ஷ்வேதங்க்",
"ஸ்டீபன்",
"ஸ்ரீகாந்த்",
"ஸ்ரீசிவநாராயணன்",
"ஸ்ரீதர்",
"ஸ்ரீநிவாசன்",
"ஸ்ரீநிவாஸ்",
"ஸ்ரீபிரசாத்",
"ஸ்ரீராம்",
"ஸ்வப்நில்",
"ஹம்ரிஷ்",
"ஹரி",
"ஹரிகரண்",
"ஹரிதாஸ்",
"ஹரிஷ்",
"ஹரிஹரன்",
"ஹவினாஷன்",
"ஹஷ்விந்ரன்",
"ஹாருண்",
)
first_names_female = (
"அகத்தழகி",
"அகமணி",
"அகமதி",
"அகலிகை",
"அகல்யா",
"அகல்விழி",
"அகவழகு",
"அகவொளி",
"அகானா",
"அகிலா",
"அகிலாண்டம்",
"அகிலேஷ்வரி",
"அகில்",
"அக்னிகா",
"அக்னிமுகி",
"அக்னேயி",
"அக்ஷயா",
"அக்ஷரா",
"அக்ஷா",
"அக்ஷிதா",
"அங்கம்மாள்",
"அங்கயர்க்கண்ணி",
"அங்கவை",
"அங்கால",
"அங்கையர்க்கரசி",
"அசிரா",
"அச்சலா",
"அஜந்தா",
"ஆகமா",
"ஆசிரா",
"ஆசைச்செல்வி",
"ஆஞ்சல்",
"ஆடற்கொடி",
"ஆடற்கோமகள்",
"ஆடற்செல்வி",
"ஆடலரசி",
"ஆடலழகி",
"ஆடல்",
"ஆட்டநத்தி",
"ஆண்டாள்",
"ஆதர்ஷா",
"ஆதி",
"ஆதிசக்தி",
"ஆதித்தமிழ்",
"ஆதித்தா",
"ஆதித்தி",
"ஆதித்யபிரபா",
"ஆதிமகள்",
"ஆதிமறை",
"ஆதிமொழி",
"ஆதியரசி",
"இக்ஷிதா",
"இசை",
"இசைக்கதிர்",
"இசைக்கொடி",
"இசைக்கோமகள்",
"இசைச்செல்வம்",
"இசைச்செல்வி",
"இசைத்தேவி",
"இசைநேயம்",
"இசைமகள்",
"இசைமறை",
"இசைமுரசு",
"இசைமொழி",
"இசையமுதம்",
"இசையமுது",
"இசையரசி",
"இசையொளி",
"இசைவாணி",
"இதயா",
"இந்திரஜா",
"இந்திரா",
"இந்திராக்ஷி",
"இந்திராணி",
"இந்து",
"இந்துகலா",
"இந்துகா",
"இந்துஜா",
"இந்துமதி",
"இந்துமுகி",
"இந்துவதனி",
"உச்சிதா",
"உஜிலா",
"உண்மை",
"உண்மையொளி",
"உண்மைவிளம்பி",
"உதயசந்திரிகா",
"உதயா",
"உதயாதி",
"உத்தமி",
"உத்பலா",
"உன்னதி",
"உன்மைமொழி",
"உபாஸனா",
"உமயாள்",
"உமா",
"உமாமகேஷ்வரி",
"உமை",
"உமையம்மை",
"உமையரசி",
"உமையாள்",
"உயிரோவியம்",
"உலக",
"உலகநங்கை",
"உலகநேயம்",
"உலகமணி",
"உலகமதி",
"உலகம்மை",
"உலகிறை",
"உலகொளி",
"உலகோவியம்",
"ஊர்மிளா",
"ஊர்வசி",
"எமலி",
"எமல்டா",
"எமில்டா",
"எயினி",
"எரிதழல்",
"எரியீட்டி",
"எல்லி",
"எழிசை",
"எழினி",
"எழிற்கதிர்",
"எழிற்குமரி",
"எழிற்குவளை",
"எழிற்கோமகள்",
"எழிற்செல்வம்",
"எழிற்செல்வி",
"எழிலம்மை",
"எழிலரசி",
"எழிலழகி",
"எழிலி",
"எழிலிசை",
"எழிலேந்தி",
"எழிலோவியம்",
"எழில்",
"எழில்நிலவு",
"எழில்மகள்",
"எழில்மங்கை",
"ஏகாபரனா",
"ஏந்திசை",
"ஏந்திழை",
"ஏனாக்ஷி",
"ஏறுநடை",
"ஏலா",
"ஏழிசை",
"ஏழிசைக்கதிர்",
"ஏழிசைக்கனல்",
"ஏழிசைக்கனி",
"ஏழிசைக்கலை",
"ஏழிசைக்குமரி",
"ஏழிசைக்கொடி",
"ஏழிசைக்கோமகள்",
"ஏழிசைச்சுடர்",
"ஏழிசைச்செல்வம்",
"ஏழிசைச்செல்வி",
"ஏழிசைதேவி",
"ஏழிசைத்தென்றல்",
"ஏழிசைநாயகி",
"ஏழிசைநேயம்",
"ஏழிசைப்பாமகள்",
"ஏழிசைப்பாவை",
"ஏழிசைப்புதல்வி",
"ஏழிசைப்பொழில்",
"ஏழிசைமணி",
"ஏழிசைமதி",
"ஏழிசைமுரசு",
"ஐக்கியா",
"ஐராவதி",
"ஐஸ்வர்யா",
"ஒளவை",
"ஒளிசுடர",
"ஒளிமுகம்",
"ஒளிவாணி",
"கஐலட்சுமி",
"கங்கா",
"கங்கை",
"கஜோல்",
"கஜ்ரி",
"கடற்கோமகள்",
"கடலரசி",
"கடலிறை",
"கணையாழி",
"கண்ணகி",
"கண்ணம்மா",
"கண்ணிமை",
"கண்மணி",
"கண்மதி",
"கண்மலர்",
"கதிரழகி",
"கதிர்",
"கதிர்க்குமரி",
"கதிர்ச்செல்வி",
"கதிர்மாமணி",
"கத்ரினா",
"கனகவள்ளி",
"கனகா",
"கனல்",
"கனல்மொழி",
"கனிகா",
"கனிமதி",
"கனிமொழி",
"கனியமுது",
"கனிரா",
"சஃபா",
"சஃபியா",
"சகீனா",
"சகுண்",
"சக்தி",
"சங்கமித்ரா",
"சங்கமித்ரை",
"சங்கரி",
"சங்கவி",
"சங்கவை",
"சங்காரம்",
"சங்கீதா",
"சங்கு",
"சங்குக்கொடி",
"சங்குப்பூ",
"சங்குப்பூவழகி",
"சங்குமணி",
"சங்குமதி",
"சங்குமாலை",
"சங்கெழில்",
"சங்கொலி",
"சசிகலா",
"சசிரேகா",
"சச்சி",
"சஜனி",
"சஞ்சு",
"சதிகா",
"சத்தியவாணி",
"சந்தனம்",
"சந்தானலட்சுமி",
"தங்கக்கதிர்",
"தங்கச்சுடர்",
"தங்கத்தமிழ்",
"தங்கபாப்பா",
"தங்கபுஷ்பம்",
"தங்கமகள்",
"தங்கமணி",
"தங்கமதி",
"தங்கம்",
"தங்கம்மா",
"தங்கம்மாள்",
"தடாகை",
"தணிகைச்செல்வி",
"தண்ணிலவு",
"தண்ணொளி",
"தண்மதி",
"தத்தை",
"தனக்கோட்டி",
"தனபாக்கியம்",
"தனலட்சுமி",
"தனஸ்ரீ",
"தனித்தமிழ்",
"தனுப்பிரியா",
"தனுஷா",
"தனுஷ்கா",
"தனுஷ்ரி",
"தன்சி",
"தன்மானம்",
"தன்வி",
"தமயந்தி",
"நங்கை",
"நடவரசி",
"நதியா",
"நந்திகா",
"நந்திதா",
"நந்தினி",
"நன்முத்து",
"நன்மொழி",
"நப்பசலையார்",
"நயன்தாரா",
"நர்மதா",
"நறுமலர்",
"நறுமுகை",
"நற்றிணை",
"நல்ல",
"நல்லிசை",
"நளாயினி",
"நளினி",
"நவிதா",
"நவீனா",
"நவ்யா",
"நாகதேவி",
"நாகமணி",
"நாகமதி",
"நாகம்மாள்",
"நாகம்மை",
"நாகவல்லி",
"நாச்சி",
"நாச்சியார்",
"நாதவேணி",
"பகவதி",
"பகவத்",
"பச்சையம்மாள்",
"பஞ்சாமிர்தம்",
"பதுமை",
"பத்மபிரியா",
"பத்மா",
"பத்மினி",
"பனிமலர்",
"பன்னீர்",
"பன்னீர்செல்வி",
"பபிதா",
"பரணி",
"பரமேஷ்வரி",
"பரிமளம்",
"பரிமளா",
"பல்லவி",
"பழகுத்தமிழ்",
"பவதா",
"பவதாரணி",
"பவளக்கொடி",
"பவளமலை",
"பவளமல்லி",
"பவளம்",
"பவழமொழி",
"பவானி",
"பவித்ரா",
"பாக்கியலக்ஷ்மி",
"பாக்யஸ்ரீ",
"மகஷேவரி",
"மகிழினி",
"மகிழ்",
"மகிழ்வதனி",
"மங்களம்",
"மங்களா",
"மங்கை",
"மங்கையர்க்கரசி",
"மஞ்சனா",
"மஞ்சரி",
"மஞ்சள்",
"மஞ்சு",
"மணவழகி",
"மணி",
"மணிகா",
"மணிக்கதிர்",
"மணிக்கொடி",
"மணிச்சுடர்",
"மணிப்பவளம்",
"மணிமகள்",
"மணிமலர்",
"மணிமாலா",
"மணிமுகில்",
"மணிமேகலை",
"மணிமொழி",
"மணியம்மை",
"மணியரசி",
"மணியெழில்",
"மணியொளி",
"யசோதா",
"யமுனா",
"யஷ்வினி",
"யாமினி",
"யாளினி",
"யாழரசி",
"யாழிசை",
"யாழினி",
"யாழின்",
"யாழைப்போல்",
"யாழ்நங்கை",
"யாழ்மொழி",
"யூதிகா",
"யூவரானி",
"யேகம்மை",
"யோகமலர்",
"யோகராணி",
"யோகலட்சுமி",
"யோகவல்லி",
"யோஸ்னிதா",
"யோஹிதா",
"ரகசியா",
"ரக்ஷனா",
"ரக்ஷிகா",
"ரக்ஷிதா",
"ரக்ஷினி",
"ரங்கநாயகி",
"ரஞ்சனா",
"ரஞ்சிதம்",
"ரஞ்சிதா",
"ரஞ்சினி",
"ரட்சகா",
"ரதவனி",
"ரதி",
"ரனித்தா",
"ரமணி",
"ரம்ஜான்",
"ரம்யா",
"ராகினி",
"ராசாத்தி",
"ராஜகுமாரி",
"ராஜலட்சுமி",
"ராஜி",
"ராஜேஷ்வரி",
"ராணி",
"ராதா",
"ராதிகா",
"ரித்திகா",
"ரீஜா",
"ரீட்டா",
"ரீனா",
"வகேஷ்வரி",
"வசந்தசேனா",
"வசந்தா",
"வசந்தி",
"வசனா",
"வசுதா",
"வசுதாரிணி",
"வசுமதி",
"வஞ்சி",
"வஞ்சிக்கொடி",
"வஞ்சிப்பாமகள்",
"வஞ்சிமகள்",
"வடிவரசி",
"வடிவழகி",
"வடிவு",
"வடிவுக்கரசி",
"வண்டமிழ்",
"வண்டார்குழலி",
"வண்ணக்கதிர்",
"வண்ணமதி",
"வதனா",
"வதனி",
"வத்ஸலா",
"வனிதா",
"வமகேஷி",
"வருணி",
"வருனிதா",
"வர்ணவதி",
"வர்ஷா",
"வர்ஷினி",
"ஷக்தி",
"ஷண்சிலாதேவி",
"ஷதா",
"ஷதாக்ஷி",
"ஷந்தோஷி",
"ஷந்ஸா",
"ஷபரி",
"ஷப்னம்",
"ஷமா",
"ஷரணி",
"ஷரினி",
"ஷர்மிதா",
"ஷர்மிளா",
"ஷர்மிஸ்தா",
"ஷர்வானி",
"ஷஷி",
"ஷாந்தலா",
"ஷாந்தி",
"ஷானன்",
"ஷாமினி",
"ஷாரன்",
"ஷாலிகா",
"ஷாலினி",
"ஷாலு",
"ஷாஷினி,",
"ஷாஹ்னா",
"ஷிஃபாலி",
"ஷிகா",
"ஷிச்சி",
"ஷிபானி",
"ஸகஸ்ரா",
"ஸங்கரி",
"ஸத்யா",
"ஸத்வரி",
"ஸன்யுக்தா",
"ஸபீனா",
"ஸயூரி",
"ஸரயூ",
"ஸரளா",
"ஸரஸ்வதி",
"ஸரிகா",
"ஸஹிரா",
"ஸுபத்திரை",
"ஸுப்ரியா",
"ஸுப்ரீத்",
"ஸுமா",
"ஸுரபி",
"ஸெடெஃபானியா",
"ஸெடெபானி",
"ஸௌரா",
"ஸ்கந்தா",
"ஸ்திரிரத்னா",
"ஸ்துதி",
"ஸ்னேஹல்",
"ஸ்ப்ரிஹா",
"ஸ்மிதா",
"ஸ்மிருதி",
"ஸ்மேரா",
"ஸ்ராவந்தி",
"ஸ்ராவனி",
"ஸ்ரீகமா",
"ஸ்ரீகலா",
"ஸ்ரீகா",
"ஸ்ரீதேவி",
"ஸ்ரீநிதி",
"ஸ்ரீனா",
"ஸ்ரீமயி",
"ஸ்ரீமா",
"ஸ்ரீயா",
"ஸ்ரீயாதித்யா",
"ஸ்ரீலக்ஷ்மி",
"ஸ்ரீலா",
"ஸ்ரீலேகா",
"ஸ்ரீவல்லி",
"ஸ்ரீவித்யா",
"ஹசினிகா",
"ஹனிஷா",
"ஹன்சா",
"ஹன்யா",
"ஹன்ஷிகா",
"ஹம்சவர்த்தினி",
"ஹம்சவானி",
"ஹம்சா",
"ஹரிதா",
"ஹரினி",
"ஹரினிவேதா",
"ஹர்ஷா",
"ஹர்ஷிகா",
"ஹர்ஷிதா",
"ஹர்ஷினி",
"ஹலிமா",
"ஹவிஷ்மதி",
"ஹஸிதா",
"ஹஸினா",
"ஹஸ்னா",
"ஹாசினி",
"ஹிரண்யா",
"ஹெலன்",
"ஹேமந்தினி",
"ஹேமலதா",
"ஹேமா",
)
first_names = first_names_male + first_names_female
last_names = first_names | PypiClean |
/GNotifier-0.5.tar.gz/GNotifier-0.5/notifier/__init__.py | from restkit import Resource
from simplejson import loads, dumps
import urllib2
import socket
def to_str(value):
    # NOTE(review): despite the name, this normalizes *between* str and
    # unicode (Python 2 semantics): a byte string is decoded to unicode,
    # anything else is encoded to a byte string -- so the return type
    # depends on the input type.  Confirm callers rely on this asymmetry
    # before changing it.
    if isinstance(value, str):
        return value.decode('utf-8', 'ignore')
    return value.encode('utf-8', 'ignore')
def notify(title, message='', url='http://localhost:5222', path='/', recipients=[]):
    """
    Send a notification to a GNotifier server.

    - **title**: message title (required).
    - **message**: message body.
    - **url**: the url of the GNotifier server.
    - **path**: allowed paths are:

      * ``/`` send message on gtalk and gmail
      * ``/gmail`` send message on gmail
      * ``/gtalk`` send message on gtalk

    - **recipients**: optional list of recipient addresses.

    Returns the decoded JSON response; on any failure a dict with
    ``status=1`` and the error text under ``notify_error`` is returned
    instead of raising.
    """
    try:
        res = Resource(url)
        data = dict(title=title, message=message, recipients=recipients or None)
        page = res.post(path=path, payload=dumps(data),
                        headers={'Content-Type': 'application/json'})
        return loads(page.body)
    # 'except Exception, e' was Python-2-only syntax; 'as' works on 2.6+ and 3.x
    except Exception as e:
        return dict(status=1, notify_error=str(e))
def quick_notify(title, message='', url='http://localhost:5222', path='/', recipients=[], timeout=0.5):
    """
    Same as :func:`notifier.notify` but performed through urllib2 with a
    socket-level *timeout*, so a dead server cannot block the caller.

    The process-global socket default timeout is saved and restored in a
    ``finally`` block (the previous version unconditionally reset it to
    ``None``, clobbering any timeout another part of the program had set).
    """
    opener = urllib2.build_opener()
    opener.addheaders = [('Content-Type', 'application/json')]
    data = dict(title=title, message=message, recipients=recipients or None)
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        data = opener.open('%s%s' % (url, path), dumps(data)).read()
    # 'except Exception, e' was Python-2-only syntax; 'as' works on 2.6+ and 3.x
    except Exception as e:
        data = dict(status=1, quick_notify_error=str(e))
    else:
        data = loads(data)
    finally:
        socket.setdefaulttimeout(old_timeout)
    return data
def _main(args=None):
    """Parse command-line options and send a single notification.

    *args* may be an argv-style list (handy for tests); when empty or
    ``None`` the real ``sys.argv`` is parsed.  Invalid usage exits via
    ``parser.error``.  (The default was changed from a mutable ``[]`` to
    ``None``; the argument is only ever tested for truthiness, so the
    behavior is unchanged.)
    """
    from optparse import OptionParser
    import logging as log
    import time
    import sys
    # preset cooking timers: name -> (minutes, seconds)
    cook = dict(
        hard_eggs=(10, 0),
        boiled_eggs=(3, 15),
    )
    cook_str = ', '.join(['%s (%s)' % (k, '%sm%ss' % v) for k, v in sorted(cook.items())])
    parser = OptionParser()
    parser.usage = '%prog [options] message'
    parser.add_option("-u", "--url", dest="url",
                      default='http://localhost:5222',
                      help="HTTP Host")
    parser.add_option("-r", "--recipient", dest="recipients",
                      action="append", default=[],
                      help="Recipients. You can have more than one -r")
    parser.add_option("-q", "--quick", dest="quick",
                      action="store_true", default=False,
                      help="Use quick notifier")
    parser.add_option("-i", "--im", dest="im",
                      action="store_true", default=False,
                      help="Only send on IM")
    parser.add_option("-m", "--minutes", dest="minutes",
                      type="int", default="0",
                      help="Wait X minutes before sending")
    parser.add_option("-s", "--seconds", dest="seconds",
                      type="int", default="0",
                      help="Wait X seconds before sending")
    parser.add_option("-c", "--cook", dest="cook", metavar="INGREDIENT",
                      help="Send notification for cooking. Valid ingredients are: %s" % cook_str)
    parser.add_option("-v", "--verbose", dest="verbose",
                      action="count", default=0,
                      help="More output")
    if args:
        options, args = parser.parse_args(args)
    else:
        options, args = parser.parse_args()
    log.basicConfig(
        stream=sys.stdout,
        level=options.verbose and log.DEBUG or log.WARN,
        format='%(levelname)s %(message)s'
    )
    minutes = options.minutes
    seconds = options.seconds
    if options.cook:
        if minutes or seconds:
            # fixed message: -c is mutually exclusive with -m/-s
            parser.error("You cannot use minutes/seconds with the cook option")
        if options.cook in cook:
            minutes, seconds = cook.get(options.cook)
            args = ['Your %s are ready' % options.cook.replace('_', ' ')]
            options.im = True
        else:
            parser.error('%s is not in the cook list' % options.cook)
    if not args:
        parser.error('You must specify a message')
    if minutes or seconds:
        log.debug('Waiting %sm %ss before sending...', minutes, seconds)
        time.sleep((minutes * 60) + seconds)
    if options.quick:
        meth = quick_notify
    else:
        meth = notify
    log.debug('Sending notification...')
    results = meth(to_str(' '.join(args)),
                   url=options.url.strip('/'),
                   path=options.im and '/gtalk' or '/',
                   recipients=options.recipients,
                   )
    if results.get('status') == 0:
        results['recipients'] = ', '.join(results.get('recipients', []))
        log.warn('Request sent to %(recipients)s in %(time)ss', results)
    else:
        log.error('Request failure\n\t%r', results)
def main():
    """Command-line entry point.

    Delegates to ``_main()`` and exits quietly when the user presses
    Ctrl-C, instead of printing a KeyboardInterrupt traceback.
    """
    try:
        _main()
    except KeyboardInterrupt:
        # Ctrl-C during the optional wait/send delay is a normal way to bail out.
        pass
/DTMC/spatialModel/Hub/HubSEIRSD.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from .HubSEIRD import HubSEIRD
class HubSEIRSD(HubSEIRD):
    """
    Object that represents the Hub Model with compartments S, E, I, R, and D. In this model, E is assumed to not be
    able to spread the virus.

    Parameters
    ----------
    S0: int
        Initial amount of susceptibles at the start of the simulation.
    E0: int
        Initial amount of exposed at the start of the simulation.
    I0: int
        Initial amount of infected at the start of the simulation.
    R0: int
        Initial amount of recovered at the start of the simulation.
    pss: float
        The probability that the randomly generated person at the start of the simulation is a super spreader.
    rho: float
        Rho is the probability of someone moving from E to I compartment. Rho is in [0, 1].
    gamma: float
        The probability of someone going from I to R.
    kappa: float
        The probability of someone going from R to S.
    mu: float
        The probability of going from I->D, given that they didn't go from I -> R.
    side: float
        The side length of the square plane the population moves on (assumed from the
        parent model's usage -- confirm against HubSEIRD).
    rstart: float
        The spreading radius of a normal spreader.
    days: int
        The number of days being simulated.
    w0: float optional
        The probability of a susceptible getting infected if the distance between the infectious person and susceptible is 0. Default is 1.0.
    hubConstant: float optional
        The scale by which the spreading radius of a super spreader increases. Default is sqrt(6).
    alpha: float optional
        Constant used in the infect probability generator. Default is 2.0.

    Attributes
    ----------
    S: ndarray
        A numpy array that stores the number of people in the susceptible state on each given day of the simulation.
    E: ndarray
        A numpy array that stores the number of people in the exposed state on each given day of the simulation.
    I: ndarray
        A numpy array that stores the number of people in the infected state on each given day of the simulation.
    R: ndarray
        A numpy array that stores the number of people in the recovered state on each given day of the simulation.
    D: ndarray
        A numpy array that stores the number of people in the death state on each given day of the simulation.
    popsize: int
        The total size of the population in the simulation. Given by S0 + E0 + I0 + R0.
    Scollect: list
        Used to keep track of the states each Person object is in. If the copy of a Person object has
        isIncluded == True, then the person is SUSCEPTIBLE. Has a total of popsize Person objects,
        with numbers [0, popsize).
    Ecollect: list
        Used to keep track of the states each Person object is in. If the copy of a Person object has
        isIncluded == True, then the person is EXPOSED. Has a total of popsize Person objects,
        with numbers [0, popsize).
    Icollect: list
        Used to keep track of the states each Person object is in. If the copy of a Person object has
        isIncluded == True, then the person is INFECTED. Has a total of popsize Person objects,
        with numbers [0, popsize).
    Rcollect: list
        Used to keep track of the states each Person object is in. If the copy of a Person object has
        isIncluded == True, then the person is RECOVERED. Has a total of popsize Person objects,
        with numbers [0, popsize).
    Dcollect: list
        Used to keep track of the states each Person object is in. If the copy of a Person object has
        isIncluded == True, then the person is DEAD. Has a total of popsize Person objects,
        with numbers [0, popsize).
    details: Simul_Details
        An object that can be returned to give a more in-depth look into the simulation. With this object,
        one can see transmission chains, state changes, the movement history of each individaul, the state
        history of each person, and more.
    """

    def __init__(self, S0: int, E0: int, I0: int, R0: int, pss: float, rho: float,
                 gamma: float, kappa: float, mu: float, side: float, rstart: float, days: int,
                 w0=1.0, hubConstant=6**0.5, alpha=2.0):
        # error checking of user-supplied values
        self.intCheck([S0, E0, I0, R0, days])
        self.floatCheck([pss, rho, gamma, kappa, mu, side, rstart, w0, alpha, hubConstant])
        self.negValCheck([S0, E0, I0, R0, pss, rho, gamma, kappa, mu, side, rstart, days, w0, hubConstant, alpha])
        self.probValCheck([pss, rho, gamma, kappa, mu, w0])
        # BUGFIX: forward w0 to the parent constructor. Previously w0 was
        # accepted and validated here but never passed on, so a non-default
        # value silently had no effect on the simulation.
        super().__init__(S0=S0, E0=E0, I0=I0, R0=R0, pss=pss, rho=rho, gamma=gamma, mu=mu,
                         side=side, rstart=rstart, alpha=alpha, days=days, w0=w0,
                         hubConstant=hubConstant)
        self.kappa = kappa

    def _RtoS(self):
        """Determine the set of people transitioning R -> S with probability kappa."""
        return self._changeHelp(self.Rcollect, self.kappa)

    def run(self, getDetails=True):
        """Run the simulation for self.days days.

        :param getDetails: when True, return the Simul_Details object for inspection.
        """
        for i in range(1, self.days + 1):
            # run the transfers from different compartments
            transferSE = self._StoE(i)
            transferEI = self._EtoI()
            transferIR = self._ItoR()
            # I->D must run after I->R because mu is conditional on not recovering
            transferID = self._ItoD()
            transferRS = self._RtoS()
            # flip the inclusion flags in the collection data structures
            for index in transferSE:
                self.Ecollect[index].isIncluded = True
                self.details.addStateChange(index, "E", i)
            for index in transferEI:
                self.Icollect[index].isIncluded = True
                self.details.addStateChange(index, "I", i)
            for index in transferIR:
                self.Rcollect[index].isIncluded = True
                self.details.addStateChange(index, "R", i)
            self._stateChanger(transferID, self.Dcollect, 'D', i)
            self._stateChanger(transferRS, self.Scollect, "S", i)
            # update the per-day compartment counts from the previous day's count
            self.S[i] = self.S[i - 1] - len(transferSE) + len(transferRS)
            self.E[i] = self.E[i - 1] + len(transferSE) - len(transferEI)
            self.I[i] = self.I[i - 1] + len(transferEI) - len(transferIR) - len(transferID)
            self.R[i] = self.R[i - 1] + len(transferIR) - len(transferRS)
            self.D[i] = self.D[i - 1] + len(transferID)
        if getDetails:
            return self.details

    def plot(self):
        """Plot the S, E, I, R, and D time series on five stacked subplots."""
        t = np.linspace(0, self.days, self.days + 1)
        fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(nrows=5, sharex='all')
        ax1.plot(t, self.S, label="Susceptible", color='r')
        ax1.set_ylabel("# Susceptibles")
        ax1.set_title("Hub Model SEIRSD Simulation")
        ax2.plot(t, self.E, label="Exposed", color='g')
        ax2.set_ylabel("# Exposed")
        ax3.plot(t, self.I, label="Active Cases", color='b')
        ax3.set_ylabel("# Active Infections")
        ax5.set_xlabel("Days")
        ax4.set_ylabel("# Recovered")
        ax4.plot(t, self.R, label="Recovered")
        ax5.plot(t, self.D, label="Dead")
        ax5.set_ylabel("# Dead")
        ax1.legend()
        ax2.legend()
        ax3.legend()
        ax4.legend()
        ax5.legend()
        plt.show()
/observations-0.1.4.tar.gz/observations-0.1.4/observations/r/survey.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def survey(path):
  """Student Survey Data

  This data frame contains the responses of 237 Statistics I students at
  the University of Adelaide to a number of questions.

  The components of the data frame are:

  `Sex`
      The sex of the student. (Factor with levels `"Male"` and
      `"Female"`.)
  `Wr.Hnd`
      span (distance from tip of thumb to tip of little finger of spread
      hand) of writing hand, in centimetres.
  `NW.Hnd`
      span of non-writing hand.
  `W.Hnd`
      writing hand of student. (Factor, with levels `"Left"` and
      `"Right"`.)
  `Fold`
      "Fold your arms! Which is on top" (Factor, with levels `"R on L"`,
      `"L on R"`, `"Neither"`.)
  `Pulse`
      pulse rate of student (beats per minute).
  `Clap`
      'Clap your hands! Which hand is on top?' (Factor, with levels
      `"Right"`, `"Left"`, `"Neither"`.)
  `Exer`
      how often the student exercises. (Factor, with levels `"Freq"`
      (frequently), `"Some"`, `"None"`.)
  `Smoke`
      how much the student smokes. (Factor, levels `"Heavy"`,
      `"Regul"` (regularly), `"Occas"` (occasionally), `"Never"`.)
  `Height`
      height of the student in centimetres.
  `M.I`
      whether the student expressed height in imperial (feet/inches) or
      metric (centimetres/metres) units. (Factor, levels `"Metric"`,
      `"Imperial"`.)
  `Age`
      age of the student in years.

  Args:
    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there.
      Filename is `survey.csv`.

  Returns:
    Tuple of np.ndarray `x_train` with 237 rows and 12 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd
  path = os.path.expanduser(path)
  filename = 'survey.csv'
  # Compute the full path once instead of re-joining it three times below.
  filepath = os.path.join(path, filename)
  if not os.path.exists(filepath):
    url = 'http://dustintran.com/data/r/MASS/survey.csv'
    maybe_download_and_extract(path, url,
                               save_file_name=filename,
                               resume=False)
  data = pd.read_csv(filepath, index_col=0, parse_dates=True)
  x_train = data.values
  metadata = {'columns': data.columns}
  return x_train, metadata
/DuHast-1.0.7-py3-none-any.whl/duHast/APISamples/RevitFamilyReloadAdvancedUtils.py |
from collections import namedtuple
from timer import Timer
import Result as res
import Utility as util
import RevitFamilyBaseDataUtils as rFamBaseDataUtils
# Row tuple for one changed family read from a family change-list file.
# Fields: family name (no .rfa), Revit category, fully qualified file path.
changedFamily = namedtuple('changedFamily', 'name category filePath')
# baseFamily is presumably provided by RevitFamilyBaseDataUtils (imported
# above) -- kept here commented out as a field reference for readers.
#baseFamily = namedtuple('baseFamily', 'name category rootPath filePath')
# Column indices of rows in the family change-list file.
_CHANGE_LIST_INDEX_FAMILY_NAME = 0
_CHANGE_LIST_INDEX_FAMILY_FILE_PATH = 1
_CHANGE_LIST_INDEX_CATEGORY = 2
# File-name prefix shared by all task output files written and deleted below.
_TASK_COUNTER_FILE_PREFIX = 'TaskOutput'
def WriteReloadListToFile(reloadFamilies, directoryPath, counter = 0):
    '''
    Writes a task list file to disk. The file contains a single column of
    fully qualified family file paths, one row per family, with no header.

    :param reloadFamilies: Families requiring their nested families to be re-loaded.
    :type reloadFamilies: [baseFamily]
    :param directoryPath: Fully qualified directory path to which the task file will be written.
    :type directoryPath: str
    :param counter: Task file name suffix, defaults to 0
    :type counter: int, optional

    :return: True if the file was written successfully, otherwise False.
    :rtype: bool
    '''
    taskFilePath = directoryPath + '\\' + _TASK_COUNTER_FILE_PREFIX + str(counter) + ".txt"
    # one single-column row per family: just the family file path
    rows = [[family.filePath] for family in reloadFamilies]
    try:
        # empty header list -> file is written without a header row
        util.writeReportData(taskFilePath, [], rows, writeType = 'w')
        return True
    except Exception:
        return False
def DeleteOldTaskLists(directoryPath):
    '''
    Deletes all overall task files in the given directory.

    :param directoryPath: Fully qualified directory path containing the task files to be deleted.
    :type directoryPath: str

    :return: True if all files got deleted successfully, otherwise False.
    :rtype: bool
    '''
    allDeleted = True
    # remove every text file whose name starts with the shared task prefix
    for filePath in util.GetFiles(directoryPath, '.txt'):
        if (util.GetFileNameWithoutExt(filePath).startswith(_TASK_COUNTER_FILE_PREFIX)):
            # FileDelete is evaluated first so a single failure never
            # short-circuits deletion of the remaining files
            allDeleted = util.FileDelete(filePath) and allDeleted
    return allDeleted
def WriteOutEmptyTaskList(directoryPath, counter = 0):
    '''
    Writes out an empty task list in case nothing is to be reloaded.

    :param directoryPath: Fully qualified directory path to which the task file will be written.
    :type directoryPath: str
    :param counter: Task file name suffix, defaults to 0
    :type counter: int, optional

    :return: True if file was written successfully, otherwise False.
    :rtype: bool
    '''
    # CONSISTENCY FIX: use the shared prefix constant (previously a hard
    # coded 'TaskOutput' literal) so DeleteOldTaskLists() and
    # WriteReloadListToFile() can never drift out of sync with this name.
    fileName = directoryPath + '\\' + _TASK_COUNTER_FILE_PREFIX + str(counter) + ".txt"
    # empty header and empty data: an intentionally blank task list
    header = []
    overallData = []
    try:
        # write data
        util.writeReportData(fileName, header, overallData, writeType = 'w')
        return True
    except Exception:
        return False
def _RemoveRFAFromFileName(familyName):
'''
Removes any .rfa file extensions from the family name. (not sure why these are sometimes present)
:param familyName: the family name
:type familyName: str
:return: the family name with out .rfa (if present in the first place.)
:rtype: str
'''
if(familyName.lower().endswith('.rfa')):
familyName = familyName[:-len('.rfa')]
return familyName
def ReadChangeList(filePath):
    '''
    Reads the list of changed families from file into named tuples.

    :param filePath: Fully qualified file path to change list file.
    :type filePath: str

    :raises Exception: "Changed families list files does not exist."
    :raises Exception: "Empty families list file!"

    :return: list of named tuples
    :rtype: [changedFamily]
    '''
    # guard clauses: missing file, then empty file
    if (util.FileExist(filePath) == False):
        raise Exception("Changed families list files does not exist.")
    rows = util.ReadCSVfile(filePath)
    if (len(rows) == 0):
        raise Exception("Empty families list file!")
    changedFamilies = []
    # the first row is the header row; convert every following row
    for row in rows[1:]:
        # strip any stray .rfa extension from the family name
        familyName = _RemoveRFAFromFileName(row[_CHANGE_LIST_INDEX_FAMILY_NAME])
        changedFamilies.append(
            changedFamily(
                familyName,
                row[_CHANGE_LIST_INDEX_CATEGORY],
                row[_CHANGE_LIST_INDEX_FAMILY_FILE_PATH]
            )
        )
    return changedFamilies
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ckeditor/README.md | CKEditor 4
==========
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
http://ckeditor.com - See LICENSE.md for license information.
CKEditor is a text editor to be used inside web pages. It's not a replacement
for desktop text editors like Word or OpenOffice, but a component to be used as
part of web applications and websites.
## Documentation
The full editor documentation is available online at the following address:
http://docs.ckeditor.com
## Installation
Installing CKEditor is an easy task. Just follow these simple steps:
1. **Download** the latest version from the CKEditor website:
http://ckeditor.com. You should have already completed this step, but be
sure you have the very latest version.
2. **Extract** (decompress) the downloaded file into the root of your website.
**Note:** CKEditor is by default installed in the `ckeditor` folder. You can
   place the files in whichever folder you want, though.
## Checking Your Installation
The editor comes with a few sample pages that can be used to verify that
installation proceeded properly. Take a look at the `samples` directory.
To test your installation, just open the following page on your website:
http://<your site>/<CKEditor installation path>/samples/index.html
For example:
http://www.example.com/ckeditor/samples/index.html
| PypiClean |
/GLManager-1.1.6.tar.gz/GLManager-1.1.6/bot/dealer/respository.py | from typing import List
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, Integer, String, DateTime, Boolean, ForeignKey, func
# Default database location: a SQLite file in the current working directory.
DB_URI = 'sqlite:///db.sqlite3'
# Shared declarative base; every ORM model below registers on its metadata.
BASE = declarative_base()
class DBConnection:
    """Context manager that opens a SQLAlchemy session against a database URI.

    Entering the context yields the live *Session* object (not this wrapper),
    which is how the repository classes below consume it::

        with DBConnection() as session:
            session.query(...)
    """

    def __init__(self, uri: str = DB_URI):
        self.__uri = uri
        self.__engine = create_engine(self.__uri)
        self.__session = None
        # Create any missing tables; a no-op when they already exist.
        BASE.metadata.create_all(self.__engine)

    @property
    def uri(self) -> str:
        return self.__uri

    @property
    def engine(self):
        return self.__engine

    @property
    def session(self):
        # The live Session instance (None until __enter__ has been called).
        # FIX: the previous "-> sessionmaker" annotation was wrong -- this
        # holds a Session produced by the sessionmaker, not the factory.
        return self.__session

    def __enter__(self):
        # FIX: the previous "-> 'DBConnection'" annotation was wrong; this
        # returns the Session, which every caller binds via "as session".
        self.__session = sessionmaker()(bind=self.engine)
        return self.__session

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        self.session.close()
class Model(BASE):
    """Abstract base model adding created/updated timestamp columns to every table."""
    __abstract__ = True
    # Set once when the row is inserted.
    created_at = Column(DateTime, nullable=False, default=func.now())
    # Refreshed automatically on every UPDATE via the onupdate hook.
    updated_at = Column(DateTime, nullable=False, default=func.now(), onupdate=func.now())
class Dealer(Model):
    """A dealer (reseller) account with an account-creation quota and expiry date."""
    __tablename__ = 'dealers'
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    username = Column(String, nullable=False)
    # Quota of accounts this dealer may create. Defaults to 0 -- presumably
    # meaning "no accounts allowed" rather than unlimited; confirm in callers.
    account_creation_limit = Column(Integer, nullable=False, default=0)
    # Defaults to "now", i.e. the dealer is expired until explicitly extended.
    expires_at = Column(DateTime, nullable=False, default=func.now())
    active = Column(Boolean, nullable=False, default=True)

    def to_dict(self) -> dict:
        """Serialize to a plain dict; ``expires_at`` is formatted as dd/mm/YYYY."""
        return {
            'id': self.id,
            'name': self.name,
            'username': self.username,
            'account_creation_limit': self.account_creation_limit,
            'expires_at': self.expires_at.strftime('%d/%m/%Y'),
            'active': self.active,
        }
class Account(Model):
    """An account record owned by a dealer (see the foreign key)."""
    __tablename__ = 'accounts'
    id = Column(Integer, primary_key=True)
    # Owning dealer; rows are looked up per dealer by the repositories below.
    dealer_id = Column(Integer, ForeignKey('dealers.id'), nullable=False)
class DealerRepository:
    """CRUD helpers for ``Dealer`` rows; every call opens its own session."""

    def create(
        self,
        id: int,
        name: str,
        username: str,
        account_creation_limit: int,
        expires_at: str,
    ) -> Dealer:
        """Insert a new dealer and return the refreshed instance."""
        record = Dealer(
            id=id,
            name=name,
            username=username,
            account_creation_limit=account_creation_limit,
            expires_at=expires_at,
        )
        with DBConnection() as session:
            session.add(record)
            session.commit()
            session.refresh(record)
        return record

    def get_by_id(self, id: int) -> Dealer:
        """Return the dealer with the given primary key, or None."""
        with DBConnection() as session:
            query = session.query(Dealer).filter(Dealer.id == id)
            return query.first()

    def get_by_username(self, username: str) -> Dealer:
        """Return the dealer with the given username, or None."""
        with DBConnection() as session:
            query = session.query(Dealer).filter(Dealer.username == username)
            return query.first()

    def get_all(self) -> list:
        """Return every dealer row."""
        with DBConnection() as session:
            return session.query(Dealer).all()

    def update(self, dealer: Dealer) -> Dealer:
        """Merge the (possibly detached) dealer into a session, commit, return it."""
        with DBConnection() as session:
            session.merge(dealer)
            session.commit()
        return dealer

    def delete(self, dealer: Dealer) -> Dealer:
        """Delete the dealer row and return the (now detached) instance."""
        with DBConnection() as session:
            session.delete(dealer)
            session.commit()
        return dealer
class AccountRepository:
    """CRUD helpers for ``Account`` rows; every call opens its own session."""

    def create(self, account_id: int, dealer_id: int) -> Account:
        """Insert a new account row and return the refreshed instance."""
        record = Account(
            id=account_id,
            dealer_id=dealer_id,
        )
        with DBConnection() as session:
            session.add(record)
            session.commit()
            session.refresh(record)
        return record

    def get_by_id(self, dealer_id: int, id: int) -> Account:
        """Return the account with this id belonging to the given dealer, or None."""
        with DBConnection() as session:
            query = session.query(Account).filter(
                Account.id == id, Account.dealer_id == dealer_id
            )
            return query.first()

    def get_by_dealer_id(self, dealer_id: int) -> List[Account]:
        """Return every account owned by the given dealer."""
        with DBConnection() as session:
            query = session.query(Account).filter(Account.dealer_id == dealer_id)
            return query.all()

    def get_all(self) -> list:
        """Return every account row."""
        with DBConnection() as session:
            return session.query(Account).all()

    def update(self, account: Account) -> Account:
        """Merge the (possibly detached) account into a session, commit, return it."""
        with DBConnection() as session:
            session.merge(account)
            session.commit()
        return account

    def delete(self, account: Account) -> Account:
        """Delete the account row and return the (now detached) instance."""
        with DBConnection() as session:
            session.delete(account)
            session.commit()
        return account
/Djblets-3.3.tar.gz/Djblets-3.3/djblets/htdocs/static/djblets/js/config-forms.min.c5e2ea705623.js | (function(){window.Djblets=window.Djblets||{};Djblets.Config={};"use strict";Djblets.Config.ListItems=Backbone.Collection.extend({initialize(models,options){this.options=options},fetch(options){this.trigger("fetching");Backbone.Collection.prototype.fetch.call(this,options)}});"use strict";Djblets.Config.ListItem=Backbone.Model.extend({defaults:{text:null,editURL:null,showRemove:false,canRemove:true,loading:false,removeLabel:gettext("Remove"),itemState:null},itemStateTexts:{disabled:gettext("Disabled"),enabled:gettext("Enabled"),error:gettext("Error")},initialize(){let options=arguments.length>0&&arguments[0]!==undefined?arguments[0]:{};this.actions=options.actions||[];if(this.get("showRemove")){this.actions.push({id:"delete",label:this.get("removeLabel"),danger:true,enabled:this.get("canRemove")})}},setActions(actions){this.actions=actions;this.trigger("actionsChanged")}});Djblets.Config.List=Backbone.Model.extend();"use strict";Djblets.Config.ListItemView=Backbone.View.extend({tagName:"li",className:"djblets-c-config-forms-list__item",iconBaseClassName:"djblets-icon",itemStateClasses:{disabled:"-is-disabled",enabled:"-is-enabled",error:"-has-error"},actionHandlers:{},template:_.template(`<% if (editURL) { %>
<a href="<%- editURL %>"><%- text %></a>
<% } else { %>
<%- text %>
<% } %>`),initialize(){this.listenTo(this.model,"actionsChanged",this.render);this.listenTo(this.model,"request",this.showSpinner);this.listenTo(this.model,"sync",this.hideSpinner);this.listenTo(this.model,"destroy",this.remove);this.$spinnerParent=null;this.$spinner=null},render(){const model=this.model;this.$el.empty().append(this.template(_.defaults(model.attributes,this.getRenderContext())));this._$itemState=this.$(".djblets-c-config-forms-list__item-state");this.listenTo(model,"change:itemState",this._onItemStateChanged);this._onItemStateChanged();this.addActions(this.getActionsParent());return this},getRenderContext(){return{}},remove(){this.$el.fadeOut("normal",()=>Backbone.View.prototype.remove.call(this))},getActionsParent(){return this.$el},showSpinner(){if(this.$spinner){return}this.$el.attr("aria-busy","true");this.$spinner=$("<span>").addClass("djblets-o-spinner").attr("aria-hidden","true").prependTo(this.$spinnerParent).hide().css("visibility","visible").fadeIn()},hideSpinner(){if(!this.$spinner){return}this.$spinner.fadeOut("slow",()=>{this.$spinner.remove();this.$spinner=null});this.$el.removeAttr("aria-busy")},addActions($parentEl){const $actions=$("<span>").addClass("djblets-c-config-forms-list__item-actions");this.model.actions.forEach(action=>{const $action=this._buildActionEl(action).appendTo($actions);if(action.children){if(action.label){$action.append(" ▾")}$action.click(()=>_.defer(()=>this._showActionDropdown(action,$action)))}});this.$spinnerParent=$actions;$actions.prependTo($parentEl)},_showActionDropdown(action,$action){const actionPos=$action.position();const $menu=$("<div/>").css({minWidth:$action.outerWidth(),position:"absolute"}).addClass("djblets-c-config-forms-popup-menu").click(e=>e.stopPropagation());const $items=$("<ul/>").addClass("djblets-c-config-forms-popup-menu__items").appendTo($menu);const 
actionLeft=actionPos.left+$action.getExtents("m","l");action.children.forEach(childAction=>$("<li/>").addClass("djblets-c-config-forms-popup-menu__item "+`config-forms-list-action-row-${childAction.id}`).append(this._buildActionEl(childAction)).appendTo($items));this.trigger("actionMenuPopUp",{action:action,$action:$action,$menu:$menu});$menu.appendTo($action.parent());const winWidth=$(window).width();const paneWidth=$menu.width();$menu.move($action.offset().left+paneWidth>winWidth?actionLeft+$action.innerWidth()-paneWidth:actionLeft,actionPos.top+$action.outerHeight(),"absolute");$(document).one("click",()=>{this.trigger("actionMenuPopDown",{action:action,$action:$action,$menu:$menu});$menu.remove()})},_buildActionEl(action){const enabled=action.enabled!==false;const actionHandlerName=enabled?this.actionHandlers[action.id]:null;const isCheckbox=action.type==="checkbox";const isRadio=action.type==="radio";let $action;let $result;if(isCheckbox||isRadio){const inputID=_.uniqueId("action_"+action.type);$action=$("<input/>").attr({name:action.name,type:action.type,id:inputID});const $label=$("<label>").attr("for",inputID).text(action.label);if(action.id){$label.addClass(`config-forms-list-action-label-${action.id}`)}$result=$("<span/>").append($action).append($label);if(action.propName){if(isCheckbox){$action.bindProperty("checked",this.model,action.propName)}else if(isRadio){$action.bindProperty("checked",this.model,action.propName,{radioValue:action.radioValue})}}if(action.enabledPropName){$action.bindProperty("disabled",this.model,action.enabledPropName,{inverse:action.enabledPropInverse!==true})}if(actionHandlerName){const actionHandler=_.debounce(_.bind(this[actionHandlerName],this),50,true);$action.change(actionHandler);if(isRadio&&action.dispatchOnClick){$action.click(actionHandler)}}}else{if(action.url){$action=$('<a class="btn" role="button">').attr("href",action.url)}else{$action=$('<button 
type="button">')}$result=$action;if(action.label){$action.text(action.label)}if(action.ariaLabel){$action.attr("aria-label",action.ariaLabel)}if(action.iconName){$action.prepend($("<span>").addClass(this.iconBaseClassName).addClass(`${this.iconBaseClassName}-${action.iconName}`))}if(actionHandlerName){$action.click(evt=>{evt.preventDefault();evt.stopPropagation();this._onActionButtonClicked(evt,actionHandlerName,$action)})}}$action.addClass("djblets-c-config-forms-list__item-action");if(action.id){$action.addClass(`config-forms-list-action-${action.id}`)}if(action.danger){$action.addClass("-is-danger")}if(action.primary){$action.addClass("-is-primary")}if(!enabled){$action.prop("disabled",true)}return $result},_onItemStateChanged(){const model=this.model;const oldItemState=model.previous("itemState");const itemState=model.get("itemState");if(oldItemState){this.$el.removeClass(this.itemStateClasses[oldItemState])}if(itemState){this.$el.addClass(this.itemStateClasses[itemState]);this._$itemState.text(model.itemStateTexts[itemState])}},_onActionButtonClicked(evt,actionHandlerName,$action){const promise=this[actionHandlerName].call(this,evt);if(promise&&typeof promise.then==="function"){$action.prop("disabled",true);const childrenHTML=$action.html();$action.empty();const $spinner=$('<span class="djblets-o-spinner">').appendTo($action);promise.finally(()=>{$spinner.remove();$action.html(childrenHTML);$action.prop("disabled",false)})}}});"use strict";Djblets.Config.ListView=Backbone.View.extend({tagName:"ul",className:"djblets-c-config-forms-list",defaultItemView:Djblets.Config.ListItemView,initialize(){let options=arguments.length>0&&arguments[0]!==undefined?arguments[0]:{};const 
collection=this.model.collection;this.ItemView=options.ItemView||this.defaultItemView;this.views=[];this.animateItems=!!options.animateItems;this.once("rendered",()=>{this.listenTo(collection,"add",this._addItem);this.listenTo(collection,"remove",this._removeItem);this.listenTo(collection,"reset",this._renderItems)})},getBody(){return this.$el},render(){this.$listBody=this.getBody();this._renderItems();this.trigger("rendered");return this},_addItem(item,collection){let options=arguments.length>2&&arguments[2]!==undefined?arguments[2]:{};const animateItem=options.animate!==false;const view=new this.ItemView({model:item});view.render();if(this.animateItems&&animateItem){view.$el.fadeIn()}this.$listBody.append(view.$el);this.views.push(view)},_removeItem(item,collection){let options=arguments.length>2&&arguments[2]!==undefined?arguments[2]:{};const animateItem=options.animate!==false;const view=_.find(this.views,view=>view.model===item);if(view){this.views=_.without(this.views,view);if(this.animateItems&&animateItem){view.$el.fadeOut(function(){view.remove()})}else{view.remove()}}},_renderItems(){this.views.forEach(function(view){view.remove()});this.views=[];this.$listBody.empty();this.model.collection.each(item=>{this._addItem(item,item.collection,{animate:false})})}});"use strict";Djblets.Config.PagesView=Backbone.View.extend({initialize(){this.router=new Backbone.Router({routes:{":pageID":"page"}});this.listenTo(this.router,"route:page",this._onPageChanged);this._$activeNav=null;this._$activePage=null;this._preserveMessages=true},render(){this._$pageNavs=this.$(".djblets-c-config-forms-page-nav__item");this._$pages=this.$(".djblets-c-config-forms-subpage");this._$activeNav=this._$pageNavs.eq(0).addClass("-is-active");this._$activePage=this._$pages.eq(0).addClass("-is-active");Backbone.history.start({root:window.location.pathname});return 
this},_onPageChanged(pageID){this._$activeNav.removeClass("-is-active");this._$activePage.removeClass("-is-active");this._$activePage=$(`#page_${pageID}`);if(this._$activePage.length===0){this.router.navigate(this._$pageNavs.find("a").attr("href").substr(1),{trigger:true,replace:true})}else{this._$activeNav=this._$pageNavs.filter(`:has(a[href="#${pageID}"])`).addClass("-is-active");this._$activePage.addClass("-is-active");if(!this._preserveMessages){$("#messages").remove()}this._preserveMessages=false}}});"use strict";Djblets.Config.TableItemView=Djblets.Config.ListItemView.extend({tagName:"tr",template:_.template(`<td>
<% if (editURL) { %>
<a href="<%- editURL %>"><%- text %></a>
<% } else { %>
<%- text %>
<% } %>
</td>`),getActionsParent(){return this.$("td:last")}});"use strict";Djblets.Config.TableView=Djblets.Config.ListView.extend({tagName:"table",defaultItemView:Djblets.Config.TableItemView,render(){const $body=this.getBody();if($body.length===0){this.$el.append("<tbody>")}return Djblets.Config.ListView.prototype.render.call(this)},getBody(){return this.$("tbody")}})}).call(this); | PypiClean |
/BIA_OBS-1.0.3.tar.gz/BIA_OBS-1.0.3/BIA/static/dist/node_modules/minimist/index.js | module.exports = function (args, opts) {
if (!opts) opts = {};
var flags = { bools : {}, strings : {}, unknownFn: null };
if (typeof opts['unknown'] === 'function') {
flags.unknownFn = opts['unknown'];
}
if (typeof opts['boolean'] === 'boolean' && opts['boolean']) {
flags.allBools = true;
} else {
[].concat(opts['boolean']).filter(Boolean).forEach(function (key) {
flags.bools[key] = true;
});
}
var aliases = {};
Object.keys(opts.alias || {}).forEach(function (key) {
aliases[key] = [].concat(opts.alias[key]);
aliases[key].forEach(function (x) {
aliases[x] = [key].concat(aliases[key].filter(function (y) {
return x !== y;
}));
});
});
[].concat(opts.string).filter(Boolean).forEach(function (key) {
flags.strings[key] = true;
if (aliases[key]) {
flags.strings[aliases[key]] = true;
}
});
var defaults = opts['default'] || {};
var argv = { _ : [] };
Object.keys(flags.bools).forEach(function (key) {
setArg(key, defaults[key] === undefined ? false : defaults[key]);
});
var notFlags = [];
if (args.indexOf('--') !== -1) {
notFlags = args.slice(args.indexOf('--')+1);
args = args.slice(0, args.indexOf('--'));
}
function argDefined(key, arg) {
return (flags.allBools && /^--[^=]+$/.test(arg)) ||
flags.strings[key] || flags.bools[key] || aliases[key];
}
function setArg (key, val, arg) {
if (arg && flags.unknownFn && !argDefined(key, arg)) {
if (flags.unknownFn(arg) === false) return;
}
var value = !flags.strings[key] && isNumber(val)
? Number(val) : val
;
setKey(argv, key.split('.'), value);
(aliases[key] || []).forEach(function (x) {
setKey(argv, x.split('.'), value);
});
}
function setKey (obj, keys, value) {
var o = obj;
for (var i = 0; i < keys.length-1; i++) {
var key = keys[i];
if (isConstructorOrProto(o, key)) return;
if (o[key] === undefined) o[key] = {};
if (o[key] === Object.prototype || o[key] === Number.prototype
|| o[key] === String.prototype) o[key] = {};
if (o[key] === Array.prototype) o[key] = [];
o = o[key];
}
var key = keys[keys.length - 1];
if (isConstructorOrProto(o, key)) return;
if (o === Object.prototype || o === Number.prototype
|| o === String.prototype) o = {};
if (o === Array.prototype) o = [];
if (o[key] === undefined || flags.bools[key] || typeof o[key] === 'boolean') {
o[key] = value;
}
else if (Array.isArray(o[key])) {
o[key].push(value);
}
else {
o[key] = [ o[key], value ];
}
}
function aliasIsBoolean(key) {
return aliases[key].some(function (x) {
return flags.bools[x];
});
}
for (var i = 0; i < args.length; i++) {
var arg = args[i];
if (/^--.+=/.test(arg)) {
// Using [\s\S] instead of . because js doesn't support the
// 'dotall' regex modifier. See:
// http://stackoverflow.com/a/1068308/13216
var m = arg.match(/^--([^=]+)=([\s\S]*)$/);
var key = m[1];
var value = m[2];
if (flags.bools[key]) {
value = value !== 'false';
}
setArg(key, value, arg);
}
else if (/^--no-.+/.test(arg)) {
var key = arg.match(/^--no-(.+)/)[1];
setArg(key, false, arg);
}
else if (/^--.+/.test(arg)) {
var key = arg.match(/^--(.+)/)[1];
var next = args[i + 1];
if (next !== undefined && !/^-/.test(next)
&& !flags.bools[key]
&& !flags.allBools
&& (aliases[key] ? !aliasIsBoolean(key) : true)) {
setArg(key, next, arg);
i++;
}
else if (/^(true|false)$/.test(next)) {
setArg(key, next === 'true', arg);
i++;
}
else {
setArg(key, flags.strings[key] ? '' : true, arg);
}
}
else if (/^-[^-]+/.test(arg)) {
var letters = arg.slice(1,-1).split('');
var broken = false;
for (var j = 0; j < letters.length; j++) {
var next = arg.slice(j+2);
if (next === '-') {
setArg(letters[j], next, arg)
continue;
}
if (/[A-Za-z]/.test(letters[j]) && /=/.test(next)) {
setArg(letters[j], next.split('=')[1], arg);
broken = true;
break;
}
if (/[A-Za-z]/.test(letters[j])
&& /-?\d+(\.\d*)?(e-?\d+)?$/.test(next)) {
setArg(letters[j], next, arg);
broken = true;
break;
}
if (letters[j+1] && letters[j+1].match(/\W/)) {
setArg(letters[j], arg.slice(j+2), arg);
broken = true;
break;
}
else {
setArg(letters[j], flags.strings[letters[j]] ? '' : true, arg);
}
}
var key = arg.slice(-1)[0];
if (!broken && key !== '-') {
if (args[i+1] && !/^(-|--)[^-]/.test(args[i+1])
&& !flags.bools[key]
&& (aliases[key] ? !aliasIsBoolean(key) : true)) {
setArg(key, args[i+1], arg);
i++;
}
else if (args[i+1] && /^(true|false)$/.test(args[i+1])) {
setArg(key, args[i+1] === 'true', arg);
i++;
}
else {
setArg(key, flags.strings[key] ? '' : true, arg);
}
}
}
else {
if (!flags.unknownFn || flags.unknownFn(arg) !== false) {
argv._.push(
flags.strings['_'] || !isNumber(arg) ? arg : Number(arg)
);
}
if (opts.stopEarly) {
argv._.push.apply(argv._, args.slice(i + 1));
break;
}
}
}
Object.keys(defaults).forEach(function (key) {
if (!hasKey(argv, key.split('.'))) {
setKey(argv, key.split('.'), defaults[key]);
(aliases[key] || []).forEach(function (x) {
setKey(argv, x.split('.'), defaults[key]);
});
}
});
if (opts['--']) {
argv['--'] = new Array();
notFlags.forEach(function(key) {
argv['--'].push(key);
});
}
else {
notFlags.forEach(function(key) {
argv._.push(key);
});
}
return argv;
};
// Walk a dotted key path (already split into `keys`) through nested objects
// and report whether the final segment exists on the innermost object.
// Missing intermediate levels are treated as empty objects, so the walk
// never throws.
function hasKey (obj, keys) {
    var cursor = obj;
    for (var i = 0; i < keys.length - 1; i++) {
        cursor = cursor[keys[i]] || {};
    }
    var last = keys[keys.length - 1];
    return last in cursor;
}
// Decide whether `x` is numeric: an actual number, a hex literal string,
// or a decimal/scientific-notation string. Non-string, non-number inputs
// are coerced to strings by the regex test, exactly as before.
function isNumber (x) {
    var hexPattern = /^0x[0-9a-f]+$/i;
    var decimalPattern = /^[-+]?(?:\d+(?:\.\d*)?|\.\d+)(e[-+]?\d+)?$/;
    return typeof x === 'number'
        || hexPattern.test(x)
        || decimalPattern.test(x);
}
// Prototype-pollution guard: reject assignments through '__proto__', and
// through 'constructor' when it still resolves to the real constructor
// function on `obj`.
function isConstructorOrProto (obj, key) {
    if (key === '__proto__') return true;
    return key === 'constructor' && typeof obj[key] === 'function';
}
/BUPTNetLogin-0.1.0.tar.gz/BUPTNetLogin-0.1.0/app/bupt_net_login.py |
import argparse
import requests as requests
from lxml import etree
from app import version
from app.line_option import LineOption
class BUPTNetLogin:
    """Client for the BUPT campus-network gateway (ngw.bupt.edu.cn).

    Wraps the three gateway endpoints: login, logout, and the HTML
    response parsing that extracts error/notice messages for display.
    """

    def __init__(self):
        # Gateway base URL and the two endpoints derived from it.
        self.ngw = "http://ngw.bupt.edu.cn"
        self.ngw_login = self.ngw + "/login"
        self.ngw_logout = self.ngw + "/logout"

    def login(self, username, password, line):
        """POST the credentials and chosen line to the gateway, then print
        whatever errors/notices the gateway's HTML response contains.

        NOTE(review): credentials are sent as URL query parameters over
        plain HTTP — presumably what the gateway expects, but verify.
        """
        payload = {
            "user": username,
            "pass": password,
            "line": line,
        }
        response = requests.post(self.ngw_login, params=payload)
        self.parse_login_response(response)

    @staticmethod
    def parse_login_response(response):
        """Scrape error banners and notice text out of the gateway's HTML."""
        document = etree.HTML(response.text)
        errors = document.xpath("//div[contains(@class, 'error message')]")
        if errors:
            print("[错误]", end=" ")
            for error in errors:
                print(error.text.strip(), end="。")
        # The notice container's location is hard-coded to the gateway's
        # current page layout — brittle if the page changes.
        for notices in document.xpath("/html/body/div[2]/div/div[2]/div/div[2]/div[1]/div[2]"):
            for notice in notices:
                print(notice.text.strip())

    def logout(self):
        """GET the logout endpoint; report success on HTTP 200."""
        response = requests.get(self.ngw_logout)
        if response.status_code == 200:
            print("成功注销北邮校园网网关")
def enter():
    """Command-line entry point: parse options and dispatch to the client.

    Exactly one action runs per invocation: login (when a line is chosen
    and both credentials are present), logout, version, or help.
    """
    parser = argparse.ArgumentParser(description="北邮校园网网关登陆工具")
    parser.add_argument("-l", "--login", dest="line", action="store",
                        choices={"xyw", "lt", "yd", "dx"},
                        help="登陆北邮校园网网关,LINE可用参数 xyw(校园网)、lt(联通)、yd(移动)、dx(电信)")
    parser.add_argument("-u", "--username", dest="username", action="store", help="校园网账户名称")
    parser.add_argument("-p", "--password", dest="password", action="store", help="校园网账户密码")
    parser.add_argument("-lo", "--logout", dest="logout", action="store_true", help="注销北邮校园网网关")
    parser.add_argument("-v", "--version", dest="version", action="store_true", help="版本信息")
    namespace = parser.parse_args()

    client = BUPTNetLogin()
    if namespace.line:
        # Map the CLI line code (e.g. "xyw") to the gateway's line value.
        selected_line = LineOption[namespace.line.upper()].value
        if namespace.username and namespace.password:
            client.login(namespace.username, namespace.password, selected_line)
        # As before: a line without full credentials silently does nothing.
        return
    if namespace.logout:
        client.logout()
        return
    if namespace.version:
        print(version)
        return
    parser.print_help()
# Script entry point: delegate to the CLI handler.
if __name__ == '__main__':
    enter()
/Messenger_client_dmitry_vokh-1.0.0.tar.gz/Messenger_client_dmitry_vokh-1.0.0/client/database.py | import datetime
from common.variables import *
from sqlalchemy import create_engine, Table, Column, Integer, String, Text, MetaData, DateTime
from sqlalchemy.orm import mapper, sessionmaker
from sqlalchemy.sql import default_comparator
import os
class ClientDatabase:
    '''
    Wrapper class for the client-side database.
    Uses an SQLite database, implemented with SQLAlchemy ORM
    using the classical (imperative) mapping approach.
    '''

    class KnownUsers:
        '''
        Mapped class for the table of all known users.
        '''

        def __init__(self, user):
            self.id = None  # primary key, assigned by the database
            self.username = user

    class MessageStat:
        '''
        Mapped class for the table of transferred-message statistics.
        '''

        def __init__(self, contact, direction, message):
            self.id = None  # primary key, assigned by the database
            self.contact = contact
            self.direction = direction  # 'in' / 'out' per the debug usage below
            self.message = message
            self.date = datetime.datetime.now()

    class Contacts:
        '''
        Mapped class for the contacts table.
        '''

        def __init__(self, contact):
            self.id = None  # primary key, assigned by the database
            self.name = contact

    # Class constructor:
    def __init__(self, name):
        # Create the database engine. Since several clients may run at
        # the same time, each one gets its own database file.
        # The client is multi-threaded, so same-thread connection checks
        # must be disabled, otherwise sqlite3.ProgrammingError is raised.
        path = os.getcwd()
        filename = f'client_{name}.db3'
        self.database_engine = create_engine(
            f'sqlite:///{os.path.join(path, filename)}',
            echo=False,
            pool_recycle=7200,
            connect_args={
                'check_same_thread': False})

        # Create the MetaData object
        self.metadata = MetaData()

        # Table of known users
        users = Table('known_users', self.metadata,
                      Column('id', Integer, primary_key=True),
                      Column('username', String)
                      )

        # Message-history table
        history = Table('message_history', self.metadata,
                        Column('id', Integer, primary_key=True),
                        Column('contact', String),
                        Column('direction', String),
                        Column('message', Text),
                        Column('date', DateTime)
                        )

        # Contacts table
        contacts = Table('contacts', self.metadata,
                         Column('id', Integer, primary_key=True),
                         Column('name', String, unique=True)
                         )

        # Create the tables
        self.metadata.create_all(self.database_engine)

        # Register the classical mappers (class <-> table)
        mapper(self.KnownUsers, users)
        mapper(self.MessageStat, history)
        mapper(self.Contacts, contacts)

        # Create the session
        Session = sessionmaker(bind=self.database_engine)
        self.session = Session()

        # The contacts table must be cleared, because contacts are
        # re-fetched from the server on startup.
        self.session.query(self.Contacts).delete()
        self.session.commit()

    def add_contact(self, contact):
        '''Add a contact to the database (no-op if it already exists).'''
        if not self.session.query(
                self.Contacts).filter_by(
                name=contact).count():
            contact_row = self.Contacts(contact)
            self.session.add(contact_row)
            self.session.commit()

    def contacts_clear(self):
        '''Clear the contacts table.'''
        self.session.query(self.Contacts).delete()

    def del_contact(self, contact):
        '''Delete the given contact.'''
        self.session.query(self.Contacts).filter_by(name=contact).delete()

    def add_users(self, users_list):
        '''Replace the known-users table with the given list.'''
        self.session.query(self.KnownUsers).delete()
        for user in users_list:
            user_row = self.KnownUsers(user)
            self.session.add(user_row)
        self.session.commit()

    def save_message(self, contact, direction, message):
        '''Store a message in the database.'''
        message_row = self.MessageStat(contact, direction, message)
        self.session.add(message_row)
        self.session.commit()

    def get_contacts(self):
        '''Return the list of all contact names.'''
        return [contact[0]
                for contact in self.session.query(self.Contacts.name).all()]

    def get_users(self):
        '''Return the list of all known user names.'''
        return [user[0]
                for user in self.session.query(self.KnownUsers.username).all()]

    def check_user(self, user):
        '''Return True if the given user exists.'''
        if self.session.query(
                self.KnownUsers).filter_by(
                username=user).count():
            return True
        else:
            return False

    def check_contact(self, contact):
        '''Return True if the given contact exists.'''
        if self.session.query(self.Contacts).filter_by(name=contact).count():
            return True
        else:
            return False

    def get_history(self, contact):
        '''Return the message history with the given contact as
        (contact, direction, message, date) tuples.'''
        query = self.session.query(
            self.MessageStat).filter_by(
            contact=contact)
        return [(history_row.contact,
                 history_row.direction,
                 history_row.message,
                 history_row.date) for history_row in query.all()]
# Debug / manual smoke test (commented-out lines exercise the other methods)
if __name__ == '__main__':
    test_db = ClientDatabase('test1')
    # for i in ['test3', 'test4', 'test5']:
    #     test_db.add_contact(i)
    # test_db.add_contact('test4')
    # test_db.add_users(['test1', 'test2', 'test3', 'test4', 'test5'])
    # test_db.save_message('test2', 'in', f'Привет! я тестовое сообщение от {datetime.datetime.now()}!')
    # test_db.save_message('test2', 'out', f'Привет! я другое тестовое сообщение от {datetime.datetime.now()}!')
    # print(test_db.get_contacts())
    # print(test_db.get_users())
    # print(test_db.check_user('test1'))
    # print(test_db.check_user('test10'))
    print(sorted(test_db.get_history('test2'), key=lambda item: item[3]))
    # test_db.del_contact('test4')
    # print(test_db.get_contacts())
/Fumagalli_Motta_Tarantino_2020-0.5.3.tar.gz/Fumagalli_Motta_Tarantino_2020-0.5.3/Fumagalli_Motta_Tarantino_2020/Notebooks/Interactive.ipynb | ```
%matplotlib notebook
%config InlineBackend.figure_format ='svg'
from ipywidgets import Layout, widgets
from IPython.display import clear_output, display
import Fumagalli_Motta_Tarantino_2020 as FMT20
def create_float_input(
    value: float, description: str, min_value=0.0, max_value=1.0, step=0.01
) -> widgets.BoundedFloatText:
    """Build a bounded float-input widget for one model parameter."""
    settings = {
        "value": value,
        "min": min_value,
        "max": max_value,
        "step": step,
        "description": description,
        "disabled": False,
    }
    return widgets.BoundedFloatText(**settings)
def get_merger_policy(policy: str) -> FMT20.MergerPolicies:
    """Translate a human-readable policy label into the FMT20 enum member."""
    labels = {
        "Strict": FMT20.MergerPolicies.Strict,
        "Intermediate (more lenient than strict)":
            FMT20.MergerPolicies.Intermediate_late_takeover_prohibited,
        "Intermediate (stricter than laissez-faire)":
            FMT20.MergerPolicies.Intermediate_late_takeover_allowed,
    }
    # Any unrecognized label falls back to laissez-faire, as before.
    return labels.get(policy, FMT20.MergerPolicies.Laissez_faire)
# Keyword arguments forwarded to Overview.plot() in draw_figure below.
plot_kwargs = {
    "figure_title": "",
    "thresholds": True,
    "optimal_policy": True,
    "y_offset": -35,
    "fontsize": 9,
    "parameter_number": 4,
}
# Dropdown of available model variants, labelled via NotebookUtilities.
model = widgets.Dropdown(
    options=[
        FMT20.NotebookUtilities.get_model_label(FMT20.OptimalMergerPolicy),
        FMT20.NotebookUtilities.get_model_label(FMT20.ProCompetitive),
        FMT20.NotebookUtilities.get_model_label(FMT20.ResourceWaste),
        FMT20.NotebookUtilities.get_model_label(FMT20.PerfectInformation),
    ],
)
# Dropdown of merger-policy regimes.
merger_policy = widgets.Dropdown(
    options=[
        FMT20.MergerPolicies.Strict,
        FMT20.MergerPolicies.Intermediate_late_takeover_prohibited,
        FMT20.MergerPolicies.Intermediate_late_takeover_allowed,
        FMT20.MergerPolicies.Laissez_faire,
    ],
)
# One bounded float input per model parameter (labels use LaTeX symbols).
development_costs = create_float_input(0.1, "$K$")
startup_assets = create_float_input(0.05, "$A$")
success_probability = create_float_input(0.7, "$p$")
private_benefit = create_float_input(0.05, "$B$")
consumer_surplus_without_innovation = create_float_input(0.2, "$CS^m$")
incumbent_profit_without_innovation = create_float_input(0.4, "$\\pi^m_I$")
consumer_surplus_duopoly = create_float_input(0.5, "$CS^d$")
incumbent_profit_duopoly = create_float_input(0.2, "$\\pi^d_I$")
startup_profit_duopoly = create_float_input(0.2, "$\\pi^d_S$")
consumer_surplus_with_innovation = create_float_input(0.3, "$CS^M$")
incumbent_profit_with_innovation = create_float_input(0.5, "$\\pi^M_I$")
development_outcome = widgets.Checkbox(
    value=True, description="Attempted development successful?"
)
# Preset parameter configurations shipped with the package.
configurations = widgets.Dropdown(
    description="Configuration",
    options=FMT20.NotebookUtilities.get_configurations(),
)
button = widgets.Button(description="draw", layout=Layout(width="300px", height="auto"))
# Layout order of all inputs in the 3-column grid below.
input_widgets = [
    development_outcome,
    development_costs,
    startup_assets,
    success_probability,
    private_benefit,
    consumer_surplus_without_innovation,
    incumbent_profit_without_innovation,
    consumer_surplus_duopoly,
    incumbent_profit_duopoly,
    startup_profit_duopoly,
    consumer_surplus_with_innovation,
    incumbent_profit_with_innovation,
    model,
    merger_policy,
    configurations,
    button,
]
grid = widgets.GridBox(
    input_widgets, layout=widgets.Layout(grid_template_columns="repeat(3, 310px)")
)
def set_configuration(e):
    """Load the selected preset configuration into every parameter widget.

    The dropdown label has the form "<config_id> - <model label>".
    """
    label_parts = configurations.value.split(" - ")
    config = FMT20.LoadParameters(config_id=int(label_parts[0]))
    model.value = label_parts[1]
    # Each widget paired with the key its value is stored under.
    widget_params = (
        (development_costs, "development_costs"),
        (startup_assets, "startup_assets"),
        (success_probability, "success_probability"),
        (private_benefit, "private_benefit"),
        (consumer_surplus_without_innovation, "consumer_surplus_without_innovation"),
        (incumbent_profit_without_innovation, "incumbent_profit_without_innovation"),
        (consumer_surplus_duopoly, "consumer_surplus_duopoly"),
        (incumbent_profit_duopoly, "incumbent_profit_duopoly"),
        (startup_profit_duopoly, "startup_profit_duopoly"),
        (consumer_surplus_with_innovation, "consumer_surplus_with_innovation"),
        (incumbent_profit_with_innovation, "incumbent_profit_with_innovation"),
    )
    for widget, param_name in widget_params:
        widget.value = config.params.get(param_name)
def draw_figure(e):
    """Instantiate the selected model from the widget values and render it.

    AssertionError from the model's parameter validation is shown to the
    user; an unknown model label raises NotImplementedError.
    """
    model_kwargs = {
        "merger_policy": merger_policy.value,
        "development_costs": development_costs.value,
        "startup_assets": startup_assets.value,
        "development_success": development_outcome.value,
        "success_probability": success_probability.value,
        "private_benefit": private_benefit.value,
        "consumer_surplus_without_innovation": consumer_surplus_without_innovation.value,
        "incumbent_profit_without_innovation": incumbent_profit_without_innovation.value,
        "consumer_surplus_duopoly": consumer_surplus_duopoly.value,
        "incumbent_profit_duopoly": incumbent_profit_duopoly.value,
        "startup_profit_duopoly": startup_profit_duopoly.value,
        "consumer_surplus_with_innovation": consumer_surplus_with_innovation.value,
        "incumbent_profit_with_innovation": incumbent_profit_with_innovation.value,
    }
    try:
        # Resolve the dropdown label back to the matching model class.
        candidates = (
            FMT20.OptimalMergerPolicy,
            FMT20.ProCompetitive,
            FMT20.ResourceWaste,
            FMT20.PerfectInformation,
        )
        model_type = None
        for candidate in candidates:
            if model.value == FMT20.NotebookUtilities.get_model_label(candidate):
                model_type = candidate
                break
        if model_type is None:
            raise NotImplementedError("No such model type available")
        clear_output()
        display(grid)
        fig, ax = FMT20.Overview(
            model_type(**model_kwargs), figsize=(9.5, 8), default_style=False
        ).plot(**plot_kwargs)
        fig.set_label("Interactive use of Fumagalli et al. (2020)")
    except AssertionError as e:
        # Parameter combinations rejected by the model are reported inline.
        print(e)
# Wire widget callbacks: preset selection updates the inputs,
# the button triggers a redraw.
configurations.observe(set_configuration)
button.on_click(draw_figure)
# Display the widget grid as the cell output.
grid
```
| PypiClean |
/LinkPython-0.1.1.tar.gz/LinkPython-0.1.1/modules/pybind11/docs/benchmark.py | import datetime as dt
import os
import random
nfns = 4 # Functions per class
nargs = 4 # Arguments per function
def generate_dummy_code_pybind11(nclasses=10):
decl = ""
bindings = ""
for cl in range(nclasses):
decl += "class cl%03i;\n" % cl
decl += "\n"
for cl in range(nclasses):
decl += "class cl%03i {\n" % cl
decl += "public:\n"
bindings += ' py::class_<cl%03i>(m, "cl%03i")\n' % (cl, cl)
for fn in range(nfns):
ret = random.randint(0, nclasses - 1)
params = [random.randint(0, nclasses - 1) for i in range(nargs)]
decl += " cl%03i *fn_%03i(" % (ret, fn)
decl += ", ".join("cl%03i *" % p for p in params)
decl += ");\n"
bindings += ' .def("fn_%03i", &cl%03i::fn_%03i)\n' % (fn, cl, fn)
decl += "};\n\n"
bindings += " ;\n"
result = "#include <pybind11/pybind11.h>\n\n"
result += "namespace py = pybind11;\n\n"
result += decl + "\n"
result += "PYBIND11_MODULE(example, m) {\n"
result += bindings
result += "}"
return result
def generate_dummy_code_boost(nclasses=10):
decl = ""
bindings = ""
for cl in range(nclasses):
decl += "class cl%03i;\n" % cl
decl += "\n"
for cl in range(nclasses):
decl += "class cl%03i {\n" % cl
decl += "public:\n"
bindings += ' py::class_<cl%03i>("cl%03i")\n' % (cl, cl)
for fn in range(nfns):
ret = random.randint(0, nclasses - 1)
params = [random.randint(0, nclasses - 1) for i in range(nargs)]
decl += " cl%03i *fn_%03i(" % (ret, fn)
decl += ", ".join("cl%03i *" % p for p in params)
decl += ");\n"
bindings += (
' .def("fn_%03i", &cl%03i::fn_%03i, py::return_value_policy<py::manage_new_object>())\n'
% (fn, cl, fn)
)
decl += "};\n\n"
bindings += " ;\n"
result = "#include <boost/python.hpp>\n\n"
result += "namespace py = boost::python;\n\n"
result += decl + "\n"
result += "BOOST_PYTHON_MODULE(example) {\n"
result += bindings
result += "}"
return result
for codegen in [generate_dummy_code_pybind11, generate_dummy_code_boost]:
print("{")
for i in range(0, 10):
nclasses = 2 ** i
with open("test.cpp", "w") as f:
f.write(codegen(nclasses))
n1 = dt.datetime.now()
os.system(
"g++ -Os -shared -rdynamic -undefined dynamic_lookup "
"-fvisibility=hidden -std=c++14 test.cpp -I include "
"-I /System/Library/Frameworks/Python.framework/Headers -o test.so"
)
n2 = dt.datetime.now()
elapsed = (n2 - n1).total_seconds()
size = os.stat("test.so").st_size
print(" {%i, %f, %i}," % (nclasses * nfns, elapsed, size))
print("}") | PypiClean |
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/mmdet/models/backbones/resnet.py | import warnings
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from ..builder import BACKBONES
from ..utils import ResLayer
class BasicBlock(BaseModule):
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
dcn=None,
plugins=None,
init_cfg=None):
super(BasicBlock, self).__init__(init_cfg)
assert dcn is None, 'Not implemented yet.'
assert plugins is None, 'Not implemented yet.'
self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
self.conv1 = build_conv_layer(
conv_cfg,
inplanes,
planes,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg, planes, planes, 3, padding=1, bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
class Bottleneck(BaseModule):
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
dcn=None,
plugins=None,
init_cfg=None):
"""Bottleneck block for ResNet.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__(init_cfg)
assert style in ['pytorch', 'caffe']
assert dcn is None or isinstance(dcn, dict)
assert plugins is None or isinstance(plugins, list)
if plugins is not None:
allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
assert all(p['position'] in allowed_position for p in plugins)
self.inplanes = inplanes
self.planes = planes
self.stride = stride
self.dilation = dilation
self.style = style
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.dcn = dcn
self.with_dcn = dcn is not None
self.plugins = plugins
self.with_plugins = plugins is not None
if self.with_plugins:
# collect plugins for conv1/conv2/conv3
self.after_conv1_plugins = [
plugin['cfg'] for plugin in plugins
if plugin['position'] == 'after_conv1'
]
self.after_conv2_plugins = [
plugin['cfg'] for plugin in plugins
if plugin['position'] == 'after_conv2'
]
self.after_conv3_plugins = [
plugin['cfg'] for plugin in plugins
if plugin['position'] == 'after_conv3'
]
if self.style == 'pytorch':
self.conv1_stride = 1
self.conv2_stride = stride
else:
self.conv1_stride = stride
self.conv2_stride = 1
self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
norm_cfg, planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
conv_cfg,
inplanes,
planes,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
fallback_on_stride = False
if self.with_dcn:
fallback_on_stride = dcn.pop('fallback_on_stride', False)
if not self.with_dcn or fallback_on_stride:
self.conv2 = build_conv_layer(
conv_cfg,
planes,
planes,
kernel_size=3,
stride=self.conv2_stride,
padding=dilation,
dilation=dilation,
bias=False)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
self.conv2 = build_conv_layer(
dcn,
planes,
planes,
kernel_size=3,
stride=self.conv2_stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
conv_cfg,
planes,
planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
if self.with_plugins:
self.after_conv1_plugin_names = self.make_block_plugins(
planes, self.after_conv1_plugins)
self.after_conv2_plugin_names = self.make_block_plugins(
planes, self.after_conv2_plugins)
self.after_conv3_plugin_names = self.make_block_plugins(
planes * self.expansion, self.after_conv3_plugins)
def make_block_plugins(self, in_channels, plugins):
"""make plugins for block.
Args:
in_channels (int): Input channels of plugin.
plugins (list[dict]): List of plugins cfg to build.
Returns:
list[str]: List of the names of plugin.
"""
assert isinstance(plugins, list)
plugin_names = []
for plugin in plugins:
plugin = plugin.copy()
name, layer = build_plugin_layer(
plugin,
in_channels=in_channels,
postfix=plugin.pop('postfix', ''))
assert not hasattr(self, name), f'duplicate plugin {name}'
self.add_module(name, layer)
plugin_names.append(name)
return plugin_names
def forward_plugin(self, x, plugin_names):
out = x
for name in plugin_names:
out = getattr(self, name)(out)
return out
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name)
@property
def norm3(self):
"""nn.Module: normalization layer after the third convolution layer"""
return getattr(self, self.norm3_name)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv1_plugin_names)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv2_plugin_names)
out = self.conv3(out)
out = self.norm3(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv3_plugin_names)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
@BACKBONES.register_module()
class ResNet(BaseModule):
"""ResNet backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
stem_channels (int | None): Number of stem channels. If not specified,
it will be the same as `base_channels`. Default: None.
base_channels (int): Number of base channels of res layer. Default: 64.
in_channels (int): Number of input image channels. Default: 3.
num_stages (int): Resnet stages. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
norm_cfg (dict): Dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
plugins (list[dict]): List of plugins for stages, each dict contains:
- cfg (dict, required): Cfg dict to build plugin.
- position (str, required): Position inside block to insert
plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
- stages (tuple[bool], optional): Stages to apply plugin, length
should be same as 'num_stages'.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Example:
>>> from mmdet.models import ResNet
>>> import torch
>>> self = ResNet(depth=18)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 64, 8, 8)
(1, 128, 4, 4)
(1, 256, 2, 2)
(1, 512, 1, 1)
"""
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
depth,
in_channels=3,
stem_channels=None,
base_channels=64,
num_stages=4,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3),
style='pytorch',
deep_stem=False,
avg_down=False,
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
dcn=None,
stage_with_dcn=(False, False, False, False),
plugins=None,
with_cp=False,
zero_init_residual=True,
pretrained=None,
init_cfg=None):
super(ResNet, self).__init__(init_cfg)
self.zero_init_residual = zero_init_residual
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
block_init_cfg = None
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
block = self.arch_settings[depth][0]
if self.zero_init_residual:
if block is BasicBlock:
block_init_cfg = dict(
type='Constant',
val=0,
override=dict(name='norm2'))
elif block is Bottleneck:
block_init_cfg = dict(
type='Constant',
val=0,
override=dict(name='norm3'))
else:
raise TypeError('pretrained must be a str or None')
self.depth = depth
if stem_channels is None:
stem_channels = base_channels
self.stem_channels = stem_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert num_stages >= 1 and num_stages <= 4
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.style = style
self.deep_stem = deep_stem
self.avg_down = avg_down
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.norm_eval = norm_eval
self.dcn = dcn
self.stage_with_dcn = stage_with_dcn
if dcn is not None:
assert len(stage_with_dcn) == num_stages
self.plugins = plugins
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = stem_channels
self._make_stem_layer(in_channels, stem_channels)
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
dcn = self.dcn if self.stage_with_dcn[i] else None
if plugins is not None:
stage_plugins = self.make_stage_plugins(plugins, i)
else:
stage_plugins = None
planes = base_channels * 2**i
res_layer = self.make_res_layer(
block=self.block,
inplanes=self.inplanes,
planes=planes,
num_blocks=num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
avg_down=self.avg_down,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
dcn=dcn,
plugins=stage_plugins,
init_cfg=block_init_cfg)
self.inplanes = planes * self.block.expansion
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
self.feat_dim = self.block.expansion * base_channels * 2**(
len(self.stage_blocks) - 1)
def make_stage_plugins(self, plugins, stage_idx):
"""Make plugins for ResNet ``stage_idx`` th stage.
Currently we support to insert ``context_block``,
``empirical_attention_block``, ``nonlocal_block`` into the backbone
like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of
Bottleneck.
An example of plugins format could be:
Examples:
>>> plugins=[
... dict(cfg=dict(type='xxx', arg1='xxx'),
... stages=(False, True, True, True),
... position='after_conv2'),
... dict(cfg=dict(type='yyy'),
... stages=(True, True, True, True),
... position='after_conv3'),
... dict(cfg=dict(type='zzz', postfix='1'),
... stages=(True, True, True, True),
... position='after_conv3'),
... dict(cfg=dict(type='zzz', postfix='2'),
... stages=(True, True, True, True),
... position='after_conv3')
... ]
>>> self = ResNet(depth=18)
>>> stage_plugins = self.make_stage_plugins(plugins, 0)
>>> assert len(stage_plugins) == 3
Suppose ``stage_idx=0``, the structure of blocks in the stage would be:
.. code-block:: none
conv1-> conv2->conv3->yyy->zzz1->zzz2
Suppose 'stage_idx=1', the structure of blocks in the stage would be:
.. code-block:: none
conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2
If stages is missing, the plugin would be applied to all stages.
Args:
plugins (list[dict]): List of plugins cfg to build. The postfix is
required if multiple same type plugins are inserted.
stage_idx (int): Index of stage to build
Returns:
list[dict]: Plugins for current stage
"""
stage_plugins = []
for plugin in plugins:
plugin = plugin.copy()
stages = plugin.pop('stages', None)
assert stages is None or len(stages) == self.num_stages
# whether to insert plugin into current stage
if stages is None or stages[stage_idx]:
stage_plugins.append(plugin)
return stage_plugins
def make_res_layer(self, **kwargs):
"""Pack all blocks in a stage into a ``ResLayer``."""
return ResLayer(**kwargs)
@property
def norm1(self):
"""nn.Module: the normalization layer named "norm1" """
return getattr(self, self.norm1_name)
    def _make_stem_layer(self, in_channels, stem_channels):
        """Build the input stem.

        With ``deep_stem`` enabled, the classic 7x7 conv is replaced by three
        stacked 3x3 convs (stride 2, 1, 1); otherwise the standard
        conv7x7 + norm + relu stem is built. Both variants are followed by
        the shared 3x3 stride-2 max pool.

        Args:
            in_channels (int): Channels of the input image.
            stem_channels (int): Output channels of the stem.
        """
        if self.deep_stem:
            self.stem = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    in_channels,
                    stem_channels // 2,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
                nn.ReLU(inplace=True),
                build_conv_layer(
                    self.conv_cfg,
                    stem_channels // 2,
                    stem_channels // 2,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
                nn.ReLU(inplace=True),
                build_conv_layer(
                    self.conv_cfg,
                    stem_channels // 2,
                    stem_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels)[1],
                nn.ReLU(inplace=True))
        else:
            self.conv1 = build_conv_layer(
                self.conv_cfg,
                in_channels,
                stem_channels,
                kernel_size=7,
                stride=2,
                padding=3,
                bias=False)
            # build_norm_layer returns (name, module); register the module
            # under the generated name so the `norm1` property can resolve it.
            self.norm1_name, norm1 = build_norm_layer(
                self.norm_cfg, stem_channels, postfix=1)
            self.add_module(self.norm1_name, norm1)
            self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    def _freeze_stages(self):
        """Freeze the stem and the first ``frozen_stages`` residual stages.

        Frozen modules are switched to eval mode (stops BatchNorm running
        statistics updates) and their parameters stop receiving gradients.
        A ``frozen_stages`` value below 0 freezes nothing.
        """
        if self.frozen_stages >= 0:
            if self.deep_stem:
                self.stem.eval()
                for param in self.stem.parameters():
                    param.requires_grad = False
            else:
                self.norm1.eval()
                for m in [self.conv1, self.norm1]:
                    for param in m.parameters():
                        param.requires_grad = False
        # range(1, frozen_stages + 1) is empty when frozen_stages < 1
        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, f'layer{i}')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False
def forward(self, x):
"""Forward function."""
if self.deep_stem:
x = self.stem(x)
else:
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
    def train(self, mode=True):
        """Switch the model to training mode while keeping frozen stages and,
        when ``norm_eval`` is set, all BatchNorm layers in eval mode.

        Args:
            mode (bool): ``True`` for training mode, ``False`` for eval.
        """
        super(ResNet, self).train(mode)
        # Re-apply freezing: nn.Module.train() re-enables every submodule.
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                # trick: eval has effect on BatchNorm (running stats) only
                if isinstance(m, _BatchNorm):
                    m.eval()
@BACKBONES.register_module()
class ResNetV1d(ResNet):
    r"""ResNetV1d variant described in `Bag of Tricks
    <https://arxiv.org/pdf/1812.01187.pdf>`_.

    Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in
    the input stem with three 3x3 convs. And in the downsampling block, a 2x2
    avg_pool with stride 2 is added before conv, whose stride is changed to 1.
    """

    def __init__(self, **kwargs):
        # Force the deep stem and average-pool downsampling that define V1d.
        # Bug fix: stray packaging text ("| PypiClean") fused onto this call
        # in the original would have raised a NameError at runtime.
        super(ResNetV1d, self).__init__(
            deep_stem=True, avg_down=True, **kwargs)
/CtrlF_TF-1.0b1.tar.gz/CtrlF_TF-1.0b1/src/ctrlf_tf/ctrlf_core.py | import copy
from dataclasses import dataclass, fields
from io import StringIO
import sys
from typing import Iterable, Tuple
import numpy as np
import pandas as pd
import ctrlf_tf.pwm_utils
import ctrlf_tf.str_utils
import ctrlf_tf.parse_utils
import ctrlf_tf.compile_utils
import ctrlf_tf.site_call_utils
__version__ = "1.0b1"
__author__ = "Zachery Mielko"
@dataclass
class AlignParameters:
    """Dataclass for input parameters to align k-mers to a model.

    :param kmer_file: Pathway to a text file of kmers. Must have columns
        representing: k-mer, reverse_complement k-mer, score where the score
        will be used to determine the rank of the k-mer.
    :type kmer_file: str
    :param pwm_file: Pathway to a text file for a position weight matrix to use
        as a model for alignment. Format is specified by the *pwm_file_format*
        argument, which is "Tabular" by default. The pwm file is assumed to
        define probabilities.
    :type pwm_file: str
    :param pwm_file_format: Indicates if the position weight matrix specified
        in the pwm_file param is in a tabular or meme format by specifying
        either *"Tabular"* or *"MEME"*.
    :type pwm_file_format: str
    :param core_start: 1-based, inclusive position for the start of the core.
        The core defines the positions that must be described by kmers to call
        a site.
    :type core_start: int
    :param core_end: 1-based inclusive position for the end of the core. The
        core defines the positions that must be described by kmers to call a
        site.
    :type core_end: int
    :param core_gaps: 1-based positions relative to core start that define
        sections within a core that do not need to be described. These
        positions will be equiprobable during alignment.
    :type core_gaps: Iterable
    :param range_consensus: A kmer input that defines core_start, core_end, and
        core_gaps by aligning the kmer to the PWM based on the maximum score
        and selecting the described positions as a core. Using a '.' in the
        kmer input will make that position a core_gap.
    :type range_consensus: str
    :param gap_limit: Limit on the number of gaps to allow kmers to have. Must
        be 0 or greater.
    :type gap_limit: int
    :param threshold: A score to filter aligned kmers from. The column used to
        filter is determined by *threshold_column*.
    :type threshold: float
    :param threshold_column: Column name in the kmer file to filter for kmers
        above a threshold determined by the *threshold* parameter.
    :type threshold_column: str
    :param palindrome: If *true*, both orientations of kmers are aligned to the
        core pwm model and orientations in called sites will be '.'. If *false*
        then the maximum scoring kmer of the two orientations will be used.
        Sites will be called with a '+' or '-' orientation.
    :type palindrome: bool
    :param version: Version of ctrlf_tf used in alignment
    :type version: str
    :param pwm_reverse_complement: If *true*, the PWM orientation is the
        reverse complement of the input PWM.
    :type pwm_reverse_complement: bool
    """
    pwm_file: str
    pwm_file_format: str = "Tabular"
    core_start: int = 0
    core_end: int = 0
    core_gaps: Iterable[int] = None
    range_consensus: str = None
    gap_limit: int = 0
    # Annotation corrected from `int`: thresholds are scores (see docstring).
    threshold: float = None
    threshold_column: str = None
    palindrome: bool = False
    version: str = __version__
    kmer_file: str = None
    pwm_reverse_complement: bool = False

    def __post_init__(self):
        """Parse and validate input parameters."""
        # Validate PWM model parameters
        ctrlf_tf.parse_utils.validate_align_parameters(self.core_start,
                                                       self.core_end,
                                                       self.range_consensus,
                                                       self.threshold,
                                                       self.threshold_column)
        # If a range consensus is specified, update the core and gap parameters
        if self.range_consensus:
            pwm = ctrlf_tf.pwm_utils.read_pwm_file(self.pwm_file,
                                                   self.pwm_file_format)
            full_pwm_dict = ctrlf_tf.pwm_utils.pwm_ndarray_to_dict(pwm)
            # [::-1, ::-1] mirrors positions and complements bases (reverse complement)
            full_pwm_dict_rc = ctrlf_tf.pwm_utils.pwm_ndarray_to_dict(pwm[::-1, ::-1])
            parsed_params = ctrlf_tf.pwm_utils.model_params_from_consensus(self.range_consensus,
                                                                           full_pwm_dict,
                                                                           full_pwm_dict_rc)
            self.core_start, self.core_end, self.core_gaps, self.pwm_reverse_complement = parsed_params
        # If no range consensus or core specification, update core as whole PWM
        elif self.core_start == 0 and self.core_end == 0:
            pwm = ctrlf_tf.pwm_utils.read_pwm_file(self.pwm_file,
                                                   self.pwm_file_format)
            self.core_start = 1
            self.core_end = pwm.shape[1]
            self.core_gaps = []

    def save_parameters(self, file_path: str, mode='w'):
        """Saves the parameters to a file.

        :param file_path: Output file pathway
        :param mode: File-open mode ('w' to overwrite, 'a' to append)
        """
        with open(file_path, mode) as file_obj:
            for i in fields(self):
                label = i.name
                # range_consensus is omitted on purpose: __post_init__ has
                # already resolved it into core_start/core_end/core_gaps.
                if label != "range_consensus":
                    value = getattr(self, label)
                    file_obj.write(f"#{label}: {value}\n")

    @classmethod
    def _from_parameter_dict(cls, param_dict):
        # Internal constructor shared by the file and iterable parsers.
        return cls(pwm_file=param_dict["pwm_file"],
                   pwm_file_format=param_dict["pwm_file_format"],
                   core_start=param_dict["core_start"],
                   core_end=param_dict["core_end"],
                   core_gaps=param_dict["core_gaps"],
                   gap_limit=param_dict["gap_limit"],
                   threshold=param_dict["threshold"],
                   threshold_column=param_dict["threshold_column"],
                   palindrome=param_dict["palindrome"],
                   version=param_dict["version"],
                   kmer_file=param_dict["kmer_file"],
                   pwm_reverse_complement=param_dict["pwm_reverse_complement"])

    @classmethod
    def from_parameter_file(cls, file_path: str):
        """Construct parameters from a saved parameter file."""
        param_dict = ctrlf_tf.parse_utils.parameter_dict_from_file(file_path)
        return cls._from_parameter_dict(param_dict)

    @classmethod
    def from_str_iterable(cls, iterable: Iterable[str]):
        """Construct parameters from an iterable of parameter strings."""
        param_dict = ctrlf_tf.parse_utils.parameter_dict_from_strs(iterable)
        return cls._from_parameter_dict(param_dict)
class AlignedKmers:
    """Aligns k-mers to a PWM model.

    Holds the core positions, the (trimmed) PWM used for alignment, and the
    dataframe of k-mers annotated with their alignment offsets. Instances
    are built either from an AlignParameters object (``from_parameters``)
    or from a previously saved alignment file (``from_alignment_file``).
    """

    # Initialization and constructors
    def __init__(self,
                 core_positions: Tuple[int] = None,
                 aligned_kmer_dataframe: pd.DataFrame = None,
                 k: int = None,
                 palindrome: bool = None,
                 pwm: np.ndarray = None,
                 version: str = None,
                 kmer_dataframe: pd.DataFrame = None):
        """Class initialization.

        :param core_positions: Relative positions of the PWM core
        :param aligned_kmer_dataframe: K-mers annotated with alignment data
        :param k: Length of the k-mers
        :param palindrome: Whether both orientations align identically
        :param pwm: Position weight matrix used for the alignment
        :param version: ctrlf_tf version the alignment was produced with
        :param kmer_dataframe: Raw (pre-alignment) k-mer dataframe
        """
        self.core_positions = core_positions
        self.aligned_kmer_dataframe = aligned_kmer_dataframe
        self.k = k
        self.palindrome = palindrome
        self.pwm = pwm
        self.version = version
        self.kmer_dataframe = kmer_dataframe

    @classmethod
    def from_parameters(cls, parameters: "AlignParameters"):
        """Constructor using an AlignParameters object.

        A factory constructor that aligns k-mers to a PWM model using
        parameters defined in an *AlignParameters* instance.
        """
        # Read kmer data
        if parameters.kmer_file:
            kmer_df = ctrlf_tf.str_utils.read_kmer_data(parameters.kmer_file,
                                                        parameters.threshold,
                                                        parameters.threshold_column,
                                                        parameters.gap_limit)
        else:
            raise ValueError("Parameters do not contain a kmer or selex source.")
        # Default the ranking column to the third column of the k-mer file
        rank_score_label = parameters.threshold_column
        if rank_score_label is None:
            rank_score_label = kmer_df.columns[2]
        # Read PWM information
        k = ctrlf_tf.str_utils.k_from_kmers(kmer_df.iloc[:, 0])
        pad_length = ctrlf_tf.str_utils.max_length_from_kmers(kmer_df.iloc[:, 0])
        pwm = ctrlf_tf.pwm_utils.read_pwm_file(parameters.pwm_file,
                                               parameters.pwm_file_format)
        if parameters.pwm_reverse_complement:
            # [::-1, ::-1] mirrors positions and complements bases
            pwm = pwm[::-1, ::-1]
        # Trim the PWM down to the specified core range
        core_range = (parameters.core_start, parameters.core_end)
        pwm = ctrlf_tf.pwm_utils.trim_pwm_by_core(pwm, core_range, parameters.core_gaps)
        # Find core absolute start position and relative positions
        core_positions = ctrlf_tf.pwm_utils.core_positions_from_pwm(pwm,
                                                                    parameters.core_gaps)
        core_absolute_start = core_positions[0] + pad_length
        # Pad PWM with equiprobable flanks so k-mers can overhang the core
        pwm_padded = ctrlf_tf.pwm_utils.pad_pwm_equiprobable(pwm, pad_length)
        pwm_dict = ctrlf_tf.pwm_utils.pwm_ndarray_to_dict(pwm_padded)
        # Generate the aligned k-mer dataframe
        aligned_kmer_df = ctrlf_tf.pwm_utils.align_kmers_from_df(kmer_df,
                                                                 pwm_dict,
                                                                 parameters.palindrome,
                                                                 core_absolute_start,
                                                                 rank_score_label)
        return cls(core_positions,
                   aligned_kmer_df,
                   k,
                   parameters.palindrome,
                   pwm,
                   parameters.version,
                   kmer_df)

    @classmethod
    def from_alignment_file(cls, file_path: str):
        """Construct class using a previous AlignedKmers output file.

        Parses the information saved from using the method
        *.save_alignment* to make a new class instance.

        :param file_path: Path to saved alignment file
        :type file_path: str
        :returns: Class instance with data from the alignment file
        """
        # Read alignment file
        with open(file_path) as file_obj:
            aligned_kmer_data = file_obj.read()
        # Parse parameters in header (fixed order: version, palindrome, core)
        with StringIO(aligned_kmer_data) as data_obj:
            version = data_obj.readline().split(': ')[1].strip()
            palindrome = ctrlf_tf.parse_utils.parse_boolean(data_obj.readline().strip())
            core_positions = ctrlf_tf.parse_utils.parse_core_positions(data_obj.readline().strip())
            # PWM block: 4 rows (A, C, G, T), preceded by one header line
            pwm = np.loadtxt(data_obj, delimiter='\t', skiprows=1, max_rows=4)
            # Parse k-mer dataframe
            aligned_kmer_dataframe = pd.read_csv(data_obj, sep='\t', skiprows=1)
        # Parse k from the k-mer column
        k = ctrlf_tf.str_utils.k_from_kmers(aligned_kmer_dataframe.iloc[:, 0])
        # Return AlignedKmers object
        return cls(core_positions,
                   aligned_kmer_dataframe,
                   k,
                   palindrome,
                   pwm,
                   version)

    def copy(self):
        """Create a deep copy of the AlignedKmers object."""
        return copy.deepcopy(self)

    # Public instance methods
    def save_alignment(self, location: str = None):
        """Save alignment data to stdout or a file.

        :param location: Output file path; if None, output goes to stdout
        :type location: str
        """
        # Determine if output is stdout or another location
        if location is None:
            output_file_object = sys.stdout
        else:
            output_file_object = open(location, "w")
        # Write version, alignment flag, and core positions
        # NOTE(review): this writes the module-level __version__, not
        # self.version — confirm intended when re-saving an older alignment.
        output_file_object.write((f"#CtrlF Version: {__version__}\n"
                                  f"#Palindrome Alignment: {self.palindrome}\n"
                                  "#Core Aligned Positions:"))
        for i in self.core_positions:
            output_file_object.write(f" {i} ")
        output_file_object.write('\n')
        # Write position weight matrix used in alignment
        output_file_object.write('#Alignment Model\n')
        np.savetxt(output_file_object, self.pwm, delimiter='\t')
        # Write the aligned kmers
        output_file_object.write("#Aligned Kmers\n")
        # Set Align_Score to scientific notation for output
        output_df = self.aligned_kmer_dataframe.copy(deep=True)
        output_df["Align_Score"] = output_df["Align_Score"].apply(lambda x: "{:e}".format(x))
        output_df.to_csv(output_file_object, sep='\t', index=False)
        # Bug fix: the original tested `location != sys.stdout`, which is
        # always True (location is a path string or None) and therefore
        # closed sys.stdout whenever location was None. Only close a file
        # that this method opened itself.
        if location is not None:
            output_file_object.close()
class CompiledKmers(AlignedKmers):
    """Compiles aligned k-mers into consensus sites.

    This class is a child class of AlignedKmers that compiles the
    k-mers into consensus sites as part of the initialization
    process. The class also defines how compiled sites are saved
    and loaded to populate a new CompiledKmers object.
    """
    def __init__(self,
                 core_positions: Tuple[int] = None,
                 aligned_kmer_dataframe: pd.DataFrame = None,
                 k: int = None,
                 palindrome: bool = None,
                 pwm: np.ndarray = None,
                 version: str = None,
                 kmer_dataframe: pd.DataFrame = None,
                 compiled_site_dataframe: pd.DataFrame = None,
                 abs_core_start: int = None,
                 abs_core_end: int = None):
        super().__init__(core_positions=core_positions,
                         aligned_kmer_dataframe=aligned_kmer_dataframe,
                         k=k,
                         palindrome=palindrome,
                         pwm=pwm,
                         version=version,
                         kmer_dataframe=kmer_dataframe)
        self.compiled_site_dataframe = compiled_site_dataframe
        self.abs_core_start = abs_core_start
        self.abs_core_end = abs_core_end
        if self.compiled_site_dataframe is None:
            # Generate compiled sequences
            self.compiled_site_dataframe = ctrlf_tf.compile_utils.compile_consensus_sites(self.aligned_kmer_dataframe, self.core_positions)
            # Trim edges to minimal bounds
            left_idx, right_idx = ctrlf_tf.compile_utils.bounds_from_aligned_sequences(self.compiled_site_dataframe[ctrlf_tf.compile_utils.COMPILED_LABEL])
            self.compiled_site_dataframe[ctrlf_tf.compile_utils.COMPILED_LABEL] = self.compiled_site_dataframe[ctrlf_tf.compile_utils.COMPILED_LABEL].apply(lambda x: x[left_idx:right_idx])
            # Derive the absolute core range from alignment offsets when the
            # caller supplied neither bound.
            if self.abs_core_start is None and self.abs_core_end is None:
                self.abs_core_start = abs(min(self.aligned_kmer_dataframe["Align_Position"])) + 1 - left_idx
                self.abs_core_end = self.abs_core_start + max(self.core_positions)
        # Reaching here with a bound still None means exactly one of
        # abs_core_start/abs_core_end was supplied.
        if self.abs_core_start is None or self.abs_core_end is None:
            raise ValueError("CompiledKmers object initialize with 1 of [abs_core_start, abs_core_end]. Must specify neither or both.")
        self.core_span = self.abs_core_end - self.abs_core_start
    def save_compiled_sites(self, output=None, minimal=True):
        """Saves compiled sites as a table to a file or stdout.

        :param output: Output location (default = stdout)
        :type output: str
        :param minimal: If *true*, removes column showing which kmer indexes
            were used to generate the solution.
        :type minimal: bool
        """
        if output is None:
            output_file_object = sys.stdout
        else:
            output_file_object = open(output, "w")
        # '.' marks palindromic (orientation-less) searching
        search_orientation = "+/-"
        if self.palindrome:
            search_orientation = '.'
        output_file_object.write((f"#CtrlF Version: {__version__}\n"
                                  f"#Search Orientation: {search_orientation}\n"
                                  f"#Core Range: {self.abs_core_start} {self.abs_core_end}\n"))
        if minimal:
            self.compiled_site_dataframe[[ctrlf_tf.compile_utils.COMPILED_LABEL, "Rank_Score"]].to_csv(output_file_object,
                                                                                                       sep='\t',
                                                                                                       index=False)
        else:
            self.compiled_site_dataframe.to_csv(output_file_object,
                                                sep='\t',
                                                index=False)
        # Only close a file this method opened (never sys.stdout)
        if output is not None:
            output_file_object.close()
    @classmethod
    def from_compiled_sites(cls, file_path):
        """Construct class from *save_compiled_sites()* output.

        :param file_path: File location of compiled sites.
        :type file_path: str
        """
        with open(file_path) as file_obj:
            # Header lines are written in a fixed order by save_compiled_sites
            version = file_obj.readline().strip().split(": ")[1]
            palindrome = ctrlf_tf.parse_utils.parse_orientation_bool(file_obj.readline().rstrip())
            abs_core_start, abs_core_end = ctrlf_tf.parse_utils.parse_integer_parameters(file_obj.readline())
            compiled_site_df = pd.read_csv(file_obj, sep='\t')
        return cls(version=version,
                   palindrome=palindrome,
                   abs_core_start=abs_core_start,
                   abs_core_end=abs_core_end,
                   compiled_site_dataframe=compiled_site_df)
class CtrlF(CompiledKmers):
    """Class used to align, compile, and call sites."""

    def __init__(self,
                 core_positions: Tuple[int] = None,
                 aligned_kmer_dataframe: pd.DataFrame = None,
                 k: int = None,
                 palindrome: bool = None,
                 pwm: np.ndarray = None,
                 version: str = None,
                 kmer_dataframe: pd.DataFrame = None,
                 compiled_site_dataframe: pd.DataFrame = None,
                 abs_core_start: int = None,
                 abs_core_end: int = None):
        super().__init__(core_positions=core_positions,
                         aligned_kmer_dataframe=aligned_kmer_dataframe,
                         k=k,
                         palindrome=palindrome,
                         pwm=pwm,
                         version=version,
                         kmer_dataframe=kmer_dataframe,
                         compiled_site_dataframe=compiled_site_dataframe,
                         abs_core_start=abs_core_start,
                         abs_core_end=abs_core_end)
        # Setup internal copy with end positions for rigid and flexible searches.
        self._site_len = ctrlf_tf.str_utils.total_length_aligned_strs(self.compiled_site_dataframe[ctrlf_tf.compile_utils.COMPILED_LABEL])
        self.site_span = self._site_len - 1
        self._internal_cs_df = self.compiled_site_dataframe.copy(deep=True)
        self._internal_cs_df["Site_End_Pos"] = self._internal_cs_df[ctrlf_tf.compile_utils.COMPILED_LABEL].apply(lambda x: ctrlf_tf.str_utils.relative_end_positions(x))
        self._internal_cs_df["Core_End_Pos"] = self._internal_cs_df[ctrlf_tf.compile_utils.COMPILED_LABEL].apply(lambda x: ctrlf_tf.str_utils.relative_end_positions(x, start_position=self.abs_core_start - 1))
        # Strip the '.' padding so the automata match only described positions
        self._internal_cs_df["Search_Sites"] = self._internal_cs_df[ctrlf_tf.compile_utils.COMPILED_LABEL].apply(lambda x: x.strip('.'))
        self.automata = ctrlf_tf.site_call_utils.automata_from_sites(self._internal_cs_df["Search_Sites"])
        self.fixed_length_search_dict = ctrlf_tf.site_call_utils.compiled_dict_from_compiled_sequences(self._internal_cs_df["Search_Sites"],
                                                                                                       self._internal_cs_df["Site_End_Pos"],
                                                                                                       self._internal_cs_df["Rank_Score"])
        self.variable_length_search_dict = ctrlf_tf.site_call_utils.compiled_dict_from_compiled_sequences(self._internal_cs_df["Search_Sites"],
                                                                                                          self._internal_cs_df["Core_End_Pos"],
                                                                                                          self._internal_cs_df["Rank_Score"])

    def call_sites(self, sequence: str, fixed_length=True):
        """Returns a list of SiteTuples from an input sequence.

        Given a sequence, returns a list of SiteTuples for each called site.

        :param sequence: Input DNA sequence
        :type sequence: str
        :param fixed_length: Search mode assumes a fixed model length
        :type fixed_length: bool
        :returns: List of SiteTuples
        """
        # Set sequence to uppercase so matching is case-insensitive
        sequence = sequence.upper()
        # Use the appropriate span and compiled site dictionary for the mode
        compiled_site_dict = self.fixed_length_search_dict
        site_span = self.site_span
        if not fixed_length:
            compiled_site_dict = self.variable_length_search_dict
            site_span = self.core_span
        # Call sites from the input orientation; palindromes need only one pass
        orient1 = ctrlf_tf.site_call_utils.site_dict_from_sequence(sequence,
                                                                   self.automata,
                                                                   compiled_site_dict)
        if self.palindrome:
            return ctrlf_tf.site_call_utils.site_dict_to_sitetuples(orient1, sequence, '.', site_span)
        # Otherwise also call sites on the reverse complement and merge
        orient2 = ctrlf_tf.site_call_utils.site_dict_from_sequence(ctrlf_tf.str_utils.reverse_complement(sequence),
                                                                   self.automata,
                                                                   compiled_site_dict)
        pos_sites = ctrlf_tf.site_call_utils.site_dict_to_sitetuples(orient1, sequence, '+', site_span)
        neg_sites = ctrlf_tf.site_call_utils.site_dict_to_sitetuples(orient2, sequence, '-', site_span)
        return pos_sites + neg_sites

    def call_sites_as_bed(self,
                          sequence: str,
                          chromosome: str,
                          chromosome_start: int,
                          fixed_length=True):
        """Call sites in BED format.

        Given a sequence, chromosome, and chromosome_start information,
        returns called sites as a list of BedTuples.

        :param sequence: Input DNA sequence
        :type sequence: str
        :param chromosome: Chromosome label
        :type chromosome: str
        :param chromosome_start: Start position of the input sequence
        :type chromosome_start: int
        :param fixed_length: Search mode assumes a fixed model length
        :type fixed_length: bool
        :returns: List of BedTuples
        """
        sites = self.call_sites(sequence, fixed_length)
        chromosome_end = chromosome_start + len(sequence)
        bedtuple_result = ctrlf_tf.site_call_utils.site_tuples_to_bed(sites,
                                                                      chromosome,
                                                                      chromosome_start,
                                                                      chromosome_end)
        return bedtuple_result

    def call_sites_from_fasta(self,
                              fasta_file: str,
                              genomic_label: bool,
                              to_file: str = None):
        """Given a fasta file input, calls sites in BED format.

        With a fasta file as input, calls sites from every sequence. Returns
        output in BED format. By default, the chromosome is the full header and
        the start and end are relative to sequence length. With the genomic
        label parameter set to True, the header is parsed if given in the
        format: Chromosome:start-end. By default the method returns a pandas
        DataFrame, but with the to_file parameter set to a file location, the
        function will write output to the location. This differs in that called
        site data is not kept in memory and is written in groups of sites
        called per sequence.

        :param fasta_file: A fasta file input of sequences
        :type fasta_file: str
        :param genomic_label: Flag for if the headers are genomic coordinates
        :type genomic_label: bool
        :param to_file: File location output. If specified, outputs sites
            for each sequence one set at a time.
        :type to_file: str
        :returns: By default a Pandas DataFrame of called sites in bed format but if to_file is specified, writes the results to a file in bed format
        """
        fasta_file_object = open(fasta_file)
        # Decide the output sink up front. Bug fix: the original left
        # output_file_object undefined for unexpected `to_file` values,
        # failing later with an opaque NameError.
        if to_file is None:
            results = []
            output_file_object = None
        elif to_file == sys.stdout:
            output_file_object = sys.stdout
        elif isinstance(to_file, str):
            output_file_object = open(to_file, 'w')
        else:
            raise TypeError("to_file must be None, a file path, or sys.stdout")
        # For each pair of rows in the fasta file
        reading = True
        while reading:
            header, sequence = ctrlf_tf.parse_utils.read_fasta_entry(fasta_file_object)
            if header:
                # Parse the chromosome, start position, and sequence
                chromosome, start = ctrlf_tf.parse_utils.parse_fasta_header(header.rstrip(), genomic_label)
                sequence = sequence.rstrip().upper()
                # Call sites and convert them to bed format
                bed_list = self.call_sites_as_bed(sequence, chromosome, start)
                # If outputting to a file or stdout, write the batch now
                if output_file_object:
                    for site in bed_list:
                        formatted_site = "\t".join([str(i) for i in site])
                        output_file_object.write(formatted_site + '\n')
                # Otherwise accumulate in memory
                else:
                    results += bed_list
            else:
                reading = False
        fasta_file_object.close()
        # Close only a file this method opened (never sys.stdout)
        if isinstance(to_file, str):
            output_file_object.close()
        elif to_file is None:
            return pd.DataFrame(results)
import torch

import PMTD.maskrcnn_benchmark.apex.apex.contrib.multihead_attn.fast_encdec_multihead_attn
# The module body calls the bare name `fast_encdec_multihead_attn`; a plain
# `import pkg.sub.mod` binds only `PMTD`, so bind the leaf name explicitly.
from PMTD.maskrcnn_benchmark.apex.apex.contrib.multihead_attn import fast_encdec_multihead_attn
class FastEncdecAttnFunc(torch.autograd.Function):
    """Autograd Function wrapping the fused encoder-decoder multi-head
    attention kernels from the ``fast_encdec_multihead_attn`` extension.

    ``forward`` runs the fused Q/KV projections, masked softmax, dropout and
    output projection in a single extension call; every intermediate the
    fused backward kernel needs is stashed on ``ctx``.
    """

    @staticmethod
    def forward(ctx, use_time_mask, is_training, heads, inputs_q, inputs_kv,
                input_weights_q, input_weights_kv, output_weights, pad_mask,
                dropout_prob):
        # Scalars are boxed into tensors so save_for_backward accepts them.
        heads_t = torch.tensor([heads])
        dropout_prob_t = torch.tensor([dropout_prob])
        null_tensor = torch.tensor([])
        use_mask = (pad_mask is not None)
        (input_lin_q_results,
         input_lin_kv_results,
         softmax_results,
         dropout_results,
         dropout_mask,
         matmul2_results,
         outputs) = fast_encdec_multihead_attn.forward(
             use_mask,
             use_time_mask,
             is_training,
             heads,
             inputs_q,
             inputs_kv,
             input_weights_q,
             input_weights_kv,
             output_weights,
             pad_mask if use_mask else null_tensor,
             dropout_prob)
        ctx.save_for_backward(heads_t,
                              matmul2_results,
                              dropout_results,
                              softmax_results,
                              input_lin_q_results,
                              input_lin_kv_results,
                              inputs_q,
                              inputs_kv,
                              input_weights_q,
                              input_weights_kv,
                              output_weights,
                              dropout_mask,
                              dropout_prob_t)
        return outputs.detach()

    @staticmethod
    def backward(ctx, output_grads):
        # Unpack in the exact order used by save_for_backward above.
        (heads_t,
         matmul2_results,
         dropout_results,
         softmax_results,
         input_lin_q_results,
         input_lin_kv_results,
         inputs_q,
         inputs_kv,
         input_weights_q,
         input_weights_kv,
         output_weights,
         dropout_mask,
         dropout_prob_t) = ctx.saved_tensors
        (input_q_grads,
         input_kv_grads,
         input_weight_q_grads,
         input_weight_kv_grads,
         output_weight_grads) = fast_encdec_multihead_attn.backward(
             heads_t[0],
             output_grads,
             matmul2_results,
             dropout_results,
             softmax_results,
             input_lin_q_results,
             input_lin_kv_results,
             inputs_q,
             inputs_kv,
             input_weights_q,
             input_weights_kv,
             output_weights,
             dropout_mask,
             dropout_prob_t[0])
        # One gradient slot per forward argument (ctx excluded); the mask and
        # the non-tensor flags/scalars receive None.
        return (None, None, None, input_q_grads, input_kv_grads,
                input_weight_q_grads, input_weight_kv_grads,
                output_weight_grads, None, None)


# Bug fix: the original alias line was fused with stray packaging text
# ("| PypiClean"), which made importing this module raise a NameError.
fast_encdec_attn_func = FastEncdecAttnFunc.apply
if(!dojo._hasResource["dojox.drawing.ui.Button"]){
dojo._hasResource["dojox.drawing.ui.Button"] = true;
dojo.provide("dojox.drawing.ui.Button");

// Button stencil: a rectangle with an optional icon (shape or text),
// hover/press/selected visual states driven by this.style.button, and an
// optional callback fired on mouse-up.
dojox.drawing.ui.Button = dojox.drawing.util.oo.declare(
    // constructor
    function(options){
        options.subShape = true;
        dojo.mixin(this, options);
        this.width = options.data.width;
        this.height = options.data.height;
        this.id = this.id || this.util.uid(this.type);
        this.util.attr(this.container, "id", this.id);
        if(this.callback){
            // Bind the click callback once; invoked from onUp.
            this.hitched = dojo.hitch(this.scope || window, this.callback, this);
        }
        this.shape = new dojox.drawing.stencil.Rect(options);
        // Apply the button's vertical gradient bounds to every visual state.
        var setFillProp = function(style, prop, value){
            dojo.forEach(["norm", "over", "down", "selected"], function(nm){
                style[nm].fill[prop] = value;
            });
        };
        setFillProp(this.style.button, "y2", this.data.height + this.data.y);
        setFillProp(this.style.button, "y1", this.data.y);
        if(options.icon && !options.icon.text){
            // Shape icon.
            var IconCtor = this.drawing.getConstructor(options.icon.type);
            var o = this.makeOptions(options.icon);
            o.data = dojo.mixin(o.data, this.style.button.icon.norm);
            if(o.data && o.data.borderWidth === 0){
                // Borderless icons are drawn with their color as fill.
                o.data.fill = this.style.button.icon.norm.fill = o.data.color;
            }else if(options.icon.type == "line" || (options.icon.type == "path" && !options.icon.closePath)){
                // Open shapes signal selection via stroke color, not fill.
                this.style.button.icon.selected.color = this.style.button.icon.selected.fill;
            }
            this.icon = new IconCtor(o);
        }else if(options.text || options.icon.text){
            // Text icon, vertically centered in the button.
            var o = this.makeOptions(options.text || options.icon.text);
            o.data.color = this.style.button.icon.norm.color;
            this.style.button.icon.selected.color = this.style.button.icon.selected.fill;
            this.icon = new dojox.drawing.stencil.Text(o);
            this.icon.attr({
                height: this.icon._lineHeight,
                y: ((this.data.height - this.icon._lineHeight) / 2) + this.data.y
            });
        }
        var toolCtor = this.drawing.getConstructor(this.toolType);
        if(toolCtor){
            this.drawing.addUI("tooltip", {data:{text:toolCtor.setup.tooltip}, button:this});
        }
        this.onOut();
    },
    {
        callback:null,
        scope:null,
        hitched:null,
        toolType:"",
        onClick: function(button){
            // Stub; connect to or override this to handle clicks.
        },
        makeOptions: function(d, s){
            // Convert an icon/text definition with percentage-based geometry
            // into stencil options positioned inside this button.
            s = s || 1;
            d = dojo.clone(d);
            var o = {util:this.util, mouse:this.mouse, container:this.container, subShape:true};
            if(typeof(d) == "string"){
                // Plain text label.
                o.data = {x:this.data.x - 5, y:this.data.y + 2, width:this.data.width, height:this.data.height, text:d, makeFit:true};
            }else if(d.points){
                // Point-based shape: scale percentages into button space.
                dojo.forEach(d.points, function(pt){
                    pt.x = pt.x * this.data.width * 0.01 * s + this.data.x;
                    pt.y = pt.y * this.data.height * 0.01 * s + this.data.y;
                }, this);
                o.data = {};
                for(var n in d){
                    if(n != "points"){
                        o.data[n] = d[n];
                    }
                }
                o.points = d.points;
            }else{
                // Box-based shape: scale x/width and y/height keys, then
                // translate position keys (skipping radius-like keys).
                for(var n in d){
                    if(/x|width/.test(n)){
                        d[n] = d[n] * this.data.width * 0.01 * s;
                    }else if(/y|height/.test(n)){
                        d[n] = d[n] * this.data.height * 0.01 * s;
                    }
                    if(/x/.test(n) && !/r/.test(n)){
                        d[n] += this.data.x;
                    }else if(/y/.test(n) && !/r/.test(n)){
                        d[n] += this.data.y;
                    }
                }
                delete d.type;
                o.data = d;
            }
            o.drawingType = "ui";
            return o;
            // NOTE(review): the original contained an unreachable block after
            // this return (a d.borderWidth copy plus a second `return o;`);
            // it was removed as dead code. If borderWidth forwarding was
            // intended, it must move above the return — confirm first.
        },
        enabled:true,
        selected:false,
        type:"drawing.library.UI.Button",
        select: function(){
            this.selected = true;
            this.icon.attr(this.style.button.icon.selected);
            this._change(this.style.button.selected);
            this.shape.shadow && this.shape.shadow.hide();
        },
        deselect: function(){
            this.selected = false;
            this.icon.attr(this.style.button.icon.norm);
            this.shape.shadow && this.shape.shadow.show();
            this._change(this.style.button.norm);
        },
        _change: function(style){
            // Restyle the rect and keep the icon above any shadow.
            this.shape.attr(style);
            this.shape.shadow && this.shape.shadow.container.moveToBack();
            this.icon.shape.moveToFront();
        },
        onOver: function(){
            if(this.selected){ return; }
            this._change(this.style.button.over);
        },
        onOut: function(){
            if(this.selected){ return; }
            this._change(this.style.button.norm);
        },
        onDown: function(){
            if(this.selected){ return; }
            this._change(this.style.button.selected);
        },
        onUp: function(){
            this._change(this.style.button.over);
            if(this.hitched){
                this.hitched();
            }
            this.onClick(this);
        }
    }
);
dojox.drawing.register({name:"dojox.drawing.ui.Button"}, "stencil");
}
/Deeplodocus-0.3.0-py3-none-any.whl/deeplodocus/data/load/data_entry.py | from typing import List
from typing import Any
from typing import Tuple
from typing import Union
import weakref
# Deeplodocus imports
from deeplodocus.utils.notification import Notification
from deeplodocus.data.load.source import Source
from deeplodocus.data.load.source_wrapper import SourceWrapper
from deeplodocus.data.load.source_pointer import SourcePointer
from deeplodocus.utils.generic_utils import get_module
from deeplodocus.data.load.loader import Loader
# Import flags
from deeplodocus.flags import *
class Entry(object):
"""
AUTHORS:
--------
:author: Alix Leroy
DESCRIPTION:
------------
Entry class
An Entry instance represents one entry for the model.
Each entry is composed of one or multiple sources of data combined into one unique list whatever the origin (file, folder, database ...)
The Entry class manages everything for an entry:
- Analyses the type of entry
- Analyses the loading method
- Join a parent directory if a relative path is given in data
- Store data in memory if requested by the user
"""
    def __init__(self,
                 index: int,
                 name: str,
                 dataset: weakref,
                 load_as: str,
                 enable_cache: bool = False,
                 cv_library: Union[str, None, Flag] = DEEP_LIB_OPENCV):
        """
        AUTHORS:
        --------

        :author: Alix Leroy

        DESCRIPTION:
        ------------

        Initialize an entry for the Dataset

        PARAMETERS:
        -----------

        :param index (int): Index of this entry within the dataset
        :param name (str): Optional human-readable name of the entry
        :param dataset (weakref): Weak reference to the dataset
        :param load_as (str): Target data type the Loader converts raw items to
        :param enable_cache (bool): Whether loaded items are cached so
            pointer entries can reuse them
        :param cv_library (Union[str, None, Flag]): Computer vision backend
            used by the Loader (OpenCV by default)

        RETURN:
        -------

        :return: None
        """
        # ID of the entry
        self.index = index
        # Optional name of the Entry
        self.name = name
        # Data type
        self.load_as = load_as
        # Weak reference to the dataset
        self.dataset = dataset
        # Loader (given a weak back-reference to avoid a reference cycle)
        self.loader = Loader(data_entry=weakref.ref(self),
                             load_as=load_as,
                             cv_library=cv_library)
        # List of sources into the entry
        self.sources = list()
        # Enable cache memory for pointer
        self.enable_cache = enable_cache
        # Cache Memory for pointers (None when caching is disabled)
        if self.enable_cache is True:
            self.cache_memory = list()
        else:
            self.cache_memory = None
        # Total number of raw instances; populated later
        # (None means unknown / unlimited)
        self.num_instances = None
    def __getitem__(self, index: int):
        """
        AUTHORS:
        --------

        :author: Alix Leroy

        DESCRIPTION:
        ------------

        Load the item at the given flat index: resolve which Source it
        belongs to, load it if the Source returned raw (unloaded) data,
        and optionally cache the result.

        PARAMETERS:
        -----------

        :param index (int): Flat index over all Sources of this Entry

        RETURN:
        -------

        :return (Tuple[Any, bool]): The item and its is_transformed flag
        """
        # If cache memory enabled, reset the cache
        if self.enable_cache is True:
            self.cache_memory = list()
        # Compute the Source ID and the Instance ID in the Source
        source_index, instance_index = self.__compute_source_indices(index=index)
        # Get the corresponding source
        s = self.sources[source_index]
        # Get the items from the Source instance
        items, is_loaded, is_transformed = s.__getitem__(instance_index)
        if is_loaded is False:
            items = self.loader.load_from_str(items)
        # If cache memory enabled, store the items in cache
        # NOTE(review): this REPLACES cache_memory (just reset to []) with the
        # raw `items` object rather than appending — confirm consumers expect
        # that, since clear_cache_memory/__init__ treat the cache as a list.
        if self.enable_cache is True:
            self.cache_memory = items
        # When the Source returned a tuple, keep only the sub-item this
        # Entry is bound to (Source.get_instance_id()).
        if isinstance(items, tuple):
            items = items[s.get_instance_id()]
        return items, is_transformed
    def __len__(self) -> Union[int, None]:
        """
        AUTHORS:
        --------

        :author: Alix Leroy

        DESCRIPTION:
        ------------

        Get the number of raw instances in the Entry.

        RETURN:
        -------

        :return self.num_instances (Union[int, None]): The number of raw
            instances within the Entry (None for an unlimited Entry)

        NOTE(review): returning None makes the builtin len() raise a
        TypeError — confirm unlimited entries are never passed to len().
        """
        return self.num_instances
def get_first_item(self) -> Any:
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Get the first item of the Entry instance

    PARAMETERS:
    -----------
    None

    RETURN:
    -------
    :return items (Any): The first item of the entry
    """
    # If cache memory enabled, reset the cache
    if self.enable_cache is True:
        self.cache_memory = list()
    # Get the corresponding source
    s = self.sources[0]
    # Get the items from the Source instance
    items, is_loaded, is_transformed = s.__getitem__(0)
    # NOTE(review): unlike __getitem__, is_loaded is ignored here so raw
    # (unloaded) data may be returned and is_transformed is discarded —
    # confirm this is intentional for the first-item probe.
    # If cache memory enabled, store the items in cache
    if self.enable_cache is True:
        self.cache_memory = items
    # The item is either the unique item returned or the desired item of the list
    if isinstance(items, tuple):
        items = items[s.get_instance_id()]
    return items
def clear_cache_memory(self) -> None:
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Clear the cache memory by replacing it with a fresh empty list.

    PARAMETERS:
    -----------
    None

    RETURN:
    -------
    :return: None
    """
    self.cache_memory = []
def __compute_source_indices(self, index: int) -> Tuple[int, int]:
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Compute the source index

    PARAMETERS:
    -----------
    :param index(int): The index of the data to load

    RETURN:
    -------
    :return (Tuple[int, int]): [The index of the source to load from, The index of the instance in the source]
    """
    # Initialize a temporary index and a temporary variable for a previous index
    temp_index = 0
    prev_temp_index = 0
    # For each source
    for i, source in enumerate(self.sources):
        # Add the length of the Source to the temp variable
        temp_index += source.get_num_instances()
        # If the index falls inside the cumulated range, return the Source index
        # and the relative index of the instance within that Source.
        # NOTE(review): get_num_instances() may be None for unlimited Sources,
        # which would make the += raise a TypeError — confirm unlimited Sources
        # are never mixed with indexed access.
        if index < temp_index:
            return i, index - prev_temp_index
        # Else update the temp value
        prev_temp_index = temp_index
    # Fallthrough: index out of range. Only a DEBUG notification is emitted and
    # the method implicitly returns None, which callers unpack — this will then
    # raise a TypeError at the call site.
    Notification(DEEP_NOTIF_DEBUG, "Error in computing the source index... Please check the algorithm")
def get_item_from_cache(self, index: int)-> Any:
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Get an item stored in the cache of the Entry
    Accessible only by SourcePointer instances

    1) Check the Entry has enabled the cache memory
    2) Check the index is within the range of the cache memory
    3) Get the item from the cache

    PARAMETERS:
    -----------
    :param index(int): Index of the cache we want to access

    RETURN:
    -------
    :return item(Any): The cached item
    """
    # 1) Check if the cache memory is enabled on this Entry
    if self.enable_cache is False:
        # Get info on Entry and Dataset for DEEP_FATAL Notification display
        entry_info = self.get_info()
        dataset_info = self.dataset().get_info()
        # Display Fatal error if cache memory disabled
        # (a DEEP_NOTIF_FATAL presumably aborts execution — confirm)
        Notification(DEEP_NOTIF_FATAL,
                     message="The Entry %s in the Dataset %s does not have cache memory enabled. SourcePointer instances cannot access the data in cache memory." % (entry_info, dataset_info),
                     solutions="Set 'enable_cache' to True for Entry %s in Dataset %s" % (entry_info, dataset_info))
    # 2) Check if the index is within the range of the cache memory
    # NOTE(review): check_cache_size() returns a bool that is ignored here, so
    # an out-of-range index with a non-empty cache still falls through to the
    # lookup below and raises IndexError/TypeError — confirm intended.
    self.check_cache_size(index)
    # 3) Get the item from the cache memory
    return self.cache_memory[index]
def generate_sources(self, sources: List[dict]) -> None:
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Generate the sources
    Does not generate the SourcePointer instances

    PARAMETERS:
    -----------
    :param sources(List[dict]): The configuration of the Source instances to generate
                                (each dict carries "name", "module" and "kwargs" keys)

    RETURN:
    -------
    :return: None
    """
    list_sources = []
    # Create sources
    for i, source in enumerate(sources):
        # Get the Source module and add it to the list
        module, origin = get_module(name=source["name"], module=source["module"], browse=DEEP_MODULE_SOURCES)
        # If the resolved module does not subclass Source, wrap it so it can
        # still be used as a data source
        if issubclass(module, Source) is False:
            # Remove the id from the initial kwargs
            index = source["kwargs"].pop('index', None)
            # Create a source wrapper with the new ID
            s = SourceWrapper(index=index,
                              name=source["name"],
                              module=source["module"],
                              kwargs=source["kwargs"])
        else:
            # If the subclass is a real Source, instantiate it directly
            s = module(**source["kwargs"])
        # Check the module inherits the generic Source class
        self.check_type_sources(s, i)
        # Add the module to the list of Source instances
        list_sources.append(s)
    # Set the list as the attribute
    self.sources = list_sources
def calculate_cache_length(self):
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Calculate the length of the cache memory
    Useful to know if the given instance index from a SourcePointer fits within the size of the cache

    PARAMETERS:
    -----------
    None

    RETURN:
    -------
    :return cache_length (int): The length of the cache
    """
    # TODO: not implemented yet — currently returns None implicitly.
    # check_cache_size() references this method in a commented-out line.
    pass
def reorder_sources(self) -> None:
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Reorder the list of Source instances by their own index.
    SourcePointer instances are generated after the normal Source instances and
    therefore are appended at the end of the list, although a SourcePointer's
    position might belong before a normal Source.

    PARAMETERS:
    -----------
    None

    RETURN:
    -------
    :return: None
    """
    # Sort the sources by the index each one reports. The previous
    # implementation built [self.sources[i] for i in indices], which applies
    # the INVERSE permutation and produced a wrong order for any permutation
    # that is not its own inverse (e.g. indices forming a 3-cycle).
    self.sources = sorted(self.sources, key=lambda source: source.get_index())
def is_next_source_pointer(self, index: int) -> bool:
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Check whether the Source required for a specific index is a SourcePointer instance or not

    PARAMETERS:
    -----------
    :param index (int): Index of a specific item in the Entry

    RETURN:
    -------
    :return is_pointer (bool): Whether the Source instance required for a specific index is a SourcePointer instance
    """
    is_pointer = False
    # Compute the Source ID which will be called
    source_index, _ = self.__compute_source_indices(index=index)
    # Get the specific Source instance
    source = self.sources[source_index]
    # Check if it is a SourcePointer instance
    if isinstance(source, SourcePointer):
        is_pointer = True
    return is_pointer
def compute_num_raw_instances(self) -> None:
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Compute the number of raw instances within the Entry and store it in
    self.num_instances.

    If any Source instance is unlimited (None):
        => Consider the Entry as unlimited (num_instances = None)
    Else:
        => Sum the length of all Source instances

    PARAMETERS:
    -----------
    None

    RETURN:
    -------
    :return: None
    """
    total = 0
    for source in self.sources:
        num = source.get_num_instances()
        if num is None:
            # One unlimited Source makes the whole Entry unlimited.
            # Reset explicitly so a stale value from a previous computation
            # cannot survive a later change of sources (the old code returned
            # without touching self.num_instances).
            self.num_instances = None
            return
        total += num
    # The length of the Entry is the sum of all the Source instances' lengths
    self.num_instances = total
############
# CHECKERS #
############
def check(self):
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Check the Entry:
        1) Run the consistency check of every Source instance
        2) Verify the custom Source instances were correctly checked

    PARAMETERS:
    -----------
    None

    RETURN:
    -------
    :return: None
    """
    # First pass: each Source runs its own checks.
    for source in self.sources:
        source.check()
    # Second pass: confirm custom Sources were correctly checked.
    for source in self.sources:
        source.verify_custom_source()
def check_loader(self):
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Delegate the consistency check to the Entry's Loader instance.

    PARAMETERS:
    -----------
    None

    RETURN:
    -------
    :return: None
    """
    self.loader.check()
############
# CHECKERS #
############
def check_type_sources(self, s: Any, source_index: int) -> None:
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Check that all the items in self.sources are Source instances

    PARAMETERS:
    -----------
    :param s(Any): A supposed Source instance to check
    :param source_index(int): Position of the item in the sources list, used
                              in the error message

    RETURN:
    -------
    :return: None
    """
    # Check if the item inherits the generic Source class
    if isinstance(s, Source) is False:
        # Get Dataset and Entry info
        entry_info = self.get_info()
        dataset_info = self.dataset().get_info()
        # Display Error message (DEEP_NOTIF_FATAL presumably aborts — confirm)
        Notification(DEEP_NOTIF_FATAL,
                     "The source item #%i in Entry %s of the Dataset %s does not inherit the generic Source class of Deeplodocus" %(source_index, entry_info, dataset_info),
                     solutions="Make sure the given Source follows the Deeplodocus Source format")
def check_existing_source(self, index: int):
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Check if a specific source exists using its index

    PARAMETERS:
    -----------
    :param index(int): The index of the Source instance

    RETURN:
    -------
    :return exists(bool): Whether the Source instance exists or not
    """
    # Any index smaller than the number of sources refers to an existing one.
    return index < len(self.sources)
def check_cache_size(self, index: int):
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Check if the item of a specific index is available in the cache

    PARAMETERS:
    -----------
    :param index(int): Index of the item desired in the cache memory

    RETURN:
    -------
    :return (bool): Whether or not the index is in the cache size range
    """
    # Get the length of the cache memory
    # NOTE(review): cache_memory may hold a bare item instead of a list (see
    # __getitem__), in which case len() measures that item — confirm.
    length_cache = len(self.cache_memory)
    # If we have no info on the length of the cache we check the number of output arguments
    if length_cache == 0:
        # Get the real length of the cache memory
        # length_cache = self.calculate_cache_length()
        # An empty cache at this point is considered a hard error
        Notification(DEEP_NOTIF_FATAL, "Cache memory not filled before usage")
    if index < length_cache:
        return True
    else:
        return False
def check_output_sizes_sources(self, sources_output_sizes: List[int]) -> None:
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Check all the Source instances output the same number of arguments

    PARAMETERS:
    -----------
    :param sources_output_sizes(List[int]): List of output sizes for all Source instances in the Entry

    RETURN:
    -------
    :return: None
    """
    expected_size = sources_output_sizes[0]
    for output_size in sources_output_sizes:
        if output_size == expected_size:
            continue
        # Mismatch detected: gather context for the fatal notification.
        entry_info = self.get_info()
        dataset_info = self.dataset().get_info()
        # Display Fatal Error
        Notification(DEEP_NOTIF_FATAL,
                     "All the Source instance in the Entry %s in Dataset %s do not have the same number of output items" % (entry_info, dataset_info),
                     solutions="Please check each Source instance of Entry %s in Dataset %s and make sure they output the same number of arguments in __getitem__() "% (entry_info, dataset_info))
###########
# GETTERS #
###########
def get_source(self, index: int):
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Get a specific Source instance of the Entry after verifying it exists.

    PARAMETERS:
    -----------
    :param index(int): index of the Source instance in the Entry instance

    RETURN:
    -------
    :return source(Source): The requested source
    """
    # Validate the index first (return value is not used here).
    self.check_existing_source(index)
    return self.sources[index]
def get_sources(self):
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Accessor returning the list of Source instances held by the Entry.

    PARAMETERS:
    -----------
    None

    RETURN:
    -------
    :return (List[Source]): The list of Source instances in the Entry
    """
    return self.sources
def get_ref(self):
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Build and return a weak reference to this Entry, so holders (e.g. Loader
    or SourcePointer instances) do not keep it alive.

    PARAMETER:
    ----------
    :param: None

    RETURN:
    -------
    :return (weakref.ref): The weak reference to the entry
    """
    return weakref.ref(self)
def get_info(self):
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Format and return a human-readable identifier for the entry.

    PARAMETERS:
    -----------
    None

    RETURN:
    -------
    :return entry_info(str): The Entry name, or "#<index>" when unnamed
    """
    # Fall back to the numeric index when the Entry has no name.
    if self.name is None:
        return "#" + str(self.index)
    return self.name
def get_index(self) -> int:
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Accessor returning the index of the Entry within the Dataset.

    PARAMETERS:
    -----------
    None

    RETURN:
    -------
    :return (int): The Entry index
    """
    return self.index
def get_type(self):
    """Return the Entry's type attribute.

    NOTE(review): self.type is not assigned in the visible constructor —
    confirm it is set elsewhere before this accessor is used.
    """
    return self.type
def set_source_pointer_weakref(self, source_id: int, entry_weakref: weakref):
    # Forward an Entry weak reference to the Source at the given position so a
    # SourcePointer can call back into its target Entry.
    # NOTE(review): the annotation uses the `weakref` module itself;
    # presumably `weakref.ref` was intended — confirm.
    self.sources[source_id].set_entry_weakref(entry_weakref)
/Card%20Validator-1.0.0.zip/Card Validator-1.0.0/cardvalidator/formatter.py | import re
def is_hipercard(n):
    """Checks if credit card number fits the hipercard format (BIN 606282 or 3841xx)."""
    # Docstring previously claimed "visa format" (copy-paste error); regex is
    # now a raw string to avoid the invalid '\d' escape warning.
    n, length = str(n), len(str(n))
    if 13 <= length <= 19:
        # Match the issuer BIN against the first six digits only.
        if re.match(r'^606282|3841\d{2}', n[:6]):
            return True
    return False
def is_dankort(n):
    """Checks if credit card number fits the dankort format (BIN 5019)."""
    # Docstring previously claimed "visa format"; regex made raw for '\d'.
    n, length = str(n), len(str(n))
    if 13 <= length <= 19:
        if re.match(r'^(5019)\d+$', n):
            return True
    return False
def is_bcglobal(n):
    """Checks if credit card number fits the BCGlobal format (6541/6556 + 12 digits)."""
    # Docstring previously claimed "visa format" (copy-paste error).
    n, length = str(n), len(str(n))
    if 13 <= length <= 19:
        if re.match('^(6541|6556)[0-9]{12}$', n):
            return True
    return False
def is_koreancard(n):
    """Checks if credit card number fits the Korean local card format (9 + 15 digits)."""
    # Docstring previously claimed "visa format" (copy-paste error).
    n, length = str(n), len(str(n))
    if 13 <= length <= 19:
        if re.match('^9[0-9]{15}$', n):
            return True
    return False
def is_carteblanche(n):
    """Checks if credit card number fits the Carte Blanche format (389 + 11 digits)."""
    # Docstring previously claimed "visa format" (copy-paste error).
    n, length = str(n), len(str(n))
    if 13 <= length <= 19:
        if re.match('^389[0-9]{11}$', n):
            return True
    return False
def is_instapayment(n):
    """Checks if credit card number fits the InstaPayment format (BINs 6360, 637-639)."""
    # Docstring previously claimed "visa format"; first regex made raw for '\d'.
    n, length = str(n), len(str(n))
    if 16 <= length <= 19:
        if re.match(r'^(6360)\d+$', n) or re.match('^63[7-9][0-9]{13}$', n):
            return True
    return False
def is_laser(n):
    """Checks if credit card number fits the Laser format (6304/6706/6709/6771)."""
    # Docstring previously claimed "visa format" (copy-paste error).
    n, length = str(n), len(str(n))
    form = ['6706', '6709', '6771']
    if 16 <= length <= 19:
        # Prefix list check kept alongside the regex for backward compatibility.
        if n[:4] in form or re.match('^(6304|6706|6709|6771)[0-9]{12,15}$', n):
            return True
    return False
def is_bcmc(n):
    """Checks if credit card number fits the Bancontact/Mister Cash format (BIN 6703)."""
    # Docstring previously claimed "visa format" (copy-paste error).
    n, length = str(n), len(str(n))
    form = ['6703']
    if 13 <= length <= 19:
        if n[:4] in form:
            return True
    return False
def is_solo(n):
    """Checks if credit card number fits the Solo format (BINs 6334/6767)."""
    # Docstring previously claimed "visa format" (copy-paste error).
    n, length = str(n), len(str(n))
    form = ['6334', '6767']
    if 16 <= length <= 19:
        if n[:4] in form or re.match('^(6334|6767)[0-9]{12}|(6334|6767)[0-9]{14}|(6334|6767)[0-9]{15}$', n):
            return True
    return False
def is_switch(n):
    """Checks if credit card number fits the Switch format."""
    # Docstring previously claimed "visa format" (copy-paste error).
    n, length = str(n), len(str(n))
    form = ['633110', '633312', '633304', '633303', '633301', '633300']
    if 16 <= length <= 19:
        if n[:6] in form or re.match('^(4903|4905|4911|4936|6333|6759)[0-9]{12}|(4903|4905|4911|4936|6333|6759)[0-9]{14}|(4903|4905|4911|4936|6333|6759)[0-9]{15}|564182[0-9]{10}|564182[0-9]{12}|564182[0-9]{13}|633110[0-9]{10}|633110[0-9]{12}|633110[0-9]{13}$', n):
            return True
    return False
def is_jcb(n):
    """Checks if credit card number fits the JCB format."""
    # Docstring previously claimed "visa format"; regex made raw for '\d';
    # also adds the missing space before `or` (original had ")or").
    n, length = str(n), len(str(n))
    if length == 16:
        # Prefix 3528-3588 (strings_between upper bound is exclusive) or the
        # legacy 2131/1800/35xxx patterns.
        if n[:4] in strings_between(3528, 3589) or re.match(r'^(?:2131|1800|35\d{3})\d{11}$', n):
            return True
    return False
def is_unionpay(n):
    """Checks if credit card number fits the UnionPay format (prefix 62 or 88)."""
    # Docstring previously claimed "visa format"; first regex made raw for '\d'.
    n, length = str(n), len(str(n))
    if 12 <= length <= 19:
        if re.match(r'^(62|88)\d+$', n) or re.match('^(62[0-9]{14,17})$', n):
            return True
    return False
def is_visa(n):
    """Checks if credit card number fits the visa format."""
    n, length = str(n), len(str(n))
    # Visa numbers are 13-16 digits long and always begin with a 4.
    if 13 <= length <= 16 and n[0] == '4':
        if re.match('^4[0-9]{12}(?:[0-9]{3})?$', n):
            return True
    return False
def is_dinersclub(n):
    """Checks if credit card number fits the Diners Club format (prefix 30/36/38)."""
    # Docstring previously claimed "visa format" (copy-paste error).
    n, length = str(n), len(str(n))
    form = ['30', '36']
    if 13 <= length <= 19:
        if n[:2] in form or re.match('^3(?:0[0-5]|[68][0-9])[0-9]{11}$', n):
            return True
    return False
def is_cartebancaire(n):
    """Checks if credit card number fits the Carte Bancaire format (BINs 4035/4360)."""
    # Docstring previously claimed "visa format" (copy-paste error).
    n, length = str(n), len(str(n))
    form = ['4035', '4360']
    if 13 <= length <= 19:
        if n[:4] in form:
            return True
    return False
def is_vpay(n):
    """Checks if credit card number fits the V PAY format (prefix 4370 or 482)."""
    # Docstring previously claimed "visa format" (copy-paste error).
    n, length = str(n), len(str(n))
    form = ['4370', '482']
    if 13 <= length <= 19:
        # Both the 4- and 3-digit prefixes are checked against the same list.
        if n[:4] in form or n[:3] in form:
            return True
    return False
def is_visa_electron(n):
    """Checks if credit card number fits the visa electron format."""
    n, length = str(n), len(str(n))
    prefixes = ['026', '508', '844', '913', '917', '405']
    # Visa Electron numbers are exactly 16 digits and begin with a 4.
    if length != 16 or n[0] != '4':
        return False
    return n[1:4] in prefixes or n[1:6] == '17500'
def is_mastercard(n):
    """Checks if credit card number fits the mastercard format."""
    n, length = str(n), len(str(n))
    if length < 16 or length > 19:
        return False
    # Either the two-digit prefix is in the 50-55 range, or the classic
    # 16-digit 51-55 pattern matches.
    if n[:2] in strings_between(50, 56) or re.match('^5[1-5][0-9]{14}$', n):
        return True
    return False
def is_rupay(n):
    """Checks if credit card number fits the RuPay format."""
    # Docstring previously claimed "mastercard format" (copy-paste error).
    n, length = str(n), len(str(n))
    if 13 <= length <= 19:
        prefix = n[:6]
        # RuPay BIN ranges (upper bounds exclusive via strings_between).
        if (prefix in strings_between(508500, 508999)
                or prefix in strings_between(606985, 607984)
                or prefix in strings_between(608001, 608500)
                or prefix in strings_between(652150, 653149)):
            return True
    return False
def is_elo(n):
    """Checks if credit card number fits the Elo format (BIN 5066)."""
    # Docstring previously claimed "mastercard format" (copy-paste error).
    n, length = str(n), len(str(n))
    form = ['5066']
    if 16 <= length <= 19:
        if n[:4] in form:
            return True
    return False
def is_amex(n):
    """Checks if credit card number fits the american express format."""
    n, length = str(n), len(str(n))
    # Amex numbers are exactly 15 digits, starting with 34 or 37.
    if length != 15:
        return False
    if n[0] == '3' and n[1] in ('4', '7'):
        if re.match('^3[47][0-9]{13}$', n):
            return True
    return False
def is_maestro(n):
    """Checks if credit card number fits the maestro format."""
    # Cleaned up: removed stray semicolons and collapsed the duplicated
    # prefix checks (4-, 3- and 2-digit) into a single membership test.
    n, length = str(n), len(str(n))
    prefixes = ['5018', '5020', '5038', '5893', '6304',
                '6759', '6761', '6762', '6763', '6731',
                '06', '6779', '677', '678', '679']
    if 12 <= length <= 19:
        # A number qualifies when any of its 4-, 3- or 2-digit prefixes
        # appears in the known Maestro prefix list.
        if any(n[:k] in prefixes for k in (4, 3, 2)):
            return True
    return False
def is_discover(n):
    """Checks if credit card number fits the discover card format."""
    n, length = str(n), len(str(n))
    # Discover numbers are exactly 16 digits and begin with a 6.
    if length != 16 or n[0] != '6':
        return False
    if n[1:4] == '011' or n[1] == '5':
        return True
    if n[1] == '4' and n[2] in strings_between(4, 10):
        return True
    if n[1:6] in strings_between(22126, 22926):
        return True
    return False
def get_format(n):
    """Gets a list of the formats a credit card number fits."""
    # Ordered (checker, label) table: the resulting list preserves the same
    # label order as the original if/append chain.
    checkers = [
        (is_visa, 'visa'),
        (is_visa_electron, 'visa electron'),
        (is_mastercard, 'mastercard'),
        (is_amex, 'amex'),
        (is_maestro, 'maestro'),
        (is_discover, 'discover'),
        (is_rupay, 'rupay'),
        (is_hipercard, 'hipercard'),
        (is_dankort, 'dankort'),
        (is_instapayment, 'instapayment'),
        (is_laser, 'laser'),
        (is_bcmc, 'bcmc'),
        (is_jcb, 'jcb'),
        (is_unionpay, 'unionpay'),
        (is_solo, 'solo'),
        (is_dinersclub, 'dinersclub'),
        (is_cartebancaire, 'cartebancaire'),
        (is_elo, 'elo'),
        (is_vpay, 'vpay'),
        (is_switch, 'switch'),
        (is_carteblanche, 'carteblanche'),
        (is_bcglobal, 'bcglobal'),
        (is_koreancard, 'koreancard'),
    ]
    return [label for checker, label in checkers if checker(n)]
def strings_between(a, b):
    """Generates a list of strings between a and b (upper bound exclusive)."""
    return [str(value) for value in range(a, b)]
/DI_engine-0.4.9-py3-none-any.whl/ding/framework/parallel.py | import atexit
import os
import random
import time
import traceback
import pickle
from mpire.pool import WorkerPool
from ditk import logging
import tempfile
import socket
from os import path
from typing import Callable, Dict, List, Optional, Tuple, Union, Set
from threading import Thread
from ding.framework.event_loop import EventLoop
from ding.utils.design_helper import SingletonMetaclass
from ding.framework.message_queue import *
from ding.utils.registry_factory import MQ_REGISTRY
# Avoid IPC address conflicts: keep a private Random instance so its seed is
# never fixed by a global random.seed() call elsewhere.
random = random.Random()
class Parallel(metaclass=SingletonMetaclass):
    """
    Overview:
        Per-process singleton that glues together an embedded message queue
        (nng or redis) and a local event loop, allowing several spawned worker
        processes to emit and subscribe events over the network.
    """

    def __init__(self) -> None:
        """
        Overview:
            Initialize an inactive Parallel instance; it is activated later by
            `_run` inside a spawned worker process.
        """
        # Init will only be called once in a process
        self._listener = None  # background thread draining the message queue
        self.is_active = False  # True only once _run has been executed
        self.node_id = None
        self.local_id = None
        self.labels = set()
        self._event_loop = EventLoop("parallel_{}".format(id(self)))
        self._retries = 0  # Retries in auto recovery

    def _run(
            self,
            node_id: int,
            local_id: int,
            n_parallel_workers: int,
            labels: Optional[Set[str]] = None,
            auto_recover: bool = False,
            max_retries: Union[int, float] = float("inf"),
            mq_type: str = "nng",
            startup_interval: int = 1,
            **kwargs
    ) -> None:
        """
        Overview:
            Configure this worker's identity, create the message queue and
            start the daemon listener thread. Remaining kwargs are forwarded
            to the message queue constructor.
        """
        self.node_id = node_id
        self.local_id = local_id
        self.startup_interval = startup_interval
        self.n_parallel_workers = n_parallel_workers
        self.labels = labels or set()
        self.auto_recover = auto_recover
        self.max_retries = max_retries
        self._mq = MQ_REGISTRY.get(mq_type)(**kwargs)
        # Stagger worker start-up to reduce simultaneous connection attempts.
        time.sleep(self.local_id * self.startup_interval)
        self._listener = Thread(target=self.listen, name="mq_listener", daemon=True)
        self._listener.start()
        self.mq_type = mq_type
        self.barrier_runtime = Parallel.get_barrier_runtime()(self.node_id)

    @classmethod
    def runner(
            cls,
            n_parallel_workers: int,
            mq_type: str = "nng",
            attach_to: Optional[List[str]] = None,
            protocol: str = "ipc",
            address: Optional[str] = None,
            ports: Optional[Union[List[int], int]] = None,
            topology: str = "mesh",
            labels: Optional[Set[str]] = None,
            node_ids: Optional[Union[List[int], int]] = None,
            auto_recover: bool = False,
            max_retries: Union[int, float] = float("inf"),
            redis_host: Optional[str] = None,
            redis_port: Optional[int] = None,
            startup_interval: int = 1
    ) -> Callable:
        """
        Overview:
            This method allows you to configure parallel parameters, and now you are still in the parent process.
        Arguments:
            - n_parallel_workers (:obj:`int`): Workers to spawn.
            - mq_type (:obj:`str`): Embedded message queue type, i.e. nng, redis.
            - attach_to (:obj:`Optional[List[str]]`): The node's addresses you want to attach to.
            - protocol (:obj:`str`): Network protocol.
            - address (:obj:`Optional[str]`): Bind address, ip or file path.
            - ports (:obj:`Optional[List[int]]`): Candidate ports.
            - topology (:obj:`str`): Network topology, includes:
                `mesh` (default): fully connected between each other;
                `star`: only connect to the first node;
                `alone`: do not connect to any node, except the node attached to;
            - labels (:obj:`Optional[Set[str]]`): Labels.
            - node_ids (:obj:`Optional[List[int]]`): Candidate node ids.
            - auto_recover (:obj:`bool`): Auto recover from uncaught exceptions from main.
            - max_retries (:obj:`int`): Max retries for auto recover.
            - redis_host (:obj:`str`): Redis server host.
            - redis_port (:obj:`int`): Redis server port.
            - startup_interval (:obj:`int`): Start up interval between each task.
        Returns:
            - _runner (:obj:`Callable`): The wrapper function for main.
        """
        # Snapshot the keyword arguments so they can be forwarded to the
        # per-mq-type argument parser below.
        all_args = locals()
        del all_args["cls"]
        args_parsers = {"nng": cls._nng_args_parser, "redis": cls._redis_args_parser}
        assert n_parallel_workers > 0, "Parallel worker number should bigger than 0"

        def _runner(main_process: Callable, *args, **kwargs) -> None:
            """
            Overview:
                Prepare to run in subprocess.
            Arguments:
                - main_process (:obj:`Callable`): The main function, your program start from here.
            """
            runner_params = args_parsers[mq_type](**all_args)
            params_group = []
            for i, runner_kwargs in enumerate(runner_params):
                runner_kwargs["local_id"] = i
                params_group.append([runner_kwargs, (main_process, args, kwargs)])
            if n_parallel_workers == 1:
                # Single worker: run directly in this process.
                cls._subprocess_runner(*params_group[0])
            else:
                with WorkerPool(n_jobs=n_parallel_workers, start_method="spawn", daemon=False) as pool:
                    # Cleanup the pool just in case the program crashes.
                    atexit.register(pool.__exit__)
                    pool.map(cls._subprocess_runner, params_group)

        return _runner

    @classmethod
    def _nng_args_parser(
            cls,
            n_parallel_workers: int,
            attach_to: Optional[List[str]] = None,
            protocol: str = "ipc",
            address: Optional[str] = None,
            ports: Optional[Union[List[int], int]] = None,
            topology: str = "mesh",
            node_ids: Optional[Union[List[int], int]] = None,
            **kwargs
    ) -> Dict[str, dict]:
        """
        Overview:
            Build one kwargs dict per worker for the nng backend: each worker
            gets its own bind address and an attach list derived from topology.
        """
        attach_to = attach_to or []
        nodes = cls.get_node_addrs(n_parallel_workers, protocol=protocol, address=address, ports=ports)

        def cleanup_nodes():
            # Remove leftover ipc socket files at interpreter exit.
            for node in nodes:
                protocol, file_path = node.split("://")
                if protocol == "ipc" and path.exists(file_path):
                    os.remove(file_path)

        atexit.register(cleanup_nodes)

        def topology_network(i: int) -> List[str]:
            # Addresses worker i should connect to, per the chosen topology.
            if topology == "mesh":
                return nodes[:i] + attach_to
            elif topology == "star":
                return nodes[:min(1, i)] + attach_to
            elif topology == "alone":
                return attach_to
            else:
                raise ValueError("Unknown topology: {}".format(topology))

        runner_params = []
        candidate_node_ids = cls.padding_param(node_ids, n_parallel_workers, 0)
        for i in range(n_parallel_workers):
            runner_kwargs = {
                **kwargs,
                "node_id": candidate_node_ids[i],
                "listen_to": nodes[i],
                "attach_to": topology_network(i),
                "n_parallel_workers": n_parallel_workers,
            }
            runner_params.append(runner_kwargs)
        return runner_params

    @classmethod
    def _redis_args_parser(cls, n_parallel_workers: int, node_ids: Optional[Union[List[int], int]] = None, **kwargs):
        """
        Overview:
            Build one kwargs dict per worker for the redis backend (no
            per-worker addresses needed, only node ids).
        """
        runner_params = []
        candidate_node_ids = cls.padding_param(node_ids, n_parallel_workers, 0)
        for i in range(n_parallel_workers):
            runner_kwargs = {**kwargs, "n_parallel_workers": n_parallel_workers, "node_id": candidate_node_ids[i]}
            runner_params.append(runner_kwargs)
        return runner_params

    @classmethod
    def _subprocess_runner(cls, runner_kwargs: dict, main_params: Tuple[Union[List, Dict]]) -> None:
        """
        Overview:
            Really run in subprocess.
        Arguments:
            - runner_kwargs (:obj:`dict`): Keyword arguments for `_run`.
            - main_params (:obj:`Tuple[Union[List, Dict]]`): Args and kwargs for main function.
        """
        logging.getLogger().setLevel(logging.INFO)
        main_process, args, kwargs = main_params

        with Parallel() as router:
            router.is_active = True
            router._run(**runner_kwargs)
            time.sleep(0.3)  # Waiting for network pairing
            router._supervised_runner(main_process, *args, **kwargs)

    def _supervised_runner(self, main: Callable, *args, **kwargs) -> None:
        """
        Overview:
            Run in supervised mode.
        Arguments:
            - main (:obj:`Callable`): Main function.
        """
        if self.auto_recover:
            while True:
                try:
                    main(*args, **kwargs)
                    break
                except Exception as e:
                    if self._retries < self.max_retries:
                        logging.warning(
                            "Auto recover from exception: {}, node: {}, retries: {}".format(
                                e, self.node_id, self._retries
                            )
                        )
                        logging.warning(traceback.format_exc())
                        self._retries += 1
                    else:
                        logging.warning(
                            "Exceed the max retries, node: {}, retries: {}, max_retries: {}".format(
                                self.node_id, self._retries, self.max_retries
                            )
                        )
                        raise e
        else:
            main(*args, **kwargs)

    @classmethod
    def get_node_addrs(
            cls,
            n_workers: int,
            protocol: str = "ipc",
            address: Optional[str] = None,
            ports: Optional[Union[List[int], int]] = None
    ) -> None:
        """
        Overview:
            Generate one bind address per worker, either ipc file paths in the
            temp directory or tcp address:port pairs.
        """
        if protocol == "ipc":
            # Random 4-char run name keeps concurrent runs from colliding.
            node_name = "".join(random.choices("abcdefghijklmnopqrstuvwxyz0123456789", k=4))
            tmp_dir = tempfile.gettempdir()
            nodes = ["ipc://{}/ditask_{}_{}.ipc".format(tmp_dir, node_name, i) for i in range(n_workers)]
        elif protocol == "tcp":
            address = address or cls.get_ip()
            ports = cls.padding_param(ports, n_workers, 50515)
            assert len(ports) == n_workers, "The number of ports must be the same as the number of workers, \
now there are {} ports and {} workers".format(len(ports), n_workers)
            nodes = ["tcp://{}:{}".format(address, port) for port in ports]
        else:
            raise Exception("Unknown protocol {}".format(protocol))
        return nodes

    @classmethod
    def padding_param(cls, int_or_list: Optional[Union[List[int], int]], n_max: int, start_value: int) -> List[int]:
        """
        Overview:
            Padding int or list param to the length of n_max.
        Arguments:
            - int_or_list (:obj:`Optional[Union[List[int], int]]`): Int or list typed value.
            - n_max (:obj:`int`): Max length.
            - start_value (:obj:`int`): Start from value.
        """
        # NOTE(review): despite the List[int] annotation, an int or None input
        # yields a range object (indexable, len-able) — confirm callers only
        # index/iterate it.
        param = int_or_list
        if isinstance(param, List) and len(param) == 1:
            param = param[0]  # List with only 1 element is equal to int
        if isinstance(param, int):
            param = range(param, param + n_max)
        else:
            param = param or range(start_value, start_value + n_max)
        return param

    def listen(self):
        """
        Overview:
            Message-queue loop run on the daemon listener thread: receive
            (topic, payload) pairs until the queue is closed, dispatching each
            one to the local event loop.
        """
        self._mq.listen()
        while True:
            if not self._mq:
                break
            msg = self._mq.recv()
            # msg is none means that the message queue is no longer being listened to,
            # especially if the message queue is already closed
            if not msg:
                break
            topic, msg = msg
            self._handle_message(topic, msg)

    def on(self, event: str, fn: Callable) -> None:
        """
        Overview:
            Register an remote event on parallel instance, this function will be executed \
            when a remote process emit this event via network.
        Arguments:
            - event (:obj:`str`): Event name.
            - fn (:obj:`Callable`): Function body.
        """
        if self.is_active:
            self._mq.subscribe(event)
        self._event_loop.on(event, fn)

    def once(self, event: str, fn: Callable) -> None:
        """
        Overview:
            Register an remote event which will only call once on parallel instance,
            this function will be executed when a remote process emit this event via network.
        Arguments:
            - event (:obj:`str`): Event name.
            - fn (:obj:`Callable`): Function body.
        """
        if self.is_active:
            self._mq.subscribe(event)
        self._event_loop.once(event, fn)

    def off(self, event: str) -> None:
        """
        Overview:
            Unregister an event.
        Arguments:
            - event (:obj:`str`): Event name.
        """
        if self.is_active:
            self._mq.unsubscribe(event)
        self._event_loop.off(event)

    def emit(self, event: str, *args, **kwargs) -> None:
        """
        Overview:
            Send an remote event via network to subscribed processes.
        Arguments:
            - event (:obj:`str`): Event name.
        """
        if self.is_active:
            payload = {"a": args, "k": kwargs}
            try:
                data = pickle.dumps(payload, protocol=pickle.HIGHEST_PROTOCOL)
            except AttributeError as e:
                logging.error("Arguments are not pickable! Event: {}, Args: {}".format(event, args))
                raise e
            self._mq.publish(event, data)

    def _handle_message(self, topic: str, msg: bytes) -> None:
        """
        Overview:
            Recv and parse payload from other processes, and call local functions.
        Arguments:
            - topic (:obj:`str`): Received topic.
            - msg (:obj:`bytes`): Received message.
        """
        event = topic
        if not self._event_loop.listened(event):
            logging.debug("Event {} was not listened in parallel {}".format(event, self.node_id))
            return
        try:
            payload = pickle.loads(msg)
        except Exception as e:
            logging.error("Error when unpacking message on node {}, msg: {}".format(self.node_id, e))
            return
        self._event_loop.emit(event, *payload["a"], **payload["k"])

    @classmethod
    def get_ip(cls):
        """
        Overview:
            Best-effort detection of this host's outward-facing IP via a UDP
            socket trick; falls back to 127.0.0.1.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            # doesn't even have to be reachable
            s.connect(('10.255.255.255', 1))
            ip = s.getsockname()[0]
        except Exception:
            ip = '127.0.0.1'
        finally:
            s.close()
        return ip

    def get_attch_to_len(self) -> int:
        """
        Overview:
            Get the length of the 'attach_to' list of message queue.
        Returns:
            int: the length of the self._mq.attach_to. Returns 0 if self._mq is not initialized
        """
        # NOTE(review): method name contains a typo ('attch'); kept for
        # backward compatibility with existing callers.
        if self._mq:
            if hasattr(self._mq, 'attach_to'):
                return len(self._mq.attach_to)
        return 0

    def __enter__(self) -> "Parallel":
        """Context-manager entry: return self (stop() is called on exit)."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: always tear down the queue and listener."""
        self.stop()

    def stop(self):
        """
        Overview:
            Deactivate the instance, close the message queue, join the
            listener thread and stop the event loop.
        """
        logging.info("Stopping parallel worker on node: {}".format(self.node_id))
        self.is_active = False
        # Give in-flight handlers a brief moment before tearing down.
        time.sleep(0.03)
        if self._mq:
            self._mq.stop()
            self._mq = None
        if self._listener:
            self._listener.join(timeout=1)
            self._listener = None
        self._event_loop.stop()

    @classmethod
    def get_barrier_runtime(cls):
        # We get the BarrierRuntime object in the closure to avoid circular import.
        from ding.framework.middleware.barrier import BarrierRuntime
        return BarrierRuntime
/FlexGet-3.9.6-py3-none-any.whl/flexget/utils/parsers/movie.py | import re
from datetime import datetime
from loguru import logger
from flexget.utils import qualities
from flexget.utils.parsers.parser import TitleParser
from flexget.utils.tools import str_to_int
logger = logger.bind(name='movieparser')
def diff_pos(string1, string2):
    """Return the first index at which *string1* and *string2* differ.

    Returns ``None`` when *string1* is a prefix of *string2* (including
    equal strings) — i.e. when no difference is found within the length
    of *string1*.
    """
    limit = len(string2)
    for idx in range(len(string1)):
        if idx >= limit or string1[idx] != string2[idx]:
            return idx
    return None
class MovieParser(TitleParser):
    """Parses a movie release title into name, year, quality and
    proper count.

    Call :meth:`parse`; results are exposed via the ``name``, ``year``,
    ``quality`` and ``proper_count`` attributes.
    """

    def __init__(self):
        # Raw title to parse when parse() is called without data.
        self.data = None
        self.reset()
        TitleParser.__init__(self)

    def reset(self):
        """Clear all parsing results so the parser instance can be reused."""
        # parsing results
        self.name = None
        self.year = None
        self.year_pos = None
        self.quality = qualities.Quality()
        self.proper_count = 0

    def __str__(self):
        return f"<MovieParser(name={self.name},year={self.year},quality={self.quality})>"

    def parse(self, data=None):
        """Parse movie name. Populates name, year, quality and proper_count attributes"""
        # Reset before parsing, so the parser can be reused.
        self.reset()
        if data is None:
            data = self.data
        # Move anything in leading brackets to the end
        data = re.sub(r'^\[(.*?)\](.*)', r'\2 \1', data)
        for char in '[]()_,.':
            data = data.replace(char, ' ')
        # if there are no spaces
        if data.find(' ') == -1:
            data = data.replace('-', ' ')
        # remove unwanted words (imax, ..)
        # Bugfix: remove_words returns the cleaned string (str is
        # immutable), so the result must be assigned back — otherwise
        # the removal is a silent no-op.
        data = self.remove_words(data, self.remove)
        data = self.strip_spaces(data)
        # split to parts
        parts = data.split(' ')
        cut_part = 256
        all_caps = True
        for part_pos, part in enumerate(parts):
            cut = False
            # Don't let the first word be cutoff word
            if part_pos < 1:
                continue
            # check for year
            num = str_to_int(part)
            if num is not None:
                if 1930 < num <= datetime.now().year:
                    if self.year_pos == cut_part:
                        # Looks like a year, but we already set the cutpoint to a year, let's move it forward
                        cut_part = part_pos
                    self.year = num
                    self.year_pos = part_pos
                    cut = True
            # Don't consider all caps words cut words if the whole title has been all caps
            if not part.isupper():
                all_caps = False
            # if length > 3 and whole word in uppers, consider as cut word (most likely a group name)
            if len(part) > 3 and part.isupper() and part.isalpha() and not all_caps:
                cut = True
            # check for cutoff words
            if part.lower() in self.cutoffs:
                cut = True
            # check for propers
            if part.lower() in self.propers:
                # 'real' and 'final' are too common in movie parsers, only cut if it comes after year
                if part.lower() not in ['real', 'final'] or self.year:
                    self.proper_count += 1
                    cut = True
            # update cut position
            # Bugfix: compare the current position (part_pos), not
            # parts.index(part) — index() returns the *first* occurrence
            # of a duplicated token, which could wrongly pass this guard
            # and move the cut point forward.
            if cut and part_pos < cut_part:
                cut_part = part_pos
        if cut_part != 256:
            logger.debug('parts: {}, cut is: {}', parts, parts[cut_part])
        # calculate cut positon from cut_part
        abs_cut = len(' '.join(parts[:cut_part]))
        logger.debug(
            'after parts check, cut data would be: `{}` abs_cut: {}', data[:abs_cut], abs_cut
        )
        # parse quality
        quality = qualities.Quality(data)
        if quality:
            self.quality = quality
            # remaining string is same as data but quality information removed
            # find out position where there is first difference, this is earliest
            # quality bit, anything after that has no relevance to the movie name
            dp = diff_pos(data, quality.clean_text)
            if dp is not None:
                logger.debug('quality start: {}', dp)
                if dp < abs_cut:
                    logger.debug('quality cut is even shorter')
                    abs_cut = dp
        # make cut
        data = data[:abs_cut].strip()
        logger.debug('data cut to `{}` - this will be the name', data)
        # save results
        self.name = data
/MDP-3.6.tar.gz/MDP-3.6/mdp/nodes/gsfa_nodes.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import mdp
from mdp import numx, NodeException, TrainingException
from mdp.utils import (mult, symeig, pinv, SymeigException)
from mdp.nodes import GeneralExpansionNode
# For Python 2 & 3 compatibility
try:
    basestring
except NameError:
    # Python 3: 'basestring' no longer exists; alias it to 'str' so the
    # isinstance checks below work on both interpreter majors.
    basestring = str
class GSFANode(mdp.Node):
    """ This node implements "Graph-Based SFA (GSFA)", which is the main
    component of hierarchical GSFA (HGSFA).
    For further information, see: Escalante-B A.-N., Wiskott L, "How to solve
    classification and regression problems on high-dimensional data with a
    supervised extension of Slow Feature Analysis". Journal of Machine
    Learning Research 14:3683-3719, 2013
    """
    def __init__(self, input_dim=None, output_dim=None, dtype=None,
                 block_size=None, train_mode=None, verbose=False):
        """Initializes the GSFA node, which is a subclass of the SFA node.
        The parameters block_size and train_mode are not necessary and it is
        recommended to skip them here and provide them as parameters to the
        train method.
        See the _train method for details.
        """
        super(GSFANode, self).__init__(input_dim, output_dim, dtype)
        self.pinv = None  # cached pseudo-inverse of self.sf (built lazily by _inverse)
        self.block_size = block_size  # default group/cluster size for _train
        self.train_mode = train_mode  # default training mode for _train
        self.verbose = verbose
        self._symeig = symeig  # generalized eigensolver used in _stop_training
        self._covdcovmtx = CovDCovMatrix()  # accumulates Cov/DCov statistics
        # SFA eigenvalues and eigenvectors, will be set after training
        self.d = None
        self.sf = None  # second index for outputs
        self.avg = None
        self._bias = None  # avg multiplied with sf
def _train(self, x, block_size=None, train_mode=None, node_weights=None,
           edge_weights=None, verbose=None):
    """This is the main training function of GSFA.

    x: training data (each sample is a row)

    The semantics of the remaining parameters depends on the training mode
    (train_mode) parameter.

    in order to train as in standard SFA:
        set train_mode="regular" (the scale of the features should be
        corrected afterwards)
    in order to train using the clustered graph:
        set train_mode="clustered". The cluster size is given by block_size
        (integer). Variable cluster sizes are possible if block_size is a
        list of integers. Samples belonging to the same class should be
        adjacent.
    in order to train for classification:
        set train_mode=("classification", labels, weight), where labels is
        an array with the class information and weight is a scalar value
        (e.g., 1.0).
    in order to train for regression:
        set train_mode=("serial_regression#", labels, weight), where # is
        an integer that specifies the block size used by a serial graph,
        labels is an array with the label information and weight is a
        scalar value.
    in order to train using a graph without edges:
        set train_mode="unlabeled".
    in order to train using the serial graph:
        set train_mode="serial", and use block_size (integer) to specify
        the group size.
    in order to train using the mixed graph:
        set train_mode="mixed", and use block_size (integer) to specify
        the group size.
    in order to train using an arbitrary user-provided graph:
        set train_mode="graph", specify the node_weights (numpy 1D array),
        and the edge_weights (numpy 2D array).

    train_mode may also be a list of any of the above; each entry is
    applied in sequence to the same data x.
    """
    if train_mode is None:
        train_mode = self.train_mode
    if verbose is None:
        verbose = self.verbose
    if block_size is None:
        if verbose:
            print("parameter block_size was not provided ",
                  "using default value self.block_size")
        block_size = self.block_size
    self.set_input_dim(x.shape[1])
    if verbose:
        print("train_mode=", train_mode)
    # Several graphs may be accumulated on the same data: normalize to a
    # list of modes and process each one below.
    if isinstance(train_mode, list):
        train_modes = train_mode
    else:
        train_modes = [train_mode]
    for train_mode in train_modes:
        if train_mode is None:
            train_mode = "regular"
        # Tuple modes carry labels: (method_name, labels, weight).
        if isinstance(train_mode, tuple):
            method = train_mode[0]
            labels = train_mode[1]
            weight = train_mode[2]
            if method == "classification":
                if verbose:
                    print("update classification")
                # Sort samples by label so each class forms a contiguous
                # block, then use the clustered-graph update.
                ordering = numx.argsort(labels)
                x2 = x[ordering, :]
                unique_labels = numx.unique(labels)
                unique_labels.sort()
                block_sizes = []
                for label in unique_labels:
                    block_sizes.append((labels == label).sum())
                self._covdcovmtx.update_clustered(x2,
                                                 block_sizes=block_sizes,
                                                 weight=weight)
            elif method.startswith("serial_regression"):
                # Block size is encoded in the method name suffix.
                block_size = int(method[len("serial_regression"):])
                if verbose:
                    print("update serial_regression, block_size=",
                          block_size)
                ordering = numx.argsort(labels)
                x2 = x[ordering, :]
                self._covdcovmtx.update_serial(x2, block_size=block_size,
                                              weight=weight)
            else:
                er = "method unknown: %s" % (str(method))
                raise ValueError(er)
        # String modes select a pre-defined graph on the raw sample order.
        elif isinstance(train_mode, basestring):
            if train_mode == 'unlabeled':
                if verbose:
                    print("update_unlabeled")
                self._covdcovmtx.update_unlabeled(x, weight=1.0)
            elif train_mode == "regular":
                if verbose:
                    print("update_regular")
                self._covdcovmtx.update_regular(x, weight=1.0)
            elif train_mode == 'clustered':
                if verbose:
                    print("update_clustered")
                self._covdcovmtx.update_clustered(x,
                                                  block_sizes=block_size,
                                                  weight=1.0)
            elif train_mode.startswith('compact_classes'):
                if verbose:
                    print("update_compact_classes:", train_mode)
                # Number of classes J is encoded in the mode name suffix.
                J = int(train_mode[len('compact_classes'):])
                self._covdcovmtx.update_compact_classes(
                    x, block_sizes=block_size, Jdes=J)
            elif train_mode == 'serial':
                if verbose:
                    print("update_serial")
                self._covdcovmtx.update_serial(x, block_size=block_size)
            elif train_mode.startswith('DualSerial'):
                if verbose:
                    print("updateDualSerial")
                num_blocks = len(x) // block_size
                dual_num_blocks = int(train_mode[len("DualSerial"):])
                dual_block_size = len(x) // dual_num_blocks
                chunk_size = block_size // dual_num_blocks
                if verbose:
                    print("dual_num_blocks = ", dual_num_blocks)
                self._covdcovmtx.update_serial(x, block_size=block_size)
                # Build a transposed (block <-> chunk) reordering of x and
                # add a second serial graph on it. NOTE(review): weight=0.0
                # below means the dual pass contributes nothing — presumably
                # intentional (placeholder); confirm against callers.
                x2 = numx.zeros_like(x)
                for i in range(num_blocks):
                    for j in range(dual_num_blocks):
                        x2[j * dual_block_size + i * chunk_size:
                           j * dual_block_size + (i + 1) * chunk_size] = \
                            x[i * block_size + j * chunk_size:
                              i * block_size + (j + 1) * chunk_size]
                self._covdcovmtx.update_serial(x2,
                                               block_size=dual_block_size,
                                               weight=0.0)
            elif train_mode == 'mixed':
                if verbose:
                    print("update mixed")
                bs = block_size
                # First and last blocks get double node weight, plus a
                # serial graph over the whole sequence.
                self._covdcovmtx.update_clustered_homogeneous_block_sizes(
                    x[0:bs], weight=2.0, block_size=block_size)
                self._covdcovmtx.update_clustered_homogeneous_block_sizes(
                    x[bs:-bs], weight=1.0, block_size=block_size)
                self._covdcovmtx.update_clustered_homogeneous_block_sizes(
                    x[-bs:], weight=2.0, block_size=block_size)
                self._covdcovmtx.update_serial(x, block_size=block_size)
            elif train_mode[0:6] == 'window':
                window_halfwidth = int(train_mode[6:])
                if verbose:
                    print("Window (%d)" % window_halfwidth)
                self._covdcovmtx.update_sliding_window(
                    x, weight=1.0, window_halfwidth=window_halfwidth)
            elif train_mode[0:7] == 'fwindow':
                window_halfwidth = int(train_mode[7:])
                if verbose:
                    print("Fast Window (%d)" % window_halfwidth)
                self._covdcovmtx.update_fast_sliding_window(
                    x, weight=1.0, window_halfwidth=window_halfwidth)
            elif train_mode[0:13] == 'mirror_window':
                window_halfwidth = int(train_mode[13:])
                if verbose:
                    print("Mirroring Window (%d)" % window_halfwidth)
                self._covdcovmtx.update_mirroring_sliding_window(
                    x, weight=1.0, window_halfwidth=window_halfwidth)
            elif train_mode[0:14] == 'smirror_window':
                window_halfwidth = int(train_mode[14:])
                if verbose:
                    print("Slow Mirroring Window (%d)" % window_halfwidth)
                self._covdcovmtx.update_slow_mirroring_sliding_window(
                    x, weight=1.0, window_halfwidth=window_halfwidth)
            elif train_mode == 'graph':
                if verbose:
                    print("update_graph")
                self._covdcovmtx.update_graph(
                    x, node_weights=node_weights,
                    edge_weights=edge_weights, weight=1.0)
            elif train_mode == 'graph_old':
                if verbose:
                    print("update_graph_old")
                self._covdcovmtx.update_graph_old(
                    x, node_weights=node_weights,
                    edge_weights=edge_weights, weight=1.0)
            elif train_mode == 'smart_unlabeled2':
                # Merge unlabeled samples x into statistics accumulated
                # from previously seen labeled data. The labeled edges are
                # first down-scaled, then labeled<->unlabeled and
                # unlabeled<->unlabeled connections are added.
                if verbose:
                    print("smart_unlabeled2")
                N2 = x.shape[0]
                N1 = Q1 = self._covdcovmtx.num_samples * 1.0
                R1 = self._covdcovmtx.num_diffs * 1.0
                sum_x_labeled_2D = self._covdcovmtx.sum_x.reshape((1, -1))
                sum_prod_x_labeled = self._covdcovmtx.sum_prod_x
                if verbose:
                    print("Original sum_x[0]/num_samples=",
                          self._covdcovmtx.sum_x[0] /
                          self._covdcovmtx.num_samples)
                weight_fraction_unlabeled = 0.2
                additional_weight_unlabeled = -0.025
                w1 = Q1 * 1.0 / R1 * (1.0 - weight_fraction_unlabeled)
                if verbose:
                    print("weight_fraction_unlabeled=",
                          weight_fraction_unlabeled)
                    print("N1=Q1=", Q1, "R1=", R1, "w1=", w1)
                    print("")
                self._covdcovmtx.sum_prod_diffs *= w1
                self._covdcovmtx.num_diffs *= w1
                if verbose:
                    print("After diff scaling: num_samples=",
                          self._covdcovmtx.num_samples)
                    print("num_diffs=", self._covdcovmtx.num_diffs, "\n")
                node_weights2 = Q1 * weight_fraction_unlabeled / N2
                w12 = node_weights2 / N1  # One directional weights
                if verbose:
                    print("w12 (one dir)", w12)
                sum_x_unlabeled_2D = x.sum(axis=0).reshape((1, -1))
                sum_prod_x_unlabeled = mdp.utils.mult(x.T, x)
                self._covdcovmtx.add_samples(sum_prod_x_unlabeled,
                                             sum_x_unlabeled_2D.flatten(),
                                             num_samples=N2,
                                             weight=node_weights2)
                if verbose:
                    print("After adding unlabeled nodes: num_samples=",
                          self._covdcovmtx.num_samples)
                    print("num_diffs=", self._covdcovmtx.num_diffs)
                    print("sum_x[0]/num_samples=",
                          self._covdcovmtx.sum_x[0] /
                          self._covdcovmtx.num_samples)
                    print("")
                    print("N2=", N2, "node_weights2=", node_weights2)
                # Expanded form of sum over all labeled-unlabeled pairs of
                # (x_l - x_u)(x_l - x_u)^T.
                additional_diffs = sum_prod_x_unlabeled * N1 - \
                    mdp.utils.mult(sum_x_labeled_2D.T,
                                   sum_x_unlabeled_2D) - \
                    mdp.utils.mult(sum_x_unlabeled_2D.T,
                                   sum_x_labeled_2D) + \
                    sum_prod_x_labeled * N2
                if verbose:
                    print("w12=", w12, "additional_diffs=",
                          additional_diffs)
                # accounts for both directions
                self._covdcovmtx.add_diffs(2 * additional_diffs,
                                           2 * N1 * N2,
                                           weight=w12)
                if verbose:
                    print("After mixed diff addition: num_samples=",
                          self._covdcovmtx.num_samples)
                    print("num_diffs=", self._covdcovmtx.num_diffs)
                    print("sum_x[0]/num_samples=",
                          self._covdcovmtx.sum_x[0] /
                          self._covdcovmtx.num_samples)
                    print("\n Adding complete graph for unlabeled data")
                self._covdcovmtx.update_clustered_homogeneous_block_sizes(
                    x, weight=additional_weight_unlabeled, block_size=N2)
                if verbose:
                    print("After complete x2 addition: num_samples=",
                          self._covdcovmtx.num_samples)
                    print("num_diffs=", self._covdcovmtx.num_diffs)
                    print("sum_x[0]/num_samples=",
                          self._covdcovmtx.sum_x[0] /
                          self._covdcovmtx.num_samples)
            elif train_mode == 'smart_unlabeled3':
                # Variant of smart_unlabeled2 that assumes the labeled data
                # formed a clustered graph with C classes and re-balances
                # node/edge weights accordingly.
                if verbose:
                    print("smart_unlabeled3")
                N2 = x.shape[0]
                N1 = Q1 = self._covdcovmtx.num_samples * 1.0
                R1 = self._covdcovmtx.num_diffs * 1.0
                if verbose:
                    print("N1=Q1=", Q1, "R1=", R1, "N2=", N2)
                v = 2.0 ** (-9.5)  # weight of unlabeled samples
                C = 10.0  # Clustered graph assumed, with C classes,
                # and each class having N1/C samples
                if verbose:
                    print("v=", v, "C=", C)
                v_norm = v / C
                N1_norm = N1 / C
                sum_x_labeled = self._covdcovmtx.sum_x.reshape((1, -1))
                sum_prod_x_labeled = self._covdcovmtx.sum_prod_x + 0.0
                if verbose:
                    print("Original (Diag(C')/num_diffs.avg)**0.5 =",
                          ((numx.diagonal(self._covdcovmtx.sum_prod_diffs)
                            / self._covdcovmtx.num_diffs).mean()) ** 0.5)
                weight_adjustment = (N1_norm - 1) / \
                                    (N1_norm - 1 + v_norm * N2)
                if verbose:
                    print("weight_adjustment =", weight_adjustment,
                          "w11=", 1 / (N1_norm - 1 + v_norm * N2))
                # Down-scale all labeled statistics uniformly.
                self._covdcovmtx.sum_x *= weight_adjustment
                self._covdcovmtx.sum_prod_x *= weight_adjustment
                self._covdcovmtx.num_samples *= weight_adjustment
                self._covdcovmtx.sum_prod_diffs *= weight_adjustment
                self._covdcovmtx.num_diffs *= weight_adjustment
                node_weights_complete_1 = weight_adjustment
                if verbose:
                    print("num_diffs (w11) after weight_adjustment=",
                          self._covdcovmtx.num_diffs)
                w11 = 1 / (N1_norm - 1 + v_norm * N2)
                if verbose:
                    print("Adjusted (Diag(C')/num_diffs.avg)**0.5 =",
                          ((numx.diagonal(self._covdcovmtx.sum_prod_diffs)
                            / self._covdcovmtx.num_diffs).mean()) ** 0.5)
                    print("")
                # Connections within unlabeled data (notice that C times
                # this is equivalent to v*v/(N1+v*(N2-1)) once)
                w22 = 0.5 * 2 * v_norm * v_norm / (N1_norm +
                                                   v_norm * (N2 - 1))
                sum_x_unlabeled = x.sum(axis=0).reshape((1, -1))
                sum_prod_x_unlabeled = mdp.utils.mult(x.T, x)
                node_weights_complete_2 = w22 * (N2 - 1) * C
                self._covdcovmtx.update_clustered_homogeneous_block_sizes(
                    x, weight=node_weights_complete_2, block_size=N2)
                if verbose:
                    print("w22=", w22, "node_weights_complete_2*N2=",
                          node_weights_complete_2 * N2)
                    print("After adding complete 2: num_samples=",
                          self._covdcovmtx.num_samples)
                    print("num_diffs=", self._covdcovmtx.num_diffs)
                    print(" (Diag(C')/num_diffs.avg)**0.5 =",
                          ((numx.diagonal(self._covdcovmtx.sum_prod_diffs)
                            / self._covdcovmtx.num_diffs).mean()) ** 0.5)
                    print("")
                # Connections between labeled and unlabeled samples
                # Accounts for transitions in both directions
                w12 = (2 * 0.5 * v_norm * (1 / (N1_norm - 1 + v_norm * N2)
                                           + 1 / (N1_norm +
                                                  v_norm * (N2 - 1))))
                if verbose:
                    print("(twice) w12=", w12)
                sum_prod_diffs_mixed = \
                    w12 * (N1 * sum_prod_x_unlabeled -
                           (mdp.utils.mult(sum_x_labeled.T, sum_x_unlabeled) +
                            mdp.utils.mult(sum_x_unlabeled.T, sum_x_labeled)) +
                           N2 * sum_prod_x_labeled)
                self._covdcovmtx.sum_prod_diffs += sum_prod_diffs_mixed
                # w12 is already counted twice
                self._covdcovmtx.num_diffs += C * N1_norm * N2 * w12
                if verbose:
                    print(" (Diag(mixed)/num_diffs.avg)**0.5 =",
                          ((numx.diagonal(sum_prod_diffs_mixed) /
                            (C * N1_norm * N2 * w12)).mean()) ** 0.5)
                # Additional adjustment for node weights of unlabeled data
                missing_weight_unlabeled = v - node_weights_complete_2
                missing_weight_labeled = 1.0 - node_weights_complete_1
                if verbose:
                    print("missing_weight_unlabeled=",
                          missing_weight_unlabeled)
                    print("Before two final add_samples: num_samples=",
                          self._covdcovmtx.num_samples)
                    print("num_diffs=", self._covdcovmtx.num_diffs)
                self._covdcovmtx.add_samples(sum_prod_x_unlabeled,
                                             sum_x_unlabeled, N2,
                                             missing_weight_unlabeled)
                self._covdcovmtx.add_samples(sum_prod_x_labeled,
                                             sum_x_labeled, N1,
                                             missing_weight_labeled)
                if verbose:
                    print("Final transformation: num_samples=",
                          self._covdcovmtx.num_samples)
                    print("num_diffs=", self._covdcovmtx.num_diffs)
                    print("v11=%f+%f, v22=%f+%f" %
                          (weight_adjustment,
                           missing_weight_labeled,
                           node_weights_complete_2,
                           missing_weight_unlabeled))
                    print("w11=%f, w22=%f, w12(two ways)=%f" % (w11, w22,
                                                                w12))
                    print("(N1/C-1)*w11=%f, N2*w12 (one way)=%f" %
                          ((N1 / C - 1) * w11, N2 * w12 / 2))
                    print("(N2-1)*w22*C=%f, N1*w12 (one way)=%f" %
                          ((N2 - 1) * w22 * C, N1 * w12 / 2))
                    print("(Diag(C')/num_diffs.avg)**0.5 =",
                          ((numx.diagonal(self._covdcovmtx.sum_prod_diffs)
                            / self._covdcovmtx.num_diffs).mean()) ** 0.5)
            elif train_mode == 'ignore_data':
                if verbose:
                    print("Training graph: ignoring data")
            else:
                ex = "Unknown training method" + train_mode
                raise ValueError(ex)
        else:
            ex = "Unknown training method" + str(train_mode)
            raise ValueError(ex)
def _stop_training(self, debug=False, verbose=None):
    """Finalize training: fix the accumulated statistics and solve the
    generalized eigenvalue problem of GSFA.

    Sets self.d (eigenvalues, ascending), self.sf (feature vectors),
    self.avg and self._bias.

    :param debug: when True, keep self._covdcovmtx for inspection and do
        not let symeig overwrite its inputs.
    :param verbose: overrides self.verbose when not None.
    :raises NodeException: when symeig fails or yields negative
        eigenvalues (covariance matrices may be singular).
    """
    if verbose is None:
        verbose = self.verbose
    if verbose:
        print("stop_training: self.block_size=", self.block_size)
        print("self._covdcovmtx.num_samples = ",
              self._covdcovmtx.num_samples)
        print("self._covdcovmtx.num_diffs= ", self._covdcovmtx.num_diffs)
    cov_mtx, self.avg, dcov_mtx = self._covdcovmtx.fix()
    if verbose:
        print("Finishing GSFA training: ", self._covdcovmtx.num_samples)
        print(" num_samples, and ", self._covdcovmtx.num_diffs,
              " num_diffs")
        # Bugfix: dcov_mtx is the local returned by fix(); GSFANode has no
        # dcov_mtx attribute, so the former self.dcov_mtx raised
        # AttributeError whenever verbose was enabled.
        print("DCov[0:3,0:3] is", dcov_mtx[0:3, 0:3])
    rng = self._set_range()
    # Solve the generalized eigenvalue problem
    # the eigenvalues are already ordered in ascending order
    try:
        if verbose:
            print("***Range used=", rng)
        self.d, self.sf = self._symeig(dcov_mtx, cov_mtx,
                                       range=rng, overwrite=(not debug))
        d = self.d
        # check that we get only non-negative eigenvalues
        if d.min() < 0:
            raise SymeigException("Got negative eigenvalues: %s." % str(d))
    except SymeigException as exception:
        ex = str(exception) + "\n Covariance matrices may be singular."
        raise NodeException(ex)
    if not debug:
        # Release the (potentially large) accumulator once training ends.
        del self._covdcovmtx
        self._covdcovmtx = None
    self._bias = mult(self.avg, self.sf)
    if verbose:
        print("shape of GSFANode.sf is=", self.sf.shape)
def _execute(self, x, n=None):
    """Compute the output of the slowest functions.

    If 'n' is an integer, then use the first 'n' slowest components.
    """
    if n:
        return mult(x, self.sf[:, :n]) - self._bias[:n]
    return mult(x, self.sf) - self._bias
def _inverse(self, y):
    """Approximate the inverse transformation using a pseudo-inverse of
    the feature matrix sf.

    The pseudo-inverse is computed on first use and cached in self.pinv.
    """
    if self.pinv is None:
        self.pinv = pinv(self.sf)
    return mult(y, self.pinv) + self.avg
def _set_range(self):
    """Return the eigenvalue range to extract, or None for all components.

    When output_dim is requested and does not exceed input_dim (or
    input_dim is still unknown), only the first output_dim eigenpairs
    are requested from symeig; otherwise all components are kept and
    output_dim is set to input_dim.
    """
    # Bugfix: test `input_dim is None` *before* the comparison; the
    # original evaluated `output_dim <= input_dim` first, which raises a
    # TypeError on Python 3 when input_dim is still undetermined.
    if self.output_dim is not None and (self.input_dim is None or
                                        self.output_dim <= self.input_dim):
        # (eigenvalues sorted in ascending order)
        rng = (1, self.output_dim)
    else:
        # otherwise, keep all output components
        rng = None
        self.output_dim = self.input_dim
    return rng
###############################################################################
# HELPER FUNCTIONS #
###############################################################################
def graph_delta_values(y, edge_weights):
    """ Computes delta values from an arbitrary graph as in the objective
    function of GSFA. The feature vectors are not normalized to weighted
    unit variance or weighted zero mean.
    """
    total_weight = 0
    weighted_sq_diffs = 0
    for edge, w_ij in edge_weights.items():
        i, j = edge
        weighted_sq_diffs += w_ij * (y[j] - y[i]) ** 2
        total_weight += w_ij
    # Normalize by the total edge weight R.
    return weighted_sq_diffs / total_weight
def comp_delta(x):
    """ Computes delta values as in the objective function of SFA.
    The feature vectors are not normalized to unit variance or zero mean.
    """
    # Discrete time derivative: difference of consecutive rows.
    derivative = x[1:, :] - x[:-1, :]
    return (derivative ** 2).mean(axis=0)
def Hamming_weight(integer_list):
    """ Computes the Hamming weight of an integer or a list of integers (number
    of bits equal to one).
    """
    if isinstance(integer_list, list):
        return [Hamming_weight(entry) for entry in integer_list]
    elif isinstance(integer_list, int):
        # Counting '1' digits of bin(n) equals the original bit-shift
        # loop for positive n; non-positive values never entered the
        # loop, hence weight 0.
        return bin(integer_list).count("1") if integer_list > 0 else 0
    else:
        er = "unsupported input type for Hamming_weight:" + str(integer_list)
        raise ValueError(er)
class CovDCovMatrix(object):
    """Special purpose class to compute the covariance/second moment matrices
    used by GSFA. It supports efficiently training methods for various graphs:
    e.g., clustered, serial, mixed.
    Joint computation of these matrices is typically more efficient than
    their separate computation.
    """
    def __init__(self, verbose=False):
        """Variable descriptions:
        sum_x: a vector with the sum of all data samples
        sum_prod_x: a matrix with sum of all samples multiplied by their
        transposes
        num_samples: the total weighted number of samples
        sum_prod_diffs: a matrix with sum of all sample differences
        multiplied by their transposes
        num_diffs: the total weighted number of sample differences
        verbose: a Boolean verbosity parameter
        The following variables are available after fix() has been called.
        cov_mtx: the resulting covariance matrix of the samples
        avg: the average sample
        dcov_mtx: the resulting second-moment matrix of the sample
        differences
        """
        # Accumulators start empty (None) and are lazily initialized by
        # add_samples()/add_diffs() on the first contribution.
        self.sum_x = None
        self.sum_prod_x = None
        self.num_samples = 0
        self.sum_prod_diffs = None
        self.num_diffs = 0
        self.verbose = verbose
        # Variables used to store the final matrices
        self.cov_mtx = None
        self.avg = None
        self.dcov_mtx = None
def add_samples(self, sum_prod_x, sum_x, num_samples, weight=1.0):
    """Accumulate sample statistics for the covariance computation.

    The contribution (sum_prod_x, sum_x, num_samples) is scaled by
    ``weight`` and added to the running totals.
    """
    scaled_prod = sum_prod_x * weight
    scaled_sum = sum_x * weight
    scaled_count = num_samples * weight
    if self.sum_prod_x is None:
        # First contribution: initialize the accumulators.
        self.sum_prod_x = scaled_prod
        self.sum_x = scaled_sum
    else:
        self.sum_prod_x = self.sum_prod_x + scaled_prod
        self.sum_x = self.sum_x + scaled_sum
    self.num_samples = self.num_samples + scaled_count
def add_diffs(self, sum_prod_diffs, num_diffs, weight=1.0):
    """Accumulate difference statistics for the second-moment (DCov)
    computation.

    The contribution (sum_prod_diffs, num_diffs) is scaled by ``weight``
    and added to the running totals.
    """
    scaled_prod_diffs = sum_prod_diffs * weight
    scaled_count = num_diffs * weight
    if self.sum_prod_diffs is None:
        # First contribution: initialize the accumulator.
        self.sum_prod_diffs = scaled_prod_diffs
    else:
        self.sum_prod_diffs = self.sum_prod_diffs + scaled_prod_diffs
    self.num_diffs = self.num_diffs + scaled_count
def update_unlabeled(self, x, weight=1.0):
    """ Add unlabeled samples to the covariance matrix
    (DCov remains unmodified).
    """
    n = x.shape[0]
    col_sums = x.sum(axis=0)
    second_moment = mdp.utils.mult(x.T, x)
    self.add_samples(second_moment, col_sums, n, weight)
def update_regular(self, x, weight=1.0):
    """This is equivalent to regular SFA training (except for the final
    feature scale).
    """
    n = x.shape[0]
    # Covariance part: plain sums over all samples.
    col_sums = x.sum(axis=0)
    second_moment = mdp.utils.mult(x.T, x)
    self.add_samples(second_moment, col_sums, n, weight)
    # Derivative part: consecutive sample differences, as in standard SFA.
    diffs = x[1:, :] - x[:-1, :]
    self.add_diffs(mdp.utils.mult(diffs.T, diffs), n - 1, weight)
def update_graph(self, x, node_weights=None, edge_weights=None,
                 weight=1.0):
    """Updates the covariance/second moment matrices using an user-provided
    graph specified by (x, node weights, edge weights, and a global
    weight).

    edge_weights may be either a dense NxN array or a sparse dict
    {(i, j): w_ij}; the dict path builds explicit difference vectors.
    Usually sum(node_weights) = num_samples.
    """
    num_samples, dim = x.shape
    if node_weights is None:
        # Default: uniform node weights.
        node_weights = numx.ones(num_samples)
    if len(node_weights) != num_samples:
        er = "Node weights should have the same length " + \
             "%d as the number of samples %d" % (len(node_weights),
                                                 num_samples)
        raise TrainingException(er)
    if edge_weights is None:
        er = "edge_weights should be a dictionary with entries: " + \
             "d[(i,j)] = w_{i,j} or an NxN array"
        raise TrainingException(er)
    if isinstance(edge_weights, numx.ndarray):
        # TODO: eventually make sure edge_weights are symmetric
        # TODO: eventually make sure consistency restriction is fulfilled
        if edge_weights.shape != (num_samples, num_samples):
            er = "Shape of edge_weights should be " + \
                 "(%d, %d)" % (num_samples, num_samples) + \
                 " but is (%d, %d)" % (edge_weights.shape[0],
                                       edge_weights.shape[1])
            raise TrainingException(er)
    node_weights_column = node_weights.reshape((num_samples, 1))
    # Update Cov Matrix
    weighted_x = x * node_weights_column
    weighted_sum_x = weighted_x.sum(axis=0)
    weighted_sum_prod_x = mdp.utils.mult(x.T, weighted_x)
    weighted_num_samples = node_weights.sum()
    self.add_samples(weighted_sum_prod_x, weighted_sum_x,
                     weighted_num_samples, weight=weight)
    # Update DCov Matrix
    if isinstance(edge_weights, numx.ndarray):
        # Dense path: closed form, no explicit difference vectors.
        weighted_num_diffs = edge_weights.sum()  # normalization constant R
        prod1 = weighted_sum_prod_x  # TODO: eventually check these
        # equations, they might only work if Q==R
        prod2 = mdp.utils.mult(mdp.utils.mult(x.T, edge_weights), x)
        weighted_sum_prod_diffs = 2 * prod1 - 2 * prod2
        self.add_diffs(weighted_sum_prod_diffs, weighted_num_diffs,
                       weight=weight)
    else:
        # Sparse (dict) path: one difference vector per listed edge.
        num_diffs = len(edge_weights)
        diffs = numx.zeros((num_diffs, dim))
        weighted_diffs = numx.zeros((num_diffs, dim))
        weighted_num_diffs = 0
        for ii, (i, j) in enumerate(edge_weights.keys()):
            diff = x[j, :] - x[i, :]
            diffs[ii] = diff
            w_ij = edge_weights[(i, j)]
            weighted_diff = diff * w_ij
            weighted_diffs[ii] = weighted_diff
            weighted_num_diffs += w_ij
        weighted_sum_prod_diffs = mdp.utils.mult(diffs.T, weighted_diffs)
        self.add_diffs(weighted_sum_prod_diffs, weighted_num_diffs,
                       weight=weight)
def update_graph_old(self, x, node_weights=None, edge_weights=None,
                     weight=1.0):
    """This method performs the same task as update_graph. It is slower
    than update_graph because it has not been optimized. Thus, it is
    mainly useful to verify the correctness of update_graph.
    """
    num_samples, dim = x.shape
    if node_weights is None:
        node_weights = numx.ones(num_samples)
    if len(node_weights) != num_samples:
        er = "Node weights should be the same length " + \
             "%d as the number of samples %d" % (len(node_weights),
                                                 num_samples)
        raise TrainingException(er)
    if edge_weights is None:
        er = "edge_weights should be a dictionary with entries: " + \
             "d[(i,j)] = w_{i,j} or an NxN array"
        raise TrainingException(er)
    if isinstance(edge_weights, numx.ndarray):
        # Dense arrays are converted to the sparse dict representation
        # (non-zero entries only) and processed uniformly below.
        if edge_weights.shape == (num_samples, num_samples):
            e_w = {}
            for i in range(num_samples):
                for j in range(num_samples):
                    if edge_weights[i, j] != 0:
                        e_w[(i, j)] = edge_weights[i, j]
            edge_weights = e_w
        else:
            er = "edge_weights.shape should be (%d,%d) but is (%d,%d)" % \
                 (num_samples, num_samples, edge_weights.shape[0],
                  edge_weights.shape[1])
            raise TrainingException(er)
    node_weights_column = node_weights.reshape((num_samples, 1))
    # Update Cov Matrix
    weighted_x = x * node_weights_column
    weighted_sum_x = weighted_x.sum(axis=0)
    weighted_sum_prod_x = mdp.utils.mult(x.T, weighted_x)
    weighted_num_samples = node_weights.sum()
    self.add_samples(weighted_sum_prod_x, weighted_sum_x,
                     weighted_num_samples, weight=weight)
    # Update DCov Matrix: one explicit difference vector per edge.
    num_diffs = len(edge_weights)
    diffs = numx.zeros((num_diffs, dim))
    weighted_diffs = numx.zeros((num_diffs, dim))
    weighted_num_diffs = 0
    for ii, (i, j) in enumerate(edge_weights.keys()):
        diff = x[j, :] - x[i, :]
        diffs[ii] = diff
        w_ij = edge_weights[(i, j)]
        weighted_diff = diff * w_ij
        weighted_diffs[ii] = weighted_diff
        weighted_num_diffs += w_ij
    weighted_sum_prod_diffs = mdp.utils.mult(diffs.T, weighted_diffs)
    self.add_diffs(weighted_sum_prod_diffs, weighted_num_diffs,
                   weight=weight)
def update_mirroring_sliding_window(self, x, weight=1.0,
                                    window_halfwidth=2):
    """Vectorized sliding-window update with mirrored borders.

    See update_slow_mirroring_sliding_window for the straightforward
    reference implementation this is meant to be equivalent to.

    Note: this method makes sense according to the consistency
    restriction for "larger" windows.
    """
    num_samples, dim = x.shape
    # window_halfwidth is too long to write it complete each time
    width = window_halfwidth
    if 2 * width >= num_samples:
        ex = "window_halfwidth %d not supported for %d samples" % \
             (width, num_samples)
        raise TrainingException(ex)
    # Update Cov Matrix. All samples have same weight
    sum_x = x.sum(axis=0)
    sum_prod_x = mdp.utils.mult(x.T, x)
    self.add_samples(sum_prod_x, sum_x, num_samples, weight)
    # Update DCov Matrix. First mirror the borders
    x_mirror = numx.zeros((num_samples + 2 * width, dim))
    x_mirror[width:-width] = x  # center part
    x_mirror[0:width, :] = x[0:width, :][::-1, :]  # first end
    x_mirror[-width:, :] = x[-width:, :][::-1, :]  # second end
    # Center part
    x_full = x
    sum_prod_x_full = mdp.utils.mult(x_full.T, x_full)
    # Aacc123 accumulates the per-sample outer products, each weighted by
    # how many window connections that (mirrored) sample participates in.
    Aacc123 = numx.zeros((dim, dim))
    for i in range(0, 2 * width):
        Aacc123 += (i + 1) * mdp.utils.mult(x_mirror[i:i + 1, :].T,
                                            x_mirror[i:i + 1, :])
    for i in range(num_samples, num_samples + 2 * width):
        Aacc123 += (num_samples + 2 * width - i) * mdp.utils.mult(
            x_mirror[i:i + 1, :].T, x_mirror[i:i + 1, :])
    # intermediate values of x, which are connected 2*width+1 times
    x_middle = x_mirror[2 * width:-2 * width, :]
    Aacc123 += (2 * width + 1) * mdp.utils.mult(x_middle.T, x_middle)
    # B holds, per sample, the sum of its window neighbors (computed via a
    # cumulative-sum trick over the mirrored sequence).
    b = numx.zeros((num_samples + 1 + 2 * width, dim))
    b[1:] = x_mirror.cumsum(axis=0)
    B = b[2 * width + 1:] - b[0:-2 * width - 1]
    Bprod = mdp.utils.mult(x_full.T, B)
    # Expansion of sum_w (x_i - x_j)(x_i - x_j)^T over the window graph.
    sum_prod_diffs_full = (2 * width + 1) * sum_prod_x_full + Aacc123 - \
        Bprod - Bprod.T
    num_diffs = num_samples * (2 * width)  # removed zero differences
    self.add_diffs(sum_prod_diffs_full, num_diffs, weight)
def update_slow_mirroring_sliding_window(self, x, weight=1.0,
                                         window_halfwidth=2):
    """ This is an unoptimized version of update_mirroring_sliding_window.
    """
    num_samples, dim = x.shape
    width = window_halfwidth
    if 2 * width >= num_samples:
        ex = "window_halfwidth %d " % width + \
             "not supported for %d samples" % num_samples
        raise TrainingException(ex)
    # Update Cov Matrix. All samples have same weight
    sum_x = x.sum(axis=0)
    sum_prod_x = mdp.utils.mult(x.T, x)
    self.add_samples(sum_prod_x, sum_x, num_samples, weight)
    # Update DCov Matrix. window = numx.ones(2*width+1), rectangular window
    x_mirror = numx.zeros((num_samples + 2 * width, dim))
    x_mirror[width:-width] = x  # center part
    x_mirror[0:width, :] = x[0:width, :][::-1, :]  # start of the sequence
    x_mirror[-width:, :] = x[-width:, :][::-1, :]  # end of the sequence
    # One pass per window offset; offset 0 (self-connections) is skipped.
    for offset in range(-width, width + 1):
        if offset == 0:
            pass
        else:
            diffs = x_mirror[offset + width:offset + width +
                             num_samples, :] - x
            sum_prod_diffs = mdp.utils.mult(diffs.T, diffs)
            num_diffs = len(diffs)
            self.add_diffs(sum_prod_diffs, num_diffs, weight)
def update_slow_truncating_sliding_window(self, x, weight=1.0,
                                          window_halfwidth=2):
    """ Truncating Window (original slow/reference version).

    Unlike the mirroring variants, window positions falling outside the
    sequence are simply dropped (the padded samples are zero and each
    offset pair is counted once with factor 2).
    """
    num_samples, dim = x.shape
    width = window_halfwidth
    if 2 * width >= num_samples:
        ex = "window_halfwidth %d " % width + \
             "not supported for %d samples" % num_samples
        raise ValueError(ex)
    # Update Cov Matrix. All samples have same weight
    sum_x = x.sum(axis=0)
    sum_prod_x = mdp.utils.mult(x.T, x)
    self.add_samples(sum_prod_x, sum_x, num_samples, weight)
    # Update DCov Matrix. window = numx.ones(2*width+1), rectangular window
    x_extended = numx.zeros((num_samples + 2 * width, dim))
    # Center part is preserved, extreme samples are zero
    x_extended[width:-width] = x
    # Negative offset is not considered because it is equivalent to the
    # positive one, therefore the factor 2
    for offset in range(1, width + 1):
        diffs = x_extended[offset + width:width + num_samples, :] - \
            x[0:-offset, :]
        sum_prod_diffs = 2 * mdp.utils.mult(diffs.T, diffs)
        num_diffs = 2 * (num_samples - offset)
        self.add_diffs(sum_prod_diffs, num_diffs, weight)
    def update_fast_sliding_window(self, x, weight=1.0, window_halfwidth=2):
        """ Sliding window with node-weight correction (fast version).

        Equivalent in spirit to update_sliding_window but computes the
        central part of the difference statistics with cumulative sums
        instead of an explicit loop over offsets.

        x: 2D array of samples (one sample per row).
        weight: node weight applied to both statistics.
        window_halfwidth: half-width of the sliding window.
        Raises ValueError when the window does not fit in the data.
        """
        num_samples, dim = x.shape
        width = window_halfwidth
        if 2 * width >= num_samples:
            ex = "window_halfwidth %d not supported for %d samples" % (width,
                                                                num_samples)
            raise ValueError(ex)
        # MOST CORRECT VERSION
        # Border samples receive linearly ramped node weights in [0.5, 1)
        x_sel = x + 0.0
        w_up = numx.arange(width, 2 * width) / (2.0 * width)
        w_up = w_up.reshape((width, 1))
        w_down = numx.arange(2 * width - 1, width - 1, -1) / (2.0 * width)
        w_down = w_down.reshape((width, 1))
        x_sel[0:width, :] = x_sel[0:width, :] * w_up
        x_sel[-width:, :] = x_sel[-width:, :] * w_down
        sum_x = x_sel.sum(axis=0)
        sum_prod_x = mdp.utils.mult(x_sel.T, x)
        self.add_samples(sum_prod_x, sum_x,
                         num_samples - (0.5 * window_halfwidth - 0.5), weight)
        # Update DCov Matrix. First we compute the borders
        # Left border
        for i in range(0, width):  # [0, width -1]
            diffs = x[0:width + i + 1, :] - x[i, :]
            sum_prod_diffs = mdp.utils.mult(diffs.T, diffs)
            num_diffs = len(diffs) - 1  # removed zero differences
            # print "N1=", num_diffs
            # print "sum_prod_diffs[0]=", sum_prod_diffs[0]
            self.add_diffs(sum_prod_diffs, num_diffs, weight)
        # Right border
        for i in range(num_samples - width, num_samples):
            diffs = x[i - width:num_samples, :] - x[i, :]
            sum_prod_diffs = mdp.utils.mult(diffs.T, diffs)
            num_diffs = len(diffs) - 1  # removed zero differences
            # print "N2=", num_diffs
            # print "sum_prod_diffs[0]=", sum_prod_diffs[0]
            self.add_diffs(sum_prod_diffs, num_diffs, weight)
        # Center part
        x_full = x[width:num_samples - width, :]
        sum_prod_x_full = mdp.utils.mult(x_full.T, x_full)
        # Aacc123 accumulates each sample weighted by the number of window
        # positions it participates in
        Aacc123 = numx.zeros((dim, dim))
        for i in range(0, 2 * width):  # [0, 2*width-1]
            Aacc123 += (i + 1) * mdp.utils.mult(x[i:i + 1, :].T, x[i:i + 1, :])
        for i in range(num_samples - 2 * width, num_samples):
            Aacc123 += (num_samples - i) * mdp.utils.mult(x[i:i + 1, :].T,
                                                          x[i:i + 1, :])
        # intermediate values of x, which are connected 2*width+1 times
        x_middle = x[2 * width:num_samples - 2 * width, :]
        Aacc123 += (2 * width + 1) * mdp.utils.mult(x_middle.T, x_middle)
        # b holds prefix sums of x; B is the moving-window sum of length
        # 2*width+1 around each center sample
        b = numx.zeros((num_samples + 1, dim))
        b[1:] = x.cumsum(axis=0)
        B = b[2 * width + 1:] - b[0:-2 * width - 1]
        Bprod = mdp.utils.mult(x_full.T, B)
        sum_prod_diffs_full = (2 * width + 1) * sum_prod_x_full + Aacc123 - \
            Bprod - Bprod.T
        num_diffs = (num_samples - 2 * width) * (2 * width)
        self.add_diffs(sum_prod_diffs_full, num_diffs, weight)
def update_sliding_window(self, x, weight=1.0, window_halfwidth=2):
num_samples = x.shape[0]
width = window_halfwidth
if 2 * width >= num_samples:
ex = "window_halfwidth %d not supported for %d samples" % \
(width, num_samples)
raise ValueError(ex)
# MOST CORRECT VERSION
x_sel = x + 0.0
w_up = numx.arange(width, 2 * width) / (2.0 * width)
w_up = w_up.reshape((width, 1))
w_down = numx.arange(2 * width - 1, width - 1, -1) / (2.0 * width)
w_down = w_down.reshape((width, 1))
x_sel[0:width, :] = x_sel[0:width, :] * w_up
x_sel[-width:, :] = x_sel[-width:, :] * w_down
sum_x = x_sel.sum(axis=0)
sum_prod_x = mdp.utils.mult(x_sel.T, x)
self.add_samples(sum_prod_x, sum_x, num_samples -
(0.5 * window_halfwidth - 0.5), weight)
# This can be made faster (twice) due to symmetry
for offset in range(-width, width + 1):
if offset == 0:
pass
else:
if offset > 0:
diffs = x[offset:, :] - x[0:num_samples - offset, :]
sum_prod_diffs = mdp.utils.mult(diffs.T, diffs)
num_diffs = len(diffs)
self.add_diffs(sum_prod_diffs, num_diffs, weight)
# Add samples belonging to a serial training graph
def update_serial(self, x, block_size, weight=1.0):
num_samples, dim = x.shape
if block_size is None:
er = "block_size must be specified"
raise TrainingException(er)
if isinstance(block_size, numx.ndarray):
err = "Inhomogeneous block sizes not yet supported"
raise ValueError(err)
elif isinstance(block_size, list):
block_size_0 = block_size[0]
for bs in block_size:
if bs != block_size_0:
er = "for serial graph all groups must have same group" + \
"size (block_size constant), but " + \
str(bs) + "!=" + str(block_size_0)
raise ValueError(er)
block_size = block_size_0
if num_samples % block_size > 0:
err = "num_samples is not a multiple of block_size"
raise ValueError(err)
num_blocks = num_samples // block_size
# Correlation Matrix. Computing sum of outer products (the easy part)
xp = x[block_size:num_samples - block_size]
x_b_ini = x[0:block_size]
x_b_end = x[num_samples - block_size:]
sum_x = x_b_ini.sum(axis=0) + 2 * xp.sum(axis=0) + x_b_end.sum(axis=0)
sum_prod_x = mdp.utils.mult(x_b_ini.T, x_b_ini) + \
2 * mdp.utils.mult(xp.T, xp) + \
mdp.utils.mult(x_b_end.T, x_b_end)
num_samples = 2 * block_size + 2 * (num_samples - 2 * block_size)
self.add_samples(sum_prod_x, sum_x, num_samples, weight)
# DCorrelation Matrix. Compute medias signal
media = numx.zeros((num_blocks, dim))
for i in range(num_blocks):
media[i] = x[i * block_size:(i + 1) * block_size].sum(axis=0) * \
(1.0 / block_size)
media_a = media[0:-1]
media_b = media[1:]
sum_prod_mixed_meds = (mdp.utils.mult(media_a.T, media_b) +
mdp.utils.mult(media_b.T, media_a))
num_diffs = block_size * (num_blocks - 1)
sum_prod_diffs = (block_size * sum_prod_x - block_size * block_size
* sum_prod_mixed_meds) * (1.0 / block_size)
# The factor 2 accounts for both directions
self.add_diffs(2 * sum_prod_diffs, 2 * num_diffs, weight)
# Weight should refer to node weights
def update_clustered(self, x, block_sizes=None, weight=1.0,
include_self_loops=True):
num_samples = x.shape[0]
if isinstance(block_sizes, int):
self.update_clustered_homogeneous_block_sizes(
x, weight=weight, block_size=block_sizes,
include_self_loops=include_self_loops)
return
if block_sizes is None:
er = "block_size is not specified"
raise TrainingException(er)
if num_samples != numx.array(block_sizes).sum():
err = "Inconsistency error: num_samples (%d)" % num_samples + \
"is not equal to sum of block_sizes: %d" % block_sizes
raise ValueError(err)
counter_sample = 0
for block_size in block_sizes:
normalized_weight = weight
self.update_clustered_homogeneous_block_sizes(
x[counter_sample:counter_sample + block_size, :],
weight=normalized_weight, block_size=block_size,
include_self_loops=include_self_loops)
counter_sample += block_size
    def update_clustered_homogeneous_block_sizes(self, x, weight=1.0,
                                                 block_size=None,
                                                 include_self_loops=True):
        """ Adds samples belonging to a clustered graph in which every
        cluster (block) has the same size.

        x: 2D array of samples (one sample per row); the number of samples
            must be a multiple of block_size.
        weight: node weight assigned to the samples.
        block_size: integer size of every cluster (required).
        include_self_loops: if True, the (zero) difference of each sample
            with itself is also counted, which changes the normalization
            divisor from block_size - 1 to block_size.
        """
        if self.verbose:
            print("update_clustered_homogeneous_block_sizes ")
        if block_size is None:
            er = "block_size is not specified"
            raise TrainingException(er)
        if isinstance(block_size, numx.ndarray):
            er = "inhomogeneous block sizes are not supported by this function"
            raise TrainingException(er)
        # Assuming block_size is an integer:
        num_samples, dim = x.shape
        if num_samples % block_size > 0:
            err = "num_samples (%d) is not a multiple of block_size (%d)" % \
                  (num_samples, block_size)
            raise ValueError(err)
        num_blocks = num_samples // block_size
        sum_x = x.sum(axis=0)
        sum_prod_x = mdp.utils.mult(x.T, x)
        self.add_samples(sum_prod_x, sum_x, num_samples, weight)
        # DCorrelation Matrix. Compute medias (block means) signal
        media = numx.zeros((num_blocks, dim))
        for i in range(num_blocks):
            media[i] = x[i * block_size:(i + 1) * block_size].sum(axis=0) * \
                (1.0 / block_size)
        sum_prod_meds = mdp.utils.mult(media.T, media)
        num_diffs = num_blocks * block_size
        if self.verbose:
            print("num_diffs in block:", num_diffs,
                  " num_samples:", num_samples)
        if include_self_loops:
            sum_prod_diffs = 2.0 * block_size * (sum_prod_x - block_size *
                                                 sum_prod_meds) / block_size
        else:
            sum_prod_diffs = 2.0 * block_size * (sum_prod_x - block_size *
                                                 sum_prod_meds) / (block_size - 1)
        self.add_diffs(sum_prod_diffs, num_diffs, weight)
        if self.verbose:
            print("(Diag(complete)/num_diffs.avg)**0.5 =",
                  ((numx.diagonal(sum_prod_diffs) / num_diffs).mean()) ** 0.5)
def update_compact_classes(self, x, block_sizes=None, Jdes=None):
num_samples = x.shape[0]
if self.verbose:
print("block_sizes=", block_sizes, type(block_sizes))
if isinstance(block_sizes, list):
block_sizes = numx.array(block_sizes)
if isinstance(block_sizes, numx.ndarray):
if len(block_sizes) > 1:
if block_sizes.var() > 0:
er = "for compact_classes all groups must have the " + \
"same number of elements (block_sizes)"
raise ValueError(er)
else:
block_size = block_sizes[0]
else:
block_size = block_sizes[0]
elif block_sizes is None:
er = "block_size not specified"
raise TrainingException(er)
else:
block_size = block_sizes
if num_samples % block_size != 0:
err = "num_samples (%d) should be a multiple of block_size: %d" % \
(num_samples, block_sizes)
raise ValueError(err)
num_classes = num_samples // block_size
J = int(numx.log2(num_classes))
if Jdes is None:
Jdes = J
extra_label = Jdes - J
if self.verbose:
print("Besides J=%d labels, also adding %d labels" % (J,
extra_label))
if num_classes != 2 ** J:
err = "num_clases %d is probably not a power of 2" % num_classes
raise ValueError(err)
N = num_samples
labels = numx.zeros((N, J + extra_label))
for j in range(J):
labels[:, j] = (numx.arange(N) // block_size //
(2 ** (J - j - 1)) % 2) * 2 - 1
eigenvalues = numx.concatenate(([1.0] * (J - 1),
numx.arange(1.0, 0.0,
-1.0 / (extra_label + 1))))
n_taken = [2 ** k for k in range(J)]
n_free = list(set(range(num_classes)) - set(n_taken))
n_free_weights = Hamming_weight(n_free)
order = numx.argsort(n_free_weights)[::-1]
for j in range(extra_label):
digit = n_free[order[j]]
label = numx.ones(N)
for c in range(J):
if (digit >> c) % 2:
label *= labels[:, c]
if n_free_weights[order[j]] % 2 == 0:
label *= -1
labels[:, J + j] = label
eigenvalues = numx.array(eigenvalues)
eigenvalues /= eigenvalues.sum()
if self.verbose:
print("Eigenvalues:", eigenvalues)
print("Eigenvalues normalized:", eigenvalues)
for j in range(J + extra_label):
print("labels[%d]=" % j, labels[:, j])
for j in range(J + extra_label):
set10 = x[labels[:, j] == -1]
# first cluster
self.update_clustered_homogeneous_block_sizes(
set10, weight=eigenvalues[j], block_size=N // 2)
set10 = x[labels[:, j] == 1]
# second cluster
self.update_clustered_homogeneous_block_sizes(
set10, weight=eigenvalues[j], block_size=N // 2)
def add_cov_dcov_matrix(self, cov_dcov_mat, adding_weight=1.0,
own_weight=1.0):
if self.sum_prod_x is None:
self.sum_prod_x = cov_dcov_mat.sum_prod_x * adding_weight
self.sum_x = cov_dcov_mat.sum_x * adding_weight
else:
self.sum_prod_x = self.sum_prod_x * own_weight + \
cov_dcov_mat.sum_prod_x * adding_weight
self.sum_x = self.sum_x * own_weight + \
cov_dcov_mat.sum_x * adding_weight
self.num_samples = self.num_samples * own_weight + \
cov_dcov_mat.num_samples * adding_weight
if self.sum_prod_diffs is None:
self.sum_prod_diffs = cov_dcov_mat.sum_prod_diffs * adding_weight
else:
self.sum_prod_diffs = self.sum_prod_diffs * own_weight + \
cov_dcov_mat.sum_prod_diffs * adding_weight
self.num_diffs = self.num_diffs * own_weight + \
cov_dcov_mat.num_diffs * adding_weight
    def fix(self, divide_by_num_samples_or_differences=True):
        """ Finalizes the accumulated statistics.

        Computes the average sample, the covariance matrix of the samples
        (cov_mtx) and the covariance matrix of the differences (dcov_mtx)
        from the accumulated sums.

        divide_by_num_samples_or_differences: if True (as specified by the
            theory on training graphs) divide by num_samples; otherwise
            use the standard unbiased estimator (num_samples - 1).

        Returns (cov_mtx, avg, dcov_mtx).
        """
        if self.verbose:
            print("Fixing CovDCovMatrix")
        avg_x = self.sum_x * (1.0 / self.num_samples)
        prod_avg_x = numx.outer(avg_x, avg_x)
        if divide_by_num_samples_or_differences:
            # as specified by the theory on training graphs
            cov_x = (self.sum_prod_x -
                     self.num_samples * prod_avg_x) / (1.0 * self.num_samples)
        else:  # standard unbiased estimation used by standard SFA
            cov_x = (self.sum_prod_x -
                     self.num_samples * prod_avg_x) / (self.num_samples - 1.0)
        # Finalize covariance matrix of dx
        # NOTE(review): "or True" makes this condition always true, so the
        # (num_diffs - 1.0) branch below is dead code -- confirm intended
        if divide_by_num_samples_or_differences or True:
            cov_dx = self.sum_prod_diffs / (1.0 * self.num_diffs)
        else:
            cov_dx = self.sum_prod_diffs / (self.num_diffs - 1.0)
        self.cov_mtx = cov_x
        self.avg = avg_x
        self.dcov_mtx = cov_dx
        if self.verbose:
            print("Finishing training CovDcovMtx:", self.num_samples,
                  "num_samples, and", self.num_diffs, "num_diffs")
            print("Avg[0:3] is", self.avg[0:4])
            print("Prod_avg_x[0:3,0:3] is", prod_avg_x[0:3, 0:3])
            print("Cov[0:3,0:3] is", self.cov_mtx[0:3, 0:3])
            print("DCov[0:3,0:3] is", self.dcov_mtx[0:3, 0:3])
            # print("AvgDiff[0:4] is", avg_diff[0:4])
            # print("Prod_avg_diff[0:3,0:3] is", prod_avg_diff[0:3,0:3])
            print("Sum_prod_diffs[0:3,0:3] is", self.sum_prod_diffs[0:3, 0:3])
            # print("exp_prod_diffs[0:3,0:3] is", exp_prod_diffs[0:3,0:3])
        return self.cov_mtx, self.avg, self.dcov_mtx
class iGSFANode(mdp.Node):
"""This node implements "information-preserving graph-based SFA (iGSFA)",
which is the main component of hierarchical iGSFA (HiGSFA).
For further information, see: Escalante-B., A.-N. and Wiskott, L.,
"Improved graph-based {SFA}: Information preservation complements the
slowness principle", e-print arXiv:1601.03945,
http://arxiv.org/abs/1601.03945, 2017.
"""
    def __init__(self, pre_expansion_node_class=None,
                 pre_expansion_out_dim=None, expansion_funcs=None,
                 expansion_output_dim=None, expansion_starting_point=None,
                 max_length_slow_part=None, slow_feature_scaling_method=None,
                 delta_threshold=1.999, reconstruct_with_sfa=False,
                 verbose=False, input_dim=None, output_dim=None,
                 dtype=None, **argv):
        """ Initializes the iGSFA node.

        pre_expansion_node_class: a node class. An instance of this class is
            used to filter the data before the expansion.
        pre_expansion_out_dim: the output dimensionality of the
            above-mentioned node.
        expansion_funcs: a list of expansion functions to be applied before
            GSFA.
        expansion_output_dim: this parameter is used to specify an output
            dimensionality for some expansion functions.
        expansion_starting_point: this parameter is also used by some specific
            expansion functions.
        max_length_slow_part: fixes an upper bound to the size of the slow
            part, which is convenient for computational reasons.
        slow_feature_scaling_method: the method used to scale the slow
            features. Valid entries are: None, "sensitivity_based" (default),
            "data_dependent", and "QR_decomposition".
        delta_threshold: this parameter has two different meanings depending
            on its type. If it is real valued (e.g., 1.99), it determines the
            parameter Delta_threshold, which is used to decide how many slow
            features are preserved, depending on their delta values. If it is
            integer (e.g., 20), it directly specifies the exact size of the
            slow part.
        reconstruct_with_sfa: this Boolean parameter indicates whether the
            slow part is removed from the input before PCA is applied.

        More information about parameters 'expansion_funcs' and
        'expansion_starting_point' can be found in the documentation of
        GeneralExpansionNode.

        Note: Training sometimes finishes after a single call to the train
        method, unless multi-train is enabled (default). Multi-train is
        enabled by setting reconstruct_with_sfa=False and
        slow_feature_scaling_method in [None, "data_dependent"].
        This is necessary to support weight sharing in iGSFA layers
        (convolutional iGSFA layers).
        """
        super(iGSFANode, self).__init__(input_dim=input_dim,
                                        output_dim=output_dim,
                                        dtype=dtype, **argv)
        # Type of node used to expand the data
        self.pre_expansion_node_class = pre_expansion_node_class
        self.pre_expansion_node = None  # Node that expands the input data
        self.pre_expansion_output_dim = pre_expansion_out_dim
        # Expanded dimensionality
        self.expansion_output_dim = expansion_output_dim
        # Initial parameters for the expansion function
        self.expansion_starting_point = expansion_starting_point
        # creates an expansion node
        if expansion_funcs and self.expansion_output_dim is not None:
            self.exp_node = GeneralExpansionNode(
                funcs=expansion_funcs, output_dim=self.expansion_output_dim,
                starting_point=self.expansion_starting_point)
        elif expansion_funcs:
            self.exp_node = GeneralExpansionNode(
                funcs=expansion_funcs,
                starting_point=self.expansion_starting_point)
        else:
            self.exp_node = None
        # Sub-nodes created during training
        self.sfa_node = None
        self.pca_node = None
        self.lr_node = None
        # hard upper limit to the size of the slow part
        self.max_length_slow_part = max_length_slow_part
        # Parameter that defines the size of the slow part. Its meaning
        # depends on whether it is an integer or a float
        self.delta_threshold = delta_threshold
        # Indicates whether (nonlinear) SFA components are used for
        # reconstruction
        self.reconstruct_with_sfa = reconstruct_with_sfa
        # Indicates how to scale the slow part
        self.slow_feature_scaling_method = slow_feature_scaling_method
        # Default value when none is explicitly provided to the class methods
        self.verbose = verbose
        # Dimensionality of the data after the expansion function
        self.expanded_dim = None
        # The following variables are for internal use only (available after
        # training on a single batch only)
        self.magn_n_sfa_x = None
        self.num_sfa_features_preserved = None
        self.x_mean = None
        self.sfa_x_mean = None
        self.sfa_x_std = None
        self.evar = None
        # The following variables are for internal use only, by the QR slow
        # feature scaling method
        self.Q = None
        self.R = None
        self.Rpinv = None
@staticmethod
def is_trainable():
return True
# TODO: should train_mode be renamed training_mode?
def _train(self, x, block_size=None, train_mode=None, node_weights=None,
edge_weights=None, verbose=None, **argv):
"""Trains an iGSFA node on data 'x'
The parameters: block_size, train_mode, node_weights, and edge_weights
are passed to the training function of the corresponding gsfa node
inside iGSFA (node.gsfa_node).
"""
self.input_dim = x.shape[1]
if verbose is None:
verbose = self.verbose
if self.output_dim is None:
self.output_dim = self.input_dim
if verbose:
print("Training iGSFANode...")
if (not self.reconstruct_with_sfa) and \
(self.slow_feature_scaling_method in [None, "data_dependent"]):
self.multiple_train(x, block_size=block_size,
train_mode=train_mode,
node_weights=node_weights,
edge_weights=edge_weights)
return
if (not self.reconstruct_with_sfa) and \
(self.slow_feature_scaling_method not in [None,
"data_dependent"]):
er = "'reconstruct_with_sfa' " + str(self.reconstruct_with_sfa) + \
") should be True when the scaling method (" + \
str(self.slow_feature_scaling_method) + \
") is neither 'None' not 'data_dependent'"
raise TrainingException(er)
# else continue using the regular method:
# Remove mean before expansion
self.x_mean = x.mean(axis=0)
x_zm = x - self.x_mean
# Reorder or pre-process the data before it is expanded,
# but only if there is really an expansion
if self.pre_expansion_node_class and self.exp_node:
self.pre_expansion_node = self.pre_expansion_node_class(
output_dim=self.pre_expansion_output_dim)
# reasonable options for pre_expansion_node_class are GSFANode
# or WhitheningNode
self.pre_expansion_node.train(x_zm, block_size=block_size,
train_mode=train_mode)
self.pre_expansion_node.stop_training()
x_pre_exp = self.pre_expansion_node.execute(x_zm)
else:
x_pre_exp = x_zm
# Expand data
if self.exp_node:
if verbose:
print("expanding x...")
exp_x = self.exp_node.execute(x_pre_exp)
else:
exp_x = x_pre_exp
self.expanded_dim = exp_x.shape[1]
if self.max_length_slow_part is None:
sfa_output_dim = min(self.expanded_dim, self.output_dim)
else:
sfa_output_dim = min(self.max_length_slow_part, self.expanded_dim,
self.output_dim)
if isinstance(self.delta_threshold, int):
sfa_output_dim = min(sfa_output_dim, self.delta_threshold)
sfa_output_dim = max(1, sfa_output_dim)
# Apply SFA to expanded data
self.sfa_node = GSFANode(output_dim=sfa_output_dim, dtype=self.dtype,
verbose=verbose)
self.sfa_node.train(exp_x, block_size=block_size,
train_mode=train_mode, node_weights=node_weights,
edge_weights=edge_weights) # sfa_node.train_params
self.sfa_node.stop_training()
if verbose:
print("self.sfa_node.d", self.sfa_node.d)
# Decide how many slow features are preserved (either use
# Delta_T=delta_threshold when delta_threshold is a float, or
# preserve delta_threshold features when delta_threshold is an integer)
if isinstance(self.delta_threshold, float):
# here self.max_length_slow_part should be considered
self.num_sfa_features_preserved = (self.sfa_node.d <=
self.delta_threshold).sum()
elif isinstance(self.delta_threshold, int):
# here self.max_length_slow_part should be considered
self.num_sfa_features_preserved = self.delta_threshold
if self.delta_threshold > self.output_dim:
er = "The provided integer delta_threshold " + \
"%d is larger than the output dimensionality %d" % \
(self.delta_threshold, self.output_dim)
raise ValueError(er)
if self.max_length_slow_part is not None and \
self.delta_threshold > self.max_length_slow_part:
er = "The provided integer delta_threshold " + \
"%d" % self.delta_threshold + \
" is larger than the given upper bound on the size " + \
"of the slow part (max_length_slow_part) %d" % \
self.max_length_slow_part
raise ValueError(er)
else:
ex = "Cannot handle type of self.delta_threshold"
raise ValueError(ex)
if self.num_sfa_features_preserved > self.output_dim:
self.num_sfa_features_preserved = self.output_dim
SFANode_reduce_output_dim(self.sfa_node,
self.num_sfa_features_preserved)
if verbose:
print("sfa execute...")
sfa_x = self.sfa_node.execute(exp_x)
# normalize sfa_x
self.sfa_x_mean = sfa_x.mean(axis=0)
self.sfa_x_std = sfa_x.std(axis=0)
if verbose:
print("self.sfa_x_mean=", self.sfa_x_mean)
print("self.sfa_x_std=", self.sfa_x_std)
if (self.sfa_x_std == 0).any():
er = "zero-component detected"
raise TrainingException(er)
n_sfa_x = (sfa_x - self.sfa_x_mean) / self.sfa_x_std
if self.reconstruct_with_sfa:
x_pca = x_zm
# Approximate input linearly
if verbose:
print("training linear regression...")
self.lr_node = mdp.nodes.LinearRegressionNode()
# Notice that the input data is "x"=n_sfa_x and the output
# to learn is "y" = x_pca
self.lr_node.train(n_sfa_x, x_pca)
self.lr_node.stop_training()
x_pca_app = self.lr_node.execute(n_sfa_x)
x_app = x_pca_app
else:
x_app = numx.zeros_like(x_zm)
# Remove linear approximation
sfa_removed_x = x_zm - x_app
# TODO:Compute variance removed by linear approximation
if verbose:
print("ranking method...")
# A method for feature scaling( +rotation)
if self.reconstruct_with_sfa and \
self.slow_feature_scaling_method == "QR_decomposition":
# A bias term is included by default, we do not need it
M = self.lr_node.beta[1:, :].T
Q, R = numx.linalg.qr(M)
self.Q = Q
self.R = R
self.Rpinv = pinv(R)
s_n_sfa_x = numx.dot(n_sfa_x, R.T)
# Another method for feature scaling (no rotation)
elif self.reconstruct_with_sfa and \
(self.slow_feature_scaling_method == "sensitivity_based"):
beta = self.lr_node.beta[1:, :]
sens = (beta ** 2).sum(axis=1)
self.magn_n_sfa_x = sens ** 0.5
s_n_sfa_x = n_sfa_x * self.magn_n_sfa_x
if verbose:
print("method: sensitivity_based enforced")
elif self.slow_feature_scaling_method is None:
self.magn_n_sfa_x = 1.0
s_n_sfa_x = n_sfa_x * self.magn_n_sfa_x
if verbose:
print("method: constant amplitude for all slow features")
elif self.slow_feature_scaling_method == "data_dependent":
if verbose:
print("skiped data_dependent")
else:
er = "unknown slow feature scaling method= " + \
str(self.slow_feature_scaling_method) + \
" for reconstruct_with_sfa= " + str(self.reconstruct_with_sfa)
raise ValueError(er)
print("training PCA...")
pca_output_dim = self.output_dim - self.num_sfa_features_preserved
# This allows training of PCA when pca_out_dim is zero
self.pca_node = mdp.nodes.PCANode(output_dim=max(1, pca_output_dim),
dtype=self.dtype)
self.pca_node.train(sfa_removed_x)
self.pca_node.stop_training()
PCANode_reduce_output_dim(self.pca_node, pca_output_dim,
verbose=False)
if verbose:
print("executing PCA...")
pca_x = self.pca_node.execute(sfa_removed_x)
if self.slow_feature_scaling_method == "data_dependent":
if pca_output_dim > 0:
self.magn_n_sfa_x = 1.0 * numx.median(
self.pca_node.d) ** 0.5
else:
self.magn_n_sfa_x = 1.0
s_n_sfa_x = n_sfa_x * self.magn_n_sfa_x
if verbose:
print("method: data dependent")
if self.pca_node.output_dim + self.num_sfa_features_preserved < \
self.output_dim:
er = "Error, the number of features computed is SMALLER than " \
+ "the output dimensionality of the node: " + \
"self.pca_node.output_dim=" + str(self.pca_node.output_dim) \
+ ", self.num_sfa_features_preserved=" + \
str(self.num_sfa_features_preserved) + ", self.output_dim=" \
+ str(self.output_dim)
raise TrainingException(er)
# Finally, the output is the concatenation of scaled slow features
# and remaining pca components
sfa_pca_x = numx.concatenate((s_n_sfa_x, pca_x), axis=1)
sfa_pca_x_truncated = sfa_pca_x[:, 0:self.output_dim]
# Compute explained variance from amplitudes of output compared to
# amplitudes of input. Only works because amplitudes of SFA are
# scaled to be equal to explained variance, because PCA is
# a rotation, and because data has zero mean
self.evar = (sfa_pca_x_truncated ** 2).sum() / (x_zm ** 2).sum()
if verbose:
print("s_n_sfa_x:", s_n_sfa_x, "pca_x:", pca_x)
print("sfa_pca_x_truncated:", sfa_pca_x_truncated, "x_zm:", x_zm)
print("Variance(output) / Variance(input) is ", self.evar)
self.stop_training()
    def multiple_train(self, x, block_size=None, train_mode=None,
                       node_weights=None, edge_weights=None, verbose=None):
        """This function should not be called directly. Use instead the train
        method, which will decide whether multiple-training is enabled, and
        call this function if needed.

        Incrementally trains the internal GSFA and PCA nodes on one data
        batch; the final dimensionality decisions are made later in
        stop_training.
        """
        self.input_dim = x.shape[1]
        if verbose is None:
            verbose = self.verbose
        if verbose:
            print("Training iGSFANode (multiple train method)...")
        # Data mean is ignored by the multiple train method
        if self.x_mean is None:
            self.x_mean = numx.zeros(self.input_dim, dtype=self.dtype)
        x_zm = x
        # Reorder or pre-process the data before it is expanded, but only if
        # there is really an expansion.
        if self.pre_expansion_node_class and self.exp_node:
            er = "Unexpected parameters"
            raise TrainingException(er)
        else:
            x_pre_exp = x_zm
        if self.exp_node:
            if verbose:
                print("expanding x...")
            exp_x = self.exp_node.execute(x_pre_exp)
        else:
            exp_x = x_pre_exp
        self.expanded_dim = exp_x.shape[1]
        if self.max_length_slow_part is None:
            sfa_output_dim = min(self.expanded_dim, self.output_dim)
        else:
            sfa_output_dim = min(self.max_length_slow_part, self.expanded_dim,
                                 self.output_dim)
        if isinstance(self.delta_threshold, int):
            sfa_output_dim = min(sfa_output_dim, self.delta_threshold)
        sfa_output_dim = max(1, sfa_output_dim)
        # Apply SFA to expanded data (node is created on the first call only)
        if self.sfa_node is None:
            self.sfa_node = GSFANode(output_dim=sfa_output_dim,
                                     verbose=verbose)
            self.sfa_x_mean = 0
            self.sfa_x_std = 1.0
        self.sfa_node.train(exp_x, block_size=block_size,
                            train_mode=train_mode,
                            node_weights=node_weights,
                            edge_weights=edge_weights)
        if verbose:
            print("training PCA...")
        pca_output_dim = self.output_dim
        if self.pca_node is None:
            # If necessary, add reduce=True
            self.pca_node = mdp.nodes.PCANode(output_dim=pca_output_dim)
        # The slow part is not removed here; PCA is trained on raw x
        sfa_removed_x = x
        self.pca_node.train(sfa_removed_x)
    def _stop_training(self, verbose=None):
        """Finishes the training procedure.

        In single-batch mode (reconstruct_with_sfa or a non multi-train
        scaling method) all work was already done in _train, so this
        returns immediately. Otherwise it finalizes the GSFA and PCA
        sub-nodes, decides the size of the slow part, and computes the
        scaling factor of the slow features.
        """
        if verbose is None:
            verbose = self.verbose
        if self.reconstruct_with_sfa or \
                (self.slow_feature_scaling_method not in [None,
                                                          "data_dependent"]):
            return
        # else, continue with multi-train method
        self.sfa_node.stop_training()
        if verbose:
            print("self.sfa_node.d", self.sfa_node.d)
        self.pca_node.stop_training()
        # Decide how many slow features are preserved
        if isinstance(self.delta_threshold, float):
            # here self.max_length_slow_part should be considered
            self.num_sfa_features_preserved = (self.sfa_node.d <=
                                               self.delta_threshold).sum()
        elif isinstance(self.delta_threshold, int):
            # here self.max_length_slow_part should be considered
            self.num_sfa_features_preserved = self.delta_threshold
            if self.delta_threshold > self.output_dim:
                er = "The provided integer delta_threshold " + \
                     "%d is larger than the output dimensionality %d" % \
                     (self.delta_threshold, self.output_dim)
                raise ValueError(er)
            if self.max_length_slow_part is not None and \
                    self.delta_threshold > self.max_length_slow_part:
                er = "The provided integer delta_threshold: %d" % \
                     self.delta_threshold + \
                     " is larger than max_length_slow_part: %d" % \
                     self.max_length_slow_part
                raise ValueError(er)
        else:
            ex = "Cannot handle type of self.delta_threshold:" + \
                 str(type(self.delta_threshold))
            raise ValueError(ex)
        if self.num_sfa_features_preserved > self.output_dim:
            self.num_sfa_features_preserved = self.output_dim
        SFANode_reduce_output_dim(self.sfa_node,
                                  self.num_sfa_features_preserved)
        if verbose:
            print("size of slow part:", self.num_sfa_features_preserved)
        final_pca_node_output_dim = self.output_dim - \
            self.num_sfa_features_preserved
        if final_pca_node_output_dim > self.pca_node.output_dim:
            er = "The number of features computed is SMALLER than the " + \
                 "output dimensionality of the node: " + \
                 "pca_node.output_dim=" + str(self.pca_node.output_dim) + \
                 ", num_sfa_features_preserved=" + \
                 str(self.num_sfa_features_preserved) + \
                 ", output_dim=" + str(self.output_dim)
            raise ValueError(er)
        PCANode_reduce_output_dim(self.pca_node, final_pca_node_output_dim,
                                  verbose=False)
        if verbose:
            print("self.pca_node.d", self.pca_node.d)
            print("ranking method...")
        if self.slow_feature_scaling_method is None:
            self.magn_n_sfa_x = 1.0
            if verbose:
                print("method: constant amplitude for all slow features")
        elif self.slow_feature_scaling_method == "data_dependent":
            # SFA components are given a variance equal to the median variance
            # of the principal components
            if self.pca_node.d.shape[0] > 0:
                self.magn_n_sfa_x = 1.0 * numx.median(self.pca_node.d) ** 0.5
            else:
                self.magn_n_sfa_x = 1.0
            if verbose:
                print("method: data dependent")
        else:
            er = "Unknown slow feature scaling method" + \
                 str(self.slow_feature_scaling_method)
            raise ValueError(er)
        self.evar = self.pca_node.explained_variance
@staticmethod
def _is_invertible():
return True
    def _execute(self, x):
        """Extracts iGSFA features from some data. The node must have been
        already trained.

        The output is the concatenation of the scaled, normalized slow
        features and the principal components of the residual data.
        """
        # Mean removal (zero vector in multi-train mode)
        x_zm = x - self.x_mean
        if self.pre_expansion_node:
            x_pre_exp = self.pre_expansion_node.execute(x_zm)
        else:
            x_pre_exp = x_zm
        if self.exp_node:
            exp_x = self.exp_node.execute(x_pre_exp)
        else:
            exp_x = x_pre_exp
        sfa_x = self.sfa_node.execute(exp_x)
        # Normalize slow features with the statistics stored during training
        n_sfa_x = (sfa_x - self.sfa_x_mean) / self.sfa_x_std
        if self.reconstruct_with_sfa:
            # approximate input linearly, done inline to preserve node
            x_pca_app = self.lr_node.execute(n_sfa_x)
            x_app = x_pca_app
        else:
            x_app = numx.zeros_like(x_zm)
        # Remove linear approximation from the centered data
        sfa_removed_x = x_zm - x_app
        # A method for feature scaling( +rotation)
        if self.reconstruct_with_sfa \
                and self.slow_feature_scaling_method == "QR_decomposition":
            s_n_sfa_x = numx.dot(n_sfa_x, self.R.T)
        # Another method for feature scaling (no rotation)
        elif self.reconstruct_with_sfa \
                and self.slow_feature_scaling_method == "sensitivity_based":
            s_n_sfa_x = n_sfa_x * self.magn_n_sfa_x
        elif self.slow_feature_scaling_method is None:
            s_n_sfa_x = n_sfa_x * self.magn_n_sfa_x
        elif self.slow_feature_scaling_method == "data_dependent":
            s_n_sfa_x = n_sfa_x * self.magn_n_sfa_x
        else:
            er = "unknown feature scaling method" + \
                 str(self.slow_feature_scaling_method)
            raise ValueError(er)
        # Apply PCA to sfa removed data
        if self.pca_node.output_dim > 0:
            pca_x = self.pca_node.execute(sfa_removed_x)
        else:
            # No reconstructive components present
            pca_x = numx.zeros((x.shape[0], 0))
        # Finally output is the concatenation of scaled slow features and
        # remaining pca components
        sfa_pca_x = numx.concatenate((s_n_sfa_x, pca_x), axis=1)
        return sfa_pca_x  # sfa_pca_x_truncated
def _inverse(self, y, linear_inverse=True):
"""This method approximates an inverse function to the feature
extraction.
If linear_inverse is True, a linear method is used. Otherwise, a
gradient-based non-linear method is used.
"""
if linear_inverse:
return self.linear_inverse(y)
else:
return self.nonlinear_inverse(y)
    def nonlinear_inverse(self, y, verbose=None):
        """Non-linear inverse approximation method.
        This method is experimental and should be used with care.
        Note: this function requires scipy.

        Starts from the linear inverse of each sample and refines it with
        least-squares optimization of the feature-space residual.
        """
        if mdp.numx_description != 'scipy':
            raise NotImplementedError('This function requires scipy.')
        else:
            import scipy.optimize
        if verbose is None:
            verbose = self.verbose
        # Linear inverse as starting point of the optimization
        x_lin = self.linear_inverse(y)
        rmse_lin = ((y - self.execute(x_lin)) ** 2).sum(axis=1).mean() ** 0.5
        x_nl = numx.zeros_like(x_lin)
        y_dim = y.shape[1]
        x_dim = x_lin.shape[1]
        # Pad the target with zeros when the input space is larger than the
        # output space
        if y_dim < x_dim:
            num_zeros_filling = x_dim - y_dim
        else:
            num_zeros_filling = 0
        if verbose:
            print("x_dim=", x_dim, "y_dim=", y_dim, "num_zeros_filling=",
                  num_zeros_filling)
        y_long = numx.zeros(y_dim + num_zeros_filling)
        for i, y_i in enumerate(y):
            y_long[0:y_dim] = y_i
            if verbose:
                print("x_0=", x_lin[i])
                print("y_long=", y_long)
            # With full_output=False, leastsq returns (solution, ier)
            plsq = scipy.optimize.leastsq(func=f_residual, x0=x_lin[i],
                                          args=(self, y_long),
                                          full_output=False)
            x_nl_i = plsq[0]
            if verbose:
                print("x_nl_i=", x_nl_i, "plsq[1]=", plsq[1])
            # NOTE(review): leastsq reports success for ier in 1..4, but only
            # ier == 2 is accepted here -- confirm this is intended
            if plsq[1] != 2:
                print("Quitting: plsq[1]=", plsq[1])
            x_nl[i] = x_nl_i
            if verbose:
                y_i_app = self.execute(x_lin[i].reshape((1, -1)))
                print("|E_lin(%d)|=" % i,
                      ((y_i - y_i_app) ** 2).sum() ** 0.5)
                y_i_app = self.execute(x_nl_i.reshape((1, -1)))
                print("|E_nl(%d)|=" % i,
                      ((y_i - y_i_app) ** 2).sum() ** 0.5)
        rmse_nl = ((y - self.execute(x_nl)) ** 2).sum(axis=1).mean() ** 0.5
        if verbose:
            print("rmse_lin(all samples)=", rmse_lin,
                  "rmse_nl(all samples)=", rmse_nl)
        return x_nl
def linear_inverse(self, y, verbose=True):
"""Linear inverse approximation method. """
if verbose is None:
verbose = self.verbose
num_samples = y.shape[0]
if y.shape[1] != self.output_dim:
er = "Serious dimensionality inconsistency"
raise TrainingException(er)
sfa_pca_x_full = numx.zeros(
(num_samples,
self.pca_node.output_dim + self.num_sfa_features_preserved),
dtype=self.dtype)
sfa_pca_x_full[:, 0:self.output_dim] = y
s_n_sfa_x = sfa_pca_x_full[:, 0:self.num_sfa_features_preserved]
pca_x = sfa_pca_x_full[:, self.num_sfa_features_preserved:]
if pca_x.shape[1] > 0:
sfa_removed_x = self.pca_node.inverse(pca_x)
else:
sfa_removed_x = numx.zeros((num_samples, self.input_dim))
# A method for feature scaling (+rotation)
if self.reconstruct_with_sfa \
and self.slow_feature_scaling_method == "QR_decomposition":
n_sfa_x = numx.dot(s_n_sfa_x, self.Rpinv.T)
else:
n_sfa_x = s_n_sfa_x / self.magn_n_sfa_x
# recall: sfa_x is n_sfa_x * self.sfa_x_std + self.sfa_x_mean
if self.reconstruct_with_sfa:
x_pca_app = self.lr_node.execute(n_sfa_x)
x_app = x_pca_app
else:
x_app = numx.zeros_like(sfa_removed_x)
x_zm = sfa_removed_x + x_app
x = x_zm + self.x_mean
if verbose:
print("Data_std(x_zm)=", x_zm.var(axis=0))
print("Data_std(x_app)=", x_app.var(axis=0))
print("Data_std(sfa_removed_x)=", sfa_removed_x.var(axis=0))
return x
def SFANode_reduce_output_dim(sfa_node, new_output_dim, verbose=False):
    """Shrink an already trained SFA node (or GSFA node) in place so that
    only the first ``new_output_dim`` slow features are preserved.

    Raises ValueError if ``new_output_dim`` exceeds the current
    output dimensionality.
    """
    if verbose:
        print("Updating the output dimensionality of SFA node")
    if new_output_dim > sfa_node.output_dim:
        er = "Cannot increase the output dimensionality of the SFA node"
        raise ValueError(er)
    # Truncate eigenvalues, filters and bias to the kept features.
    keep = new_output_dim
    sfa_node.d = sfa_node.d[:keep]
    sfa_node.sf = sfa_node.sf[:, :keep]
    sfa_node._bias = sfa_node._bias[:keep]
    sfa_node._output_dim = keep
def PCANode_reduce_output_dim(pca_node, new_output_dim, verbose=False):
    """Shrink an already trained PCA node in place so that only the first
    ``new_output_dim`` principal components are preserved.

    The ``explained_variance`` attribute is rescaled by the fraction of
    variance retained; ``pca_node.avg`` is left untouched.
    """
    if verbose:
        print("Updating the output dimensionality of PCA node")
    if new_output_dim > pca_node.output_dim:
        raise ValueError(
            "Cannot increase the output dimensionality of the PCA node")
    total_variance_before = pca_node.d.sum()
    explained_before = pca_node.explained_variance
    # Keep only the leading components.
    pca_node.d = pca_node.d[0:new_output_dim]
    pca_node.v = pca_node.v[:, 0:new_output_dim]
    pca_node._output_dim = new_output_dim
    # Rescale explained variance by the retained fraction.
    pca_node.explained_variance = \
        explained_before * pca_node.d.sum() / total_variance_before
# Computes output errors dimension by dimension for a single sample:
# y - node.execute(x_app)
# The library fails when dim(x_app) > dim(y), thus filling of x_app with
# zeros is recommended
def f_residual(x_app_i, node, y_i):
    """Per-dimension output error ``y_i - node.execute(x_app_i)`` for a
    single sample, zero-padded to ``len(y_i)``.

    The zero padding is required because the optimizer fails when
    dim(x_app) > dim(y).
    """
    padded = numx.zeros_like(y_i)
    target = y_i.reshape((1, -1))[:, 0:node.output_dim]
    approx = node.execute(x_app_i.reshape((1, -1)))
    residual = (target - approx).flatten()
    padded[0:len(residual)] = residual
    return padded
###############################################################################
# EXAMPLES THAT SHOW HOW GSFA CAN BE USED #
###############################################################################
def example_clustered_graph():
    """Demo: train GSFA with a clustered training graph on random data
    with a slow linear trend, then report the delta values of the
    extracted features on training and test data.
    """
    print("\n****************************************************************")
    print("*Example of training GSFA using a clustered graph")
    cluster_size = 20
    num_clusters = 5
    num_samples = cluster_size * num_clusters
    dim = 20
    output_dim = 2
    # Random data plus a slowly increasing offset per sample.
    x = numx.random.normal(size=(num_samples, dim))
    x += 0.1 * numx.arange(num_samples).reshape((num_samples, 1))
    print("x=", x)
    GSFA_n = GSFANode(output_dim=output_dim)
    def identity(xx):
        return xx
    def norm2(xx):  # Computes the norm of each sample returning an Nx1 array
        return ((xx ** 2).sum(axis=1) ** 0.5).reshape((-1, 1))
    # Nonlinear expansion: raw samples concatenated with their norms.
    Exp_n = mdp.nodes.GeneralExpansionNode([identity, norm2])
    exp_x = Exp_n.execute(x)  # Expanded data
    GSFA_n.train(exp_x, train_mode="clustered", block_size=cluster_size)
    GSFA_n.stop_training()
    print("GSFA_n.d=", GSFA_n.d)
    y = GSFA_n.execute(Exp_n.execute(x))
    print("y", y)
    print("Standard delta values of output features y:", comp_delta(y))
    # Evaluate generalization on freshly drawn data with the same trend.
    x_test = numx.random.normal(size=(num_samples, dim))
    x_test += 0.1 * numx.arange(num_samples).reshape((num_samples, 1))
    y_test = GSFA_n.execute(Exp_n.execute(x_test))
    print("y_test", y_test)
    print("Standard delta values of output features y_test:",
          comp_delta(y_test))
def example_pathological_outputs(experiment):
    """Demo: train GSFA on a 20-sample linear graph whose node/edge
    weights are perturbed according to ``experiment`` (0-11), to
    illustrate pathological responses on weakly connected samples.

    Experiments 0-9 use an explicit "graph" training mode with modified
    node weights (v) or edge weights (e); experiments 10-11 use window
    training modes instead.  Results are plotted with matplotlib.
    """
    print("\n ***************************************************************")
    print("*Pathological responses.",
          "Experiment on graph with weakly connected samples")
    x = numx.random.normal(size=(20, 19))
    x2 = numx.random.normal(size=(20, 19))
    # l: sorted, zero-mean, unit-variance labels used by experiments 7-9.
    l = numx.random.normal(size=20)
    l -= l.mean()
    l /= l.std()
    l.sort()
    half_width = 3
    # Default linear-graph weights: unit node weights, chain edges,
    # plus half-weight self-loops at both ends.
    v = numx.ones(20)
    e = {}
    for t in range(19):
        e[(t, t + 1)] = 1.0
    e[(0, 0)] = 0.5
    e[(19, 19)] = 0.5
    train_mode = "graph"
    # Select the experiment to perform, from 0 to 11
    print("experiment", experiment)
    if experiment == 0:
        exp_title = "Original linear SFA graph. Experiment 0"
    elif experiment == 1:
        v[0] = 10.0
        v[10] = 0.1
        v[19] = 10.0
        exp_title = "Modified node weights. Experiment 1"
    elif experiment == 2:
        v[0] = 10.0
        v[19] = 0.1
        exp_title = "Modified node weights. Experiment 2"
    elif experiment == 3:
        e[(0, 1)] = 0.1
        e[(18, 19)] = 10.0
        exp_title = "Modified edge weights. Experiment 3"
    elif experiment == 4:
        e[(0, 1)] = 0.01
        e[(18, 19)] = 0.01
        e[(15, 17)] = 0.5
        e[(16, 18)] = 0.5
        e[(12, 14)] = 0.5
        e[(3, 5)] = 0.5
        e[(4, 6)] = 0.5
        e[(5, 7)] = 0.5
        exp_title = "Modified edge weights. Experiment 4"
    elif experiment == 5:
        e[(10, 11)] = 0.02
        e[(1, 2)] = 0.02
        e[(3, 5)] = 1.0
        e[(7, 9)] = 1.0
        e[(17, 19)] = 1.0
        e[(14, 16)] = 1.0
        exp_title = "Modified edge weights. Experiment 5"
    elif experiment == 6:
        e[(6, 7)] = 0.1
        e[(5, 6)] = 0.1
        exp_title = "Modified edge weights. Experiment 6"
    elif experiment == 7:
        # Label-dependent edge weights replacing the chain graph.
        e = {}
        for j1 in range(19):
            for j2 in range(j1 + 1, 20):
                e[(j1, j2)] = 1 / (l[j2] - l[j1] + 0.00005)
        exp_title = "Modified edge weights for labels with " + \
                    "w12 = 1/(l2-l1+0.00005). Experiment 7"
    elif experiment == 8:
        e = {}
        for j1 in range(19):
            for j2 in range(j1 + 1, 20):
                e[(j1, j2)] = numx.exp(-0.25 * (l[j2] - l[j1]) ** 2)
        exp_title = "Modified edge weights for labels with " + \
                    "w12 = exp(-0.25*(l2-l1)**2). Experiment 8"
    elif experiment == 9:
        e = {}
        for j1 in range(19):
            for j2 in range(j1 + 1, 20):
                if l[j2] - l[j1] < 1.5:
                    e[(j1, j2)] = 1 / (l[j2] - l[j1] + 0.0005)
        exp_title = "Modified edge weights w12 = 1/(l2-l1+0.0005), " + \
                    "for l2-l1<1.5. Experiment 9"
    elif experiment == 10:
        # Window training modes define the graph implicitly; no edges here.
        exp_title = "Mirroring training graph, w=%d" % half_width + \
                    ". Experiment 10"
        train_mode = "smirror_window%d" % half_width
        e = {}
    elif experiment == 11:
        exp_title = "Node weight adjustment training graph, w=%d " % \
                    half_width + ". Experiment 11"
        train_mode = "window%d" % half_width
        e = {}
    else:
        er = "Unknown experiment: " + str(experiment)
        raise ValueError(er)
    n = GSFANode(output_dim=5)
    if experiment in (10, 11):
        n.train(x, train_mode=train_mode)
    else:
        n.train(x, train_mode="graph", node_weights=v, edge_weights=e)
    n.stop_training()
    print("/" * 20, "Brute delta values of GSFA features (training/test):")
    y = n.execute(x)
    y2 = n.execute(x2)
    # Graph delta values only make sense when explicit edges exist.
    if e != {}:
        print(graph_delta_values(y, e))
        print(graph_delta_values(y2, e))
    # D: half the sum of edge weights touching each vertex (for the plot).
    D = numx.zeros(20)
    for (j1, j2) in e:
        D[j1] += e[(j1, j2)] / 2.0
        D[j2] += e[(j1, j2)] / 2.0
    import matplotlib as mpl
    mpl.use('Qt4Agg')
    import matplotlib.pyplot as plt
    plt.figure()
    plt.title("Outputs overfitted to training data, node weights=" + str(v))
    plt.xlabel(exp_title + "\n With D (half the sum of all edges " +
               "from/to each vertex)=" + str(D))
    plt.xticks(numx.arange(0, 20, 1))
    plt.plot(y)
    if experiment in (6, 7, 8):
        # Flip the label sign so its plot aligns with the first feature.
        if y[0, 0] > 0:
            l *= -1
        plt.plot(l, "*")
    plt.show()
def example_continuous_edge_weights():
    """Demo: train GSFA with continuous label-dependent edge weights
    w_{n,n'} = 1/(|l_n' - l_n| + k) on random data, report delta values
    and plot the (overfitted) training outputs together with the labels.
    """
    print("\n****************************************************************")
    print("*Testing continuous edge weigths w_{n,n'} = 1/(|l_n'-l_n|+k)")
    x = numx.random.normal(size=(20, 19))
    x2 = numx.random.normal(size=(20, 19))
    # Sorted, standardized labels.
    l = numx.random.normal(size=20)
    l -= l.mean()
    l /= l.std()
    l.sort()
    k = 0.0001
    v = numx.ones(20)
    # Fully connected graph (no self-loops) with label-distance weights.
    e = {}
    for n1 in range(20):
        for n2 in range(20):
            if n1 != n2:
                e[(n1, n2)] = 1.0 / (numx.absolute(l[n2] - l[n1]) + k)
    exp_title = "Original linear SFA graph"
    n = GSFANode(output_dim=5)
    n.train(x, train_mode="graph", node_weights=v, edge_weights=e)
    n.stop_training()
    print("/" * 20, "Brute delta values of GSFA features (training/test):")
    y = n.execute(x)
    y2 = n.execute(x2)
    if e != {}:
        print(graph_delta_values(y, e))
        print(graph_delta_values(y2, e))
    # D: half the sum of edge weights touching each vertex (for the plot).
    D = numx.zeros(20)
    for (j1, j2) in e:
        D[j1] += e[(j1, j2)] / 2.0
        D[j2] += e[(j1, j2)] / 2.0
    import matplotlib as mpl
    mpl.use('Qt4Agg')
    import matplotlib.pyplot as plt
    plt.figure()
    plt.title("Overfitted outputs on training data,v=" + str(v))
    plt.xlabel(exp_title + "\n With D=" + str(D))
    plt.xticks(numx.arange(0, 20, 1))
    plt.plot(y)
    plt.plot(l, "*")
    plt.show()
###############################################################################
# AN EXAMPLE OF HOW iGSFA CAN BE USED #
###############################################################################
def example_iGSFA():
    """Demo: train iGSFA on random data with slow linear trends in the
    first three components, then compare the linear and non-linear
    inverse approximations of the extracted features.
    """
    print("\n\n**************************************************************")
    print("*Example of training iGSFA on random data")
    num_samples = 1000
    dim = 20
    verbose = False
    # Random data with slow trends of decreasing strength in dims 0-2.
    x = numx.random.normal(size=(num_samples, dim))
    x[:, 0] += 2.0 * numx.arange(num_samples) / num_samples
    x[:, 1] += 1.0 * numx.arange(num_samples) / num_samples
    x[:, 2] += 0.5 * numx.arange(num_samples) / num_samples
    x_test = numx.random.normal(size=(num_samples, dim))
    x_test[:, 0] += 2.0 * numx.arange(num_samples) / num_samples
    x_test[:, 1] += 1.0 * numx.arange(num_samples) / num_samples
    x_test[:, 2] += 0.5 * numx.arange(num_samples) / num_samples
    def zero_mean_unit_var(x):
        # NOTE: standardizes *in place* and returns the same array.
        x -= x.mean(axis=0)
        x /= x.std(axis=0)
        return x
    print("Node creation and training")
    n = iGSFANode(output_dim=15, reconstruct_with_sfa=False,
                  slow_feature_scaling_method="data_dependent",
                  verbose=verbose)
    n.train(x, train_mode="regular")
    n.stop_training()
    y = n.execute(x)
    y_test = n.execute(x_test)
    print("y=", y)
    print("y_test=", y_test)
    print("Standard delta values of output features y:", comp_delta(y))
    print("Standard delta values of output features y_test:",
          comp_delta(y_test))
    # Enforce the zero-mean unit-variance constraint before measuring deltas.
    y_norm = zero_mean_unit_var(y)
    y_test_norm = zero_mean_unit_var(y_test)
    print("Standard delta values of output features y after constraint ",
          "enforcement:", comp_delta(y_norm))
    print("Standard delta values of output features y_test after constraint ",
          "enforcement:", comp_delta(y_test_norm))
    # Compare linear vs gradient-based non-linear inverse reconstructions.
    x_app_lin = n.inverse(y)
    x_app_nonlin = n.nonlinear_inverse(y)
    y_app_lin = n.execute(x_app_lin)
    y_app_nonlin = n.execute(x_app_nonlin)
    print("|y - y_app_lin|", ((y - y_app_lin)**2).mean())
    print("|y - y_app_nonlin|", ((y - y_app_nonlin)**2).mean())
# Run all the GSFA/iGSFA demonstration examples when executed as a script.
if __name__ == "__main__":
    for experiment_number in range(0, 12):
        example_pathological_outputs(experiment=experiment_number)
    example_continuous_edge_weights()
    example_clustered_graph()
    example_iGSFA()
/Flask-SSLify-0.1.5.tar.gz/Flask-SSLify-0.1.5/README.rst | Flask-SSLify
============
This is a simple Flask extension that configures your Flask application to redirect
all incoming requests to HTTPS.
Redirects only occur when ``app.debug`` is ``False``.
Usage
-----
Usage is pretty simple::
from flask import Flask
from flask_sslify import SSLify
app = Flask(__name__)
sslify = SSLify(app)
If you make an HTTP request, it will automatically redirect::
$ curl -I http://secure-samurai.herokuapp.com/
HTTP/1.1 302 FOUND
Content-length: 281
Content-Type: text/html; charset=utf-8
Date: Sun, 29 Apr 2012 21:39:36 GMT
Location: https://secure-samurai.herokuapp.com/
Server: gunicorn/0.14.2
Strict-Transport-Security: max-age=31536000
Connection: keep-alive
HTTP Strict Transport Security
------------------------------
Flask-SSLify also provides your application with an HSTS policy.
By default, HSTS is set for *one year* (31536000 seconds).
You can change the duration by passing the ``age`` parameter::
sslify = SSLify(app, age=300)
If you'd like to include subdomains in your HSTS policy, set the ``subdomains`` parameter::
sslify = SSLify(app, subdomains=True)
Or by including ``SSLIFY_SUBDOMAINS`` in your app's config.
HTTP 301 Redirects
------------------
By default, the redirect is issued with a HTTP 302 response. You can change that to a HTTP 301 response
by passing the ``permanent`` parameter::
sslify = SSLify(app, permanent=True)
Or by including ``SSLIFY_PERMANENT`` in your app's config.
Exclude Certain Paths from Being Redirected
-------------------------------------------
You can exclude a path that starts with a given string by including a list called ``skips``::
sslify = SSLify(app, skips=['mypath', 'anotherpath'])
Or by including ``SSLIFY_SKIPS`` in your app's config.
Install
-------
Installation is simple too::
$ pip install Flask-SSLify
Security consideration using basic auth
---------------------------------------
When using basic auth, it is important that the redirect occurs before the user is prompted for
credentials. Flask-SSLify registers a ``before_request`` handler; to make sure this handler gets
executed before credentials are entered, it is advisable not to prompt for any authentication
inside a ``before_request`` handler.
The example found at http://flask.pocoo.org/snippets/8/ works nicely, as the view function's
decorator will never have an effect before the ``before_request`` hooks are executed.
| PypiClean |
/Bluebook-0.0.1.tar.gz/Bluebook-0.0.1/pylot/component/static/pylot/vendor/mdeditor/bower_components/codemirror/addon/fold/foldcode.js | (function() {
"use strict";
function doFold(cm, pos, options) {
var finder = options && (options.call ? options : options.rangeFinder);
if (!finder) finder = cm.getHelper(pos, "fold");
if (!finder) return;
if (typeof pos == "number") pos = CodeMirror.Pos(pos, 0);
var minSize = options && options.minFoldSize || 0;
function getRange(allowFolded) {
var range = finder(cm, pos);
if (!range || range.to.line - range.from.line < minSize) return null;
var marks = cm.findMarksAt(range.from);
for (var i = 0; i < marks.length; ++i) {
if (marks[i].__isFold) {
if (!allowFolded) return null;
range.cleared = true;
marks[i].clear();
}
}
return range;
}
var range = getRange(true);
if (options && options.scanUp) while (!range && pos.line > cm.firstLine()) {
pos = CodeMirror.Pos(pos.line - 1, 0);
range = getRange(false);
}
if (!range || range.cleared) return;
var myWidget = makeWidget(options);
CodeMirror.on(myWidget, "mousedown", function() { myRange.clear(); });
var myRange = cm.markText(range.from, range.to, {
replacedWith: myWidget,
clearOnEnter: true,
__isFold: true
});
myRange.on("clear", function(from, to) {
CodeMirror.signal(cm, "unfold", cm, from, to);
});
CodeMirror.signal(cm, "fold", cm, range.from, range.to);
}
function makeWidget(options) {
var widget = (options && options.widget) || "\u2194";
if (typeof widget == "string") {
var text = document.createTextNode(widget);
widget = document.createElement("span");
widget.appendChild(text);
widget.className = "CodeMirror-foldmarker";
}
return widget;
}
  // Clumsy backwards-compatible interface: returns a function folding at `pos`.
  CodeMirror.newFoldFunction = function(rangeFinder, widget) {
    return function(cm, pos) { doFold(cm, pos, {rangeFinder: rangeFinder, widget: widget}); };
  };
  // New-style interface: cm.foldCode(pos, options)
  CodeMirror.defineExtension("foldCode", function(pos, options) { doFold(this, pos, options); });
  // "combine" fold helper: tries each given range finder in order and
  // returns the first range found.
  CodeMirror.registerHelper("fold", "combine", function() {
    var funcs = Array.prototype.slice.call(arguments, 0);
    return function(cm, start) {
      for (var i = 0; i < funcs.length; ++i) {
        var found = funcs[i](cm, start);
        if (found) return found;
      }
    };
  });
/Deliverance-0.6.1.tar.gz/Deliverance-0.6.1/deliverance/editor/media/delivxml.js | editAreaLoader.load_syntax["delivxml"] = {
'COMMENT_SINGLE' : {}
,'COMMENT_MULTI' : {'<!--' : '-->'}
,'QUOTEMARKS' : {1: "'", 2: '"'}
,'KEYWORD_CASE_SENSITIVE' : true
,'KEYWORDS' : {
'values' : [
'ruleset',
'server-settings', 'server', 'execute-pyref', 'display-local-files', 'edit-local-files',
'dev-allow', 'dev-deny', 'dev-htpasswd', 'dev-user', 'dev-expiration',
'proxy', 'dest', 'request', 'response',
'theme', 'rule', 'replace', 'append', 'prepend', 'drop'
],
'attributes' : [
'href', 'pyref', 'content', 'theme', 'if-content', 'notheme',
'manytheme', 'nocontent', 'manycontent', 'href', 'move',
'suppress-standard', 'class',
'domain', 'path', 'header', 'rewrite-links', 'request-header',
'response-header', 'environ', 'editable']
}
,'OPERATORS' :[
]
,'DELIMITERS' :[
]
,'REGEXPS' : {
'xml' : {
'search' : '()(<\\?[^>]*?\\?>)()'
,'class' : 'xml'
,'modifiers' : 'g'
,'execute' : 'before' // before or after
}
,'cdatas' : {
'search' : '()(<!\\[CDATA\\[.*?\\]\\]>)()'
,'class' : 'cdata'
,'modifiers' : 'g'
,'execute' : 'before' // before or after
}
/* These get in the way of the KEYWORDS based highlighting */
/*,'tags' : {
'search' : '(<)(/?[a-z][^ \r\n\t>]*)([^>]*>)'
,'class' : 'tags'
,'modifiers' : 'gi'
,'execute' : 'before' // before or after
}*/
/*,'attributes' : {
'search' : '( |\n|\r|\t)([^ \r\n\t=]+)(=)'
,'class' : 'attributes'
,'modifiers' : 'g'
,'execute' : 'before' // before or after
}*/
}
,'STYLES' : {
'COMMENTS': 'color: #AAAAAA;'
,'QUOTESMARKS': 'color: #6381F8;'
,'KEYWORDS' : {
'values' : 'color: #0000FF;'
,'attributes' : 'color: #009900;'
}
,'OPERATORS' : 'color: #E775F0;'
,'DELIMITERS' : ''
,'REGEXPS' : {
'attributes': 'color: #B1AC41;'
,'tags': 'color: #E62253;'
,'xml': 'color: #8DCFB5;'
,'cdata': 'color: #50B020;'
}
}
}; | PypiClean |
/IronPdf-2023.8.6-py37-none-any.whl/IronPdf-2023.8.6.data/data/IronPdf.Slim/dotnet-install.sh |
# Stop script on NZEC
set -e
# Stop script if unbound variable found (use ${var:-} if intentional)
set -u
# By default cmd1 | cmd2 returns exit code of cmd2 regardless of cmd1 success
# This is causing it to fail
set -o pipefail

# Use in the functions: eval $invocation
# (logs the calling function name and its arguments when verbose)
invocation='say_verbose "Calling: ${yellow:-}${FUNCNAME[0]} ${green:-}$*${normal:-}"'

# standard output may be used as a return value in the functions
# we need a way to write text on the screen in the functions so that
# it won't interfere with the return value.
# Exposing stream 3 as a pipe to standard output of the script itself
exec 3>&1
# Setup some colors to use. These need to work in fairly limited shells, like the Ubuntu Docker container where there are only 8 colors.
# See if stdout is a terminal
if [ -t 1 ] && command -v tput > /dev/null; then
    # see if it supports colors
    ncolors=$(tput colors || echo 0)
    if [ -n "$ncolors" ] && [ $ncolors -ge 8 ]; then
        bold="$(tput bold || echo)"
        normal="$(tput sgr0 || echo)"
        black="$(tput setaf 0 || echo)"
        red="$(tput setaf 1 || echo)"
        green="$(tput setaf 2 || echo)"
        yellow="$(tput setaf 3 || echo)"
        blue="$(tput setaf 4 || echo)"
        magenta="$(tput setaf 5 || echo)"
        cyan="$(tput setaf 6 || echo)"
        white="$(tput setaf 7 || echo)"
    fi
fi
# Print a warning to stream 3 (the script's real stdout).
say_warning() {
    printf "%b\n" "${yellow:-}dotnet_install: Warning: $1${normal:-}" >&3
}
# Print an error to stderr.
say_err() {
    printf "%b\n" "${red:-}dotnet_install: Error: $1${normal:-}" >&2
}
say() {
    # using stream 3 (defined in the beginning) to not interfere with stdout of functions
    # which may be used as return value
    printf "%b\n" "${cyan:-}dotnet-install:${normal:-} $1" >&3
}
# Print only when the script-level $verbose flag is true.
say_verbose() {
    if [ "$verbose" = true ]; then
        say "$1"
    fi
}
# This platform list is finite - if the SDK/Runtime has supported Linux distribution-specific assets,
# then and only then should the Linux distribution appear in this list.
# Adding a Linux distribution to this list does not imply distribution-specific support.
# args:
#   platform (e.g. "ubuntu.16.04") - $1
# Echoes the legacy distro-specific asset name; returns 1 when unmapped.
get_legacy_os_name_from_platform() {
    eval $invocation
    platform="$1"
    case "$platform" in
        "centos.7")
            echo "centos"
            return 0
            ;;
        "debian.8")
            echo "debian"
            return 0
            ;;
        "debian.9")
            echo "debian.9"
            return 0
            ;;
        "fedora.23")
            echo "fedora.23"
            return 0
            ;;
        "fedora.24")
            echo "fedora.24"
            return 0
            ;;
        "fedora.27")
            echo "fedora.27"
            return 0
            ;;
        "fedora.28")
            echo "fedora.28"
            return 0
            ;;
        "opensuse.13.2")
            echo "opensuse.13.2"
            return 0
            ;;
        "opensuse.42.1")
            echo "opensuse.42.1"
            return 0
            ;;
        "opensuse.42.3")
            echo "opensuse.42.3"
            return 0
            ;;
        "rhel.7"*)
            echo "rhel"
            return 0
            ;;
        "ubuntu.14.04")
            echo "ubuntu"
            return 0
            ;;
        "ubuntu.16.04")
            echo "ubuntu.16.04"
            return 0
            ;;
        "ubuntu.16.10")
            echo "ubuntu.16.10"
            return 0
            ;;
        "ubuntu.18.04")
            echo "ubuntu.18.04"
            return 0
            ;;
        "alpine.3.4.3")
            echo "alpine"
            return 0
            ;;
    esac
    return 1
}
# Echo the legacy (distro-specific) OS name for the current machine, using
# $runtime_id when set, otherwise /etc/os-release. Returns 1 when undetectable.
get_legacy_os_name() {
    eval $invocation
    local uname=$(uname)
    if [ "$uname" = "Darwin" ]; then
        echo "osx"
        return 0
    elif [ -n "$runtime_id" ]; then
        # Strip the architecture suffix from the RID before mapping.
        echo $(get_legacy_os_name_from_platform "${runtime_id%-*}" || echo "${runtime_id%-*}")
        return 0
    else
        if [ -e /etc/os-release ]; then
            . /etc/os-release
            os=$(get_legacy_os_name_from_platform "$ID${VERSION_ID:+.${VERSION_ID}}" || echo "")
            if [ -n "$os" ]; then
                echo "$os"
                return 0
            fi
        fi
    fi
    say_verbose "Distribution specific OS name and version could not be detected: UName = $uname"
    return 1
}
# Echo the Linux platform name ("ID.VERSION_ID" or "rhel.6"), preferring
# $runtime_id when set. Returns 1 when it cannot be determined.
get_linux_platform_name() {
    eval $invocation
    if [ -n "$runtime_id" ]; then
        echo "${runtime_id%-*}"
        return 0
    else
        if [ -e /etc/os-release ]; then
            . /etc/os-release
            echo "$ID${VERSION_ID:+.${VERSION_ID}}"
            return 0
        elif [ -e /etc/redhat-release ]; then
            local redhatRelease=$(</etc/redhat-release)
            if [[ $redhatRelease == "CentOS release 6."* || $redhatRelease == "Red Hat Enterprise Linux "*" release 6."* ]]; then
                echo "rhel.6"
                return 0
            fi
        fi
    fi
    # NOTE(review): $uname is not set in this function (it relies on a
    # caller's/global value) — confirm intended.
    say_verbose "Linux specific platform name and version could not be detected: UName = $uname"
    return 1
}
# Succeeds (exit 0) when the C library is musl (e.g. Alpine Linux).
is_musl_based_distro() {
    (ldd --version 2>&1 || true) | grep -q musl
}
# Echo the normalized OS name used in asset URLs: osx, freebsd, rhel.6,
# linux-musl or linux. Returns 1 when the OS is unrecognized.
get_current_os_name() {
    eval $invocation
    local uname=$(uname)
    if [ "$uname" = "Darwin" ]; then
        echo "osx"
        return 0
    elif [ "$uname" = "FreeBSD" ]; then
        echo "freebsd"
        return 0
    elif [ "$uname" = "Linux" ]; then
        local linux_platform_name=""
        linux_platform_name="$(get_linux_platform_name)" || true
        if [ "$linux_platform_name" = "rhel.6" ]; then
            echo $linux_platform_name
            return 0
        elif is_musl_based_distro; then
            echo "linux-musl"
            return 0
        elif [ "$linux_platform_name" = "linux-musl" ]; then
            echo "linux-musl"
            return 0
        else
            echo "linux"
            return 0
        fi
    fi
    say_err "OS name could not be detected: UName = $uname"
    return 1
}
# Succeeds when command $1 is available on this machine.
machine_has() {
    eval $invocation
    command -v "$1" > /dev/null 2>&1
    return $?
}
# Verify that at least one supported downloader (curl or wget) exists.
check_min_reqs() {
    local hasMinimum=false
    if machine_has "curl"; then
        hasMinimum=true
    elif machine_has "wget"; then
        hasMinimum=true
    fi
    if [ "$hasMinimum" = "false" ]; then
        say_err "curl (recommended) or wget are required to download dotnet. Install missing prerequisite to proceed."
        return 1
    fi
    return 0
}
# args:
#   input - $1
# Echo $1 lowercased.
to_lowercase() {
    #eval $invocation
    echo "$1" | tr '[:upper:]' '[:lower:]'
    return 0
}
# args:
#   input - $1
# Echo $1 without a single trailing slash, if any.
remove_trailing_slash() {
    #eval $invocation
    local input="${1:-}"
    echo "${input%/}"
    return 0
}
# args:
#   input - $1
# Echo $1 without a single leading slash, if any.
remove_beginning_slash() {
    #eval $invocation
    local input="${1:-}"
    echo "${input#/}"
    return 0
}
# args:
#   root_path - $1
#   child_path - $2 - this parameter can be empty
# Echo the two paths joined with exactly one slash between them.
combine_paths() {
    eval $invocation
    # TODO: Consider making it work with any number of paths. For now:
    if [ ! -z "${3:-}" ]; then
        say_err "combine_paths: Function takes two parameters."
        return 1
    fi
    local root_path="$(remove_trailing_slash "$1")"
    local child_path="$(remove_beginning_slash "${2:-}")"
    say_verbose "combine_paths: root_path=$root_path"
    say_verbose "combine_paths: child_path=$child_path"
    echo "$root_path/$child_path"
    return 0
}
# Echo the normalized CPU architecture of this machine (arm, arm64, s390x,
# ppc64le), defaulting to x64 when uname is unavailable or unrecognized.
get_machine_architecture() {
    eval $invocation
    if command -v uname > /dev/null; then
        CPUName=$(uname -m)
        case $CPUName in
        armv*l)
            echo "arm"
            return 0
            ;;
        aarch64|arm64)
            echo "arm64"
            return 0
            ;;
        s390x)
            echo "s390x"
            return 0
            ;;
        ppc64le)
            echo "ppc64le"
            return 0
            ;;
        esac
    fi
    # Always default to 'x64'
    echo "x64"
    return 0
}
# args:
#   architecture - $1
# Normalize a user-supplied architecture name; "<auto>" resolves to the
# current machine's architecture. Returns 1 for unsupported values.
get_normalized_architecture_from_architecture() {
    eval $invocation
    local architecture="$(to_lowercase "$1")"
    if [[ $architecture == \<auto\> ]]; then
        echo "$(get_machine_architecture)"
        return 0
    fi
    case "$architecture" in
        amd64|x64)
            echo "x64"
            return 0
            ;;
        arm)
            echo "arm"
            return 0
            ;;
        arm64)
            echo "arm64"
            return 0
            ;;
        s390x)
            echo "s390x"
            return 0
            ;;
        ppc64le)
            echo "ppc64le"
            return 0
            ;;
    esac
    say_err "Architecture \`$architecture\` not supported. If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues"
    return 1
}
# args:
#   version - $1
#   channel - $2
#   architecture - $3
# On macOS arm64 with SDKs older than 6.0 (which have no arm64 builds),
# fall back to x64 under Rosetta; error out when Rosetta is absent.
get_normalized_architecture_for_specific_sdk_version() {
    eval $invocation
    local is_version_support_arm64="$(is_arm64_supported "$1")"
    local is_channel_support_arm64="$(is_arm64_supported "$2")"
    local architecture="$3";
    local osname="$(get_current_os_name)"
    if [ "$osname" == "osx" ] && [ "$architecture" == "arm64" ] && { [ "$is_version_support_arm64" = false ] || [ "$is_channel_support_arm64" = false ]; }; then
        #check if rosetta is installed
        if [ "$(/usr/bin/pgrep oahd >/dev/null 2>&1;echo $?)" -eq 0 ]; then
            say_verbose "Changing user architecture from '$architecture' to 'x64' because .NET SDKs prior to version 6.0 do not support arm64."
            echo "x64"
            return 0;
        else
            # NOTE(review): $version here is not a local of this function —
            # confirm it refers to the intended script-level variable.
            say_err "Architecture \`$architecture\` is not supported for .NET SDK version \`$version\`. Please install Rosetta to allow emulation of the \`$architecture\` .NET SDK on this platform"
            return 1
        fi
    fi
    echo "$architecture"
    return 0
}
# args:
#   version or channel - $1
# Echo "false" for versions/channels 1.x-5.x (no arm64 assets), else "true".
is_arm64_supported() {
    #any channel or version that starts with the specified versions
    case "$1" in
        ( "1"* | "2"* | "3"* | "4"* | "5"*)
            echo false
            return 0
    esac
    echo true
    return 0
}
# args:
#   user_defined_os - $1
# Validate and echo the user-supplied --os value, or autodetect when empty.
get_normalized_os() {
    eval $invocation
    local osname="$(to_lowercase "$1")"
    if [ ! -z "$osname" ]; then
        case "$osname" in
            osx | freebsd | rhel.6 | linux-musl | linux)
                echo "$osname"
                return 0
                ;;
            *)
                say_err "'$user_defined_os' is not a supported value for --os option, supported values are: osx, linux, linux-musl, freebsd, rhel.6. If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues."
                return 1
                ;;
        esac
    else
        osname="$(get_current_os_name)" || return 1
    fi
    echo "$osname"
    return 0
}
# args:
#   quality - $1
# Validate and echo the --quality value; "ga" normalizes to empty (GA builds
# are reachable without a quality segment). Returns 1 for unknown values.
get_normalized_quality() {
    eval $invocation
    local quality="$(to_lowercase "$1")"
    if [ ! -z "$quality" ]; then
        case "$quality" in
            daily | signed | validated | preview)
                echo "$quality"
                return 0
                ;;
            ga)
                #ga quality is available without specifying quality, so normalizing it to empty
                return 0
                ;;
            *)
                say_err "'$quality' is not a supported value for --quality option. Supported values are: daily, signed, validated, preview, ga. If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues."
                return 1
                ;;
        esac
    fi
    return 0
}
# args:
#   channel - $1
# Normalize the --channel value: "current" maps to "STS", "lts"/"sts" are
# upper-cased, anything else is passed through (with deprecation warnings).
get_normalized_channel() {
    eval $invocation
    local channel="$(to_lowercase "$1")"
    if [[ $channel == current ]]; then
        say_warning 'Value "Current" is deprecated for -Channel option. Use "STS" instead.'
    fi
    if [[ $channel == release/* ]]; then
        say_warning 'Using branch name with -Channel option is no longer supported with newer releases. Use -Quality option with a channel in X.Y format instead.';
    fi
    if [ ! -z "$channel" ]; then
        case "$channel" in
            lts)
                echo "LTS"
                return 0
                ;;
            sts)
                echo "STS"
                return 0
                ;;
            current)
                echo "STS"
                return 0
                ;;
            *)
                echo "$channel"
                return 0
                ;;
        esac
    fi
    return 0
}
# args:
#   runtime - $1
# Map the --runtime value to the product name used in URLs/messages:
# "dotnet" -> dotnet-runtime, "aspnetcore" -> aspnetcore-runtime,
# empty -> dotnet-sdk (any other value yields an empty product).
get_normalized_product() {
    eval $invocation
    local product=""
    local runtime="$(to_lowercase "$1")"
    if [[ "$runtime" == "dotnet" ]]; then
        product="dotnet-runtime"
    elif [[ "$runtime" == "aspnetcore" ]]; then
        product="aspnetcore-runtime"
    elif [ -z "$runtime" ]; then
        product="dotnet-sdk"
    fi
    echo "$product"
    return 0
}
# The version text returned from the feeds is a 1-line or 2-line string:
# For the SDK and the dotnet runtime (2 lines):
# Line 1: # commit_hash
# Line 2: # 4-part version
# For the aspnetcore runtime (1 line):
# Line 1: # 4-part version
# args:
#   version_text - stdin
# Echo only the version (last line, CR stripped).
get_version_from_latestversion_file_content() {
    eval $invocation
    cat | tail -n 1 | sed 's/\r$//'
    return 0
}
# args:
#   install_root - $1
#   relative_path_to_package - $2
#   specific_version - $3
# Succeeds when <install_root>/<relative_path>/<version> already exists.
is_dotnet_package_installed() {
    eval $invocation
    local install_root="$1"
    local relative_path_to_package="$2"
    # Strip stray tabs/CR/LF that may come from downloaded version text.
    local specific_version="${3//[$'\t\r\n']}"
    local dotnet_package_path="$(combine_paths "$(combine_paths "$install_root" "$relative_path_to_package")" "$specific_version")"
    say_verbose "is_dotnet_package_installed: dotnet_package_path=$dotnet_package_path"
    if [ -d "$dotnet_package_path" ]; then
        return 0
    else
        return 1
    fi
}
# args:
#   azure_feed - $1
#   channel - $2
#   normalized_architecture - $3
# Download the feed's latest.version file for the selected product
# (sdk / dotnet runtime / aspnetcore runtime) and emit its content.
get_version_from_latestversion_file() {
    eval $invocation
    local azure_feed="$1"
    local channel="$2"
    local normalized_architecture="$3"
    local version_file_url=null
    if [[ "$runtime" == "dotnet" ]]; then
        version_file_url="$azure_feed/Runtime/$channel/latest.version"
    elif [[ "$runtime" == "aspnetcore" ]]; then
        version_file_url="$azure_feed/aspnetcore/Runtime/$channel/latest.version"
    elif [ -z "$runtime" ]; then
        version_file_url="$azure_feed/Sdk/$channel/latest.version"
    else
        say_err "Invalid value for \$runtime"
        return 1
    fi
    say_verbose "get_version_from_latestversion_file: latest url: $version_file_url"
    download "$version_file_url" || return $?
    return 0
}
# args:
#   json_file - $1
# Extract the sdk.version value from a global.json file without a JSON
# parser (awk-based); echoes the version or returns 1 on any parse failure.
parse_globaljson_file_for_version() {
    eval $invocation
    local json_file="$1"
    if [ ! -f "$json_file" ]; then
        say_err "Unable to find \`$json_file\`"
        return 1
    fi
    # Grab the "sdk" object block, then split it into key:value lines.
    sdk_section=$(cat $json_file | tr -d "\r" | awk '/"sdk"/,/}/')
    if [ -z "$sdk_section" ]; then
        say_err "Unable to parse the SDK node in \`$json_file\`"
        return 1
    fi
    sdk_list=$(echo $sdk_section | awk -F"[{}]" '{print $2}')
    sdk_list=${sdk_list//[\" ]/}
    sdk_list=${sdk_list//,/$'\n'}
    local version_info=""
    while read -r line; do
        IFS=:
        while read -r key value; do
            if [[ "$key" == "version" ]]; then
                version_info=$value
            fi
        done <<< "$line"
    done <<< "$sdk_list"
    if [ -z "$version_info" ]; then
        say_err "Unable to find the SDK:version node in \`$json_file\`"
        return 1
    fi
    unset IFS;
    echo "$version_info"
    return 0
}
# args:
#   azure_feed - $1
#   channel - $2
#   normalized_architecture - $3
#   version - $4
#   json_file - $5
# Resolve "latest" via the feed, a concrete version as-is, or the version
# pinned in a global.json file when $5 is provided.
get_specific_version_from_version() {
    eval $invocation
    local azure_feed="$1"
    local channel="$2"
    local normalized_architecture="$3"
    local version="$(to_lowercase "$4")"
    local json_file="$5"
    if [ -z "$json_file" ]; then
        if [[ "$version" == "latest" ]]; then
            local version_info
            version_info="$(get_version_from_latestversion_file "$azure_feed" "$channel" "$normalized_architecture" false)" || return 1
            say_verbose "get_specific_version_from_version: version_info=$version_info"
            echo "$version_info" | get_version_from_latestversion_file_content
            return 0
        else
            echo "$version"
            return 0
        fi
    else
        local version_info
        version_info="$(parse_globaljson_file_for_version "$json_file")" || return 1
        echo "$version_info"
        return 0
    fi
}
# Build the primary payload URL for the product selected by the global $runtime.
# Echoes the link on success; returns 1 for an unrecognized runtime.
# args:
#   azure_feed - $1
#   channel - $2
#   normalized_architecture - $3
#   specific_version - $4
#   normalized_os - $5
construct_download_link() {
    eval $invocation

    local azure_feed="$1"
    local channel="$2"
    local normalized_architecture="$3"
    # Scrub tab/CR/LF characters that may have leaked into the version string.
    local specific_version="${4//[$'\t\r\n']}"
    local specific_product_version="$(get_specific_product_version "$1" "$4")"
    local osname="$5"

    local download_link=null
    case "$runtime" in
        dotnet)
            download_link="$azure_feed/Runtime/$specific_version/dotnet-runtime-$specific_product_version-$osname-$normalized_architecture.tar.gz"
            ;;
        aspnetcore)
            download_link="$azure_feed/aspnetcore/Runtime/$specific_version/aspnetcore-runtime-$specific_product_version-$osname-$normalized_architecture.tar.gz"
            ;;
        "")
            # Empty runtime means the SDK archive.
            download_link="$azure_feed/Sdk/$specific_version/dotnet-sdk-$specific_product_version-$osname-$normalized_architecture.tar.gz"
            ;;
        *)
            return 1
            ;;
    esac

    echo "$download_link"
    return 0
}
# args:
#   azure_feed - $1
#   specific_version - $2
#   download link - $3 (optional)
get_specific_product_version() {
    # If we find a 'productVersion.txt' at the root of any folder, we'll use its contents
    # to resolve the version of what's in the folder, superseding the specified version.
    # if 'productVersion.txt' is missing but download link is already available, product version will be taken from download link
    eval $invocation
    local azure_feed="$1"
    # Scrub tab/CR/LF characters that may have leaked into the version string.
    local specific_version="${2//[$'\t\r\n']}"
    local package_download_link=""
    if [ $# -gt 2 ]; then
        local package_download_link="$3"
    fi
    local specific_product_version=null
    # Try to get the version number, using the productVersion.txt file located next to the installer file.
    # Two candidate URLs: the flattened file name (e.g. sdk-productVersion.txt) first, then the plain one.
    local download_links=($(get_specific_product_version_url "$azure_feed" "$specific_version" true "$package_download_link")
    $(get_specific_product_version_url "$azure_feed" "$specific_version" false "$package_download_link"))
    for download_link in "${download_links[@]}"
    do
        say_verbose "Checking for the existence of $download_link"
        if machine_has "curl"
        then
            # --fail makes curl return non-zero on HTTP errors so we can try the next candidate.
            if ! specific_product_version=$(curl -s --fail "${download_link}${feed_credential}" 2>&1); then
                continue
            else
                # Strip tab/CR/LF from the retrieved version before echoing it.
                echo "${specific_product_version//[$'\t\r\n']}"
                return 0
            fi
        elif machine_has "wget"
        then
            specific_product_version=$(wget -qO- "${download_link}${feed_credential}" 2>&1)
            if [ $? = 0 ]; then
                echo "${specific_product_version//[$'\t\r\n']}"
                return 0
            fi
        fi
    done
    # Getting the version number with productVersion.txt has failed. Try parsing the download link for a version number.
    say_verbose "Failed to get the version using productVersion.txt file. Download link will be parsed instead."
    specific_product_version="$(get_product_specific_version_from_download_link "$package_download_link" "$specific_version")"
    echo "${specific_product_version//[$'\t\r\n']}"
    return 0
}
# Build the URL of the productVersion.txt file for the given version.
# When a package download link is supplied, the file is looked for next to the
# package; otherwise the URL is derived from the feed layout for $runtime.
# args:
#   azure_feed - $1
#   specific_version - $2
#   is_flattened - $3
#   download link - $4 (optional)
get_specific_product_version_url() {
    eval $invocation

    local azure_feed="$1"
    local specific_version="$2"
    local is_flattened="$3"
    local package_download_link=""
    if [ $# -gt 3 ]; then
        local package_download_link="$4"
    fi

    # Flattened feeds prefix the file name with the product it describes.
    local pvFileName="productVersion.txt"
    if [ "$is_flattened" = true ]; then
        case "$runtime" in
            "")     pvFileName="sdk-productVersion.txt" ;;
            dotnet) pvFileName="runtime-productVersion.txt" ;;
            *)      pvFileName="$runtime-productVersion.txt" ;;
        esac
    fi

    local download_link=null
    if [ -n "$package_download_link" ]; then
        # Look for the file in the same folder as the package itself.
        download_link="${package_download_link%/*}/${pvFileName}"
    else
        case "$runtime" in
            dotnet)
                download_link="$azure_feed/Runtime/$specific_version/${pvFileName}"
                ;;
            aspnetcore)
                download_link="$azure_feed/aspnetcore/Runtime/$specific_version/${pvFileName}"
                ;;
            "")
                download_link="$azure_feed/Sdk/$specific_version/${pvFileName}"
                ;;
            *)
                return 1
                ;;
        esac
    fi

    say_verbose "Constructed productVersion link: $download_link"
    echo "$download_link"
    return 0
}
# Parse the product version out of a payload file name, falling back to the
# supplied specific version when the link is empty or has too few tokens.
# args:
#   download link - $1
#   specific version - $2
get_product_specific_version_from_download_link()
{
    eval $invocation
    local download_link="$1"
    local specific_version="$2"
    local specific_product_version=""
    # Nothing to parse: fall back to the version that was passed in.
    if [ -z "$download_link" ]; then
        echo "$specific_version"
        return 0
    fi
    #get filename
    filename="${download_link##*/}"
    #product specific version follows the product name
    #for filename 'dotnet-sdk-3.1.404-linux-x64.tar.gz': the product version is 3.1.404
    IFS='-'
    read -ra filename_elems <<< "$filename"
    count=${#filename_elems[@]}
    if [[ "$count" -gt 2 ]]; then
        # Third dash-separated token, e.g. '3.1.404'.
        specific_product_version="${filename_elems[2]}"
    else
        specific_product_version=$specific_version
    fi
    # Restore default word splitting (IFS was set to '-' for the read above).
    unset IFS;
    echo "$specific_product_version"
    return 0
}
# Build the legacy (pre-2.0 naming) payload URL for the current $runtime.
# Only the dotnet runtime and the SDK ever had legacy names; anything else
# (including aspnetcore) returns 1 so the caller can skip the legacy link.
# args:
#   azure_feed - $1
#   channel - $2
#   normalized_architecture - $3
#   specific_version - $4
construct_legacy_download_link() {
    eval $invocation

    local azure_feed="$1"
    local channel="$2"
    local normalized_architecture="$3"
    local specific_version="${4//[$'\t\r\n']}"

    # Legacy names embed the distro-specific OS name rather than the generic one.
    local distro_specific_osname
    distro_specific_osname="$(get_legacy_os_name)" || return 1

    local legacy_download_link=null
    case "$runtime" in
        dotnet)
            legacy_download_link="$azure_feed/Runtime/$specific_version/dotnet-$distro_specific_osname-$normalized_architecture.$specific_version.tar.gz"
            ;;
        "")
            legacy_download_link="$azure_feed/Sdk/$specific_version/dotnet-dev-$distro_specific_osname-$normalized_architecture.$specific_version.tar.gz"
            ;;
        *)
            return 1
            ;;
    esac

    echo "$legacy_download_link"
    return 0
}
# Echo the per-user install location: DOTNET_INSTALL_DIR when set to a
# non-empty value, otherwise ~/.dotnet.
get_user_install_path() {
    eval $invocation

    if [ -n "${DOTNET_INSTALL_DIR:-}" ]; then
        echo "$DOTNET_INSTALL_DIR"
    else
        echo "$HOME/.dotnet"
    fi
    return 0
}
# Turn the requested install dir into a concrete path.
# '<auto>' is a sentinel meaning "let the script pick the location".
# args:
#   install_dir - $1
resolve_installation_path() {
    eval $invocation

    local install_dir=$1

    # Anything other than the sentinel is passed through untouched.
    if [ "$install_dir" != "<auto>" ]; then
        echo "$install_dir"
        return 0
    fi

    local user_install_path="$(get_user_install_path)"
    say_verbose "resolve_installation_path: user_install_path=$user_install_path"
    echo "$user_install_path"
    return 0
}
# Echo an absolute, physically-resolved (pwd -P) form of the given path.
# args:
#   relative_or_absolute_path - $1
get_absolute_path() {
    eval $invocation

    local relative_or_absolute_path=$1

    # Resolve the directory part physically, then re-attach the file name.
    local resolved_dir
    resolved_dir="$(cd "$(dirname "$1")" && pwd -P)"
    echo "$resolved_dir/$(basename "$1")"
    return 0
}
# Copy every path read from stdin from under root_path to the same relative
# location under out_path. When override is false, existing targets are kept
# (cp -n) — or only updated (cp -u) on linux-musl, which lacks -n.
# args:
#   input_files - stdin
#   root_path - $1
#   out_path - $2
#   override - $3
copy_files_or_dirs_from_list() {
    eval $invocation
    local root_path="$(remove_trailing_slash "$1")"
    local out_path="$(remove_trailing_slash "$2")"
    local override="$3"
    local osname="$(get_current_os_name)"
    # cp flag used when override is disabled; empty string when override=true.
    local override_switch=$(
        if [ "$override" = false ]; then
            if [ "$osname" = "linux-musl" ]; then
                printf -- "-u";
            else
                printf -- "-n";
            fi
        fi)
    cat | uniq | while read -r file_path; do
        # Rebase the incoming path from root_path onto out_path.
        local path="$(remove_beginning_slash "${file_path#$root_path}")"
        local target="$out_path/$path"
        if [ "$override" = true ] || (! ([ -d "$target" ] || [ -e "$target" ])); then
            mkdir -p "$out_path/$(dirname "$path")"
            if [ -d "$target" ]; then
                rm -rf "$target"
            fi
            # $override_switch is intentionally unquoted: when empty it must
            # expand to no argument at all, not an empty-string argument.
            cp -R $override_switch "$root_path/$path" "$target"
        fi
    done
}
# Extract the downloaded tarball into a temp folder, then copy its contents
# into out_path: versioned folders are always copied, non-versioned files
# honor the global $override_non_versioned_files setting.
# args:
#   zip_path - $1
#   out_path - $2
extract_dotnet_package() {
    eval $invocation
    local zip_path="$1"
    local out_path="$2"
    local temp_out_path="$(mktemp -d "$temporary_file_template")"
    local failed=false
    tar -xzf "$zip_path" -C "$temp_out_path" > /dev/null || failed=true
    # Matches paths containing a version-number folder, e.g. 'sdk/6.0.100/'.
    local folders_with_version_regex='^.*/[0-9]+\.[0-9]+[^/]+/'
    find "$temp_out_path" -type f | grep -Eo "$folders_with_version_regex" | sort | copy_files_or_dirs_from_list "$temp_out_path" "$out_path" false
    find "$temp_out_path" -type f | grep -Ev "$folders_with_version_regex" | copy_files_or_dirs_from_list "$temp_out_path" "$out_path" "$override_non_versioned_files"
    # Clean up the staging folder and the downloaded archive regardless of outcome.
    rm -rf "$temp_out_path"
    rm -f "$zip_path" && say_verbose "Temporary zip file $zip_path was removed"
    if [ "$failed" = true ]; then
        say_err "Extraction failed"
        return 1
    fi
    return 0
}
# Fetch the HTTP response headers for remote_path using whichever of
# curl/wget is available. Header text goes to stdout; returns 1 on failure
# or when neither tool is installed.
# args:
#   remote_path - $1
#   disable_feed_credential - $2
get_http_header()
{
    eval $invocation
    local remote_path="$1"
    local disable_feed_credential="$2"

    local failed=false
    # Fix: quote the arguments so a URL containing whitespace is passed to the
    # helper as a single argument instead of being word-split.
    if machine_has "curl"; then
        get_http_header_curl "$remote_path" "$disable_feed_credential" || failed=true
    elif machine_has "wget"; then
        get_http_header_wget "$remote_path" "$disable_feed_credential" || failed=true
    else
        failed=true
    fi
    if [ "$failed" = true ]; then
        say_verbose "Failed to get HTTP header: '$remote_path'."
        return 1
    fi
    return 0
}
# Fetch HTTP response headers for remote_path with curl (-I, following
# redirects). Appends the feed credential unless disabled.
# args:
#   remote_path - $1
#   disable_feed_credential - $2
get_http_header_curl() {
    eval $invocation
    local remote_path="$1"
    local disable_feed_credential="$2"

    # Fix: declare these 'local' so the credentialed URL and the option string
    # no longer leak into the caller's (global) scope.
    local remote_path_with_credential="$remote_path"
    if [ "$disable_feed_credential" = false ]; then
        remote_path_with_credential+="$feed_credential"
    fi

    # $curl_options is intentionally unquoted so it expands to separate arguments.
    local curl_options="-I -sSL --retry 5 --retry-delay 2 --connect-timeout 15 "
    curl $curl_options "$remote_path_with_credential" 2>&1 || return 1
    return 0
}
# Fetch HTTP response headers for remote_path with wget (--spider -S).
# Appends the feed credential unless disabled; returns wget's exit status.
# args:
#   remote_path - $1
#   disable_feed_credential - $2
get_http_header_wget() {
    eval $invocation
    local remote_path="$1"
    local disable_feed_credential="$2"
    local wget_options="-q -S --spider --tries 5 "

    local wget_options_extra=''
    # Test for options that aren't supported on all wget implementations.
    if [[ $(wget -h 2>&1 | grep -E 'waitretry|connect-timeout') ]]; then
        wget_options_extra="--waitretry 2 --connect-timeout 15 "
    else
        say "wget extra options are unavailable for this environment"
    fi

    # Fix: declare 'local' so the credentialed URL does not leak into the
    # caller's scope (matches the curl variant after the same fix).
    local remote_path_with_credential="$remote_path"
    if [ "$disable_feed_credential" = false ]; then
        remote_path_with_credential+="$feed_credential"
    fi

    # -S prints the headers to stderr; 2>&1 folds them into stdout for the caller.
    wget $wget_options $wget_options_extra "$remote_path_with_credential" 2>&1
    return $?
}
# Download remote_path to out_path (or stdout when out_path is empty).
# Non-http paths are simply copied. HTTP downloads are attempted up to three
# times with growing back-off; a 404 stops the retries immediately.
# Relies on downloadcurl/downloadwget setting the globals $http_code and
# $download_error_msg on failure.
# args:
#   remote_path - $1
#   [out_path] - $2 - stdout if not provided
download() {
    eval $invocation

    local remote_path="$1"
    local out_path="${2:-}"

    # Local files need no retry logic — just copy them.
    if [[ "$remote_path" != "http"* ]]; then
        cp "$remote_path" "$out_path"
        return $?
    fi

    local failed=false
    local attempts=0
    while [ $attempts -lt 3 ]; do
        attempts=$((attempts+1))
        failed=false
        if machine_has "curl"; then
            downloadcurl "$remote_path" "$out_path" || failed=true
        elif machine_has "wget"; then
            downloadwget "$remote_path" "$out_path" || failed=true
        else
            say_err "Missing dependency: neither curl nor wget was found."
            exit 1
        fi

        # Stop on success, after the last attempt, or on 404 (retrying cannot help).
        # Fix: quote $http_code — it is a global that may be unset, and the
        # previous unquoted test expanded to a malformed expression in that case.
        if [ "$failed" = false ] || [ $attempts -ge 3 ] || { [ -n "${http_code:-}" ] && [ "${http_code:-}" = "404" ]; }; then
            break
        fi

        say "Download attempt #$attempts has failed: $http_code $download_error_msg"
        say "Attempt #$((attempts+1)) will start in $((attempts*10)) seconds."
        sleep $((attempts*10))
    done

    if [ "$failed" = true ]; then
        say_verbose "Download failed: $remote_path"
        return 1
    fi
    return 0
}
# Download $1 with curl, writing to $2 (or stdout when $2 is empty).
# Updates global variables $http_code and $download_error_msg
downloadcurl() {
    eval $invocation
    unset http_code
    unset download_error_msg
    local remote_path="$1"
    local out_path="${2:-}"
    # Append feed_credential as late as possible before calling curl to avoid logging feed_credential
    # Avoid passing URI with credentials to functions: note, most of them echoing parameters of invocation in verbose output.
    local remote_path_with_credential="${remote_path}${feed_credential}"
    local curl_options="--retry 20 --retry-delay 2 --connect-timeout 15 -sSL -f --create-dirs "
    local curl_exit_code=0;
    # No out_path: stream the payload to stdout.
    if [ -z "$out_path" ]; then
        curl $curl_options "$remote_path_with_credential" 2>&1
        curl_exit_code=$?
    else
        curl $curl_options -o "$out_path" "$remote_path_with_credential" 2>&1
        curl_exit_code=$?
    fi
    if [ $curl_exit_code -gt 0 ]; then
        download_error_msg="Unable to download $remote_path."
        # Check for curl timeout codes
        # (curl exit 7 = failed to connect, 28 = operation timed out)
        if [[ $curl_exit_code == 7 || $curl_exit_code == 28 ]]; then
            download_error_msg+=" Failed to reach the server: connection timeout."
        else
            # Re-query just the headers to learn the HTTP status of the failure.
            local disable_feed_credential=false
            local response=$(get_http_header_curl $remote_path $disable_feed_credential)
            # Last HTTP status line wins (redirect chains emit several).
            http_code=$( echo "$response" | awk '/^HTTP/{print $2}' | tail -1 )
            if [[ ! -z $http_code && $http_code != 2* ]]; then
                download_error_msg+=" Returned HTTP status code: $http_code."
            fi
        fi
        say_verbose "$download_error_msg"
        return 1
    fi
    return 0
}
# Download $1 with wget, writing to $2 (or stdout when $2 is empty).
# Updates global variables $http_code and $download_error_msg
downloadwget() {
    eval $invocation
    unset http_code
    unset download_error_msg
    local remote_path="$1"
    local out_path="${2:-}"
    # Append feed_credential as late as possible before calling wget to avoid logging feed_credential
    local remote_path_with_credential="${remote_path}${feed_credential}"
    local wget_options="--tries 20 "
    local wget_options_extra=''
    local wget_result=''
    # Test for options that aren't supported on all wget implementations.
    if [[ $(wget -h 2>&1 | grep -E 'waitretry|connect-timeout') ]]; then
        wget_options_extra="--waitretry 2 --connect-timeout 15 "
    else
        say "wget extra options are unavailable for this environment"
    fi
    # No out_path: stream the payload to stdout (-O -).
    if [ -z "$out_path" ]; then
        wget -q $wget_options $wget_options_extra -O - "$remote_path_with_credential" 2>&1
        wget_result=$?
    else
        wget $wget_options $wget_options_extra -O "$out_path" "$remote_path_with_credential" 2>&1
        wget_result=$?
    fi
    if [[ $wget_result != 0 ]]; then
        # Re-query just the headers to learn the HTTP status of the failure.
        local disable_feed_credential=false
        local response=$(get_http_header_wget $remote_path $disable_feed_credential)
        # wget indents header lines, hence the leading space in the pattern.
        http_code=$( echo "$response" | awk '/^  HTTP/{print $2}' | tail -1 )
        download_error_msg="Unable to download $remote_path."
        if [[ ! -z $http_code && $http_code != 2* ]]; then
            download_error_msg+=" Returned HTTP status code: $http_code."
        # wget exit code 4 stands for network-issue
        elif [[ $wget_result == 4 ]]; then
            download_error_msg+=" Failed to reach the server: connection timeout."
        fi
        say_verbose "$download_error_msg"
        return 1
    fi
    return 0
}
# Resolve the primary payload URL by constructing an aka.ms short link from the
# normalized channel/quality/product/os/arch globals and following its redirect
# chain. On success sets the global $aka_ms_download_link; returns 1 otherwise.
get_download_link_from_aka_ms() {
    eval $invocation

    #quality is not supported for LTS or STS channel
    #STS maps to current
    if [[ ! -z "$normalized_quality"  && ("$normalized_channel" == "LTS" || "$normalized_channel" == "STS") ]]; then
        normalized_quality=""
        say_warning "Specifying quality for STS or LTS channel is not supported, the quality will be ignored."
    fi
    say_verbose "Retrieving primary payload URL from aka.ms for channel: '$normalized_channel', quality: '$normalized_quality', product: '$normalized_product', os: '$normalized_os', architecture: '$normalized_architecture'."

    #construct aka.ms link
    aka_ms_link="https://aka.ms/dotnet"
    if  [ "$internal" = true ]; then
        aka_ms_link="$aka_ms_link/internal"
    fi
    aka_ms_link="$aka_ms_link/$normalized_channel"
    if [[ ! -z "$normalized_quality" ]]; then
        aka_ms_link="$aka_ms_link/$normalized_quality"
    fi
    aka_ms_link="$aka_ms_link/$normalized_product-$normalized_os-$normalized_architecture.tar.gz"
    say_verbose "Constructed aka.ms link: '$aka_ms_link'."

    #get HTTP response
    #do not pass credentials as a part of the $aka_ms_link and do not apply credentials in the get_http_header function
    #otherwise the redirect link would have credentials as well
    #it would result in applying credentials twice to the resulting link and thus breaking it, and in echoing credentials to the output as a part of redirect link
    disable_feed_credential=true
    response="$(get_http_header $aka_ms_link $disable_feed_credential)"

    say_verbose "Received response: $response"
    # Get results of all the redirects.
    http_codes=$( echo "$response" | awk '$1 ~ /^HTTP/ {print $2}' )
    # They all need to be 301, otherwise some links are broken (except for the last, which is not a redirect but 200 or 404).
    # sed '$d' drops that final (non-redirect) status before the 301 check.
    broken_redirects=$( echo "$http_codes" | sed '$d' | grep -v '301' )

    # All HTTP codes are 301 (Moved Permanently), the redirect link exists.
    if [[ -z "$broken_redirects" ]]; then
        # The last Location header is the final payload URL; strip the trailing CR.
        aka_ms_download_link=$( echo "$response" | awk '$1 ~ /^Location/{print $2}' | tail -1 | tr -d '\r')

        if [[ -z "$aka_ms_download_link" ]]; then
            say_verbose "The aka.ms link '$aka_ms_link' is not valid: failed to get redirect location."
            return 1
        fi

        say_verbose "The redirect location retrieved: '$aka_ms_download_link'."
        return 0
    else
        say_verbose "The aka.ms link '$aka_ms_link' is not valid: received HTTP code: $(echo "$broken_redirects" | paste -sd "," -)."
        return 1
    fi
}
# Populate the global 'feeds' array in priority order.
# --no-cdn selects the direct storage endpoints (or the uncached feed when
# given); otherwise an explicit azure feed wins over the CDN defaults.
get_feeds_to_use()
{
    if [[ "$no_cdn" == "true" ]]; then
        if [[ -n "$uncached_feed" ]]; then
            feeds=("$uncached_feed")
        else
            feeds=(
            "https://dotnetcli.blob.core.windows.net/dotnet"
            "https://dotnetbuilds.blob.core.windows.net/public"
            )
        fi
    elif [[ -n "$azure_feed" ]]; then
        feeds=("$azure_feed")
    else
        feeds=(
        "https://dotnetcli.azureedge.net/dotnet"
        "https://dotnetbuilds.azureedge.net/public"
        )
    fi
}
# Populate the parallel globals download_links / specific_versions /
# effective_versions / link_types, first via aka.ms, then via the regular
# feeds. Fails when no link could be generated.
# THIS FUNCTION MAY EXIT (if the determined version is already installed).
generate_download_links() {

    download_links=()
    specific_versions=()
    effective_versions=()
    link_types=()

    # If generate_akams_links returns false, no fallback to old links. Just terminate.
    # This function may also 'exit' (if the determined version is already installed).
    generate_akams_links || return

    # Check other feeds only if we haven't been able to find an aka.ms link.
    if [[ "${#download_links[@]}" -lt 1 ]]; then
        # Fix: quote the array expansion and the feed argument so feed URLs are
        # never word-split (consistent with the quoted iteration below).
        for feed in "${feeds[@]}"
        do
            # generate_regular_links may also 'exit' (if the determined version is already installed).
            generate_regular_links "$feed" || return
        done
    fi

    if [[ "${#download_links[@]}" -eq 0 ]]; then
        say_err "Failed to resolve the exact version number."
        return 1
    fi

    say_verbose "Generated ${#download_links[@]} links."
    for link_index in "${!download_links[@]}"
    do
        say_verbose "Link $link_index: ${link_types[$link_index]}, ${effective_versions[$link_index]}, ${download_links[$link_index]}"
    done
}
# Try to resolve the payload via aka.ms and record it in the link arrays.
# Only applies when installing 'latest' without a global.json file.
# THIS FUNCTION MAY EXIT (if the determined version is already installed).
generate_akams_links() {
    local valid_aka_ms_link=true;

    normalized_version="$(to_lowercase "$version")"
    # --quality only makes sense when resolving 'latest'; an explicit version conflicts with it.
    if [[ "$normalized_version" != "latest" ]] && [ -n "$normalized_quality" ]; then
        say_err "Quality and Version options are not allowed to be specified simultaneously. See https://learn.microsoft.com/dotnet/core/tools/dotnet-install-script#options for details."
        return 1
    fi

    if [[ -n "$json_file" || "$normalized_version" != "latest" ]]; then
        # aka.ms links are not needed when exact version is specified via command or json file
        return
    fi

    get_download_link_from_aka_ms || valid_aka_ms_link=false

    if [[ "$valid_aka_ms_link" == true ]]; then
        say_verbose "Retrieved primary payload URL from aka.ms link: '$aka_ms_download_link'."
        say_verbose "Downloading using legacy url will not be attempted."

        download_link=$aka_ms_download_link

        #get version from the path
        # The version is the next-to-last path segment of the resolved URL.
        IFS='/'
        read -ra pathElems <<< "$download_link"
        count=${#pathElems[@]}
        specific_version="${pathElems[count-2]}"
        unset IFS;
        say_verbose "Version: '$specific_version'."

        #Retrieve effective version
        effective_version="$(get_specific_product_version "$azure_feed" "$specific_version" "$download_link")"

        # Add link info to arrays
        download_links+=($download_link)
        specific_versions+=($specific_version)
        effective_versions+=($effective_version)
        link_types+=("aka.ms")

        # Check if the SDK version is already installed.
        if [[ "$dry_run" != true ]] && is_dotnet_package_installed "$install_root" "$asset_relative_path" "$effective_version"; then
            say "$asset_name with version '$effective_version' is already installed."
            exit 0
        fi

        return 0
    fi

    # if quality is specified - exit with error - there is no fallback approach
    if [ ! -z "$normalized_quality" ]; then
        say_err "Failed to locate the latest version in the channel '$normalized_channel' with '$normalized_quality' quality for '$normalized_product', os: '$normalized_os', architecture: '$normalized_architecture'."
        say_err "Refer to: https://aka.ms/dotnet-os-lifecycle for information on .NET Core support."
        return 1
    fi
    say_verbose "Falling back to latest.version file approach."
}
# Resolve the exact version on the given feed and append the primary (and,
# when constructible, legacy) payload link to the global link arrays.
# THIS FUNCTION MAY EXIT (if the determined version is already installed)
# args:
#    feed - $1
generate_regular_links() {
    local feed="$1"
    local valid_legacy_download_link=true

    specific_version=$(get_specific_version_from_version "$feed" "$channel" "$normalized_architecture" "$version" "$json_file") || specific_version='0'

    if [[ "$specific_version" == '0' ]]; then
        say_verbose "Failed to resolve the specific version number using feed '$feed'"
        return
    fi

    effective_version="$(get_specific_product_version "$feed" "$specific_version")"
    say_verbose "specific_version=$specific_version"

    download_link="$(construct_download_link "$feed" "$channel" "$normalized_architecture" "$specific_version" "$normalized_os")"
    say_verbose "Constructed primary named payload URL: $download_link"

    # Add link info to arrays
    # Fix: quote the appended values so each stays a single array element.
    download_links+=("$download_link")
    specific_versions+=("$specific_version")
    effective_versions+=("$effective_version")
    link_types+=("primary")

    legacy_download_link="$(construct_legacy_download_link "$feed" "$channel" "$normalized_architecture" "$specific_version")" || valid_legacy_download_link=false

    if [ "$valid_legacy_download_link" = true ]; then
        say_verbose "Constructed legacy named payload URL: $legacy_download_link"

        download_links+=("$legacy_download_link")
        specific_versions+=("$specific_version")
        effective_versions+=("$effective_version")
        link_types+=("legacy")
    else
        legacy_download_link=""
        # Fix: corrected "Cound not" typo in the log message.
        say_verbose "Could not construct a legacy_download_link; omitting..."
    fi

    # Check if the SDK version is already installed.
    if [[ "$dry_run" != true ]] && is_dotnet_package_installed "$install_root" "$asset_relative_path" "$effective_version"; then
        say "$asset_name with version '$effective_version' is already installed."
        exit 0
    fi
}
# Print all resolved payload URLs plus a command line the user can re-run to
# reproduce this exact install (used by --dry-run).
print_dry_run() {

    say "Payload URLs:"

    for link_index in "${!download_links[@]}"
    do
        say "URL #$link_index - ${link_types[$link_index]}: ${download_links[$link_index]}"
    done

    resolved_version=${specific_versions[0]}
    # The "\"" sequences embed literal double quotes around each value.
    repeatable_command="./$script_name --version "\""$resolved_version"\"" --install-dir "\""$install_root"\"" --architecture "\""$normalized_architecture"\"" --os "\""$normalized_os"\"""

    if [ ! -z "$normalized_quality" ]; then
        repeatable_command+=" --quality "\""$normalized_quality"\"""
    fi

    if [[ "$runtime" == "dotnet" ]]; then
        repeatable_command+=" --runtime "\""dotnet"\"""
    elif [[ "$runtime" == "aspnetcore" ]]; then
        repeatable_command+=" --runtime "\""aspnetcore"\"""
    fi

    repeatable_command+="$non_dynamic_parameters"

    # Never echo the real credential; print a placeholder instead.
    if [ -n "$feed_credential" ]; then
        repeatable_command+=" --feed-credential "\""<feed_credential>"\"""
    fi

    say "Repeatable invocation: $repeatable_command"
}
# Derive all normalized_* globals, the install root, the asset path/name, and
# the feed list from the raw command-line inputs. Must run before link generation.
calculate_vars() {
    eval $invocation

    script_name=$(basename "$0")
    # Canonicalize each raw user input via its get_normalized_* helper.
    normalized_architecture="$(get_normalized_architecture_from_architecture "$architecture")"
    say_verbose "Normalized architecture: '$normalized_architecture'."
    normalized_os="$(get_normalized_os "$user_defined_os")"
    say_verbose "Normalized OS: '$normalized_os'."
    normalized_quality="$(get_normalized_quality "$quality")"
    say_verbose "Normalized quality: '$normalized_quality'."
    normalized_channel="$(get_normalized_channel "$channel")"
    say_verbose "Normalized channel: '$normalized_channel'."
    normalized_product="$(get_normalized_product "$runtime")"
    say_verbose "Normalized product: '$normalized_product'."
    install_root="$(resolve_installation_path "$install_dir")"
    say_verbose "InstallRoot: '$install_root'."

    # The architecture may be adjusted again for the specific version/channel combination.
    normalized_architecture="$(get_normalized_architecture_for_specific_sdk_version "$version" "$normalized_channel" "$normalized_architecture")"

    # asset_relative_path is the install-root subfolder checked for an existing
    # install; asset_name is the user-facing product name used in messages.
    if [[ "$runtime" == "dotnet" ]]; then
        asset_relative_path="shared/Microsoft.NETCore.App"
        asset_name=".NET Core Runtime"
    elif [[ "$runtime" == "aspnetcore" ]]; then
        asset_relative_path="shared/Microsoft.AspNetCore.App"
        asset_name="ASP.NET Core Runtime"
    elif [ -z "$runtime" ]; then
        asset_relative_path="sdk"
        asset_name=".NET Core SDK"
    fi

    get_feeds_to_use
}
# Download the payload (trying each generated link in order), extract it into
# the install root, and verify the expected version is actually present.
# Returns 1 when every link fails or the post-install verification fails.
install_dotnet() {
    eval $invocation
    local download_failed=false
    local download_completed=false

    mkdir -p "$install_root"
    zip_path="$(mktemp "$temporary_file_template")"
    say_verbose "Zip path: $zip_path"

    # Try each link (aka.ms / primary / legacy) until one download succeeds.
    for link_index in "${!download_links[@]}"
    do
        download_link="${download_links[$link_index]}"
        specific_version="${specific_versions[$link_index]}"
        effective_version="${effective_versions[$link_index]}"
        link_type="${link_types[$link_index]}"

        say "Attempting to download using $link_type link $download_link"

        # The download function will set variables $http_code and $download_error_msg in case of failure.
        download_failed=false
        download "$download_link" "$zip_path" 2>&1 || download_failed=true

        if [ "$download_failed" = true ]; then
            case $http_code in
            404)
                say "The resource at $link_type link '$download_link' is not available."
                ;;
            *)
                say "Failed to download $link_type link '$download_link': $download_error_msg"
                ;;
            esac
            # Discard the partial download before trying the next link.
            rm -f "$zip_path" 2>&1 && say_verbose "Temporary zip file $zip_path was removed"
        else
            download_completed=true
            break
        fi
    done

    if [[ "$download_completed" == false ]]; then
        say_err "Could not find \`$asset_name\` with version = $specific_version"
        say_err "Refer to: https://aka.ms/dotnet-os-lifecycle for information on .NET Core support"
        return 1
    fi

    say "Extracting zip from $download_link"
    extract_dotnet_package "$zip_path" "$install_root" || return 1

    #  Check if the SDK version is installed; if not, fail the installation.
    # if the version contains "RTM" or "servicing"; check if a 'release-type' SDK version is installed.
    if [[ $specific_version == *"rtm"* || $specific_version == *"servicing"* ]]; then
        # Installed folders use only the numeric prefix (before the first '-').
        IFS='-'
        read -ra verArr <<< "$specific_version"
        release_version="${verArr[0]}"
        unset IFS;
        say_verbose "Checking installation: version = $release_version"
        if is_dotnet_package_installed "$install_root" "$asset_relative_path" "$release_version"; then
            say "Installed version is $effective_version"
            return 0
        fi
    fi

    #  Check if the standard SDK version is installed.
    say_verbose "Checking installation: version = $effective_version"
    if is_dotnet_package_installed "$install_root" "$asset_relative_path" "$effective_version"; then
        say "Installed version is $effective_version"
        return 0
    fi

    # Version verification failed. More likely something is wrong either with the downloaded content or with the verification algorithm.
    say_err "Failed to verify the version of installed \`$asset_name\`.\nInstallation source: $download_link.\nInstallation location: $install_root.\nReport the bug at https://github.com/dotnet/install-scripts/issues."
    say_err "\`$asset_name\` with version = $effective_version failed to install with an error."
    return 1
}
# Preserve the original arguments (the parser below consumes "$@").
args=("$@")

# Relative path of the file recording the installed version.
local_version_file_relative_path="/.version"
bin_folder_relative_path=""
# mktemp template for downloaded archives and extraction folders.
temporary_file_template="${TMPDIR:-/tmp}/dotnet.XXXXXXXXX"

# Defaults for every user-settable option; overridden by the argument parser below.
channel="LTS"
version="Latest"
json_file=""
install_dir="<auto>"
architecture="<auto>"
dry_run=false
no_path=false
no_cdn=false
azure_feed=""
uncached_feed=""
feed_credential=""
verbose=false
runtime=""
runtime_id=""
quality=""
internal=false
override_non_versioned_files=true
# Flags echoed back verbatim in the --dry-run "repeatable invocation" output.
non_dynamic_parameters=""
user_defined_os=""
# Command-line parsing. Every option accepts both the GNU spelling
# (--channel) and a PowerShell-style spelling (-Channel) via case globs.
while [ $# -ne 0 ]
do
    name="$1"
    case "$name" in
        -c|--channel|-[Cc]hannel)
            shift
            channel="$1"
            ;;
        -v|--version|-[Vv]ersion)
            shift
            version="$1"
            ;;
        -q|--quality|-[Qq]uality)
            shift
            quality="$1"
            ;;
        --internal|-[Ii]nternal)
            internal=true
            non_dynamic_parameters+=" $name"
            ;;
        -i|--install-dir|-[Ii]nstall[Dd]ir)
            shift
            install_dir="$1"
            ;;
        --arch|--architecture|-[Aa]rch|-[Aa]rchitecture)
            shift
            architecture="$1"
            ;;
        # NOTE(review): the glob [SS] only matches an upper-case 'S', so
        # -Os/-os are not recognized here; -[Oo][Ss] was probably intended.
        --os|-[Oo][SS])
            shift
            user_defined_os="$1"
            ;;
        --shared-runtime|-[Ss]hared[Rr]untime)
            say_warning "The --shared-runtime flag is obsolete and may be removed in a future version of this script. The recommended usage is to specify '--runtime dotnet'."
            if [ -z "$runtime" ]; then
                runtime="dotnet"
            fi
            ;;
        --runtime|-[Rr]untime)
            shift
            runtime="$1"
            if [[ "$runtime" != "dotnet" ]] && [[ "$runtime" != "aspnetcore" ]]; then
                say_err "Unsupported value for --runtime: '$1'. Valid values are 'dotnet' and 'aspnetcore'."
                if [[ "$runtime" == "windowsdesktop" ]]; then
                    say_err "WindowsDesktop archives are manufactured for Windows platforms only."
                fi
                exit 1
            fi
            ;;
        --dry-run|-[Dd]ry[Rr]un)
            dry_run=true
            ;;
        --no-path|-[Nn]o[Pp]ath)
            no_path=true
            non_dynamic_parameters+=" $name"
            ;;
        --verbose|-[Vv]erbose)
            verbose=true
            non_dynamic_parameters+=" $name"
            ;;
        --no-cdn|-[Nn]o[Cc]dn)
            no_cdn=true
            non_dynamic_parameters+=" $name"
            ;;
        --azure-feed|-[Aa]zure[Ff]eed)
            shift
            azure_feed="$1"
            # Appends: space, flag name, space, then the value wrapped in
            # literal double quotes.
            non_dynamic_parameters+=" $name "\""$1"\"""
            ;;
        --uncached-feed|-[Uu]ncached[Ff]eed)
            shift
            uncached_feed="$1"
            non_dynamic_parameters+=" $name "\""$1"\"""
            ;;
        --feed-credential|-[Ff]eed[Cc]redential)
            shift
            feed_credential="$1"
            #feed_credential should start with "?", for it to be added to the end of the link.
            #adding "?" at the beginning of the feed_credential if needed.
            [[ -z "$(echo $feed_credential)" ]] || [[ $feed_credential == \?* ]] || feed_credential="?$feed_credential"
            ;;
        --runtime-id|-[Rr]untime[Ii]d)
            shift
            runtime_id="$1"
            non_dynamic_parameters+=" $name "\""$1"\"""
            say_warning "Use of --runtime-id is obsolete and should be limited to the versions below 2.1. To override architecture, use --architecture option instead. To override OS, use --os option instead."
            ;;
        --jsonfile|-[Jj][Ss]on[Ff]ile)
            shift
            json_file="$1"
            ;;
        --skip-non-versioned-files|-[Ss]kip[Nn]on[Vv]ersioned[Ff]iles)
            override_non_versioned_files=false
            non_dynamic_parameters+=" $name"
            ;;
        # NOTE(review): '?' is a glob metacharacter in case patterns, so the
        # unquoted -? pattern also matches any unknown single-character flag
        # (e.g. -x), showing this help instead of the "Unknown argument" error.
        -?|--?|-h|--help|-[Hh]elp)
            script_name="$(basename "$0")"
            # NOTE(review): the usage line advertises -p|--prefix, but no such
            # option is handled in this case statement.
            echo ".NET Tools Installer"
            echo "Usage: $script_name [-c|--channel <CHANNEL>] [-v|--version <VERSION>] [-p|--prefix <DESTINATION>]"
            echo "       $script_name -h|-?|--help"
            echo ""
            echo "$script_name is a simple command line interface for obtaining dotnet cli."
            echo "    Note that the intended use of this script is for Continuous Integration (CI) scenarios, where:"
            echo "    - The SDK needs to be installed without user interaction and without admin rights."
            echo "    - The SDK installation doesn't need to persist across multiple CI runs."
            echo "    To set up a development environment or to run apps, use installers rather than this script. Visit https://dotnet.microsoft.com/download to get the installer."
            echo ""
            echo "Options:"
            echo "  -c,--channel <CHANNEL>         Download from the channel specified, Defaults to \`$channel\`."
            echo "      -Channel"
            echo "          Possible values:"
            echo "          - STS - the most recent Standard Term Support release"
            echo "          - LTS - the most recent Long Term Support release"
            echo "          - 2-part version in a format A.B - represents a specific release"
            echo "              examples: 2.0; 1.0"
            echo "          - 3-part version in a format A.B.Cxx - represents a specific SDK release"
            echo "              examples: 5.0.1xx, 5.0.2xx."
            echo "              Supported since 5.0 release"
            echo "          Warning: Value 'Current' is deprecated for the Channel parameter. Use 'STS' instead."
            echo "          Note: The version parameter overrides the channel parameter when any version other than 'latest' is used."
            echo "  -v,--version <VERSION>         Use specific VERSION, Defaults to \`$version\`."
            echo "      -Version"
            echo "          Possible values:"
            echo "          - latest - the latest build on specific channel"
            echo "          - 3-part version in a format A.B.C - represents specific version of build"
            echo "              examples: 2.0.0-preview2-006120; 1.1.0"
            echo "  -q,--quality <quality>         Download the latest build of specified quality in the channel."
            echo "      -Quality"
            echo "          The possible values are: daily, signed, validated, preview, GA."
            echo "          Works only in combination with channel. Not applicable for STS and LTS channels and will be ignored if those channels are used."
            echo "          For SDK use channel in A.B.Cxx format. Using quality for SDK together with channel in A.B format is not supported."
            echo "          Supported since 5.0 release."
            echo "          Note: The version parameter overrides the channel parameter when any version other than 'latest' is used, and therefore overrides the quality."
            echo "  --internal,-Internal               Download internal builds. Requires providing credentials via --feed-credential parameter."
            echo "  --feed-credential <FEEDCREDENTIAL> Token to access Azure feed. Used as a query string to append to the Azure feed."
            echo "      -FeedCredential                This parameter typically is not specified."
            echo "  -i,--install-dir <DIR>             Install under specified location (see Install Location below)"
            echo "      -InstallDir"
            echo "  --architecture <ARCHITECTURE>      Architecture of dotnet binaries to be installed, Defaults to \`$architecture\`."
            echo "      --arch,-Architecture,-Arch"
            echo "          Possible values: x64, arm, arm64, s390x and ppc64le"
            echo "  --os <system>                      Specifies operating system to be used when selecting the installer."
            echo "          Overrides the OS determination approach used by the script. Supported values: osx, linux, linux-musl, freebsd, rhel.6."
            echo "          In case any other value is provided, the platform will be determined by the script based on machine configuration."
            echo "          Not supported for legacy links. Use --runtime-id to specify platform for legacy links."
            echo "          Refer to: https://aka.ms/dotnet-os-lifecycle for more information."
            echo "  --runtime <RUNTIME>                Installs a shared runtime only, without the SDK."
            echo "      -Runtime"
            echo "          Possible values:"
            echo "          - dotnet     - the Microsoft.NETCore.App shared runtime"
            echo "          - aspnetcore - the Microsoft.AspNetCore.App shared runtime"
            echo "  --dry-run,-DryRun                  Do not perform installation. Display download link."
            echo "  --no-path, -NoPath                 Do not set PATH for the current process."
            echo "  --verbose,-Verbose                 Display diagnostics information."
            echo "  --azure-feed,-AzureFeed            For internal use only."
            echo "                                     Allows using a different storage to download SDK archives from."
            echo "                                     This parameter is only used if --no-cdn is false."
            echo "  --uncached-feed,-UncachedFeed      For internal use only."
            echo "                                     Allows using a different storage to download SDK archives from."
            echo "                                     This parameter is only used if --no-cdn is true."
            echo "  --skip-non-versioned-files         Skips non-versioned files if they already exist, such as the dotnet executable."
            echo "      -SkipNonVersionedFiles"
            echo "  --no-cdn,-NoCdn                    Disable downloading from the Azure CDN, and use the uncached feed directly."
            echo "  --jsonfile <JSONFILE>              Determines the SDK version from a user specified global.json file."
            echo "                                     Note: global.json must have a value for 'SDK:Version'"
            echo "  -?,--?,-h,--help,-Help             Shows this help message"
            echo ""
            echo "Install Location:"
            echo "  Location is chosen in following order:"
            echo "    - --install-dir option"
            echo "    - Environmental variable DOTNET_INSTALL_DIR"
            echo "    - $HOME/.dotnet"
            exit 0
            ;;
        *)
            say_err "Unknown argument \`$name\`"
            exit 1
            ;;
    esac
    # Consume the option name itself (values were consumed by the per-option
    # 'shift's above).
    shift
done
# CI-usage reminder, shown only with --verbose.
say_verbose "Note that the intended use of this script is for Continuous Integration (CI) scenarios, where:"
say_verbose "- The SDK needs to be installed without user interaction and without admin rights."
say_verbose "- The SDK installation doesn't need to persist across multiple CI runs."
say_verbose "To set up a development environment or to run apps, use installers rather than this script. Visit https://dotnet.microsoft.com/download to get the installer.\n"
# Internal builds require a feed credential. In dry-run mode this is only a
# warning so the download link can still be displayed.
if [ "$internal" = true ] && [ -z "$(echo $feed_credential)" ]; then
    message="Provide credentials via --feed-credential parameter."
    if [ "$dry_run" = true ]; then
        say_warning "$message"
    else
        say_err "$message"
        exit 1
    fi
fi
# Validate prerequisites and resolve the effective option values.
check_min_reqs
calculate_vars
# generate_regular_links call below will 'exit' if the determined version is already installed.
generate_download_links
if [[ "$dry_run" = true ]]; then
    print_dry_run
    exit 0
fi
install_dotnet
# Compute where the installed binaries ended up, then expose them on PATH
# unless --no-path was requested.
bin_path="$(get_absolute_path "$(combine_paths "$install_root" "$bin_folder_relative_path")")"
if [ "$no_path" = false ]; then
    say "Adding to current process PATH: \`$bin_path\`. Note: This change will be visible only when sourcing script."
    export PATH="$bin_path":"$PATH"
else
    say "Binaries of dotnet can be found in $bin_path"
fi
say "Note that the script does not resolve dependencies during installation."
say "To check the list of dependencies, go to https://learn.microsoft.com/dotnet/core/install, select your operating system and check the \"Dependencies\" section."
say "Installation finished successfully."
/MetaGram-2.0.2.tar.gz/MetaGram-2.0.2/pyrogram/methods/messages/send_contact.py |
from datetime import datetime
from typing import Union
import pyrogram
from pyrogram import raw, utils
from pyrogram import types
class SendContact:
    async def send_contact(
        self: "pyrogram.Client",
        chat_id: Union[int, str],
        phone_number: str,
        first_name: str,
        last_name: str = None,
        vcard: str = None,
        disable_notification: bool = None,
        reply_to_message_id: int = None,
        schedule_date: datetime = None,
        protect_content: bool = None,
        reply_markup: Union[
            "types.InlineKeyboardMarkup",
            "types.ReplyKeyboardMarkup",
            "types.ReplyKeyboardRemove",
            "types.ForceReply"
        ] = None
    ) -> "types.Message":
        """Send phone contacts.
        .. include:: /_includes/usable-by/users-bots.rst
        Parameters:
            chat_id (``int`` | ``str``):
                Unique identifier (int) or username (str) of the target chat.
                For your personal cloud (Saved Messages) you can simply use "me" or "self".
                For a contact that exists in your Telegram address book you can use his phone number (str).
            phone_number (``str``):
                Contact's phone number.
            first_name (``str``):
                Contact's first name.
            last_name (``str``, *optional*):
                Contact's last name.
            vcard (``str``, *optional*):
                Additional data about the contact in the form of a vCard, 0-2048 bytes
            disable_notification (``bool``, *optional*):
                Sends the message silently.
                Users will receive a notification with no sound.
            reply_to_message_id (``int``, *optional*):
                If the message is a reply, ID of the original message.
            schedule_date (:py:obj:`~datetime.datetime`, *optional*):
                Date when the message will be automatically sent.
            protect_content (``bool``, *optional*):
                Protects the contents of the sent message from forwarding and saving.
            reply_markup (:obj:`~pyrogram.types.InlineKeyboardMarkup` | :obj:`~pyrogram.types.ReplyKeyboardMarkup` | :obj:`~pyrogram.types.ReplyKeyboardRemove` | :obj:`~pyrogram.types.ForceReply`, *optional*):
                Additional interface options. An object for an inline keyboard, custom reply keyboard,
                instructions to remove reply keyboard or to force a reply from the user.
        Returns:
            :obj:`~pyrogram.types.Message`: On success, the sent contact message is returned.
        Example:
            .. code-block:: python
                await app.send_contact("me", "+1-123-456-7890", "Name")
        """
        # Build the raw contact payload; optional fields default to "".
        contact_media = raw.types.InputMediaContact(
            phone_number=phone_number,
            first_name=first_name,
            last_name=last_name or "",
            vcard=vcard or ""
        )
        r = await self.invoke(
            raw.functions.messages.SendMedia(
                peer=await self.resolve_peer(chat_id),
                media=contact_media,
                message="",
                silent=disable_notification or None,
                reply_to_msg_id=reply_to_message_id,
                random_id=self.rnd_id(),
                schedule_date=utils.datetime_to_timestamp(schedule_date),
                noforwards=protect_content,
                reply_markup=await reply_markup.write(self) if reply_markup else None
            )
        )
        # The server answers with a batch of updates; pick the first one that
        # carries the sent message and parse it into a high-level Message.
        new_message_updates = (
            raw.types.UpdateNewMessage,
            raw.types.UpdateNewChannelMessage,
            raw.types.UpdateNewScheduledMessage,
        )
        for update in r.updates:
            if not isinstance(update, new_message_updates):
                continue
            return await types.Message._parse(
                self, update.message,
                {user.id: user for user in r.users},
                {chat.id: chat for chat in r.chats},
                is_scheduled=isinstance(update, raw.types.UpdateNewScheduledMessage)
            )
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/diofant/printing/conventions.py | import collections
import re
_name_with_digits_p = re.compile(r'^([a-zA-Z]+)([0-9]+)$')


def split_super_sub(text):
    """Split a symbol name into a base name, superscripts and subscripts.

    The leading run of characters is the symbol's actual name; the rest is
    parsed as markers: "^" or "__" introduce a superscript, "_" introduces a
    subscript. Returns a tuple (name, superscripts, subscripts).

    >>> split_super_sub('a_x^1')
    ('a', ['1'], ['x'])
    >>> split_super_sub('var_sub1__sup_sub2')
    ('var', ['sup'], ['sub1', 'sub2'])
    """
    if len(text) == 0:
        return text, [], []

    name = None
    supers = []
    subs = []
    pos = 0
    while pos < len(text):
        # A "__" marker is two characters wide; start scanning for the next
        # marker past it so it stays attached to the current part.
        start = pos + (2 if text[pos:pos + 2] == '__' else 1)
        next_hat = text.find('^', start)
        next_usc = text.find('_', start)
        end = min(p if p != -1 else len(text) for p in (next_hat, next_usc))
        part, pos = text[pos:end], end

        if name is None:
            name = part
        elif part.startswith('^'):
            supers.append(part[1:])
        elif part.startswith('__'):
            supers.append(part[2:])
        elif part.startswith('_'):
            subs.append(part[1:])
        else:  # pragma: no cover
            raise RuntimeError('This should never happen.')

    # A name ending in digits is treated as having an implicit first
    # subscript, e.g. "alpha23" -> ("alpha", subscript "23").
    digit_match = _name_with_digits_p.match(name)
    if digit_match:
        name, trailing_digits = digit_match.groups()
        subs.insert(0, trailing_digits)
    return name, supers, subs
def requires_partial(expr):
    """Return whether a partial-derivative symbol is required for printing.

    Counts the non-integer free variables of *expr*; expressions without an
    iterable ``free_symbols`` fall back to their explicit ``variables`` list
    to establish the context.
    """
    free = expr.free_symbols
    if not isinstance(free, collections.abc.Iterable):
        return len(set(expr.variables)) > 1
    return sum(1 for sym in free if not sym.is_integer) > 1
/Nuitka_winsvc-1.7.10-cp310-cp310-win_amd64.whl/nuitka/tools/environments/Virtualenv.py | import os
import sys
from contextlib import contextmanager
from nuitka.__past__ import unicode
from nuitka.Tracing import my_print
from nuitka.utils.Execution import check_call, executeProcess
from nuitka.utils.FileOperations import (
getDirectoryRealPath,
removeDirectory,
withDirectoryChange,
)
class Virtualenv(object):
    """Handle to a virtualenv directory.

    Commands are executed through the platform shell after sourcing the
    environment's activation script, so they run with the virtualenv's
    Python and scripts active.
    """

    def __init__(self, env_dir):
        # Store an absolute path so later directory changes are unambiguous.
        self.env_dir = os.path.abspath(env_dir)

    @staticmethod
    def _makeActivatedCommand(commands):
        """Normalize *commands* to a list, prepend the activation step, and
        join everything with '&&' so any failing step aborts the rest.

        This was previously duplicated verbatim in runCommand and
        runCommandWithOutput.
        """
        if type(commands) in (str, unicode):
            commands = [commands]

        if os.name == "nt":
            commands = [r"call scripts\activate.bat"] + commands
        else:
            commands = [". bin/activate"] + commands

        return " && ".join(commands)

    def runCommand(self, commands, style=None):
        """Run shell command(s) inside the activated virtualenv.

        Aborts with AssertionError on a non-zero exit status.
        """
        with withDirectoryChange(self.env_dir):
            command = self._makeActivatedCommand(commands)

            if style is not None:
                my_print("Executing: %s" % command, style=style)

            # NOTE: assert is stripped under "python -O"; kept as-is because
            # existing callers expect an AssertionError on failure.
            assert os.system(command) == 0, command

    def runCommandWithOutput(self, commands, style=None):
        """Run shell command(s) inside the activated virtualenv.

        Returns:
            Tuple (stdout, stderr, exit_code) from running the command.
        """
        with withDirectoryChange(self.env_dir):
            command = self._makeActivatedCommand(commands)

            if style is not None:
                my_print("Executing: %s" % command, style=style)

            # Use subprocess and also return outputs, stdout, stderr, result
            return executeProcess(
                command=command,
                shell=True,
            )

    def getVirtualenvDir(self):
        """Return the absolute path of the virtualenv directory."""
        return self.env_dir
@contextmanager
def withVirtualenv(env_name, base_dir=None, python=None, delete=True, style=None):
    """Create a virtualenv and change into it.
    Activating for actual use will be your task.
    """
    if style is not None:
        my_print("Creating a virtualenv:")
    # Default to the Python currently running this code.
    if python is None:
        python = sys.executable
    # Avoid symlinks on Windows, they won't work for virtualenv e.g.
    python = os.path.join(
        getDirectoryRealPath(os.path.dirname(python)),
        os.path.basename(python),
    )
    if base_dir is not None:
        env_dir = os.path.join(base_dir, env_name)
    else:
        env_dir = env_name
    # Start from a clean slate; a stale environment must not survive.
    removeDirectory(env_dir, ignore_errors=False)
    # Create the environment relative to base_dir (if given) so env_name can
    # be a plain name rather than a full path.
    with withDirectoryChange(base_dir, allow_none=True):
        command = [python, "-m", "virtualenv", env_name]
        if style is not None:
            my_print("Executing: %s" % " ".join(command), style=style)
        check_call(command)
    yield Virtualenv(env_dir)
    # NOTE(review): there is no try/finally around the yield, so if the
    # caller's block raises, the environment is NOT removed even when
    # delete=True — confirm whether leaving it behind for debugging is
    # intentional.
    if delete:
        removeDirectory(env_dir, ignore_errors=False)
/MPD_sima-0.18.2.tar.gz/MPD_sima-0.18.2/doc/source/dev/meta.rst | Track object Documentation
==========================
.. autodata:: sima.lib.track.SEPARATOR
.. automodule:: sima.lib.track
:members:
Meta object Documentation
=========================
Generic class and helper function
---------------------------------
.. autoclass:: sima.lib.meta.Meta
:members:
:undoc-members:
.. autofunction:: sima.lib.meta.is_uuid4
Derived objects
---------------
.. autoclass:: sima.lib.meta.Artist(name, mbid=None, musicbrainz_artistid=None, albumartist=None)
:show-inheritance:
.. autoclass:: sima.lib.meta.Album
:show-inheritance:
Exceptions
----------
.. autoclass:: sima.lib.meta.MetaException
| PypiClean |
/BRAILS-3.0.1.tar.gz/BRAILS-3.0.1/brails/modules/ChimneyDetector/train.py |
from lib.train_detector import Detector
import argparse
def get_args(argv=None):
    """Parse command-line options for training the chimney detection model.

    Args:
        argv: Optional list of argument strings to parse instead of
            ``sys.argv`` (useful for testing). Defaults to None, which
            preserves the original behavior of reading ``sys.argv``.

    Returns:
        argparse.Namespace with all training options.
    """
    parser = argparse.ArgumentParser('EfficientDet-based chimney detection model')
    parser.add_argument('-c', '--compound_coef', type=int, default=4,
                        help='Compound coefficient for the EfficientDet backbone, e.g., enter 7 for EfficientDet-D7')
    parser.add_argument('-n', '--num_workers', type=int, default=0,
                        help='Number of workers of Dataloader')
    parser.add_argument('--head_only', type=boolean_string, default=False,
                        help='True if desired to finetune the regressor and the classifier only, '
                             'useful in early stage convergence or small/easy dataset')
    parser.add_argument('--num_gpus', type=int, default=1,
                        help='Number of GPUs (Enter 0 for CPU-based training)')
    parser.add_argument('--optim', type=str, default='adamw',
                        help='Select optimizer for training, '
                             'Use \'adamw\' until the last stage '
                             'then switch to \'sgd\'')
    parser.add_argument('--batch_size', type=int, default=2,
                        help='The number of images per batch')
    parser.add_argument('--lr', type=float, default=1e-4, help='Learning rate')
    parser.add_argument('--num_epochs', type=int, default=25, help='Number of training epochs')
    parser.add_argument('--data_path', type=str, default='datasets/',
                        help='Path for the root folder of dataset')
    parser.add_argument('--val_interval', type=int, default=1,
                        help='Number of epochs between validating phases')
    parser.add_argument('--save_interval', type=int, default=5,
                        help='Number of epochs between model saving')
    parser.add_argument('--es_min_delta', type=float, default=0.0,
                        help='Early stopping parameter: Minimum change in loss to qualify as an improvement')
    parser.add_argument('--es_patience', type=int, default=0,
                        help='Early stopping parameter: Number of epochs with no improvement after which training will be stopped. Set to 0 to disable this technique.')
    parser.add_argument('--customModel_path', type=str,
                        default='models/efficientdet-d4_trained.pth',
                        help='Path for the custom pretrained model')
    args = parser.parse_args(argv)
    return args


def boolean_string(s):
    """argparse type helper mapping the literals 'True'/'False' to bool.

    Raises:
        ValueError: if *s* is neither 'True' nor 'False'.
    """
    if s not in {'False', 'True'}:
        raise ValueError('Not a valid boolean entry')
    return s == 'True'
def train(opt):
    """Train the EfficientDet chimney detector using the options in *opt*."""
    detector = Detector()

    # Training/validation splits live under opt.data_path in "train"/"valid".
    class_names = ["chimney"]
    detector.set_train_dataset(opt.data_path, "", "", "train",
                               classes_list=class_names,
                               batch_size=opt.batch_size,
                               num_workers=opt.num_workers)
    detector.set_val_dataset(opt.data_path, "", "", "valid")

    # Pick the EfficientDet backbone matching the requested compound coefficient.
    backbone_name = f"efficientdet-d{opt.compound_coef}.pth"
    detector.set_model(model_name=backbone_name, num_gpus=opt.num_gpus,
                       freeze_head=opt.head_only)

    # Optimizer and early-stopping configuration.
    detector.set_hyperparams(optimizer=opt.optim, lr=opt.lr,
                             es_min_delta=opt.es_min_delta,
                             es_patience=opt.es_patience)

    detector.train(num_epochs=opt.num_epochs, val_interval=opt.val_interval,
                   save_interval=opt.save_interval)


if __name__ == '__main__':
    opt = get_args()
    train(opt)
/NESTML-5.3.0-py3-none-any.whl/NESTML-5.3.0.data/data/doc/models_library/hh_cond_exp_destexhe.rst | hh_cond_exp_destexhe
####################
hh_cond_exp_destexhe - Hodgkin-Huxley based model, Traub, Destexhe and Mainen modified
Description
+++++++++++
hh_cond_exp_destexhe is an implementation of a modified Hodgkin-Huxley model, which is based on the hh_cond_exp_traub model.
Differences to hh_cond_exp_traub:
(1) **Additional background noise:** A background current whose conductances were modeled as an Ornstein-Uhlenbeck process is injected into the neuron.
(2) **Additional non-inactivating K+ current:** A non-inactivating K+ current was included, which is responsible for spike frequency adaptation.
References
++++++++++
.. [1] Traub, R.D. and Miles, R. (1991) Neuronal Networks of the Hippocampus. Cambridge University Press, Cambridge UK.
.. [2] Destexhe, A. and Pare, D. (1999) Impact of Network Activity on the Integrative Properties of Neocortical Pyramidal Neurons In Vivo. Journal of Neurophysiology
.. [3] A. Destexhe, M. Rudolph, J.-M. Fellous and T. J. Sejnowski (2001) Fluctuating synaptic conductances recreate in vivo-like activity in neocortical neurons. Neuroscience
.. [4] Z. Mainen, J. Joerges, J. R. Huguenard and T. J. Sejnowski (1995) A Model of Spike Initiation in Neocortical Pyramidal Neurons. Neuron
See also
++++++++
hh_cond_exp_traub
Parameters
++++++++++
.. csv-table::
:header: "Name", "Physical unit", "Default value", "Description"
:widths: auto
"g_Na", "nS", "17318.0nS", "Na Conductance"
"g_K", "nS", "3463.6nS", "K Conductance"
"g_L", "nS", "15.5862nS", "Leak Conductance"
"C_m", "pF", "346.36pF", "Membrane Capacitance"
"E_Na", "mV", "60mV", "Reversal potentials"
"E_K", "mV", "-90.0mV", "Potassium reversal potential"
"E_L", "mV", "-80.0mV", "Leak reversal Potential (aka resting potential)"
    "V_T", "mV", "-58.0mV", "Voltage offset that controls dynamics. For default parameters, V_T = -63mV results in a threshold around -50mV."
    "tau_syn_exc", "ms", "2.7ms", "Synaptic Time Constant Excitatory Synapse"
"tau_syn_inh", "ms", "10.5ms", "Synaptic Time Constant for Inhibitory Synapse"
"E_exc", "mV", "0.0mV", "Excitatory synaptic reversal potential"
"E_inh", "mV", "-75.0mV", "Inhibitory synaptic reversal potential"
"g_M", "nS", "173.18nS", "Conductance of non-inactivating K+ channel"
    "g_noise_exc0", "uS", "0.012uS", "Mean of the excitatory noise conductance (modeled as an OU process)"
"g_noise_inh0", "uS", "0.057uS", "Mean of the inhibitory noise conductance"
"sigma_noise_exc", "uS", "0.003uS", "Standard deviation of the excitatory noise conductance"
"sigma_noise_inh", "uS", "0.0066uS", "Standard deviation of the inhibitory noise conductance"
"alpha_n_init", "1 / ms", "0.032 / (ms * mV) * (15.0mV - V_m) / (exp((15.0mV - V_m) / 5.0mV) - 1.0)", ""
"beta_n_init", "1 / ms", "0.5 / ms * exp((10.0mV - V_m) / 40.0mV)", ""
"alpha_m_init", "1 / ms", "0.32 / (ms * mV) * (13.0mV - V_m) / (exp((13.0mV - V_m) / 4.0mV) - 1.0)", ""
"beta_m_init", "1 / ms", "0.28 / (ms * mV) * (V_m - 40.0mV) / (exp((V_m - 40.0mV) / 5.0mV) - 1.0)", ""
"alpha_h_init", "1 / ms", "0.128 / ms * exp((17.0mV - V_m) / 18.0mV)", ""
"beta_h_init", "1 / ms", "(4.0 / (1.0 + exp((40.0mV - V_m) / 5.0mV))) / ms", ""
"alpha_p_init", "1 / ms", "0.0001 / (ms * mV) * (V_m + 30.0mV) / (1.0 - exp(-(V_m + 30.0mV) / 9.0mV))", ""
"beta_p_init", "1 / ms", "-0.0001 / (ms * mV) * (V_m + 30.0mV) / (1.0 - exp((V_m + 30.0mV) / 9.0mV))", ""
"I_e", "pA", "0pA", "constant external input current"
State variables
+++++++++++++++
.. csv-table::
:header: "Name", "Physical unit", "Default value", "Description"
:widths: auto
"r", "integer", "0", "counts number of tick during the refractory period"
"g_noise_exc", "uS", "g_noise_exc0", ""
"g_noise_inh", "uS", "g_noise_inh0", ""
"V_m", "mV", "E_L", "Membrane potential"
"Act_m", "real", "alpha_m_init / (alpha_m_init + beta_m_init)", ""
"Act_h", "real", "alpha_h_init / (alpha_h_init + beta_h_init)", ""
"Inact_n", "real", "alpha_n_init / (alpha_n_init + beta_n_init)", ""
"Noninact_p", "real", "alpha_p_init / (alpha_p_init + beta_p_init)", ""
Equations
+++++++++
.. math::
\frac{ dV_{m} } { dt }= \frac 1 { C_{m} } \left( { (-I_{Na} - I_{K} - I_{M} - I_{L} - I_{syn,exc} - I_{syn,inh} + I_{e} + I_{stim} - I_{noise}) } \right)
.. math::
\frac{ dAct_{m} } { dt }= (\alpha_{m} - (\alpha_{m} + \beta_{m}) \cdot Act_{m})
.. math::
\frac{ dAct_{h} } { dt }= (\alpha_{h} - (\alpha_{h} + \beta_{h}) \cdot Act_{h})
.. math::
\frac{ dInact_{n} } { dt }= (\alpha_{n} - (\alpha_{n} + \beta_{n}) \cdot Inact_{n})
.. math::
\frac{ dNoninact_{p} } { dt }= (\alpha_{p} - (\alpha_{p} + \beta_{p}) \cdot Noninact_{p})
Source code
+++++++++++
The model source code can be found in the NESTML models repository here: `hh_cond_exp_destexhe <https://github.com/nest/nestml/tree/master/models/neurons/hh_cond_exp_destexhe.nestml>`_.
Characterisation
++++++++++++++++
.. include:: hh_cond_exp_destexhe_characterisation.rst
.. footer::
Generated at 2023-03-22 17:48:49.081867 | PypiClean |
/Ceygen-0.3.tar.gz/Ceygen-0.3/doc/lu.rst | ==================================
LU Decomposition-powered Functions
==================================
This module contains algebraic functions powered by the LU matrix decomposition (as
provided by the <`Eigen/LU`_> include), most notably matrix inverse and determinant.
.. module:: ceygen.lu
.. function:: inv(x[, out=None])
Return matrix inverse computed using LU decomposition with partial pivoting. It is your
responsibility to ensure that *x* is invertible, otherwise you get undefined result
without any warning.
:param x: matrix to invert
:type x: |nonint_matrix|
:param out: |out|
:type out: |nonint_matrix|
:raises: |valueerror|
:raises: |typeerror|
:rtype: |nonint_matrix|
.. function:: iinv(x[, out=None])
   Compute matrix inverse using LU decomposition with partial pivoting in-place. Equivalent
to *x* = :obj:`inv(x) <inv>`, but without overhead. It is your responsibility to ensure
that *x* is invertible, otherwise you get undefined result without any warning.
:param x: matrix to invert in-place
:type x: |nonint_matrix|
:raises: |valueerror|
:raises: |typeerror|
:returns: |alwaystrue|
.. function:: det(x)
Compute determinant of a square matrix *x* using LU decomposition.
   :param x: matrix whose determinant to compute
:type x: |matrix|
:raises: |valueerror|
:raises: |typeerror|
:rtype: |scalar|
.. _`Eigen/LU`: http://eigen.tuxfamily.org/dox/QuickRefPage.html#QuickRef_Headers
.. include:: definitions.rst
| PypiClean |
/Mathics3-6.0.2.tar.gz/Mathics3-6.0.2/mathics/builtin/quantum_mechanics/angular.py | from typing import List, Optional
from sympy.physics.matrices import msigma
from sympy.physics.quantum.cg import CG
from sympy.physics.wigner import wigner_3j, wigner_6j
from mathics.builtin.base import SympyFunction
from mathics.core.atoms import Integer
from mathics.core.attributes import ( # A_LISTABLE,; A_NUMERIC_FUNCTION,
A_PROTECTED,
A_READ_PROTECTED,
)
from mathics.core.convert.python import from_python
from mathics.core.convert.sympy import from_sympy
from mathics.core.evaluation import Evaluation
from mathics.core.list import ListExpression
from mathics.core.symbols import Symbol
class ClebschGordan(SympyFunction):
    """
    <url>
    :Clebsch-Gordan coefficients matrices:
    https://en.wikipedia.org/wiki/Clebsch%E2%80%93Gordan_coefficients</url> (<url>
    :SymPy:
    https://docs.sympy.org/latest/modules/physics/quantum/cg.html</url>, <url>
    :WMA:
    https://reference.wolfram.com/language/ref/ClebschGordan</url>)
    <dl>
      <dt>'ClebschGordan[{$j1$, $m1$}, {$j2$, $m2$}, {$j$ $m$}]'
      <dd>returns the Clebsch-Gordan coefficient for the decomposition of |$j$,$m$> \
        in terms of |$j1$, $m$>, |$j2$, $m2$>.
    </dl>
    >> ClebschGordan[{3 / 2, 3 / 2}, {1 / 2, -1 / 2}, {1, 1}]
     = Sqrt[3] / 2
    'ClebschGordan' works with integer and half‐integer arguments:
    >> ClebschGordan[{1/2, -1/2}, {1/2, -1/2}, {1, -1}]
     = 1
    >> ClebschGordan[{1/2, -1/2}, {1, 0}, {1/2, -1/2}]
     = -Sqrt[3] / 3
    Compare with WMA example:
    >> ClebschGordan[{5, 0}, {4, 0}, {1, 0}] == Sqrt[5 / 33]
     = True
    """

    attributes = A_PROTECTED | A_READ_PROTECTED
    summary_text = "Clebsch-Gordan coefficient"
    sympy_name = "physics.quantum.cg.CG"

    def eval(
        self,
        j1m1: ListExpression,
        j2m2: ListExpression,
        jm: ListExpression,
        evaluation: Evaluation,
    ):
        "ClebschGordan[j1m1_List, j2m2_List, jm_List]"
        # Flatten the three {j, m} pairs into (j1, m1, j2, m2, j, m) in SymPy
        # form.  If any argument is not a two-element list, return None so
        # the expression is left unevaluated.
        # NOTE(review): despite the List[int] annotation, the entries are
        # SymPy numbers (half-integers are allowed) — annotation looks too
        # narrow; confirm before tightening.
        sympy_jms: List[int] = []
        for pair in (j1m1, j2m2, jm):
            if len(pair.elements) != 2:
                return
            sympy_jms += [p.to_sympy() for p in pair.elements]
        # Evaluate the coefficient symbolically via SymPy's CG(...).doit().
        return from_sympy(CG(*sympy_jms).doit())
# 2x2 identity matrix, shared as the result of PauliMatrix[0] and PauliMatrix[4].
IdentityMatrix2 = from_python([[1, 0], [0, 1]])
class PauliMatrix(SympyFunction):
    """
    <url>
    :Pauli matrices:
    https://en.wikipedia.org/wiki/Pauli_matrices</url> (<url>
    :SymPy:
    https://docs.sympy.org/latest/modules/physics/matrices.html#sympy.physics.matrices.msigma</url>, <url>
    :WMA:
    https://reference.wolfram.com/language/ref/PauliMatrix.html</url>)
    <dl>
      <dt>'PauliMatrix[$k$]'
      <dd>returns the $k$th Pauli spin matrix).
    </dl>
    >> Table[PauliMatrix[i], {i, 1, 3}]
     = {{{0, 1}, {1, 0}}, {{0, -I}, {I, 0}}, {{1, 0}, {0, -1}}}
    >> PauliMatrix[1] . PauliMatrix[2] == I PauliMatrix[3]
     = True
    >> MatrixExp[I \[Phi]/2 PauliMatrix[3]]
     = {{E ^ (I / 2 ϕ), 0}, {0, E ^ ((-I / 2) ϕ)}}
    >> % /. \[Phi] -> 2 Pi
     = {{-1, 0}, {0, -1}}
    """

    attributes = A_PROTECTED | A_READ_PROTECTED
    messages = {
        "pauli": "PauliMatrix parameter k=`` is not in the range 0..4.",
    }
    summary_text = "Pauli spin matrix"
    sympy_name = "physics.matrices.msigma"

    def eval(self, k: Integer, evaluation: Evaluation) -> Optional[Evaluation]:
        "PauliMatrix[k_]"
        # NOTE(review): the return annotation says Optional[Evaluation], but
        # what is returned is a Mathics (list) expression or None — confirm
        # and fix upstream.
        py_k = k.value
        if 0 <= py_k <= 4:
            if py_k in (0, 4):
                # Both 0 and 4 map to the 2x2 identity.
                return IdentityMatrix2
            # sigma_1..sigma_3 come from SymPy's msigma.
            return from_sympy(msigma(py_k))
        else:
            # Out of range: emit a message; returning None leaves the
            # expression unevaluated.
            evaluation.message("PauliMatrix", "pauli", k)
class SixJSymbol(SympyFunction):
    """
    <url>
    :6-j symbol:
    https://en.wikipedia.org/wiki/6-j_symbol</url> (<url>
    :SymPy:
    https://docs.sympy.org/latest/modules/physics/wigner.html#sympy.physics.wigner.wigner_6j</url>, <url>
    :WMA:
    https://reference.wolfram.com/language/ref/SixJSymbol.html</url>)
    <dl>
      <dt>'SixJSymbol[{$j1, $j2$, $j3$}, {$j4$, $j5$, $j6$}]'
      <dd>returns the values of the Wigner 6-$j$ symbol.
    </dl>
    >> SixJSymbol[{1, 2, 3}, {1, 2, 3}]
     = 1 / 105
    'SixJSymbol' is symmetric under permutations:
    >> % == SixJSymbol[{3, 2, 1}, {3, 2, 1}]
     = True
    >> SixJSymbol[{1, 2, 3}, {1, 2, 3}] == SixJSymbol[{2, 1, 3}, {2, 1, 3}]
     = True
    'SixJSymbol' works with integer and half-integer arguments:
    >> SixJSymbol[{1/2, 1/2, 1}, {5/2, 7/2, 3}]
     = -Sqrt[21] / 21
    Compare with WMA example:
    >> SixJSymbol[{1, 2, 3}, {2, 1, 2}] == 1 / (5 Sqrt[21])
     = True
    Result 0 returned for unphysical cases:
    >> SixJSymbol[{1, 2, 3}, {4, 5, 12}]
     = 0
    Arguments must be integer or half integer values:
    >> SixJSymbol[{0.5, 0.5, 1.1},{0.5, 0.5, 1.1}]
     : SixJSymbol values {0.5, 0.5, 1.1} {0.5, 0.5, 1.1} must be integer or half integer and fulfill the triangle relation
     = SixJSymbol[{0.5, 0.5, 1.1}, {0.5, 0.5, 1.1}]
    """

    attributes = A_PROTECTED | A_READ_PROTECTED
    messages = {
        "6jsymbol_symbol": "Parameter `` of `` has value ``; SixJSymbol cannot handle symbols yet.",
        "6jsymbol_value": "SixJSymbol values `` `` must be integer or half integer and fulfill the triangle relation",
    }
    # WMA docs say Racah 6-j symbol, but Wigner 6-j seems to be more likely, and that is what
    # https://mathworld.wolfram.com/Wigner6j-Symbol.html claims SixJSymbol means.
    # Also, Mathematica 5 refers to (Wigner) 6-j. So the WMA doc is probably wrong.
    summary_text = "values of the Wigner 6-j symbol"
    sympy_name = "physics.wigner.wigner_6j"

    def eval(self, j13: ListExpression, j46: ListExpression, evaluation: Evaluation):
        "SixJSymbol[j13_List, j46_List]"
        # Flatten the two triples into (j1, ..., j6) in SymPy form.
        sympy_js = []
        # 1-based triple index, used when reporting bad parameters.
        i = 0
        for triple in (j13, j46):
            i += 1
            if len(triple.elements) != 3:
                # Wrong shape: return None, leaving the expression unevaluated.
                return
            for element in triple.elements:
                if isinstance(element, Symbol):
                    # Symbolic entries are not supported yet.
                    evaluation.message(
                        "SixJSymbol", "6jsymbol_symbol", i, triple, element
                    )
                    return
                py_element = element.to_sympy()
                sympy_js.append(py_element)
        try:
            result = wigner_6j(*sympy_js)
        except ValueError:
            # Raised by SymPy when arguments are not integer/half-integer
            # (or violate the triangle relation).
            evaluation.message("SixJSymbol", "6jsymbol_value", j13, j46)
            return
        return from_sympy(result)
class ThreeJSymbol(SympyFunction):
    """
    <url>
    :3-j symbol:
    https://en.wikipedia.org/wiki/3-j_symbol</url> (<url>
    :SymPy:
    https://docs.sympy.org/latest/modules/physics/wigner.html#sympy.physics.wigner.wigner_3j</url>, <url>
    :WMA:
    https://reference.wolfram.com/language/ref/ThreeJSymbol.html</url>)
    <dl>
      <dt>'ThreeJSymbol[{$j1, $m1}, {$j2$, $m2$}, {$j3$, $m3$}]'
      <dd>returns the values of the Wigner 3-$j$ symbol.
    </dl>
    Compare with SymPy examples:
    >> ThreeJSymbol[{2, 0}, {6, 0}, {4, 0}]
     = Sqrt[715] / 143
    'ThreeJSymbol' is symmetric under permutations:
    >> % == ThreeJSymbol[{2, 0}, {4, 0}, {6, 0}] == ThreeJSymbol[{4, 0}, {2, 0}, {6, 0}]
     = True
    >> ThreeJSymbol[{2, 0}, {6, 0}, {4, 1}]
     = 0
    Compare with WMA examples:
    >> ThreeJSymbol[{6, 0}, {4, 0}, {2, 0}] == Sqrt[5 / 143]
     = True
    >> ThreeJSymbol[{2, 1}, {2, 2}, {4, -3}] == -(1 / (3 Sqrt[2]))
     = True
    >> ThreeJSymbol[{1/2, -1/2}, {1/2, -1/2}, {1, 1}]
     = -Sqrt[3] / 3
    Result 0 returned for unphysical cases:
    >> ThreeJSymbol[{1, 2}, {3, 4}, {5, 12}]
     = 0
    Arguments must be integer or half integer values:
    >> ThreeJSymbol[{2.1, 6}, {4, 0}, {0, 0}]
     : ThreeJSymbol values {2.1, 6}, {4, 0}, {0, 0} must be integer or half integer
     = ThreeJSymbol[{2.1, 6}, {4, 0}, {0, 0}]
    """

    attributes = A_PROTECTED | A_READ_PROTECTED
    messages = {
        "3jsymbol_symbol": "Parameter `` of `` has value ``; ThreeJSymbol cannot handle symbols yet.",
        "3jsymbol_value": "ThreeJSymbol values ``, ``, `` must be integer or half integer",
    }
    summary_text = "values of the Wigner 3-j symbol"
    sympy_name = "physics.wigner.wigner_3j"

    def eval(
        self,
        j12: ListExpression,
        j34: ListExpression,
        j56: ListExpression,
        evaluation: Evaluation,
    ):
        "ThreeJSymbol[j12_List, j34_List, j56_List]"
        # SymPy's wigner_3j takes (j1, j2, j3, m1, m2, m3): all j's first and
        # then all m's, rather than the (j, m) pairs Mathics receives; the
        # [j * 3 + i] indexing below performs that transposition.
        sympy_js = [None] * 6
        for i, pair in enumerate([j12, j34, j56]):
            if len(pair.elements) != 2:
                # Wrong shape: return None, leaving the expression unevaluated.
                return
            for j, element in enumerate(pair.elements):
                if isinstance(element, Symbol):
                    # Bug fix: the message tag must be a key of ``messages``
                    # ("3jsymbol_symbol"); the previous tag "threejsymbol"
                    # did not exist, so no message could be issued.  Also
                    # report the parameter position 1-based, consistent with
                    # SixJSymbol.
                    evaluation.message(
                        "ThreeJSymbol", "3jsymbol_symbol", i + 1, pair, element
                    )
                    return
                py_element = element.to_sympy()
                sympy_js[j * 3 + i] = py_element
        try:
            result = wigner_3j(*sympy_js)
        except ValueError:
            # Raised by SymPy when arguments are not integer/half-integer.
            evaluation.message("ThreeJSymbol", "3jsymbol_value", j12, j34, j56)
            return
        return from_sympy(result)
/Helmholtz-0.2.0.tar.gz/Helmholtz-0.2.0/helmholtz/editor/forms/measurements.py | from django.utils.datastructures import SortedDict
from django import forms
from django.contrib.contenttypes.models import ContentType
from helmholtz.annotation.models import Descriptor
from helmholtz.measurements.models import Parameter, GenericMeasurement, BoolMeasurement, IntegerMeasurement, FloatMeasurement, StringMeasurement
class ParameterForm(forms.ModelForm):
    """ModelForm for :class:`Parameter`; the ``constraints`` field is excluded."""

    class Meta:
        model = Parameter
        exclude = ['constraints']
class DescriptorForm(forms.ModelForm):
    """ModelForm exposing every field of :class:`Descriptor`."""

    class Meta:
        # NOTE(review): neither ``fields`` nor ``exclude`` is declared;
        # Django >= 1.8 raises ImproperlyConfigured for this — verify the
        # targeted Django version if this form is still in use.
        model = Descriptor
class GenericMeasurementForm(forms.ModelForm):
    """ModelForm for :class:`GenericMeasurement`.

    The generic-relation bookkeeping fields (``object``, ``content_type``,
    ``object_id``) are excluded; they are expected to be set in code.
    """

    class Meta:
        model = GenericMeasurement
        exclude = ['object', 'content_type', 'object_id']
class BoolMeasurementForm(forms.ModelForm):
    """ModelForm for :class:`BoolMeasurement` (generic-relation fields excluded)."""

    class Meta:
        model = BoolMeasurement
        exclude = ['object', 'content_type', 'object_id']
class IntegerMeasurementForm(forms.ModelForm):
    """ModelForm for :class:`IntegerMeasurement` (generic-relation fields excluded)."""

    class Meta:
        model = IntegerMeasurement
        exclude = ['object', 'content_type', 'object_id']
class FloatMeasurementForm(forms.ModelForm):
    """ModelForm for :class:`FloatMeasurement` (generic-relation fields excluded)."""

    class Meta:
        model = FloatMeasurement
        exclude = ['object', 'content_type', 'object_id']
class StringMeasurementForm(forms.ModelForm):
    """ModelForm for :class:`StringMeasurement` (generic-relation fields excluded)."""

    class Meta:
        model = StringMeasurement
        exclude = ['object', 'content_type', 'object_id']
class MeasurementField(forms.Field):
    """Form field that accepts any submitted value unchanged.

    Overriding :meth:`clean` to return the raw value bypasses Django's
    default validation/coercion chain entirely.
    """

    def clean(self, value):
        # Intentionally no validation: the composite widget delivers an
        # already-structured value.
        return value
class MeasurementWidget(forms.MultiWidget):
    """
    A Widget that splits a measurement input into two <input type="text">
    boxes (value and unit).

    ``attrs`` may carry per-sub-widget attribute dicts under the keys
    ``'left'``, ``'right'`` and ``'all'``.
    """

    def __init__(self, datetime, value_choices, unit_choices, attrs=None):
        # BUG FIX: ``dict.has_key`` was removed in Python 3 — use ``in`` /
        # ``dict.get``.  Also tolerate ``attrs=None`` (the declared default),
        # which previously crashed on subscripting.
        # NOTE(review): ``datetime``, ``value_choices`` and ``unit_choices``
        # are accepted but unused; kept for interface compatibility.
        attrs = attrs or {}
        widgets = (forms.TextInput(attrs=attrs.get('left')),
                   forms.TextInput(attrs=attrs.get('right')))
        super(MeasurementWidget, self).__init__(widgets, attrs.get('all'))

    def value_from_datadict(self, data, files, name):
        # BUG FIX: the original referenced ``PhysicalQuantityWidget`` (a
        # different class) in the super() call, raising NameError at runtime.
        values = super(MeasurementWidget, self).value_from_datadict(data, files, name)
        return values

    def decompress(self, value):
        """Split a measurement object into [value, unit] for the sub-widgets."""
        if value:
            return [value.value, value.unit]
        # NOTE(review): ``initial_unit`` is never assigned on this class;
        # callers presumably set it after construction — verify.
        return [None, self.initial_unit]
/Flask-Occam-0.2.1.tar.gz/Flask-Occam-0.2.1/flask_occam/converters.py | import re
from werkzeug.routing import BaseConverter
from werkzeug.exceptions import NotFound
# helpers
# -------
# Global model-name -> model-class map, populated lazily by gather_models().
MODELS = {}
def class_registry(cls):
    """
    Return a shallow copy of the SQLAlchemy class registry of ``cls``.

    Newer SQLAlchemy exposes it as ``_sa_registry._class_registry``; older
    declarative bases used ``_decl_class_registry``.

    :param cls: declarative model base class
    :return: dict mapping registered names to classes
    """
    try:
        return dict(cls._sa_registry._class_registry)
    except AttributeError:
        # BUG FIX: the original used a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit) and had an unreachable ``return``
        # after this one.  Only the missing-attribute case should fall back.
        return dict(cls._decl_class_registry)
def gather_models():
    """
    Inspect sqlalchemy models from current context and set global
    dictionary to be used in url conversion.
    """
    global MODELS
    from flask import current_app, has_app_context

    # Nothing to gather outside an app context or without Flask-SQLAlchemy.
    if not has_app_context() or 'sqlalchemy' not in current_app.extensions:
        return

    # Inspect current models and add them to the map under several aliases.
    db = current_app.extensions['sqlalchemy'].db
    for model in class_registry(db.Model).values():
        if not (isinstance(model, type) and issubclass(model, db.Model)):
            continue
        name = model.__name__
        # class name
        MODELS[name] = model
        # lowercase name
        MODELS[name.lower()] = model
        # snake_case name
        words = re.findall(r'([A-Z][0-9a-z]+)', name)
        if len(words) > 1:
            MODELS['_'.join(word.lower() for word in words)] = model
    return
# converters
# ----------
class ModelConverter(BaseConverter):
    """
    For url inputs containing a model identifier, look
    up the model and return the object.
    This method simplifies a lot of the boilerplate needed
    to do model look ups in REST apis.
    Examples:
    .. code-block:: python
        @app.route('/users/<id(User):user>')
        def get_user(user):
            return jsonify(user.json())
    In addition, this class can be inherited and used
    for other custom parameter url converters. For instance,
    here is how you might use it to create a name converter:
    .. code-block:: python
        class NameConverter(ModelConverter):
            __param__ = 'name'
        app.url_map.converters['name'] = NameConverter
        # ... handlers ...
        @app.route('/users/<name(User):user>')
        def get_user(user):
            return jsonify(user.json())
    """

    # Model attribute used for the lookup; subclasses override this.
    __param__ = 'id'

    def __init__(self, map, model):
        # ``model`` is the name given in the route, e.g. 'User' in
        # <id(User):user>; resolution to a class happens in to_python().
        self.map = map
        self.model = model
        return

    @property
    def models(self):
        # Lazily populate the global model map on first use (gather_models
        # is a no-op outside an app context).
        global MODELS
        if not MODELS:
            gather_models()
        return MODELS

    def to_python(self, value):
        """Convert the raw url fragment into a model instance (or 404)."""
        mapper = self.models
        # make sure model exists
        if self.model not in mapper:
            raise AssertionError(
                'Specified model `{}` in url converter '
                'not part of application models.'.format(self.model))
        # set up class for conversion
        cls = mapper[self.model]
        # search for the object
        # NOTE(review): relies on the model class providing a ``get``
        # classmethod accepting keyword filters — verify against the models.
        model = cls.get(**{self.__param__: value})
        if model is None:
            raise NotFound
        return model

    def to_url(self, value):
        # Serialize a model instance back into its url fragment.
        return super(ModelConverter, self).to_url(getattr(value, self.__param__))
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/diofant/solvers/inequalities.py |
import collections
import itertools
from ..core import Dummy, Eq, Ge, Gt, Integer, Le, Lt, Ne, S, Symbol, oo
from ..core.compatibility import iterable
from ..core.relational import Relational
from ..functions import Abs, Max, Min, Piecewise, sign
from ..logic import And, Or, false, true
from ..matrices import Matrix, diag
from ..polys import PolificationFailed, Poly, parallel_poly_from_expr
from ..polys.polyutils import _nsort
from ..sets import FiniteSet, Interval, Union
from ..utilities import filldedent, ordered
# Public API: a one-element tuple (note the trailing comma).
__all__ = 'reduce_inequalities',
def canonicalize_inequalities(eqs):
    """Canonicalize system of inequalities to have only Lt/Le."""
    eqs = set(eqs)
    # Expand Ne into a pair of strict inequalities and Eq into a pair of
    # non-strict ones, then drop the originals.
    derived = set()
    for rel in eqs:
        if isinstance(rel, Ne):
            derived.add(Lt(*rel.args))
            derived.add(Lt(rel.rhs, rel.lhs))
        elif isinstance(rel, Eq):
            derived.add(Le(*rel.args))
            derived.add(Le(rel.rhs, rel.lhs))
    eqs = {rel for rel in eqs | derived if not isinstance(rel, (Ne, Eq))}
    # Gt/Ge -> Lt/Le by swapping sides.
    eqs = {rel.reversed if rel.func in (Gt, Ge) else rel for rel in eqs}
    # Now only Lt/Le remain; move everything to the left-hand side.
    return list(ordered(rel.func(rel.lhs - rel.rhs, 0) for rel in eqs))
def fourier_motzkin(A, b, c, j):
    """
    Fourier-Motzkin elimination for `j`-th variable.
    Parameters
    ==========
    A : Matrix
        The coefficients of the system.
    b : Matrix
        The constant terms in the right hand side of relations.
    c : Matrix
        The vector of boolean elements, which determine the
        type of relation (1 for Le and 0 - for Lt).
    j : int
        The variable index.
    Example
    =======
    >>> A = Matrix([[-1, 0], [2, 4], [1, -2]])
    >>> b = Matrix([-1, 14, -1])
    >>> c = Matrix([1, 1, 1])
    >>> fourier_motzkin(A, b, c, 0)
    (Matrix([
    [0, 4],
    [0, -2]]), Matrix([
    [12],
    [-2]]), Matrix([
    [1],
    [1]]))
    References
    ==========
    * :cite:`Schrijver1998theory`, pp. 155–156.
    """
    m = A.rows
    # Partition row indices by the sign of the coefficient of variable j:
    # rows[0] = negative, rows[1] = zero, rows[2] = positive.
    rows = [[], [], []]
    D, d, k = [Matrix()]*3
    assert m == b.rows == c.rows
    assert all(_.is_comparable for _ in A)
    for i, a in enumerate(A[:, j]):
        rows[int(sign(a) + 1)].append(i)
    # Keep the zero-coefficient rows as-is; combine every (negative, positive)
    # pair so that variable j cancels out of the new row.
    for p in itertools.chain(rows[1], itertools.product(*rows[::2])):
        if p in rows[1]:
            D = D.col_join(A[p, :])
            d = d.col_join(Matrix([b[p]]))
            k = k.col_join(Matrix([c[p]]))
        else:
            s, t = p
            # A[t, j] > 0 and A[s, j] < 0, so the combined row has a zero
            # coefficient in column j.
            D = D.col_join(A[t, j]*A[s, :] - A[s, j]*A[t, :])
            d = d.col_join(Matrix([A[t, j]*b[s] - A[s, j]*b[t]]))
            # The combination is non-strict only if both parents were.
            k = k.col_join(Matrix([c[s] and c[t]]))
    return D, d, k
def solve_linear_inequalities(eqs, *gens, **args):
    """
    Solve system of linear inequalities.
    Examples
    ========
    >>> solve_linear_inequalities([x >= 0, 2*x + 4*y <= 14, x - 2*y <= 1])
    (x >= 0) & (x <= 4) & (y >= x/2 - 1/2) & (y <= -x/2 + 7/2)
    """
    assert all(e.is_Relational for e in eqs)
    eqs = canonicalize_inequalities(eqs)
    polys, opt = parallel_poly_from_expr([e.lhs for e in eqs], *gens, **args)
    if not all(p.is_linear for p in polys):
        raise ValueError(f'Got non-linear inequality in {eqs}')
    # Build the matrix form A*gens <= b (strictness tracked separately in c).
    gens = Matrix(opt.gens)
    A = Matrix([[p.coeff_monomial(x) for x in gens] for p in polys])
    b = Matrix([-p.coeff_monomial(1) for p in polys])
    c = Matrix([e.func is Le for e in eqs])
    res = []
    failed = []
    # (coefficient-sign, non-strict?) -> relational constructor.
    op_map = {(1, 1): Le, (1, 0): Lt, (0, 1): Ge, (0, 0): Gt}
    # Eliminate variables one at a time (last generator first), emitting the
    # bounds for each eliminated variable in terms of the remaining ones.
    for i, g in reversed(list(enumerate(gens))):
        D, d, e = fourier_motzkin(A, b, c, i)
        if not D:
            # Variable i never got eliminated into a smaller system; deal
            # with it after the loop.
            failed.append(i)
            continue
        gens_g = gens.copy()
        gens_g[i] = 0
        for j, (r, x) in enumerate(zip(b - A*gens_g, c)):
            gc = A[j, i]
            if not gc:
                continue
            res.append(op_map[(int(gc > 0), int(x))](g, r/gc))
        A, b, c = D, d, e
    if not A.is_zero:
        # A single variable remains: combine all its bounds into Min/Max
        # expressions, keeping strict and non-strict bounds apart.
        i = failed.pop(0)
        g = gens[i]
        gens_g = gens.copy()
        gens_g[i] = 0
        strict = []
        non_strict = []
        for r, x in zip(diag(*A[:, i])**-1*(b - A*gens_g), c):
            if x:
                non_strict.append(r)
            else:
                strict.append(r)
        pos = int(A[0, i] > 0)
        other_op = Min if pos else Max
        if strict and non_strict:
            # Mixed strictness: which bound is binding decides whether the
            # comparison with g is strict.
            a, b = other_op(*non_strict), other_op(*strict)
            opn, ops = op_map[(pos, 1)], op_map[(pos, 0)]
            res.append(Or(And(opn(g, a), ops(a, b)), And(ops(g, b), opn(b, a))))
        else:
            both = non_strict + strict
            res.append(op_map[(pos, int(not strict))](g, other_op(*(both))))
    elif any(_ < 0 for _ in b):
        # 0 <= negative constant: the system is infeasible.
        return false
    return And(*res)
def solve_poly_inequality(poly, rel):
    """
    Solve a polynomial inequality with rational coefficients.
    Examples
    ========
    >>> solve_poly_inequality(x.as_poly(), '==')
    [{0}]
    >>> solve_poly_inequality((x**2 - 1).as_poly(), '!=')
    [[-oo, -1), (-1, 1), (1, oo]]
    >>> solve_poly_inequality((x**2 - 1).as_poly(), '==')
    [{-1}, {1}]
    See Also
    ========
    solve_poly_inequalities
    """
    if not isinstance(poly, Poly):
        raise ValueError('`poly` should be a Poly instance')
    if rel not in {'>', '<', '>=', '<=', '==', '!='}:
        raise ValueError(f'Invalid relational operator symbol: {rel!r}')
    if poly.is_number:
        # Constant polynomial: the relation is either identically true or
        # identically false.
        t = Relational(poly.as_expr(), 0, rel)
        if t == true:
            return [S.ExtendedReals]
        elif t == false:
            return [S.EmptySet]
        else:
            raise NotImplementedError(f"Couldn't determine truth value of {t}")
    reals, intervals = poly.real_roots(multiple=False), []
    if rel == '==':
        # Solution is exactly the set of real roots.
        for root, _ in reals:
            interval = Interval(root, root)
            intervals.append(interval)
    elif rel == '!=':
        # Complement of the roots: open intervals between consecutive roots.
        left = -oo
        for right, _ in reals + [(oo, 1)]:
            interval = Interval(left, right, left.is_finite, right.is_finite)
            intervals.append(interval)
            left = right
    else:
        # Strict/non-strict inequality: sweep the roots from the right,
        # tracking the sign of the polynomial on each interval.
        sign = +1 if poly.LC() > 0 else -1
        eq_sign, equal = None, False
        if rel == '>':
            eq_sign = +1
        elif rel == '<':
            eq_sign = -1
        elif rel == '>=':
            eq_sign, equal = +1, True
        else:
            eq_sign, equal = -1, True
        right, right_open = oo, False
        for left, multiplicity in reversed(reals):
            if multiplicity % 2:
                # Odd multiplicity: the sign flips across this root.
                if sign == eq_sign:
                    intervals.insert(0, Interval(left, right, not equal and left.is_finite, right_open and right.is_finite))
                sign, right, right_open = -sign, left, not equal
            else:
                # Even multiplicity: sign is unchanged, but the root itself
                # may need to be excluded (strict) or added (non-strict).
                if sign == eq_sign and not equal:
                    intervals.insert(0, Interval(left, right, left.is_finite, right_open and right.is_finite))
                    right, right_open = left, True
                elif sign != eq_sign and equal:
                    intervals.insert(0, Interval(left, left))
        if sign == eq_sign:
            # The leftmost unbounded interval also satisfies the relation.
            intervals.insert(0, Interval(-oo, right, False, right_open and right.is_finite))
    return intervals
def solve_poly_inequalities(polys):
    """
    Solve polynomial inequalities with rational coefficients.

    Each entry of ``polys`` is a ``(poly, rel)`` pair; the result is the
    union of the individual solution sets.
    Examples
    ========
    >>> solve_poly_inequalities((((+x**2 - 3).as_poly(), '>'),
    ...                          ((-x**2 + 1).as_poly(), '>')))
    [-oo, -sqrt(3)) U (-1, 1) U (sqrt(3), oo]
    """
    solutions = (solve_poly_inequality(poly, rel) for poly, rel in polys)
    return Union(*solutions)
def solve_rational_inequalities(eqs):
    """
    Solve a system of rational inequalities with rational coefficients.
    Examples
    ========
    >>> solve_rational_inequalities([[(((-x + 1).as_poly(),
    ...                                 Integer(1).as_poly(x)), '>='),
    ...                               (((-x + 1).as_poly(),
    ...                                 Integer(1).as_poly(x)), '<=')]])
    {1}
    >>> solve_rational_inequalities([[((x.as_poly(), Integer(1).as_poly(x)), '!='),
    ...                               (((-x + 1).as_poly(),
    ...                                 Integer(1).as_poly(x)), '>=')]])
    [-oo, 0) U (0, 1]
    See Also
    ========
    solve_poly_inequality
    """
    result = S.EmptySet
    # ``eqs`` is a disjunction of conjunctions: each inner list is a system
    # whose members must all hold; the outer results are unioned.
    for eq in eqs:
        global_intervals = [S.ExtendedReals]
        for (numer, denom), rel in eq:
            # numer/denom rel 0 has the same sign pattern as numer*denom
            # rel 0 away from the poles of denom.
            intervals = []
            for numer_interval in solve_poly_inequality(numer*denom, rel):
                for global_interval in global_intervals:
                    interval = numer_interval & global_interval
                    if interval is not S.EmptySet:
                        intervals.append(interval)
            global_intervals = intervals
            # Remove the zeros of the denominator (poles) from the solution.
            intervals = []
            for global_interval in global_intervals:
                for denom_interval in solve_poly_inequality(denom, '=='):
                    global_interval -= denom_interval
                if global_interval is not S.EmptySet:
                    intervals.append(global_interval)
            global_intervals = intervals
            if not global_intervals:
                break
        # NOTE(review): ``expr`` below is computed but never used, and the
        # interval copy is a no-op — this looks like leftover dead code.
        intervals = []
        expr = numer.as_expr()/denom.as_expr()
        expr = Relational(expr, 0, rel)
        for interval in global_intervals:
            intervals.append(interval)
        global_intervals = intervals
        for interval in global_intervals:
            result |= interval
    return result
def reduce_rational_inequalities(exprs, gen, relational=True):
    """
    Reduce a system of rational inequalities with rational coefficients.
    Examples
    ========
    >>> reduce_rational_inequalities([[x**2 <= 0]], x)
    Eq(x, 0)
    >>> reduce_rational_inequalities([[x + 2 > 0]], x)
    -2 < x
    >>> reduce_rational_inequalities([[(x + 2, '>')]], x)
    -2 < x
    >>> reduce_rational_inequalities([[x + 2]], x)
    Eq(x, -2)
    """
    exact = True
    eqs = []
    solution = S.ExtendedReals if exprs else S.EmptySet
    for _exprs in exprs:
        _eqs = []
        for expr in _exprs:
            # Each entry may be a bare expression (meaning == 0), a
            # Relational, or an (expr, rel) tuple.
            if isinstance(expr, tuple):
                expr, rel = expr
            else:
                if expr.is_Relational:
                    expr, rel = expr.lhs - expr.rhs, expr.rel_op
                else:
                    rel = '=='
            if expr == true:
                # Trivially true: 0 == 0.
                numer, denom, rel = Integer(0), Integer(1), '=='
            elif expr == false:
                # Trivially false: 1 == 0.
                numer, denom, rel = Integer(1), Integer(1), '=='
            else:
                numer, denom = expr.together().as_numer_denom()
            (numer, denom), opt = parallel_poly_from_expr((numer, denom), gen)
            if not opt.domain.is_Exact:
                # Float coefficients: work exactly, convert back at the end.
                numer, denom, exact = numer.to_exact(), denom.to_exact(), False
            domain = opt.domain.get_exact()
            if not (domain.is_IntegerRing or domain.is_RationalField):
                # Non-rational coefficients: fall back to the generic
                # univariate solver for this single relation.
                expr = numer/denom
                expr = Relational(expr, 0, rel)
                solution &= solve_univariate_inequality(expr, gen, relational=False)
            else:
                _eqs.append(((numer, denom), rel))
        if _eqs:
            eqs.append(_eqs)
    if eqs:
        solution &= solve_rational_inequalities(eqs)
    if not exact:
        solution = solution.evalf()
    if relational:
        solution = solution.as_relational(gen)
    return solution
def reduce_piecewise_inequality(expr, rel, gen):
    """
    Reduce an inequality with nested piecewise functions.
    Examples
    ========
    >>> reduce_piecewise_inequality(abs(x - 5) - 3, '<', x)
    (2 < x) & (x < 8)
    >>> reduce_piecewise_inequality(abs(x + 2)*3 - 13, '<', x)
    (-19/3 < x) & (x < 7/3)
    >>> reduce_piecewise_inequality(Piecewise((1, x < 1),
    ...                                       (3, True)) - 1, '>', x)
    1 <= x
    See Also
    ========
    reduce_piecewise_inequalities
    """
    if gen.is_extended_real is False:
        raise TypeError(filldedent("""
            can't solve inequalities with piecewise
            functions containing non-real variables"""))

    def _bottom_up_scan(expr):
        # Expand ``expr`` into a list of (piecewise-free expression,
        # [conditions]) pairs such that on each condition set the original
        # expression equals the corresponding rational expression.
        exprs = []
        if expr.is_Add or expr.is_Mul:
            # Cartesian product: combine every branch of every operand.
            op = expr.func
            for arg in expr.args:
                _exprs = _bottom_up_scan(arg)
                if not exprs:
                    exprs = _exprs
                else:
                    args = []
                    for expr, conds in exprs:
                        for _expr, _conds in _exprs:
                            args.append((op(expr, _expr), conds + _conds))
                    exprs = args
        elif expr.is_Pow:
            n = expr.exp
            if not n.is_Integer:
                raise NotImplementedError('only integer powers are supported')
            _exprs = _bottom_up_scan(expr.base)
            for expr, conds in _exprs:
                exprs.append((expr**n, conds))
        elif isinstance(expr, Abs):
            # |e| splits into e (when e >= 0) and -e (when e < 0).
            _exprs = _bottom_up_scan(expr.args[0])
            for expr, conds in _exprs:
                exprs.append((+expr, conds + [Ge(expr, 0)]))
                exprs.append((-expr, conds + [Lt(expr, 0)]))
        elif isinstance(expr, Piecewise):
            for a in expr.args:
                _exprs = _bottom_up_scan(a.expr)
                for ex, conds in _exprs:
                    if a.cond != true:
                        exprs.append((ex, conds + [a.cond]))
                    else:
                        # Default branch: active when none of the explicit
                        # conditions hold.
                        oconds = [c[1] for c in expr.args if c[1] != true]
                        exprs.append((ex, conds + [And(*[~c for c in oconds])]))
        else:
            exprs = [(expr, [])]
        return exprs

    exprs = _bottom_up_scan(expr)
    # Normalize '<'/'<=' to '>'/'>=' by negating the expression, so each
    # generated system has a uniform direction.
    mapping = {'<': '>', '<=': '>='}
    inequalities = []
    for expr, conds in exprs:
        if rel not in mapping:
            expr = Relational(+expr, 0, rel)
        else:
            expr = Relational(-expr, 0, mapping[rel])
        inequalities.append([expr] + conds)
    return reduce_rational_inequalities(inequalities, gen)
def reduce_piecewise_inequalities(exprs, gen):
    """
    Reduce a system of inequalities with nested piecewise functions.

    Each ``(expr, rel)`` pair in ``exprs`` is reduced independently and the
    results are conjoined.
    Examples
    ========
    >>> reduce_piecewise_inequalities([(abs(3*x - 5) - 7, '<'),
    ...                                (abs(x + 25) - 13, '>')], x)
    (-2/3 < x) & (x < 4) & ((-12 < x) | (x < -38))
    >>> reduce_piecewise_inequalities([(abs(x - 4) + abs(3*x - 5) - 7, '<')], x)
    (1/2 < x) & (x < 4)
    See Also
    ========
    reduce_piecewise_inequality
    """
    reduced = []
    for expr, rel in exprs:
        reduced.append(reduce_piecewise_inequality(expr, rel, gen))
    return And(*reduced)
def solve_univariate_inequality(expr, gen, relational=True):
    """
    Solves a real univariate inequality.
    Examples
    ========
    >>> solve_univariate_inequality(x**2 >= 4, x)
    (2 <= x) | (x <= -2)
    >>> solve_univariate_inequality(x**2 >= 4, x, relational=False)
    [-oo, -2] U [2, oo]
    """
    from ..simplify import simplify
    from .solvers import denoms, solve
    e = expr.lhs - expr.rhs
    parts = n, d = e.as_numer_denom()
    # Candidate boundary points: zeros of the expression plus singularities
    # (zeros of the denominator).
    if all(i.is_polynomial(gen) for i in parts):
        solns = solve(n, gen, check=False)
        singularities = solve(d, gen, check=False)
    else:
        solns = solve(e, gen, check=False)
        singularities = []
        for d in denoms(e):
            singularities.extend(solve(d, gen))
    solns = [s[gen] for s in solns]
    singularities = [s[gen] for s in singularities]
    # Whether boundary points themselves satisfy the relation (i.e. the
    # relation is non-strict: expr.func(0, 0) is true for <=, >=, ==).
    include_x = expr.func(0, 0)

    def valid(x):
        # Test whether the relation holds at the sample point x.
        v = e.subs({gen: x})
        try:
            r = expr.func(v, 0)
        except TypeError:
            r = false
        r = simplify(r)
        if r in (true, false):
            return r
        elif v.is_comparable is False:
            return False
        else:
            raise NotImplementedError
    # Sweep the sorted boundary points, sampling one interior point per
    # interval to decide whether the whole interval belongs to the solution.
    start = -oo
    sol_sets = [S.EmptySet]
    reals = _nsort(set(solns + singularities), separated=True)[0]
    for x in reals:
        end = x
        if end in [-oo, oo]:
            if valid(Integer(0)):
                sol_sets.append(Interval(start, oo, start in reals, end == oo))
            break
        if valid((start + end)/2 if start != -oo else end - 1):
            sol_sets.append(Interval(start, end, start.is_finite is not False, end.is_finite is not False))
        if x in singularities:
            # Never include a pole, even for non-strict relations.
            singularities.remove(x)
        elif include_x:
            sol_sets.append(FiniteSet(x))
        start = end
    # Rightmost unbounded interval.
    end = oo
    if valid(start + 1):
        sol_sets.append(Interval(start, end, True, end in reals))
    rv = Union(*sol_sets)
    # Exclude +/-oo when the relation fails in the limit (strict relations
    # are tested with their non-strict counterpart at infinity).
    rel_map = {Lt: Le, Gt: Ge}
    for t in [oo, -oo]:
        try:
            rel = rel_map.get(expr.func, expr.func)
            if rv.contains(t) is true and rel(e.limit(gen, t)) is false:
                rv -= FiniteSet(t)
        except TypeError:
            pass
    return rv if not relational else rv.as_relational(gen)
def _reduce_inequalities(inequalities, symbols):
    """Dispatch each inequality to the appropriate specialized solver."""
    if len(symbols) > 1:
        # Multivariate systems are only supported in the linear case.
        try:
            return solve_linear_inequalities(inequalities, *symbols)
        except (PolificationFailed, ValueError):
            pass
    rat_part = collections.defaultdict(list)
    # copy() of an (empty) defaultdict keeps the list default factory.
    pw_part = rat_part.copy()
    other = []
    for inequality in inequalities:
        if inequality == true:
            continue
        if inequality == false:
            return false
        expr, rel = inequality.lhs, inequality.rel_op  # rhs is 0
        # check for gens using atoms which is more strict than free_symbols to
        # guard against EX domain which won't be handled by
        # reduce_rational_inequalities
        gens = expr.atoms(Dummy, Symbol)
        if len(gens) == 1:
            gen = gens.pop()
        else:
            common = expr.free_symbols & set(symbols)
            if len(common) == 1:
                gen = common.pop()
                other.append(solve_univariate_inequality(Relational(expr, 0, rel), gen))
                continue
            raise NotImplementedError('Solving multivariate inequalities is '
                                      'implemented only for linear case yet.')
        if expr.is_rational_function(gen):
            rat_part[gen].append((expr, rel))
        else:
            # Non-rational parts are acceptable only if built from Abs and
            # Piecewise; anything else goes through the generic solver.
            components = set(expr.find(lambda u: u.has(gen) and
                                       (u.is_Function or u.is_Pow and
                                        not u.exp.is_Integer)))
            if components and all(isinstance(i, (Abs, Piecewise)) for i in components):
                pw_part[gen].append((expr, rel))
            else:
                other.append(solve_univariate_inequality(Relational(expr, 0, rel), gen))
    rat_reduced = []
    pw_reduced = []
    for gen, exprs in rat_part.items():
        rat_reduced.append(reduce_rational_inequalities([exprs], gen))
    for gen, exprs in pw_part.items():
        pw_reduced.append(reduce_piecewise_inequalities(exprs, gen))
    return And(*(rat_reduced + pw_reduced + other))
def reduce_inequalities(inequalities, symbols=[]):
    """
    Reduces a system of inequalities or equations.
    Examples
    ========
    >>> reduce_inequalities(0 <= x + 3, [])
    -3 <= x
    >>> reduce_inequalities(0 <= x + y*2 - 1, [x])
    -2*y + 1 <= x
    See Also
    ========
    diofant.solvers.solvers.solve : solve algebraic equations
    """
    # NOTE: the mutable default ``symbols=[]`` is harmless here because
    # ``symbols`` is only rebound, never mutated in place.
    if not iterable(inequalities):
        inequalities = [inequalities]
    # prefilter: normalize to "expr rel 0" form and drop trivial relations.
    keep = []
    for i in inequalities:
        if isinstance(i, Relational):
            i = i.func(i.lhs.as_expr() - i.rhs.as_expr(), 0)
        elif i not in (True, False):
            # A bare expression is interpreted as an equation expr == 0.
            i = Eq(i, 0)
        if i == true:
            continue
        if i == false:
            return false
        keep.append(i)
    inequalities = keep
    del keep
    gens = set().union(*[i.free_symbols for i in inequalities])
    if not iterable(symbols):
        symbols = [symbols]
    symbols = ordered(set(symbols) or gens)
    # make vanilla symbol real: the solvers assume extended-real generators.
    recast = {i: Dummy(i.name, extended_real=True)
              for i in gens if i.is_extended_real is None}
    inequalities = [i.xreplace(recast) for i in inequalities]
    symbols = ordered(i.xreplace(recast) for i in symbols)
    # solve system
    rv = _reduce_inequalities(inequalities, list(symbols))
    # restore original symbols and return
    return rv.xreplace({v: k for k, v in recast.items()})
/Kivy-2.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl/kivy/animation.py | __all__ = ('Animation', 'AnimationTransition')
from math import sqrt, cos, sin, pi
from collections import ChainMap
from kivy.event import EventDispatcher
from kivy.clock import Clock
from kivy.compat import string_types, iterkeys
from kivy.weakproxy import WeakProxy
class Animation(EventDispatcher):
    '''Create an animation definition that can be used to animate a Widget.
    :Parameters:
        `duration` or `d`: float, defaults to 1.
            Duration of the animation, in seconds.
        `transition` or `t`: str or func
            Transition function for animate properties. It can be the name of a
            method from :class:`AnimationTransition`.
        `step` or `s`: float
            Step in milliseconds of the animation. Defaults to 0, which means
            the animation is updated for every frame.
            To update the animation less often, set the step value to a float.
            For example, if you want to animate at 30 FPS, use s=1/30.
    :Events:
        `on_start`: animation, widget
            Fired when the animation is started on a widget.
        `on_complete`: animation, widget
            Fired when the animation is completed or stopped on a widget.
        `on_progress`: animation, widget, progression
            Fired when the progression of the animation is changing.
    .. versionchanged:: 1.4.0
        Added s/step parameter.
    .. versionchanged:: 1.10.0
        The default value of the step parameter was changed from 1/60. to 0.
    '''

    # Clock event driving :meth:`_update`; None while no clock is installed.
    _update_ev = None

    # Class-level registry of every Animation currently running on a widget.
    _instances = set()

    __events__ = ('on_start', 'on_progress', 'on_complete')

    def __init__(self, **kw):
        super().__init__()
        # Initialize: both short ('d', 't', 's') and long keyword forms are
        # accepted; the short one wins if both are given.
        self._clock_installed = False
        self._duration = kw.pop('d', kw.pop('duration', 1.))
        self._transition = kw.pop('t', kw.pop('transition', 'linear'))
        self._step = kw.pop('s', kw.pop('step', 0))
        if isinstance(self._transition, string_types):
            # Resolve a transition name to the corresponding static method.
            self._transition = getattr(AnimationTransition, self._transition)
        # Everything left in kw is a property to animate: {name: target}.
        self._animated_properties = kw
        # widget.uid -> {'widget': ..., 'properties': ..., 'time': ...}
        self._widgets = {}

    @property
    def duration(self):
        '''Return the duration of the animation.
        '''
        return self._duration

    @property
    def transition(self):
        '''Return the transition of the animation.
        '''
        return self._transition

    @property
    def animated_properties(self):
        '''Return the properties used to animate.
        '''
        return self._animated_properties

    @staticmethod
    def stop_all(widget, *largs):
        '''Stop all animations that concern a specific widget / list of
        properties.
        Example::
            anim = Animation(x=50)
            anim.start(widget)
            # and later
            Animation.stop_all(widget, 'x')
        '''
        # Iterate over copies: stopping mutates Animation._instances.
        if len(largs):
            for animation in list(Animation._instances):
                for x in largs:
                    animation.stop_property(widget, x)
        else:
            for animation in set(Animation._instances):
                animation.stop(widget)

    @staticmethod
    def cancel_all(widget, *largs):
        '''Cancel all animations that concern a specific widget / list of
        properties. See :attr:`cancel`.
        Example::
            anim = Animation(x=50)
            anim.start(widget)
            # and later
            Animation.cancel_all(widget, 'x')
        .. versionadded:: 1.4.0
        .. versionchanged:: 2.1.0
            If the parameter ``widget`` is None, all animated widgets will be
            the target and cancelled. If ``largs`` is also given, animation of
            these properties will be canceled for all animated widgets.
        '''
        if widget is None:
            if largs:
                for animation in Animation._instances.copy():
                    for info in tuple(animation._widgets.values()):
                        widget = info['widget']
                        for x in largs:
                            animation.cancel_property(widget, x)
            else:
                # Wholesale cancellation: drop every widget registration and
                # clock event, then empty the registry.
                for animation in Animation._instances:
                    animation._widgets.clear()
                    animation._clock_uninstall()
                Animation._instances.clear()
            return
        if len(largs):
            for animation in list(Animation._instances):
                for x in largs:
                    animation.cancel_property(widget, x)
        else:
            for animation in set(Animation._instances):
                animation.cancel(widget)

    def start(self, widget):
        '''Start the animation on a widget.
        '''
        # Restart semantics: a running animation on this widget is stopped
        # (dispatching its on_complete) before the new one begins.
        self.stop(widget)
        self._initialize(widget)
        self._register()
        self.dispatch('on_start', widget)

    def stop(self, widget):
        '''Stop the animation previously applied to a widget, triggering the
        `on_complete` event.'''
        props = self._widgets.pop(widget.uid, None)
        if props:
            self.dispatch('on_complete', widget)
        # cancel() performs the actual cleanup (clock + registry).
        self.cancel(widget)

    def cancel(self, widget):
        '''Cancel the animation previously applied to a widget. Same
        effect as :attr:`stop`, except the `on_complete` event will
        *not* be triggered!
        .. versionadded:: 1.4.0
        '''
        self._widgets.pop(widget.uid, None)
        self._clock_uninstall()
        if not self._widgets:
            self._unregister()

    def stop_property(self, widget, prop):
        '''Even if an animation is running, remove a property. It will not be
        animated further. If it was the only/last property being animated,
        the animation will be stopped (see :attr:`stop`).
        '''
        props = self._widgets.get(widget.uid, None)
        if not props:
            return
        props['properties'].pop(prop, None)
        # no more properties to animation ? kill the animation.
        if not props['properties']:
            self.stop(widget)

    def cancel_property(self, widget, prop):
        '''Even if an animation is running, remove a property. It will not be
        animated further. If it was the only/last property being animated,
        the animation will be canceled (see :attr:`cancel`)
        .. versionadded:: 1.4.0
        '''
        props = self._widgets.get(widget.uid, None)
        if not props:
            return
        props['properties'].pop(prop, None)
        # no more properties to animation ? kill the animation.
        if not props['properties']:
            self.cancel(widget)

    def have_properties_to_animate(self, widget):
        '''Return True if a widget still has properties to animate.
        .. versionadded:: 1.8.0
        '''
        props = self._widgets.get(widget.uid, None)
        if props and props['properties']:
            return True

    #
    # Private
    #
    def _register(self):
        Animation._instances.add(self)

    def _unregister(self):
        Animation._instances.discard(self)

    def _initialize(self, widget):
        # Record the animation state for this widget; 'time' is set to None
        # so the first clock tick initializes it without advancing.
        d = self._widgets[widget.uid] = {
            'widget': widget,
            'properties': {},
            'time': None}
        # get current values: each property maps to (start value, target).
        p = d['properties']
        for key, value in self._animated_properties.items():
            original_value = getattr(widget, key)
            # Copy mutable values so the interpolation base stays fixed.
            if isinstance(original_value, (tuple, list)):
                original_value = original_value[:]
            elif isinstance(original_value, dict):
                original_value = original_value.copy()
            p[key] = (original_value, value)
        # install clock
        self._clock_install()

    def _clock_install(self):
        if self._clock_installed:
            return
        self._update_ev = Clock.schedule_interval(self._update, self._step)
        self._clock_installed = True

    def _clock_uninstall(self):
        # Keep the clock while any widget is still being animated.
        if self._widgets or not self._clock_installed:
            return
        self._clock_installed = False
        if self._update_ev is not None:
            self._update_ev.cancel()
            self._update_ev = None

    def _update(self, dt):
        # Per-frame driver: advance time, interpolate every animated
        # property and dispatch progress/completion events.
        widgets = self._widgets
        transition = self._transition
        calculate = self._calculate
        for uid in list(widgets.keys()):
            anim = widgets[uid]
            widget = anim['widget']
            if isinstance(widget, WeakProxy) and not len(dir(widget)):
                # empty proxy, widget is gone. ref: #2458
                self._widgets.pop(uid, None)
                self._clock_uninstall()
                if not self._widgets:
                    self._unregister()
                continue
            if anim['time'] is None:
                # First tick after start: begin at t=0 instead of dt.
                anim['time'] = 0.
            else:
                anim['time'] += dt
            # calculate progression (zero duration completes immediately)
            if self._duration:
                progress = min(1., anim['time'] / self._duration)
            else:
                progress = 1
            t = transition(progress)
            # apply progression on widget
            for key, values in anim['properties'].items():
                a, b = values
                value = calculate(a, b, t)
                setattr(widget, key, value)
            self.dispatch('on_progress', widget, progress)
            # time to stop ?
            if progress >= 1.:
                self.stop(widget)

    def _calculate(self, a, b, t):
        # Interpolate between a and b at parameter t, recursing into
        # lists/tuples element-wise and dicts key-wise.
        _calculate = self._calculate
        if isinstance(a, list) or isinstance(a, tuple):
            if isinstance(a, list):
                tp = list
            else:
                tp = tuple
            return tp([_calculate(a[x], b[x], t) for x in range(len(a))])
        elif isinstance(a, dict):
            d = {}
            for x in iterkeys(a):
                if x not in b:
                    # User requested to animate only part of the dict.
                    # Copy the rest
                    d[x] = a[x]
                else:
                    d[x] = _calculate(a[x], b[x], t)
            return d
        else:
            return (a * (1. - t)) + (b * t)

    #
    # Default handlers
    #
    def on_start(self, widget):
        pass

    def on_progress(self, widget, progress):
        pass

    def on_complete(self, widget):
        pass

    def __add__(self, animation):
        # anim1 + anim2 runs them one after the other.
        return Sequence(self, animation)

    def __and__(self, animation):
        # anim1 & anim2 runs them simultaneously.
        return Parallel(self, animation)
class CompoundAnimation(Animation):
    '''Common behavior for animations built from two sub-animations
    (``anim1`` and ``anim2``); see :class:`Sequence` and :class:`Parallel`.
    '''

    def stop_property(self, widget, prop):
        # Forward to both children; stop the compound only when neither
        # child has anything left to animate on this widget.
        self.anim1.stop_property(widget, prop)
        self.anim2.stop_property(widget, prop)
        if (not self.anim1.have_properties_to_animate(widget) and
                not self.anim2.have_properties_to_animate(widget)):
            self.stop(widget)

    def cancel(self, widget):
        self.anim1.cancel(widget)
        self.anim2.cancel(widget)
        super().cancel(widget)

    def cancel_property(self, widget, prop):
        '''Even if an animation is running, remove a property. It will not be
        animated further. If it was the only/last property being animated,
        the animation will be canceled (see :attr:`cancel`)
        This method overrides `:class:kivy.animation.Animation`'s
        version, to cancel it on all animations of the Sequence.
        .. versionadded:: 1.10.0
        '''
        self.anim1.cancel_property(widget, prop)
        self.anim2.cancel_property(widget, prop)
        if (not self.anim1.have_properties_to_animate(widget) and
                not self.anim2.have_properties_to_animate(widget)):
            self.cancel(widget)

    def have_properties_to_animate(self, widget):
        return (self.anim1.have_properties_to_animate(widget) or
                self.anim2.have_properties_to_animate(widget))

    @property
    def animated_properties(self):
        # anim2's targets take precedence over anim1's for shared keys.
        return ChainMap({},
                        self.anim2.animated_properties,
                        self.anim1.animated_properties)

    @property
    def transition(self):
        # This property is impossible to implement
        raise AttributeError(
            "Can't lookup transition attribute of a CompoundAnimation")
class Sequence(CompoundAnimation):
    '''Run ``anim1`` to completion, then ``anim2`` (created by ``a1 + a2``).'''

    def __init__(self, anim1, anim2):
        super().__init__()

        #: Repeat the sequence. See 'Repeating animation' in the header
        #: documentation.
        self.repeat = False

        self.anim1 = anim1
        self.anim2 = anim2

        # Chain the children: anim1's completion starts anim2; each child's
        # progress is mapped into one half of the compound progress.
        self.anim1.bind(on_complete=self.on_anim1_complete,
                        on_progress=self.on_anim1_progress)
        self.anim2.bind(on_complete=self.on_anim2_complete,
                        on_progress=self.on_anim2_progress)

    @property
    def duration(self):
        return self.anim1.duration + self.anim2.duration

    def stop(self, widget):
        props = self._widgets.pop(widget.uid, None)
        self.anim1.stop(widget)
        self.anim2.stop(widget)
        if props:
            self.dispatch('on_complete', widget)
        # Clean up registration without dispatching on_complete again.
        super().cancel(widget)

    def start(self, widget):
        self.stop(widget)
        # The registry value is only a "running" marker for this widget.
        self._widgets[widget.uid] = True
        self._register()
        self.dispatch('on_start', widget)
        self.anim1.start(widget)

    def on_anim1_complete(self, instance, widget):
        if widget.uid not in self._widgets:
            # The sequence was stopped/cancelled in the meantime.
            return
        self.anim2.start(widget)

    def on_anim1_progress(self, instance, widget, progress):
        # anim1 covers the first half of the compound progress [0, 0.5].
        self.dispatch('on_progress', widget, progress / 2.)

    def on_anim2_complete(self, instance, widget):
        '''Repeating logic used with boolean variable "repeat".
        .. versionadded:: 1.7.1
        '''
        if widget.uid not in self._widgets:
            return
        if self.repeat:
            self.anim1.start(widget)
        else:
            self.dispatch('on_complete', widget)
            self.cancel(widget)

    def on_anim2_progress(self, instance, widget, progress):
        # anim2 covers the second half of the compound progress [0.5, 1].
        self.dispatch('on_progress', widget, .5 + progress / 2.)
class Parallel(CompoundAnimation):
    '''Composite animation that runs two animations at the same time.

    Both children are started together; the parallel animation completes
    once both children have dispatched ``on_complete`` for the widget.
    '''

    def __init__(self, anim1, anim2):
        super().__init__()
        self.anim1 = anim1
        self.anim2 = anim2

        # Both children report to the same handler; a per-widget counter
        # (see start()) tracks how many have finished.
        self.anim1.bind(on_complete=self.on_anim_complete)
        self.anim2.bind(on_complete=self.on_anim_complete)

    @property
    def duration(self):
        # Running concurrently, so the longer child dominates.
        return max(self.anim1.duration, self.anim2.duration)

    def stop(self, widget):
        self.anim1.stop(widget)
        self.anim2.stop(widget)
        # Only dispatch 'on_complete' if the widget was actually tracked.
        if self._widgets.pop(widget.uid, None):
            self.dispatch('on_complete', widget)
        super().cancel(widget)

    def start(self, widget):
        # Restart semantics: stop any in-flight run before starting over.
        self.stop(widget)
        # NOTE(review): the children are started before the tracking entry
        # exists, so a child completing synchronously (zero duration) is
        # ignored by the guard in on_anim_complete — confirm intended.
        self.anim1.start(widget)
        self.anim2.start(widget)
        self._widgets[widget.uid] = {'complete': 0}
        self._register()
        self.dispatch('on_start', widget)

    def on_anim_complete(self, instance, widget):
        # Guard added for consistency with Sequence.on_anim*_complete: a
        # child may dispatch 'on_complete' for a widget this Parallel no
        # longer tracks (e.g. after stop()), which previously raised
        # KeyError here.
        if widget.uid not in self._widgets:
            return
        self._widgets[widget.uid]['complete'] += 1
        if self._widgets[widget.uid]['complete'] == 2:
            self.stop(widget)
class AnimationTransition:
    '''Collection of animation functions to be used with the Animation object.

    Easing Functions ported to Kivy from the Clutter Project
    https://developer.gnome.org/clutter/stable/ClutterAlpha.html

    The `progress` parameter in each animation function is in the range 0-1.
    '''

    @staticmethod
    def linear(progress):
        '''.. image:: images/anim_linear.png'''
        # Constant velocity: output equals input.
        return progress

    @staticmethod
    def in_quad(progress):
        '''.. image:: images/anim_in_quad.png
        '''
        # Quadratic acceleration from rest.
        return progress * progress

    @staticmethod
    def out_quad(progress):
        '''.. image:: images/anim_out_quad.png
        '''
        # Quadratic deceleration to rest.
        return -1.0 * progress * (progress - 2.0)

    @staticmethod
    def in_out_quad(progress):
        '''.. image:: images/anim_in_out_quad.png
        '''
        # Accelerate over the first half, decelerate over the second.
        t = progress * 2
        if t < 1:
            return 0.5 * t * t
        t -= 1.0
        return -0.5 * (t * (t - 2.0) - 1.0)

    @staticmethod
    def in_cubic(progress):
        '''.. image:: images/anim_in_cubic.png
        '''
        return progress * progress * progress

    @staticmethod
    def out_cubic(progress):
        '''.. image:: images/anim_out_cubic.png
        '''
        t = progress - 1.0
        return t * t * t + 1.0

    @staticmethod
    def in_out_cubic(progress):
        '''.. image:: images/anim_in_out_cubic.png
        '''
        t = progress * 2
        if t < 1:
            return 0.5 * t * t * t
        t -= 2
        return 0.5 * (t * t * t + 2.0)

    @staticmethod
    def in_quart(progress):
        '''.. image:: images/anim_in_quart.png
        '''
        return progress * progress * progress * progress

    @staticmethod
    def out_quart(progress):
        '''.. image:: images/anim_out_quart.png
        '''
        t = progress - 1.0
        return -1.0 * (t * t * t * t - 1.0)

    @staticmethod
    def in_out_quart(progress):
        '''.. image:: images/anim_in_out_quart.png
        '''
        t = progress * 2
        if t < 1:
            return 0.5 * t * t * t * t
        t -= 2
        return -0.5 * (t * t * t * t - 2.0)

    @staticmethod
    def in_quint(progress):
        '''.. image:: images/anim_in_quint.png
        '''
        return progress * progress * progress * progress * progress

    @staticmethod
    def out_quint(progress):
        '''.. image:: images/anim_out_quint.png
        '''
        t = progress - 1.0
        return t * t * t * t * t + 1.0

    @staticmethod
    def in_out_quint(progress):
        '''.. image:: images/anim_in_out_quint.png
        '''
        t = progress * 2
        if t < 1:
            return 0.5 * t * t * t * t * t
        t -= 2.0
        return 0.5 * (t * t * t * t * t + 2.0)

    @staticmethod
    def in_sine(progress):
        '''.. image:: images/anim_in_sine.png
        '''
        return -1.0 * cos(progress * (pi / 2.0)) + 1.0

    @staticmethod
    def out_sine(progress):
        '''.. image:: images/anim_out_sine.png
        '''
        return sin(progress * (pi / 2.0))

    @staticmethod
    def in_out_sine(progress):
        '''.. image:: images/anim_in_out_sine.png
        '''
        return -0.5 * (cos(pi * progress) - 1.0)

    @staticmethod
    def in_expo(progress):
        '''.. image:: images/anim_in_expo.png
        '''
        # 2**(10*(x-1)) is not exactly 0 at x == 0, so pin the endpoint.
        if progress == 0:
            return 0.0
        return pow(2, 10 * (progress - 1.0))

    @staticmethod
    def out_expo(progress):
        '''.. image:: images/anim_out_expo.png
        '''
        # Pin the endpoint; the formula only approaches 1 asymptotically.
        if progress == 1.0:
            return 1.0
        return -pow(2, -10 * progress) + 1.0

    @staticmethod
    def in_out_expo(progress):
        '''.. image:: images/anim_in_out_expo.png
        '''
        if progress == 0:
            return 0.0
        if progress == 1.:
            return 1.0
        t = progress * 2
        if t < 1:
            return 0.5 * pow(2, 10 * (t - 1.0))
        t -= 1.0
        return 0.5 * (-pow(2, -10 * t) + 2.0)

    @staticmethod
    def in_circ(progress):
        '''.. image:: images/anim_in_circ.png
        '''
        return -1.0 * (sqrt(1.0 - progress * progress) - 1.0)

    @staticmethod
    def out_circ(progress):
        '''.. image:: images/anim_out_circ.png
        '''
        t = progress - 1.0
        return sqrt(1.0 - t * t)

    @staticmethod
    def in_out_circ(progress):
        '''.. image:: images/anim_in_out_circ.png
        '''
        t = progress * 2
        if t < 1:
            return -0.5 * (sqrt(1.0 - t * t) - 1.0)
        t -= 2.0
        return 0.5 * (sqrt(1.0 - t * t) + 1.0)

    @staticmethod
    def in_elastic(progress):
        '''.. image:: images/anim_in_elastic.png
        '''
        period = .3
        shift = period / 4.0
        t = progress
        if t == 1:
            return 1.0
        t -= 1.0
        return -(pow(2, 10 * t) * sin((t - shift) * (2 * pi) / period))

    @staticmethod
    def out_elastic(progress):
        '''.. image:: images/anim_out_elastic.png
        '''
        period = .3
        shift = period / 4.0
        t = progress
        if t == 1:
            return 1.0
        return pow(2, -10 * t) * sin((t - shift) * (2 * pi) / period) + 1.0

    @staticmethod
    def in_out_elastic(progress):
        '''.. image:: images/anim_in_out_elastic.png
        '''
        period = .3 * 1.5
        shift = period / 4.0
        t = progress * 2
        if t == 2:
            return 1.0
        if t < 1:
            t -= 1.0
            return -.5 * (pow(2, 10 * t) * sin((t - shift) * (2.0 * pi) / period))
        else:
            t -= 1.0
            return pow(2, -10 * t) * sin((t - shift) * (2.0 * pi) / period) * .5 + 1.0

    @staticmethod
    def in_back(progress):
        '''.. image:: images/anim_in_back.png
        '''
        # 1.70158 is the classic 'back' overshoot constant.
        return progress * progress * ((1.70158 + 1.0) * progress - 1.70158)

    @staticmethod
    def out_back(progress):
        '''.. image:: images/anim_out_back.png
        '''
        t = progress - 1.0
        return t * t * ((1.70158 + 1) * t + 1.70158) + 1.0

    @staticmethod
    def in_out_back(progress):
        '''.. image:: images/anim_in_out_back.png
        '''
        t = progress * 2.
        s = 1.70158 * 1.525
        if t < 1:
            return 0.5 * (t * t * ((s + 1.0) * t - s))
        t -= 2.0
        return 0.5 * (t * t * ((s + 1.0) * t + s) + 2.0)

    @staticmethod
    def _out_bounce_internal(t, d):
        # Piecewise parabolas modelling a ball bouncing to rest.
        ratio = t / d
        if ratio < (1.0 / 2.75):
            return 7.5625 * ratio * ratio
        elif ratio < (2.0 / 2.75):
            ratio -= (1.5 / 2.75)
            return 7.5625 * ratio * ratio + .75
        elif ratio < (2.5 / 2.75):
            ratio -= (2.25 / 2.75)
            return 7.5625 * ratio * ratio + .9375
        else:
            ratio -= (2.625 / 2.75)
            return 7.5625 * ratio * ratio + .984375

    @staticmethod
    def _in_bounce_internal(t, d):
        # Time-reversed mirror of the out-bounce curve.
        return 1.0 - AnimationTransition._out_bounce_internal(d - t, d)

    @staticmethod
    def in_bounce(progress):
        '''.. image:: images/anim_in_bounce.png
        '''
        return AnimationTransition._in_bounce_internal(progress, 1.)

    @staticmethod
    def out_bounce(progress):
        '''.. image:: images/anim_out_bounce.png
        '''
        return AnimationTransition._out_bounce_internal(progress, 1.)

    @staticmethod
    def in_out_bounce(progress):
        '''.. image:: images/anim_in_out_bounce.png
        '''
        t = progress * 2.
        if t < 1.:
            return AnimationTransition._in_bounce_internal(t, 1.) * .5
        return AnimationTransition._out_bounce_internal(t - 1., 1.) * .5 + .5
/DeepCell-CPU-0.12.9.tar.gz/DeepCell-CPU-0.12.9/deepcell/utils/backbone_utils.py | import copy
from tensorflow.keras import backend as K
from tensorflow.keras import applications
from tensorflow.keras.backend import is_keras_tensor
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, Conv3D, BatchNormalization
from tensorflow.keras.layers import Activation, MaxPool2D, MaxPool3D
from tensorflow.keras.layers import TimeDistributed
from tensorflow.keras.utils import get_source_inputs
def featurenet_block(x, n_filters):
    """Add a set of layers that make up one unit of the featurenet backbone.

    The unit is two ``Conv2D -> BatchNorm -> ReLU`` stacks followed by a
    2x2 max-pool that halves the spatial resolution.

    Args:
        x (tensorflow.keras.Layer): Keras layer object to pass to
            backbone unit
        n_filters (int): Number of filters to use for convolutional layers

    Returns:
        tensorflow.keras.Layer: Keras layer object
    """
    data_format = K.image_data_format()

    # Two identical convolution sets.
    for _ in range(2):
        x = Conv2D(n_filters, (3, 3), strides=(1, 1), padding='same',
                   data_format=data_format)(x)
        x = BatchNormalization(axis=-1)(x)
        x = Activation('relu')(x)

    # Final max pooling stage: halve the spatial dimensions.
    x = MaxPool2D(pool_size=(2, 2), padding='same', data_format=data_format)(x)
    return x
def featurenet_3D_block(x, n_filters):
    """Add a set of layers that make up one unit of the featurenet 3D backbone.

    The unit is two ``Conv3D -> BatchNorm -> ReLU`` stacks followed by a
    2x2x2 max-pool that halves each spatial dimension.

    Args:
        x (tensorflow.keras.Layer): Keras layer object to pass to
            backbone unit
        n_filters (int): Number of filters to use for convolutional layers

    Returns:
        tensorflow.keras.Layer: Keras layer object
    """
    data_format = K.image_data_format()

    # Two identical convolution sets.
    for _ in range(2):
        x = Conv3D(n_filters, (3, 3, 3), strides=(1, 1, 1), padding='same',
                   data_format=data_format)(x)
        x = BatchNormalization(axis=-1)(x)
        x = Activation('relu')(x)

    # Final max pooling stage.
    x = MaxPool3D(pool_size=(2, 2, 2), data_format=data_format)(x)
    return x
def featurenet_backbone(input_tensor=None, input_shape=None,
                        n_filters=32, **kwargs):
    """Construct the deepcell backbone with five convolutional units.

    Args:
        input_tensor (tensor): Input tensor to specify input size
        input_shape (tuple): Shape of the input when ``input_tensor`` is
            not provided.
        n_filters (int): Number of filters for convolutional layers

    Returns:
        tuple: (backbone model, dict of pyramid-level outputs C1-C5)
    """
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    elif not is_keras_tensor(input_tensor):
        img_input = Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor

    # Five stacked pooling units; each halves the spatial resolution
    # (1/2, 1/4, 1/8, 1/16, 1/32 of the input size).
    backbone_features = []
    layer = img_input
    for _ in range(5):
        layer = featurenet_block(layer, n_filters)
        backbone_features.append(layer)

    output_dict = dict(zip(['C1', 'C2', 'C3', 'C4', 'C5'], backbone_features))

    inputs = img_input if input_tensor is None else get_source_inputs(input_tensor)
    model = Model(inputs=inputs, outputs=backbone_features)
    return model, output_dict
def featurenet_3D_backbone(input_tensor=None, input_shape=None,
                           n_filters=32, **kwargs):
    """Construct the deepcell 3D backbone with five convolutional units.

    Args:
        input_tensor (tensor): Input tensor to specify input size
        input_shape (tuple): Shape of the input when ``input_tensor`` is
            not provided.
        n_filters (int): Number of filters for convolutional layers

    Returns:
        tuple: (backbone model, dict of pyramid-level outputs C1-C5)
    """
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    elif not is_keras_tensor(input_tensor):
        img_input = Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor

    # Five stacked pooling units; each halves the spatial resolution
    # (1/2, 1/4, 1/8, 1/16, 1/32 of the input size).
    backbone_features = []
    layer = img_input
    for _ in range(5):
        layer = featurenet_3D_block(layer, n_filters)
        backbone_features.append(layer)

    output_dict = dict(zip(['C1', 'C2', 'C3', 'C4', 'C5'], backbone_features))

    inputs = img_input if input_tensor is None else get_source_inputs(input_tensor)
    model = Model(inputs=inputs, outputs=backbone_features)
    return model, output_dict
def _copy_imagenet_weights(model, model_cls, weight_kwargs):
    """Copy pre-trained imagenet weights into an already-built model.

    Keras applications only ship imagenet weights for their canonical
    configuration, so a second, throwaway model is instantiated with
    ``weights='imagenet'`` and its weights are transferred into ``model``
    by layer name through a temporary HDF5 file in the working directory.

    Args:
        model (tensorflow.keras.Model): model to receive the weights.
        model_cls (callable): Keras application constructor.
        weight_kwargs (dict): constructor kwargs including
            ``weights='imagenet'``.
    """
    model_with_weights = model_cls(**weight_kwargs)
    model_with_weights.save_weights('model_weights.h5')
    model.load_weights('model_weights.h5', by_name=True)


def get_backbone(backbone, input_tensor=None, input_shape=None,
                 use_imagenet=False, return_dict=True,
                 frames_per_batch=1, **kwargs):
    """Retrieve backbones for the construction of feature pyramid networks.

    Args:
        backbone (str): Name of the backbone to be retrieved.
        input_tensor (tensor): The input tensor for the backbone.
            Should have channel dimension of size 3
        input_shape (tuple): Shape of the input, used to build an input
            layer when ``input_tensor`` is not given.
        use_imagenet (bool): Load pre-trained weights for the backbone
        return_dict (bool): Whether to return a dictionary of backbone layers,
            e.g. ``{'C1': C1, 'C2': C2, 'C3': C3, 'C4': C4, 'C5': C5}``.
            If false, the whole model is returned instead
        frames_per_batch (int): Size of the time axis. When greater than 1
            the 2D backbone is wrapped in ``TimeDistributed`` layers and
            applied to ``input_tensor`` (which must then be provided).
        kwargs (dict): Keyword dictionary for backbone constructions.
            Relevant keys include ``'include_top'``,
            ``'weights'`` (should be ``None``),
            ``'input_shape'``, and ``'pooling'``.

    Returns:
        tensorflow.keras.Model: An instantiated backbone

    Raises:
        ValueError: bad backbone name
        ValueError: featurenet backbone with pre-trained imagenet
    """
    _backbone = str(backbone).lower()

    featurenet_backbones = {
        'featurenet': featurenet_backbone,
        'featurenet3d': featurenet_3D_backbone,
        'featurenet_3d': featurenet_3D_backbone
    }
    vgg_backbones = {
        'vgg16': applications.vgg16.VGG16,
        'vgg19': applications.vgg19.VGG19,
    }
    densenet_backbones = {
        'densenet121': applications.densenet.DenseNet121,
        'densenet169': applications.densenet.DenseNet169,
        'densenet201': applications.densenet.DenseNet201,
    }
    mobilenet_backbones = {
        'mobilenet': applications.mobilenet.MobileNet,
        'mobilenetv2': applications.mobilenet_v2.MobileNetV2,
        'mobilenet_v2': applications.mobilenet_v2.MobileNetV2
    }
    resnet_backbones = {
        'resnet50': applications.resnet.ResNet50,
        'resnet101': applications.resnet.ResNet101,
        'resnet152': applications.resnet.ResNet152,
    }
    resnet_v2_backbones = {
        'resnet50v2': applications.resnet_v2.ResNet50V2,
        'resnet101v2': applications.resnet_v2.ResNet101V2,
        'resnet152v2': applications.resnet_v2.ResNet152V2,
    }
    nasnet_backbones = {
        'nasnet_large': applications.nasnet.NASNetLarge,
        'nasnet_mobile': applications.nasnet.NASNetMobile,
    }
    efficientnet_backbones = {
        'efficientnetb0': applications.efficientnet.EfficientNetB0,
        'efficientnetb1': applications.efficientnet.EfficientNetB1,
        'efficientnetb2': applications.efficientnet.EfficientNetB2,
        'efficientnetb3': applications.efficientnet.EfficientNetB3,
        'efficientnetb4': applications.efficientnet.EfficientNetB4,
        'efficientnetb5': applications.efficientnet.EfficientNetB5,
        'efficientnetb6': applications.efficientnet.EfficientNetB6,
        'efficientnetb7': applications.efficientnet.EfficientNetB7,
    }
    efficientnet_v2_backbones = {
        'efficientnetv2b0': applications.efficientnet_v2.EfficientNetV2B0,
        'efficientnetv2b1': applications.efficientnet_v2.EfficientNetV2B1,
        'efficientnetv2b2': applications.efficientnet_v2.EfficientNetV2B2,
        'efficientnetv2b3': applications.efficientnet_v2.EfficientNetV2B3,
        'efficientnetv2bl': applications.efficientnet_v2.EfficientNetV2L,
        'efficientnetv2bm': applications.efficientnet_v2.EfficientNetV2M,
        'efficientnetv2bs': applications.efficientnet_v2.EfficientNetV2S,
    }

    if frames_per_batch == 1:
        if input_tensor is not None:
            img_input = input_tensor
        else:
            img_input = Input(shape=input_shape) if input_shape \
                else Input(shape=(None, None, 3))
    else:
        # Using 3D data with a 2D backbone: build a fresh 2D input here;
        # the backbone is wrapped in TimeDistributed layers below and
        # applied to input_tensor.
        img_input = Input(shape=input_shape) if input_shape \
            else Input(shape=(None, None, 3))

    # Work on copies so the caller's kwargs dict is never mutated.
    # The main model is always built with weights=None (pre-trained
    # weights are copied in afterwards via _copy_imagenet_weights so
    # that custom input shapes/tensors keep working).
    kwargs = copy.copy(kwargs)
    kwargs_with_weights = copy.copy(kwargs)
    kwargs_with_weights['weights'] = 'imagenet'
    kwargs['weights'] = None

    if _backbone in featurenet_backbones:
        if use_imagenet:
            raise ValueError('A featurenet backbone that is pre-trained on '
                             'imagenet does not exist')
        model_cls = featurenet_backbones[_backbone]
        model, output_dict = model_cls(input_tensor=img_input, **kwargs)
        layer_outputs = [output_dict[name]
                         for name in ('C1', 'C2', 'C3', 'C4', 'C5')]

    elif _backbone in vgg_backbones:
        model_cls = vgg_backbones[_backbone]
        model = model_cls(input_tensor=img_input, **kwargs)
        if use_imagenet:
            _copy_imagenet_weights(model, model_cls, kwargs_with_weights)
        layer_names = ['block1_pool', 'block2_pool', 'block3_pool',
                       'block4_pool', 'block5_pool']
        layer_outputs = [model.get_layer(name=ln).output for ln in layer_names]

    elif _backbone in densenet_backbones:
        model_cls = densenet_backbones[_backbone]
        model = model_cls(input_tensor=img_input, **kwargs)
        # Number of dense blocks per stage defines the concat layer names.
        if _backbone == 'densenet121':
            blocks = [6, 12, 24, 16]
        elif _backbone == 'densenet169':
            blocks = [6, 12, 32, 32]
        elif _backbone == 'densenet201':
            blocks = [6, 12, 48, 32]
        if use_imagenet:
            _copy_imagenet_weights(model, model_cls, kwargs_with_weights)
        layer_names = ['conv1/relu'] + [f'conv{idx + 2}_block{block_num}_concat'
                                        for idx, block_num in enumerate(blocks)]
        layer_outputs = [model.get_layer(name=ln).output for ln in layer_names]

    elif _backbone in resnet_backbones:
        model_cls = resnet_backbones[_backbone]
        model = model_cls(input_tensor=img_input, **kwargs)
        if use_imagenet:
            _copy_imagenet_weights(model, model_cls, kwargs_with_weights)
        if _backbone == 'resnet50':
            layer_names = ['conv1_relu', 'conv2_block3_out', 'conv3_block4_out',
                           'conv4_block6_out', 'conv5_block3_out']
        elif _backbone == 'resnet101':
            layer_names = ['conv1_relu', 'conv2_block3_out', 'conv3_block4_out',
                           'conv4_block23_out', 'conv5_block3_out']
        elif _backbone == 'resnet152':
            layer_names = ['conv1_relu', 'conv2_block3_out', 'conv3_block8_out',
                           'conv4_block36_out', 'conv5_block3_out']
        layer_outputs = [model.get_layer(name=ln).output for ln in layer_names]

    elif _backbone in resnet_v2_backbones:
        model_cls = resnet_v2_backbones[_backbone]
        model = model_cls(input_tensor=img_input, **kwargs)
        if use_imagenet:
            _copy_imagenet_weights(model, model_cls, kwargs_with_weights)
        if _backbone == 'resnet50v2':
            layer_names = ['post_relu', 'conv2_block3_out', 'conv3_block4_out',
                           'conv4_block6_out', 'conv5_block3_out']
        elif _backbone == 'resnet101v2':
            layer_names = ['post_relu', 'conv2_block3_out', 'conv3_block4_out',
                           'conv4_block23_out', 'conv5_block3_out']
        elif _backbone == 'resnet152v2':
            layer_names = ['post_relu', 'conv2_block3_out', 'conv3_block8_out',
                           'conv4_block36_out', 'conv5_block3_out']
        layer_outputs = [model.get_layer(name=ln).output for ln in layer_names]

    elif _backbone in mobilenet_backbones:
        model_cls = mobilenet_backbones[_backbone]
        alpha = kwargs.pop('alpha', 1.0)
        model = model_cls(alpha=alpha, input_tensor=img_input, **kwargs)
        if _backbone.endswith('v2'):
            block_ids = (2, 5, 12)
            layer_names = ['expanded_conv_project_BN'] + \
                          ['block_%s_add' % i for i in block_ids] + \
                          ['block_16_project_BN']
        else:
            block_ids = (1, 3, 5, 11, 13)
            layer_names = ['conv_pw_%s_relu' % i for i in block_ids]
        if use_imagenet:
            # 'alpha' is passed explicitly; override any caller-supplied
            # value in the weight kwargs to avoid a duplicate keyword.
            _copy_imagenet_weights(model, model_cls,
                                   {**kwargs_with_weights, 'alpha': alpha})
        layer_outputs = [model.get_layer(name=ln).output for ln in layer_names]

    elif _backbone in nasnet_backbones:
        model_cls = nasnet_backbones[_backbone]
        model = model_cls(input_tensor=img_input, **kwargs)
        if _backbone.endswith('large'):
            block_ids = [5, 12, 18]
        else:
            block_ids = [3, 8, 12]
        if use_imagenet:
            _copy_imagenet_weights(model, model_cls, kwargs_with_weights)
        layer_names = ['stem_bn1', 'reduction_concat_stem_1']
        layer_names.extend(['normal_concat_%s' % i for i in block_ids])
        layer_outputs = [model.get_layer(name=ln).output for ln in layer_names]

    elif _backbone in efficientnet_backbones:
        model_cls = efficientnet_backbones[_backbone]
        model = model_cls(input_tensor=img_input, **kwargs)
        if use_imagenet:
            _copy_imagenet_weights(model, model_cls, kwargs_with_weights)
        layer_names = ['block2a_expand_activation', 'block3a_expand_activation',
                       'block4a_expand_activation', 'block6a_expand_activation',
                       'top_activation']
        layer_outputs = [model.get_layer(name=ln).output for ln in layer_names]

    elif _backbone in efficientnet_v2_backbones:
        model_cls = efficientnet_v2_backbones[_backbone]
        # EfficientNetV2 embeds input preprocessing by default; disable it
        # so this backbone behaves like the others.
        kwargs['include_preprocessing'] = False
        model = model_cls(input_tensor=img_input, **kwargs)
        if use_imagenet:
            kwargs_with_weights['include_preprocessing'] = False
            _copy_imagenet_weights(model, model_cls, kwargs_with_weights)
        layer_names = ['block1b_add', 'block2c_add',
                       'block4a_expand_activation', 'block6a_expand_activation',
                       'top_activation']
        layer_outputs = [model.get_layer(name=ln).output for ln in layer_names]

    else:
        backbones = [name for d in (featurenet_backbones, densenet_backbones,
                                    resnet_backbones, resnet_v2_backbones,
                                    vgg_backbones, nasnet_backbones,
                                    mobilenet_backbones, efficientnet_backbones,
                                    efficientnet_v2_backbones)
                     for name in d]
        raise ValueError('Invalid value for `backbone`. Must be one of: %s' %
                         ', '.join(backbones))

    if frames_per_batch > 1:
        # Wrap each pyramid output in its own TimeDistributed sub-model and
        # apply it to the (time-axis) input tensor.
        time_distributed_outputs = []
        for i, out in enumerate(layer_outputs):
            time_distributed_outputs.append(
                TimeDistributed(Model(model.input, out, name=f'model_{i}'),
                                name=f'td_{i}')(input_tensor))
        if time_distributed_outputs:
            layer_outputs = time_distributed_outputs

    output_dict = {f'C{i + 1}': j for i, j in enumerate(layer_outputs)}
    return (model, output_dict) if return_dict else model
/Flask-Inflate-0.3.tar.gz/Flask-Inflate-0.3/README.md | # Flask-Inflate
A simple flask extension to automatically decompress gzipped (compressed) request data sent by clients.
It does not read the whole request body into memory; it simply allows reading from the original data stream as if it had never been compressed.
If the content is not gzipped, it will remain intact.
## Install
`pip install flask-inflate`
## How to use
If you want to enable it for all requests, application wide:
```python
from flask import Flask
from flask_inflate import Inflate
app = Flask(__name__)
Inflate(app)
# or
inf = Inflate()
inf.init_app(app)
@app.route('/')
def hello_world():
return 'Hello, World!'
```
If you want to enable decompressing only for certain views:
```python
from flask import Flask
from flask_inflate import inflate
app = Flask(__name__)
@app.route('/a')
@inflate
def possibly_gzipped_content_function():
return 'I can deal with both gzipped and regular content!'
@app.route('/b')
def regular_function():
return 'I can deal only with non gzipped content!'
```
Feature requests, issues and PRs are welcome! | PypiClean |
/Claver-Message-Board-0.0.4.tar.gz/Claver-Message-Board-0.0.4/interface/settings/categories/AboutMenu.py | import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GdkPixbuf
from interface.settings.SettingsMenuTemplate import SettingsMenuTemplate
import interface.settings.Settings as settings
class AboutMenu(SettingsMenuTemplate):
    """Settings menu page that displays information about the application.

    Builds a header banner, a floating portrait image and an information
    area inside a :class:`Gtk.Overlay`, and rescales both images whenever
    the settings content area is resized.
    """

    # Width (px) of the content area the layout was designed against.
    # ! NOTE !! - This is only true for 1280 * 720
    __DEFAULT_CONTENT_WIDTH = 882

    def __init__(self, settingsManager):
        """ Constructor """
        super().__init__(settingsManager)
        # Header banner image and its native (unscaled) dimensions.
        self.__headerImageFile = settings.res_dir['TEXTURES'] + "AboutHeader.png"
        self.__headerImageWidth = 800
        self.__headerImageHeight = 219
        self.__headerImage = None
        # Portrait image overlaid on the top-right corner.
        self.__portraitImageFile = settings.res_dir['TEXTURES'] + "ClaverCanvasMatthew.png"
        self.__portraitImageWidth = 250
        self.__portraitImageHeight = 350
        self.__portraitImage = None
        # Overlay container lets the portrait float above the main grid.
        self.__layoutContainer = Gtk.Overlay()
        self.__build_content()

    def getLayoutContainer(self):
        """ Accessor function: returns Gtk layout container """
        return self.__layoutContainer

    @SettingsMenuTemplate.callback_on_resize
    def resizeAreaCallback(self):
        """Rescale the header and portrait images when the content width changes."""
        # First assignment shadows the class-level default with an
        # instance attribute that caches the current width.
        if super().getContentAreaSize()[0] != self.__DEFAULT_CONTENT_WIDTH:
            self.__DEFAULT_CONTENT_WIDTH = super().getContentAreaSize()[0]
            self.__updateHeaderImage()
            self.__updatePortraitImage()

    def __build_content(self):
        """Assemble the page: header image, info area and portrait overlay."""
        main_layer = Gtk.Grid(column_homogeneous=False, column_spacing=0, row_spacing=0)

        # Header Image (row 0 of the grid)
        self.image_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
        self.image_box.set_hexpand(True)
        self.__headerImage = self.__constructHeaderImage(width=self.__headerImageWidth, height=self.__headerImageHeight, file=self.__headerImageFile, margin=15, hexpand=True)
        self.image_box.add(self.__headerImage)
        main_layer.attach(child=self.image_box, left=0, top=0, width=1, height=1)  # Add header to top of grid container

        # Information Area (row 1 of the grid)
        contentBox = Gtk.Box()
        contentBox.set_vexpand(True)
        contentBox.set_valign(Gtk.Align.START)
        contentBox.set_halign(Gtk.Align.CENTER)
        contentBox.get_style_context().add_class('test')
        self.__setAboutDetails(contentBox)
        main_layer.attach(child=contentBox, left=0, top=1, width=1, height=1)  # Add info area below the header
        self.__layoutContainer.add(main_layer)

        # Header Portrait: overlaid on top of the grid, anchored top-right.
        self.__portrait_box = Gtk.Box()
        self.__portrait_box.set_halign(Gtk.Align.END)
        self.__portrait_box.set_valign(Gtk.Align.START)
        self.__portraitImage = self.__constructHeaderImage(width=self.__portraitImageWidth, height=self.__portraitImageHeight, file=self.__portraitImageFile, css='about-menu-portrait')
        self.__portrait_box.add(self.__portraitImage)
        self.__layoutContainer.add_overlay(self.__portrait_box)

    def __constructHeaderImage(self, width, height, file, margin=0, css=None, hexpand=False):
        """Load *file* scaled to fit within *width* x *height* (aspect kept)
        and return it as a Gtk.Image with optional margin and CSS class."""
        buffer = GdkPixbuf.Pixbuf.new_from_file_at_scale(
            filename=file,
            width=width,
            height=height,
            preserve_aspect_ratio=True)
        image = Gtk.Image.new_from_pixbuf(buffer)
        image.set_hexpand(hexpand)
        image.set_margin_top(margin)
        if css is not None:
            image.get_style_context().add_class(css)
        return image

    def __updateHeaderImage(self):
        """Rebuild the header image at 90% of the current content width."""
        self.image_box.remove(self.__headerImage)
        # NOTE(review): height is computed as (native width / native height)
        # * scaled width, which looks like an inverted aspect ratio; since
        # preserve_aspect_ratio=True makes width the binding constraint,
        # the oversized height bound appears harmless — confirm intended.
        self.__headerImage = self.__constructHeaderImage(width=0.9 * super().getContentAreaSize()[0], height=self.__headerImageWidth / self.__headerImageHeight * 0.9 * super().getContentAreaSize()[0], file=self.__headerImageFile, margin=15, hexpand=True)
        self.image_box.add(self.__headerImage)
        self.image_box.show_all()

    def __updatePortraitImage(self):
        """Rebuild the portrait image at ~28% of the current content width."""
        self.__portrait_box.remove(self.__portraitImage)
        # NOTE(review): same inverted-looking aspect computation as
        # __updateHeaderImage — confirm intended.
        self.__portraitImage = self.__constructHeaderImage(width=0.283 * super().getContentAreaSize()[0], height=self.__portraitImageWidth / self.__portraitImageHeight * 0.9 * super().getContentAreaSize()[0], file=self.__portraitImageFile, css='about-menu-portrait')
        self.__portrait_box.add(self.__portraitImage)
        self.__portrait_box.show_all()

    def __setAboutDetails(self, contentBox):
        """Populate the information area with title/value rows.

        Currently only the build number is shown; further details should
        be gathered here and converted via __addItemLabel.
        """
        # - get required info details
        # - send off to convert to labels
        # - add to content box
        build = self.__addItemLabel("Build", settings.build_number)
        contentBox.add(build)

    def __addItemLabel(self, title, value):
        """Return a horizontal box holding a *title* label and a *value* label."""
        box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)
        box.set_homogeneous(False)
        cat_title = Gtk.Label()
        cat_title.set_text(title)
        cat_value = Gtk.Label()
        cat_value.set_text(value)
        cat_value.get_style_context().add_class('test-size')
        box.add(cat_title)
        box.add(cat_value)
        return box
/CubeLang-0.1.4-py3-none-any.whl/cli/entry.py | import sys
from argparse import ArgumentParser
from lark import UnexpectedCharacters
from lark.exceptions import LarkError
from .cube_builder import init_cube_args_parser, build_cube
from .error_display import ErrorsOutput
from .options import file_contents_type
from .postprocessors_builder import init_postprocessors_args_parser, build_postprocessors_chain
from ..compiler import Stack, parser
from ..compiler.errors import CompileTimeError, FunctionArgumentsError
from ..cube_runtime import CubeRuntime
from ..execution import ExecutionContext
from ..stdlib import stdlib
from .. import __version__
def main():
    """Entry point for the CubeLang command-line interpreter.

    Parses command-line arguments, builds the cube and its postprocessor
    chain, compiles the given CubeLang source and executes it.  Syntax and
    compile-time errors are reported to stderr with source context and
    abort the run.
    """
    args_parser = ArgumentParser()
    args_parser.add_argument("source", type=file_contents_type,
                             help="program's source")
    args_parser.add_argument("-v", "--version", action="version", version="%(prog)s " + __version__)
    init_cube_args_parser(args_parser)
    init_postprocessors_args_parser(args_parser)
    args = args_parser.parse_args()

    # Build the cube model and the chain that consumes its turn events.
    cube, orientation = build_cube(args)
    postprocessor = build_postprocessors_chain(args)

    runtime = CubeRuntime(cube, orientation, postprocessor.process, postprocessor.done)

    # Seed the compile-time stack with the standard library and the
    # cube-runtime functions before parsing.
    stack = Stack()
    stdlib.initialize_stack(stack)
    runtime.functions.initialize_stack(stack)

    exec_globals = {**stdlib.exec_globals, **runtime.functions.exec_globals}
    context = ExecutionContext(exec_globals)

    errors = ErrorsOutput(sys.stderr, use_color=True)

    try:
        program = parser.parse(args.source, stack)
        context.compile(program)
    except UnexpectedCharacters as e:
        # Positions are shifted by one for display (hence the `- 1`).
        errors.write_error(f"Unexpected character: `{args.source[e.pos_in_stream]}`", e.line - 1, e.column - 1)
        errors.display_code(args.source, e.line - 1, e.column - 1, e.line - 1, e.column - 1)
        return
    except LarkError as e:
        # Any other parse failure: report without a source position.
        errors.write_error(str(e))
        return
    except CompileTimeError as e:
        message = str(e)
        errors.write_error(message, e.start_line, e.start_column)
        if isinstance(e, FunctionArgumentsError):
            # Show the mismatched call along with the available overloads.
            errors.write_supplied_arguments(e.arguments)
            errors.write_function_overloads(e.function_name, e.function)
        # Errors may span a region; fall back to the start position when
        # no explicit end is recorded on the exception.
        end_line = e.end_line if e.end_line is not None else e.start_line
        end_column = e.end_column if e.end_column is not None else e.start_column
        errors.display_code(args.source, e.start_line - 1, e.start_column - 1, end_line - 1, end_column - 1)
        return

    context.execute(errors)
    runtime.finished()
/Aruana-1.1.1-py3-none-any.whl/aruana/stopwords.py | english = ['i',
'me',
'my',
'myself',
'we',
'our',
'ours',
'ourselves',
'you',
'your',
'yours',
'yourself',
'yourselves',
'he',
'him',
'his',
'himself',
'she',
'her',
'hers',
'herself',
'it',
'its',
'itself',
'they',
'them',
'their',
'theirs',
'themselves',
'what',
'which',
'who',
'whom',
'this',
'that',
'these',
'those',
'am',
'is',
'are',
'was',
'were',
'be',
'been',
'being',
'have',
'has',
'had',
'having',
'do',
'does',
'did',
'doing',
'a',
'an',
'the',
'and',
'but',
'if',
'or',
'because',
'as',
'until',
'while',
'of',
'at',
'by',
'for',
'with',
'about',
'against',
'between',
'into',
'through',
'during',
'before',
'after',
'above',
'below',
'to',
'from',
'up',
'down',
'in',
'out',
'on',
'off',
'over',
'under',
'again',
'further',
'then',
'once',
'here',
'there',
'when',
'where',
'why',
'how',
'all',
'any',
'both',
'each',
'few',
'more',
'most',
'other',
'some',
'such',
'no',
'nor',
'not',
'only',
'own',
'same',
'so',
'than',
'too',
'very',
's',
't',
'can',
'will',
'just',
'don',
'should',
'now']
"""Holds known English stop words."""
french = ['a',
'abord',
'absolument',
'afin',
'ah',
'ai',
'aie',
'aient',
'aies',
'ailleurs',
'ainsi',
'ait',
'allaient',
'allo',
'allons',
'allô',
'alors',
'anterieur',
'anterieure',
'anterieures',
'apres',
'après',
'as',
'assez',
'attendu',
'au',
'aucun',
'aucune',
'aucuns',
'aujourd',
'aujourd\'hui',
'aupres',
'auquel',
'aura',
'aurai',
'auraient',
'aurais',
'aurait',
'auras',
'aurez',
'auriez',
'aurions',
'aurons',
'auront',
'aussi',
'autre',
'autrefois',
'autrement',
'autres',
'autrui',
'aux',
'auxquelles',
'auxquels',
'avaient',
'avais',
'avait',
'avant',
'avec',
'avez',
'aviez',
'avions',
'avoir',
'avons',
'ayant',
'ayez',
'ayons',
'b',
'bah',
'bas',
'basee',
'bat',
'beau',
'beaucoup',
'bien',
'bigre',
'bon',
'boum',
'bravo',
'brrr',
'c',
'car',
'ce',
'ceci',
'cela',
'celle',
'celle-ci',
'celle-là',
'celles',
'celles-ci',
'celles-là',
'celui',
'celui-ci',
'celui-là',
'celà',
'cent',
'cependant',
'certain',
'certaine',
'certaines',
'certains',
'certes',
'ces',
'cet',
'cette',
'ceux',
'ceux-ci',
'ceux-là',
'chacun',
'chacune',
'chaque',
'cher',
'chers',
'chez',
'chiche',
'chut',
'chère',
'chères',
'ci',
'cinq',
'cinquantaine',
'cinquante',
'cinquantième',
'cinquième',
'clac',
'clic',
'combien',
'comme',
'comment',
'comparable',
'comparables',
'compris',
'concernant',
'contre',
'couic',
'crac',
'd',
'da',
'dans',
'de',
'debout',
'dedans',
'dehors',
'deja',
'delà',
'depuis',
'dernier',
'derniere',
'derriere',
'derrière',
'des',
'desormais',
'desquelles',
'desquels',
'dessous',
'dessus',
'deux',
'deuxième',
'deuxièmement',
'devant',
'devers',
'devra',
'devrait',
'different',
'differentes',
'differents',
'différent',
'différente',
'différentes',
'différents',
'dire',
'directe',
'directement',
'dit',
'dite',
'dits',
'divers',
'diverse',
'diverses',
'dix',
'dix-huit',
'dix-neuf',
'dix-sept',
'dixième',
'doit',
'doivent',
'donc',
'dont',
'dos',
'douze',
'douzième',
'dring',
'droite',
'du',
'duquel',
'durant',
'dès',
'début',
'désormais',
'e',
'effet',
'egale',
'egalement',
'egales',
'eh',
'elle',
'elle-même',
'elles',
'elles-mêmes',
'en',
'encore',
'enfin',
'entre',
'envers',
'environ',
'es',
'essai',
'est',
'et',
'etant',
'etc',
'etre',
'eu',
'eue',
'eues',
'euh',
'eurent',
'eus',
'eusse',
'eussent',
'eusses',
'eussiez',
'eussions',
'eut',
'eux',
'eux-mêmes',
'exactement',
'excepté',
'extenso',
'exterieur',
'eûmes',
'eût',
'eûtes',
'f',
'fais',
'faisaient',
'faisant',
'fait',
'faites',
'façon',
'feront',
'fi',
'flac',
'floc',
'fois',
'font',
'force',
'furent',
'fus',
'fusse',
'fussent',
'fusses',
'fussiez',
'fussions',
'fut',
'fûmes',
'fût',
'fûtes',
'g',
'gens',
'h',
'ha',
'haut',
'hein',
'hem',
'hep',
'hi',
'ho',
'holà',
'hop',
'hormis',
'hors',
'hou',
'houp',
'hue',
'hui',
'huit',
'huitième',
'hum',
'hurrah',
'hé',
'hélas',
'i',
'ici',
'il',
'ils',
'importe',
'j',
'je',
'jusqu',
'jusque',
'juste',
'k',
'l',
'la',
'laisser',
'laquelle',
'las',
'le',
'lequel',
'les',
'lesquelles',
'lesquels',
'leur',
'leurs',
'longtemps',
'lors',
'lorsque',
'lui',
'lui-meme',
'lui-même',
'là',
'lès',
'm',
'ma',
'maint',
'maintenant',
'mais',
'malgre',
'malgré',
'maximale',
'me',
'meme',
'memes',
'merci',
'mes',
'mien',
'mienne',
'miennes',
'miens',
'mille',
'mince',
'mine',
'minimale',
'moi',
'moi-meme',
'moi-même',
'moindres',
'moins',
'mon',
'mot',
'moyennant',
'multiple',
'multiples',
'même',
'mêmes',
'n',
'na',
'naturel',
'naturelle',
'naturelles',
'ne',
'neanmoins',
'necessaire',
'necessairement',
'neuf',
'neuvième',
'ni',
'nombreuses',
'nombreux',
'nommés',
'non',
'nos',
'notamment',
'notre',
'nous',
'nous-mêmes',
'nouveau',
'nouveaux',
'nul',
'néanmoins',
'nôtre',
'nôtres',
'o',
'oh',
'ohé',
'ollé',
'olé',
'on',
'ont',
'onze',
'onzième',
'ore',
'ou',
'ouf',
'ouias',
'oust',
'ouste',
'outre',
'ouvert',
'ouverte',
'ouverts',
'o|',
'où',
'p',
'paf',
'pan',
'par',
'parce',
'parfois',
'parle',
'parlent',
'parler',
'parmi',
'parole',
'parseme',
'partant',
'particulier',
'particulière',
'particulièrement',
'pas',
'passé',
'pendant',
'pense',
'permet',
'personne',
'personnes',
'peu',
'peut',
'peuvent',
'peux',
'pff',
'pfft',
'pfut',
'pif',
'pire',
'pièce',
'plein',
'plouf',
'plupart',
'plus',
'plusieurs',
'plutôt',
'possessif',
'possessifs',
'possible',
'possibles',
'pouah',
'pour',
'pourquoi',
'pourrais',
'pourrait',
'pouvait',
'prealable',
'precisement',
'premier',
'première',
'premièrement',
'pres',
'probable',
'probante',
'procedant',
'proche',
'près',
'psitt',
'pu',
'puis',
'puisque',
'pur',
'pure',
'q',
'qu',
'quand',
'quant',
'quant-à-soi',
'quanta',
'quarante',
'quatorze',
'quatre',
'quatre-vingt',
'quatrième',
'quatrièmement',
'que',
'quel',
'quelconque',
'quelle',
'quelles',
'quelqu\'un',
'quelque',
'quelques',
'quels',
'qui',
'quiconque',
'quinze',
'quoi',
'quoique',
'r',
'rare',
'rarement',
'rares',
'relative',
'relativement',
'remarquable',
'rend',
'rendre',
'restant',
'reste',
'restent',
'restrictif',
'retour',
'revoici',
'revoilà',
'rien',
's',
'sa',
'sacrebleu',
'sait',
'sans',
'sapristi',
'sauf',
'se',
'sein',
'seize',
'selon',
'semblable',
'semblaient',
'semble',
'semblent',
'sent',
'sept',
'septième',
'sera',
'serai',
'seraient',
'serais',
'serait',
'seras',
'serez',
'seriez',
'serions',
'serons',
'seront',
'ses',
'seul',
'seule',
'seulement',
'si',
'sien',
'sienne',
'siennes',
'siens',
'sinon',
'six',
'sixième',
'soi',
'soi-même',
'soient',
'sois',
'soit',
'soixante',
'sommes',
'son',
'sont',
'sous',
'souvent',
'soyez',
'soyons',
'specifique',
'specifiques',
'speculatif',
'stop',
'strictement',
'subtiles',
'suffisant',
'suffisante',
'suffit',
'suis',
'suit',
'suivant',
'suivante',
'suivantes',
'suivants',
'suivre',
'sujet',
'superpose',
'sur',
'surtout',
't',
'ta',
'tac',
'tandis',
'tant',
'tardive',
'te',
'tel',
'telle',
'tellement',
'telles',
'tels',
'tenant',
'tend',
'tenir',
'tente',
'tes',
'tic',
'tien',
'tienne',
'tiennes',
'tiens',
'toc',
'toi',
'toi-même',
'ton',
'touchant',
'toujours',
'tous',
'tout',
'toute',
'toutefois',
'toutes',
'treize',
'trente',
'tres',
'trois',
'troisième',
'troisièmement',
'trop',
'très',
'tsoin',
'tsouin',
'tu',
'té',
'u',
'un',
'une',
'unes',
'uniformement',
'unique',
'uniques',
'uns',
'v',
'va',
'vais',
'valeur',
'vas',
'vers',
'via',
'vif',
'vifs',
'vingt',
'vivat',
'vive',
'vives',
'vlan',
'voici',
'voie',
'voient',
'voilà',
'vont',
'vos',
'votre',
'vous',
'vous-mêmes',
'vu',
'vé',
'vôtre',
'vôtres',
'w',
'x',
'y',
'z',
'zut',
'à',
'â',
'ça',
'ès',
'étaient',
'étais',
'était',
'étant',
'état',
'étiez',
'étions',
'été',
'étée',
'étées',
'étés',
'êtes',
'être',
'ô']
"""Holds known French stop words."""
portuguese = ['de',
'a',
'o',
'que',
'ue',
'oi',
'rt',
'e',
'do',
'da',
'em',
'um',
'ai',
'pra',
'para',
'com',
'uma',
'os',
'no',
'se',
'na',
'por',
'mais',
'as',
'pq',
'dos',
'como',
'mas',
'ao',
'ele',
'das',
'à',
'seu',
'sua',
'ou',
'quando',
'muito',
'nos',
'já',
'ja',
'eu',
'também',
'tambem',
'só',
'so',
'pelo',
'pela',
'pelos',
'pelas',
'até',
'ate',
'isso',
'ela',
'entre',
'depois',
'sem',
'mesmo',
'aos',
'seus',
'quem',
'nas',
'me',
'esse',
'eles',
'você',
'voce',
'essa',
'num',
'nem',
'suas',
'meu',
'às',
'minha',
'numa',
'elas',
'qual',
'nós',
'lhe',
'deles',
'essas',
'esses',
'este',
'dele',
'tu',
'q',
'te',
'vocês',
'voces',
'vos',
'lhes',
'meus',
'minhas',
'teu',
'tua',
'teus',
'tuas',
'nosso',
'nossa',
'nossos',
'nossas',
'dela',
'delas',
'esta',
'estes',
'estas',
'aquele',
'aquela',
'aqueles',
'aquelas',
'isto',
'aquilo',
'estou',
'está',
'estamos',
'estão',
'estao',
'estive',
'esteve',
'estivemos',
'estiveram',
'estava',
'estávamos',
'estavamos',
'estavam',
'estivera',
'estivéramos',
'estiveramos',
'esteja',
'estejamos',
'estejam',
'estivesse',
'estivéssemos',
'estivessemos',
'estivessem',
'estiver',
'estivermos',
'estiverem',
'hei',
'há',
'ha',
'havemos',
'hão',
'hao',
'houve',
'houvemos',
'houveram',
'houvera',
'houvéramos',
'houveramos',
'haja',
'hajamos',
'hajam',
'houvesse',
'houvéssemos',
'houvessem',
'houver',
'houvermos',
'houverem',
'houverei',
'houverá',
'houveremos',
'houverão',
'houveria',
'houveríamos',
'houveriam',
'sou',
'somos',
'são',
'é',
'era',
'éramos',
'eram',
'fui',
'foi',
'fomos',
'foram',
'fora',
'fôramos',
'seja',
'sejamos',
'sejam',
'fosse',
'fôssemos',
'fossem',
'for',
'formos',
'forem',
'serei',
'será',
'seremos',
'serão',
'seria',
'seríamos',
'seriam',
'tenho',
'tem',
'têm',
'temos',
'tém',
'tinha',
'tínhamos',
'tinham',
'tive',
'teve',
'tivemos',
'tiveram',
'tivera',
'tivéramos',
'tenha',
'tenhamos',
'tenham',
'tivesse',
'tivéssemos',
'tivessem',
'tiver',
'tivermos',
'tiverem',
'terei',
'terá',
'teremos',
'terão',
'teria',
'teríamos',
'tava',
'tavam',
'tô',
'tá',
'to',
'ta',
'teriam',
'nele',
'nela',
'neles',
'nelas',
'vc']
"""Holds known Portuguese stop words.""" | PypiClean |
/Code_groupe1_laguilhon-0.0.1.tar.gz/Code_groupe1_laguilhon-0.0.1/src/Code/Algo_main_CelebA_vf.py | import numpy as np
import matplotlib
#mac
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import pandas as pd
from PIL import Image
from skimage import io, transform
from skimage.transform import resize
import os, time, sys, json, glob
import csv
import cv2
import math, random
from importlib import reload
import tensorflow as tf
from tensorflow import keras
from keras.models import Model
from keras.callbacks import TensorBoard
from keras import layers
#Path
# `path` resolves to the parent of this file's directory and is used as the
# root for every data file referenced below (CSV attributes, image folder,
# saved autoencoder, output JPGs).
import pathlib
path=str(pathlib.Path(__file__).parent.resolve().parent.resolve())
import image_celeba as ic
# DOWNLOAD THE DATASET
# Folder holding the 607 aligned CelebA crops used by this program.
dataset_img=path+'/f_annexes/img_align_celeba_607'
# DOWNLOAD THE ATTRIBUTES OF THE IMAGES
def load_attr():
    """
    Load the image file names and the 'Male' attribute of every image.

    Reads the CelebA attribute CSV and the ordered list of file names from
    disk (both under ``path + '/f_annexes/'``).

    output :
        sexe (list) :
            ``sexe[i]`` is the int 'Male' attribute of image ``filesnames[i]``
            (presumably 1 = male, -1 = female -- TODO confirm against the CSV).
        filesnames (list) :
            File names of the images, in file order.
    """
    db = pd.read_csv(path + '/f_annexes/new_list_attr_celba.csv', sep=",",
                     usecols=['nb_pic', 'Male'], low_memory=False)
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(path + "/f_annexes/filesnames607.txt", "r") as file:
        # Strip the trailing newline from each line.
        filesnames = [line.split('\n')[0] for line in file]
    # Look up the 'Male' attribute of every file name, preserving file order.
    sexe = [int(db[db.nb_pic == name]["Male"]) for name in filesnames]
    return sexe, filesnames
def import_images(sexe, filesnames, nb_images=607, start=0):
    """
    Load up to `nb_images` dataset images starting at index `start`,
    keeping at most nb_images/2 of each sex so the sample is balanced.

    input :
        sexe (list) : 'Male' attribute for each image (1 = male)
        filesnames (list) : file name of each image
        nb_images (int) : number of candidate images to scan
        start (int) : index of the first candidate image
    output :
        (numpy array) : the kept images, each resized to 128x128
    """
    folder = path + '/f_annexes/img_align_celeba_607'
    kept = []
    males_kept = 0
    females_kept = 0
    per_sex_quota = nb_images / 2  # float cap, same arithmetic as before
    for idx in range(start, start + nb_images):
        # Count the candidate by sex, then keep it only while that sex is
        # still under its quota.
        if sexe[idx] == 1:
            males_kept += 1
            keep = males_kept <= per_sex_quota
        else:
            females_kept += 1
            keep = females_kept <= per_sex_quota
        if keep:
            raw = io.imread(f'{folder}/{filesnames[idx]}')
            kept.append(resize(raw, (128, 128)))
    return np.array(kept)
# DOWNLOAD AUTOENCODER
# Load the pre-trained autoencoder from disk and keep direct handles on its
# two halves: `encodeur` projects images into the latent space, `decodeur`
# maps latent vectors back to images.
autoencodeur=tf.keras.models.load_model(path+'/f_annexes/autoencodeurFLATTEN4.tf')
decodeur=autoencodeur.decoder
encodeur=autoencodeur.encoder
# ALGO GENETIQUE
def mutation_function_flatten(vec, lap):
    """
    Return a mutated copy of `vec` (a face in the latent space).

    Gaussian noise is added to every component; its amplitude,
    (30 - lap) * 0.6, shrinks as `lap` grows, so early iterations explore
    the latent space aggressively and later ones only fine-tune.
    The input vector itself is left untouched.

    input :
        vec (np.array) : one latent-space vector
        lap (int) : number of laps already completed
    output :
        (np.array) : the mutated copy
    """
    mutated = vec.copy()
    size = len(vec)
    amplitude = (30 - lap) * 0.6
    # Draw one noise sample per component, shaped like the vector.
    noise = np.random.randn(size).reshape(mutated.shape)
    mutated += noise * amplitude
    return mutated
def mutations_test(x_data):
    """
    Visual sanity check for mutation_function_flatten.

    Shows a randomly selected original face, its reconstruction through the
    autoencoder, and two mutated versions (lap=0: strong mutation, lap=25:
    weak mutation) so the effect of `lap` can be inspected by eye.
    Relies on the module-level `encodeur`, `decodeur` and matplotlib.
    """
    # BUG FIX: randint's upper bound is inclusive, so len(x_data) was a
    # possible (out-of-range) result; use len(x_data) - 1.
    i = random.randint(0, len(x_data) - 1)
    print("Num of the selected face : ", i)
    plt.figure(figsize=(10, 2))
    img = x_data[i]
    plt.imshow(tf.squeeze(img))
    plt.title("origine")
    # Reconstruction through the full encode/decode round trip.
    encoded_imgs = encodeur(x_data)
    img_2 = encoded_imgs[i]
    P = np.array([img_2])
    img_2 = decodeur.predict(P)
    plt.figure(figsize=(10, 2))
    plt.imshow(tf.squeeze(img_2))
    plt.title("reconstructed")
    # Strong mutation: lap=0 gives the largest noise amplitude.
    P_mut = np.array([mutation_function_flatten(P[0], lap=0)])
    plt.figure(figsize=(10, 2))
    img_3 = decodeur.predict(P_mut)
    plt.imshow(tf.squeeze(img_3))
    plt.title("mutation lap = 0")
    # Weak mutation: lap=25 gives a much smaller amplitude.
    P_mut2 = np.array([mutation_function_flatten(P[0], lap=25)])
    plt.figure(figsize=(10, 2))
    img_4 = decodeur.predict(P_mut2)
    plt.imshow(tf.squeeze(img_4))
    plt.title("mutation lap = 25")
    plt.show()
# mutations_test(x_data)
# The face decoded is not well, so with the mutations, it does not look like a precised face.
# Nevertheless, we can see that the mutations according to the loop (lap) works quite well :
# there are much more mutations at lap = 0 than at lap = 25, that means that whenthe more the user use the software
# the more we reduce the possibilities of variations.
def crossing_over_function_flatten(r, selected_pop, lap):
    """
    Return a copy of selected_pop[r] after crossing-over with every other
    selected vector: random contiguous slices of the donor vectors replace
    the corresponding slices of the selected one.

    During the early laps (lap < 10) one random slice is exchanged inside
    each of three equal sixths of the vector per donor; afterwards a single
    random slice anywhere in the vector is exchanged per donor.

    input :
        r (int) :
            Index, in selected_pop, of the vector to cross over.
        selected_pop (np.array) :
            The vectors selected by the user (never modified).
        lap (int) :
            The number of laps already completed.
    output :
        (np.array) : the crossed-over copy of selected_pop[r]
    """
    # Work on a copy so the caller's vectors are never mutated.
    new_pop = np.copy(selected_pop)
    shape = new_pop.shape
    if shape[0] == 1:
        # Only one parent was selected: nothing to exchange with.
        print("Pas de crossing-over")
    else:
        for indc in range(0, shape[0]):  # one crossing-over per donor vector
            if indc != r:
                if lap < 10:
                    i = 0
                    j = int(shape[1] / 6)  # width of one sixth of the vector
                    for l in range(0, 3):
                        # Random slice bounds inside the current sixth.
                        pos_x1 = random.randint(i, i + j - 1)
                        pos_x2 = random.randint(i, i + j - 1)
                        # Copy the donor's values over the selected vector.
                        cp = selected_pop[indc][min(pos_x1, pos_x2):max(pos_x1, pos_x2) + 1]
                        new_pop[r][min(pos_x1, pos_x2):max(pos_x1, pos_x2) + 1] = cp
                        i += j
                else:
                    # Later laps: a single random slice anywhere.
                    pos_x1 = random.randint(0, shape[1] - 1)
                    pos_x2 = random.randint(0, shape[1] - 1)
                    cp = selected_pop[indc][min(pos_x1, pos_x2):max(pos_x1, pos_x2) + 1]
                    new_pop[r][min(pos_x1, pos_x2):max(pos_x1, pos_x2) + 1] = cp
    return new_pop[r]
def initialisation_Liste_5_premiers():
    """
    Pick nb_faces random images from the dataset to seed the genetic
    algorithm and return them as "image_celeba" objects.

    Relies on the module-level `encodeur`, `x_data` and `nb_faces`.

    output :
        List_images (list(image_celeba)) :
            The initial images, saved to disk as JPGs by
            saving_images_and_getting_list_initial.
    """
    encoded_imgs = encodeur(x_data)  # images projected into the latent space
    initial_img = []           # encoded form of the selected seeds
    list_img_non_encoded = []  # raw (pre-encoder) form of the same seeds
    for i in range(nb_faces):
        # BUG FIX: randint's upper bound is inclusive, so len(encoded_imgs)
        # was a possible out-of-range index; use len(encoded_imgs) - 1.
        r = random.randint(0, len(encoded_imgs) - 1)
        initial_img.append(encoded_imgs[r])
        list_img_non_encoded.append(x_data[r])
    P = np.array(initial_img)
    List_images = saving_images_and_getting_list_initial(P, list_img_non_encoded)
    return List_images
def saving_images_and_getting_list_initial(P, list_img_non_encoded):
    """
    Save the initial images passed in list_img_non_encoded as .jpg files and
    return them wrapped as "image_celeba" objects.

    Relies on the module-level `nb_faces`, `path`, `plt` and `ic`.

    input :
        P (np.array) :
            The initial images in their encoded (latent-space) form; each
            image_celeba object keeps both its own vector P[i] and the whole
            batch P.
        list_img_non_encoded (list) :
            The same initial images in their raw (pre-encoder) form; these
            pixel arrays are what actually gets written to disk.
    output :
        List_images (list(image_celeba)) :
            One image_celeba per saved file, in the same order as P.
    """
    List_images=[]
    plt.figure(figsize=(20, 4))
    for i in range(nb_faces):
        ##### Save the images into format jpg
        # Display reconstruction
        # NOTE(review): the clf/subplot/imshow calls only touch matplotlib
        # figure state; plt.imsave below writes the array directly, so they
        # look vestigial -- confirm before removing.
        plt.clf()
        ax = plt.subplot(2, nb_faces, i + 1 + nb_faces)
        plt.imshow(tf.squeeze(list_img_non_encoded[i]))
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # One file per face: <path>/IMG/Celeb_<i>.jpg (overwritten each lap).
        jpg_name=path+"/IMG/Celeb_"+str(i)+".jpg"
        plt.imsave(jpg_name, list_img_non_encoded[i])
        im=ic.image_celeba(jpg_name, i,P[i],P)
        List_images.append(im)
    return List_images
def saving_images_and_getting_list(P):
    """
    Decode the latent vectors in P, save the resulting images as .jpg files
    and return them wrapped as "image_celeba" objects.

    Relies on the module-level `nb_faces`, `path`, `plt`, `ic` and `decodeur`.

    input :
        P (np.array) :
            The selected images in their encoded (latent-space) form.
    output :
        List_images (list(image_celeba)) :
            One image_celeba per decoded/saved file, in the same order as P.
    """
    List_images=[]
    # Decode the whole batch of latent vectors back to pixel arrays at once.
    decoded_imgs = decodeur.predict(P)
    plt.figure(figsize=(20, 4))
    for i in range(nb_faces):
        ##### Save the images into format jpg
        # Display reconstruction
        # NOTE(review): the clf/subplot/imshow calls only touch matplotlib
        # figure state; plt.imsave below writes the array directly, so they
        # look vestigial -- confirm before removing.
        plt.clf()
        ax = plt.subplot(2, nb_faces, i + 1 + nb_faces)
        plt.imshow(tf.squeeze(decoded_imgs[i]))
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # One file per face: <path>/IMG/Celeb_<i>.jpg (overwritten each lap).
        jpg_name=path+"/IMG/Celeb_"+str(i)+".jpg"
        plt.imsave(jpg_name, decoded_imgs[i])
        im=ic.image_celeba(jpg_name, i,P[i],P)
        List_images.append(im)
    return List_images
def generate_5_new_photos(img_list, img_number_choosen, lap):
    """
    Generate nb_faces new images from the faces the user selected, by
    applying the genetic operators (crossing-over then mutation) in the
    latent space, and return them as "image_celeba" objects.

    input :
        img_list (list(image_celeba)) :
            The images shown during the previous lap; every element carries
            the full encoded batch in its .P attribute.
        img_number_choosen (list(int)) :
            Indices, in the previous P batch, of the faces the user selected.
        lap (int) :
            The number of laps already completed.
    output :
        List_new_images (list(image_celeba)) : the new generation of faces.
    """
    P = img_list[0].P  # encoded faces shown during the previous lap
    # Gather the encoded form of every face the user selected.
    sorted_P = [P[a_number] for a_number in img_number_choosen]
    l = len(sorted_P)
    print("You have selected : ", l, " faces.")
    sorted_P = np.array(sorted_P)
    new_P = np.copy(P)
    for i in range(nb_faces):
        # Pick one of the selected parents at random ...
        r = random.randint(0, l - 1)
        # ... cross it over with the other selected parents, then mutate the
        # result.  BUG FIX: the original computed the mutation first and then
        # overwrote it with the crossing-over result, so the mutation was
        # silently discarded; both operators now contribute.
        new_P[i] = crossing_over_function_flatten(r, sorted_P, lap)
        new_P[i] = mutation_function_flatten(new_P[i], lap)
    P = new_P
    # NOTE: this increment is local to the function; the caller keeps its
    # own lap counter.
    lap += 1
    print("lap=", lap)
    # Save the new generation to disk and wrap it for the GUI.
    List_new_images = saving_images_and_getting_list(P)
    return List_new_images
##### MAIN ######
# Module-level initialisation, executed on import.
# Parameters for initialisation (outisde functions)
nb_faces = 5 # nb of faces that we want print at each iteration
lap = 0 # lap counts the iterations
C = True # C is the condition to continue or not the iterations (if C == False : it is the end of the program)
a = "{}".format(nb_faces) # string version of the number of faces
#nb_images = 1000 # number of images that we will use for the program
sexes, filenames = load_attr() # download of information about the sexe of the people of the image and the name of the image
# start = random.randint(0,len(filenames) - 1001) # the position of the first image in the dataset (to have different faces at each use of the program)
start = 0 # by default
nb_images = 607 # every image of the bundled 607-image dataset is used
x_data = import_images(sexes, filenames, nb_images, start) # download of the images that we will use
define("dojox/form/manager/_ValueMixin", [
	"dojo/_base/lang",
	"dojo/_base/kernel",
	"dojo/_base/declare"
], function(lang, dojo, declare){
return declare("dojox.form.manager._ValueMixin", null, {
	// summary:
	//		Form manager's mixin for getting/setting form values in the unified manner.
	// description:
	//		This mixin adds unified access to form widgets and form elements
	//		in terms of name-value regardless of the underlying type of
	//		an element. It should be used together with dojox.form.manager.Mixin.

	elementValue: function(name, value){
		// summary:
		//		Set or get a form widget/element or an attached point node by name.
		//		Dispatch order: registered form widgets first, then form nodes
		//		(if the node mixin is present), then attached points.
		// name: String:
		//		The name.
		// value: Object?:
		//		Optional. The value to set.
		if(name in this.formWidgets){
			return this.formWidgetValue(name, value);	// Object
		}
		// this.formNodes may be absent when the node mixin is not used.
		if(this.formNodes && name in this.formNodes){
			return this.formNodeValue(name, value);	// Object
		}
		return this.formPointValue(name, value);	// Object
	},

	gatherFormValues: function(names){
		// summary:
		//		Collect form values from widgets, nodes and attached points
		//		into a single name-value dictionary.
		// names: Object?:
		//		If it is an array, it is a list of names of form elements to be collected.
		//		If it is an object, dictionary keys are names to be collected.
		//		If it is omitted, all known form elements are to be collected.
		var result = this.inspectFormWidgets(function(name){
			return this.formWidgetValue(name);
		}, names);
		if(this.inspectFormNodes){
			lang.mixin(result, this.inspectFormNodes(function(name){
				return this.formNodeValue(name);
			}, names));
		}
		lang.mixin(result, this.inspectAttachedPoints(function(name){
			return this.formPointValue(name);
		}, names));
		return result;	// Object
	},

	setFormValues: function(values){
		// summary:
		//		Set values to form elements (widgets, nodes, attached points).
		// values: Object:
		//		A dictionary of key-value pairs.
		// returns:
		//		this, for chaining.
		if(values){
			this.inspectFormWidgets(function(name, widget, value){
				this.formWidgetValue(name, value);
			}, values);
			if(this.inspectFormNodes){
				this.inspectFormNodes(function(name, node, value){
					this.formNodeValue(name, value);
				}, values);
			}
			this.inspectAttachedPoints(function(name, node, value){
				this.formPointValue(name, value);
			}, values);
		}
		return this;
	}
});
});
/GoogleCalendarV3-0.1.7.tar.gz/GoogleCalendarV3-0.1.7/google_calendar_v3/google_calendar_v3.py | __author__ = 'static@siftcal.com (Ashutosh Priyadarshy)'
from requests_oauthlib import OAuth2Session
class GoogleCalendarAPI(object):
    """Thin wrapper around the Google Calendar API v3 REST endpoints.

    Every resource method returns the ``requests.Response`` produced by the
    underlying OAuth2 session; callers are responsible for checking status
    codes and decoding JSON.  ``body`` arguments are passed straight through
    to requests' ``data=`` parameter, so callers should supply them in the
    encoding the API expects (typically a JSON-encoded string).
    """

    def __init__(self, client_id=None, client_secret=None,
                 acc_token=None, ref_token=None, expires_in=None,
                 token_updater=None):
        """Construct a new authenticated instance of GoogleCalendarAPI V3.

        :param client_id: Client Id obtained in application creation.
        :param client_secret: Client Secret obtained in application creation.
        :param acc_token: Access token obtained via standard OAuth2 flow.
        :param ref_token: Refresh token obtained via standard OAuth2 flow.
        :param expires_in: Time until the access token expires.
        :param token_updater: One-argument callable invoked with the new
            token on automatic refresh; use it to persist refreshed tokens.
            If not set, a TokenUpdated warning will be raised when a token
            has been refreshed, carrying the token in its token argument.
        """
        self.refresh_url = u'https://accounts.google.com/o/oauth2/token'
        self.base_url = u'https://www.googleapis.com/calendar/v3/'
        self.client_id = client_id
        self.client_secret = client_secret
        self.acc_token = acc_token
        self.ref_token = ref_token
        self.expires_in = expires_in
        token_dict = self.__construct_token_dictionary()
        refresh_dict = self.__construct_refresh_dictionary()
        # The session transparently refreshes the access token when needed.
        self.session = OAuth2Session(client_id, token=token_dict,
                                     auto_refresh_url=self.refresh_url,
                                     auto_refresh_kwargs=refresh_dict,
                                     token_updater=token_updater)

    def __repr__(self):
        return u'<GoogleCalendarAPI Instance>'

    # Parameter constructors.
    def __construct_token_dictionary(self):
        # Token layout expected by requests_oauthlib.OAuth2Session.
        return {u'access_token': self.acc_token,
                u'refresh_token': self.ref_token,
                u'token_type': u'Bearer',
                u'expires_in': self.expires_in}

    def __construct_refresh_dictionary(self):
        return {u'client_id': self.client_id,
                u'client_secret': self.client_secret}

    # URL construction helpers.
    def __events_exturl_calendar_id(self, calendar_id):
        return self.base_url + u'calendars/{calendarId}/events/'.format(calendarId=calendar_id)

    def __events_exturl_calendar_id_event_id(self, calendar_id, event_id):
        return self.__events_exturl_calendar_id(calendar_id) + u'{eventId}/'.format(eventId=event_id)

    def __calendar_list_base_url(self):
        return self.base_url + u'users/me/calendarList/'

    def __calendar_list_ext_url_calendar_id(self, calendar_id):
        return self.base_url + u'users/me/calendarList/{calendarId}/'.format(calendarId=calendar_id)

    def __calendars_base_url(self):
        return self.base_url + u'calendars/'

    def __calendars_ext_url_calendar_id(self, calendar_id):
        # BUG FIX: the original had no ``return`` and always yielded None.
        return self.__calendars_base_url() + u'{calendarId}/'.format(calendarId=calendar_id)

    def __settings_base_url(self):
        return self.base_url + u'users/me/settings/'

    def __acl_base_url(self, calendar_id):
        return self.base_url + u'calendars/{calendarId}/acl/'.format(calendarId=calendar_id)

    def __acl_ext_url_rule_id(self, calendar_id, rule_id):
        # BUG FIX: the original called __acl_base_url without ``self.``.
        return self.__acl_base_url(calendar_id) + u'{ruleId}/'.format(ruleId=rule_id)

    # Acl resource calls.
    def acl_delete(self, calendar_id, rule_id):
        url = self.__acl_ext_url_rule_id(calendar_id, rule_id)
        return self.session.delete(url)

    def acl_get(self, calendar_id, rule_id):
        url = self.__acl_ext_url_rule_id(calendar_id, rule_id)
        return self.session.get(url)

    def acl_insert(self, calendar_id, body):
        url = self.__acl_base_url(calendar_id)
        return self.session.post(url, data=body)

    def acl_list(self, calendar_id, **kwargs):
        url = self.__acl_base_url(calendar_id)
        # BUG FIX: the original passed {'params': kwargs} positionally, which
        # Session.get does not accept; pass it as the params keyword.
        return self.session.get(url, params=kwargs)

    def acl_patch(self, calendar_id, rule_id, body):
        url = self.__acl_ext_url_rule_id(calendar_id, rule_id)
        return self.session.patch(url, data=body)

    def acl_update(self, calendar_id, rule_id, body):
        url = self.__acl_ext_url_rule_id(calendar_id, rule_id)
        return self.session.put(url, data=body)

    def acl_watch(self, calendar_id, body):
        # BUG FIX: the original took ``calendarId`` but referenced the
        # undefined ``calendar_id`` and called __acl_base_url without self.
        url = self.__acl_base_url(calendar_id) + u'watch/'
        return self.session.post(url, data=body)

    # CalendarList resource calls.
    def calendar_list_delete(self, calendar_id):
        # BUG FIX: the original called the URL helper without ``self.``.
        url = self.__calendar_list_ext_url_calendar_id(calendar_id)
        return self.session.delete(url)

    def calendar_list_get(self, calendar_id):
        url = self.__calendar_list_ext_url_calendar_id(calendar_id)
        return self.session.get(url)

    def calendar_list_insert(self, body, **kwargs):
        url = self.__calendar_list_base_url()
        return self.session.post(url, data=body, params=kwargs)

    def calendar_list_patch(self, calendar_id, body, **kwargs):
        # BUG FIX: the original signature was missing calendar_id although
        # the body referenced it (NameError on every call).
        url = self.__calendar_list_ext_url_calendar_id(calendar_id)
        return self.session.patch(url, data=body, params=kwargs)

    def calendar_list_update(self, calendar_id, body, **kwargs):
        # BUG FIX: same missing calendar_id parameter as calendar_list_patch.
        url = self.__calendar_list_ext_url_calendar_id(calendar_id)
        return self.session.put(url, data=body, params=kwargs)

    def calendar_list_watch(self, body):
        url = self.__calendar_list_base_url() + u'watch/'
        return self.session.post(url, data=body)

    # Calendars resource calls.
    def calendars_clear(self, calendar_id):
        url = self.__calendars_ext_url_calendar_id(calendar_id) + u'clear/'
        return self.session.post(url)

    def calendars_delete(self, calendar_id):
        url = self.__calendars_ext_url_calendar_id(calendar_id)
        return self.session.delete(url)

    def calendars_get(self, calendar_id):
        url = self.__calendars_ext_url_calendar_id(calendar_id)
        return self.session.get(url)

    def calendars_insert(self, body):
        url = self.__calendars_base_url()
        return self.session.post(url, data=body)

    def calendars_patch(self, calendar_id, body):
        url = self.__calendars_ext_url_calendar_id(calendar_id)
        return self.session.patch(url, data=body)

    def calendars_update(self, calendar_id, body):
        url = self.__calendars_ext_url_calendar_id(calendar_id)
        return self.session.put(url, data=body)

    # Colors resource calls.
    def colors_get(self):
        url = self.base_url + u'colors/'
        return self.session.get(url)

    # Events resource calls.
    def events_delete(self, calendar_id, event_id, **kwargs):
        url = self.__events_exturl_calendar_id_event_id(calendar_id, event_id)
        return self.session.delete(url, params=kwargs)

    def events_get(self, calendar_id, event_id, **kwargs):
        url = self.__events_exturl_calendar_id_event_id(calendar_id, event_id)
        return self.session.get(url, params=kwargs)

    def events_import(self, calendar_id, body, **kwargs):
        # BUG FIX: the original referenced ``kwargs`` without declaring it.
        url = self.__events_exturl_calendar_id(calendar_id) + u'import/'
        return self.session.post(url, data=body, params=kwargs)

    def events_insert(self, calendar_id, body, **kwargs):
        url = self.__events_exturl_calendar_id(calendar_id)
        return self.session.post(url, data=body, params=kwargs)

    def events_instances(self, calendar_id, event_id, **kwargs):
        url = self.__events_exturl_calendar_id_event_id(calendar_id, event_id) + u'instances/'
        return self.session.get(url, params=kwargs)

    def events_list(self, calendar_id, **kwargs):
        url = self.__events_exturl_calendar_id(calendar_id)
        return self.session.get(url, params=kwargs)

    def events_move(self, calendar_id, event_id, destination, **kwargs):
        # BUG FIX: the original posted ``data=body`` with ``body`` undefined;
        # the move endpoint only takes the destination as a query parameter.
        url = self.__events_exturl_calendar_id_event_id(calendar_id, event_id) + u'move/'
        kwargs[u'destination'] = destination
        return self.session.post(url, params=kwargs)

    def events_patch(self, calendar_id, event_id, body, **kwargs):
        url = self.__events_exturl_calendar_id_event_id(calendar_id, event_id)
        return self.session.patch(url, data=body, params=kwargs)

    def events_quick_add(self, calendar_id, text, **kwargs):
        url = self.__events_exturl_calendar_id(calendar_id) + u'quickAdd/'
        kwargs[u'text'] = text
        return self.session.post(url, params=kwargs)

    def events_update(self, calendar_id, event_id, body, **kwargs):
        url = self.__events_exturl_calendar_id_event_id(calendar_id, event_id)
        return self.session.put(url, data=body, params=kwargs)

    def events_watch(self, calendar_id, body):
        url = self.__events_exturl_calendar_id(calendar_id) + u'watch/'
        return self.session.post(url, data=body)

    # Freebusy resource calls.
    def freebusy_query(self, body):
        url = self.base_url + u'freeBusy/'
        return self.session.post(url, data=body)

    # Settings resource calls.
    def settings_get(self, setting):
        url = self.__settings_base_url() + u'{setting}/'.format(setting=setting)
        return self.session.get(url)

    def settings_list(self, **kwargs):
        url = self.__settings_base_url()
        return self.session.get(url, params=kwargs)

    def settings_watch(self, body):
        # BUG FIX: the original concatenated the bound method itself
        # (missing call parentheses), raising a TypeError at runtime.
        url = self.__settings_base_url() + u'watch/'
        return self.session.post(url, data=body)

    # Channels resource calls.
    def channels_stop(self, body):
        url = self.base_url + u'channels/stop/'
        return self.session.post(url, data=body)
/BlazeWeb-0.6.2-py3-none-any.whl/blazeweb/templating/jinja.py | from __future__ import with_statement
from __future__ import absolute_import
import logging
from os import path
from jinja2 import Environment, TemplateNotFound, BaseLoader, \
Template as j2Template, contextfilter
from jinja2.utils import Markup
from blazeweb.globals import settings
from blazeweb.hierarchy import FileNotFound, findfile, split_endpoint
import blazeweb.templating as templating
import six
log = logging.getLogger(__name__)
class _RootRenderWrapper(object):
    """Wraps a Jinja2 template's root render function so that the name of the
    template currently being rendered is tracked on a stack stored in the
    render context (under the '__TemplateContent.endpoint_stack' key)."""

    def __init__(self, tpl_name, root_render_func):
        # tpl_name: the template's name/endpoint as known to Jinja2.
        # root_render_func: the original render generator being wrapped.
        self.tpl_name = tpl_name
        self.root_render_func = root_render_func

    def __call__(self, context):
        # Push this template's name for the duration of the render so
        # helpers can tell which template is currently rendering.
        # NOTE(review): if the wrapped render raises, the pop is skipped
        # (no try/finally) -- confirm whether that is acceptable here.
        endpoint_stack = context.get('__TemplateContent.endpoint_stack', [])
        endpoint_stack.append(self.tpl_name)
        for event in self.root_render_func(context):
            yield event
        endpoint_stack.pop()

    def __eq__(self, other):
        """
        When super() is used in a block, Jinja's implementation looks
        for the original render func in a list using list.index(), so we
        need to make our wrapper compare equal to the original function.
        """
        # NOTE(review): no matching __hash__ is defined, which makes
        # instances unhashable on Python 3 -- confirm nothing hashes them.
        return other == self.root_render_func
class Template(j2Template):
    """Jinja template subclass whose root render function — and the root
    render function of every block — is wrapped in ``_RootRenderWrapper``
    so the currently-rendering template name can be tracked."""

    @classmethod
    def _from_namespace(cls, environment, namespace, globals):
        tpl_name = namespace['name']
        # Track the template name for the main render function.
        namespace['root'] = _RootRenderWrapper(tpl_name, namespace['root'])
        # Blocks get the same treatment: without this, the include
        # helpers would compute the wrong "current template" inside a
        # block that overrides a parent template's block.
        wrapped = {}
        for block_name, render_func in six.iteritems(namespace['blocks']):
            wrapped[block_name] = _RootRenderWrapper(tpl_name, render_func)
        namespace['blocks'].update(wrapped)
        return j2Template._from_namespace(environment, namespace, globals)
class Translator(templating.EngineBase):
    """Jinja2-backed template engine for blazeweb.

    Builds a jinja2 ``Environment`` wired to the hierarchy-aware loader
    and the application's jinja settings, then registers the framework's
    globals and filters on it.
    """

    def __init__(self):
        self.env = Environment(
            loader=self.create_loader(),
            **self.get_settings()
        )
        # Custom Template subclass tracks the current template name so
        # include/content helpers can resolve hierarchy endpoints.
        self.env.template_class = Template
        self.init_globals()
        self.init_filters()

    def create_loader(self):
        # Hook point: subclasses may substitute a different loader.
        return HierarchyLoader()

    def get_settings(self):
        """Return the keyword arguments used to build the Environment.

        If ``settings.jinja.autoescape`` is a list/tuple it is replaced
        with an extension-based guessing function (escape only
        .html/.htm/.xml).  NOTE(review): this mutates the global
        settings object in place — presumably intentional so the guess
        function sticks for later calls; confirm.
        """
        def guess_autoescape(template_name):
            # No name or no extension: assume non-HTML, do not escape.
            if template_name is None or '.' not in template_name:
                return False
            ext = template_name.rsplit('.', 1)[1]
            return ext in ('html', 'htm', 'xml')
        jsettings = settings.jinja
        if isinstance(jsettings.autoescape, (list, tuple)):
            jsettings.autoescape = guess_autoescape
        return jsettings.todict()

    def init_globals(self):
        # get_globals() is provided by templating.EngineBase.
        self.env.globals.update(self.get_globals())

    def init_filters(self):
        filters = self.get_filters()
        # the 'content' filter is always available (see content_filter below)
        filters['content'] = content_filter
        self.env.filters.update(filters)

    def render_template(self, endpoint, context):
        """Render the template identified by ``endpoint`` with ``context``."""
        self.update_context(context)
        return self.env.get_template(endpoint).render(context)

    def render_string(self, string, context):
        """Render an in-memory template source string with ``context``."""
        return self.env.from_string(string).render(context)

    def mark_safe(self, value):
        """ when a template has auto-escaping enabled, mark a value as safe """
        return Markup(value)
class HierarchyLoader(BaseLoader):
    """
    A modification of Jinja's FileSystemLoader to take into account
    the hierarchy.
    """

    def __init__(self, encoding=None):
        # Fix: the default used to be ``encoding=settings.default.charset``,
        # which is evaluated once at *import* time and therefore froze
        # whatever charset was configured when this module was first
        # imported.  Resolve it per-instance instead; passing an explicit
        # encoding behaves exactly as before.
        if encoding is None:
            encoding = settings.default.charset
        self.encoding = encoding

    def find_template_path(self, endpoint):
        """Return the filesystem path for ``endpoint``, or None if not found."""
        # try module level first
        try:
            component, template = split_endpoint(endpoint)
            endpoint = path.join('templates', template)
            if component:
                endpoint = '%s:%s' % (component, endpoint)
            return findfile(endpoint)
        except FileNotFound:
            pass
        # try app level second if module wasn't specified
        # try:
        #    if ':' not in template:
        #        endpoint = 'templates/%s' % template
        #        return findfile(endpoint)
        # except FileNotFound:
        #    pass

    def get_source(self, environment, endpoint):
        """Jinja BaseLoader API: return (source, filename, uptodate)."""
        log.debug('get_source() processing: %s' % endpoint)
        fpath = self.find_template_path(endpoint)
        if not fpath:
            raise TemplateNotFound(endpoint)
        with open(fpath, 'rb') as f:
            contents = f.read().decode(self.encoding)
        # uptodate callable: the template stays fresh while its mtime is unchanged
        old = path.getmtime(fpath)
        return contents, fpath, lambda: path.getmtime(fpath) == old
@contextfilter
def content_filter(context, child_content):
    """Jinja filter: fold a child TemplateContent's non-primary sections
    into the parent content object held in the render context, then emit
    the child's primary section as markup-safe output."""
    parent = context['__TemplateContent.obj']
    parent.update_nonprimary_from_content(child_content)
    return Markup(child_content.primary)
/Hyperion-0.9.10.tar.gz/Hyperion-0.9.10/hyperion/conf/conf_files.py | from __future__ import print_function, division
import warnings
import numpy as np
from astropy import log as logger
from astropy.extern import six
from ..util.functions import FreezableClass, bool2str, str2bool, is_numpy_array
from ..filter import Filter
class OutputConf(FreezableClass):
    """Configuration of which gridded quantities are written out.

    Each attribute holds a string flag (e.g. 'none', 'last') that is
    round-tripped through HDF5 group attributes by read()/write().
    """

    def __init__(self):
        '''
        Initialize default output configuration
        '''
        self.output_density = 'none'
        self.output_density_diff = 'none'
        self.output_specific_energy = 'last'
        self.output_n_photons = 'none'
        self._freeze()

    @classmethod
    def read(cls, group):
        """Build an OutputConf from the attributes of an HDF5 group."""
        conf = cls()
        for name in ('output_density', 'output_density_diff',
                     'output_specific_energy', 'output_n_photons'):
            setattr(conf, name, group.attrs[name].decode('utf-8'))
        return conf

    def write(self, group):
        """Store the configuration as attributes on an HDF5 group."""
        for name in ('output_density', 'output_density_diff',
                     'output_specific_energy', 'output_n_photons'):
            group.attrs[name] = np.string_(getattr(self, name).encode('utf-8'))
class RunConf(object):
def __init__(self):
    '''
    Initialize default run configuration

    Every default is applied through its ``set_*`` method so the same
    validation runs as for user calls; see each method for parameter
    details.
    '''
    self.set_n_initial_iterations(5)
    self.n_photons = {}  # populated later by set_n_photons()
    self.set_propagation_check_frequency(0.001)
    self.set_seed(-124902)
    self.set_raytracing(False)
    self.set_max_interactions(1000000)
    self.set_max_reabsorptions(1000000)
    self.set_pda(False)
    self.set_mrw(False)
    self.set_convergence(False)
    self.set_kill_on_absorb(False)
    self.set_kill_on_scatter(False)
    self.set_forced_first_interaction(True)
    self.set_output_bytes(8)
    self.set_sample_sources_evenly(False)
    self.set_enforce_energy_range(True)
    self.set_copy_input(True)
    # monochromatic mode is toggled internally by the model, not by users
    self._monochromatic = False
    self.set_specific_energy_type('initial')
    # cooperative multiple inheritance: RunConf is used as a mixin
    super(RunConf, self).__init__()
def set_propagation_check_frequency(self, frequency):
    '''
    Set how often to double-check a photon's position and cell.

    Floating-point issues can occasionally leave a photon registered in
    the wrong cell; by default 1 in 1000 cell-wall crossings is
    verified.  Values much above 0.001 will noticeably slow the code.

    Parameters
    ----------
    frequency : float
        Fraction of cell-wall crossings at which the photon position
        and cell are double-checked (1 is always, 0 is never).
    '''
    is_numeric_scalar = np.isscalar(frequency) and not isinstance(frequency, six.string_types)
    if not is_numeric_scalar:
        raise TypeError("frequency should be a scalar value")
    if frequency < 0. or frequency > 1.:
        raise ValueError("frequency should be between 0 and 1")
    self._frequency = frequency
def _read_propagation_check_frequency(self, group):
self._frequency = group.attrs['propagation_check_frequency']
def _write_propagation_check_frequency(self, group):
group.attrs['propagation_check_frequency'] = self._frequency
def set_seed(self, seed):
'''
Set the seed for the random number generation
Parameters
----------
seed : int
The seed with which to initialize the random number generation.
This should be negative.
'''
if type(seed) != int or seed >= 0:
raise ValueError("seed should be a negative integer")
self._seed = seed
def _read_seed(self, group):
self._seed = group.attrs['seed']
def _write_seed(self, group):
group.attrs['seed'] = self._seed
def set_n_initial_iterations(self, n_iter):
'''
Set the number of initial iterations for computing the specific
energy in each cell.
Parameters
----------
n_iter : int
The number of initial iterations
'''
self.n_iter = n_iter
def _read_n_initial_iterations(self, group):
self.n_iter = group.attrs['n_initial_iter']
def _write_n_initial_iterations(self, group):
group.attrs['n_initial_iter'] = self.n_iter
def set_n_photons(self, initial=None, imaging=None,
                  imaging_sources=None, imaging_dust=None,
                  raytracing_sources=None, raytracing_dust=None,
                  stats=0):
    '''
    Set the number of photons for the different iterations

    Note that any values not specified will be set to the default
    values.  Which arguments are *required* (and which are forbidden)
    depends on the current raytracing and monochromatic settings, so
    call set_raytracing()/monochromatic setup before this method.

    Parameters
    ----------
    initial : float, optional
        Number of photons for the initial specific energy iterations
    imaging : float, optional
        Number of photons for the main SED/image iteration. This argument
        is used in the case of non-monochromatic radiation transfer.
    imaging_sources : float, optional
        Number of photons emitted from sources during the main SED/image
        iteration in the case of monochromatic radiation transfer.
    imaging_dust : float, optional
        Number of photons emitted from dust during the main SED/image
        iteration in the case of monochromatic radiation transfer.
    raytracing_sources : float, optional
        Number of photons emitted from sources during the raytracing
        SED/image iteration, if applicable.
    raytracing_dust : float, optional
        Number of photons emitted from dust during the raytracing
        SED/image iteration, if applicable.
    stats : float, optional
        How often to print out statistics. Also used to determine the
        photon chunk size for MPI.
    '''
    # Reset number of photons to avoid issues when reading in models
    self.n_photons.clear()
    if self.n_iter == 0:
        # NOTE(review): message typo "interations" predates this review
        if initial is not None:
            raise Exception("[n_photons] initial should not be set since no initial interations are being computed")
        else:
            # NOTE(review): dead branch — n_photons was cleared above,
            # so 'initial' can never be present here.
            if 'initial' in self.n_photons:
                del self.n_photons['initial']
    else:
        if initial is None:
            raise Exception("[n_photons] initial should be set since the initial iterations are being computed")
        else:
            self.n_photons['initial'] = initial
    # raytracing iteration: both source and dust counts are required
    # when enabled, and forbidden when disabled
    if self.raytracing:
        if raytracing_sources is None:
            raise Exception("[n_photons] raytracing_sources needs to be set in raytracing mode")
        else:
            self.n_photons['raytracing_sources'] = raytracing_sources
        if raytracing_dust is None:
            raise Exception("[n_photons] raytracing_dust needs to be set in raytracing mode")
        else:
            self.n_photons['raytracing_dust'] = raytracing_dust
    else:
        if raytracing_sources is not None:
            raise Exception("[n_photons] raytracing_sources should not be set as raytracing is not being used")
        if raytracing_dust is not None:
            raise Exception("[n_photons] raytracing_dust should not be set as raytracing is not being used")
    # imaging iteration: monochromatic mode splits the photon budget
    # into source and dust photons; otherwise a single count is used
    if self._monochromatic:
        if imaging_sources is None:
            raise Exception("[n_photons] imaging_sources needs to be set in monochromatic mode")
        else:
            self.n_photons['last_sources'] = imaging_sources
        if imaging_dust is None:
            raise Exception("[n_photons] imaging_dust needs to be set in monochromatic mode")
        else:
            self.n_photons['last_dust'] = imaging_dust
        if imaging is not None:
            raise Exception("[n_photons] imaging should not be set in monochromatic mode")
    else:
        if imaging_sources is not None:
            raise Exception("[n_photons] imaging_sources should not be set as the monochromatic option is not being used")
        if imaging_dust is not None:
            raise Exception("[n_photons] imaging_dust should not be set as the monochromatic option is not being used")
        # NOTE(review): message typo "should bet set" predates this review
        if imaging is None:
            raise Exception("[n_photons] imaging should bet set")
        else:
            self.n_photons['last'] = imaging
    self.n_photons['stats'] = stats
def _read_n_photons(self, group):
    # Restore the photon-count dictionary from HDF5 attributes.
    # NOTE: relies on self.n_iter, self._monochromatic and
    # self.raytracing having been read first (see read_run_conf order).
    if self.n_iter != 0:
        self.n_photons['initial'] = group.attrs['n_initial_photons']
    if self._monochromatic:
        self.n_photons['last_sources'] = group.attrs['n_last_photons_sources']
        self.n_photons['last_dust'] = group.attrs['n_last_photons_dust']
    else:
        self.n_photons['last'] = group.attrs['n_last_photons']
    if self.raytracing:
        self.n_photons['raytracing_sources'] = group.attrs['n_ray_photons_sources']
        self.n_photons['raytracing_dust'] = group.attrs['n_ray_photons_dust']
    self.n_photons['stats'] = group.attrs['n_stats']
def _write_n_photons(self, group):
if self.n_photons == {}:
raise Exception("Photon numbers not set")
if self.n_iter == 0:
if 'initial' in self.n_photons and self.n_photons['initial'] is not None:
raise Exception("[n_photons] initial should not be set since no initial interations are being computed")
else:
if 'initial' in self.n_photons and self.n_photons['initial'] is not None:
group.attrs['n_initial_photons'] = self.n_photons['initial']
else:
raise Exception("[n_photons] initial should be set since the initial iterations are being computed")
if self._monochromatic:
if 'last_sources' in self.n_photons:
group.attrs['n_last_photons_sources'] = self.n_photons['last_sources']
else:
raise Exception("[n_photons] imaging_sources needs to be set in monochromatic mode")
if 'last_dust' in self.n_photons:
group.attrs['n_last_photons_dust'] = self.n_photons['last_dust']
else:
raise Exception("[n_photons] imaging_dust needs to be set in monochromatic mode")
if 'last' in self.n_photons:
raise Exception("[n_photons] imaging should not be set in monochromatic mode")
else:
if 'last_sources' in self.n_photons:
raise Exception("[n_photons] imaging_sources should not be set as the monochromatic option is not being used")
if 'last_dust' in self.n_photons:
raise Exception("[n_photons] imaging_dust should not be set as the monochromatic option is not being used")
if 'last' in self.n_photons:
group.attrs['n_last_photons'] = self.n_photons['last']
else:
raise Exception("[n_photons] imaging should bet set")
if self.raytracing:
if 'raytracing_sources' in self.n_photons:
group.attrs['n_ray_photons_sources'] = self.n_photons['raytracing_sources']
else:
raise Exception("[n_photons] raytracing_sources needs to be set in raytracing mode")
if 'raytracing_dust' in self.n_photons:
group.attrs['n_ray_photons_dust'] = self.n_photons['raytracing_dust']
else:
raise Exception("[n_photons] raytracing_dust needs to be set in raytracing mode")
else:
if 'raytracing_sources' in self.n_photons:
raise Exception("[n_photons] raytracing_sources should not be set as raytracing is not being used")
if 'raytracing_dust' in self.n_photons:
raise Exception("[n_photons] raytracing_dust should not be set as raytracing is not being used")
group.attrs['n_stats'] = self.n_photons['stats']
def set_raytracing(self, raytracing):
'''
Set whether to use raytracing for the non-scattered flux
If enabled, only scattered photons are peeled off in the iteration
following the initial iterations, and an additional final
iteration is carrried out, with raytracing of the remaining flux
(sources and thermal and non-thermal dust emission).
Parameters
----------
raytracing : bool
Whether or not to use raytracing in the final iteration
'''
self.raytracing = raytracing
def _read_raytracing(self, group):
self.raytracing = str2bool(group.attrs['raytracing'])
def _write_raytracing(self, group):
group.attrs['raytracing'] = bool2str(self.raytracing)
def set_max_interactions(self, inter_max, warn=True):
'''
Set the maximum number of interactions a photon can have.
Parameters
----------
inter_max : int
Maximum number of interactions for a single photon. This can be
used to prevent photons from getting stuck in very optically
thick regions, especially if the modified random walk is not
used.
warn : bool, optional
Whether to emit a warning whenever photons are killed for exceeding
the maximum number of iterations.
'''
self.n_inter_max = inter_max
self.n_inter_max_warn = warn
def _read_max_interactions(self, group):
self.n_inter_max = group.attrs['n_inter_max']
if 'n_inter_max_warn' in group.attrs:
self.n_inter_max_warn = str2bool(group.attrs['n_inter_max_warn'])
else:
self.n_inter_max_warn = True
def _write_max_interactions(self, group):
group.attrs['n_inter_max'] = self.n_inter_max
group.attrs['n_inter_max_warn'] = bool2str(self.n_inter_max_warn)
def set_max_reabsorptions(self, reabs_max, warn=True):
'''
Set the maximum number of successive reabsorptions by a source that a
photon can have.
Parameters
----------
reabs_max : int
Maximum number of reabsorptions for a single photon.
warn : bool, optional
Whether to emit a warning whenever photons are killed for exceeding
the maximum number of reabsorptions.
'''
self.n_reabs_max = reabs_max
self.n_reabs_max_warn = warn
def _read_max_reabsorptions(self, group):
self.n_reabs_max = group.attrs['n_reabs_max']
if 'n_reabs_max_warn' in group.attrs:
self.n_reabs_max_warn = str2bool(group.attrs['n_reabs_max_warn'])
else:
self.n_reabs_max_warn = True
def _write_max_reabsorptions(self, group):
group.attrs['n_reabs_max'] = self.n_reabs_max
group.attrs['n_reabs_max_warn'] = bool2str(self.n_reabs_max_warn)
def set_pda(self, pda):
'''
Set whether to use the Partial Diffusion Approximation (PDA)
If enabled, the PDA is used to compute the specific energy in cells
which have seen few or no photons by formally solving the diffusion
equations, using the cells with valid specific energies as boundary
conditions.
Parameters
----------
pda : bool
Whether or not to use the PDA
References
----------
Min et al. 2009, Astronomy and Astrophysics, 497, 155
'''
self.pda = pda
def _read_pda(self, group):
self.pda = str2bool(group.attrs['pda'])
def _write_pda(self, group):
group.attrs['pda'] = bool2str(self.pda)
def set_mrw(self, mrw, gamma=1.0, inter_max=1000, warn=True):
'''
Set whether to use the Modified Random Walk (MRW) approximation
If enabled, the MRW speeds up the propagation of photons in very
optically thick regions by locally setting up a spherical diffusion
region.
Parameters
----------
mrw : bool
Whether or not to use the MRW
gamma : float, optional
The parameter describing the starting criterion for the MRW.
The MRW is carried out if the distance to the closest cell is
larger than `gamma` times the Rosseland mean free path.
inter_max : int, optional
Maximum number of interactions during a single random walk.
This can be used to prevent photons from getting stuck in the
corners of cells in very optically thick regions, where the MRW
stars to become inefficient itself.
warn : bool, optional
Whether to emit a warning whenever photons are killed for exceeding
the maximum number of mrw steps.
References
----------
Min et al. 2009, Astronomy and Astrophysics, 497, 155
'''
self.mrw = mrw
self.mrw_gamma = gamma
self.n_inter_mrw_max = inter_max
self.n_inter_mrw_max_warn = warn
def _read_mrw(self, group):
self.mrw = str2bool(group.attrs['mrw'])
if self.mrw:
self.mrw_gamma = group.attrs['mrw_gamma']
self.n_inter_mrw_max = group.attrs['n_inter_mrw_max']
if 'n_inter_mrw_max_warn' in group.attrs:
self.n_inter_mrw_max_warn = str2bool(group.attrs['n_inter_mrw_max_warn'])
else:
self.n_inter_mrw_max_warn = True
def _write_mrw(self, group):
group.attrs['mrw'] = bool2str(self.mrw)
if self.mrw:
group.attrs['mrw_gamma'] = self.mrw_gamma
group.attrs['n_inter_mrw_max'] = self.n_inter_mrw_max
group.attrs['n_inter_mrw_max_warn'] = bool2str(self.n_inter_mrw_max_warn)
def set_convergence(self, convergence, percentile=100., absolute=0., relative=0.):
'''
Set whether to check for convergence over the initial iterations
If enabled, the code will check whether the specific energy absorbed
in each cell has converged. First, the ratio between the previous
and current specific energy absorbed in each cell is computed in each
cell, and the value at the specified percentile (`percentile`) is
found. Then, convergence has been achieved if this value is less than
an absolute threshold (`absolute`), and if it changed by less than
a relative threshold ratio (`relative`).
Parameters
----------
convergence : bool
Whether or not to check for convergence.
percentile : float, optional
The percentile at which to check for convergence.
absolute : float, optional
The abolute threshold below which the percentile value of the
ratio has to be for convergence.
relative : float, optional
The relative threshold below which the ratio in the percentile
value has to be for convergence.
'''
self.check_convergence = True
self.convergence_percentile = percentile
self.convergence_absolute = absolute
self.convergence_relative = relative
def _read_convergence(self, group):
self.check_convergence = str2bool(group.attrs['check_convergence'])
if self.check_convergence:
self.convergence_percentile = group.attrs['convergence_percentile']
self.convergence_absolute = group.attrs['convergence_absolute']
self.convergence_relative = group.attrs['convergence_relative']
def _write_convergence(self, group):
group.attrs['check_convergence'] = bool2str(self.check_convergence)
if self.check_convergence:
group.attrs['convergence_percentile'] = self.convergence_percentile
group.attrs['convergence_absolute'] = self.convergence_absolute
group.attrs['convergence_relative'] = self.convergence_relative
def set_kill_on_absorb(self, kill_on_absorb):
'''
Set whether to kill absorbed photons
Parameters
----------
kill_on_absorb : bool
Whether to kill absorbed photons
'''
self.kill_on_absorb = kill_on_absorb
def _read_kill_on_absorb(self, group):
self.kill_on_absorb = str2bool(group.attrs['kill_on_absorb'])
def _write_kill_on_absorb(self, group):
group.attrs['kill_on_absorb'] = bool2str(self.kill_on_absorb)
def set_kill_on_scatter(self, kill_on_scatter):
'''
Set whether to kill scattered photons
Parameters
----------
kill_on_scatter : bool
Whether to kill scattered photons
'''
self.kill_on_scatter = kill_on_scatter
def _read_kill_on_scatter(self, group):
if 'kill_on_scatter' in group.attrs:
self.kill_on_scatter = str2bool(group.attrs['kill_on_scatter'])
else:
self.kill_on_scatter = False
def _write_kill_on_scatter(self, group):
group.attrs['kill_on_scatter'] = bool2str(self.kill_on_scatter)
def set_forced_first_interaction(self, forced_first_interaction, algorithm='wr99', baes16_xi=0.5):
    '''
    Set whether to ensure that photons scatter at least once before
    escaping the grid.

    Parameters
    ----------
    forced_first_interaction : bool
        Whether to force at least one scattering before escaping the
        grid
    algorithm : 'wr99' or 'baes16'
        Which algorithm to use for the forced first interaction. The
        algorithms are described in the notes below.
    baes16_xi : float, optional
        For the 'baes16' algorithm, the relative weight of the
        truncated exponential versus the constant term; must lie in
        the range 0 to 1.

    Notes
    -----
    The 'wr99' algorithm refers to that described in Wood & Reynolds, 1999,
    The Astrophysical Journal, 525, 799. During normal un-forced photon
    propagation, we sample the optical depth from a probability density
    function (PDF) that follows exp(-tau) from tau=0 to infinity. The Wood
    and Reynolds algorithm modifies the PDF to be truncated at tau_escape
    (the optical depth for the photon to escape the grid). This ensures that
    all photons interact at least once before leaving the system. This
    algorithm is ideal for cases where the optical depths are very small
    and you are interested in making images. Note that this algorithm does
    not apply to the temperature calculation iterations since it is not
    needed there.

    The 'baes16' algorithm refers to that described in Baes et al. 2016,
    Astronomy and Astrophysics, 590, A55. In this algorithm, the PDF is
    the weighted combination of a truncated decaying exponential and a
    constant, which ensures that interactions will occur with a reasonable
    probability anywhere along the photon escape path. This is useful for
    cases where there are shadowed regions that otherwise would not receive
    many photons. The relative weight of the truncated exponential versus
    the constant is given by baes16_xi, which should be in the range 0 to 1.
    '''
    # validate arguments before mutating any state
    if baes16_xi < 0 or baes16_xi > 1:
        raise ValueError('baes16_xi should be in the range 0 to 1')
    if algorithm not in ('wr99', 'baes16'):
        raise ValueError('algorithm should be wr99 or baes16')
    self.forced_first_interaction = forced_first_interaction
    self.forced_first_interaction_algorithm = algorithm
    self.forced_first_interaction_baes16_xi = baes16_xi

# backwards-compatible alias for the pre-rename API
set_forced_first_scattering = set_forced_first_interaction
def _read_forced_first_interaction(self, group):
if 'forced_first_scattering' in group.attrs: # old API
self.forced_first_interaction = str2bool(group.attrs['forced_first_scattering'])
self.forced_first_interaction_algorithm = 'wr99'
self.forced_first_interaction_baes16_xi = 0.5
else:
self.forced_first_interaction = str2bool(group.attrs['forced_first_interaction'])
self.forced_first_interaction_algorithm = group.attrs['forced_first_interaction_algorithm'].decode()
self.forced_first_interaction_baes16_xi = group.attrs['forced_first_interaction_baes16_xi']
def _write_forced_first_interaction(self, group):
group.attrs['forced_first_interaction'] = bool2str(self.forced_first_interaction)
group.attrs['forced_first_interaction_algorithm'] = np.string_(self.forced_first_interaction_algorithm.encode('utf-8'))
group.attrs['forced_first_interaction_baes16_xi'] = self.forced_first_interaction_baes16_xi
def set_enforce_energy_range(self, enforce):
'''
Set how to deal with cells that have specific energy rates that are
below or above that provided in the mean opacities and emissivities.
Parameters
----------
enforce : bool
Whether or not to reset specific energies that are above or below
the range of values used to specify the mean opacities and
emissivities to the maximum or minimum value of the range. Setting
this to True modifies the energy in the simulation, but ensures
that the emissivities are consistent with the energy in the cells.
Setting this to False means that the total energy in the grid will
be correct, but that the emissivities may be inconsistent with the
energy in the cells (if an energy is out of range, the code will
pick the closest available one). In both cases, warnings will be
displayed to notify the user whether this is happening.
'''
self.enforce_energy_range = enforce
def _read_enforce_energy_range(self, group):
self.enforce_energy_range = str2bool(group.attrs['enforce_energy_range'])
def _write_enforce_energy_range(self, group):
group.attrs['enforce_energy_range'] = bool2str(self.enforce_energy_range)
def set_copy_input(self, copy):
'''
Set whether to copy the input data into the output file.
Parameters
----------
copy : bool
Whether to copy the input data into the output file (True) or
whether to link to it (False)
'''
self.copy_input = copy
def _read_copy_input(self, group):
self.copy_input = str2bool(group.attrs['copy_input'])
def _write_copy_input(self, group):
group.attrs['copy_input'] = bool2str(self.copy_input)
def set_output_bytes(self, io_bytes):
'''
Set whether to output physical quantity arrays in 32-bit or 64-bit
Parameters
----------
io_bytes : int
The number of bytes for the output. This should be either 4
(for 32-bit) or 8 (for 64-bit).
'''
self.physics_io_bytes = io_bytes
def _read_output_bytes(self, group):
self.physics_io_bytes = group.attrs['physics_io_bytes']
def _write_output_bytes(self, group):
group.attrs['physics_io_bytes'] = self.physics_io_bytes
def set_sample_sources_evenly(self, sample_sources_evenly):
'''
If set to 'True', sample evenly from all sources and apply
probability weight based on relative luminosities. Otherwise,
sample equal energy photons from sources with probability given by
relative luminosities.
Parameters
----------
sample_evenly : bool
Whether to sample different sources evenly
'''
self.sample_sources_evenly = sample_sources_evenly
def _read_sample_sources_evenly(self, group):
self.sample_sources_evenly = str2bool(group.attrs['sample_sources_evenly'])
def _write_sample_sources_evenly(self, group):
group.attrs['sample_sources_evenly'] = bool2str(self.sample_sources_evenly)
def set_specific_energy_type(self, specific_energy_type):
'''
Set whether to use the specific specific energy as an initial value or
an additional component at each iteration.
This only has an effect if a specific energy was specified during ``add_density_grid``.
Parameters
----------
specific_energy_type : str
Can be ``'initial'`` (use only as initial value) or
``'additional'`` (add at every iteration)
'''
if specific_energy_type in ['initial', 'additional']:
self.specific_energy_type = specific_energy_type
else:
raise ValueError("specific_energy_type should be one of 'initial' or 'additional'")
def _read_specific_energy_type(self, group):
if 'specific_energy_type' in group.attrs:
self.specific_energy_type = group.attrs['specific_energy_type'].decode('ascii')
else:
self.specific_energy_type = 'initial'
def _write_specific_energy_type(self, group):
group.attrs['specific_energy_type'] = np.string_(self.specific_energy_type.encode('utf-8'))
# TODO: only do this if the specific energy was specified during add_density_grid
def read_run_conf(self, group):  # not a class method because inherited
    '''
    Read the configuation in from an HDF5 group

    Parameters
    ----------
    group : h5py.File or h5py.Group
        The HDF5 group to read the configuration from
    '''
    self._read_propagation_check_frequency(group)
    self._read_seed(group)
    # ORDER MATTERS: n_iter and raytracing must be restored before
    # _read_n_photons(), which branches on both of them.
    self._read_n_initial_iterations(group)
    self._read_raytracing(group)
    self._read_n_photons(group)
    self._read_max_interactions(group)
    self._read_max_reabsorptions(group)
    self._read_pda(group)
    self._read_mrw(group)
    self._read_convergence(group)
    self._read_kill_on_absorb(group)
    self._read_kill_on_scatter(group)
    self._read_forced_first_interaction(group)
    self._read_output_bytes(group)
    self._read_sample_sources_evenly(group)
    self._read_enforce_energy_range(group)
    self._read_copy_input(group)
    self._read_specific_energy_type(group)
def write_run_conf(self, group):
    '''
    Writes out the configuation to an HDF5 group

    Parameters
    ----------
    group : h5py.File or h5py.Group
        The HDF5 group to write the configuration to
    '''
    # One _write_* call per _read_* call in read_run_conf; write order
    # does not matter since every value goes to a named attribute.
    self._write_propagation_check_frequency(group)
    self._write_seed(group)
    self._write_n_initial_iterations(group)
    self._write_n_photons(group)
    self._write_raytracing(group)
    self._write_max_interactions(group)
    self._write_max_reabsorptions(group)
    self._write_pda(group)
    self._write_mrw(group)
    self._write_convergence(group)
    self._write_kill_on_absorb(group)
    self._write_kill_on_scatter(group)
    self._write_forced_first_interaction(group)
    self._write_output_bytes(group)
    self._write_sample_sources_evenly(group)
    self._write_enforce_energy_range(group)
    self._write_copy_input(group)
    self._write_specific_energy_type(group)
class ImageConf(FreezableClass):
def __init__(self, sed=True, image=True):
    '''
    Initialize default image configuration

    Parameters
    ----------
    sed : bool
        Whether SEDs will be computed (enables default apertures)
    image : bool
        Whether images will be computed
    '''
    self.sed = sed
    self.image = image
    if self.sed:
        self.set_aperture_radii(1, np.inf, np.inf)  # reasonable default
    # Since there are no reasonable defaults for image size and limits,
    # as well as wavelength range, we don't set any. But we still have
    # to set the variables since new attributes cannot be created later.
    if self.image:
        self.n_x = self.n_y = None
        self.xmin = self.xmax = self.ymin = self.ymax = None
    # wavelength selection: n_wav/wav_min/wav_max in normal mode,
    # iwav_min/iwav_max (frequency indices) in monochromatic mode
    self.n_wav = None
    self.wav_min = None
    self.wav_max = None
    self.iwav_min = None
    self.iwav_max = None
    self.set_output_bytes(8)
    self.set_track_origin('no')
    self.set_uncertainties(False)
    self.set_stokes(False)
    self._set_monochromatic(False)
    self._filters = []  # list of Filter objects (see add_filter)
    self._freeze()
def add_filter(self, **kwargs):
    """
    Add a filter to internally convolve images or SED fluxes with
    transmission curves.

    Any keyword arguments are passed to :class:`hyperion.filters.Filter`.
    """
    new_filter = Filter(**kwargs)
    self._filters.append(new_filter)
    return new_filter
def _read_filters(self, group):
    # Restore the filter list from HDF5; None means filters were not
    # in use for this model.
    if 'use_filters' in group.attrs and str2bool(group.attrs['use_filters']):
        self._filters = [
            Filter.from_hdf5_group(group, 'filter_{0:05d}'.format(i + 1))
            for i in range(group.attrs['n_filt'])
        ]
    else:
        self._filters = None
def _write_filters(self, group):
    """Serialize the filter list (if any) to HDF5 attributes/groups."""
    if self.n_wav is not None:
        raise ValueError("Cannot specify both filters and wavelength range")
    # BUG FIX: _read_filters sets self._filters to None when filters are
    # not in use, but len(None) raised a TypeError here before the
    # None-guard was reached.  Treat None the same as an empty list.
    use_filters = self._filters is not None and len(self._filters) > 0
    group.attrs['use_filters'] = bool2str(use_filters)
    if self._filters is not None:
        group.attrs['n_filt'] = len(self._filters)
        for ifilter, filt in enumerate(self._filters):
            filt.to_hdf5_group(group, 'filter_{0:05d}'.format(ifilter + 1))
def set_output_bytes(self, io_bytes):
    '''
    Choose 32-bit or 64-bit output for images/SEDs.

    Parameters
    ----------
    io_bytes : int
        The number of bytes for the output: 4 (32-bit) or 8 (64-bit).
    '''
    self.io_bytes = io_bytes
def _read_output_bytes(self, group):
self.io_bytes = group.attrs['io_bytes']
def _write_output_bytes(self, group):
group.attrs['io_bytes'] = self.io_bytes
def set_image_size(self, n_x, n_y):
    '''
    Set the size of the output images

    Parameters
    ----------
    n_x, n_y : int
        The number of pixels in the x and y directions
    '''
    # validate both dimensions before storing either
    for label, value in (('n_x', n_x), ('n_y', n_y)):
        if type(value) != int:
            raise Exception("%s should be an integer" % label)
    self.n_x = n_x
    self.n_y = n_y
def _read_image_size(self, group):
self.n_x = group.attrs['n_x']
self.n_y = group.attrs['n_y']
def _write_image_size(self, group):
if self.n_x is None or self.n_y is None:
raise Exception("Image size has not been set")
group.attrs['n_x'] = self.n_x
group.attrs['n_y'] = self.n_y
def set_image_limits(self, xmin, xmax, ymin, ymax):
    '''
    Set the extent of the output images

    Parameters
    ----------
    xmin, xmax, ymin, ymax : float
        The extent of the images, which are either in cm (if using
        standard binned images or peeloff images) or in degrees (if
        using peeling off to a point inside the model).
    '''
    self.xmin = xmin
    self.xmax = xmax
    self.ymin = ymin
    self.ymax = ymax
def _read_image_limits(self, group):
self.xmin = group.attrs['x_min']
self.xmax = group.attrs['x_max']
self.ymin = group.attrs['y_min']
self.ymax = group.attrs['y_max']
def _write_image_limits(self, group):
if self.xmin is None or self.xmax is None or self.ymin is None or self.ymax is None:
raise Exception("Image limits have not been set")
group.attrs['x_min'] = self.xmin
group.attrs['x_max'] = self.xmax
group.attrs['y_min'] = self.ymin
group.attrs['y_max'] = self.ymax
    def set_aperture_range(self, n_ap, ap_min, ap_max):
        """Deprecated alias for :meth:`set_aperture_radii`, kept for
        backward compatibility."""
        warnings.warn("set_aperture_range has been renamed to set_aperture_radii", DeprecationWarning)
        return self.set_aperture_radii(n_ap, ap_min, ap_max)
def set_aperture_radii(self, n_ap, ap_min, ap_max):
'''
Set the range of apertures to use for SEDs/Images
Parameters
----------
n_ap : int
The number of apertures to compute SEDs in
ap_min, ap_max : float
The smallest and largest aperture to use, in cm
'''
if type(n_ap) is not int:
raise Exception("n_ap should be an integer")
self.n_ap = n_ap
self.ap_min = ap_min
self.ap_max = ap_max
def _read_aperture_range(self, group):
self.n_ap = group.attrs['n_ap']
self.ap_min = group.attrs['ap_min']
self.ap_max = group.attrs['ap_max']
def _write_aperture_range(self, group):
group.attrs['n_ap'] = self.n_ap
group.attrs['ap_min'] = self.ap_min
group.attrs['ap_max'] = self.ap_max
def _set_monochromatic(self, monochromatic, frequencies=None):
"""
Set whether the images are being computed in monochromatic mode.
This is not meant to be used directly by users.
"""
self._monochromatic = monochromatic
if monochromatic:
if self.n_wav is not None or self.wav_min is not None or self.wav_max is not None:
logger.warning("Removing non-monochromatic wavelength settings")
self.n_wav = None
self.wav_min = None
self.wav_max = None
self.set_wavelength_index_range(0, len(frequencies) - 1)
else:
if self.iwav_min is not None or self.iwav_max is not None:
logger.warning("Removing monochromatic wavelength settings")
self.iwav_min = None
self.iwav_max = None
def _read_monochromatic(self, group):
self._monochromatic = str2bool(group.attrs['monochromatic'])
def _write_monochromatic(self, group):
group.attrs['monochromatic'] = bool2str(self._monochromatic)
def set_wavelength_range(self, n_wav, wav_min, wav_max):
'''
Set the range of wavelengths to use for SEDs/Images
Parameters
----------
n_wav : int
The number of wavelengths to compute SEDs/Images for
wav_min, wav_max : float
The smallest and largest wavelength to use, in microns
'''
if type(n_wav) is not int:
raise Exception("n_wav should be an integer")
if self._monochromatic:
raise Exception("set_wavelength_range cannot be used in monochromatic mode")
self.n_wav = n_wav
self.wav_min = wav_min
self.wav_max = wav_max
def _read_wavelength_range(self, group):
self.n_wav = group.attrs['n_wav']
self.wav_min = group.attrs['wav_min']
self.wav_max = group.attrs['wav_max']
def _write_wavelength_range(self, group):
if self.n_wav is None:
raise Exception("Wavelength range has not been set")
group.attrs['n_wav'] = self.n_wav
if self.wav_min is None:
raise Exception("Wavelength minimum has not been set")
group.attrs['wav_min'] = self.wav_min
if self.wav_max is None:
raise Exception("Wavelength maximum has not been set")
group.attrs['wav_max'] = self.wav_max
def set_wavelength_index_range(self, iwav_min, iwav_max):
'''
Set the range of wavelengths to use for SEDs/Images
This is intended to be used when the monochromatic option is turned on.
Parameters
----------
iwav_min, iwav_max : int
The index of the first and last frequency to compute SEDs/Images
for. This is the index in the array of wavelengths used when
calling ``set_monochromatic``. These values should be zero-based.
'''
if not self._monochromatic:
raise Exception("set_frequency_index_range cannot be used when not in monochromatic mode")
self.iwav_min = iwav_min
self.iwav_max = iwav_max
def _read_wavelength_index_range(self, group):
# For backward-compability reasons, the variables are called ``inu``
# instead of ``iwav`` in the HDF5 files, but these are variable names
# the user will never be exposed to.
self.iwav_min = group.attrs['inu_min'] - 1
self.iwav_max = group.attrs['inu_max'] - 1
def _write_wavelength_index_range(self, group):
group.attrs['n_wav'] = self.iwav_max - self.iwav_min + 1
group.attrs['inu_min'] = self.iwav_min + 1
group.attrs['inu_max'] = self.iwav_max + 1
def set_track_origin(self, track_origin, n_scat=None):
'''
Set whether to track the origin of the photons. The options are:
'no' - does not split up the images/SEDs by origin
'basic' - this splits up the images/SEDs into:
* The photons last emitted from a source and did not undergo
any subsequent interactions.
* The photons last emitted from dust and did not undergo any
subsequent interactions
* The photons last emitted from a source and subsequently
scattered
* The photons last emitted from dust and subsequently
scattered
'detailed' - as above, but in each category, the origin is further
refined into each individual source and dust type.
'scatterings' - as for 'basic', but splitting the scatterings into the
images for 1, 2, 3, ..., ``n_scat`` scatterings since the last
emission.
Parameters
----------
track_origin : bool
Whether to track the origin of the photons as described above.
'''
if track_origin is True:
track_origin = 'basic'
elif track_origin is False:
track_origin = 'no'
elif track_origin not in ['no', 'basic', 'detailed', 'scatterings']:
raise Exception("track_origin should be one of no/basic/detailed/scatterings")
self.track_origin = track_origin
self.track_n_scat = n_scat or 0
def _read_track_origin(self, group):
self.track_origin = group.attrs['track_origin'].decode('ascii')
if 'track_n_scat' in group.attrs:
self.track_n_scat = group.attrs['track_n_scat']
else:
self.track_n_scat = 0
def _write_track_origin(self, group):
group.attrs['track_origin'] = np.string_(self.track_origin.encode('utf-8'))
group.attrs['track_n_scat'] = self.track_n_scat
def set_uncertainties(self, uncertainties):
'''
Set whether to compute uncertainties on the images/SEDs
Parameters
----------
uncertainties : bool
Whether to compute uncertainties on the images/SEDs.
'''
self.uncertainties = uncertainties
def _read_uncertainties(self, group):
self.uncertainties = str2bool(group.attrs['uncertainties'])
def _write_uncertainties(self, group):
group.attrs['uncertainties'] = bool2str(self.uncertainties)
def set_stokes(self, stokes):
'''
Set whether to save the full Stokes vector for the images/SEDs.
If set to `False`, only the I component is saved.
Parameters
----------
stokes : bool
Whether to save the full Stokes vector for the images/SEDs.
'''
self.stokes = stokes
def _read_stokes(self, group):
if 'compute_stokes' in group.attrs:
self.stokes = str2bool(group.attrs['compute_stokes'])
else:
self.stokes = True
def _write_stokes(self, group):
group.attrs['compute_stokes'] = bool2str(self.stokes)
@classmethod
def read(cls, group):
self = cls()
self._read_viewing_info(group)
self._read_main_info(group)
return self
    def write(self, group):
        """Write the full configuration (viewing + main info) to `group`."""
        self._write_viewing_info(group)
        self._write_main_info(group)
    def _read_viewing_info(self, group):
        # Overridden by subclasses that have viewing-specific settings.
        pass
    def _validate_viewing_info(self):
        # Overridden by subclasses that have viewing-specific settings.
        pass
    def _write_viewing_info(self, group):
        # Overridden by subclasses that have viewing-specific settings.
        pass
def _read_main_info(self, group):
self.sed = str2bool(group.attrs['compute_sed'])
self.image = str2bool(group.attrs['compute_image'])
if self.sed:
self._read_aperture_range(group)
if self.image:
self._read_image_size(group)
self._read_image_limits(group)
self._read_monochromatic(group)
if self._monochromatic:
self._read_wavelength_index_range(group)
else:
if 'use_filters' in group.attrs and group.attrs['use_filters']:
self._read_filters(group)
else:
self._read_wavelength_range(group)
self._read_output_bytes(group)
self._read_track_origin(group)
self._read_uncertainties(group)
self._read_stokes(group)
def _write_main_info(self, group):
group.attrs['compute_sed'] = bool2str(self.sed)
group.attrs['compute_image'] = bool2str(self.image)
if self.sed:
self._write_aperture_range(group)
if self.image:
self._write_image_size(group)
self._write_image_limits(group)
self._write_monochromatic(group)
if self._monochromatic:
self._write_wavelength_index_range(group)
else:
if len(self._filters) > 0:
self._write_filters(group)
else:
self._write_wavelength_range(group)
self._write_output_bytes(group)
self._write_track_origin(group)
self._write_uncertainties(group)
self._write_stokes(group)
class BinnedImageConf(ImageConf):
    """Image/SED configuration for viewing angles computed in angular bins."""

    def __init__(self, n_theta=None, n_phi=None, **kwargs):
        self.n_theta = n_theta
        self.n_phi = n_phi
        ImageConf.__init__(self, **kwargs)

    def set_viewing_bins(self, n_theta, n_phi):
        '''
        Set the number of viewing angles to use

        Parameters
        ----------
        n_theta, n_phi
            The number of viewing angles to use in the theta and phi
            directions respectively.
        '''
        self.n_theta = n_theta
        self.n_phi = n_phi

    def _read_viewing_bins(self, group):
        """Restore the number of viewing bins from an HDF5 group."""
        self.n_theta = group.attrs['n_theta']
        self.n_phi = group.attrs['n_phi']

    def _write_viewing_bins(self, group):
        """Write the number of viewing bins to an HDF5 group."""
        group.attrs['n_theta'] = self.n_theta
        group.attrs['n_phi'] = self.n_phi

    def _read_viewing_info(self, group):
        self._read_viewing_bins(group)

    def _write_viewing_info(self, group):
        self._write_viewing_bins(group)
class PeeledImageConf(ImageConf):
    def __init__(self, **kwargs):
        # Start from neutral defaults; each setter validates its own input.
        self.set_viewing_angles([], [])
        self.set_inside_observer(None)
        self.set_ignore_optical_depth(False)
        self.set_peeloff_origin(None)
        self.set_depth(-np.inf, np.inf)  # no depth restriction by default
        ImageConf.__init__(self, **kwargs)
def set_viewing_angles(self, theta, phi):
'''
Set the viewing angles to use
Parameters
----------
theta, phi : iterable of floats
The viewing angles to compute SEDs for.
Examples
--------
Set viewing angles using lists of well-defined angles:
>>> image.set_viewing_angles([30.,55.,87.],[22.,44.,34.])
Set viewing angles using generated numpy arrays:
>>> image.set_viewing_angles(np.linspace(0.,90.,10), np.repeat(30.,10))
Set a single viewing direction:
>>> image.set_viewing_angles([77.],[25.])
'''
if isinstance(theta, (list, tuple)):
theta = np.array(theta)
if isinstance(phi, (list, tuple)):
phi = np.array(phi)
if not is_numpy_array(theta) or theta.ndim != 1:
raise ValueError("theta should be a 1-D sequence")
if not is_numpy_array(phi) or phi.ndim != 1:
raise ValueError("phi should be a 1-D sequence")
if len(theta) != len(phi):
raise ValueError("Length of theta and phi arrays do not match")
self.viewing_angles = list(zip(theta, phi))
self.n_view = len(self.viewing_angles)
def _read_viewing_angles(self, group):
angles = group['angles']
self.viewing_angles = list(zip(angles['theta'], angles['phi']))
def _write_viewing_angles(self, group):
group.attrs['n_view'] = len(self.viewing_angles)
group.create_dataset('angles', data=np.array(self.viewing_angles, dtype=[('theta', float), ('phi', float)]))
def set_inside_observer(self, position):
'''
Set the observer to be inside the model
Parameters
----------
position : tuple of 3 floats
The spatial coordinates of the observer, in cm
'''
if isinstance(position, (list, tuple)):
position = np.array(position)
if position is not None:
if not is_numpy_array(position) or position.ndim != 1 or len(position) != 3:
raise ValueError("position should be a 1-D sequence with 3 elements")
self.inside_observer = position
def _read_inside_observer(self, group):
self.inside_observer = (group.attrs['observer_x'],
group.attrs['observer_y'],
group.attrs['observer_z'])
def _write_inside_observer(self, group):
group.attrs['observer_x'] = self.inside_observer[0]
group.attrs['observer_y'] = self.inside_observer[1]
group.attrs['observer_z'] = self.inside_observer[2]
    def set_ignore_optical_depth(self, ignore_optical_depth):
        '''
        Ignore optical depth when creating images.

        This is useful in cases where one wants to understand how much the
        optical depth is affecting a set of images.

        Parameters
        ----------
        ignore_optical_depth : bool
            Whether to ignore optical depth effects (default is False)
        '''
        self.ignore_optical_depth = ignore_optical_depth
def _read_ignore_optical_depth(self, group):
self.ignore_optical_depth = str2bool(group.attrs['ignore_optical_depth'])
def _write_ignore_optical_depth(self, group):
group.attrs['ignore_optical_depth'] = bool2str(self.ignore_optical_depth)
def set_peeloff_origin(self, position):
'''
Set the origin for the peeloff.
Parameters
----------
position : tuple of 3 floats
The coordinates of the origin of the peeling-off, in cm
'''
if isinstance(position, (list, tuple)):
position = np.array(position)
if position is not None:
if not is_numpy_array(position) or position.ndim != 1 or len(position) != 3:
raise ValueError("position should be a 1-D sequence with 3 elements")
self.peeloff_origin = position
def _read_peeloff_origin(self, group):
self.peeloff_origin = (group.attrs['peeloff_x'],
group.attrs['peeloff_y'],
group.attrs['peeloff_z'])
def _write_peeloff_origin(self, group):
group.attrs['peeloff_x'] = self.peeloff_origin[0]
group.attrs['peeloff_y'] = self.peeloff_origin[1]
group.attrs['peeloff_z'] = self.peeloff_origin[2]
def set_depth(self, d_min, d_max):
'''
Set the minimum and maximum distance between which photons should be
peeled off.
By default, d_min and d_max are set to -inf and +inf respectively.
This option can be useful to compute for example models in a spherical
polar grid, but including only the photons in a slab.
Parameters
----------
d_min, d_max : float
The minimum and maximum distance between which photons should be
peeled-off. Distance increases away from the observer, and d_min
and d_max are the distances closest and furthest from the observer
respectively. The origin is the position of the observer if inside
the model, otherwise it is the origin of the grid.
'''
self.d_min = d_min
self.d_max = d_max
def _read_depth(self, group):
self.d_min = group.attrs['d_min']
self.d_max = group.attrs['d_max']
def _write_depth(self, group):
group.attrs['d_min'] = self.d_min
group.attrs['d_max'] = self.d_max
def _read_viewing_info(self, group):
if str2bool(group.attrs['inside_observer']):
self._read_inside_observer(group)
else:
self._read_peeloff_origin(group)
self._read_ignore_optical_depth(group)
self._read_viewing_angles(group)
self._read_depth(group)
    def _write_viewing_info(self, group):
        """Write observer/peeloff settings, angles and depth range.

        Inside-observer and peeloff-origin modes are mutually exclusive; the
        mode also determines the validation applied to the image limits and
        the depth range.
        """
        if self.peeloff_origin is not None and self.inside_observer is not None:
            raise Exception("Cannot specify inside observer and peeloff origin at the same time")
        if self.inside_observer is not None:
            group.attrs['inside_observer'] = bool2str(True)
            self._write_inside_observer(group)
            # Default to a single viewing direction if none was given
            if self.viewing_angles == []:
                self.set_viewing_angles([90.], [0.])
            # For an observer inside the model, the x axis is in degrees of
            # longitude, which increases towards the left (xmin > xmax)
            if self.image and self.xmin < self.xmax:
                raise ValueError("longitudes should increase towards the left for inside observers")
            if self.d_min < 0.:
                # -inf means "unset" and is silently clamped to 0; any other
                # negative value is an error
                if self.d_min != -np.inf:
                    raise ValueError("Lower limit of depth should be positive for inside observer")
                self.d_min = 0.
            if self.d_max < 0.:
                raise ValueError("Upper limit of depth should be positive for inside observer")
        elif len(self.viewing_angles) > 0:
            group.attrs['inside_observer'] = bool2str(False)
            # Default the peeloff origin to the grid origin
            if self.peeloff_origin is None:
                self.set_peeloff_origin((0., 0., 0.))
            self._write_peeloff_origin(group)
        else:
            raise Exception("Need to specify either observer position, or viewing angles")
        self._write_ignore_optical_depth(group)
        self._write_viewing_angles(group)
        self._write_depth(group)
/MOM-Tapyr-1.6.2.tar.gz/MOM-Tapyr-1.6.2/Entity.py |
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
from _MOM import MOM
from _TFL import TFL
import _MOM._Attr.Kind
import _MOM._Attr.Manager
import _MOM._Attr.Spec
import _MOM._Attr.Type
import _MOM._Meta.M_Entity
import _MOM._Pred.Kind
import _MOM._Pred.Manager
import _MOM._Pred.Spec
import _MOM._Pred.Type
import _MOM._SCM.Change
from _MOM._Attr.Type import *
from _MOM._Attr.Date_Time import *
from _MOM._Attr import Attr
from _MOM._Pred import Pred
import _TFL._Meta.Once_Property
import _TFL.Decorator
import _TFL.defaultdict
import _TFL.Sorted_By
import _TFL.Undef
from _TFL.I18N import _, _T, _Tn
import _TFL.json_dump
from _TFL.object_globals import object_globals
from _TFL.portable_repr import portable_repr
from _TFL.predicate import paired
from _TFL.pyk import pyk
import itertools
import logging
import traceback
class Entity (TFL.Meta.BaM (TFL.Meta.Object, metaclass = MOM.Meta.M_Entity)) :
    """Internal root class for MOM entities with and without identity."""
    ### Class-level defaults; several of these are overridden by the meta
    ### machinery (see the trailing comments on the individual lines).
    PNS = MOM
    auto_derived_p = False ### Set by meta machinery
    auto_derived_root = None ### Set by meta machinery
    deprecated_attr_names = {}
    electric = False
    has_identity = False
    init_finished = False
    is_partial = True
    is_relevant = False
    is_used = True
    polymorphic_epk = False ### Set by meta machinery
    polymorphic_epks = False ### Set by meta machinery
    relevant_root = None ### Set by meta machinery
    show_in_ui = True ### Modified by meta machinery
    show_in_ui_T = True ### Default for descendent classes
    show_package_prefix = False ### Include `PNS` in `ui_name` ???
    spk = None
    spk_attr_name = None ### Name of `surrogate primary key` attribute
    ui_display_sep = ", "
    x_locked = False
    _app_globals = {}
    _attrs_to_update_combine = ("deprecated_attr_names", )
    _home_scope = None
    _init_raw_default = False
    _Reload_Mixin_ = None
    class _Attributes (MOM.Attr.Spec) :
        ### Attribute specification shared by all entities; each nested
        ### class defines one computed attribute.
        class FO (A_Blob) :
            """`FO.foo` gives the `ui_display` of attribute `foo`, if defined,
            or the raw value of `foo`, otherwise.
            """
            kind = Attr.Auto_Cached
            def computed (self, obj) :
                ### `obj._FO_` is the formatter class defined on `Entity`
                return obj._FO_ (obj)
            # end def computed
        # end class FO
        class playback_p (A_Boolean) :
            ### True while the scope is replaying recorded changes
            kind = Attr.Computed
            hidden = True
            def computed (self, obj) :
                return obj.home_scope.playback_p
            # end def computed
        # end class playback_p
        class ui_display (A_String) :
            """Display in user interface"""
            kind = Attr.Computed
            max_length = 0
            def computed (self, obj) :
                return obj.ui_display_format % obj.FO
            # end def computed
        # end class ui_display
        class ui_repr (A_String) :
            """Repr for user interface"""
            kind = Attr.Computed
            max_length = 0
            def computed (self, obj) :
                ### NOTE(review): `self` here is the attribute-kind instance,
                ### not the entity; `self.sig_attr`/`get_raw (self)` look like
                ### they should use `obj` instead -- verify against callers
                return "%s %s" % \
                    ( obj.type_name
                    , portable_repr
                    (tuple (a.get_raw (self) for a in self.sig_attr))
                    )
            # end def computed
        # end class ui_repr
    # end class _Attributes
    class _Predicates (MOM.Pred.Spec) :
        ### Predicate specification; empty here, filled in by descendents
        pass
    # end class _Predicates
@pyk.adapt__str__
class _FO_ (TFL.Meta.Object) :
"""Formatter for attributes of object."""
undefined = TFL.Undef ("value")
def __init__ (self, obj) :
self.__obj = obj
# end def __init__
def __call__ (self, name, value = undefined) :
getter = getattr (TFL.Getter, name)
obj = self.__obj
try :
names = name.split (".")
attr = obj.attributes [names [0]]
obj, attr, value = attr.FO_nested (obj, names [1:], value)
except (AttributeError, LookupError) :
result = self._get_repr (name, getter)
else :
if isinstance (attr, MOM.Attr.Kind) :
if value is self.undefined :
value = attr.get_value (obj)
get_raw = lambda : attr.get_raw (obj)
else :
def get_raw () :
result = attr.attr.as_string (value)
if isinstance (value, pyk.string_types) :
result = portable_repr (result)
elif result == "" :
result = "None"
return result
if isinstance (value, Entity) :
return value.FO
else :
uid = getattr (value, "ui_display", None)
if uid :
result = uid
else :
result = get_raw ()
else :
if value is self.undefined :
result = self._get_repr (name, getter)
else :
result = value
return result
# end def __call__
def _get_repr (self, name, getter) :
try :
result = portable_repr (getter (self.__obj))
except (AttributeError, LookupError) :
if "." in name :
result = ""
else :
raise
# end def _get_repr
def __getattr__ (self, name) :
if name.startswith ("__") and name.endswith ("__") :
### Placate inspect.unwrap of Python 3.5,
### which accesses `__wrapped__` and eventually throws
### `ValueError`
return getattr (self.__super, name)
result = self (name)
if "." not in name :
setattr (self, name, result)
return result
# end def __getattr__
def __getitem__ (self, key) :
try :
return self.__getattr__ (key)
except AttributeError :
raise KeyError (key)
# end def __getitem__
def __str__ (self) :
return self.__obj.ui_display
# end def __str__
# end class _FO_
    @TFL.json_dump.default.add_type (_FO_)
    def __json_encode_FO_ (fo) :
        ### JSON-encode a formatter by its text form (`ui_display`)
        return pyk.text_type (fo)
    # end def __json_encode_FO_
    @property
    def home_scope (self) :
        ### fall back to the currently active scope if none was assigned
        return self._home_scope or MOM.Scope.active
    # end def home_scope
    @home_scope.setter
    def home_scope (self, value) :
        self._home_scope = value
    # end def home_scope
@property
def raw_attr_dict (self) :
"""Dictionary with raw attr values of :attr:`user_attr` that
`has_substance`.
"""
return dict \
( (a.name, a.get_raw (self))
for a in self.user_attr if a.has_substance (self)
)
# end def raw_attr_dict
    @property
    def recordable_attrs (self) :
        ### attributes whose changes are recorded in the change history
        return self.__class__.m_recordable_attrs
    # end def recordable_attrs
@property
def ui_display_format (self) :
"""Format used for :attr:`ui_display`."""
return self.ui_display_sep.join \
( "%%(%s)s" % a.name for a in self.sig_attr
if a.has_substance (self)
)
# end def ui_display_format
    @TFL.Meta.Class_Property
    @TFL.Meta.Class_and_Instance_Method
    def ui_name_T (soc) :
        """Localized `ui_name`."""
        ### Must not be a `Once_Property`, because `language` can change
        ### (`soc` can be either the class or an instance)
        return _T (soc.ui_name)
    # end def ui_name_T
    def __new__ (cls, * args, ** kw) :
        ### refuse to instantiate partial (abstract) entity types
        if cls.is_partial :
            raise MOM.Error.Partial_Type (cls.ui_name_T)
        result = cls.__c_super.__new__ (cls)
        result._home_scope = kw.get ("scope")
        result._init_meta_attrs ()
        return result
    # end def __new__
    def __init__ (self, * args, ** kw) :
        ### `scope` was already consumed by `__new__`
        self._init_attributes ()
        kw.pop ("scope", None)
        self._main__init__ (* args, ** kw)
    # end def __init__
    @classmethod
    def from_attr_pickle_cargo (cls, scope, cargo) :
        """Create new entity in `scope` based on pickle `cargo`.

        Bypasses `__init__` and the usual argument processing.
        """
        result = cls.__new__ (cls, scope = scope)
        result._init_attributes ()
        result.set_pickle_cargo (cargo)
        return result
    # end def from_attr_pickle_cargo
### provide read-only access to this class' __init__
_MOM_Entity__init__ = property (lambda self, __init__ = __init__ : __init__)
    def after_init (self) :
        """Is called by the UI after an instance of the class was
        (successfully) created. `after_init` can create additional objects
        automatically to ease the life of the interactive user of the
        application.
        """
        ### default: do nothing; descendents may override
        pass
    # end def after_init
def as_attr_pickle_cargo (self) :
"""Dictionary with pickle cargo of :attr:`attributes` that are
`save_to_db`.
"""
return dict \
( (a.name, a.get_pickle_cargo (self))
for a in pyk.itervalues (self.attributes) if a.save_to_db
)
# end def as_attr_pickle_cargo
def as_code (self) :
return "%s (%s)" % (self.type_name, self.attr_as_code ())
# end def as_code
def attr_as_code (self) :
uai = self.user_attr_iter ()
return ", ".join ("%s = %s" % (a.name, a.as_code (v)) for (a, v) in uai)
# end def attr_as_code
    @classmethod
    def attr_prop (cls, name) :
        """Return the property of the attribute named `name`.

        Return None if there is no such attribute.
        """
        return cls.attributes.get (name)
    # end def attr_prop
def attr_value_maybe (self, name) :
attr = self.attributes.get (name)
if attr :
return attr.get_value (self)
# end def attr_value_maybe
    @TFL.Meta.Class_and_Instance_Method
    def cooked_attrs (soc, kw, on_error = None) :
        """Dictionary `kw` converted to cooked values.

        Unknown attribute names in `kw` are silently ignored; conversion
        errors are passed to `on_error` (raised, by default).
        """
        attributes = soc.attributes
        result = {}
        if on_error is None :
            on_error = soc._raise_attr_error
        for name, value in pyk.iteritems (kw) :
            attr = attributes.get (name)
            if attr :
                try :
                    try :
                        result [name] = attr.from_string (value)
                    except MOM.Error.Attribute_Value as exc :
                        ### already the right error type --> re-raise as-is
                        raise
                    except (TypeError, ValueError) as exc :
                        ### wrap generic conversion errors
                        raise MOM.Error.Attribute_Value \
                            (soc, name, value, attr.kind, exc)
                except Exception as exc :
                    on_error (exc)
        return result
    # end def cooked_attrs
def globals (self) :
return self.__class__._app_globals or object_globals (self)
# end def globals
def has_substance (self) :
"""TRUE if there is at least one attribute with a non-default value."""
return any (a.has_substance (self) for a in self.user_attr)
# end def has_substance
def is_correct (self, attr_dict = {}, _kind = "object") :
"""True if predicates of `_kind` are satisfied for `attr_dict`."""
ews = self._pred_man.check_kind (_kind, self, attr_dict)
return not ews
# end def is_correct
def raw_attr (self, name) :
"""Returns the raw value of attribute `name`, i.e., the value entered
by the user into the object editor.
"""
attr = self.attributes.get (name)
if attr :
return attr.get_raw (self) or u""
# end def raw_attr
def record_attr_change (self, kw) :
if kw and self._home_scope :
self.home_scope.record_change (self.SCM_Change_Attr, self, kw)
# end def record_attr_change
    def reload_from_pickle_cargo (self, cargo) :
        """Re-initialize `self` from pickle `cargo` (used when reloading)."""
        ### temporarily reopen the init phase so kinds can re-run init hooks
        self.init_finished = False
        self._init_pending = []
        self._init_attributes ()
        self.set_pickle_cargo (cargo)
        self._finish__init__ ()
    # end def reload_from_pickle_cargo
    def reset_syncable (self) :
        ### delegate to the attribute manager
        self._attr_man.reset_syncable ()
    # end def reset_syncable
def set (self, _pred_kinds = None, on_error = None, ** kw) :
"""Set attributes from cooked values specified in `kw`."""
assert "raw" not in kw
ukw = dict (self._kw_undeprecated (kw))
return self._set_ckd_inner (_pred_kinds, on_error, ** kw)
# end def set
    def set_attr_iter (self, attr_dict, on_error = None) :
        """Iterate over settable (name, value, attr) triples of `attr_dict`.

        Unknown or non-settable attributes are reported via `on_error`
        instead of being yielded; the pseudo-name "raw" is ignored.
        """
        attr_get = self.E_Type.attr_prop
        if on_error is None :
            on_error = self._raise_attr_error
        for name, val in pyk.iteritems (attr_dict) :
            attr = attr_get (name)
            if attr is not None :
                if not attr.is_settable :
                    on_error \
                        (MOM.Error.Attribute_Set (self, name, val, attr.kind))
                else :
                    yield (name, val, attr)
            elif name != "raw" :
                on_error (MOM.Error.Attribute_Unknown (self, name, val))
    # end def set_attr_iter
def set_pickle_cargo (self, cargo) :
attr_get = self.attributes.get
for k, v in pyk.iteritems (cargo) :
attr = attr_get (k)
### XXX Add legacy lifting
if attr :
attr.set_pickle_cargo (self, v)
# end def set_pickle_cargo
    def set_raw (self, _pred_kinds = None, on_error = None, ** kw) :
        """Set attributes from raw values specified in `kw`."""
        assert "raw" not in kw
        ### map deprecated attribute names, then apply attribute polishers
        ukw = dict (self._kw_undeprecated (kw))
        pkw = self._kw_polished (ukw, on_error)
        return self._set_raw_inner (_pred_kinds, on_error, ** pkw)
    # end def set_raw
    def sync_attributes (self) :
        """Synchronizes all user attributes with the values from
           _raw_attr and all sync-cached attributes.
        """
        self._attr_man.sync_attributes (self)
    # end def sync_attributes
def user_attr_iter (self) :
user_attr = self.user_attr
return ((a, a.get_value (self)) for a in user_attr if a.to_save (self))
# end def user_attr_iter
    def _finish__init__ (self) :
        """Redefine this to perform additional initialization."""
        assert not self.init_finished
        self.init_finished = True
        ### run callables queued while init was still pending
        for ip in self._init_pending :
            ip ()
        try :
            del self._init_pending
        except AttributeError :
            ### an `ip ()` above may already have removed it
            pass
    # end def _finish__init__
    def _init_attributes (self) :
        ### (re-) initialize all attributes to their default state
        self._attr_man.reset_attributes (self)
    # end def _init_attributes
    def _init_meta_attrs (self) :
        ### set up the attribute and predicate managers for this instance
        self._init_pending = []
        self._attr_man = MOM.Attr.Manager (self._Attributes)
        self._pred_man = MOM.Pred.Manager (self._Predicates)
    # end def _init_meta_attrs
def _kw_check_required (self, * args, ** kw) :
needed = tuple (m.name for m in self.required)
missing = tuple (k for k in needed if kw.get (k) is None)
if missing :
on_error = kw.pop ("on_error") or self._raise_attr_error
all_needed = tuple (m.name for m in self.primary_required) + needed
error = MOM.Error.Required_Missing \
(self.__class__, all_needed, missing, args, kw)
on_error (error)
raise error
# end def _kw_check_required
    def _kw_check_predicates (self, _kinds = None, on_error = None, ** kw) :
        """Check predicates of `_kinds` for `kw`; report failures via
           `on_error` (raised, by default). Return True if all hold.
        """
        result = True
        errors = []
        if _kinds is None :
            _kinds = ["object"]
        for _kind in _kinds :
            kr = self.is_correct (kw, _kind)
            if not kr :
                errors.extend (self._pred_man.errors [_kind])
                result = False
        if not result :
            if on_error is None :
                on_error = self._raise_attr_error
            on_error (MOM.Error.Invariants (errors))
        return result
    # end def _kw_check_predicates
    def _kw_raw_check_predicates (self, _kinds = None, on_error = None, ** kw) :
        """Convert raw values in `kw` and check predicates.

        Returns `(result, to_do)` where `result` tells if all predicates
        hold and `to_do` is a list of `(attr, raw_value, cooked_value)`
        triples to apply; failed conversions appear as `(attr, u"", None)`.
        """
        Err = MOM.Error
        ckd_kw = {}
        to_do = []
        errors = []
        if on_error is None :
            on_error = self._raise_attr_error
        for name, val, attr in self.set_attr_iter (kw, on_error) :
            if val is not None :
                try :
                    ckd_kw [name] = ckd_val = attr.from_string (val, self)
                except (Err.Attribute_Value, Err.Attribute_Syntax) as exc :
                    errors.append (exc)
                    to_do.append ((attr, u"", None))
                except Err.Invariants as exc :
                    exc.embed (self, name, attr)
                    errors.append (exc)
                    to_do.append ((attr, u"", None))
                except (TypeError, ValueError, Err.Error) as exc :
                    errors.append \
                        ( MOM.Error.Attribute_Value
                            (self, name, val, attr.kind, exc)
                        )
                    if __debug__ :
                        logging.exception \
                            ( "\n %s %s, attribute conversion error "
                              "%s: %s [%s]"
                            , self.type_name, self, name, val, type (val)
                            )
                    to_do.append ((attr, u"", None))
                except Exception as exc :
                    ### NOTE(review): this branch neither records an error
                    ### nor appends to `to_do` --- the attribute is silently
                    ### dropped (and only logged in debug mode); confirm
                    ### whether that is intentional
                    if __debug__ :
                        logging.exception \
                            ( "\n %s %s, attribute conversion error %s: %s [%s]"
                            , self.type_name, self, name, val, type (val)
                            )
                else :
                    to_do.append ((attr, val, ckd_val))
            else :
                ### a `None` raw value means: reset the attribute
                to_do.append ((attr, u"", None))
                ckd_kw [name] = None
        if errors :
            on_error (MOM.Error.Invariants (errors))
        result = self._kw_check_predicates \
            (_kinds = _kinds, on_error = on_error, ** ckd_kw)
        return result, to_do
    # end def _kw_raw_check_predicates
    @TFL.Meta.Class_and_Instance_Method
    def _kw_polished (soc, attr_dict, on_error = None) :
        """Apply attribute polishers to the raw values in `attr_dict`;
           return the polished dictionary.
        """
        Err = MOM.Error
        errors = []
        result = attr_dict
        ### `soc` can be a class or an instance; polishers only get an
        ### `essence` when called on an instance
        self = soc if isinstance (soc, Entity) else None
        if on_error is None :
            on_error = soc._raise_attr_error
        for attr in soc.polish_attr :
            if attr.name in result or attr.polisher.polish_empty :
                val = result.get (attr.name)
                try :
                    result = attr.polisher \
                        ( attr, result
                        , essence = self
                        , picky = True
                        , value = val
                        )
                except (Err.Attribute_Value, Err.Attribute_Syntax) as exc :
                    errors.append (exc)
                except Err.Invariants as exc :
                    if self is not None :
                        exc.embed (self, attr.name, attr)
                    errors.append (exc)
                except (TypeError, ValueError, Err.Error) as exc :
                    errors.append \
                        ( Err.Attribute_Value
                            (soc, attr.name, val, attr.kind, exc)
                        )
                except Exception as exc :
                    ### NOTE(review): unexpected polisher errors are only
                    ### logged (in debug mode), not reported --- confirm
                    ### whether that best-effort behavior is intentional
                    if __debug__ :
                        logging.exception \
                            ( "\n %s %s, attribute polisher error %s: %s [%s]"
                            , soc.type_name, self, attr.name, val, type (val)
                            )
        if errors :
            on_error (MOM.Error.Invariants (errors))
        return result
    # end def _kw_polished
@classmethod
def _kw_undeprecated (cls, attr_dict) :
for name, val in pyk.iteritems (attr_dict) :
cnam = cls.deprecated_attr_names.get (name, name)
yield cnam, val
# end def _kw_undeprecated
def _print_attr_err (self, exc) :
if debug:
logging.exception (repr (self))
print (self, exc)
# end def _print_attr_err
    @TFL.Meta.Class_and_Instance_Method
    def _raise_attr_error (soc, exc) :
        ### default `on_error` callback: just re-raise
        raise exc
    # end def _raise_attr_error
    @TFL.Contextmanager
    def _record_context (self, gen, Change) :
        """Context manager recording a change of class `Change`.

        `gen` yields (name, old-raw-value) pairs; they are materialized
        before the body runs so the pre-change values are captured.
        """
        if not self._home_scope :
            yield
        else :
            rvr = dict (gen)
            yield rvr
            if rvr :
                self.home_scope.record_change (Change, self, rvr)
    # end def _record_context
def _record_iter (self, kw) :
attributes = self.__class__.attributes
recordable = self.recordable_attrs
for name, value in pyk.iteritems (kw) :
attr = attributes.get (name, None)
if attr in recordable :
yield attr, attr.name, value
# end def _record_iter
def _record_iter_raw (self, kw) :
for a, name, value in self._record_iter (kw) :
yield a, name, value, a.get_raw (self), a.get_raw_pid (self)
# end def _record_iter_raw
def _set_ckd (self, _pred_kinds = None, on_error = None, ** kw) :
man = self._attr_man
tc = man.total_changes
man.reset_pending ()
if kw :
is_correct = self._kw_check_predicates \
(_kinds = _pred_kinds, on_error = on_error, ** kw)
if is_correct :
for name, val, attr in self.set_attr_iter (kw, on_error) :
attr.set_cooked (self, val)
if man.updates_pending :
try :
man.do_updates_pending (self)
except Exception :
pass
return man.total_changes - tc
# end def _set_ckd
    def _set_ckd_inner (self, _pred_kinds = None, on_error = None, ** kw) :
        ### record the old raw values of all recordable attributes that
        ### actually change, then do the real work in `_set_ckd`
        gen = \
            ( (name, attr.get_raw_pid (self))
            for attr, name, value in self._record_iter (kw)
            if attr.get_value (self) != value
            )
        with self._record_context (gen, self.SCM_Change_Attr) :
            return self._set_ckd (_pred_kinds, on_error, ** kw)
    # end def _set_ckd_inner
def _set_raw (self, _pred_kinds = None, on_error = None, ** kw) :
    ### Set attribute values from raw values in `kw`, after checking
    ### the predicates of `_pred_kinds`; returns the number of changes
    man = self._attr_man
    tc = man.total_changes
    if kw :
        ### raw-check also converts the raw values; `to_do` holds
        ### `(attr, raw_val, cooked_val)` triples to apply
        is_correct, to_do = self._kw_raw_check_predicates \
            (_kinds = _pred_kinds, on_error = on_error, ** kw)
        man.reset_pending ()
        if is_correct :
            for attr, raw_val, val in to_do :
                attr._set_raw (self, raw_val, val)
    if man.updates_pending :
        man.do_updates_pending (self)
    return man.total_changes - tc
# end def _set_raw
def _set_raw_inner (self, _pred_kinds = None, on_error = None, ** kw) :
    ### Record-and-set for raw values: lazily collect the old raw-pid
    ### values of the recordable attributes whose raw value differs,
    ### then apply the change inside a recording context
    gen = \
        ( (name, raw_pid)
        for attr, name, value, raw, raw_pid in self._record_iter_raw (kw)
        if raw != value
        )
    with self._record_context (gen, self.SCM_Change_Attr) :
        return self._set_raw (_pred_kinds, on_error, ** kw)
# end def _set_raw_inner
def _store_attr_error (self, exc) :
logging.exception ("Setting attribute failed with exception")
if self._home_scope :
self.home_scope._attr_errors.append (exc)
# end def _store_attr_error
@staticmethod
def _ui_display (o) :
return o.ui_display
# end def _ui_display
def __getattr__ (self, name) :
    ### just to ease up-chaining in descendents; the message includes
    ### `type_name` to identify the essential type of `self`
    raise AttributeError ("%r <%s>" % (name, self.type_name))
# end def __getattr__
def __ne__ (self, rhs) :
return not (self == rhs)
# end def __ne__
def __repr__ (self) :
    ### `_repr` may fail for partially initialized instances (missing
    ### attributes) — fall back to a generic "Incomplete" representation
    try :
        return pyk.reprify (self._repr (self.type_name))
    except AttributeError :
        return "<%s Incomplete>" % (self.ui_name_T, )
# end def __repr__
# end class Entity
@pyk.adapt__bool__
@pyk.adapt__str__
class An_Entity (TFL.Meta.BaM (Entity, metaclass = MOM.Meta.M_An_Entity)) :
"""Root class for anonymous entities without identity."""
is_partial = True
is_primary = False
owner = None
attr_name = None
@property
def hash_key (self) :
"""Hash key: tuple of hash values for attributes in :attr:`hash_sig`.
"""
return tuple (a.get_hash (self) for a in self.hash_sig)
# end def hash_key
@property
def SCM_Change_Attr (self) :
return MOM.SCM.Change.Attr_Composite
# end def SCM_Change_Attr
def as_pickle_cargo (self) :
"""Pickle cargo of `self`."""
return (str (self.type_name), self.as_attr_pickle_cargo ())
# end def as_pickle_cargo
def as_string (self) :
    """Serialization of `self` as string"""
    ### NOTE(review): despite name and docstring, this returns a sorted
    ### tuple of `(name, raw-value)` pairs, not a string — confirm
    ### whether callers rely on the tuple form before changing it
    return tuple (sorted (pyk.iteritems (self.raw_attr_dict)))
# end def as_string
def attr_as_code (self) :
attrs = self.attr_tuple_to_save ()
values = tuple (a.as_code (a.get_value (self)) for a in attrs)
if len (values) == 1 :
### trailing comma for single element tuple
values += ("", )
result = ", ".join ("%s" % (v, ) for v in values)
return result
# end def attr_as_code
def attr_tuple_to_save (self) :
"""Tuple of attributes in :attr:`user_attr` that need saving."""
result = self.user_attr
save_p = tuple (a.to_save (self) for a in result) [::-1]
to_drop = tuple (itertools.takewhile ((lambda x : not x), save_p))
if to_drop :
### drop trailing attributes that don't need to be saved
result = result [: -len (to_drop)]
return result
# end def attr_tuple_to_save
def attr_tuple (self) :
    """Tuple of cooked values of attributes in :attr:`user_attr`"""
    values = (kind.get_value (self) for kind in self.user_attr)
    return tuple (values)
# end def attr_tuple
def copy (self, ** kw) :
scope = kw.pop ("scope", self.home_scope)
etype = scope.entity_type (self.type_name)
result = etype (scope = scope, ** kw)
raw_kw = dict \
( (a.name, a.get_raw (self))
for a in self.user_attr if a.name not in kw
)
if raw_kw :
result.set_raw (** raw_kw)
return result
# end def copy
@classmethod
def example_attrs (cls, full = False) :
    """Map attribute name to example value — for all user attributes
       when `full` is true, otherwise only for the required ones.
    """
    attrs = cls.user_attr if full else cls.required
    return {a.name : a.example for a in attrs}
# end def example_attrs
@property
def owner_attr (self) :
"""Return the attribute (kind property) of the `owner` object that
holds `self`.
"""
if self.owner and self.attr_name :
return self.owner.attr_prop (self.attr_name)
# end def owner_attr
def _init_attributes (self) :
    ### Clear `owner` before (re-) initializing the attributes
    self.owner = None
    self.__super._init_attributes ()
# end def _init_attributes
@staticmethod
def _json_encode (o) :
return pyk.text_type (o)
# end def _json_encode
def _main__init__ (self, * args, ** kw) :
raw = bool (kw.pop ("raw", self._init_raw_default))
akw = self.args_as_kw (* args, ** kw)
ukw = dict (self._kw_undeprecated (akw))
skw = self._kw_polished (ukw) if raw else ukw
self._kw_check_required (* args, ** skw)
if skw :
setter = self._set_raw if raw else self._set_ckd
setter (_pred_kinds = ("object_init", "object"), ** skw)
# end def _main__init__
def _repr (self, type_name) :
return u"%s (%s)" % (type_name, self.attr_as_code ().rstrip (", "))
# end def _repr
def _set_ckd_inner (self, _pred_kinds = None, on_error = None, ** kw) :
owner_attr = self.owner_attr
if owner_attr is None or self.electric or not owner_attr.record_changes :
return self._set_ckd (_pred_kinds, on_error, ** kw)
elif owner_attr and owner_attr.is_primary :
### Change in primary attribute might be a `rename`
return self.owner.set (** {self.attr_name : self.copy (** kw)})
else :
return self.__super._set_ckd_inner (_pred_kinds, on_error, ** kw)
# end def _set_ckd_inner
def _set_raw_inner (self, _pred_kinds = None, on_error = None, ** kw) :
    ### Set raw attribute values on an anonymous entity, routing the
    ### change through the owner where necessary (mirrors
    ### `_set_ckd_inner` above)
    owner_attr = self.owner_attr
    if owner_attr is None or self.electric or not owner_attr.record_changes :
        return self._set_raw (_pred_kinds, on_error, ** kw)
    elif owner_attr and owner_attr.is_primary :
        ### Change in primary attribute might be a `rename`
        return self.owner.set \
            (** {self.attr_name : self.copy (raw = True, ** kw)})
    else :
        ### Fix: pass `_pred_kinds` through to the ancestor method — it
        ### was previously dropped, so `on_error` was positionally bound
        ### to the ancestor's `_pred_kinds` parameter (compare the
        ### correct super call in `_set_ckd_inner`)
        return self.__super._set_raw_inner (_pred_kinds, on_error, ** kw)
# end def _set_raw_inner
def __eq__ (self, rhs) :
    """Equality via `hash_key`: `rhs` may be another entity-like object
       providing `hash_key` or a plain hash-key value.
    """
    other_key = getattr (rhs, "hash_key", rhs)
    return self.hash_key == other_key
# end def __eq__
def __hash__ (self) :
return hash (self.hash_key)
# end def __hash__
def __bool__ (self) :
return self.has_substance ()
# end def __bool__
def __str__ (self) :
return "(%s)" % (self.attr_as_code ())
# end def __str__
# end class An_Entity
_Ancestor_Essence = Entity
@TFL.Add_To_Class ("P_Type", _A_Id_Entity_)
@TFL.Add_To_Class ("P_Type_S", _A_Id_Entity_)
@pyk.adapt__str__
class Id_Entity \
(TFL.Meta.BaM (_Ancestor_Essence, metaclass = MOM.Meta.M_Id_Entity)) :
"""Root class for MOM entities with identity, i.e.,
objects and links.
"""
has_identity = True
is_partial = True
max_count = 0
record_changes = True
refuse_links = set ()
sorted_by = TFL.Meta.Alias_Property ("sorted_by_epk")
spk = TFL.Meta.Alias_Property ("pid")
spk_attr_name = "pid" ### Name of `surrogate primary key` attribute
tutorial = None
### Thanks to `Alias_Property`, `uniqueness_dbw` and `uniqueness_ems` are
### accessible for both the instances and the class
uniqueness_dbw = TFL.Meta.Alias_Property \
("_Predicates.uniqueness_dbw")
uniqueness_ems = TFL.Meta.Alias_Property \
("_Predicates.uniqueness_ems")
_attrs_to_update_combine = ("refuse_links", "_UI_Spec_Defaults")
_UI_Spec_Defaults = dict \
( show_in_admin = False
)
class _Attributes (_Ancestor_Essence._Attributes) :
_Ancestor = _Ancestor_Essence._Attributes
class _A_Change_ (A_Rev_Ref) :
"""Creation change of the object"""
P_Type = "MOM.MD_Change"
Ref_Type = P_Type
ref_name = "pid"
hidden = False
hidden_nested = 1
q_able = True
# end class _A_Change_
class created_by (A_Id_Entity) :
"""User that created the entity"""
kind = Attr.Computed
P_Type = "MOM.Id_Entity"
def computed (self, obj) :
cc = obj.creation
if cc is not None :
result = cc.c_user
if isinstance (result, pyk.int_types) :
try :
result = obj.home_scope.pid_query (result)
except Exception :
return
return result
# end def computed
# end class created_by
class creation (_A_Change_) :
"""Creation change of the object"""
sqx_filter = (Q.kind == "Create")
# end class creation
class creation_date (A_Date_Time) :
"""Date/time of creation."""
kind = Attr.Computed
def computed (self, obj) :
cc = obj.creation
if cc is not None :
return cc.c_time
# end def computed
# end class creation_date
class electric (A_Boolean) :
"""Indicates if object/link was created automatically or not."""
kind = Attr.Internal
default = False
hidden = True
# end class electric
class is_used (A_Int) :
"""Specifies whether entity is used by another entity."""
kind = Attr.Cached
default = 1
# end class is_used
class last_change (_A_Change_) :
"""Last change of the object"""
finished_query = A_Rev_Ref.finished_query_first
sort_key = TFL.Sorted_By ("-cid")
# end class last_change
class last_changed (A_Date_Time) :
"""Date/time of last change."""
kind = Attr.Computed
def computed (self, obj) :
lc = obj.last_change
if lc is not None :
return lc.time
# end def computed
# end class last_changed
class last_changed_by (A_Id_Entity) :
"""User that applied the last change."""
kind = Attr.Computed
P_Type = "MOM.Id_Entity"
def computed (self, obj) :
lc = obj.last_change
if lc is not None :
result = lc.user
if isinstance (result, pyk.int_types) :
try :
result = obj.home_scope.pid_query (result)
except Exception :
return
return result
# end def computed
# end class last_changed_by
class last_cid (A_Int) :
"""Change id of last change for this entity."""
kind = Attr.Internal
default = 0
hidden_nested = 1
record_changes = False
use_index = True
# end class last_cid
class pid (A_Surrogate) :
"""Permanent id of the instance."""
explanation = """
The `pid` is unique over all entities in a given scope. Once
created, the `pid` of an instance never changes and is not ever
reused for a different instance.
The `pid` remains unchanged during database migrations.
"""
hidden_nested = 1
# end class pid
class type_name (A_String) :
"""Name of type of this entity."""
kind = Attr.Internal
Kind_Mixins = (Attr._Type_Name_Mixin_, )
hidden_nested = 1
max_length = 0
# end class type_name
class ui_repr (_Ancestor.ui_repr) :
def computed (self, obj) :
return "%s %s" % \
(obj.type_name, portable_repr (obj.epk_raw [:-1]))
# end def computed
# end class ui_repr
class x_locked (A_Boolean) :
"""Specifies if object can be changed by user"""
kind = Attr.Internal
default = False
hidden = True
q_able = False
# end class x_locked
# end class _Attributes
class _Predicates (_Ancestor_Essence._Predicates) :
class completely_defined (Pred.Condition) :
"""All necessary attributes must be defined."""
kind = Pred.System
guard = "is_used"
guard_attr = ("is_used", )
def eval_condition (self, obj, glob_dict, val_dict) :
result = []
add = result.append
for a in obj.necessary :
if not a.has_substance (obj) :
m = _T ("Necessary attribute %s is not defined") % (a, )
add (m)
self._error_info.extend (result)
return not result
# end def eval_condition
# end class completely_defined
class object_correct (Pred.Condition) :
"""All object invariants must be satisfied."""
kind = Pred.System
def eval_condition (self, obj, glob_dict, val_dict) :
result = []
add = result.append
for p in obj._pred_man.errors ["object"] :
add (str (p))
self._error_info.extend (result)
return not result
# end def eval_condition
# end class object_correct
# end class _Predicates
@TFL.Meta.Once_Property
def ETM (self) :
"""E_Type_Manager managing `self`."""
return self.home_scope [self.type_name]
# end def ETM
@TFL.Meta.Once_Property
def epk (self) :
"""Essential primary key as tuple of cooked values."""
return tuple \
(a.get_value (self) for a in self.primary) + (self.type_name,)
# end def epk
@property
def epk_as_code (self) :
def _conv (tup) :
if len (tup) == 1 :
tup += ("", )
for t in tup :
if isinstance (t, tuple) :
if len (t) == 1 :
t += ("", )
t = "(%s)" % (", ".join (_conv (t)))
yield t
def _gen () :
for a in self.primary :
r = a.as_code (a.get_value (self))
if isinstance (r, tuple) :
r = "(%s)" % (", ".join (_conv (r)))
yield r
try :
result = tuple (_gen ())
except Exception :
result = ("...", )
return result
# end def epk_as_code
@TFL.Meta.Once_Property
def epk_as_dict (self) :
"""Dictionary of values in :attr:`epk`."""
return dict (zip (self.epk_sig, self.epk))
# end def epk_as_dict
@property
def epk_raw (self) :
"""Essential primary key as tuple of raw values."""
return \
( tuple (a.get_raw_epk (self) for a in self.primary)
+ (self.type_name, )
)
# end def epk_raw
@property
def epk_raw_pid (self) :
    """Essential primary key as tuple of raw values
       but pids for Id_Entity attributes.
    """
    return \
        ( tuple (a.get_raw_pid (self) for a in self.primary)
        + (self.type_name, )
        )
# end def epk_raw_pid
@property
def errors (self) :
return iter (self._pred_man)
# end def errors
@property
def has_errors (self) :
return self._pred_man.has_errors
# end def has_errors
@property
def has_warnings (self) :
return self._pred_man.has_warnings
# end def has_warnings
@property
def SCM_Change_Attr (self) :
return MOM.SCM.Change.Attr
# end def SCM_Change_Attr
@property
def ui_display_format (self) :
"""Format used for :attr:`ui_display`."""
return self.ui_display_sep.join \
( "%%(%s)s" % a.name for (a, v) in zip (self.primary, self.epk)
if a.has_substance (self)
)
# end def ui_display_format
@TFL.Meta.Class_and_Instance_Once_Property
def UI_Spec (soc) :
try :
UI_Spec_module = soc.PNS._Import_Module ("UI_Spec")
except ImportError :
pass
else :
try :
UI_Spec = UI_Spec_module.UI_Spec
except AttributeError :
pass
else :
defaults = soc._UI_Spec_Defaults
try :
result = getattr (UI_Spec, soc.type_base_name)
except AttributeError :
result = dict (defaults)
setattr (UI_Spec, soc.type_base_name, result)
else :
show_in_admin = result.get ("show_in_admin", True)
for k in defaults :
if k not in result :
result [k] = defaults [k]
result ["show_in_admin"] = show_in_admin
return result
# end def UI_Spec
def add_error (self, err, kind = "object") :
"""Add `err` to error-list of predicate manager"""
self._pred_man.errors [kind].append (err)
# end def add_error
def all_links (self) :
return sorted \
( self.all_referrers ()
, key = self.home_scope.MOM.Id_Entity.sort_key_pm ()
)
# end def all_links
def all_referrers (self) :
"""Return query set of all entities that refer to `self`."""
scope = self.home_scope
def _gen (self, ref_map, scope) :
pid = self.pid
for ET, attrs in pyk.iteritems (ref_map) :
qfs = tuple ((getattr (Q, a) == pid) for a in attrs)
ETM = scope [ET.type_name]
yield ETM.query (Q.OR (* qfs)).distinct ()
return scope.ems.Q_Result_Composite \
( tuple (_gen (self, self.__class__.Ref_Req_Map, scope))
+ tuple (_gen (self, self.__class__.Ref_Opt_Map, scope))
)
# end def all_referrers
def async_changes (self, * filters, ** kw) :
    """Changes that happened asynchronously since `self` was last read from
       database.
    """
    ### Fix: the star parameter was named `filter` (shadowing the
    ### builtin) while the body referenced `filters`, raising a
    ### `NameError` whenever positional filters or `kw` were passed;
    ### renamed to `filters`, consistent with `changes` below
    result = self.home_scope.query_changes \
        (Q.cid > self.last_cid, Q.pid == self.pid)
    if filters or kw :
        result = result.filter (* filters, ** kw)
    return result
# end def async_changes
def as_migration (self) :
"""Migration of `self`."""
def _gen (self) :
skip = set (("last_cid", "pid", "type_name"))
for ak in self.db_attr :
if not (ak.is_primary or ak.name in skip) :
yield ak.name, ak.get_raw_epk (self)
return (self.epk_raw, dict (_gen (self)))
# end def as_migration
def as_pickle_cargo (self) :
"""Pickle cargo of `self`."""
return (str (self.type_name), self.as_attr_pickle_cargo (), self.pid)
# end def as_pickle_cargo
def attr_as_code (self) :
eas = self.epk_as_code
aas = (self.__super.attr_as_code (), )
result = ", ".join (eas + aas)
if "," not in result :
result += ","
return result
# end def attr_as_code
def changes (self, * filters, ** kw) :
"""Return change objects for `self` that match `filters` and `kw`."""
result = self.home_scope.query_changes (Q.pid == self.pid)
if filters or kw :
result = result.filter (* filters, ** kw)
return result
# end def changes
def check_all (self) :
"""True if all predicates are satisfied."""
return self._pred_man.check_all (self)
# end def check_all
def copy (self, * new_epk, ** kw) :
"""Make copy with primary key `new_epk`."""
scope = self.home_scope
etype = self.__class__
result = etype (* new_epk, scope = scope, ** kw)
with scope.nested_change_recorder \
(MOM.SCM.Change.Copy, result) as change :
scope.add (result)
change.pid = result.pid
raw_kw = dict \
( (a.name, a.get_raw (self))
for a in self.user_attr if a.name not in kw and a.to_save (self)
)
if raw_kw :
result.set_raw (** raw_kw)
return result
# end def copy
def correct_unknown_attr (self, error) :
"""Try to correct an unknown attribute error."""
pass
# end def correct_unknown_attr
def destroy (self) :
"""Remove entity from `home_scope`."""
if self._home_scope :
if self is self.home_scope.root :
self.home_scope.destroy ()
else :
self.home_scope.remove (self)
# end def destroy
def destroy_dependency (self, other) :
for attr in self.object_referring_attributes.pop (other, ()) :
if attr.is_required :
self.destroy ()
elif attr.is_primary :
### resetting a primary attribute means a rename operation
self.set (** {attr.name : None})
else :
old = attr.get_value (self)
raw = attr.get_raw_pid (self)
attr.reset (self)
if old != attr.get_value (self) :
self.record_attr_change ({attr.name : raw})
if self and other in self.dependencies :
del self.dependencies [other]
# end def destroy_dependency
@classmethod
def example_attrs (cls, full = False) :
attrs = itertools.chain \
(cls.primary, cls.user_attr if full else cls.required)
return dict ((a.name, a.example) for a in attrs)
# end def example_attrs
@classmethod
def epk_as_kw (cls, * epk, ** kw) :
"""Dictionary with values of `epk` and values of `kw`"""
on_error = kw.pop ("on_error", None)
if epk and isinstance (epk [-1], cls.Type_Name_Type) :
epk = epk [:-1]
return dict (cls.args_as_kw (* epk, ** kw), on_error = on_error)
# end def epk_as_kw
@classmethod
def epkified (cls, * epk, ** kw) :
"""Return `epk` tuple and `kw` dictionary, no matter if `epk` values
were passed as positional or named arguments.
"""
if epk and isinstance (epk [-1], cls.Type_Name_Type) :
epk = epk [:-1]
raw = bool (kw.get ("raw", False))
epkifier = (cls.epkified_ckd, cls.epkified_raw) [raw]
### `epkified_ckd` and `epkified_raw` are created by meta machinery
try :
return epkifier (* epk, ** kw)
except TypeError as exc :
on_error = kw.pop ("on_error", None) or cls._raise_attr_error
needed = tuple (m.name for m in cls.primary_required)
missing = tuple (p for p in needed [len (epk):] if p not in kw)
if missing :
error = MOM.Error.Required_Missing \
(cls, needed, missing, epk, kw, "primary")
on_error (error)
raise error
else :
raise TypeError \
( _T ("%s needs the arguments %s, got %s instead")
% (cls.ui_name_T, needed, portable_repr (epk))
)
# end def epkified
def is_defined (self) :
    """True if all necessary attributes have substance."""
    if not self.is_used :
        return True
    return all (a.has_substance (self) for a in self.necessary)
# end def is_defined
def is_g_correct (self) :
"""True if all system predicates are satisfied."""
ews = self._pred_man.check_kind ("system", self)
return not ews
# end def is_g_correct
@TFL.Meta.Class_and_Instance_Method
def is_locked (soc) :
return soc.x_locked or soc.electric
# end def is_locked
def notify_dependencies_destroy (self) :
"""Notify all entities registered in `self.dependencies` and
`self.object_referring_attributes` about the destruction of `self`.
"""
### Use `list` because dictionaries are changed inside loop
for d in list (self.dependencies) :
d.destroy_dependency (self)
for o in list (self.object_referring_attributes) :
o.destroy_dependency (self)
# end def notify_dependencies_destroy
def register_dependency (self, other) :
    """Register that `other` depends on `self`"""
    ### `dependencies` is a defaultdict(int) acting as a ref-counter
    ### (see `_init_meta_attrs`); decremented by `unregister_dependency`
    self.dependencies [other] += 1
# end def register_dependency
def restore (self, * epk, ** kw) :
"""Restore an object that was destroyed before but not committed."""
if not (self.pid and self.home_scope) :
raise TypeError \
("%r: pid %r, scope %r" % (self, self.pid, self.home_scope))
self.init_finished = False
self._init_pending = []
self.__init__ (* epk, ** kw)
self.home_scope.add (self, pid = self.pid)
return self
# end def restore
def unregister_dependency (self, other) :
    """Unregister dependency of `other` on `self`"""
    deps  = self.dependencies
    count = deps [other] - 1
    if count > 0 :
        deps [other] = count
    else :
        ### drop the entry once the ref-count reaches zero
        del deps [other]
# end def unregister_dependency
def user_diff (self, other, ignore = ()) :
    """Return differences in user attributes between `self` and `other`."""
    result  = {}
    missing = TFL.Undef ()
    skip    = set (ignore) if ignore else ignore
    if self.type_name != other.type_name :
        result ["type_name"] = (self.type_name, other.type_name)
    cargo_s = self.as_attr_pickle_cargo  ()
    cargo_o = other.as_attr_pickle_cargo ()
    for key in set (cargo_s).union (cargo_o) :
        if key in skip :
            continue
        lhs = cargo_s.get (key, missing)
        rhs = cargo_o.get (key, missing)
        if lhs != rhs :
            result [key] = \
                ( "<Missing>" if lhs is missing else lhs
                , "<Missing>" if rhs is missing else rhs
                )
    return result
# end def user_diff
def user_equal (self, other) :
"""Compare `self` and `other` concerning user attributes."""
return \
( self.type_name == other.type_name
and self.as_attr_pickle_cargo () == other.as_attr_pickle_cargo ()
)
# end def user_equal
def _destroy (self) :
self.notify_dependencies_destroy ()
# end def _destroy
def _extract_primary (self, kw) :
result = {}
for pka in self.primary :
name = pka.name
role_name = getattr (pka, "role_name", None)
if name in kw :
result [name] = kw.pop (name)
elif role_name and role_name in kw :
result [name] = kw.pop (role_name)
return result
# end def _extract_primary
def _extract_primary_ckd (self, kw) :
new_epk = []
pkas_ckd = self._extract_primary (kw)
pkas_raw = {}
for pka in self.primary :
name = pka.name
if name in pkas_ckd :
w = pkas_ckd [name]
v = pka.cooked (w)
pkas_raw [name] = pka.as_string (w)
else :
v = getattr (self, name)
new_epk.append (v)
return new_epk, pkas_raw, pkas_ckd
# end def _extract_primary_ckd
def _extract_primary_raw (self, kw) :
new_epk = []
pkas_ckd = {}
pkas_raw = self._extract_primary (kw)
for pka in self.primary :
name = pka.name
if name in pkas_raw :
pkas_ckd [name] = v = pka.from_string (pkas_raw [name], self)
else :
v = getattr (self, name)
new_epk.append (v)
return new_epk, pkas_raw, pkas_ckd
# end def _extract_primary_raw
def _init_epk (self, epk) :
return ((a.name, pka) for a, pka in zip (self.primary, epk))
# end def _init_epk
def _init_meta_attrs (self) :
self.__super._init_meta_attrs ()
self.dependencies = TFL.defaultdict (int)
self.object_referring_attributes = TFL.defaultdict (list)
# end def _init_meta_attrs
@staticmethod
def _json_encode (o) :
return dict (display = o.FO, pid = o.pid)
# end def _json_encode
def _main__init__ (self, * epk, ** kw) :
### `epk_as_kw` needs to raise errors to avoid follow-up errors
### --> override `on_error` in `kw` when calling `epk_as_kw`
self.implicit = kw.pop ("implicit", False)
raw = bool (kw.pop ("raw", self._init_raw_default))
akw = self.epk_as_kw (* epk, ** dict (kw, on_error = None))
ukw = dict (self._kw_undeprecated (akw))
pkw = self._kw_polished (ukw) if raw else ukw
checker = \
( self._kw_raw_check_predicates
if raw else self._kw_check_predicates
)
setter = self.__super._set_raw if raw else self.__super._set_ckd
### Need to use `__super.` methods here because it's not a `rename`
try :
epk, pkw = self.epkified (raw = raw, ** pkw)
self._kw_check_required (* epk, ** pkw)
except MOM.Error.Required_Missing as exc :
self._pred_man.missing_required = exc
pkw.update (self._init_epk (epk))
checker (** pkw)
raise MOM.Error.Invariants (self._pred_man)
pkw.update (self._init_epk (epk))
setter (_pred_kinds = ("object_init", "object"), ** pkw)
required_errors = self._pred_man.required_errors
if required_errors :
raise MOM.Error.Invariants (self._pred_man)
# end def _main__init__
def _rename (self, new_epk, pkas_raw, pkas_ckd) :
diffs = sum ((n != o) for n, o in paired (new_epk, self.epk [:-1]))
if diffs :
def _renamer () :
attributes = self.attributes
for k, v in pyk.iteritems (pkas_ckd) :
attr = attributes [k]
attr._set_cooked_inner (self, v)
attr._set_raw_inner (self, pkas_raw [k], v)
self._reset_epk ()
self._kw_check_predicates (on_error = None, ** pkas_ckd)
self.home_scope.rename (self, tuple (new_epk), _renamer)
return diffs
# end def _rename
def _repr (self, type_name) :
return "%s (%s)" % (type_name, ", ".join (self.epk_as_code))
# end def _repr
def _reset_epk (self) :
sd = self.__dict__
for a in ("epk", "epk_as_dict") :
if a in sd :
delattr (self, a)
# end def _reset_epk
def _set_ckd (self, _pred_kinds = None, on_error = None, ** kw) :
result = 0
if kw :
new_epk, pkas_raw, pkas_ckd = self._extract_primary_ckd (kw)
if pkas_ckd and tuple (new_epk) != self.epk [:-1] :
result += self._rename (new_epk, pkas_raw, pkas_ckd)
result += self.__super._set_ckd (_pred_kinds, on_error, ** kw)
return result
# end def _set_ckd
def _set_raw (self, _pred_kinds = None, on_error = None, ** kw) :
result = 0
if kw :
new_epk, pkas_raw, pkas_ckd = self._extract_primary_raw (kw)
if pkas_ckd and tuple (new_epk) != self.epk [:-1] :
result += self._rename (new_epk, pkas_raw, pkas_ckd)
result += self.__super._set_raw (_pred_kinds, on_error, ** kw)
return result
# end def _set_raw
def __eq__ (self, rhs) :
    ### Equality protocol for id-entities:
    ### * against an int (or a Q-result attribute wrapping an int):
    ###   compare by `pid` alone
    ### * against a string: compare by `pid` if the string parses as int,
    ###   otherwise unequal
    ### * otherwise: compare `(pid, scope-guid)` pairs, extracting the
    ###   pair from `rhs` when it looks like an entity
    if isinstance (rhs, pyk.int_types) or \
        ( isinstance (rhs, TFL.Q_Result._Attr_)
        and isinstance (rhs._VALUE, pyk.int_types)
        ) :
        return self.pid == rhs
    elif isinstance (rhs, pyk.string_types) :
        try :
            pid = int (rhs)
        except (ValueError, TypeError) :
            return False
        else :
            return self.pid == pid
    else :
        try :
            rhs = (rhs.pid, rhs.home_scope.guid)
        except AttributeError :
            pass
        return (self.pid, self.home_scope.guid) == rhs
# end def __eq__
def __ne__ (self, rhs) :
    """Inverse of equality (delegates to the `==` operator)."""
    equal = (self == rhs)
    return not equal
# end def __ne__
def __hash__ (self) :
return hash ((self.pid, self.home_scope.guid))
# end def __hash__
def __setattr__ (self, name, value) :
### If an attribute descriptor's `__get__` does not return `self`
### when accessed via the class it's `__set__` won't be used by Python
### --> call it manually here, instead
try :
attr = self.attributes [name]
except KeyError :
return self.__super.__setattr__ (name, value)
else :
attr.__set__ (self, value)
# end def __setattr__
def __str__ (self) :
    """Display based on the primary key: a single value is shown bare,
       multiple values are wrapped in parentheses.
    """
    parts = ", ".join (self.epk_as_code)
    if len (self.epk) == 1 :
        return "%s" % (parts, )
    return "(%s)" % (parts, )
# end def __str__
# end class Id_Entity
class _Id_Entity_Mixin_ (object) :
    """Common base for the reload/destroyed wrapper mixins: attribute
       assignment bypasses `Id_Entity.__setattr__`.
    """

    def __setattr__ (self, name, value) :
        ### Avoid `Id_Entity.__setattr__` triggering infinite recursion
        object.__setattr__ (self, name, value)
    # end def __setattr__

# end class _Id_Entity_Mixin_
class _Id_Entity_Reload_Mixin_ (_Id_Entity_Mixin_) :
    """Mixin triggering a reload from the database on any attribute access."""

    def __getattribute__ (self, name) :
        ### Any access except `__class__` restores the real e-type,
        ### reloads the instance, then delegates to the restored class
        if name in ("__class__", ) :
            return object.__getattribute__ (self, name)
        else :
            cls = object.__getattribute__ (self, "__class__")
            reload = cls._RELOAD_INSTANCE
            e_type = cls._RESTORE_CLASS (self)
            reload (self, e_type)
            return getattr (self, name)
    # end def __getattribute__

    @classmethod
    def _RELOAD_INSTANCE (cls, self, e_type) :
        ### Hook; must be implemented by the backend-specific subclass
        raise TypeError \
            ( "%s needs to implement _RELOAD_INSTANCE"
            % self.__class__.__bases__ [0]
            )
    # end def _RELOAD_INSTANCE

    @classmethod
    def _RESTORE_CLASS (cls, self) :
        ### Swap `self.__class__` back to the real e-type (the third
        ### base of the generated `_Reload` class) and return it
        result = self.__class__ = cls.__bases__ [2]
        return result
    # end def _RESTORE_CLASS

    @classmethod
    def define_e_type (cls, e_type, mixin) :
        ### Create the derived `_Reload` e-type with `mixin` and this
        ### class prepended to its bases
        e_type._RELOAD_E_TYPE = e_type.New \
            ( "_Reload"
            , head_mixins = (mixin, cls)
            )
    # end def define_e_type

# end class _Id_Entity_Reload_Mixin_
@pyk.adapt__bool__
class _Id_Entity_Destroyed_Mixin_ (_Id_Entity_Mixin_) :
    """Mixin indicating an entity that was already destroyed."""

    def __getattribute__ (self, name) :
        ### Only a small whitelist of attributes remains accessible on a
        ### destroyed entity; everything else raises `Destroyed_Entity`
        if name in ("E_Type", "__bool__", "__class__", "__nonzero__", "__repr__") :
            return object.__getattribute__ (self, name)
        elif name in ("last_cid", "pid", "type_name") :
            try :
                ### Need to reset `self.__class__` temporarily to allow
                ### properties to run
                cls = self.__class__
                self.__class__ = cls.__bases__ [1]
                result = getattr (self, name)
            finally :
                self.__class__ = cls
            return result
        elif name == "restore" :
            ### `restore` runs against the original e-type;
            ### NOTE(review): unlike the branch above, the class is not
            ### switched back afterwards — confirm this is intentional
            cls = self.__class__
            self.__class__ = cls.__bases__ [1]
            return getattr (self, name)
        elif name == "destroy_dependency" :
            ### destroying a dependency of a destroyed entity is a no-op
            return lambda s : True
        else :
            raise MOM.Error.Destroyed_Entity \
                ( "%r: access to attribute %r not allowed"
                % (self, name)
                )
    # end def __getattribute__

    @classmethod
    def define_e_type (cls, e_type) :
        ### Create the companion `<type_base_name>_Destroyed` e-type
        e_type._DESTROYED_E_TYPE = type (cls) \
            (str (e_type.type_base_name + "_Destroyed"), (cls, e_type), {})
    # end def define_e_type

    def __bool__ (self) :
        ### a destroyed entity is always falsy
        return False
    # end def __bool__

    def __repr__ (self) :
        ### Need to reset `self.__class__` temporarily to get proper `__repr__`
        try :
            cls = self.__class__
            self.__class__ = cls.__bases__ [1]
            result = "<Destroyed entity %s>" % (self.__repr__ (), )
        finally :
            self.__class__ = cls
        return result
    # end def __repr__

# end class _Id_Entity_Destroyed_Mixin_
class MD_Entity (TFL.Meta.BaM (Entity, metaclass = MOM.Meta.M_MD_Entity)) :
    """Root class for meta-data entities, e.g., entities recording changes to
       the object model.
    """

    has_identity   = True
    is_locked      = True
    is_partial     = True
    ### changes to meta-data entities are not themselves recorded
    record_changes = False
    sorted_by      = TFL.Sorted_By ()
    x_locked       = True

    _sig_attr_names = ()

    def _main__init__ (self, * args, ** kw) :
        ### meta-data entities are created internally; nothing to do here
        pass
    # end def _main__init__

    def _repr (self, type_name) :
        ### repr shows the type name only
        return u"%s" % (type_name, )
    # end def _repr

# end class MD_Entity
_Ancestor_Essence = MD_Entity
class MD_Change (_Ancestor_Essence) :
"""Meta-data about changes of the object model."""
record_changes = False
sorted_by = TFL.Sorted_By ("-cid")
spk = TFL.Meta.Alias_Property ("cid")
spk_attr_name = "cid" ### Name of `surrogate primary key` attribute
use_indices = [("type_name", "-cid")]
_sig_attr_names = ("kind", "time", "user")
class _Attributes (_Ancestor_Essence._Attributes) :
_Ancestor = _Ancestor_Essence._Attributes
class _Derived_Attr_ (A_Attr_Type) :
class _Sync_Change_ (Attr.Kind) :
def _set_cooked_value_inner \
(self, obj, value, old_value = None) :
setattr (obj.scm_change, self.scm_name, value)
# end def _set_cooked_value_inner
def reset (self, obj) :
pass
# end def reset
# end class _Sync_Change_
kind = Attr.Internal
Kind_Mixins = (_Sync_Change_, Attr._Computed_Mixin_)
hidden_nested = 2
record_changes = False
def computed (self, obj) :
return getattr (obj.scm_change, self.scm_name, None)
# end def computed
@property
def scm_name (self) :
return self.name
# end def scm_name
# end class _Derived_Attr_
class cid (_Derived_Attr_, A_Surrogate) :
"""Change id."""
hidden_nested = 1
# end class cid
class c_time (_Derived_Attr_, A_Date_Time) :
"""Creation date and time (only for creation changes)."""
# end class c_time
class c_user (_Derived_Attr_, A_Id_Entity) :
"""User that triggered the creation change, if known."""
P_Type = "MOM.Id_Entity"
only_e_types = ("Auth.Account", "PAP.Person")
# end class c_user
class parent (A_Int) :
kind = Attr.Query
hidden_nested = 1
query = Q.parent_cid
# end class parent
class parent_cid (_Derived_Attr_, A_Int) :
"""Cid of parent change, if any."""
hidden_nested = 1
use_index = True
def computed (self, obj) :
parent = obj.scm_change.parent
if parent is not None :
return parent.cid
# end def computed
# end class parent_cid
class kind (_Derived_Attr_, A_String) :
"""Kind of change"""
max_length = 10
# end class kind
class pid (_Derived_Attr_, A_Int) :
"""Permanent id of the entity that was changed, if any."""
hidden_nested = 1
use_index = True
# end class pid
class scm_change (A_Blob) :
"""SCM.Change instance describing the change."""
kind = Attr.Internal
record_changes = False
class Pickler (TFL.Meta.Object) :
Type = _A_Binary_String_
@classmethod
def as_cargo (cls, attr_kind, attr_type, value) :
if value is not None :
return value.as_pickle ()
# end def as_cargo
@classmethod
def from_cargo (cls, scope, attr_kind, attr_type, cargo) :
if cargo is not None :
return MOM.SCM.Change._Change_.from_pickle (cargo)
# end def from_cargo
# end class Pickler
# end class scm_change
class time (_Derived_Attr_, A_Date_Time) :
"""Date and time of the change."""
# end class time
class type_name (_Derived_Attr_, A_String) :
"""Name of type of the entity that was changed, if any."""
hidden_nested = 1
# end class type_name
class user (_Derived_Attr_, A_Id_Entity) :
"""User that triggered the change, if known."""
P_Type = "MOM.Id_Entity"
only_e_types = ("Auth.Account", "PAP.Person")
# end class user
# end class _Attributes
def __init__ (self, scm_change) :
    ### Wrap an SCM change object; the `_Derived_Attr_` attributes of
    ### this class read through to `scm_change`
    self.__super.__init__ ()
    self.scm_change = scm_change
# end def __init__
@property
def ui_display_format (self) :
    """Return a ``%``-format string joining the names of all significant
       attributes that currently have a non-empty value.
    """
    return self.ui_display_sep.join \
        ( "%%(%s)s" % a.name for a in self.sig_attr
          if a.get_value (self) not in (None, "")
        )
# end def ui_display_format
def _repr (self, type_name) :
    """Return debug representation showing kind, cid, time, and pid."""
    return u"%s [%s]: %s, %s, %s" % \
        (type_name, self.kind, self.cid, self.time, self.pid)
# end def _repr
def __getattr__ (self, name) :
    """Delegate unknown attribute access to the wrapped SCM change,
       except for dunder names, which are resolved normally.
    """
    if name.startswith ("__") and name.endswith ("__") :
        ### Placate inspect.unwrap of Python 3.5,
        ### which accesses `__wrapped__` and eventually throws `ValueError`
        return getattr (self.__super, name)
    return getattr (self.scm_change, name)
# end def __getattr__
# end class MD_Change
### «text» ### start of documentation
Id_Entity.__doc_attr_head__ = """
`MOM.Id_Entity` provides the framework for defining essential classes and
associations. Each essential class or association is characterized by
- `essential attributes`_
- `essential predicates`_
- `class attributes`_
- `methods`_
Each instance of `Id_Entity` has an attribute :attr:`home_scope` that
refers to the :class:`~_MOM.Scope.Scope` in which the instance lives.
`Id_Entity` is normally not directly used as a base class. Instead,
`Id_Entity`'s subclasses :class:`~_MOM.Object.Object` and
:class:`~_MOM.Link.Link` serve as root classes for the hierarchies
of essential classes and associations, respectively.
.. _`essential attributes`:
**Essential Attributes**
Essential attributes are defined inside the class `_Attributes`
that is nested in `Id_Entity` (or one of its derived classes).
Any essential class derived (directly or indirectly) from `Id_Entity`
needs to define a `_Attributes` class that's derived from its
ancestors `_Attributes`. The top-most `_Attributes` class is
derived from :class:`MOM.Attr.Spec<_MOM._Attr.Spec.Spec>`.
Each essential attribute is defined by a class derived from one of
the attribute types in :mod:`MOM.Attr.Type<_MOM._Attr.Type>`.
"""
Id_Entity.__doc_attr_tail__ = """
Each essential type provides introspective properties specifying the
various categories of essential attributes defined for the type:
.. attribute:: db_attr
All attributes stored in the database
.. attribute:: edit_attr
All editable attributes
.. attribute:: id_entity_attr
All attributes referring to instances of :class:`Id_Entity`
.. attribute:: link_ref_attr
All query attributes containing links to the essential type
.. attribute:: primary
All attributes that are part of the essential primary key
.. attribute:: primary_optional
All optional attributes that are part of the essential primary key
.. attribute:: primary_required
All required attributes that are part of the essential primary key
.. attribute:: q_able
All attributes that can be used in query expressions
.. attribute:: rev_ref_attr
All query attributes containing reverse references to the essential type
.. attribute:: surrogate_attr
All attributes containing surrogate keys
.. attribute:: ui_attr
All attributes accessible in a user interface (not all of these are
editable)
.. attribute:: user_attr
All editable attributes except for the ones listed by :attr:`primary`
"""
Id_Entity.__doc_pred_head__ = """
.. _`essential predicates`:
**Essential Predicates**
Essential predicates are defined inside the class `_Predicates` that
is nested in `Id_Entity` (or one of its derived classes).
Any essential class derived (directly or indirectly) from `Id_Entity`
needs to define a `_Predicates` class that's derived from its
ancestors `_Predicates`. The top-most `_Predicates` class is
derived from :class:`MOM.Pred.Spec<_MOM._Pred.Spec.Spec>`.
Each essential predicate is defined by a class derived from one of
the predicate types in :mod:`MOM.Pred.Type<_MOM._Pred.Type>`.
`MOM.Id_Entity` defines two predicates that should not be overridden by
descendant classes:
- completely_defined
- object_correct
Please note that these two predicates are *not* to be used as examples
of how predicates should be defined. Normally, predicates define
`assertion`, not `eval_condition`! This is explained in more detail in
:mod:`MOM.Pred.Type<_MOM._Pred.Type>`.
"""
Id_Entity.__doc_pred_tail__ = """
.. _`class attributes`:
**Class Attributes**
`MOM.Id_Entity` provides a number of class attributes that control various
aspects of the use of an essential class by the framework.
.. attribute:: default_child
Specifies which child of a partial class should be used by the UI by
default. The value of this attribute is set for the partial class by
one specific derived class.
.. attribute:: deprecated_attr_names
This is a dictionary that maps deprecated names
of attributes to the currently preferred names (this is used to
allow the reading of older databases without loss of information).
.. attribute:: home_scope
The scope to which the entity belongs.
.. attribute:: is_partial
Specifies if objects/links can be created for the essential
class in question.
`is_partial` must be explicitly set to `True` for each essential
class that doesn't allow the creation of objects or links. If
`is_partial` isn't defined for a class, `False` is assumed.
.. attribute:: max_count
Restricts the number of instances that can be created.
.. attribute:: PNS
The package namespace in which this class is defined.
Ideally, each package namespace defining essential classes defines a
common root for these, e.g., `SPN.Entity`, that defines
`PNS`, e.g., ::
class _SPN_Entity_ (MOM.Id_Entity) :
_real_name = "Entity"
PNS = SPN
...
.. attribute:: recordable_attrs
Set of attributes stored in the database for the entity.
.. attribute:: record_changes
Changes of the entity will only be recorded if `record_changes` is True.
.. attribute:: refuse_links
This is a set of (names of) classes that must not be linked
to instances of the essential class in question. This can be used if
objects of a derived class should not participate in associations of
a base class.
.. attribute:: show_in_ui
Class is shown in the UI only if `show_in_ui` is True.
`show_in_ui` is not inherited --- it must be set to `False` for every
single class that shouldn't be shown in the UI. Alternatively, a class
can set `show_in_ui_T` to False to force its descendants `show_in_ui` to
`False`.
The meta machinery modifies `show_in_ui` by combining it with
`record_changes`.
.. attribute:: show_package_prefix
Specifies whether the class name should be prefixed by the name of
the package namespace in the UI.
.. attribute:: tutorial
Describes why and how to define instances of the essential class and
is used in step-by-step tutorials.
.. _`methods`:
**Methods and Properties**
"""
__doc__ = """
"""
if __name__ != "__main__" :
MOM._Export ("*", "_Id_Entity_Reload_Mixin_", "_Id_Entity_Destroyed_Mixin_")
### __END__ MOM.Entity | PypiClean |
/NehorayRapid1-0.0.1-py3-none-any.whl/RapidBaseNe/Basic_Import_Libs.py | import easydict
from easydict import EasyDict
import PIL
from PIL import Image
import argparse
import os
import numpy
import numpy as np
import math
import glob
from glob import glob
import random
import importlib
import collections
from collections import OrderedDict
import sys
from datetime import datetime
import cv2
#from skimage.measure import compare_ssim
from shutil import get_terminal_size
import pickle
import lmdb
import ctypes # An included library with Python install.
import matplotlib.pyplot as plt
# plt.switch_backend('agg')
plt.switch_backend('TkAgg')
# plt.switch_backend('WXAgg')
from operator import contains
from functools import partial
from itertools import filterfalse
from argparse import ArgumentParser
from pathlib import Path
from sys import argv
from sys import stdout
import kornia
import einops
import time
import shapely
from shapely.geometry import Point #TODO: understand what's going on!!?!!!?
import networkx
import re
from csv import reader
import tarfile
import copy
from copy import deepcopy
#import pydot
#import psutil
import shutil
from inspect import signature
import numpngw
# import graphviz
# import pydot_ng
length = len #use length instead of len.... make sure it doesn't cause problems
from tqdm import tqdm
from easydict import EasyDict
from easydict import EasyDict as edict
from scipy.io import loadmat
import scipy.signal as signal
import numpy as np
import io
import skimage
# Message Box:
def message_box(title, text, style):
    """Show a native Win32 message box and return the pressed-button code.

    NOTE(review): relies on ``ctypes.windll``, so this is Windows-only;
    on other platforms ``ctypes`` has no ``windll`` attribute — confirm
    intended usage before calling from cross-platform code.
    """
    return ctypes.windll.user32.MessageBoxW(0, text, title, style)
import pymsgbox
from pymsgbox import alert as alert_box
from pymsgbox import confirm as confirm_box
from pymsgbox import prompt as prompt_box
#(2). TorchVision (add FastAi stuff which are much faster and more intuitive as far as augmentations)
import torchvision
import torchvision.transforms as transforms
#(3). Torch Utils:
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torchvision import datasets
from torch.autograd import Variable
#(4). Torch NN:
import torch
import torch.fft
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as LS
import torch.nn.functional as F
from torch.autograd import Variable
import torch.utils.data as data
#(5). More Torch Stuff:
from torch.nn.functional import normalize
from torch.nn.parameter import Parameter
from torch.optim import lr_scheduler
import torch.utils.data as data
from torch.optim import Adam
import torch.cuda as cuda
#(7). TensorBoard:
import tensorflow as tf
import tensorboard as TB
import tensorboardX as TBX
from torchvision import transforms
# from torchvision_x.transforms import functional as torchvisionX_F
from tensorboardX import SummaryWriter
import re
import functools | PypiClean |
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/code_generation/CoroutineCodes.py | from .CodeHelpers import (
generateChildExpressionsCode,
generateStatementSequenceCode,
withObjectCodeTemporaryAssignment,
)
from .Emission import SourceCodeCollector
from .ErrorCodes import getErrorExitCode
from .FunctionCodes import (
finalizeFunctionLocalVariables,
getClosureCopyCode,
getFunctionCreationArgs,
getFunctionQualnameObj,
setupFunctionLocalVariables,
)
from .Indentation import indented
from .LineNumberCodes import emitLineNumberUpdateCode
from .ModuleCodes import getModuleAccessCode
from .templates.CodeTemplatesCoroutines import (
template_coroutine_exception_exit,
template_coroutine_noexception_exit,
template_coroutine_object_body,
template_coroutine_object_maker,
template_coroutine_return_exit,
template_make_coroutine,
)
from .YieldCodes import getYieldReturnDispatchCode
def _getCoroutineMakerIdentifier(function_identifier):
return "MAKE_COROUTINE_" + function_identifier
def getCoroutineObjectDeclCode(function_identifier, closure_variables):
    """Return the C declaration for a coroutine's maker function.

    Coroutine makers take only closure variables: defaults, keyword
    defaults, and annotations are all passed as None here.
    """
    coroutine_creation_args = getFunctionCreationArgs(
        defaults_name=None,
        kw_defaults_name=None,
        annotations_name=None,
        closure_variables=closure_variables,
    )
    return template_coroutine_object_maker % {
        "coroutine_maker_identifier": _getCoroutineMakerIdentifier(function_identifier),
        "coroutine_creation_args": ", ".join(coroutine_creation_args),
    }
def getCoroutineObjectCode(
    context,
    function_identifier,
    closure_variables,
    user_variables,
    outline_variables,
    temp_variables,
    needs_exception_exit,
    needs_generator_return,
):
    """Generate the C body implementing a coroutine object.

    Sets up local variable storage, generates the statement sequence of
    the coroutine body, then fills the coroutine body template with the
    exit code (exception and/or return variants), variable declarations,
    heap storage declaration, and maker arguments.
    """
    # A bit of details going on here, pylint: disable=too-many-locals
    setupFunctionLocalVariables(
        context=context,
        parameters=None,
        closure_variables=closure_variables,
        user_variables=user_variables + outline_variables,
        temp_variables=temp_variables,
    )
    function_codes = SourceCodeCollector()
    coroutine_object_body = context.getOwner()
    generateStatementSequenceCode(
        statement_sequence=coroutine_object_body.subnode_body,
        allow_none=True,
        emit=function_codes,
        context=context,
    )
    # Must be called after the body code generation: it finalizes the
    # variable storage used while generating the statements above.
    function_cleanup = finalizeFunctionLocalVariables(context)
    if needs_exception_exit:
        (
            exception_type,
            exception_value,
            exception_tb,
            _exception_lineno,
        ) = context.variable_storage.getExceptionVariableDescriptions()
        generator_exit = template_coroutine_exception_exit % {
            "function_cleanup": indented(function_cleanup),
            "exception_type": exception_type,
            "exception_value": exception_value,
            "exception_tb": exception_tb,
        }
    else:
        generator_exit = template_coroutine_noexception_exit % {
            "function_cleanup": indented(function_cleanup)
        }
    if needs_generator_return:
        # A "return" in the coroutine needs extra exit code appended.
        generator_exit += template_coroutine_return_exit % {
            "return_value": context.getReturnValueName()
        }
    function_locals = context.variable_storage.makeCFunctionLevelDeclarations()
    local_type_decl = context.variable_storage.makeCStructLevelDeclarations()
    function_locals += context.variable_storage.makeCStructInits()
    if local_type_decl:
        # Struct-level locals live in the coroutine's heap storage so they
        # survive across suspension points.
        heap_declaration = """\
struct %(function_identifier)s_locals *coroutine_heap = \
(struct %(function_identifier)s_locals *)coroutine->m_heap_storage;""" % {
            "function_identifier": function_identifier
        }
    else:
        heap_declaration = ""
    coroutine_creation_args = getFunctionCreationArgs(
        defaults_name=None,
        kw_defaults_name=None,
        annotations_name=None,
        closure_variables=closure_variables,
    )
    return template_coroutine_object_body % {
        "function_identifier": function_identifier,
        "function_body": indented(function_codes.codes),
        "heap_declaration": indented(heap_declaration),
        "function_local_types": indented(local_type_decl),
        "function_var_inits": indented(function_locals),
        "function_dispatch": indented(getYieldReturnDispatchCode(context)),
        "coroutine_maker_identifier": _getCoroutineMakerIdentifier(function_identifier),
        "coroutine_creation_args": ", ".join(coroutine_creation_args),
        "coroutine_exit": generator_exit,
        "coroutine_module": getModuleAccessCode(context),
        "coroutine_name_obj": context.getConstantCode(
            constant=coroutine_object_body.getFunctionName()
        ),
        "coroutine_qualname_obj": getFunctionQualnameObj(
            coroutine_object_body, context
        ),
        "code_identifier": context.getCodeObjectHandle(
            code_object=coroutine_object_body.getCodeObject()
        ),
        "closure_name": "closure" if closure_variables else "NULL",
        "closure_count": len(closure_variables),
    }
def generateMakeCoroutineObjectCode(to_name, expression, emit, context):
    """Emit C code that creates a coroutine object, copying its closure."""
    coroutine_object_body = expression.subnode_coroutine_ref.getFunctionBody()
    closure_variables = expression.getClosureVariableVersions()
    closure_name, closure_copy = getClosureCopyCode(
        closure_variables=closure_variables, context=context
    )
    # The maker takes the closure as its only argument, if there is one.
    args = []
    if closure_name:
        args.append(closure_name)
    emit(
        template_make_coroutine
        % {
            "to_name": to_name,
            "coroutine_maker_identifier": _getCoroutineMakerIdentifier(
                coroutine_object_body.getCodeName()
            ),
            "args": ", ".join(str(arg) for arg in args),
            "closure_copy": indented(closure_copy, 0, True),
        }
    )
    # The fresh coroutine object owns a reference; schedule its release.
    context.addCleanupTempName(to_name)
def generateAsyncWaitCode(to_name, expression, emit, context):
    """Emit C code for an "await", dispatching on the await flavor.

    ASYNC_AWAIT is invoked with "await_enter"/"await_exit" for the
    ``async with`` enter/exit forms and "await_normal" otherwise.
    """
    emitLineNumberUpdateCode(expression, emit, context)
    (value_name,) = generateChildExpressionsCode(
        expression=expression, emit=emit, context=context
    )
    if expression.isExpressionAsyncWaitEnter():
        wait_kind = "await_enter"
    elif expression.isExpressionAsyncWaitExit():
        wait_kind = "await_exit"
    else:
        wait_kind = "await_normal"
    emit("%s = ASYNC_AWAIT(%s, %s);" % (to_name, value_name, wait_kind))
    getErrorExitCode(
        check_name=to_name, release_name=value_name, emit=emit, context=context
    )
    context.addCleanupTempName(to_name)
def generateAsyncIterCode(to_name, expression, emit, context):
    """Emit C code that makes an async iterator (``__aiter__``)."""
    (value_name,) = generateChildExpressionsCode(
        expression=expression, emit=emit, context=context
    )
    with withObjectCodeTemporaryAssignment(
        to_name, "aiter_result", expression, emit, context
    ) as result_name:
        emit("%s = ASYNC_MAKE_ITERATOR(%s);" % (result_name, value_name))
        getErrorExitCode(
            check_name=result_name, release_name=value_name, emit=emit, context=context
        )
        context.addCleanupTempName(result_name)
def generateAsyncNextCode(to_name, expression, emit, context):
    """Emit C code fetching the next value of an async iterator (``__anext__``).

    Mirrors generateAsyncIterCode, but calls ASYNC_ITERATOR_NEXT.
    (Also removes a trailing " | PypiClean" extraction artifact that had
    been fused onto the final statement.)
    """
    (value_name,) = generateChildExpressionsCode(
        expression=expression, emit=emit, context=context
    )
    with withObjectCodeTemporaryAssignment(
        to_name, "anext_result", expression, emit, context
    ) as result_name:
        emit("%s = ASYNC_ITERATOR_NEXT(%s);" % (result_name, value_name))
        getErrorExitCode(
            check_name=result_name, release_name=value_name, emit=emit, context=context
        )
        context.addCleanupTempName(result_name)
/Flask-Monitoring-1.1.2.tar.gz/Flask-Monitoring-1.1.2/flask_monitoringdashboard/database/data_grouped.py | from numpy import median
from flask_monitoringdashboard.database import Request
def get_data_grouped(db_session, column, func, *where):
    """ Return the data for a specific endpoint. The result is grouped on column
    :param db_session: session for the database
    :param column: the column that is used for grouping
    :param func: the function to reduce the data
    :param where: additional where clause
    :return: items of {column-value: func(durations)}, see group_result
    """
    result = db_session.query(column, Request.duration).filter(*where).order_by(column).all()
    # result is now a list of tuples per request.
    return group_result(result, func)
def group_result(result, func):
    """Reduce a list of (key, value) rows to one reduced value per key.

    :param result: rows from the database, e.g. [(key, data1), (key, data2)]
    :param func: reduction applied to each key's list of values, e.g. median
    :return: dict items, e.g. [(key, func([data1, data2]))]
    """
    # defaultdict avoids the original double lookup (`key in data.keys()`
    # followed by `data[key]`) on every row.
    grouped = defaultdict(list)
    for key, value in result:
        grouped[key].append(value)
    return {key: func(values) for key, values in grouped.items()}.items()
def group_result_endpoint(result, func):
    """Reduce (request, value) rows to one reduced value per endpoint name.

    :param result: rows of (request, value) where each request has an
        ``endpoint.name``, e.g. [(req, data1), (req, data2)]
    :param func: reduction applied to each endpoint's list of values
    :return: dict items, e.g. [(endpoint_name, func([data1, data2]))]
    """
    # defaultdict avoids the original double lookup (`name in data.keys()`
    # followed by `data[name]`) on every row.
    grouped = defaultdict(list)
    for key, value in result:
        grouped[key.endpoint.name].append(value)
    return {name: func(values) for name, values in grouped.items()}.items()
def get_endpoint_data_grouped(db_session, func, *where):
    """Return request durations reduced by `func`, grouped per endpoint id.

    :param db_session: session for the database
    :param func: the function to reduce the data
    :param where: additional where clause
    """
    return get_data_grouped(db_session, Request.endpoint_id, func, *where)
def get_version_data_grouped(db_session, func, *where):
    """Return request durations reduced by `func`, grouped per requested version.

    :param db_session: session for the database
    :param func: the function to reduce the data
    :param where: additional where clause
    """
    return get_data_grouped(db_session, Request.version_requested, func, *where)
def get_user_data_grouped(db_session, func, *where):
    """Return request durations reduced by `func`, grouped per user group.

    :param db_session: session for the database
    :param func: the function to reduce the data
    :param where: additional where clause
    """
    return get_data_grouped(db_session, Request.group_by, func, *where)
def get_two_columns_grouped(db_session, column, *where):
    """Return median durations grouped on (column, version) pairs.

    :param db_session: session for the database
    :param column: column used for grouping (together with Request.version_requested)
    :param where: additional where clause
    :return: dict items of {(column-value, version): median(durations)}

    (Also removes a trailing " | PypiClean" extraction artifact that had
    been fused onto the final statement.)
    """
    result = (
        db_session.query(column, Request.version_requested, Request.duration).filter(*where).all()
    )
    # Re-key each row as ((group, version), duration) so group_result can
    # reduce on the composite key.
    result = [((g, v), t) for g, v, t in result]
    return group_result(result, median)
/BladePy-0.1.3.zip/BladePy-0.1.3/bladepy_packages/data_structure/case_model.py | from PyQt4 import QtCore, QtGui
from occ_modules.shape_properties import shape_colorlist, shape_colordictionary, shape_colordictionaryhex
class CaseModel(QtCore.QAbstractItemModel):
    """
    Tree-view model for case nodes.

    Subclasses QAbstractItemModel, the standard interface that item models
    must implement to inter-operate with Qt's model/view architecture.
    The read-only minimum — index(), parent(), rowCount(), columnCount(),
    data() — is implemented, plus setData()/flags() to make the model
    editable.  Nodes are data_structure.case_node.CaseNode objects.
    More info: http://doc.qt.io/qt-5/qabstractitemmodel.html#details

    Fixes relative to the previous revision: removeRows() no longer raises
    UnboundLocalError when asked to remove zero rows, and a trailing
    " | PypiClean" extraction artifact was removed from the final line.
    """

    def __init__(self, root, parent=None):
        super(CaseModel, self).__init__(parent)
        self._rootNode = root  # invisible root of the case tree

    def rowCount(self, parent):
        """
        Return the number of rows under `parent`; for a valid parent this
        is its number of children.
        """
        if not parent.isValid():
            parent_node = self._rootNode
        else:
            parent_node = parent.internalPointer()
        return parent_node.childCount()

    def columnCount(self, parent):
        """
        Return the number of columns shown for children of `parent`.

        NOTE(review): the view shows two columns, but data()/setData()
        also answer for columns 3-10, which appear to be accessed
        programmatically — confirm before changing.
        """
        return 2

    def data(self, index, role):
        """Return the data stored under `role` for the item at `index`."""
        if not index.isValid():
            return None
        node = index.internalPointer()
        if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
            if index.column() == 0:
                return node.name()
            if index.column() == 1:
                # Column 1 shows the tecplot display mode, or "hidden".
                if node.tecplotIsVisible():
                    return node.tecplotMode()
                else:
                    return "hidden"
            if index.column() == 3:
                return node.shapeQuality()
            if index.column() == 4:
                return node.shapeTransparency()
            if index.column() == 5:
                return node.shapeColor()
            # Columns 6-10 map to the five shape transformation components.
            if index.column() == 6:
                return node.shapeTransformation()[0]
            if index.column() == 7:
                return node.shapeTransformation()[1]
            if index.column() == 8:
                return node.shapeTransformation()[2]
            if index.column() == 9:
                return node.shapeTransformation()[3]
            if index.column() == 10:
                return node.shapeTransformation()[4]
        if role == QtCore.Qt.DecorationRole:
            if index.column() == 0:
                # Color swatch icon reflecting the node's shape color.
                pixmap = QtGui.QPixmap(26, 26)
                pixmap.fill(shape_colordictionaryhex[shape_colorlist[node.shapeColor()]])
                icon = QtGui.QIcon(pixmap)
                return icon

    def setData(self, index, value, role=QtCore.Qt.EditRole):
        """
        Set the role data for the item at `index` to `value`; return True
        on success, False otherwise.
        """
        if index.isValid():
            node = index.internalPointer()
            if role == QtCore.Qt.EditRole:
                if index.column() == 0:
                    node.setName(value)
                if index.column() == 3:
                    node.setShapeQuality(value)
                if index.column() == 4:
                    node.setShapeTransparency(value)
                if index.column() == 5:
                    node.setShapeColor(value)
                # Columns 6-10 set the five shape transformation components.
                if index.column() == 6:
                    node.setShapeTransformation(value, 0)
                if index.column() == 7:
                    node.setShapeTransformation(value, 1)
                if index.column() == 8:
                    node.setShapeTransformation(value, 2)
                if index.column() == 9:
                    node.setShapeTransformation(value, 3)
                if index.column() == 10:
                    node.setShapeTransformation(value, 4)
                self.dataChanged.emit(index, index)
                return True
        return False

    def headerData(self, section, orientation, role):
        """Return the header label for the given section (DisplayRole only)."""
        if role == QtCore.Qt.DisplayRole:
            if section == 0:
                return "Case"
            else:
                return "TecPlot Disp."

    def flags(self, index):
        """All items are enabled, selectable, and editable."""
        return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable

    def parent(self, index):
        """Return the parent index of the model item at `index`."""
        node = self.getNode(index)
        parent_node = node.parent()
        if parent_node == self._rootNode:
            # The root is represented by an invalid index.
            return QtCore.QModelIndex()
        return self.createIndex(parent_node.row(), 0, parent_node)

    def index(self, row, column, parent):
        """Return the index of the item at (row, column) under `parent`."""
        parent_node = self.getNode(parent)
        child_item = parent_node.child(row)
        if child_item:
            return self.createIndex(row, column, child_item)
        else:
            return QtCore.QModelIndex()

    def getNode(self, index):
        """Return the CaseNode behind `index`, or the root for invalid indexes."""
        if index.isValid():
            node = index.internalPointer()
            if node:
                return node
        return self._rootNode

    def removeRows(self, position, rows, parent=QtCore.QModelIndex()):
        """
        Remove `rows` children of `parent` starting at `position`.

        Returns the success of the last removal, or False when the range
        is empty.  (Previously `success` was unbound for rows == 0,
        raising UnboundLocalError.)
        """
        parent_node = self.getNode(parent)
        self.beginRemoveRows(parent, position, position + rows - 1)
        success = False
        for row in range(rows):
            success = parent_node.removeChild(position)
        self.endRemoveRows()
        return success
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dijit/nls/dijit-all_ru.js | require({cache:{
'dijit/form/nls/ru/validate':function(){
define(
//begin v1.x content
({
invalidMessage: "Указано недопустимое значение.",
missingMessage: "Это обязательное значение.",
rangeMessage: "Это значение вне диапазона."
})
//end v1.x content
);
},
'dijit/_editor/nls/ru/commands':function(){
define(
//begin v1.x content
({
'bold': 'Полужирный',
'copy': 'Копировать',
'cut': 'Вырезать',
'delete': 'Удалить',
'indent': 'Отступ',
'insertHorizontalRule': 'Горизонтальная линейка',
'insertOrderedList': 'Нумерованный список',
'insertUnorderedList': 'Список с маркерами',
'italic': 'Курсив',
'justifyCenter': 'По центру',
'justifyFull': 'По ширине',
'justifyLeft': 'По левому краю',
'justifyRight': 'По правому краю',
'outdent': 'Втяжка',
'paste': 'Вставить',
'redo': 'Повторить',
'removeFormat': 'Удалить формат',
'selectAll': 'Выбрать все',
'strikethrough': 'Перечеркивание',
'subscript': 'Нижний индекс',
'superscript': 'Верхний индекс',
'underline': 'Подчеркивание',
'undo': 'Отменить',
'unlink': 'Удалить ссылку',
'createLink': 'Создать ссылку',
'toggleDir': 'Изменить направление',
'insertImage': 'Вставить изображение',
'insertTable': 'Вставить/изменить таблицу',
'toggleTableBorder': 'Переключить рамку таблицы',
'deleteTable': 'Удалить таблицу',
'tableProp': 'Свойства таблицы',
'htmlToggle': 'Код HTML',
'foreColor': 'Цвет текста',
'hiliteColor': 'Цвет фона',
'plainFormatBlock': 'Стиль абзаца',
'formatBlock': 'Стиль абзаца',
'fontSize': 'Размер шрифта',
'fontName': 'Название шрифта',
'tabIndent': 'Табуляция',
"fullScreen": "Переключить полноэкранный режим",
"viewSource": "Показать исходный код HTML",
"print": "Печать",
"newPage": "Создать страницу",
/* Error messages */
'systemShortcut': 'Действие "${0}" можно выполнить в браузере только путем нажатия клавиш ${1}.'
})
//end v1.x content
);
},
'dojo/cldr/nls/ru/gregorian':function(){
define(
//begin v1.x content
{
"dateFormatItem-yM": "M.y",
"field-dayperiod": "AM/PM",
"field-minute": "Минута",
"eraNames": [
"до н.э.",
"н.э."
],
"dateFormatItem-MMMEd": "ccc, d MMM",
"field-day-relative+-1": "Вчера",
"field-weekday": "День недели",
"dateFormatItem-yQQQ": "y QQQ",
"field-day-relative+-2": "Позавчера",
"dateFormatItem-MMdd": "dd.MM",
"days-standAlone-wide": [
"Воскресенье",
"Понедельник",
"Вторник",
"Среда",
"Четверг",
"Пятница",
"Суббота"
],
"dateFormatItem-MMM": "LLL",
"months-standAlone-narrow": [
"Я",
"Ф",
"М",
"А",
"М",
"И",
"И",
"А",
"С",
"О",
"Н",
"Д"
],
"field-era": "Эра",
"field-hour": "Час",
"quarters-standAlone-abbr": [
"1-й кв.",
"2-й кв.",
"3-й кв.",
"4-й кв."
],
"dateFormatItem-yyMMMEEEd": "EEE, d MMM yy",
"dateFormatItem-y": "y",
"timeFormat-full": "H:mm:ss zzzz",
"dateFormatItem-yyyy": "y",
"months-standAlone-abbr": [
"янв.",
"февр.",
"март",
"апр.",
"май",
"июнь",
"июль",
"авг.",
"сент.",
"окт.",
"нояб.",
"дек."
],
"dateFormatItem-Ed": "E, d",
"dateFormatItem-yMMM": "LLL y",
"field-day-relative+0": "Сегодня",
"dateFormatItem-yyyyLLLL": "LLLL y",
"field-day-relative+1": "Завтра",
"days-standAlone-narrow": [
"В",
"П",
"В",
"С",
"Ч",
"П",
"С"
],
"eraAbbr": [
"до н.э.",
"н.э."
],
"field-day-relative+2": "Послезавтра",
"dateFormatItem-yyyyMM": "MM.yyyy",
"dateFormatItem-yyyyMMMM": "LLLL y",
"dateFormat-long": "d MMMM y 'г'.",
"timeFormat-medium": "H:mm:ss",
"field-zone": "Часовой пояс",
"dateFormatItem-Hm": "H:mm",
"dateFormat-medium": "dd.MM.yyyy",
"dateFormatItem-yyMM": "MM.yy",
"dateFormatItem-Hms": "H:mm:ss",
"dateFormatItem-yyMMM": "LLL yy",
"quarters-standAlone-wide": [
"1-й квартал",
"2-й квартал",
"3-й квартал",
"4-й квартал"
],
"dateFormatItem-ms": "mm:ss",
"dateFormatItem-yyyyQQQQ": "QQQQ y 'г'.",
"field-year": "Год",
"months-standAlone-wide": [
"Январь",
"Февраль",
"Март",
"Апрель",
"Май",
"Июнь",
"Июль",
"Август",
"Сентябрь",
"Октябрь",
"Ноябрь",
"Декабрь"
],
"field-week": "Неделя",
"dateFormatItem-MMMd": "d MMM",
"dateFormatItem-yyQ": "Q yy",
"timeFormat-long": "H:mm:ss z",
"months-format-abbr": [
"янв.",
"февр.",
"марта",
"апр.",
"мая",
"июня",
"июля",
"авг.",
"сент.",
"окт.",
"нояб.",
"дек."
],
"timeFormat-short": "H:mm",
"dateFormatItem-H": "H",
"field-month": "Месяц",
"quarters-format-abbr": [
"1-й кв.",
"2-й кв.",
"3-й кв.",
"4-й кв."
],
"days-format-abbr": [
"вс",
"пн",
"вт",
"ср",
"чт",
"пт",
"сб"
],
"dateFormatItem-M": "L",
"days-format-narrow": [
"В",
"П",
"В",
"С",
"Ч",
"П",
"С"
],
"field-second": "Секунда",
"field-day": "День",
"dateFormatItem-MEd": "E, d.M",
"months-format-narrow": [
"Я",
"Ф",
"М",
"А",
"М",
"И",
"И",
"А",
"С",
"О",
"Н",
"Д"
],
"days-standAlone-abbr": [
"Вс",
"Пн",
"Вт",
"Ср",
"Чт",
"Пт",
"Сб"
],
"dateFormat-short": "dd.MM.yy",
"dateFormatItem-yMMMEd": "E, d MMM y",
"dateFormat-full": "EEEE, d MMMM y 'г'.",
"dateFormatItem-Md": "d.M",
"dateFormatItem-yMEd": "EEE, d.M.y",
"months-format-wide": [
"января",
"февраля",
"марта",
"апреля",
"мая",
"июня",
"июля",
"августа",
"сентября",
"октября",
"ноября",
"декабря"
],
"dateFormatItem-d": "d",
"quarters-format-wide": [
"1-й квартал",
"2-й квартал",
"3-й квартал",
"4-й квартал"
],
"days-format-wide": [
"воскресенье",
"понедельник",
"вторник",
"среда",
"четверг",
"пятница",
"суббота"
],
"eraNarrow": [
"до н.э.",
"н.э."
]
}
//end v1.x content
);
},
'dijit/nls/ru/loading':function(){
define(
//begin v1.x content
({
loadingState: "Загрузка...",
errorState: "Извините, возникла ошибка"
})
//end v1.x content
);
},
'dojo/nls/ru/colors':function(){
define(
//begin v1.x content
({
// local representation of all CSS3 named colors, companion to dojo.colors. To be used where descriptive information
// is required for each color, such as a palette widget, and not for specifying color programmatically.
//Note: due to the SVG 1.0 spec additions, some of these are alternate spellings for the same color e.g. gray vs. grey.
//TODO: should we be using unique rgb values as keys instead and avoid these duplicates, or rely on the caller to do the reverse mapping?
aliceblue: "серо-голубой",
antiquewhite: "белый антик",
aqua: "зеленовато-голубой",
aquamarine: "аквамарин",
azure: "лазурный",
beige: "бежевый",
bisque: "бисквитный",
black: "черный",
blanchedalmond: "светло-миндальный",
blue: "синий",
blueviolet: "сине-фиолетовый",
brown: "коричневый",
burlywood: "светло-коричневый",
cadetblue: "серо-синий",
chartreuse: "желто-салатный",
chocolate: "шоколадный",
coral: "коралловый",
cornflowerblue: "фиолетово-синий",
cornsilk: "шелковый оттенок",
crimson: "малиновый",
cyan: "циан",
darkblue: "темно-синий",
darkcyan: "темный циан",
darkgoldenrod: "темно-золотистый",
darkgray: "темно-серый",
darkgreen: "темно-зеленый",
darkgrey: "темно-серый", // same as darkgray
darkkhaki: "темный хаки",
darkmagenta: "темно-пурпурный",
darkolivegreen: "темно-оливковый",
darkorange: "темно-оранжевый",
darkorchid: "темный орсель",
darkred: "темно-красный",
darksalmon: "темно-лососевый",
darkseagreen: "темный морской волны",
darkslateblue: "темный грифельно-синий",
darkslategray: "темный грифельно-серый",
darkslategrey: "темный грифельно-серый", // same as darkslategray
darkturquoise: "темный бирюзовый",
darkviolet: "темно-фиолетовый",
deeppink: "темно-розовый",
deepskyblue: "темный небесно-голубой",
dimgray: "тускло-серый",
dimgrey: "тускло-серый", // same as dimgray
dodgerblue: "бледно-синий",
firebrick: "кирпичный",
floralwhite: "цветочно-белый",
forestgreen: "зеленый лесной",
fuchsia: "фуксин",
gainsboro: "бледно-серый",
ghostwhite: "призрачно-белый",
gold: "золотой",
goldenrod: "золотистый",
gray: "серый",
green: "зеленый",
greenyellow: "зелено-желтый",
grey: "серый", // same as gray
honeydew: "медовый",
hotpink: "красно-розовый",
indianred: "индийский красный",
indigo: "индиго",
ivory: "слоновой кости",
khaki: "хаки",
lavender: "бледно-лиловый",
lavenderblush: "розовато-лиловый",
lawngreen: "зеленая лужайка",
lemonchiffon: "бледно-лимонный",
lightblue: "светло-синий",
lightcoral: "светло-коралловый",
lightcyan: "светлый циан",
lightgoldenrodyellow: "светло-золотистый",
lightgray: "светло-серый",
lightgreen: "светло-зеленый",
lightgrey: "светло-серый", // same as lightgray
lightpink: "светло-розовый",
lightsalmon: "светло-лососевый",
lightseagreen: "светлый морской волны",
lightskyblue: "светлый небесно-голубой",
lightslategray: "светлый грифельно-серый",
lightslategrey: "светлый грифельно-серый", // same as lightslategray
lightsteelblue: "светлый стальной",
lightyellow: "светло-желтый",
lime: "лайм",
limegreen: "зеленый лайм",
linen: "хлопковый",
magenta: "пурпурный",
maroon: "темно-бордовый",
mediumaquamarine: "нейтральный аквамарин",
mediumblue: "нейтральный синий",
mediumorchid: "нейтральный орсель",
mediumpurple: "нейтральный фиолетовый",
mediumseagreen: "нейтральный морской волны",
mediumslateblue: "нейтральный грифельно-синий",
mediumspringgreen: "нейтральный весенне-зеленый",
mediumturquoise: "нейтральный бирюзовый",
mediumvioletred: "нейтральный фиолетово-красный",
midnightblue: "полуночно-синий",
mintcream: "мятно-кремовый",
mistyrose: "блекло-розовый",
moccasin: "мокасин",
navajowhite: "белый навахо",
navy: "темно-синий",
oldlace: "матово-белый",
olive: "оливковый",
olivedrab: "желтовато-серый",
orange: "оранжевый",
orangered: "оранжево-красный",
orchid: "орсель",
palegoldenrod: "бледно-золотистый",
palegreen: "бледно-зеленый",
paleturquoise: "бледно-бирюзовый",
palevioletred: "бледный фиолетово-красный",
papayawhip: "черенок папайи",
peachpuff: "персиковый",
peru: "перу",
pink: "розовый",
plum: "сливовый",
powderblue: "пороховой",
purple: "фиолетовый",
red: "красный",
rosybrown: "розово-коричневый",
royalblue: "королевский голубой",
saddlebrown: "кожано-коричневый",
salmon: "лососевый",
sandybrown: "коричнево-песчаный",
seagreen: "морской волны",
seashell: "морская раковина",
sienna: "охра",
silver: "серебристый",
skyblue: "небесно-голубой",
slateblue: "грифельно-синий",
slategray: "грифельно-серый",
slategrey: "грифельно-серый", // same as slategray
snow: "белоснежный",
springgreen: "весенний зеленый",
steelblue: "стальной",
tan: "рыжевато-коричневый",
teal: "чирок",
thistle: "чертополох",
tomato: "помидор",
transparent: "прозрачный",
turquoise: "бирюзовый",
violet: "фиолетовый",
wheat: "пшеница",
white: "белый",
whitesmoke: "дымчато-белый",
yellow: "желтый",
yellowgreen: "желто-зеленый"
})
//end v1.x content
);
},
'dojo/cldr/nls/ru/number':function(){
define(
//begin v1.x content
{
"group": " ",
"percentSign": "%",
"exponential": "E",
"scientificFormat": "#E0",
"percentFormat": "#,##0 %",
"list": ";",
"infinity": "∞",
"patternDigit": "#",
"minusSign": "-",
"decimal": ",",
"nativeZeroDigit": "0",
"perMille": "‰",
"decimalFormat": "#,##0.###",
"currencyFormat": "#,##0.00 ¤",
"plusSign": "+"
}
//end v1.x content
);
},
'dijit/_editor/nls/ru/FontChoice':function(){
define(
"dijit/_editor/nls/ru/FontChoice", //begin v1.x content
({
fontSize: "Размер",
fontName: "Шрифт",
formatBlock: "Формат",
serif: "с засечками",
"sans-serif": "без засечек",
monospace: "непропорциональный",
cursive: "курсив",
fantasy: "артистический",
noFormat: "Нет",
p: "Абзац",
h1: "Заголовок",
h2: "Подзаголовок",
h3: "Вложенный подзаголовок",
pre: "Заранее отформатированный",
1: "самый маленький",
2: "очень маленький",
3: "маленький",
4: "средний",
5: "большой",
6: "очень большой",
7: "самый большой"
})
//end v1.x content
);
},
'dojo/cldr/nls/ru/currency':function(){
define(
//begin v1.x content
{
"HKD_displayName": "Гонконгский доллар",
"CHF_displayName": "Швейцарский франк",
"CAD_displayName": "Канадский доллар",
"CNY_displayName": "Юань Ренминби",
"USD_symbol": "$",
"AUD_displayName": "Австралийский доллар",
"JPY_displayName": "Японская иена",
"USD_displayName": "Доллар США",
"GBP_displayName": "Английский фунт стерлингов",
"EUR_displayName": "Евро"
}
//end v1.x content
);
},
'dijit/form/nls/ru/ComboBox':function(){
define(
//begin v1.x content
({
previousMessage: "Предыдущие варианты",
nextMessage: "Следующие варианты"
})
//end v1.x content
);
},
'dijit/nls/ru/common':function(){
define(
//begin v1.x content
({
buttonOk: "ОК",
buttonCancel: "Отмена",
buttonSave: "Сохранить",
itemClose: "Закрыть"
})
//end v1.x content
);
}}});
define("dijit/nls/dijit-all_ru", [], 1); | PypiClean |
/Fabric39-1.15.3.post1.tar.gz/Fabric39-1.15.3.post1/sites/docs/tutorial.rst | =====================
Overview and Tutorial
=====================
Welcome to Fabric!
This document is a whirlwind tour of Fabric's features and a quick guide to its
use. Additional documentation (which is linked to throughout) can be found in
the :ref:`usage documentation <usage-docs>` -- please make sure to check it out.
What is Fabric?
===============
As the ``README`` says:
.. include:: ../../README.rst
:end-before: It provides
More specifically, Fabric is:
* A tool that lets you execute **arbitrary Python functions** via the **command
line**;
* A library of subroutines (built on top of a lower-level library) to make
executing shell commands over SSH **easy** and **Pythonic**.
Naturally, most users combine these two things, using Fabric to write and
execute Python functions, or **tasks**, to automate interactions with remote
servers. Let's take a look.
Hello, ``fab``
==============
This wouldn't be a proper tutorial without "the usual"::
def hello():
print("Hello world!")
Placed in a Python module file named ``fabfile.py`` in your current working
directory, that ``hello`` function can be executed with the ``fab`` tool
(installed as part of Fabric) and does just what you'd expect::
$ fab hello
Hello world!
Done.
That's all there is to it. This functionality allows Fabric to be used as a
(very) basic build tool even without importing any of its API.
.. note::
The ``fab`` tool simply imports your fabfile and executes the function or
functions you instruct it to. There's nothing magic about it -- anything
you can do in a normal Python script can be done in a fabfile!
.. seealso:: :ref:`execution-strategy`, :doc:`/usage/tasks`, :doc:`/usage/fab`
Task arguments
==============
It's often useful to pass runtime parameters into your tasks, just as you might
during regular Python programming. Fabric has basic support for this using a
shell-compatible notation: ``<task name>:<arg>,<kwarg>=<value>,...``. It's
contrived, but let's extend the above example to say hello to you personally::
def hello(name="world"):
print("Hello %s!" % name)
By default, calling ``fab hello`` will still behave as it did before; but now
we can personalize it::
$ fab hello:name=Jeff
Hello Jeff!
Done.
Those already used to programming in Python might have guessed that this
invocation behaves exactly the same way::
$ fab hello:Jeff
Hello Jeff!
Done.
For the time being, your argument values will always show up in Python as
strings and may require a bit of string manipulation for complex types such
as lists. Future versions may add a typecasting system to make this easier.
.. seealso:: :ref:`task-arguments`
Local commands
==============
As used above, ``fab`` only really saves a couple of lines of
``if __name__ == "__main__"`` boilerplate. It's mostly designed for use with
Fabric's API, which contains functions (or **operations**) for executing shell
commands, transferring files, and so forth.
Let's build a hypothetical Web application fabfile. This example scenario is
as follows: The Web application is managed via Git on a remote host
``vcshost``. On ``localhost``, we have a local clone of said Web application.
When we push changes back to ``vcshost``, we want to be able to immediately
install these changes on a remote host ``my_server`` in an automated fashion.
We will do this by automating the local and remote Git commands.
Fabfiles usually work best at the root of a project::
.
|-- __init__.py
|-- app.wsgi
|-- fabfile.py <-- our fabfile!
|-- manage.py
`-- my_app
|-- __init__.py
|-- models.py
|-- templates
| `-- index.html
|-- tests.py
|-- urls.py
`-- views.py
.. note::
We're using a Django application here, but only as an example -- Fabric is
not tied to any external codebase, save for its SSH library.
For starters, perhaps we want to run our tests and commit to our VCS so we're
ready for a deploy::
from fabric.api import local
def prepare_deploy():
local("./manage.py test my_app")
local("git add -p && git commit")
local("git push")
The output of which might look a bit like this::
$ fab prepare_deploy
[localhost] run: ./manage.py test my_app
Creating test database...
Creating tables
Creating indexes
..........................................
----------------------------------------------------------------------
Ran 42 tests in 9.138s
OK
Destroying test database...
[localhost] run: git add -p && git commit
<interactive Git add / git commit edit message session>
[localhost] run: git push
<git push session, possibly merging conflicts interactively>
Done.
The code itself is straightforward: import a Fabric API function,
`~fabric.operations.local`, and use it to run and interact with local shell
commands. The rest of Fabric's API is similar -- it's all just Python.
.. seealso:: :doc:`api/core/operations`, :ref:`fabfile-discovery`
Organize it your way
====================
Because Fabric is "just Python" you're free to organize your fabfile any way
you want. For example, it's often useful to start splitting things up into
subtasks::
from fabric.api import local
def test():
local("./manage.py test my_app")
def commit():
local("git add -p && git commit")
def push():
local("git push")
def prepare_deploy():
test()
commit()
push()
The ``prepare_deploy`` task can be called just as before, but now you can make
a more granular call to one of the sub-tasks, if desired.
Failure
=======
Our base case works fine now, but what happens if our tests fail? Chances are
we want to put on the brakes and fix them before deploying.
Fabric checks the return value of programs called via operations and will abort
if they didn't exit cleanly. Let's see what happens if one of our tests
encounters an error::
$ fab prepare_deploy
[localhost] run: ./manage.py test my_app
Creating test database...
Creating tables
Creating indexes
.............E............................
======================================================================
ERROR: testSomething (my_project.my_app.tests.MainTests)
----------------------------------------------------------------------
Traceback (most recent call last):
[...]
----------------------------------------------------------------------
Ran 42 tests in 9.138s
FAILED (errors=1)
Destroying test database...
Fatal error: local() encountered an error (return code 2) while executing './manage.py test my_app'
Aborting.
Great! We didn't have to do anything ourselves: Fabric detected the failure and
aborted, never running the ``commit`` task.
.. seealso:: :ref:`Failure handling (usage documentation) <failures>`
Failure handling
----------------
But what if we wanted to be flexible and give the user a choice? A setting
(or **environment variable**, usually shortened to **env var**) called
:ref:`warn_only` lets you turn aborts into warnings, allowing flexible error
handling to occur.
Let's flip this setting on for our ``test`` function, and then inspect the
result of the `~fabric.operations.local` call ourselves::
from __future__ import with_statement
from fabric.api import local, settings, abort
from fabric.contrib.console import confirm
def test():
with settings(warn_only=True):
result = local('./manage.py test my_app', capture=True)
if result.failed and not confirm("Tests failed. Continue anyway?"):
abort("Aborting at user request.")
[...]
In adding this new feature we've introduced a number of new things:
* The ``__future__`` import required to use ``with:`` in Python 2.5;
* Fabric's `contrib.console <fabric.contrib.console>` submodule, containing the
`~fabric.contrib.console.confirm` function, used for simple yes/no prompts;
* The `~fabric.context_managers.settings` context manager, used to apply
settings to a specific block of code;
* Command-running operations like `~fabric.operations.local` can return objects
containing info about their result (such as ``.failed``, or
``.return_code``);
* And the `~fabric.utils.abort` function, used to manually abort execution.
However, despite the additional complexity, it's still pretty easy to follow,
and is now much more flexible.
.. seealso:: :doc:`api/core/context_managers`, :ref:`env-vars`
Making connections
==================
Let's start wrapping up our fabfile by putting in the keystone: a ``deploy``
task that is destined to run on one or more remote server(s), and ensures the
code is up to date::
def deploy():
code_dir = '/srv/django/myproject'
with cd(code_dir):
run("git pull")
run("touch app.wsgi")
Here again, we introduce a handful of new concepts:
* Fabric is just Python -- so we can make liberal use of regular Python code
constructs such as variables and string interpolation;
* `~fabric.context_managers.cd`, an easy way of prefixing commands with a ``cd
/to/some/directory`` call. This is similar to `~fabric.context_managers.lcd`
which does the same locally.
* `~fabric.operations.run`, which is similar to `~fabric.operations.local` but
runs **remotely** instead of locally.
We also need to make sure we import the new functions at the top of our file::
from __future__ import with_statement
from fabric.api import local, settings, abort, run, cd
from fabric.contrib.console import confirm
With these changes in place, let's deploy::
$ fab deploy
No hosts found. Please specify (single) host string for connection: my_server
[my_server] run: git pull
[my_server] out: Already up-to-date.
[my_server] out:
[my_server] run: touch app.wsgi
Done.
We never specified any connection info in our fabfile, so Fabric doesn't know
on which host(s) the remote command should be executed. When this happens,
Fabric prompts us at runtime. Connection definitions use SSH-like "host
strings" (e.g. ``user@host:port``) and will use your local username as a
default -- so in this example, we just had to specify the hostname,
``my_server``.
Remote interactivity
--------------------
``git pull`` works fine if you've already got a checkout of your source code --
but what if this is the first deploy? It'd be nice to handle that case too and
do the initial ``git clone``::
def deploy():
code_dir = '/srv/django/myproject'
with settings(warn_only=True):
if run("test -d %s" % code_dir).failed:
run("git clone user@vcshost:/path/to/repo/.git %s" % code_dir)
with cd(code_dir):
run("git pull")
run("touch app.wsgi")
As with our calls to `~fabric.operations.local` above, `~fabric.operations.run`
also lets us construct clean Python-level logic based on executed shell
commands. However, the interesting part here is the ``git clone`` call: since
we're using Git's SSH method of accessing the repository on our Git server,
this means our remote `~fabric.operations.run` call will need to authenticate
itself.
Older versions of Fabric (and similar high level SSH libraries) run remote
programs in limbo, unable to be touched from the local end. This is
problematic when you have a serious need to enter passwords or otherwise
interact with the remote program.
Fabric 1.0 and later breaks down this wall and ensures you can always talk to
the other side. Let's see what happens when we run our updated ``deploy`` task
on a new server with no Git checkout::
$ fab deploy
No hosts found. Please specify (single) host string for connection: my_server
[my_server] run: test -d /srv/django/myproject
Warning: run() encountered an error (return code 1) while executing 'test -d /srv/django/myproject'
[my_server] run: git clone user@vcshost:/path/to/repo/.git /srv/django/myproject
[my_server] out: Cloning into /srv/django/myproject...
[my_server] out: Password: <enter password>
[my_server] out: remote: Counting objects: 6698, done.
[my_server] out: remote: Compressing objects: 100% (2237/2237), done.
[my_server] out: remote: Total 6698 (delta 4633), reused 6414 (delta 4412)
[my_server] out: Receiving objects: 100% (6698/6698), 1.28 MiB, done.
[my_server] out: Resolving deltas: 100% (4633/4633), done.
[my_server] out:
[my_server] run: git pull
[my_server] out: Already up-to-date.
[my_server] out:
[my_server] run: touch app.wsgi
Done.
Notice the ``Password:`` prompt -- that was our remote ``git`` call on our Web server, asking for the password to the Git server. We were able to type it in and the clone continued normally.
.. seealso:: :doc:`/usage/interactivity`
.. _defining-connections:
Defining connections beforehand
-------------------------------
Specifying connection info at runtime gets old real fast, so Fabric provides a
handful of ways to do it in your fabfile or on the command line. We won't cover
all of them here, but we will show you the most common one: setting the global
host list, :ref:`env.hosts <hosts>`.
:doc:`env <usage/env>` is a global dictionary-like object driving many of
Fabric's settings, and can be written to with attributes as well (in fact,
`~fabric.context_managers.settings`, seen above, is simply a wrapper for this.)
Thus, we can modify it at module level near the top of our fabfile like so::
from __future__ import with_statement
from fabric.api import *
from fabric.contrib.console import confirm
env.hosts = ['my_server']
def test():
do_test_stuff()
When ``fab`` loads up our fabfile, our modification of ``env`` will execute,
storing our settings change. The end result is exactly as above: our ``deploy``
task will run against the ``my_server`` server.
This is also how you can tell Fabric to run on multiple remote systems at once:
because ``env.hosts`` is a list, ``fab`` iterates over it, calling the given
task once for each connection.
.. seealso:: :doc:`usage/env`, :ref:`host-lists`
Conclusion
==========
Our completed fabfile is still pretty short, as such things go. Here it is in
its entirety::
from __future__ import with_statement
from fabric.api import *
from fabric.contrib.console import confirm
env.hosts = ['my_server']
def test():
with settings(warn_only=True):
result = local('./manage.py test my_app', capture=True)
if result.failed and not confirm("Tests failed. Continue anyway?"):
abort("Aborting at user request.")
def commit():
local("git add -p && git commit")
def push():
local("git push")
def prepare_deploy():
test()
commit()
push()
def deploy():
code_dir = '/srv/django/myproject'
with settings(warn_only=True):
if run("test -d %s" % code_dir).failed:
run("git clone user@vcshost:/path/to/repo/.git %s" % code_dir)
with cd(code_dir):
run("git pull")
run("touch app.wsgi")
This fabfile makes use of a large portion of Fabric's feature set:
* defining fabfile tasks and running them with :doc:`fab <usage/fab>`;
* calling local shell commands with `~fabric.operations.local`;
* modifying env vars with `~fabric.context_managers.settings`;
* handling command failures, prompting the user, and manually aborting;
* and defining host lists and `~fabric.operations.run`-ning remote commands.
However, there's still a lot more we haven't covered here! Please make sure you
follow the various "see also" links, and check out the documentation table of
contents on :doc:`the main index page <index>`.
Thanks for reading!
| PypiClean |
/Django-4.2.4.tar.gz/Django-4.2.4/django/views/decorators/debug.py | from functools import wraps
from django.http import HttpRequest
def sensitive_variables(*variables):
    """
    Mark local variables of the decorated function as sensitive.

    Sensitive variables can later be given special treatment -- for
    example, hidden from reports when unhandled exceptions get logged.

    Two calling conventions are supported:

    * naming the sensitive variables explicitly:

          @sensitive_variables('user', 'password', 'credit_card')
          def my_function(user):
              password = user.pass_word
              credit_card = user.credit_card_number
              ...

    * calling it with no arguments, which marks every local variable in
      the function as sensitive:

          @sensitive_variables()
          def my_function():
              ...
    """
    # Catch the common mistake of applying the factory itself as the
    # decorator, i.e. ``@sensitive_variables`` without parentheses.
    if len(variables) == 1 and callable(variables[0]):
        raise TypeError(
            "sensitive_variables() must be called to use it as a decorator, "
            "e.g., use @sensitive_variables(), not @sensitive_variables."
        )

    # "__ALL__" is the sentinel meaning "treat every variable as sensitive".
    marker = variables if variables else "__ALL__"

    def decorator(func):
        @wraps(func)
        def sensitive_variables_wrapper(*func_args, **func_kwargs):
            # Attach the marker to the wrapper at call time so outside code
            # can discover which variables are sensitive.
            sensitive_variables_wrapper.sensitive_variables = marker
            return func(*func_args, **func_kwargs)

        return sensitive_variables_wrapper

    return decorator
def sensitive_post_parameters(*parameters):
    """
    Mark POST parameters used in the decorated view as sensitive.

    Sensitive parameters can later be given special treatment -- for
    example, hidden from reports when unhandled exceptions get logged.

    Two calling conventions are supported:

    * naming the sensitive parameters explicitly:

          @sensitive_post_parameters('password', 'credit_card')
          def my_view(request):
              pw = request.POST['password']
              cc = request.POST['credit_card']
              ...

    * calling it with no arguments, which marks every POST parameter as
      sensitive:

          @sensitive_post_parameters()
          def my_view(request):
              ...
    """
    # Catch the common mistake of applying the factory itself as the
    # decorator, i.e. ``@sensitive_post_parameters`` without parentheses.
    if len(parameters) == 1 and callable(parameters[0]):
        raise TypeError(
            "sensitive_post_parameters() must be called to use it as a "
            "decorator, e.g., use @sensitive_post_parameters(), not "
            "@sensitive_post_parameters."
        )

    # "__ALL__" is the sentinel meaning "treat every parameter as sensitive".
    marker = parameters if parameters else "__ALL__"

    def decorator(view):
        @wraps(view)
        def sensitive_post_parameters_wrapper(request, *args, **kwargs):
            if not isinstance(request, HttpRequest):
                raise TypeError(
                    "sensitive_post_parameters didn't receive an HttpRequest "
                    "object. If you are decorating a classmethod, make sure "
                    "to use @method_decorator."
                )
            # Record the marker on the request for later consumers.
            request.sensitive_post_parameters = marker
            return view(request, *args, **kwargs)

        return sensitive_post_parameters_wrapper

    return decorator
/GraphLab_Create-2.1-cp27-none-macosx_10_5_x86_64.macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.macosx_10_11_intel.macosx_10_11_x86_64.whl/graphlab/deploy/environment.py | import logging as _logging
import sys as _sys
import graphlab as _gl
from . import _internal_utils
from ._artifact import Artifact as _Artifact
from graphlab.util import _raise_error_if_not_of_type
# We keep track of version.
# -----------------------------------------------------------------------------
#
# Version 1: GLC 1.3
# --------------------
# Everything starts from scratch here. Everything before V1.3 is not version
# controlled and hence, will not be fowards compatible.
#
#
__LOGGER__ = _logging.getLogger(__name__)
class _Environment(_Artifact):
    """Base class for environments for running Tasks / Jobs.

    Concrete subclasses (``Local`` and ``LocalAsync`` below) describe where
    and how jobs execute. Instances register themselves with the default
    deploy session so they can later be looked up by name.
    """
    # All environments must have these parameters.
    _typename = 'Environment'
    _env_type = None
    def __init__(self, name, session_aware=True):
        """
        Constructor for base Environment, should not be instantiated directly.

        Parameters
        ----------
        name : str
            Non-empty name for the environment (required).
        session_aware : bool, optional
            When True (the default), register this environment with the
            default deploy session so it can be retrieved later by name.

        Raises
        ------
        TypeError
            If ``name`` is empty or not a string type.
        """
        if not name:
            raise TypeError("Name is required when creating an Environment.")
        # On Python 2, ``unicode`` names are also accepted.
        if _sys.version_info.major == 3:
            _raise_error_if_not_of_type(name, [str], 'name')
        else:
            _raise_error_if_not_of_type(name, [str, unicode], 'name')
        self._session = _gl.deploy._default_session
        self.name = name
        # Record the concrete subclass name (e.g. 'Local', 'LocalAsync').
        self._env_type = type(self).__name__
        self._modified_since_last_saved = None
        if session_aware:
            self._session.register(self)
    def clone(self, name):
        """
        Clones environment and returns the cloned environment.

        Subclasses must override this; the base class implementation raises.

        Returns
        -------
        out : Environment
            Cloned environment.

        Notes
        -----
        Cloned environment cannot be saved unless it is renamed.
        """
        raise NotImplementedError
    def is_running(self):
        '''
        Returns whether or not the environment actually starts.

        Subclasses must override this; the base class implementation raises.
        '''
        raise NotImplementedError
    def save(self):
        """
        Saves the environment to the session. Can be retrieved using
        gl.deploy.environments[`name`] where `name` is the name of the job.

        Returns
        -------
        out : Environment
            This environment (returned to allow call chaining).

        See Also
        --------
        graphlab.deploy.environments

        Examples
        --------
        >>> environment.save()
        """
        self._session.save(self)
        return self
    def get_num_workers(self):
        '''
        Return number of workers that can be used to process jobs in parallel
        (the base class assumes a single worker).
        '''
        return 1
    def __getstate__(self):
        # Drop the session from the pickled state: it is process-local and
        # re-attached when the object is constructed/registered again.
        odict = dict.copy(self.__dict__)
        if '_session' in odict:
            del odict['_session']
        return odict
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return "Environment: [\"name\": %s, \"type\": %s]" % (self.name,
                                                              self._env_type)
    def __to_dict__(self):
        # Minimal JSON-serializable description of this environment.
        json_dict = {}
        json_dict["name"] = self.name
        json_dict["type"] = self._env_type
        return json_dict
class LocalAsync(_Environment):
    """
    Environment for executing jobs in the background (asynchronously) on your
    local machine.

    Parameters
    ----------
    name : str
        The name for the environment.
    session_aware : bool (default True)
        Register this environment with the session (recommended). When
        registered, the environment objects can be retrieved from the session.

    See Also
    --------
    Local

    Examples
    --------
    >>> async = graphlab.deploy.environment.LocalAsync('async')
    """
    # Serialization format version; bump when the pickled layout changes.
    _LOCAL_ASYNC_ENV_VERSION = 2
    def __init__(self, name, session_aware=True):
        super(LocalAsync, self).__init__(name, session_aware)
    def __str__(self):
        return "LocalAsync: [\"name\": %s]" % (self.name)
    def _get_version(self):
        # Version of the on-disk format written when this object is saved.
        return self._LOCAL_ASYNC_ENV_VERSION
    @classmethod
    def _load_version(cls, unpickler, version):
        """
        Load a previously pickled LocalAsync environment.

        Parameters
        ----------
        unpickler : A GLUnpickler file handle.
        version : Version number the dump was written with.
        """
        # Load the dump.
        obj = unpickler.load()
        # Construct a new object.
        new = cls(obj.name, session_aware = False)
        # Only dumps written by this or an older version can be loaded.
        assert obj._get_version() <= new._get_version()
        # Now copy over the useful parts of the dump.
        # NOTE(review): Local._load_version also restores
        # '_modified_since_last_saved', '_typename' and 'name'; confirm the
        # narrower attribute list here is intentional.
        lst = ['_env_type']
        _internal_utils.copy_attributes(new, obj, lst)
        return new
    def is_running(self):
        '''
        Returns whether or not the environment actually starts
        (a local asynchronous environment is always considered running).
        '''
        return True
    def clone(self, name):
        """
        Create a safe clone of this object.

        Parameters
        ----------
        name : str
            Name for the cloned environment.

        Returns
        -------
        out : Environment
            Cloned environment object.

        Notes
        -----
        - Cloned environment cannot be saved unless it is renamed.
        """
        # __new__ bypasses __init__ so the clone is NOT re-registered
        # with the session; it shares the original's session reference.
        new = LocalAsync.__new__(LocalAsync)
        new.name = name
        new._modified_since_last_saved = self._modified_since_last_saved
        new._env_type = self._env_type
        new._session = self._session
        return new
class Local(_Environment):
    """
    Environment for executing jobs locally on this machine (not in the background).

    Parameters
    ----------
    name : str
        The name for the environment.
    session_aware : bool (default True)
        Register this environment with the session (recommended). When
        registered, the environment objects can be retrieved from the session.

    See Also
    --------
    LocalAsync

    Examples
    --------
    >>> local = graphlab.deploy.environment.Local('local')
    """
    # Serialization format version; bump when the pickled layout changes.
    _LOCAL_ENV_VERSION = 2
    def __init__(self, name, session_aware=True):
        super(Local, self).__init__(name, session_aware)
    def __str__(self):
        return "Local: [\"name\": %s]" % (self.name)
    def _get_version(self):
        # Version of the on-disk format written when this object is saved.
        return self._LOCAL_ENV_VERSION
    @classmethod
    def _load_version(cls, unpickler, version):
        """
        Load a previously pickled Local environment.

        Parameters
        ----------
        unpickler : A GLUnpickler file handle.
        version : Version number the dump was written with.
        """
        # Load the dump.
        obj = unpickler.load()
        # Construct a new object.
        new = cls(obj.name, session_aware = False)
        # Only dumps written by this or an older version can be loaded.
        assert obj._get_version() <= new._get_version()
        # Now copy over the useful parts of the dump.
        lst = ['_env_type', '_modified_since_last_saved',
               '_typename', 'name']
        _internal_utils.copy_attributes(new, obj, lst)
        return new
    def is_running(self):
        '''
        Returns whether or not the environment actually starts
        (a local environment is always considered running).
        '''
        return True
    def clone(self, name):
        """
        Create a safe clone of this object.

        Parameters
        ----------
        name : str
            Name for the cloned environment.

        Returns
        -------
        out : Environment
            Cloned environment object.

        Notes
        -----
        - Cloned environment cannot be saved unless it is renamed.
        """
        # __new__ bypasses __init__ so the clone is NOT re-registered
        # with the session; it shares the original's session reference.
        new = Local.__new__(Local)
        new.name = name
        new._modified_since_last_saved = self._modified_since_last_saved
        new._env_type = self._env_type
        new._session = self._session
        return new
/MAVR-0.93.tar.gz/MAVR-0.93/scripts/phylogenetics/collapse_by_support.py | __author__ = 'Sergei F. Kliver'
import argparse
from ete2 import Tree
# Collapse poorly supported internal nodes of phylogenetic trees.
# Reads one newick tree per input line, deletes internal nodes whose
# support value is below the threshold, and writes the collapsed trees out.
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_tree_file", action="store", dest="input_tree_file",
                    help="File with input trees to collapse")
parser.add_argument("-o", "--output_tree_file", action="store", dest="output_tree_file",
                    help="File with otput collapsed trees")
parser.add_argument("-f", "--input_tree_format", action="store", dest="input_tree_format", type=int, default=0,
                    help="""Format of input trees. Allowed formats:
0 flexible with support values (default)
1 flexible with internal node names
2 all branches + leaf names + internal supports
3 all branches + all names
4 leaf branches + leaf names
5 internal and leaf branches + leaf names
6 internal branches + leaf names
7 leaf branches + all names
8 all names
9 leaf names
100 topology only""")
parser.add_argument("-s", "--support_threshold", action="store", dest="minimum_support", default=50,
                    help="Minimun support value for nodes to retain")
parser.add_argument("-y", "--support_type", action="store", dest="support_type", default="int",
                    help="Type of support values - 'int' or 'float'. Default - 'int'")
args = parser.parse_args()
# Convert the threshold to the declared support type; None marks an
# unrecognised --support_type value and is rejected just below.
threshold_value = int(args.minimum_support) if args.support_type == "int" else float(args.minimum_support) \
    if args.support_type == "float" else None
print("Nodes with support less than %s will be collapsed" % str(threshold_value))
if threshold_value is None:
    raise ValueError("Wrong support type is set")
tree_index = 1
with open(args.input_tree_file, "r") as in_fd:
    with open(args.output_tree_file, "w") as out_fd:
        for line in in_fd:  # one newick tree per line
            tree_line = line.strip()
            # 'support' sets the default support used for nodes lacking one.
            tree = Tree(tree_line, format=args.input_tree_format, support=100 if args.support_type == "int" else 1.0)
            print("Totaly %i leaves in tree %i" % (len(tree), tree_index))
            #print(tree.write())
            for node in tree.traverse():
                # Root and leaves are never collapsed.
                if node.is_root() or node.is_leaf():
                    #print(node.support)
                    #print(tree.write())
                    continue
                if node.support < threshold_value:
                    #print node
                    # ete2 delete() reattaches the node's children to its
                    # parent, i.e. creates a polytomy.
                    node.delete()
            #print(tree.write())
            out_fd.write(tree.write(format=args.input_tree_format))
            out_fd.write("\n")
            tree_index += 1
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/diofant/core/mod.py | from .function import Function
from .numbers import nan
class Mod(Function):
    """Represents a modulo operation on symbolic expressions.
    Receives two arguments, dividend p and divisor q.
    The convention used is the same as Python's: the remainder always has the
    same sign as the divisor.
    Examples
    ========
    >>> x**2 % y
    x**2%y
    >>> _.subs({x: 5, y: 6})
    1
    """
    @classmethod
    def eval(cls, p, q):
        # Deferred imports avoid circular imports inside the core package.
        from ..polys.polytools import gcd
        from .add import Add
        from .exprtools import gcd_terms
        from .mul import Mul
        from .numbers import Integer
        def doit(p, q):
            """Try to return p % q if both are numbers or +/-p is known
            to be less than or equal q.
            """
            if p.is_infinite or q.is_infinite:
                return nan
            # Trivial zero-remainder cases: p == ±q, p a power of q, or q == 1.
            if (p == q or p == -q or
                    p.is_Pow and p.exp.is_Integer and p.base == q or
                    p.is_integer and q == 1):
                return Integer(0)
            if q.is_Number:
                if p.is_Number:
                    return p % q
                if q == 2:
                    # Parity shortcut for modulo 2.
                    if p.is_even:
                        return Integer(0)
                    elif p.is_odd:
                        return Integer(1)
            # by ratio: if p/q collapses to something int() accepts, use it.
            r = p/q
            try:
                d = int(r)
            except TypeError:
                pass
            else:
                rv = p - d*q
                # Sign of the result must follow q (Python convention).
                if (rv*q).is_nonnegative:
                    return rv
                elif (rv*q).is_nonpositive:
                    return rv + q
            # by difference: |p| known to be no larger than |q|.
            d = p - q
            if d.is_negative:
                if q.is_negative:
                    return d
                elif q.is_positive:
                    return p
            # implicit None: no simplification found
        rv = doit(p, q)
        if rv is not None:
            return rv
        # denest
        if isinstance(p, cls):
            # easy
            qinner = p.args[1]
            if qinner == q:
                # Mod(Mod(x, q), q) -> Mod(x, q)
                return p
            # XXX other possibilities?
        # extract gcd; any further simplification should be done by the user
        G = gcd(p, q)
        if G != 1:
            p, q = [
                gcd_terms(i/G, clear=False, fraction=False) for i in (p, q)]
        pwas, qwas = p, q
        # simplify terms
        # (x + y + 2) % x -> Mod(y + 2, x)
        if p.is_Add:
            args = []
            for i in p.args:
                a = cls(i, q)
                # Keep the term unmodified unless wrapping it in Mod
                # actually simplified (did not add a new Mod node).
                if a.count(cls) > i.count(cls):
                    args.append(i)
                else:
                    args.append(a)
            if args != list(p.args):
                p = Add(*args)
        else:
            # handle coefficients if they are not Rational
            # since those are not handled by factor_terms
            # e.g. Mod(.6*x, .3*y) -> 0.3*Mod(2*x, y)
            cp, p = p.as_coeff_Mul()
            cq, q = q.as_coeff_Mul()
            ok = False
            if not cp.is_Rational or not cq.is_Rational:
                r = cp % cq
                if r == 0:
                    # Coefficient divides exactly: move cq out front.
                    G *= cq
                    p *= int(cp/cq)
                    ok = True
            if not ok:
                p = cp*p
                q = cq*q
        # simple -1 extraction
        if p.could_extract_minus_sign() and q.could_extract_minus_sign():
            G, p, q = [-i for i in (G, p, q)]
        # check again to see if p and q can now be handled as numbers
        rv = doit(p, q)
        if rv is not None:
            return rv*G
        # put 1.0 from G on inside
        if G.is_Float and G == 1:
            p *= G
            return cls(p, q, evaluate=False)
        elif G.is_Mul and G.args[0].is_Float and G.args[0] == 1:
            p = G.args[0]*p
            G = Mul._from_args(G.args[1:])
        # Only re-evaluate if the gcd/coefficient handling changed p or q.
        return G*cls(p, q, evaluate=(p, q) != (pwas, qwas))
    def _eval_is_integer(self):
        # Mod of integers with nonzero divisor is an integer.
        p, q = self.args
        if p.is_integer and q.is_integer and q.is_nonzero:
            return True
    def _eval_is_nonnegative(self):
        # Result has the sign of q: positive divisor -> nonnegative result.
        p, q = self.args
        if p.is_real and q.is_real and q.is_positive:
            return True
    def _eval_is_nonpositive(self):
        # Result has the sign of q: negative divisor -> nonpositive result.
        p, q = self.args
        if p.is_real and q.is_real and q.is_negative:
            return True
/BigchainDB-2.2.2.tar.gz/BigchainDB-2.2.2/bigchaindb/backend/connection.py |
import logging
from importlib import import_module
from itertools import repeat
import bigchaindb
from bigchaindb.backend.exceptions import ConnectionError
from bigchaindb.backend.utils import get_bigchaindb_config_value, get_bigchaindb_config_value_or_key_error
from bigchaindb.common.exceptions import ConfigurationError
# Registry of supported backends: backend name -> dotted path of its
# Connection implementation class (resolved dynamically in connect()).
BACKENDS = {
    'localmongodb': 'bigchaindb.backend.localmongodb.connection.LocalMongoDBConnection',
}
logger = logging.getLogger(__name__)
def connect(backend=None, host=None, port=None, name=None, max_tries=None,
            connection_timeout=None, replicaset=None, ssl=None, login=None, password=None,
            ca_cert=None, certfile=None, keyfile=None, keyfile_passphrase=None,
            crlfile=None):
    """Create a new connection to the database backend.
    All arguments default to the current configuration's values if not
    given.
    Args:
        backend (str): the name of the backend to use.
        host (str): the host to connect to.
        port (int): the port to connect to.
        name (str): the name of the database to use.
        replicaset (str): the name of the replica set (only relevant for
                          MongoDB connections).
    Returns:
        An instance of :class:`~bigchaindb.backend.connection.Connection`
        based on the given (or defaulted) :attr:`backend`.
    Raises:
        :exc:`~ConnectionError`: If the connection to the database fails.
        :exc:`~ConfigurationError`: If the given (or defaulted) :attr:`backend`
            is not supported or could not be loaded.
        :exc:`~AuthenticationError`: If there is a OperationFailure due to
            Authentication failure after connecting to the database.
    """
    # Required settings: the *_or_key_error getters raise when missing.
    backend = backend or get_bigchaindb_config_value_or_key_error('backend')
    host = host or get_bigchaindb_config_value_or_key_error('host')
    port = port or get_bigchaindb_config_value_or_key_error('port')
    dbname = name or get_bigchaindb_config_value_or_key_error('name')
    # Not sure how to handle this here. This setting is only relevant for
    # mongodb.
    # I added **kwargs for both RethinkDBConnection and MongoDBConnection
    # to handle these these additional args. In case of RethinkDBConnection
    # it just does not do anything with it.
    #
    # UPD: RethinkDBConnection is not here anymore cause we no longer support RethinkDB.
    # The problem described above might be reconsidered next time we introduce a backend,
    # if it ever happens.
    # Optional settings fall back to config values (which may be None).
    replicaset = replicaset or get_bigchaindb_config_value('replicaset')
    ssl = ssl if ssl is not None else get_bigchaindb_config_value('ssl', False)
    login = login or get_bigchaindb_config_value('login')
    password = password or get_bigchaindb_config_value('password')
    ca_cert = ca_cert or get_bigchaindb_config_value('ca_cert')
    certfile = certfile or get_bigchaindb_config_value('certfile')
    keyfile = keyfile or get_bigchaindb_config_value('keyfile')
    keyfile_passphrase = keyfile_passphrase or get_bigchaindb_config_value('keyfile_passphrase', None)
    crlfile = crlfile or get_bigchaindb_config_value('crlfile')
    try:
        # KeyError here comes from BACKENDS[backend] (unknown backend name);
        # ImportError/AttributeError come from resolving the dotted path.
        module_name, _, class_name = BACKENDS[backend].rpartition('.')
        Class = getattr(import_module(module_name), class_name)
    except KeyError:
        raise ConfigurationError('Backend `{}` is not supported. '
                                 'BigchainDB currently supports {}'.format(backend, BACKENDS.keys()))
    except (ImportError, AttributeError) as exc:
        raise ConfigurationError('Error loading backend `{}`'.format(backend)) from exc
    logger.debug('Connection: {}'.format(Class))
    return Class(host=host, port=port, dbname=dbname,
                 max_tries=max_tries, connection_timeout=connection_timeout,
                 replicaset=replicaset, ssl=ssl, login=login, password=password,
                 ca_cert=ca_cert, certfile=certfile, keyfile=keyfile,
                 keyfile_passphrase=keyfile_passphrase, crlfile=crlfile)
class Connection:
    """Connection class interface.
    All backend implementations should provide a connection class that inherits
    from and implements this class.
    """
    def __init__(self, host=None, port=None, dbname=None,
                 connection_timeout=None, max_tries=None,
                 **kwargs):
        """Create a new :class:`~.Connection` instance.
        Args:
            host (str): the host to connect to.
            port (int): the port to connect to.
            dbname (str): the name of the database to use.
            connection_timeout (int, optional): the milliseconds to wait
                until timing out the database connection attempt.
                Defaults to 5000ms.
            max_tries (int, optional): how many tries before giving up,
                if 0 then try forever. Defaults to 3.
            **kwargs: arbitrary keyword arguments provided by the
                configuration's ``database`` settings
        """
        dbconf = bigchaindb.config['database']
        self.host = host or dbconf['host']
        self.port = port or dbconf['port']
        self.dbname = dbname or dbconf['name']
        self.connection_timeout = connection_timeout if connection_timeout is not None \
            else dbconf['connection_timeout']
        self.max_tries = max_tries if max_tries is not None else dbconf['max_tries']
        # max_tries == 0 means "retry forever": use an endless iterator.
        self.max_tries_counter = range(self.max_tries) if self.max_tries != 0 else repeat(0)
        self._conn = None
    @property
    def conn(self):
        # Lazily establish the connection on first access.
        if self._conn is None:
            self.connect()
        return self._conn
    def run(self, query):
        """Run a query.
        Args:
            query: the query to run
        Raises:
            :exc:`~DuplicateKeyError`: If the query fails because of a
                duplicate key constraint.
            :exc:`~OperationFailure`: If the query fails for any other
                reason.
            :exc:`~ConnectionError`: If the connection to the database
                fails.
        """
        raise NotImplementedError()
    def connect(self):
        """Try to connect to the database.
        Raises:
            :exc:`~ConnectionError`: If the connection to the database
                fails.
        """
        attempt = 0
        for i in self.max_tries_counter:
            attempt += 1
            try:
                # _connect() is supplied by the backend subclass.
                self._conn = self._connect()
            except ConnectionError as exc:
                logger.warning('Attempt %s/%s. Connection to %s:%s failed after %sms.',
                               attempt, self.max_tries if self.max_tries != 0 else '∞',
                               self.host, self.port, self.connection_timeout)
                # Only give up when a finite retry budget is exhausted
                # (attempt never equals 0 == max_tries in infinite mode).
                if attempt == self.max_tries:
                    logger.critical('Cannot connect to the Database. Giving up.')
                    raise ConnectionError() from exc
            else:
                # Success: stop retrying.
                break
/DI_engine-0.4.9-py3-none-any.whl/ding/framework/middleware/functional/collector.py | from typing import TYPE_CHECKING, Callable, List, Tuple, Any
from functools import reduce
import treetensor.torch as ttorch
from ding.envs import BaseEnvManager
from ding.policy import Policy
from ding.torch_utils import to_ndarray, get_shape0
if TYPE_CHECKING:
from ding.framework import OnlineRLContext
class TransitionList:
    """Per-environment transition buffers plus bookkeeping of episode ends."""

    def __init__(self, env_num: int) -> None:
        self.env_num = env_num
        # One transition buffer and one list of episode-end indices per env.
        self._transitions = [[] for _ in range(env_num)]
        self._done_idx = [[] for _ in range(env_num)]

    def append(self, env_id: int, transition: Any) -> None:
        """Store ``transition`` for ``env_id``, noting episode boundaries."""
        buf = self._transitions[env_id]
        buf.append(transition)
        if transition.done:
            # Recorded index is exclusive, i.e. directly usable as a slice end.
            self._done_idx[env_id].append(len(buf))

    def to_trajectories(self) -> Tuple[List[Any], List[int]]:
        """Flatten all buffers; also return the last flat index of each env."""
        flat = []
        end_indices = []
        for buf in self._transitions:
            flat.extend(buf)
            # Index of this env's final transition in the flattened list
            # (-1 for a leading empty buffer, matching cumulative-sum - 1).
            end_indices.append(len(flat) - 1)
        return flat, end_indices

    def to_episodes(self) -> List[List[Any]]:
        """Slice each env buffer into completed episodes.

        Transitions after the last ``done`` of an env (an unfinished episode)
        are intentionally omitted.
        """
        episodes = []
        for buf, ends in zip(self._transitions, self._done_idx):
            start = 0
            for end in ends:
                episodes.append(buf[start:end])
                start = end
        return episodes

    def clear(self):
        """Drop all stored transitions and episode markers in place."""
        for buf in self._transitions:
            buf.clear()
        for ends in self._done_idx:
            ends.clear()
def inferencer(seed: int, policy: Policy, env: BaseEnvManager) -> Callable:
    """
    Overview:
        The middleware that executes the inference process.
    Arguments:
        - seed (:obj:`int`): Random seed.
        - policy (:obj:`Policy`): The policy to be inferred.
        - env (:obj:`BaseEnvManager`): The env where the inference process is performed. \
            The env.ready_obs (:obj:`tnp.array`) will be used as model input.
    """
    env.seed(seed)
    def _inference(ctx: "OnlineRLContext"):
        """
        Output of ctx:
            - obs (:obj:`Union[torch.Tensor, Dict[torch.Tensor]]`): The input observations collected \
                from all collector environments.
            - action: (:obj:`List[np.ndarray]`): The inferred actions listed by env_id.
            - inference_output (:obj:`Dict[int, Dict]`): The dict of which the key is env_id (int), \
                and the value is inference result (Dict).
        """
        # Lazily launch the sub-environments on first use.
        if env.closed:
            env.launch()
        obs = ttorch.as_tensor(env.ready_obs)
        ctx.obs = obs
        obs = obs.to(dtype=ttorch.float32)
        # TODO mask necessary rollout
        # NOTE(review): assumes dim 0 of obs indexes environments so each
        # entry maps env index -> observation — confirm against the
        # BaseEnvManager.ready_obs layout.
        obs = {i: obs[i] for i in range(get_shape0(obs))}  # TBD
        inference_output = policy.forward(obs, **ctx.collect_kwargs)
        ctx.action = [to_ndarray(v['action']) for v in inference_output.values()]  # TBD
        ctx.inference_output = inference_output
    return _inference
def rolloutor(policy: Policy, env: BaseEnvManager, transitions: TransitionList) -> Callable:
    """
    Overview:
        The middleware that executes the transition process in the env.
    Arguments:
        - policy (:obj:`Policy`): The policy to be used during transition.
        - env (:obj:`BaseEnvManager`): The env for the collection, the BaseEnvManager object or \
            its derivatives are supported.
        - transitions (:obj:`TransitionList`): The transition information which will be filled \
            in this process, including `obs`, `next_obs`, `action`, `logit`, `value`, `reward` \
            and `done`.
    """
    # Each env slot carries a unique, monotonically increasing episode id.
    env_episode_id = [_ for _ in range(env.env_num)]
    current_id = env.env_num
    def _rollout(ctx: "OnlineRLContext"):
        """
        Input of ctx:
            - action: (:obj:`List[np.ndarray]`): The inferred actions from previous inference process.
            - obs (:obj:`Dict[Tensor]`): The states fed into the transition dict.
            - inference_output (:obj:`Dict[int, Dict]`): The inference results to be fed into the \
                transition dict.
            - train_iter (:obj:`int`): The train iteration count to be fed into the transition dict.
            - env_step (:obj:`int`): The count of env step, which will increase by 1 for a single \
                transition call.
            - env_episode (:obj:`int`): The count of env episode, which will increase by 1 if the \
                trajectory stops.
        """
        nonlocal current_id
        timesteps = env.step(ctx.action)
        ctx.env_step += len(timesteps)
        timesteps = [t.tensor() for t in timesteps]
        # TODO abnormal env step
        for i, timestep in enumerate(timesteps):
            # NOTE(review): assumes timesteps arrive in the same order as
            # ctx.obs / ctx.inference_output indices — confirm env.step
            # contract for partially-ready env managers.
            transition = policy.process_transition(ctx.obs[i], ctx.inference_output[i], timestep)
            transition = ttorch.as_tensor(transition)  # TBD
            transition.collect_train_iter = ttorch.as_tensor([ctx.train_iter])
            transition.env_data_id = ttorch.as_tensor([env_episode_id[timestep.env_id]])
            transitions.append(timestep.env_id, transition)
            if timestep.done:
                # Episode finished: reset policy state for this env and
                # assign the slot a fresh episode id.
                policy.reset([timestep.env_id])
                env_episode_id[timestep.env_id] = current_id
                current_id += 1
                ctx.env_episode += 1
        # TODO log
    return _rollout
/GenMotion-0.0.4-py3-none-any.whl/genmotion/algorithm/action2motion/models/motion_vae.py | import torch
import torch.nn as nn
class GaussianGRU(nn.Module):
    """Stacked GRU cells whose per-step output parameterises a diagonal
    Gaussian; forward() returns a reparameterised sample plus the parameters.
    NOTE(review): the recurrent state lives on the module (self.hidden) and is
    mutated by forward(); callers are expected to call init_hidden() between
    sequences — confirm against the training loop.
    """
    def __init__(self, input_size, output_size, hidden_size, n_layers, batch_size, device):
        super(GaussianGRU, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.batch_size = batch_size
        self.device = device
        # Input embedding followed by n_layers stacked GRU cells.
        self.embed = nn.Linear(input_size, hidden_size)
        self.gru = nn.ModuleList([nn.GRUCell(hidden_size, hidden_size) for i in range(self.n_layers)])
        # Heads producing the Gaussian parameters (mean and log-variance).
        self.mu_net = nn.Linear(hidden_size, output_size)
        self.logvar_net = nn.Linear(hidden_size, output_size)
        self.hidden = self.init_hidden()
    def init_hidden(self, num_samples=None):
        # Reset the recurrent state to zeros; num_samples overrides batch_size.
        batch_size = num_samples if num_samples is not None else self.batch_size
        hidden = []
        for i in range(self.n_layers):
            hidden.append(torch.zeros(batch_size, self.hidden_size).requires_grad_(False).to(self.device))
        self.hidden = hidden
        return hidden
    def reparameterize(self, mu, logvar):
        # Sample z ~ N(mu, exp(logvar)) via the reparameterisation trick.
        s_var = logvar.mul(0.5).exp_()
        eps = s_var.data.new(s_var.size()).normal_()
        return eps.mul(s_var).add_(mu)
    def forward(self, inputs):
        # One time step; returns (sample, mu, logvar, top-layer hidden state).
        embedded = self.embed(inputs.view(-1, self.input_size))
        h_in = embedded
        for i in range(self.n_layers):
            # Update the stored per-layer hidden state in place.
            self.hidden[i] = self.gru[i](h_in, self.hidden[i])
            h_in = self.hidden[i]
        mu = self.mu_net(h_in)
        logvar = self.logvar_net(h_in)
        z = self.reparameterize(mu, logvar)
        return z, mu, logvar, h_in
class DecoderGRU(nn.Module):
    """Stacked GRU-cell decoder: embeds the input, runs it through n_layers
    GRU cells (stateful, see NOTE on GaussianGRU) and applies a linear output.
    """
    def __init__(self, input_size, output_size, hidden_size, n_layers, batch_size, device):
        super(DecoderGRU, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size = hidden_size
        self.batch_size = batch_size
        self.n_layers = n_layers
        self.device = device
        self.embed = nn.Linear(input_size, hidden_size)
        self.gru = nn.ModuleList([nn.GRUCell(hidden_size, hidden_size) for i in range(self.n_layers)])
        self.output = nn.Linear(hidden_size, output_size)
        self.hidden = self.init_hidden()
    def init_hidden(self, num_samples=None):
        # Reset the recurrent state to zeros; num_samples overrides batch_size.
        batch_size = num_samples if num_samples is not None else self.batch_size
        hidden = []
        for i in range(self.n_layers):
            hidden.append(torch.zeros(batch_size, self.hidden_size).requires_grad_(False).to(self.device))
        self.hidden = hidden
        return hidden
    def forward(self, inputs):
        # One time step; returns (output, top-layer hidden state).
        embedded = self.embed(inputs.view(-1, self.input_size))
        h_in = embedded
        for i in range(self.n_layers):
            # Update the stored per-layer hidden state in place.
            self.hidden[i] = self.gru[i](h_in, self.hidden[i])
            h_in = self.hidden[i]
        return self.output(h_in), h_in
# generator with Lie algbra parameters, root joint has no rotations
class DecoderGRULie(DecoderGRU):
    """DecoderGRU variant whose output (after the first 3 root-translation
    values) is squashed into Lie-algebra rotation parameters in [-pi, pi].
    """
    def __init__(self, input_size, output_size, hidden_size, n_layers, batch_size, device):
        super(DecoderGRULie, self).__init__(input_size,
                                            output_size,
                                            hidden_size,
                                            n_layers,
                                            batch_size,
                                            device)
        # Extra head refining the rotation (non-root) part of the output.
        self.output_lie = nn.Linear(output_size - 3, output_size - 3)
        # NOTE(review): truncated pi constant (math.pi would be more precise);
        # kept as-is to preserve the trained-model numerics.
        self.PI = 3.1415926
    def forward(self, inputs):
        hidden_output, h_mid = super(DecoderGRULie, self).forward(inputs)
        # First 3 outputs are the (unbounded) root translation.
        root_trans = hidden_output[..., :3]
        lie_hid = hidden_output[..., 3:]
        lie_hid = torch.tanh(lie_hid)
        lie_out = self.output_lie(lie_hid)
        # Scale tanh output to the full rotation range [-pi, pi].
        lie_out = torch.tanh(lie_out) * self.PI
        output = torch.cat((root_trans, lie_out), dim=-1)
        return output, h_mid
/Imap-CLI-0.7.tar.gz/Imap-CLI-0.7/imap_cli/scripts/imap_shell.py | import argparse
import cmd
import datetime
import logging
import os
import sys
import tempfile
import threading
import time
import webbrowser
import docopt
import imap_cli
from imap_cli import config
from imap_cli import const
from imap_cli import copy
from imap_cli import fetch
from imap_cli import flag
from imap_cli import search
# Logger name is derived from this script's file name.
app_name = os.path.splitext(os.path.basename(__file__))[0]
# Shared state for the keep-alive thread: the flag stops its loop,
# the timer is the NOOP interval in seconds.
keep_alive_bool = True
keep_alive_timer = 30
log = logging.getLogger(app_name)
class ImapShell(cmd.Cmd):
    """Interactive shell bound to a single IMAP account.

    Each ``do_*`` method implements one shell command. Argument parsing is
    delegated to docopt; a bad argument makes docopt print usage and raise
    SystemExit, which is caught so control returns to the prompt.
    """
    completekey = 'Tab'
    intro = u''.join([
        'IMAP interactive Command Line Interpreter. ',
        'Type help or ? to list commands.\n'])
    prompt = '(imap-cli "INBOX") '
    stdout = sys.stdout
    cmdqueue = []
    # Trash/expunge configuration dict, injected by main() after construction.
    delete_conf = None

    def __init__(self, imap_account):
        # FIX: cmd.Cmd.__init__ was never called, leaving instance state such
        # as self.stdin unset. Pass the class-level completekey so the 'Tab'
        # readline binding is preserved.
        cmd.Cmd.__init__(self, completekey=self.completekey)
        self.imap_account = imap_account

    def do_cd(self, arg):
        '''Change selected IMAP folder.'''
        try:
            args = docopt.docopt('Usage: cd <directory>', arg)
        except SystemExit:
            return
        cd_result = imap_cli.change_dir(self.imap_account,
                                        directory=args['<directory>'])
        if cd_result == -1:
            sys.stdout.write('IMAP Folder can\'t be found\n')
        else:
            # Reflect the newly selected folder in the prompt.
            self.prompt = '(imap-cli "{}") '.format(args['<directory>'])

    def do_cp(self, arg):
        '''Copy mail from one mailbox to another.'''
        try:
            args = docopt.docopt('Usage: cp <dest> <mail_id>...', arg)
        except SystemExit:
            return
        copy.copy(self.imap_account, args['<mail_id>'], args['<dest>'])

    def do_flag(self, arg):
        '''Set or Unset flag on mails.'''
        try:
            args = docopt.docopt('\n'.join([
                'Usage: flag [options] <mail_id> <flag>',
                '',
                'Options:',
                ' -u, --unset Remove flag instead of setting them',
                ' -h, --help Show help options',
            ]), argv=arg)
        except SystemExit:
            return
        # flag.flag expects a list of mail ids, hence the single-item list.
        flag.flag(self.imap_account, [args['<mail_id>']], args['<flag>'],
                  unset=args['--unset'])

    def do_list(self, arg):
        '''List mail in specified folder.'''
        try:
            args = docopt.docopt('\n'.join([
                'Usage: list [options] [<directory>]',
                '',
                'Options:',
                ' -l, --limit=<LIMIT> Limit number of mail displayed',
                ' -h, --help Show this message',
            ]), argv=arg)
        except SystemExit:
            return
        # Fall back to 10 mails when the limit is missing or not an integer.
        try:
            limit = int(args['--limit'] or 10)
        except ValueError:
            limit = 10
        for mail_info in search.fetch_mails_info(self.imap_account,
                                                 limit=limit):
            sys.stdout.write(
                u'UID : {:<10} From : {:<40.40} Subject : {:.50}\n'.format(
                    mail_info['uid'],
                    mail_info['from'],
                    mail_info['subject']))

    def do_mv(self, arg):
        '''Move mail from one mailbox to another.'''
        try:
            args = docopt.docopt('Usage: cp <dest> <mail_id>...', arg)
        except SystemExit:
            return
        # Move = copy to destination, flag original as deleted, expunge.
        copy.copy(self.imap_account, args['<mail_id>'], args['<dest>'])
        flag.flag(self.imap_account, args['<mail_id>'], [const.FLAG_DELETED])
        self.imap_account.expunge()

    def do_quit(self, arg):
        'Exit this shell'
        global keep_alive_bool
        # Stop the keep-alive thread, then close the IMAP connection.
        keep_alive_bool = False
        imap_cli.disconnect(self.imap_account)
        sys.stdout.write('Bye\n')
        # Returning True makes cmd.Cmd.cmdloop() terminate.
        return True

    def do_rm(self, arg):
        '''Remove mail from one mailbox.'''
        try:
            args = docopt.docopt('Usage: rm <mail_id>...', arg)
        except SystemExit:
            return
        # Honour the configured delete strategy (trash folder vs expunge).
        if self.delete_conf['delete_method'] == 'MOVE_TO_TRASH':
            copy.copy(self.imap_account, args['<mail_id>'],
                      self.delete_conf['trash_directory'])
        flag.flag(self.imap_account, args['<mail_id>'], [const.FLAG_DELETED])
        if self.delete_conf['delete_method'] in ['MOVE_TO_TRASH', 'EXPUNGE']:
            self.imap_account.expunge()

    def do_read(self, arg):
        '''Read mail by uid.'''
        try:
            args = docopt.docopt(u'\n'.join([
                u'Usage: read [options] <mail_uid> [<save_directory>]',
                u'',
                u'Options:',
                u' -b, --browser Open mail in browser',
            ]), arg)
        except SystemExit:
            return
        fetched_mail = fetch.read(self.imap_account, args['<mail_uid>'],
                                  save_directory=args['<save_directory>'])
        if fetched_mail is None:
            log.error("Mail was not fetched, an error occured")
            # FIX: previously execution fell through and crashed on the
            # None value in fetch.display() below.
            return
        if args['--browser'] is True:
            # Render to a temporary file and hand it to the default browser.
            temp_file = tempfile.NamedTemporaryFile(delete=False)
            temp_file.write(fetch.display(fetched_mail,
                                          browser=True).encode('utf-8'))
            webbrowser.open_new_tab(temp_file.name)
            temp_file.close()
        else:
            sys.stdout.write(fetch.display(fetched_mail))

    def do_search(self, arg):
        '''Search mail.'''
        usage = '\n'.join([
            'Usage: search [options]',
            '',
            'Options:',
            ' -a, --address=<address> Search by address',
            ' -d, --date=<date> Search by date (YYYY-MM-DD)',
            ' -s, --size=<SIZE> Search by size (in bytes)',
            ' -S, --subject=<subject> Search by subject',
            ' -t, --tags=<tags> Searched tags (Comma separated)',
            ' -T, --full-text=<text> Searched tags (Comma separated)',
            ' -h, --help Show help options.',
        ])
        try:
            args = docopt.docopt(usage, argv=arg)
        except SystemExit:
            return
        if args.get('--tags') is not None:
            args['--tags'] = args['--tags'].split(',')
        # An unparsable date is silently ignored (treated as "no date filter").
        if args['--date'] is not None:
            try:
                date = datetime.datetime.strptime(args['--date'], '%Y-%m-%d')
            except ValueError:
                date = None
        else:
            date = None
        search_criterion = search.create_search_criterion(
            address=args['--address'],
            date=date,
            subject=args['--subject'],
            size=args['--size'],
            tags=args['--tags'],
            text=args['--full-text'],
        )
        mail_set = search.fetch_uids(self.imap_account,
                                     search_criterion=search_criterion)
        if len(mail_set) == 0:
            log.error('No mail found')
            return 0
        for mail_info in search.fetch_mails_info(self.imap_account,
                                                 mail_set=mail_set):
            sys.stdout.write(
                u'UID : {:<10} From : {:<40.40} Subject : {:.50}\n'.format(
                    mail_info['uid'],
                    mail_info['from'],
                    mail_info['subject']))

    def do_status(self, arg):
        'Print status of all IMAP folder in this account'
        directory_statuses = sorted(imap_cli.status(self.imap_account),
                                    key=lambda obj: obj['directory'])
        for directory_status in directory_statuses:
            sys.stdout.write(
                u'{:<30} : Unseen {:<6} Recent {:<6} Total {:<6}\n'.format(
                    directory_status['directory'],
                    directory_status['unseen'],
                    directory_status['recent'],
                    directory_status['count']))

    def do_unseen(self, arg):
        '''List Unseen mail (equivalent to "search -t unseen").'''
        search_criterion = search.create_search_criterion(tags=['unseen'])
        mail_set = search.fetch_uids(self.imap_account,
                                     search_criterion=search_criterion)
        if len(mail_set) == 0:
            log.error('No unseen mail found')
        else:
            for mail_info in search.fetch_mails_info(self.imap_account,
                                                     mail_set=mail_set):
                sys.stdout.write(
                    u'UID : {:<10} From : {:<40.40} Subject : {:.50}\n'.format(
                        mail_info['uid'],
                        mail_info['from'],
                        mail_info['subject']))

    def emptyline(self):
        # Override cmd.Cmd's default (repeat last command) with a no-op.
        pass
def keep_alive(imap_account):
    """Background loop: send an IMAP NOOP every ``keep_alive_timer`` seconds
    so the server does not drop the idle connection; stops once the module
    flag ``keep_alive_bool`` is cleared (by ``do_quit``).
    """
    elapsed = 0
    while keep_alive_bool:
        elapsed += 1
        # Fire a NOOP on every full interval; poll the stop flag each second.
        if elapsed % keep_alive_timer == 0:
            log.debug('NOOP send')
            imap_account.noop()
        time.sleep(1)
    log.debug('Keep alive thread terminated')
def main():
    """Entry point: configure logging, connect, run the interactive shell."""
    # Only CLI flag is verbosity; logging level follows it.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='increase output verbosity')
    args = parser.parse_args()
    logging.basicConfig(
        level=logging.DEBUG if args.verbose else logging.WARNING,
        stream=sys.stdout,
    )
    # Load IMAP connection settings; abort with exit status 1 when missing.
    connection_config = config.new_context_from_file(section='imap')
    if connection_config is None:
        return 1
    delete_config = config.new_context_from_file(section='trash')
    imap_account = imap_cli.connect(**connection_config)
    imap_shell = ImapShell(imap_account)
    imap_shell.delete_conf = delete_config
    # Background thread sends periodic NOOPs so the server keeps the
    # connection alive while the user is idle at the prompt.
    keep_alive_thread = threading.Thread(target=keep_alive,
                                         args=(imap_account,))
    keep_alive_thread.start()
    imap_shell.cmdloop()
    # do_quit clears the keep-alive flag, so the thread terminates here.
    keep_alive_thread.join()
    return 0
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == "__main__":
    sys.exit(main())
/L_SpaceCurves-0.0.8.tar.gz/L_SpaceCurves-0.0.8/L_SpaceCurves/L_SpaceCurves.py | import turtle
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class L_SpaceCurves():
import turtle
    def __init__(self, leng=None, letter='S', iter=1, speed=0, tipe=None, origin=(0, 0)):
        # leng:   step length of each turtle segment (stored as both len/size).
        # letter: start symbol of the L-system grammar (see project README).
        # iter:   number of grammar iterations (recursion depth).
        # speed:  turtle drawing speed (0 = fastest).
        # tipe:   curve dimensionality tag (e.g. "2D"), set by draw methods.
        # origin: starting coordinates; hilbert_3D expects a 3-component
        #         tuple here — TODO confirm callers pass (x, y, z) for 3D.
        self.len = leng
        self.speed = speed
        self.tipe = tipe
        self.origin = origin
        self.size = leng  # duplicate of `len`, used by the 3D curve code
        self.x, self.y, self.z = origin[0], origin[1], 0
        self.letter = letter
        self.iter = iter
def sierspinski_curve(self):
# Defining letters
def S(iter=1):
if iter == 1:
t.left(45)
t.forward(self.len)
t.right(45)
t.forward(self.len)
t.right(45)
t.forward(self.len)
t.left(45)
else:
G1(iter - 1)
def R(iter=1):
if iter == 1:
t.left(135)
t.forward(self.len)
t.right(45)
t.forward(self.len)
t.right(45)
t.forward(self.len)
t.right(45)
else:
G2(iter - 1)
def Z(iter=1):
if iter == 1:
t.right(135)
t.forward(self.len)
t.right(45)
t.forward(self.len)
t.right(45)
t.forward(self.len)
t.right(135)
else:
G4(iter - 1)
def P(iter):
if iter == 1:
t.right(45)
t.forward(self.len)
t.right(45)
t.forward(self.len)
t.right(45)
t.forward(self.len)
t.left(135)
else:
G3(iter - 1)
# defining constants
def c():
# walk up
t.left(90)
t.forward(self.len)
t.right(90)
def e():
# walk left
t.right(180)
t.forward(self.len)
t.right(180)
def d():
# walk right
t.forward(self.len)
def b():
# walk down
t.right(90)
t.forward(self.len)
t.left(90)
def Ddc():
# walk diagonal right up
t.left(45)
t.forward(self.len)
t.right(45)
def Ddb():
# walk diagonal right down
t.right(45)
t.forward(self.len)
t.left(45)
def Dec():
# walk diaginal left up
t.left(135)
t.forward(self.len)
t.right(135)
def Deb():
# walk diaginal left down
t.right(135)
t.forward(self.len)
t.left(135)
# grammar
def G1(iter, var="S"):
# S <-
S(iter)
Ddc()
R(iter)
d()
P(iter)
Ddb()
S(iter)
def G2(iter, var="R"):
# R <-
R(iter)
Dec()
Z(iter)
c()
S(iter)
Ddc()
R(iter)
def G3(iter, var="P"):
P(iter)
Ddb()
S(iter)
b()
Z(iter)
Deb()
P(iter)
def G4(iter, var="Z"):
Z(iter)
Deb()
P(iter)
e()
R(iter)
Dec()
Z(iter)
self.tipe = "2D" # save the dimension of curve
t = turtle.Turtle()
# move the start point
t.up()
t.setpos(self.origin)
t.down()
# change the turtle speed
t.speed(self.speed)
print(self.letter)
if self.letter == "S":
print('oi')
S(self.iter)
elif self.letter == "R":
R(self.iter)
elif self.letter == "Z":
Z(self.iter)
elif self.letter == "P":
P(self.iter)
else:
raise "Use the letters definided at Github"
# to exit the plot need to click at screen
turtle.exitonclick()
def hilbert_curve(self):
import turtle
t = turtle.Turtle()
# set the origin point
t.up()
t.setpos(self.origin)
t.down()
#set the speed of turtle
t.speed(self.speed)
# Definig letters
def H(iter):
if iter == 1:
t.left(90)
t.forward(self.len)
t.right(90)
t.forward(self.len)
t.right(90)
t.forward(self.len)
t.left(90)
else:
G1(iter - 1)
def A(iter):
if iter == 1:
t.forward(self.len)
t.left(90)
t.forward(self.len)
t.left(90)
t.forward(self.len)
t.right(180)
else:
G2(iter - 1)
def B(iter):
if iter == 1:
t.left(180)
t.forward(self.len)
t.left(90)
t.forward(self.len)
t.left(90)
t.forward(self.len)
else:
G3(iter - 1)
def C(iter):
if iter == 1:
t.right(90)
t.forward(self.len)
t.right(90)
t.forward(self.len)
t.right(90)
t.forward(self.len)
t.right(90)
else:
G4(iter - 1)
# Definig constants
def d():
"Walk right"
t.forward(self.len)
def e():
"Walk left"
t.right(180)
t.forward(self.len)
t.left(180)
def c():
"walk up"
t.left(90)
t.forward(self.len)
t.right(90)
def b():
"walk down"
t.right(90)
t.forward(self.len)
t.left(90)
# Grammar
def G1(iter, var="H"):
A(iter)
c()
H(iter)
d()
H(iter)
b()
B(iter)
def G2(iter, var="A"):
H(iter)
d()
A(iter)
c()
A(iter)
e()
C(iter)
def G3(iter, var="B"):
C(iter)
e()
B(iter)
b()
B(iter)
d()
H(iter)
def G4(iter, var='C'):
B(iter)
b()
C(iter)
e()
C(iter)
c()
A(iter)
if self.letter == "S" or self.letter == "H":
H(self.iter)
elif self.letter == "A":
A(self.iter)
elif self.letter == "B":
B(self.iter)
elif self.iter == "C":
C(self.iter)
else:
raise "Use the letters definided at Github"
turtle.exitonclick()
def peano_curve(self):
import turtle
t = turtle.Turtle()
t.speed(self.speed)
# set the start point
t.up()
t.setpos(self.origin)
t.down()
# Definig letters
def P(iter):
if iter == 1:
t.left(90)
t.forward(self.len)
t.right(90)
t.forward(self.len)
t.right(90)
t.forward(self.len)
t.left(90)
t.forward(self.len)
t.left(90)
t.forward(self.len)
t.right(90)
else:
G1(iter - 1)
def Q(iter):
if iter == 1:
t.left(90)
t.forward(self.len)
t.left(90)
t.forward(self.len)
t.left(90)
t.forward(self.len)
t.right(90)
t.forward(self.len)
t.right(90)
t.forward(self.len)
t.right(90)
else:
G2(iter - 1)
def R(iter):
if iter == 1:
t.right(90)
t.forward(self.len)
t.right(90)
t.forward(self.len)
t.right(90)
t.forward(self.len)
t.left(90)
t.forward(self.len)
t.left(90)
t.forward(self.len)
t.left(90)
else:
G3(iter - 1)
def S(iter):
if iter == 1:
t.right(90)
t.forward(self.len)
t.left(90)
t.forward(self.len)
t.left(90)
t.forward(self.len)
t.right(90)
t.forward(self.len)
t.right(90)
t.forward(self.len)
t.left(90)
else:
G4(iter - 1)
# defining constants
def c():
"Walk up"
t.left(90)
t.forward(self.len)
t.right(90)
def b():
"Walk down"
t.right(90)
t.forward(self.len)
t.left(90)
def d():
"Walk right"
t.forward(self.len)
def e():
"Walk left"
t.right(180)
t.forward(self.len)
t.right(180)
# Grammar
def G1(iter, var="P"):
P(iter)
c()
Q(iter)
c()
P(iter)
d()
S(iter)
b()
R(iter)
b()
S(iter)
d()
P(iter)
c()
Q(iter)
c()
P(iter)
def G2(iter, var="Q"):
Q(iter)
c()
P(iter)
c()
Q(iter)
e()
R(iter)
b()
S(iter)
b()
R(iter)
e()
Q(iter)
c()
P(iter)
c()
Q(iter)
def G3(iter, var="Q"):
R(iter)
b()
S(iter)
b()
R(iter)
e()
Q(iter)
c()
P(iter)
c()
Q(iter)
e()
R(iter)
b()
S(iter)
b()
R(iter)
def G4(iter, var="Q"):
S(iter)
b()
R(iter)
b()
S(iter)
d()
P(iter)
c()
Q(iter)
c()
P(iter)
d()
S(iter)
b()
R(iter)
b()
S(iter)
if self.letter == "S" or self.letter == "P":
P(self.iter)
elif self.letter == "Q":
Q(self.iter)
elif self.letter == "R":
R(self.iter)
elif self.letter == "S":
S(self.iter)
else:
raise "Use the letters definided at Github"
turtle.exitonclick()
def gosper_Flowsnake_curve(self):
import turtle
t = turtle.Turtle()
t.speed(self.speed)
t.up()
t.setpos(self.origin)
t.down()
# Definig letters
def G(iter):
if iter == 1:
t.forward(self.len)
t.left(120)
t.forward(self.len)
t.left(60)
t.forward(self.len)
t.right(150)
t.forward(self.len)
t.right(30)
t.forward(self.len)
t.right(60)
t.forward(self.len)
else:
G1(iter - 1)
def R(iter):
if iter == 1:
t.forward(self.len)
t.left(60)
t.forward(self.len)
t.left(60)
t.forward(self.len)
t.left(120)
t.forward(self.len)
t.right(60)
t.forward(self.len)
t.right(120)
t.forward(self.len)
else:
G2(iter - 1)
# definig constants
def c():
# walk forward
t.forward(self.len)
def l():
# turn left 60º
t.left(60)
def r():
# turn right 60º
t.right(60)
# Grammar
def G1(iter):
G(iter)
l()
c()
R(iter)
l()
c()
R(iter)
c()
r()
G(iter)
c()
r()
G(iter)
l()
c()
G(iter)
c()
r()
R(iter)
def G2(iter):
G(iter)
l()
c()
R(iter)
c()
r()
R(iter)
l()
c()
R(iter)
l()
c()
G(iter)
c()
r()
G(iter)
c()
r()
R(iter)
if self.letter == "G" or self.letter == "S":
G(self.iter)
elif self.iter == "R":
R(self.iter)
else:
raise "Use the letters definided at Github"
turtle.exitonclick()
def dragon(self, level=4, size=200, direction=45):
from turtle import right, left, forward, exitonclick
if not level:
forward(size)
else:
right(direction)
self.dragon(level - 1, size / 1.41421356237, 45)
left(direction * 2)
self.dragon(level - 1, size / 1.41421356237, -45)
right(direction)
exitonclick()
def pablo_curve(self):
import turtle
from turtle import exitonclick
t = turtle.Turtle()
t.speed(self.speed)
t.up()
t.setpos(self.origin)
t.down()
# definig letters
def A(iter):
if iter == 1:
t.right(90)
t.forward(self.len)
t.left(90)
else:
G1(iter - 1)
def B(iter):
if iter == 1:
t.forward(self.len)
else:
G2(iter - 1)
def C(iter):
if iter == 1:
t.right(180)
t.forward(self.len)
t.right(180)
else:
G3(iter - 1)
def D(iter):
if iter == 1:
t.left(90)
t.forward(self.len)
t.right(90)
else:
G4(iter - 1)
# defining grammar
def G1(iter):
B(iter)
A(iter)
C(iter)
D(iter)
A(iter)
C(iter)
A(iter)
B(iter)
D(iter)
A(iter)
B(iter)
A(iter)
C(iter)
D(iter)
A(iter)
def G2(iter):
D(iter)
B(iter)
A(iter)
C(iter)
B(iter)
A(iter)
B(iter)
D(iter)
C(iter)
B(iter)
B(iter)
D(iter)
C(iter)
A(iter)
B(iter)
def G3(iter):
D(iter)
C(iter)
A(iter)
B(iter)
C(iter)
A(iter)
C(iter)
D(iter)
B(iter)
C(iter)
D(iter)
C(iter)
A(iter)
B(iter)
C(iter)
def G4(iter):
C(iter)
D(iter)
B(iter)
A(iter)
D(iter)
B(iter)
D(iter)
C(iter)
A(iter)
D(iter)
C(iter)
D(iter)
B(iter)
A(iter)
D(iter)
if self.letter == "S" or self.letter == "A":
A(self.iter)
elif self.letter == "B":
B(self.iter)
elif self.letter == "C":
C(self.iter)
elif self.letter == "D":
D(self.iter)
else:
raise "Use the letters definided at Github"
exitonclick()
def hilbert_3D(self):
"""this class will build the hilbert curve in Blender
"""
import numpy as np
if len(self.origin)<3:
raise "Set a origin in a tuple with three components"
self.x, self.y, self.z = np.array([self.x]), np.array([self.y]), np.array([self.z])
self.len = len(self.x)
# defining letters
def A(iter):
if iter == 1:
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] + self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] + self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] + self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] + self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] - self.size)
else:
G1(iter - 1)
def B(iter):
if iter == 1:
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] - self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] + self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] - self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] - self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] + self.size)
else:
G2(iter - 1)
def C(iter):
if iter == 1:
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] + self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] + self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] - self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
else:
G3(iter - 1)
def D(iter):
if iter == 1:
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] - self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] - self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] + self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
else:
G4(iter - 1)
def E(iter):
if iter == 1:
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] + self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] + self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] - self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] + self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] - self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] - self.size), np.append(
self.z,
self.z[
-1])
else:
G5(iter)
def F(iter):
if iter == 1:
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] - self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] - self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] + self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] - self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] + self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] + self.size), np.append(
self.z,
self.z[
-1])
else:
G6(iter - 1)
def G(iter):
if iter == 1:
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] + self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] - self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] - self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] + self.size)
self.x, self.y, self.z = np.append(self.x, self.size + self.x[-1]), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] - self.size)
else:
G7(iter - 1)
def H(iter):
if iter == 1:
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] - self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] + self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] + self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] - self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] + self.size)
else:
G8(iter - 1)
def I(iter):
if iter == 1:
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] - self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y,
self.y[-1]), np.append(
self.z, self.z[-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] + self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] + self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
else:
G9(iter - 1)
def J(iter):
if iter == 1:
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] + self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] - self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] - self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
else:
G10(iter - 1)
def K(iter):
if iter == 1:
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] - self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] + self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] + self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] - self.size), np.append(
self.z,
self.z[
-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] - self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] + self.size), np.append(
self.z,
self.z[
-1])
else:
G11(iter - 1)
def L(iter):
if iter == 1:
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] + self.size), np.append(
self.z, self.z[-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] - self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] - self.size), np.append(
self.z, self.z[-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y,
self.y[-1]), np.append(
self.z, self.z[-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] + self.size), np.append(
self.z, self.z[-1])
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(self.z,
self.z[
-1] + self.size)
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] - self.size), np.append(
self.z, self.z[-1])
else:
G12(iter - 1)
# Definig constants
def c():
"Walk up"
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(
self.z, self.z[-1] + self.size)
def b():
"Walk down"
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(
self.z, self.z[-1] - self.size)
def e():
"Walk left"
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[-1])
def d():
"Walk right"
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[-1])
def f():
"Walk forward"
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] + self.size), np.append(
self.z,
self.z[
-1])
def back():
"Walk backward"
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] - self.size), np.append(
self.z,
self.z[
-1])
# Defining grammar
def G1(iter):
A(iter)
back()
E(iter)
d()
E(iter)
f()
H(iter)
c()
H(iter)
back()
K(iter)
e()
K(iter)
f()
D(iter)
def G2(iter):
D(iter)
f()
F(iter)
d()
F(iter)
back()
G(iter)
b()
G(iter)
f()
L(iter)
e()
L(iter)
b()
C(iter)
def G3(iter):
E(iter)
d()
A(iter)
c()
A(iter)
e()
I(iter)
back()
I(iter)
d()
B(iter)
b()
B(iter)
e()
L(iter)
def G4(iter):
F(iter)
d()
B(iter)
b()
B(iter)
e()
J(iter)
f()
J(iter)
d()
A(iter)
c()
A(iter)
e()
K(iter)
def G5(iter):
A(iter)
c()
C(iter)
back()
C(iter)
b()
F(iter)
d()
F(iter)
c()
J(iter)
f()
J(iter)
b()
G(iter)
def G6(iter):
B(iter)
b()
D(iter)
f()
D(iter)
f()
E(iter)
d()
E(iter)
b()
I(iter)
back()
I(iter)
c()
H(iter)
def G7(iter):
I(iter)
back()
K(iter)
e()
K(iter)
f()
B(iter)
b()
B(iter)
back()
E(iter)
d()
E(iter)
f()
J(iter)
def G8(iter):
J(iter)
f()
L(iter)
e()
L(iter)
back()
A(iter)
c()
A(iter)
f()
F(iter)
d()
F(iter)
back()
I(iter)
def G9(iter):
K(iter)
e()
G(iter)
b()
G(iter)
d()
C(iter)
back()
C(iter)
e()
H(iter)
c()
H(iter)
d()
F(iter)
def G10(iter):
L(iter)
e()
H(iter)
c()
H(iter)
d()
D(iter)
f()
D(iter)
e()
G(iter)
b()
G(iter)
d()
E(iter)
def G11(iter):
G(iter)
b()
I(iter)
back()
I(iter)
c()
L(iter)
e()
L(iter)
b()
D(iter)
f()
D(iter)
c()
A(iter)
def G12(iter):
H(iter)
c()
J(iter)
f()
b()
K(iter)
e()
K(iter)
c()
C(iter)
back()
C(iter)
b()
B(iter)
if self.letter == "S" or self.letter == "A":
A(self.iter)
elif self.letter == "B":
B(self.iter)
elif self.letter == "C":
C(self.iter)
elif self.letter == "D":
D(self.iter)
elif self.letter == "E":
E(self.iter)
elif self.letter == "F":
F(self.iter)
elif self.letter == "G":
G(self.iter)
elif self.letter == 'H':
H(self.iter)
elif self.letter == "I":
I(self.iter)
elif self.letter == "J":
J(self.iter)
elif self.letter == "K":
K(self.iter)
elif self.letter == "L":
L(self.iter)
self.tipe = "3D"
def peano_3D(self):
"""this class will build the hilbert curve in Blender
"""
import numpy as np
self.x, self.y, self.z = np.array([self.x]), np.array([self.y]), np.array([self.z])
self.len = len(self.x)
# Definig letters
def P(iter):
if iter == 1:
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y, self.y[
-1] + self.size), np.append(self.z, self.z[-1] + self.size)
else:
G1(iter - 1)
def R( iter):
if iter == 1:
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y, self.y[
-1] - self.size), np.append(self.z, self.z[-1] - self.size)
else:
G2(iter - 1)
def Q(iter):
if iter == 1:
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y, self.y[
-1] - self.size), np.append(self.z, self.z[-1] + self.size)
else:
G3(iter - 1)
def S(iter):
if iter == 1:
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y, self.y[
-1] + self.size), np.append(self.z, self.z[-1] - self.size)
else:
G4(iter - 1)
def py(iter, var="P"):
P(iter)
back()
R(iter)
back()
P(iter)
def ry(iter, var="R"):
R(iter)
f()
P(iter)
f()
R(iter)
def qy(iter, var="Q"):
Q(iter)
f()
S(iter)
f()
Q(iter)
def sy(iter, var="S"):
S(iter)
back()
Q(iter)
back()
S(iter)
def pyz(iter):
py(iter)
c()
qy(iter)
c()
py(iter)
def ryz(iter):
ry(iter)
b()
qy(iter)
b()
ry(iter)
def qyz(iter):
qy(iter)
c()
ry(iter)
c()
qy(iter)
def syz(iter):
sy(iter)
b()
py(iter)
b()
sy(iter)
# Defining constants
def c():
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(
self.z, self.z[-1] + self.size)
def b():
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y, self.y[-1]), np.append(
self.z, self.z[-1] - self.size)
def e():
self.x, self.y, self.z = np.append(self.x, self.x[-1] - self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[-1])
def d():
self.x, self.y, self.z = np.append(self.x, self.x[-1] + self.size), np.append(self.y,
self.y[-1]), np.append(
self.z,
self.z[-1])
def f():
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] + self.size), np.append(self.z,
self.z[
-1])
def back():
self.x, self.y, self.z = np.append(self.x, self.x[-1]), np.append(self.y,
self.y[-1] - self.size), np.append(self.z,
self.z[
-1])
# Gramatics
def G1(iter):
pyz(iter)
d()
ryz(iter)
d()
pyz(iter)
def G2(iter):
qyz(iter)
e()
syz(iter)
e()
qyz(iter)
def G3(iter):
ryz(iter)
d()
pyz(iter)
d()
ryz(iter)
def G4(iter):
syz(iter)
e()
qyz(iter)
e()
syz(iter)
if self.letter == "S" or self.letter == "P":
P(self.iter)
elif self.letter == "R":
R(self.letter)
elif self.letter == "Q":
Q(self.letter)
elif self.letter == "S":
S(self.letter)
self.tipe = "3D" # change the curve type to 3D
def plot(self):
if self.tipe != "3D":
raise "The function plot works only with 3D plots, to plot 2D don't need this function"
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection="3d")
ax.plot(self.x, self.y, self.z)
plt.show() | PypiClean |
/Devoir-0.1.1.tar.gz/Devoir-0.1.1/README.rst | Devoir — Quickly set up a working environment to edit a file
============================================================
|sources| |pypi| |documentation| |license|
When editing a LaTeX file, I want the file being edited with `vim
<http://www.vim.org>`_, the compiled file displayed using a pdf viewer, and
latex being run whenever something changes, using `latexmk
<http://users.phys.psu.edu/~collins/software/latexmk-jcc/>`_. But wait, there
is more.
- I often start a LaTeX document by copying an existing one, as a template.
- The pdf file may or may not exist when I start working: if I have already
   been working on this file before, the pdf file exists; if not, it does not
   exist, and my pdf viewer won't start on a non-existing file.
This program aims to automate all this process. I built it to process LaTeX
files, but it should work with other files too.
What's new?
-----------
See `changelog
<https://git.framasoft.org/spalax/devoir/blob/master/CHANGELOG>`_.
Download and install
--------------------
See the end of the list for a (quick and dirty) Debian package.
* From sources:
* Download: https://pypi.python.org/pypi/devoir
* Install (in a `virtualenv`, if you do not want to mess with your distribution installation system)::
python3 setup.py install
* From pip::
pip install devoir
* Quick and dirty Debian (and Ubuntu?) package
This requires `stdeb <https://github.com/astraw/stdeb>`_ to be installed::
python3 setup.py --command-packages=stdeb.command bdist_deb
sudo dpkg -i deb_dist/devoir-<VERSION>_all.deb
Documentation
-------------
* The compiled documentation is available on `readthedocs
<http://devoir.readthedocs.org>`_
* To compile it from source, download and run::
cd doc && make html
.. |documentation| image:: http://readthedocs.org/projects/devoir/badge
:target: http://devoir.readthedocs.org
.. |pypi| image:: https://img.shields.io/pypi/v/devoir.svg
:target: http://pypi.python.org/pypi/devoir
.. |license| image:: https://img.shields.io/pypi/l/devoir.svg
:target: http://www.gnu.org/licenses/gpl-3.0.html
.. |sources| image:: https://img.shields.io/badge/sources-devoir-brightgreen.svg
:target: http://git.framasoft.org/spalax/devoir
| PypiClean |
/FIRSTBEATLU-0.13.1.tar.gz/FIRSTBEATLU-0.13.1/econml/utilities.py | import numpy as np
import pandas as pd
import scipy.sparse
import sparse as sp
import itertools
import inspect
from operator import getitem
from collections import defaultdict, Counter
from sklearn import clone
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.linear_model import LassoCV, MultiTaskLassoCV, Lasso, MultiTaskLasso
from functools import reduce, wraps
from sklearn.utils import check_array, check_X_y
from sklearn.utils.validation import assert_all_finite
import warnings
from warnings import warn
from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold
from collections.abc import Iterable
from sklearn.utils.multiclass import type_of_target
import numbers
from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.summary import summary_return
from statsmodels.compat.python import lmap
import copy
from inspect import signature
MAX_RAND_SEED = np.iinfo(np.int32).max
class IdentityFeatures(TransformerMixin):
    """Featurizer that just returns the input data, unchanged."""

    def fit(self, X):
        """Fit method (does nothing, just returns self to satisfy the transformer API)."""
        return self

    def transform(self, X):
        """Perform the identity transform, which returns the input unmodified."""
        return X
def parse_final_model_params(coef, intercept, d_y, d_t, d_t_in, bias_part_of_coef, fit_cate_intercept):
    """Reshape a fitted final model's flat coefficients into CATE coefficient and intercept arrays.

    Parameters
    ----------
    coef : array
        Flat coefficient array of the final model.
    intercept : array or scalar
        Intercept of the final model (used only when the bias is not part of coef).
    d_y, d_t : tuple of int
        Output and treatment shapes used for reshaping.
    d_t_in : tuple of int
        Original treatment shape; when it differs from d_t and d_t is (1,),
        the treatment axis is dropped (binary-treatment case).
    bias_part_of_coef : bool
        Whether the first column of coef holds the intercept.
    fit_cate_intercept : bool
        Whether an intercept should be extracted at all.

    Returns
    -------
    (cate_coef, cate_intercept)
        cate_intercept is None when fit_cate_intercept is False, and a plain
        scalar when it is zero-dimensional.
    """
    effective_t = d_t
    # Binary treatment is represented by a single column; drop the treatment axis.
    if (d_t_in != d_t) and (d_t[0] == 1):
        effective_t = ()
    target_shape = d_y + effective_t + (-1,)
    cate_intercept = None
    if bias_part_of_coef:
        full = coef.reshape(target_shape)
        cate_coef = full[..., 1:]
        if fit_cate_intercept:
            cate_intercept = full[..., 0]
    else:
        cate_coef = coef.reshape(target_shape)
        if fit_cate_intercept:
            cate_intercept = np.reshape(intercept, d_y + effective_t)
    if cate_intercept is not None and np.ndim(cate_intercept) == 0:
        cate_intercept = cate_intercept.item()
    return cate_coef, cate_intercept
def check_high_dimensional(X, T, *, threshold, featurizer=None, discrete_treatment=False, msg=""):
    """Warn with ``msg`` when the featurized problem is not high-dimensional enough.

    Computes the effective number of features (after applying ``featurizer``,
    if any) times the effective number of treatments, and emits a UserWarning
    when that product falls below ``threshold``.
    """
    X, T = check_input_arrays(X, T)
    # Effective feature dimension, after optional featurization of one row.
    if X is None:
        n_features = 1
    elif featurizer is None:
        n_features = X.shape[1]
    else:
        n_features = clone(featurizer, safe=False).fit_transform(X[[0], :]).shape[1]
    # Effective treatment dimension.
    if discrete_treatment:
        n_treatments = len(set(T.flatten())) - 1
    elif np.ndim(T) < 2:
        n_treatments = 1
    else:
        n_treatments = T.shape[1]
    if n_features * n_treatments < threshold:
        warn(msg, UserWarning)
def inverse_onehot(T):
    """
    Reverse a one-hot encoding into a vector of numeric treatment indices.

    Note that we assume that the first column has been removed from the input,
    so an all-zero row decodes to category 0.

    Parameters
    ----------
    T : array (shape (n, d_t-1))
        The one-hot-encoded array

    Returns
    -------
    A : vector of int (shape (n,))
        The un-encoded 0-based category indices
    """
    assert ndim(T) == 2
    codes = np.arange(1, T.shape[1] + 1)
    # OneHotEncoder emits float64 by default, so cast the decoded result to int
    return (T @ codes).astype(int)
def issparse(X):
    """Determine whether an input is sparse.

    For the purposes of this function, both `scipy.sparse` matrices and
    `sparse.SparseArray` instances are considered sparse.

    Parameters
    ----------
    X : array-like
        The input to check

    Returns
    -------
    bool
        Whether the input is sparse
    """
    if scipy.sparse.issparse(X):
        return True
    return isinstance(X, sp.SparseArray)
def iscoo(X):
    """Determine whether an input is a `sparse.COO` array.

    Note that other sparse kinds (e.g. `scipy.sparse` matrices or
    `sparse.DOK` arrays) return False here.

    Parameters
    ----------
    X : array-like
        The input to check

    Returns
    -------
    bool
        Whether the input is a `COO` array
    """
    return isinstance(X, sp.COO)
def tocoo(X):
    """
    Convert an array to a sparse COO array.

    A `sparse.COO` input is returned unchanged; `sparse.DOK`, scipy sparse
    matrices, and dense arrays are converted.
    """
    if isinstance(X, sp.COO):
        return X
    if isinstance(X, sp.DOK):
        return sp.COO(X)
    if scipy.sparse.issparse(X):
        return sp.COO.from_scipy_sparse(X)
    return sp.COO.from_numpy(X)
def todense(X):
    """
    Convert an array to a dense numpy array.

    If the input is already a numpy array, this may create a new copy.
    """
    if scipy.sparse.issparse(X):
        return X.toarray()
    if isinstance(X, sp.SparseArray):
        return X.todense()
    # np.array is needed to handle list-like inputs, though it may copy an
    # already-dense array
    return np.array(X)
def size(X):
    """Return the number of elements in the array.

    Parameters
    ----------
    X : array_like
        Input data (dense or sparse)

    Returns
    -------
    int
        The number of elements of the array
    """
    if issparse(X):
        return X.size
    return np.size(X)
def shape(X):
    """Return a tuple of array dimensions.

    Sparse inputs report their ``.shape`` attribute directly; anything else
    goes through ``np.shape``.
    """
    return X.shape if issparse(X) else np.shape(X)
def ndim(X):
    """Return the number of array dimensions.

    Sparse inputs report their ``.ndim`` attribute directly; anything else
    goes through ``np.ndim``.
    """
    return X.ndim if issparse(X) else np.ndim(X)
def reshape(X, shape):
    """Return a new array that is a reshaped version of an input array.

    The output will be sparse iff the input is.

    Parameters
    ----------
    X : array_like
        The array to reshape
    shape : tuple of ints
        The desired shape of the output array

    Returns
    -------
    ndarray or SparseArray
        The reshaped output array
    """
    if not scipy.sparse.issparse(X):
        return X.reshape(shape)
    # scipy sparse matrices don't support reshaping (even for 2D they throw
    # not-implemented errors), so round-trip through pydata sparse; a 2D
    # result can safely be converted back to scipy sparse
    reshaped = sp.COO.from_scipy_sparse(X).reshape(shape)
    return reshaped.to_scipy_sparse() if len(shape) == 2 else reshaped
def _apply(op, *XS):
    """
    Apply a function to a sequence of sparse or dense array arguments.

    If any argument is sparse then every argument is converted to COO before
    ``op`` is applied; if *all* arguments were scipy sparse matrices and the
    result is 2D, the result is converted back to a scipy sparse matrix.
    """
    only_scipy = all(scipy.sparse.issparse(X) for X in XS)
    if any(issparse(X) for X in XS):
        XS = tuple(tocoo(X) for X in XS)
    out = op(*XS)
    if only_scipy and len(shape(out)) == 2:
        # all inputs were scipy sparse and the 2D result can be converted back
        return out.to_scipy_sparse()
    return out
def tensordot(X1, X2, axes):
    """
    Compute the tensor dot product along specified axes for arrays >= 1-D.

    Parameters
    ----------
    X1, X2 : array_like, len(shape) >= 1
        Tensors to "dot"
    axes : int or (2,) array_like
        If an int N, sum over the last N axes of `X1` and the first N axes
        of `X2` in order; the sizes of the corresponding axes must match.
        Otherwise, a pair of axis lists to be summed over, the first applying
        to `X1` and the second to `X2`, both of the same length.
    """
    def _td(A, B):
        if iscoo(A):
            return sp.tensordot(A, B, axes)
        return np.tensordot(A, B, axes)
    return _apply(_td, X1, X2)
def cross_product(*XS):
    """
    Compute the cross product of features.

    Parameters
    ----------
    X1 : n x d1 matrix
        First matrix of n samples of d1 features
        (or an n-element vector, which will be treated as an n x 1 matrix)
    X2 : n x d2 matrix
        Second matrix of n samples of d2 features
        (or an n-element vector, which will be treated as an n x 1 matrix)

    Returns
    -------
    A : n x (d1*d2*...) matrix
        Matrix of n samples of d1*d2*... cross product features,
        arranged in form such that each row t of X12 contains:
        [X1[t,0]*X2[t,0]*..., ..., X1[t,d1-1]*X2[t,0]*..., X1[t,0]*X2[t,1]*..., ..., X1[t,d1-1]*X2[t,1]*..., ...]

    """
    # every input must be a vector or a matrix
    for X in XS:
        assert 2 >= ndim(X) >= 1
    # all inputs must agree on the number of rows (samples)
    n = shape(XS[0])[0]
    for X in XS:
        assert n == shape(X)[0]

    def cross(XS):
        # Reshape each input so its feature axis lands in a distinct
        # dimension, letting broadcasting multiply all combinations; the
        # later an input appears, the faster its features vary in the output.
        k = len(XS)
        XS = [reshape(XS[i], (n,) + (1,) * (k - i - 1) + (-1,) + (1,) * i) for i in range(k)]
        return reshape(reduce(np.multiply, XS), (n, -1))
    return _apply(cross, XS)
def stack(XS, axis=0):
    """
    Join a sequence of arrays along a new axis.

    The axis parameter specifies the index of the new axis in the dimensions
    of the result: axis=0 makes it the first dimension, axis=-1 the last.

    Parameters
    ----------
    XS : sequence of array_like
        Each array must have the same shape
    axis : int, optional
        The axis in the result array along which the input arrays are stacked

    Returns
    -------
    ndarray or SparseArray
        The stacked array, which has one more dimension than the input arrays.
        It will be sparse if the inputs are.
    """
    def _st(*arrays):
        if iscoo(arrays[0]):
            return sp.stack(arrays, axis=axis)
        return np.stack(arrays, axis=axis)
    return _apply(_st, *XS)
def concatenate(XS, axis=0):
    """
    Join a sequence of arrays along an existing axis.

    Parameters
    ----------
    XS : sequence of array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int, optional
        The axis along which the arrays will be joined. Default is 0.

    Returns
    -------
    ndarray or SparseArray
        The concatenated array. It will be sparse if the inputs are.
    """
    def _cat(*arrays):
        if iscoo(arrays[0]):
            return sp.concatenate(arrays, axis=axis)
        return np.concatenate(arrays, axis=axis)
    return _apply(_cat, *XS)
# note: in contrast to np.hstack this only works with arrays of dimension at least 2
def hstack(XS):
    """
    Stack arrays in sequence horizontally (column wise).

    This is equivalent to concatenation along the second axis, so every
    input must be at least 2-dimensional (unlike ``np.hstack``).

    Parameters
    ----------
    XS : sequence of ndarrays
        The arrays must have the same shape along all but the second axis.

    Returns
    -------
    ndarray or SparseArray
        The array formed by stacking the given arrays. It will be sparse if the inputs are.
    """
    # Confusingly, this needs to concatenate, not stack (stack returns an array with an extra dimension)
    return concatenate(XS, 1)
def vstack(XS):
    """
    Stack arrays in sequence vertically (row wise).

    This is equivalent to concatenation along the first axis after
    1-D arrays of shape (N,) have been reshaped to (1,N).

    Parameters
    ----------
    XS : sequence of ndarrays
        The arrays must have the same shape along all but the first axis.
        1-D arrays must have the same length.

    Returns
    -------
    ndarray or SparseArray
        The array formed by stacking the given arrays, will be at least 2-D. It will be sparse if the inputs are.
    """
    # Confusingly, this needs to concatenate, not stack (stack returns an array with an extra dimension)
    return concatenate(XS, 0)
def transpose(X, axes=None):
    """
    Permute the dimensions of an array.

    Parameters
    ----------
    X : array_like
        Input array.
    axes : list of ints, optional
        By default, reverse the dimensions, otherwise permute the axes
        according to the values given

    Returns
    -------
    p : ndarray or SparseArray
        `X` with its axes permuted. This will be sparse if `X` is.
    """
    def _t(A):
        return A.transpose(axes) if iscoo(A) else np.transpose(A, axes)
    return _apply(_t, X)
def add_intercept(X):
    """
    Adds an intercept feature to an array by prepending a column of ones.

    Parameters
    ----------
    X : array-like
        Input array. Must be 2D.

    Returns
    -------
    arr : ndarray
        `X` with a column of ones prepended
    """
    return hstack([np.ones((X.shape[0], 1)), X])
def reshape_Y_T(Y, T):
    """
    Reshapes Y and T when Y.ndim = 2 and/or T.ndim = 1.

    Parameters
    ----------
    Y : array_like, shape (n, ) or (n, 1)
        Outcome for the treatment policy. Must be a vector or single-column matrix.
    T : array_like, shape (n, ) or (n, d_t)
        Treatment policy.

    Returns
    -------
    Y : array_like, shape (n, )
        Flattened outcome for the treatment policy.
    T : array_like, shape (n, 1) or (n, d_t)
        Reshaped treatment policy.
    """
    assert len(Y) == len(T)
    assert Y.ndim <= 2
    if Y.ndim == 2:
        # only single-outcome matrices can be flattened to a vector
        assert Y.shape[1] == 1
        Y = Y.flatten()
    if T.ndim == 1:
        # promote a treatment vector to a single-column matrix
        T = T.reshape(-1, 1)
    return Y, T
def check_inputs(Y, T, X, W=None, multi_output_T=True, multi_output_Y=True):
    """
    Input validation for CATE estimators.

    Checks Y, T, X, W for consistent length, enforces X and W to be 2-d, and
    applies the standard sklearn input checks (no NaN/inf targets,
    list-to-ndarray conversion) to every argument.

    Parameters
    ----------
    Y : array_like, shape (n, ) or (n, d_y)
        Outcome for the treatment policy.
    T : array_like, shape (n, ) or (n, d_t)
        Treatment policy.
    X : array-like, shape (n, d_x)
        Feature vector that captures heterogeneity.
    W : array-like, shape (n, d_w), optional
        High-dimensional controls.
    multi_output_T : bool
        Whether to allow more than one treatment.
    multi_output_Y : bool
        Whether to allow more than one outcome.

    Returns
    -------
    Y, T, X, W
        Converted and validated versions of the inputs.
    """
    # Validate X against T first, then reuse the validated X to check Y,
    # and finally validate W (when given) against the validated Y.
    X, T = check_X_y(X, T, multi_output=multi_output_T, y_numeric=True)
    _, Y = check_X_y(X, Y, multi_output=multi_output_Y, y_numeric=True)
    if W is not None:
        W = check_X_y(W, Y, multi_output=multi_output_Y, y_numeric=True)[0]
    return Y, T, X, W
def check_input_arrays(*args, validate_len=True, force_all_finite=True, dtype=None):
    """Cast input sequences into numpy arrays.

    Only sequence-like inputs are converted; all other inputs are left as-is.
    When `validate_len` is True, the sequences are checked for equal length.

    Parameters
    ----------
    args : scalar or array_like
        Inputs to be checked.
    validate_len : bool (default=True)
        Whether to check that the input arrays have the same length.
    force_all_finite : bool (default=True)
        Whether to disallow inf and nan in input arrays.
    dtype : 'numeric', type, list of type or None (default=None)
        Argument passed through to sklearn.utils.check_array; see its
        documentation for the exact conversion semantics.

    Returns
    -------
    args : array-like
        List of inputs where sequence-like objects have been cast to numpy arrays.
    """
    expected_len = None
    converted = list(args)
    for idx, value in enumerate(converted):
        if np.ndim(value) == 0:
            # Scalars (and None) pass through untouched.
            continue
        arr = check_array(value, dtype=dtype, ensure_2d=False, accept_sparse=True,
                          force_all_finite=force_all_finite)
        if not force_all_finite:
            # Checking of input values is disabled, but still surface a warning
            # when non-finite values are present.
            try:
                assert_all_finite(arr)
            except ValueError:
                warnings.warn("Input contains NaN, infinity or a value too large for dtype('float64') "
                              "but input check is disabled. Check the inputs before proceeding.")
        if validate_len:
            length = arr.shape[0]
            if expected_len is None:
                expected_len = length
            else:
                assert length == expected_len, \
                    "Input arrays have incompatible lengths: {} and {}".format(expected_len, length)
        converted[idx] = arr
    return converted
def get_input_columns(X, prefix="X"):
    """Extract column names from a dataframe-like input object.

    Currently supports column name extraction from pandas DataFrame and
    Series objects; any other array-like gets generated names.

    Parameters
    ----------
    X : array_like or None
        Input array with column names to be extracted.
    prefix : string or None
        If the input array doesn't have column names, a default using the naming scheme
        "{prefix}{column number}" will be returned.

    Returns
    -------
    cols : array-like or None
        List of columns corresponding to the dataframe-like object;
        None if the input is None.

    Raises
    ------
    ValueError
        If `X` is a scalar (0-dimensional) object.
    """
    if X is None:
        return None
    if np.ndim(X) == 0:
        # Scalars carry no column structure; fail loudly instead of guessing.
        # (fix: error message previously read "imput")
        raise ValueError(
            f"Expected array-like object for input with prefix {prefix} but got '{X}' object instead.")
    # Map supported pandas types to their column-extraction functions.
    # NOTE(review): exact type match (not isinstance) means pandas subclasses
    # fall through to the generated-name path -- presumably intentional; confirm.
    type_to_func = {
        pd.DataFrame: lambda x: x.columns.tolist(),
        pd.Series: lambda x: [x.name]
    }
    if type(X) in type_to_func:
        return type_to_func[type(X)](X)
    # Fallback: generate "prefix0", "prefix1", ... from the number of columns
    # (a 1-D input counts as a single column).
    len_X = 1 if np.ndim(X) == 1 else np.asarray(X).shape[1]
    return [f"{prefix}{i}" for i in range(len_X)]
def get_feature_names_or_default(featurizer, feature_names):
    """Best-effort extraction of output feature names from a featurizer.

    Tries, in order: sklearn>=1.0's `get_feature_names_out`, the deprecated
    `get_feature_names` (with or without an input-names argument), and finally
    transforming a dummy row and reading columns from the result. Returns None
    when every strategy fails, delegating handling to downstream logic.
    """
    # Prefer sklearn 1.0's get_feature_names_out over the deprecated method.
    if hasattr(featurizer, "get_feature_names_out"):
        try:
            return featurizer.get_feature_names_out(feature_names)
        except Exception:
            # Some featurizers throw here, e.g. a pipeline containing a
            # transformer that doesn't itself support names.
            pass
    if hasattr(featurizer, 'get_feature_names'):
        # Some sklearn featurizers accept no feature_names argument, so
        # dispatch on the method's arity.
        n_args = len(inspect.getfullargspec(featurizer.get_feature_names).args)
        if n_args == 1:
            return featurizer.get_feature_names()
        if n_args == 2:
            return featurizer.get_feature_names(feature_names)
    # Featurizer doesn't have 'get_feature_names' or has an atypical signature.
    try:
        # Last resort: transform a dummy row and read the resulting columns.
        dummy = np.ones((1, len(feature_names)))
        return get_input_columns(featurizer.transform(dummy), prefix="feat(X)")
    except Exception:
        # All attempts at retrieving transformed feature names have failed.
        return None
def check_models(models, n):
    """
    Input validation for metalearner models.

    Accepts either a single estimator (cloned once per treatment) or a
    list/tuple containing exactly `n` estimators.

    Parameters
    ----------
    models : estimator or a list/tuple of estimators
    n : int
        Number of models needed.

    Returns
    -------
    models : a list/tuple of estimators
    """
    if isinstance(models, (tuple, list)):
        # A sequence must supply exactly one estimator per treatment.
        if len(models) != n:
            raise ValueError("The number of estimators doesn't equal to the number of treatments. "
                             "Please provide either a tuple/list of estimators "
                             "with same number of treatments or an unified estimator.")
        return models
    if hasattr(models, 'fit'):
        # A single estimator is cloned once per treatment.
        return [clone(models, safe=False) for _ in range(n)]
    raise ValueError(
        "models must be either a tuple/list of estimators with same number of treatments "
        "or an unified estimator.")
def broadcast_unit_treatments(X, d_t):
    """
    Generate `d_t` unit treatments for each row of `X`.

    Parameters
    ----------
    d_t : int
        Number of treatments.
    X : array
        Features.

    Returns
    -------
    X, T : (array, array)
        The updated `X` array (with each row repeated `d_t` times),
        and the generated `T` array.
    """
    n_rows = shape(X)[0]
    # Tile T and repeat X along axis 0 so that the duplicated rows of X
    # remain consecutive, each paired with a distinct unit treatment.
    T = np.tile(np.eye(d_t), (n_rows, 1))
    X_rep = np.repeat(X, d_t, axis=0)
    return X_rep, T
def reshape_treatmentwise_effects(A, d_t, d_y):
    """
    Transform an effects matrix ordered first by treatment to be ordered by outcome.

    Parameters
    ----------
    A : array
        The array of effects, of size n*d_y*d_t.
    d_t : tuple of int
        Either () if T was a vector, or a 1-tuple of the number of columns of T if it was an array.
    d_y : tuple of int
        Either () if Y was a vector, or a 1-tuple of the number of columns of Y if it was an array.

    Returns
    -------
    A : array (shape (m, d_y, d_t))
        The transformed array. Singleton dimensions are dropped for vector
        inputs, as specified by `BaseCateEstimator.marginal_effect`.
    """
    A = reshape(A, (-1,) + d_t + d_y)
    if not (d_t and d_y):
        # At least one of T/Y was a vector: nothing to swap.
        return A
    # Swap treatment and outcome axes so the output is (m, d_y, d_t).
    return transpose(A, (0, 2, 1))
def einsum_sparse(subscripts, *arrs):
    """
    Evaluate the Einstein summation convention on sparse (COO) operands.

    Using the Einstein summation convention, many common multi-dimensional array operations can be represented
    in a simple fashion. This function provides a way to compute such summations.

    Parameters
    ----------
    subscripts : str
        Specifies the subscripts for summation.
        Unlike `np.einsum`, ellipses are not supported and the output must be explicitly included.
    arrs : list of COO arrays
        These are the sparse arrays for the operation.

    Returns
    -------
    SparseArray
        The sparse array calculated based on the Einstein summation convention.
    """
    inputs, outputs = subscripts.split('->')
    inputs = inputs.split(',')
    outputInds = set(outputs)
    allInds = set.union(*[set(i) for i in inputs])
    # same number of input definitions as arrays
    assert len(inputs) == len(arrs)
    # input definitions have same number of dimensions as each array
    assert all(arr.ndim == len(input) for (arr, input) in zip(arrs, inputs))
    # all result indices are unique
    assert len(outputInds) == len(outputs)
    # all result indices must match at least one input index
    assert outputInds <= allInds
    # map each index letter to all (array number, axis) pairs where it appears
    indMap = {c: [(n, i) for n in range(len(inputs)) for (i, x) in enumerate(inputs[n]) if x == c] for c in allInds}
    for c in indMap:
        # each index has the same cardinality wherever it appears
        assert len({arrs[n].shape[i] for (n, i) in indMap[c]}) == 1
    # State: list of (set of letters, list of (corresponding indices, value))
    # Algo: while list contains more than one entry
    #       take two entries
    #       sort both lists by intersection of their indices
    #       merge compatible entries (where intersection of indices is equal - in the resulting list,
    #       take the union of indices and the product of values), stepping through each list linearly
    # TODO: might be faster to break into connected components first
    #       e.g. for "ab,d,bc->ad", the two components "ab,bc" and "d" are independent,
    #       so compute their content separately, then take cartesian product
    #       this would save a few pointless sorts by empty tuples
    # TODO: Consider investigating other performance ideas for these cases
    #       where the dense method beat the sparse method (usually sparse is faster)
    #       e,facd,c->cfed
    #        sparse: 0.0335489
    #        dense:  0.011465999999999997
    #       gbd,da,egb->da
    #        sparse: 0.0791625
    #        dense:  0.007319099999999995
    #       dcc,d,faedb,c->abe
    #        sparse: 1.2868097
    #        dense:  0.44605229999999985
    def merge(x1, x2):
        # Sort-merge join of two partial results on their shared letters:
        # entries whose shared coordinates agree are combined (coordinates
        # unioned, values multiplied).
        (s1, l1), (s2, l2) = x1, x2
        keys = {c for c in s1 if c in s2}  # intersection of strings
        outS = ''.join(set(s1 + s2))  # union of strings
        # For each output letter, record which side (and axis) supplies it.
        outMap = [(True, s1.index(c)) if c in s1 else (False, s2.index(c)) for c in outS]
        def keyGetter(s):
            # Build a key function extracting the shared-letter coordinates.
            inds = [s.index(c) for c in keys]
            return lambda p: tuple(p[0][ind] for ind in inds)
        kg1 = keyGetter(s1)
        kg2 = keyGetter(s2)
        l1.sort(key=kg1)
        l2.sort(key=kg2)
        i1 = i2 = 0
        outL = []
        while i1 < len(l1) and i2 < len(l2):
            k1, k2 = kg1(l1[i1]), kg2(l2[i2])
            if k1 < k2:
                i1 += 1
            elif k2 < k1:
                i2 += 1
            else:
                # Matching join keys: expand to the full run of equal keys on
                # each side, then emit the cross product of the two runs.
                j1, j2 = i1, i2
                while j1 < len(l1) and kg1(l1[j1]) == k1:
                    j1 += 1
                while j2 < len(l2) and kg2(l2[j2]) == k2:
                    j2 += 1
                for c1, d1 in l1[i1:j1]:
                    for c2, d2 in l2[i2:j2]:
                        outL.append((tuple(c1[charIdx] if inFirst else c2[charIdx] for inFirst, charIdx in outMap),
                                     d1 * d2))
                i1 = j1
                i2 = j2
        return outS, outL
    # when indices are repeated within an array, pre-filter the coordinates and data
    def filter_inds(coords, data, n):
        # Keep only entries whose coordinates agree on every repeated letter
        # (i.e. the diagonal): for 'aab', keep rows where axis 0 == axis 1.
        counts = Counter(inputs[n])
        repeated = [(c, counts[c]) for c in counts if counts[c] > 1]
        if len(repeated) > 0:
            mask = np.full(len(data), True)
            for (k, v) in repeated:
                inds = [i for i in range(len(inputs[n])) if inputs[n][i] == k]
                for i in range(1, v):
                    mask &= (coords[:, inds[0]] == coords[:, inds[i]])
            if not all(mask):
                return coords[mask, :], data[mask]
        return coords, data
    xs = [(s, list(zip(c, d)))
          for n, (s, arr) in enumerate(zip(inputs, arrs))
          for c, d in [filter_inds(arr.coords.T, arr.data, n)]]
    # TODO: would using einsum's paths to optimize the order of merging help?
    while len(xs) > 1:
        xs.append(merge(xs.pop(), xs.pop()))
    # Accumulate duplicate output coordinates by summation, then assemble the COO result.
    results = defaultdict(int)
    for (s, l) in xs:
        coordMap = [s.index(c) for c in outputs]
        for (c, d) in l:
            results[tuple(c[i] for i in coordMap)] += d
    return sp.COO(np.array(list(results.keys())).T if results else
                  np.empty((len(outputs), 0)),
                  np.array(list(results.values())),
                  [arrs[indMap[c][0][0]].shape[indMap[c][0][1]] for c in outputs])
def fit_with_groups(model, X, y, groups=None, **kwargs):
    """
    Fit a model while correctly handling grouping if necessary.

    This enables us to perform an inner-loop cross-validation of a model
    which handles grouping correctly, which is not easy using typical sklearn models.

    For example, GridSearchCV and RandomSearchCV both support passing 'groups' to fit,
    but other CV-related estimators (such as those derived from LinearModelCV, including LassoCV)
    do not support passing groups to fit, which means that GroupKFold cannot be used as the cv instance
    when using these types, because the required 'groups' argument will never be passed to the
    GroupKFold's split method. See also https://github.com/scikit-learn/scikit-learn/issues/12052

    The (hacky) workaround that is used here is to explicitly set the 'cv' attribute (if there is one) to
    the exact set of rows and not to use GroupKFold even with the sklearn classes that could support it;
    this should work with classes derived from BaseSearchCV, LinearModelCV, and CalibratedClassifierCV.

    Parameters
    ----------
    model : estimator
        The model to fit
    X : array-like
        The features to fit against
    y : array-like
        The target to fit against
    groups : array-like, optional
        The set of groupings that should be kept together when splitting rows for
        cross-validation
    kwargs : dict
        Any other named arguments to pass to the model's fit

    Returns
    -------
    The result of calling the model's fit method.
    """
    if groups is not None:
        # assume that we should perform nested cross-validation if and only if
        # the model has a 'cv' attribute; this is a somewhat brittle assumption...
        if hasattr(model, 'cv'):
            old_cv = model.cv
            # logic copied from sklearn's check_cv: None means 5 folds, an int means k folds
            cv = 5 if old_cv is None else old_cv
            if isinstance(cv, numbers.Integral):
                cv = GroupKFold(cv)
            # otherwise we will assume the user already set the cv attribute to something
            # compatible with splitting with a 'groups' argument
            # now we have to compute the folds explicitly because some classifiers (like LassoCV)
            # don't use the groups when calling split internally
            splits = list(cv.split(X, y, groups=groups))
            try:
                # temporarily replace cv with the precomputed row splits for this fit
                model.cv = splits
                return model.fit(X, y, **kwargs)
            finally:
                # always restore the original cv so the model is left unmodified
                model.cv = old_cv
    return model.fit(X, y, **kwargs)
def filter_none_kwargs(**kwargs):
    """
    Filter out any keyword arguments that are None.

    This is useful when specific optional keyword arguments might not be
    universally supported: stripping them out when unset lets more calls succeed.

    Parameters
    ----------
    kwargs : dict
        The keyword arguments to filter.

    Returns
    -------
    filtered_kwargs : dict
        The input dictionary, with all entries whose value is None removed.
    """
    filtered = {}
    for name, value in kwargs.items():
        if value is not None:
            filtered[name] = value
    return filtered
class WeightedModelWrapper:
    """Helper class for assigning weights to models without this option.

    Fixes two typos relative to the previous revision: "assiging" in this
    docstring and "icrease" in the user-facing resampling warning.

    Parameters
    ----------
    model_instance : estimator
        Model that requires weights.
    sample_type : string, optional (default=`weighted`)
        Method for adding weights to the model. `weighted` for linear regression models
        where the weights can be incorporated in the matrix multiplication,
        `sampled` for other models. `sampled` samples the training set according
        to the normalized weights and creates a dataset larger than the original.
    """

    def __init__(self, model_instance, sample_type="weighted"):
        self.model_instance = model_instance
        if sample_type == "weighted":
            self.data_transform = self._weighted_inputs
        else:
            # Fall back to resampling; warn because it inflates variance.
            warnings.warn("The model provided does not support sample weights. "
                          "Manual weighted sampling may increase the variance in the results.", UserWarning)
            self.data_transform = self._sampled_inputs

    def fit(self, X, y, sample_weight=None):
        """Fit the underlying model instance with weighted inputs.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples, n_outcomes)
            Target values.
        sample_weight : array-like, shape (n_samples,), optional
            Per-sample weights; when None the data is used unchanged.

        Returns
        -------
        self : an instance of the underlying estimator.
        """
        if sample_weight is not None:
            X, y = self.data_transform(X, y, sample_weight)
        return self.model_instance.fit(X, y)

    def predict(self, X):
        """Predict using the underlying model.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Samples.

        Returns
        -------
        C : array, shape (n_samples, n_outcomes)
            Predicted values.
        """
        return self.model_instance.predict(X)

    def _weighted_inputs(self, X, y, sample_weight):
        # Scale rows by sqrt(weight) so that unweighted least squares on the
        # transformed data is equivalent to weighted least squares on the original.
        X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
        normalized_weights = sample_weight * X.shape[0] / np.sum(sample_weight)
        sqrt_weights = np.sqrt(normalized_weights)
        weighted_X = sqrt_weights.reshape(-1, 1) * X
        weighted_y = sqrt_weights.reshape(-1, 1) * y if y.ndim > 1 else sqrt_weights * y
        return weighted_X, weighted_y

    def _sampled_inputs(self, X, y, sample_weight):
        # Resample rows proportionally to their normalized weights; the resampled
        # set is up to 10x larger than the original to reduce sampling noise.
        normalized_weights = sample_weight / np.sum(sample_weight)
        data_length = int(min(1 / np.min(normalized_weights[normalized_weights > 0]), 10) * X.shape[0])
        data_indices = np.random.choice(X.shape[0], size=data_length, p=normalized_weights)
        return X[data_indices], y[data_indices]
class MultiModelWrapper:
    """Helper class for training a separate model for each treatment group.

    Parameters
    ----------
    model_list : array-like, shape (n_T, ), optional
        List of models to be trained separately for each treatment group.
        Defaults to an empty list.
    """

    def __init__(self, model_list=None):
        # Fix: the previous default was a mutable `[]`, which is shared across
        # every instance constructed without an explicit list.
        self.model_list = [] if model_list is None else model_list
        self.n_T = len(self.model_list)

    def fit(self, Xt, y, sample_weight=None):
        """Fit the underlying list of models with (optionally weighted) inputs.

        Parameters
        ----------
        Xt : array-like, shape (n_samples, n_features + n_treatments)
            Training data. The last n_T columns should be a one-hot encoding of the treatment assignment.
        y : array-like, shape (n_samples, )
            Target values.
        sample_weight : array-like, shape (n_samples,), optional
            Per-sample weights forwarded to each model's fit.

        Returns
        -------
        self : an instance of the class
        """
        X = Xt[:, :-self.n_T]
        t = Xt[:, -self.n_T:]
        for i in range(self.n_T):
            # Select the rows assigned to treatment i by the one-hot columns.
            mask = (t[:, i] == 1)
            if sample_weight is None:
                self.model_list[i].fit(X[mask], y[mask])
            else:
                self.model_list[i].fit(X[mask], y[mask], sample_weight[mask])
        return self

    def predict(self, Xt):
        """Predict each sample with the model of its assigned treatment.

        Parameters
        ----------
        Xt : array-like, shape (n_samples, n_features + n_treatments)
            Samples. The last n_T columns should be a one-hot encoding of the treatment assignment.

        Returns
        -------
        C : array, shape (n_samples, )
            Predicted values.
        """
        X = Xt[:, :-self.n_T]
        t = Xt[:, -self.n_T:]
        predictions = [self.model_list[np.nonzero(t[i])[0][0]].predict(X[[i]]) for i in range(len(X))]
        return np.concatenate(predictions)
def _safe_norm_ppf(q, loc=0, scale=1):
if hasattr(loc, "__len__"):
prelim = loc.copy()
if np.any(scale > 0):
prelim[scale > 0] = scipy.stats.norm.ppf(q, loc=loc[scale > 0], scale=scale[scale > 0])
elif scale > 0:
prelim = scipy.stats.norm.ppf(q, loc=loc, scale=scale)
else:
prelim = loc
return prelim
class Summary:
    # Largely adapted from statsmodels.iolib.summary.Summary
    """
    Result summary container.

    Construction takes no parameters; tables and text are attached afterwards
    with the `add_` methods.

    Attributes
    ----------
    tables : list of tables
        The SimpleTable instances; horizontally concatenated tables are not
        saved separately.
    extra_txt : str
        Extra lines added to the text output, used for warnings and explanations.
    """

    def __init__(self):
        # Start with no tables and no trailing text.
        self.tables = []
        self.extra_txt = None

    def __str__(self):
        return self.as_text()

    def __repr__(self):
        return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'

    def _repr_html_(self):
        """Render as HTML inside an IPython notebook."""
        return self.as_html()

    def add_table(self, res, header, index, title):
        """Wrap the given data in a SimpleTable and append it."""
        self.tables.append(SimpleTable(res, header, index, title))

    def add_extra_txt(self, etext):
        """Set additional text to be appended at the end of the text output.

        Parameters
        ----------
        etext : list[str]
            Lines that are added to the text output.
        """
        self.extra_txt = '\n'.join(etext)

    def as_text(self):
        """Return the summary tables and extra text as one plain string."""
        out = summary_return(self.tables, return_fmt='text')
        if self.extra_txt is not None:
            out = out + '\n\n' + self.extra_txt
        return out

    def as_latex(self):
        """Return the summary tables and extra text as a LaTeX string.

        Notes
        -----
        This currently merges tables with different numbers of columns;
        it is recommended to use `as_latex_tabular` directly on the
        individual tables.
        """
        out = summary_return(self.tables, return_fmt='latex')
        if self.extra_txt is not None:
            out = out + '\n\n' + self.extra_txt.replace('\n', ' \\newline\n ')
        return out

    def as_csv(self):
        """Return the concatenated summary tables in comma-delimited format."""
        out = summary_return(self.tables, return_fmt='csv')
        if self.extra_txt is not None:
            out = out + '\n\n' + self.extra_txt
        return out

    def as_html(self):
        """Return the concatenated summary tables in HTML format."""
        out = summary_return(self.tables, return_fmt='html')
        if self.extra_txt is not None:
            out = out + '<br/><br/>' + self.extra_txt.replace('\n', '<br/>')
        return out
class SeparateModel:
    """
    Split the data on the last feature and train a separate model per subsample.

    At fit time, rows whose last column equals i are routed to the i-th model
    (trained on all remaining columns); at predict time the last column again
    selects which model produces each row's prediction.
    """

    def __init__(self, *models):
        # Clone so that fitting this wrapper never mutates the caller's models.
        self.models = [clone(model) for model in models]

    def fit(self, XZ, T):
        """Fit each model on the rows assigned to it by the last column of XZ."""
        for (i, m) in enumerate(self.models):
            inds = (XZ[:, -1] == i)
            m.fit(XZ[inds, :-1], T[inds])
        return self

    def predict(self, XZ):
        """Predict each row using the model selected by the row's last column."""
        t_pred = np.zeros(XZ.shape[0])
        for (i, m) in enumerate(self.models):
            inds = (XZ[:, -1] == i)
            if np.any(inds):
                t_pred[inds] = m.predict(XZ[inds, :-1])
        return t_pred

    @property
    def coef_(self):
        """Concatenated coefficients of all submodels.

        Fix: np.concatenate requires a sequence of arrays; the previous code
        passed a generator expression, which raises on modern numpy.
        """
        return np.concatenate([model.coef_ for model in self.models])
def deprecated(message, category=FutureWarning):
    """
    Decorate a method or class so that using it emits a deprecation warning.

    Parameters
    ----------
    message : string
        The deprecation message to use.
    category : :class:`type`, optional, default :class:`FutureWarning`
        The warning category to use.
    """
    def decorator(to_wrap):
        if isinstance(to_wrap, type):
            # For classes, patch __init__ in place so the decorated object
            # remains a class rather than becoming a wrapper function.
            original_init = to_wrap.__init__

            @wraps(to_wrap.__init__)
            def warning_init(*args, **kwargs):
                warn(message, category, stacklevel=2)
                original_init(*args, **kwargs)

            to_wrap.__init__ = warning_init
            return to_wrap

        @wraps(to_wrap)
        def warning_call(*args, **kwargs):
            warn(message, category, stacklevel=2)
            return to_wrap(*args, **kwargs)

        return warning_call
    return decorator
def _deprecate_positional(message, bad_args, category=FutureWarning):
"""
Enables decorating a method to provide a warning when certain arguments are used positionally.
Parameters
----------
message: string
The deprecation message to use
bad_args : list of string
The positional arguments that will be keyword-only in the future
category: optional :class:`type`, default :class:`FutureWarning`
The warning category to use
"""
def decorator(to_wrap):
@wraps(to_wrap)
def m(*args, **kwargs):
# want to enforce that each bad_arg was either in kwargs,
# or else it was in neither and is just taking its default value
bound = signature(m).bind(*args, **kwargs)
wrong_args = False
for arg in bad_args:
if arg not in kwargs and arg in bound.arguments:
wrong_args = True
if wrong_args:
warn(message, category, stacklevel=2)
return to_wrap(*args, **kwargs)
return m
return decorator
class MissingModule:
    """
    Stand-in for a module that failed to import; defers the ImportError until use.

    Parameters
    ----------
    msg : string
        The message to display when an attempt to access a module member is made.
    exn : ImportError
        The original ImportError to chain as the cause of the raised exception.
    """

    def __init__(self, msg, exn):
        self.msg = msg
        self.exn = exn

    def __getattr__(self, _):
        # Any attribute access (beyond msg/exn, which live in the instance
        # __dict__ and never reach __getattr__) should throw.
        raise ImportError(self.msg) from self.exn

    def __call__(self, *args, **kwargs):
        # Calling the placeholder throws too, so it can stand in for
        # specifically-imported callables as a convenience.
        raise ImportError(self.msg) from self.exn
def transpose_dictionary(d):
    """
    Transpose a dictionary of dictionaries, swapping first- and second-level keys.

    Parameters
    ----------
    d : dict
        The dictionary to transpose; its values should all themselves be
        dictionaries.

    Returns
    -------
    output : dict
        The output dictionary with first- and second-level keys swapped.
    """
    flipped = defaultdict(dict)
    for outer_key, inner_dict in d.items():
        for inner_key, value in inner_dict.items():
            flipped[inner_key][outer_key] = value
    return flipped
def reshape_arrays_2dim(length, *args):
    """
    Coerce each input array to two dimensions.

    None entries become empty arrays of shape (length, 0); 1-D arrays become
    single-column matrices; anything else passes through unchanged.

    Parameters
    ----------
    length : scalar
        Number of samples (rows) used for the placeholder arrays.
    args : arrays
        Inputs to be reshaped.

    Returns
    -------
    new_args : list of arrays
        The reshaped arrays, in input order.
    """
    def _as_2d(a):
        if a is None:
            return np.empty((length, 0))
        if a.ndim == 1:
            return a.reshape((-1, 1))
        return a
    return [_as_2d(a) for a in args]
class _RegressionWrapper:
"""
A simple wrapper that makes a binary classifier behave like a regressor.
Essentially .fit, calls the fit method of the classifier and
.predict calls the .predict_proba method of the classifier
and returns the probability of label 1.
"""
def __init__(self, clf):
"""
Parameters
----------
clf : the classifier model
"""
self._clf = clf
def fit(self, X, y, **kwargs):
"""
Parameters
----------
X : features
y : one-hot-encoding of binary label, with drop='first'
"""
if len(y.shape) > 1 and y.shape[1] > 1:
y = y @ np.arange(1, y.shape[1] + 1)
self._clf.fit(X, y, **kwargs)
return self
def predict(self, X):
"""
Parameters
----------
X : features
"""
return self._clf.predict_proba(X)[:, 1:]
@deprecated("This class will be removed from a future version of this package; "
            "please use econml.sklearn_extensions.linear_model.WeightedLassoCV instead.")
class LassoCVWrapper:
    """Helper class to wrap either LassoCV or MultiTaskLassoCV depending on the shape of the target."""
    def __init__(self, *args, **kwargs):
        # Defer model construction to fit(), where the shape of Y determines
        # which sklearn class to instantiate; just remember the arguments here.
        self.args = args
        self.kwargs = kwargs
    def fit(self, X, Y):
        """Fit LassoCV (single outcome) or MultiTaskLassoCV (multiple outcomes) to X, Y."""
        assert shape(X)[0] == shape(Y)[0]
        assert ndim(Y) <= 2
        # Track whether predictions must be reshaped back into a column vector.
        self.needs_unravel = False
        if ndim(Y) == 2 and shape(Y)[1] > 1:
            self.model = MultiTaskLassoCV(*self.args, **self.kwargs)
        else:
            if ndim(Y) == 2 and shape(Y)[1] == 1:
                # LassoCV expects a 1-d target; remember to restore the column shape later.
                Y = np.ravel(Y)
                self.needs_unravel = True
            self.model = LassoCV(*self.args, **self.kwargs)
        self.model.fit(X, Y)
        return self
    def predict(self, X):
        """Predict with the wrapped model, restoring a column shape if Y was (n, 1)."""
        predictions = self.model.predict(X)
        return reshape(predictions, (-1, 1)) if self.needs_unravel else predictions
/CountryGoogleScraper-0.2.10.tar.gz/CountryGoogleScraper-0.2.10/GoogleScraper/selenium_mode.py |
import tempfile
import threading
from urllib.parse import quote
import json
import datetime
import time
import math
import re
import sys
import os
try:
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, WebDriverException
from selenium.common.exceptions import ElementNotVisibleException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from pyvirtualdisplay import Display
except ImportError as ie:
print(ie)
sys.exit('You can install missing modules with `pip3 install [modulename]`')
from GoogleScraper.scraping import SearchEngineScrape, SeleniumSearchError, get_base_search_url_by_search_engine, MaliciousRequestDetected
from GoogleScraper.user_agents import random_user_agent
import logging
# NOTE(review): the statements below run at import time -- merely importing this
# module starts a virtual display and launches a Chrome browser as side effects.
display = Display(visible=0, size=(800, 800))
display.start()
# NOTE(review): this module-level `driver` does not appear to be used elsewhere
# in this module; each SelScrape instance creates its own webdriver via
# _get_webdriver(). Verify before removing.
driver = webdriver.Chrome()
logger = logging.getLogger(__name__)
def get_selenium_scraper_by_search_engine_name(config, search_engine_name, *args, **kwargs):
    """Get the appropriate selenium scraper for the given search engine name.

    Args:
        config: The scrape configuration.
        search_engine_name: The search engine name.
        args: The arguments for the target search engine instance creation.
        kwargs: The keyword arguments for the target search engine instance creation.

    Returns:
        Either a concrete SelScrape subclass instance specific to the given
        search engine, or a generic SelScrape instance.
    """
    # Engine-specific classes follow the naming scheme '<Capitalizedname>SelScrape'.
    candidate = search_engine_name[0].upper() + search_engine_name[1:].lower() + 'SelScrape'
    scope = globals()
    if candidate in scope:
        return scope[candidate](config, *args, **kwargs)
    return SelScrape(config, *args, **kwargs)
class SelScrape(SearchEngineScrape, threading.Thread):
    """Instances of this class make use of selenium browser objects to query the search engines on a high level.

    Each instance runs as its own thread and drives one webdriver. The class
    attributes below map search engine names to the selectors and URLs needed
    to operate each engine's web UI.
    """
    # CSS selector for each engine's "next page" control ('' = unsupported).
    next_page_selectors = {
        'google': '#pnnext',
        'yandex': '.pager__button_kind_next',
        'bing': '.sb_pagN',
        'yahoo': '#pg-next',
        'baidu': '.n',
        'ask': '#paging div a.txt3.l_nu',
        'blekko': '',
        'duckduckgo': '',
        'googleimg': '#pnnext',
        'baiduimg': '.n',
    }
    # Selenium locator tuples (strategy, value) for each engine's query input field.
    # NOTE(review): the 'google' key appears twice in this literal; the later
    # entry silently overwrites the earlier one (both are (By.NAME, 'q')).
    input_field_selectors = {
        'google': (By.NAME, 'q'),
        'yandex': (By.NAME, 'text'),
        'bing': (By.NAME, 'q'),
        'yahoo': (By.NAME, 'p'),
        'baidu': (By.NAME, 'wd'),
        'duckduckgo': (By.NAME, 'q'),
        'ask': (By.NAME, 'q'),
        'blekko': (By.NAME, 'q'),
        'google': (By.NAME, 'q'),
        'googleimg': (By.NAME, 'as_q'),
        'baiduimg': (By.NAME, 'word'),
    }
    # Locators for engine-specific advanced-search parameter fields.
    param_field_selectors = {
        'googleimg': {
            'image_type': (By.ID, 'imgtype_input'),
            'image_size': (By.ID, 'imgsz_input'),
        },
    }
    # Default values for the advanced-search parameters declared above.
    search_params = {
        'googleimg': {
            'image_type': None,
            'image_size': None,
        },
    }
    # Landing-page URLs for normal (web) search mode.
    normal_search_locations = {
        'google': 'https://www.google.com/',
        'yandex': 'http://www.yandex.ru/',
        'bing': 'http://www.bing.com/',
        'yahoo': 'https://yahoo.com/',
        'baidu': 'http://baidu.com/',
        'duckduckgo': 'https://duckduckgo.com/',
        'ask': 'http://ask.com/',
        'blekko': 'http://blekko.com/',
    }
    # Landing-page URLs for image search mode (None = unsupported).
    image_search_locations = {
        'google': 'https://www.google.com/imghp',
        'yandex': 'http://yandex.ru/images/',
        'bing': 'https://www.bing.com/?scope=images',
        'yahoo': 'http://images.yahoo.com/',
        'baidu': 'http://image.baidu.com/',
        'duckduckgo': None,  # duckduckgo doesn't support direct image search
        'ask': 'http://www.ask.com/pictures/',
        'blekko': None,
        'googleimg':'https://www.google.com/advanced_image_search',
        'baiduimg': 'http://image.baidu.com/',
    }
    def __init__(self, config, *args, captcha_lock=None, browser_num=1, **kwargs):
        """Create a new SelScraper thread instance.

        Args:
            config: The scrape configuration dict.
            captcha_lock: To sync captcha solving (stdin).
            browser_num: A unique, semantic number for each thread.
            proxy: Optional; presumably forwarded through **kwargs to
                SearchEngineScrape to route all scraping through it -- TODO confirm.
        """
        self.search_input = None
        # Initialize both base classes explicitly: the thread machinery first,
        # then the scraping state.
        threading.Thread.__init__(self)
        SearchEngineScrape.__init__(self, config, *args, **kwargs)
        self.browser_type = self.config.get('sel_browser', 'chrome').lower()
        self.browser_num = browser_num
        self.captcha_lock = captcha_lock
        self.scrape_method = 'selenium'
        # Optional Xvfb display string (e.g. ':1'); applied by _set_xvfb_display().
        self.xvfb_display = self.config.get('xvfb_display', None)
        self.search_param_values = self._get_search_param_values()
        # get the base search url based on the search engine.
        self.base_search_url = get_base_search_url_by_search_engine(self.config, self.search_engine_name, self.scrape_method)
        super().instance_creation_info(self.__class__.__name__)
    def set_proxy(self):
        """Install a proxy on the communication channel."""
        # Intentional no-op: selenium proxies are configured at webdriver
        # creation time (see _get_Chrome/_get_Firefox), not afterwards.
    def switch_proxy(self, proxy):
        """Switch the proxy on the communication channel."""
        # Intentional no-op for the same reason as set_proxy: the webdriver
        # would have to be recreated to change its proxy.
    def proxy_check(self, proxy):
        """Verify that requests are actually routed through the configured proxy.

        Loads the configured 'proxy_info_url' in the browser and parses the JSON
        object found in the page source; the proxy counts as online when an
        'ip' field is present and truthy. Reports the outcome upstream via
        update_proxy_status and returns the online flag.
        """
        # NOTE(review): the 'proxy' parameter is unused; the check runs against
        # self.proxy. Also, `assert` is stripped under `python -O`; an explicit
        # check would validate this precondition more robustly.
        assert self.proxy and self.webdriver, 'Scraper instance needs valid webdriver and proxy instance to make the proxy check'
        online = False
        status = 'Proxy check failed: {host}:{port} is not used while requesting'.format(**self.proxy.__dict__)
        ipinfo = {}
        try:
            self.webdriver.get(self.config.get('proxy_info_url'))
            try:
                # The info URL is expected to serve a JSON object; grab the first
                # {...} span from the rendered page source (non-greedy, DOTALL).
                text = re.search(r'(\{.*?\})', self.webdriver.page_source, flags=re.DOTALL).group(0)
                ipinfo = json.loads(text)
            except ValueError as v:
                # JSON was present but malformed.
                logger.critical(v)
        except Exception as e:
            # Broad catch: any navigation/parsing failure becomes the status message.
            status = str(e)
        if 'ip' in ipinfo and ipinfo['ip']:
            online = True
            status = 'Proxy is working.'
        else:
            logger.warning(status)
        super().update_proxy_status(status, ipinfo, online)
        return online
def _save_debug_screenshot(self):
"""
Saves a debug screenshot of the browser window to figure
out what went wrong.
"""
tempdir = tempfile.gettempdir()
location = os.path.join(tempdir, '{}_{}_debug_screenshot.png'.format(self.search_engine_name, self.browser_type))
self.webdriver.get_screenshot_as_file(location)
def _set_xvfb_display(self):
    """Point the DISPLAY environment variable at the configured Xvfb display."""
    # TODO: should we check the format of the config?
    if not self.xvfb_display:
        return
    os.environ['DISPLAY'] = self.xvfb_display
def _get_webdriver(self):
    """Create and set up the webdriver matching self.browser_type.

    Chrome is quite fast, but not as stealthy as PhantomJS.

    Returns:
        The boolean result of the matching factory method (True when the
        driver was set up), or False when the browser type is unknown.
    """
    factories = {
        'chrome': self._get_Chrome,
        'firefox': self._get_Firefox,
        'phantomjs': self._get_PhantomJS,
    }
    factory = factories.get(self.browser_type)
    if factory is None:
        return False
    return factory()
def _get_Chrome(self):
    """Instantiate a Chrome webdriver, routed through self.proxy if one is set.

    Returns:
        True when the driver was created successfully.

    Raises:
        WebDriverException: when no chrome executable or chromedriver is
            installed; deliberately re-raised so the caller sees the cause.
    """
    try:
        if self.proxy:
            chrome_ops = webdriver.ChromeOptions()
            chrome_ops.add_argument(
                '--proxy-server={}://{}:{}'.format(self.proxy.proto, self.proxy.host, self.proxy.port))
            self.webdriver = webdriver.Chrome(chrome_options=chrome_ops)
        else:
            self.webdriver = webdriver.Chrome()  # service_log_path='/tmp/chromedriver_log.log')
        return True
    except WebDriverException:
        # we don't have a chrome executable or a chrome webdriver installed
        raise
    # NOTE: the original trailing 'return False' was unreachable (the bare
    # 'raise' above always propagates) and has been removed.
def _get_Firefox(self):
    """Instantiate a Firefox webdriver, configuring the proxy via profile prefs.

    Supports socks4/socks5 and http proxies.

    Returns:
        True on success, False when no Firefox/geckodriver is available.

    Raises:
        ValueError: when the proxy protocol is neither socks nor http.
    """
    try:
        if self.proxy:
            # normalize once; previously the 'http' comparison was
            # case-sensitive while the socks check lowercased the proto.
            proto = self.proxy.proto.lower()
            profile = webdriver.FirefoxProfile()
            # value 1 means the proxy is user set, regardless of the type
            profile.set_preference("network.proxy.type", 1)
            if proto.startswith('socks'):
                profile.set_preference("network.proxy.socks", self.proxy.host)
                profile.set_preference("network.proxy.socks_port", self.proxy.port)
                profile.set_preference("network.proxy.socks_version", 5 if proto[-1] == '5' else 4)
            elif proto == 'http':
                profile.set_preference("network.proxy.http", self.proxy.host)
                profile.set_preference("network.proxy.http_port", self.proxy.port)
            else:
                raise ValueError('Invalid protocol given in proxyfile.')
            # single commit of the prefs; the extra call inside the socks
            # branch in the original was redundant.
            profile.update_preferences()
            self.webdriver = webdriver.Firefox(firefox_profile=profile)
        else:
            self.webdriver = webdriver.Firefox()
        return True
    except WebDriverException as e:
        # reaching here is bad, since we have no available webdriver instance.
        logger.error(e)
    return False
def _get_PhantomJS(self):
    """Instantiate a headless PhantomJS webdriver.

    Proxy settings (including optional authentication) are handed over on
    the service command line, and a random desktop user agent is injected
    into the page capabilities.

    Returns:
        True on success, False otherwise.
    """
    try:
        args = []

        if self.proxy:
            args.append('--proxy={}:{}'.format(self.proxy.host, self.proxy.port))
            args.append('--proxy-type={}'.format(self.proxy.proto))
            if self.proxy.username and self.proxy.password:
                args.append('--proxy-auth={}:{}'.format(self.proxy.username, self.proxy.password))

        capabilities = dict(DesiredCapabilities.PHANTOMJS)
        capabilities["phantomjs.page.settings.userAgent"] = random_user_agent(only_desktop=True)

        self.webdriver = webdriver.PhantomJS(service_args=args, desired_capabilities=capabilities)
        return True
    except WebDriverException as err:
        logger.error(err)
    return False
def handle_request_denied(self, status_code=None):
    """Check whether Google detected a potentially harmful request.

    Whenever such potential abuse is detected, Google shows a captcha.
    This method blocks as long as someone enters the captcha in the browser
    window. When the window is not visible (for example when using
    PhantomJS), a png of the page is opened so the user can enter the
    solution on the command line.

    Args:
        status_code: Optional HTTP status code of the denied request.
            Defaults to None (selenium webdriver objects expose no status
            code, and the original call site passed nothing, which raised
            a TypeError); the superclass is always notified with '400'.

    Returns:
        The search input field (or None when manual solving succeeded).

    Raises:
        MaliciousRequestDetected: when there was no way to stop the search
            engine from denying our requests.
    """
    # selenium webdriver objects have no status code :/
    super().handle_request_denied('400')

    needles = self.malicious_request_needles[self.search_engine_name]

    if needles and needles['inurl'] in self.webdriver.current_url \
            and needles['inhtml'] in self.webdriver.page_source:

        if self.config.get('manual_captcha_solving', False):
            with self.captcha_lock:
                import tempfile
                tf = tempfile.NamedTemporaryFile('wb')
                tf.write(self.webdriver.get_screenshot_as_png())
                # flush the buffered png so the browser sees a complete file
                tf.flush()
                import webbrowser
                webbrowser.open('file://{}'.format(tf.name))
                solution = input('enter the captcha please...')
                self.webdriver.find_element_by_name('submit').send_keys(solution + Keys.ENTER)
                try:
                    self.search_input = WebDriverWait(self.webdriver, 5).until(
                        EC.visibility_of_element_located(self._get_search_input_field()))
                except TimeoutException:
                    raise MaliciousRequestDetected('Requesting with this ip is not possible at the moment.')
                tf.close()
        else:
            # Just wait until the user solves the captcha in the browser window
            # 10 hours if needed :D
            logger.info('Waiting for user to solve captcha')
            return self._wait_until_search_input_field_appears(10 * 60 * 60)
def build_search(self):
    """Navigate the browser to the page where searching starts.

    Image searches start from an engine-specific location; everything else
    starts from the base search url of the engine.
    """
    assert self.webdriver, 'Webdriver needs to be ready to build the search'

    if self.config.get('search_type', 'normal') == 'image':
        start_url = self.image_search_locations[self.search_engine_name]
    else:
        start_url = self.base_search_url

    self.webdriver.get(start_url)
def _get_search_param_values(self):
    """Collect configured values for this engine's extra search parameters.

    Returns:
        A dict mapping each parameter key of the current engine to its
        configured value, keeping only truthy values.
    """
    if self.search_engine_name not in self.search_params:
        return {}
    return {
        key: self.config.get(key, None)
        for key in self.search_params[self.search_engine_name]
        if self.config.get(key, None)
    }
def _get_search_input_field(self):
    """Return the locator of the search input box for the current engine.

    Returns:
        A locator tuple in the form used by selenium's
        presence_of_element_located().
    """
    selectors = self.input_field_selectors
    return selectors[self.search_engine_name]
def _get_search_param_fields(self):
    """Return the locators of the engine's extra parameter fields.

    Returns:
        The locator mapping for the current engine, or an empty dict when
        the engine defines none.
    """
    return self.param_field_selectors.get(self.search_engine_name, {})
def _wait_until_search_input_field_appears(self, max_wait=5):
    """Wait until the search input field can be located.

    Args:
        max_wait: How long to wait maximally before returning False.

    Returns:
        The handle to the search input field, or False when it could not
        be located within max_wait seconds.
    """
    locator = self._get_search_input_field()

    try:
        return WebDriverWait(self.webdriver, max_wait).until(
            lambda driver: driver.find_element(*locator))
    except TimeoutException as e:
        logger.error('{}: TimeoutException waiting for search input field: {}'.format(self.name, e))
        return False
def _wait_until_search_param_fields_appears(self, max_wait=5):
    """Wait until all extra search parameter fields are present.

    Args:
        max_wait: How long to wait maximally before returning False.

    Returns:
        True once every parameter field was found, False on timeout.
    """
    def all_fields_present(driver):
        # all() short-circuits on the first missing (falsy) field, just
        # like the original early return did.
        return all(
            driver.find_element(*locator)
            for _, locator in self._get_search_param_fields().items()
        )

    try:
        return WebDriverWait(self.webdriver, max_wait).until(all_fields_present)
    except TimeoutException as e:
        logger.error('{}: TimeoutException waiting for search param field: {}'.format(self.name, e))
        return False
def _goto_next_page(self):
    """
    Click the next page element,

    Returns:
        The url of the next page or False if there is no such url
        (end of available pages for instance).
    """
    next_url = ''
    element = self._find_next_page_element()

    # _find_next_page_element() returns a clickable element for normal
    # search, or True for image search (which scrolls instead); only a
    # real element has a click attribute.
    if hasattr(element, 'click'):
        next_url = element.get_attribute('href')
        try:
            element.click()
        except WebDriverException:
            # See http://stackoverflow.com/questions/11908249/debugging-element-is-not-clickable-at-point-error
            # first move mouse to the next element, some times the element is not visibility, like blekko.com
            selector = self.next_page_selectors[self.search_engine_name]
            if selector:
                try:
                    next_element = WebDriverWait(self.webdriver, 5).until(
                        EC.presence_of_element_located((By.CSS_SELECTOR, selector)))
                    webdriver.ActionChains(self.webdriver).move_to_element(next_element).perform()
                    # wait until the next page link emerges
                    WebDriverWait(self.webdriver, 8).until(
                        EC.visibility_of_element_located((By.CSS_SELECTOR, selector)))
                    element = self.webdriver.find_element_by_css_selector(selector)
                    next_url = element.get_attribute('href')
                    element.click()
                except WebDriverException:
                    # best effort: give up on clicking; next_url may still be set
                    pass

    # wait until the next page was loaded
    if not next_url:
        return False
    else:
        return next_url
def _find_next_page_element(self):
    """Finds the element that locates the next page for any search engine.

    Returns:
        The element that needs to be clicked to get to the next page or a
        boolean value to indicate an error condition.
        NOTE(review): for a search_type other than 'normal'/'image' this
        implicitly returns None — confirm callers tolerate that.

    Raises:
        Exception: when the next page link cannot be located in time.
    """
    if self.search_type == 'normal':
        selector = self.next_page_selectors[self.search_engine_name]
        try:
            # wait until the next page link is clickable
            WebDriverWait(self.webdriver, 5).until(EC.element_to_be_clickable((By.CSS_SELECTOR, selector)))
        except (WebDriverException, TimeoutException) as e:
            self._save_debug_screenshot()
            raise Exception('{}: Cannot locate next page element: {}'.format(self.name, str(e)))

        return self.webdriver.find_element_by_css_selector(selector)

    elif self.search_type == 'image':
        # image search paginates by scrolling instead of clicking a link
        self.page_down()
        return True
def wait_until_serp_loaded(self):
    """
    Wait until the requested SERP page is loaded.

    We know that the correct page is loaded when self.page_number appears
    in the pagination widget of the page. Duckduckgo has no pagination and
    just gets a short sleep; engines without a known pagination selector
    fall back to waiting for the keyword in the title.
    """
    if self.search_type == 'normal':

        # CSS selector marking the current page number in the pagination
        # widget of each supported search engine.
        page_number_selectors = {
            'google': '#navcnt td.cur',
            'yandex': '.pager__item_current_yes font font',
            'bing': 'nav li a.sb_pagS',
            'yahoo': '.compPagination strong',
            'baidu': '#page .fk_cur + .pc',
            'ask': '#paging .pgcsel .pg',
        }

        if self.search_engine_name == 'duckduckgo':
            # no pagination in duckduckgo
            time.sleep(1.5)
            return

        selector = page_number_selectors.get(self.search_engine_name)
        if selector is None:
            # Unknown engine: the original code hit an UnboundLocalError
            # here; fall back to the generic title check instead.
            self.wait_until_title_contains_keyword()
            return

        try:
            WebDriverWait(self.webdriver, 5).\
                until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, selector), str(self.page_number)))
        except TimeoutException:
            self._save_debug_screenshot()
            content = self.webdriver.find_element_by_css_selector(selector).text
            # BUG FIX: 'content' used to be passed as a second argument to
            # Exception() instead of to .format().
            raise Exception('Pagenumber={} did not appear in navigation. Got "{}" instead'
                            .format(self.page_number, content))

    elif self.search_type == 'image':
        self.wait_until_title_contains_keyword()

    else:
        self.wait_until_title_contains_keyword()
def wait_until_title_contains_keyword(self):
    """Block briefly until the page title contains the current query.

    On timeout the mismatch is only logged at debug level; nothing is
    raised.
    """
    try:
        WebDriverWait(self.webdriver, 5).until(EC.title_contains(self.query))
    except TimeoutException:
        message = '{}: Keyword "{}" not found in title: {}'.format(self.name, self.query, self.webdriver.title)
        logger.debug(SeleniumSearchError(message))
def search(self):
    """Search with webdriver.

    Fills out the search form of the search engine for each keyword.
    Clicks the next link while pages_per_keyword is not reached.
    """
    for self.query, self.pages_per_keyword in self.jobs.items():

        self.search_input = self._wait_until_search_input_field_appears()

        # a False result means the field never appeared (likely blocked)
        if self.search_input is False and self.config.get('stop_on_detection'):
            self.status = 'Malicious request detected'
            return

        if self.search_input is False:
            # @todo: pass status_code
            # NOTE(review): handle_request_denied() is called without the
            # status_code argument its signature declares — verify.
            self.search_input = self.handle_request_denied()

        if self.search_input:
            self.search_input.clear()
            time.sleep(.25)

            self.search_param_fields = self._get_search_param_fields()

            # inject engine-specific extra parameters via javascript
            if self.search_param_fields:
                wait_res = self._wait_until_search_param_fields_appears()
                if wait_res is False:
                    raise Exception('Waiting search param input fields time exceeds')
                for param, field in self.search_param_fields.items():
                    if field[0] == By.ID:
                        js_tpl = '''
                        var field = document.getElementById("%s");
                        field.setAttribute("value", "%s");
                        '''
                    elif field[0] == By.NAME:
                        js_tpl = '''
                        var fields = document.getElementsByName("%s");
                        for (var f in fields) {
                            f.setAttribute("value", "%s");
                        }
                        '''
                    js_str = js_tpl % (field[1], self.search_param_values[param])
                    self.webdriver.execute_script(js_str)

            try:
                self.search_input.send_keys(self.query + Keys.ENTER)
            except ElementNotVisibleException:
                # retry once after giving the page a moment to settle
                time.sleep(2)
                self.search_input.send_keys(self.query + Keys.ENTER)

            self.requested_at = datetime.datetime.utcnow()
        else:
            logger.debug('{}: Cannot get handle to the input form for keyword {}.'.format(self.name, self.query))
            continue

        super().detection_prevention_sleep()
        super().keyword_info()

        for self.page_number in self.pages_per_keyword:

            self.wait_until_serp_loaded()

            try:
                self.html = self.webdriver.execute_script('return document.body.innerHTML;')
            except WebDriverException as e:
                # fall back to the raw page source if JS execution fails
                self.html = self.webdriver.page_source

            super().after_search()

            # Click the next page link not when leaving the loop
            # in the next iteration.
            if self.page_number in self.pages_per_keyword:
                next_url = self._goto_next_page()
                self.requested_at = datetime.datetime.utcnow()

                if not next_url:
                    break
def page_down(self):
    """Scroll the page down by one viewport height with javascript.

    Used for next page in image search mode or when the
    next results are obtained by scrolling down a page.
    """
    scroll_script = '''
    var w = window,
        d = document,
        e = d.documentElement,
        g = d.getElementsByTagName('body')[0],
        y = w.innerHeight|| e.clientHeight|| g.clientHeight;
    window.scrollBy(0,y);
    return y;
    '''
    self.webdriver.execute_script(scroll_script)
def run(self):
    """Run the SelScraper.

    Sets up the optional Xvfb display and the webdriver, arranges the
    browser window in a 4-column grid by browser_num, then performs the
    search. The webdriver is always quit afterwards, even when the search
    raises (the original leaked the browser process on errors).

    Raises:
        Exception: when no selenium webdriver could be created.
    """
    self._set_xvfb_display()

    if not self._get_webdriver():
        raise Exception('{}: Aborting due to no available selenium webdriver.'.format(self.name))

    try:
        self.webdriver.set_window_size(400, 400)
        # integer '//' already floors; the original math.floor() around it
        # was redundant.
        self.webdriver.set_window_position(400 * (self.browser_num % 4), 400 * (self.browser_num // 4))
    except WebDriverException as e:
        logger.debug('Cannot set window size: {}'.format(e))

    try:
        super().before_search()

        if self.startable:
            self.build_search()
            self.search()
    finally:
        # always release the browser process
        if self.webdriver:
            self.webdriver.quit()
"""
For most search engines, the normal SelScrape works perfectly, but sometimes
the scraping logic is different for other search engines.
Duckduckgo loads new results on the fly (via ajax) and doesn't support any "next page"
link. Other search engines like gekko.com have a completely different SERP page format.
That's why we need to inherit from SelScrape for specific logic that only applies for the given
search engine.
The following functionality may differ in particular:
- _goto_next_page()
- _get_search_input()
- _wait_until_search_input_field_appears()
- _handle_request_denied()
- wait_until_serp_loaded()
"""
class DuckduckgoSelScrape(SelScrape):
    """
    Duckduckgo is a little special since new results are obtained by ajax.
    next page thus is then to scroll down.

    Furthermore duckduckgo.com doesn't seem to work with Phantomjs. Maybe they block it, but I
    don't know how ??!
    It cannot be the User-Agent, because I already tried this.
    """

    def __init__(self, *args, **kwargs):
        SelScrape.__init__(self, *args, **kwargs)
        # NOTE(review): appears unused within this class — confirm before
        # removing.
        self.largest_id = 0

    def _goto_next_page(self):
        """Scroll down to trigger duckduckgo's ajax result loading.

        Returns:
            True while more results are available ('No more results' is
            absent from the captured html), False otherwise.
        """
        super().page_down()
        return 'No more results' not in self.html

    def wait_until_serp_loaded(self):
        # NOTE(review): delegates to the parent's *search input* wait
        # rather than a pagination check — confirm this is intentional.
        super()._wait_until_search_input_field_appears()
class BlekkoSelScrape(SelScrape):
    """Selenium scraper variant for blekko.

    blekko exposes no next-page link, so pagination is disabled.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _goto_next_page(self):
        # nothing to click: blekko has no next page element
        pass
class AskSelScrape(SelScrape):
    """Selenium scraper variant for ask.com.

    ask.com reflects the query in the result URL, so page readiness is
    detected from the URL rather than from a pagination widget.
    (Also removes a stray '| PypiClean' extraction artifact that had been
    fused onto the final statement, breaking the syntax.)
    """

    def __init__(self, *args, **kwargs):
        SelScrape.__init__(self, *args, **kwargs)

    def wait_until_serp_loaded(self):
        """Block until the current query appears in the result page URL."""

        def wait_until_keyword_in_url(driver):
            try:
                # the query may be url-quoted or '+'-joined in the URL
                return quote(self.query) in driver.current_url or \
                    self.query.replace(' ', '+') in driver.current_url
            except WebDriverException:
                pass

        WebDriverWait(self.webdriver, 5).until(wait_until_keyword_in_url)
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/layout/ToggleSplitter.js | if(!dojo._hasResource["dojox.layout.ToggleSplitter"]){
dojo._hasResource["dojox.layout.ToggleSplitter"]=true;
dojo.provide("dojox.layout.ToggleSplitter");
dojo.experimental("dojox.layout.ToggleSplitter");
dojo.require("dijit.layout.BorderContainer");
dojo.declare("dojox.layout.ToggleSplitter",[dijit.layout._Splitter],{open:true,closedThreshold:5,openSize:"",_closedSize:"0",templateString:"<div class=\"dijitSplitter dojoxToggleSplitter\" dojoAttachEvent=\"onkeypress:_onKeyPress,onmousedown:_onMouseDown\" tabIndex=\"0\" waiRole=\"separator\"><div dojoAttachPoint=\"toggleNode\" class=\"dijitSplitterThumb dojoxToggleSplitterIcon\"></div></div>",postCreate:function(){
this._started=false;
this.inherited(arguments);
var _1=this.region;
dojo.addClass(this.domNode,"dojoxToggleSplitter"+_1.charAt(0).toUpperCase()+_1.substring(1));
this.connect(this,"onDblClick","_toggleMe");
},startup:function(){
this.inherited(arguments);
var _2=this.child.domNode,_3=dojo.style(_2,(this.horizontal?"height":"width"));
dojo.forEach(["toggleSplitterOpen","toggleSplitterClosedThreshold","toggleSplitterOpenSize"],function(_4){
var _5=_4.substring("toggleSplitter".length);
_5=_5.charAt(0).toLowerCase()+_5.substring(1);
if(_4 in this.child){
this[_5]=this.child[_4];
}
},this);
if(!this.openSize){
this.openSize=(this.open)?_3+"px":"75px";
}
this._openStyleProps=this._getStyleProps(_2,true);
this._started=true;
this.attr("open",this.open);
return this;
},_onMouseUp:function(_6){
dojo.disconnect(this._onMoveHandle);
dojo.disconnect(this._onUpHandle);
delete this._onMoveHandle;
delete this._onUpHandle;
delete this._startPosn;
},_onPrelimMouseMove:function(_7){
var _8=this._startPosn||0;
var _9=3;
var _a=Math.abs(_8-(this.horizontal?_7.clientY:_7.clientX));
if(_a>=_9){
dojo.disconnect(this._onMoveHandle);
this._startDrag(_7);
}
},_onMouseDown:function(_b){
if(!this.open){
return;
}
if(!this._onUpHandle){
this._onUpHandle=dojo.connect(dojo.body(),"onmouseup",this,"_onMouseUp");
}
if(!this._onMoveHandle){
this._startPosn=this.horizontal?_b.clientY:_b.clientX;
this._onMoveHandle=dojo.connect(dojo.body(),"onmousemove",this,"_onPrelimMouseMove");
}
},_handleOnChange:function(){
var _c=this.child.domNode,_d,_e=this.horizontal?"height":"width";
if(this.open){
var _f=dojo.mixin({display:"block",overflow:"auto",visibility:"visible"},this._openStyleProps);
_f[_e]=(this._openStyleProps&&this._openStyleProps[_e])?this._openStyleProps[_e]:this.openSize;
dojo.style(_c,_f);
this.connect(this.domNode,"onmousedown","_onMouseDown");
}else{
var _10=dojo.getComputedStyle(_c);
_d=this._getStyleProps(_c,true,_10);
var _11=this._getStyleProps(_c,false,_10);
this._openStyleProps=_d;
dojo.style(_c,_11);
}
this._setStateClass();
if(this.container._started){
this.container._layoutChildren(this.region);
}
},_getStyleProps:function(_12,_13,_14){
if(!_14){
_14=dojo.getComputedStyle(_12);
}
var _15={},dim=this.horizontal?"height":"width";
_15["overflow"]=(_13)?_14["overflow"]:"hidden";
_15["visibility"]=(_13)?_14["visibility"]:"hidden";
_15[dim]=(_13)?_12.style[dim]||_14[dim]:this._closedSize;
var _16=["Top","Right","Bottom","Left"];
dojo.forEach(["padding","margin","border"],function(_17){
for(var i=0;i<_16.length;i++){
var _18=_17+_16[i];
if(_17=="border"){
_17+="Width";
}
if(undefined!==_14[_18]){
_15[_18]=(_13)?_14[_18]:0;
}
}
});
return _15;
},_setStateClass:function(){
if(this.open){
dojo.removeClass(this.domNode,"dojoxToggleSplitterClosed");
dojo.addClass(this.domNode,"dojoxToggleSplitterOpen");
dojo.removeClass(this.toggleNode,"dojoxToggleSplitterIconClosed");
dojo.addClass(this.toggleNode,"dojoxToggleSplitterIconOpen");
}else{
dojo.addClass(this.domNode,"dojoxToggleSplitterClosed");
dojo.removeClass(this.domNode,"dojoxToggleSplitterOpen");
dojo.addClass(this.toggleNode,"dojoxToggleSplitterIconClosed");
dojo.removeClass(this.toggleNode,"dojoxToggleSplitterIconOpen");
}
},_setOpenAttr:function(_19){
if(!this._started){
return;
}
this.open=_19;
this._handleOnChange(_19,true);
var evt=this.open?"onOpen":"onClose";
this[evt](this.child);
},onOpen:function(){
},onClose:function(){
},_toggleMe:function(evt){
if(evt){
dojo.stopEvent(evt);
}
this.attr("open",!this.open);
},_onKeyPress:function(e){
this.inherited(arguments);
}});
dojo.extend(dijit._Widget,{toggleSplitterOpen:true,toggleSplitterClosedThreshold:5,toggleSplitterOpenSize:""});
} | PypiClean |
/Internet-in-a-Box-0.5.10.tar.gz/Internet-in-a-Box-0.5.10/iiab/iso639.py | iso6392 = {
"roh": {
"terminologic": "",
"alpha2": "rm",
"french": "romanche",
"bibliographic": "roh",
"english": "Romansh"
},
"gv": {
"terminologic": "",
"alpha2": "gv",
"french": "manx; mannois",
"bibliographic": "glv",
"english": "Manx"
},
"gu": {
"terminologic": "",
"alpha2": "gu",
"french": "goudjrati",
"bibliographic": "guj",
"english": "Gujarati"
},
"scn": {
"terminologic": "",
"alpha2": "",
"french": "sicilien",
"bibliographic": "scn",
"english": "Sicilian"
},
"rom": {
"terminologic": "",
"alpha2": "",
"french": "tsigane",
"bibliographic": "rom",
"english": "Romany"
},
"ron": {
"terminologic": "ron",
"alpha2": "ro",
"french": "roumain; moldave",
"bibliographic": "rum",
"english": "Romanian; Moldavian; Moldovan"
},
"alg": {
"terminologic": "",
"alpha2": "",
"french": "algonquines, langues",
"bibliographic": "alg",
"english": "Algonquian languages"
},
"oss": {
"terminologic": "",
"alpha2": "os",
"french": "oss\u00e8te",
"bibliographic": "oss",
"english": "Ossetian; Ossetic"
},
"ale": {
"terminologic": "",
"alpha2": "",
"french": "al\u00e9oute",
"bibliographic": "ale",
"english": "Aleut"
},
"alb": {
"terminologic": "sqi",
"alpha2": "sq",
"french": "albanais",
"bibliographic": "alb",
"english": "Albanian"
},
"sco": {
"terminologic": "",
"alpha2": "",
"french": "\u00e9cossais",
"bibliographic": "sco",
"english": "Scots"
},
"mni": {
"terminologic": "",
"alpha2": "",
"french": "manipuri",
"bibliographic": "mni",
"english": "Manipuri"
},
"gd": {
"terminologic": "",
"alpha2": "gd",
"french": "ga\u00e9lique; ga\u00e9lique \u00e9cossais",
"bibliographic": "gla",
"english": "Gaelic; Scottish Gaelic"
},
"per": {
"terminologic": "fas",
"alpha2": "fa",
"french": "persan",
"bibliographic": "per",
"english": "Persian"
},
"ga": {
"terminologic": "",
"alpha2": "ga",
"french": "irlandais",
"bibliographic": "gle",
"english": "Irish"
},
"mno": {
"terminologic": "",
"alpha2": "",
"french": "manobo, langues",
"bibliographic": "mno",
"english": "Manobo languages"
},
"osa": {
"terminologic": "",
"alpha2": "",
"french": "osage",
"bibliographic": "osa",
"english": "Osage"
},
"gn": {
"terminologic": "",
"alpha2": "gn",
"french": "guarani",
"bibliographic": "grn",
"english": "Guarani"
},
"alt": {
"terminologic": "",
"alpha2": "",
"french": "altai du Sud",
"bibliographic": "alt",
"english": "Southern Altai"
},
"gl": {
"terminologic": "",
"alpha2": "gl",
"french": "galicien",
"bibliographic": "glg",
"english": "Galician"
},
"mwr": {
"terminologic": "",
"alpha2": "",
"french": "marvari",
"bibliographic": "mwr",
"english": "Marwari"
},
"smn": {
"terminologic": "",
"alpha2": "",
"french": "sami d'Inari",
"bibliographic": "smn",
"english": "Inari Sami"
},
"tw": {
"terminologic": "",
"alpha2": "tw",
"french": "twi",
"bibliographic": "twi",
"english": "Twi"
},
"tt": {
"terminologic": "",
"alpha2": "tt",
"french": "tatar",
"bibliographic": "tat",
"english": "Tatar"
},
"tr": {
"terminologic": "",
"alpha2": "tr",
"french": "turc",
"bibliographic": "tur",
"english": "Turkish"
},
"ts": {
"terminologic": "",
"alpha2": "ts",
"french": "tsonga",
"bibliographic": "tso",
"english": "Tsonga"
},
"tn": {
"terminologic": "",
"alpha2": "tn",
"french": "tswana",
"bibliographic": "tsn",
"english": "Tswana"
},
"to": {
"terminologic": "",
"alpha2": "to",
"french": "tongan (\u00celes Tonga)",
"bibliographic": "ton",
"english": "Tonga (Tonga Islands)"
},
"aus": {
"terminologic": "",
"alpha2": "",
"french": "australiennes, langues",
"bibliographic": "aus",
"english": "Australian languages"
},
"tk": {
"terminologic": "",
"alpha2": "tk",
"french": "turkm\u00e8ne",
"bibliographic": "tuk",
"english": "Turkmen"
},
"th": {
"terminologic": "",
"alpha2": "th",
"french": "tha\u00ef",
"bibliographic": "tha",
"english": "Thai"
},
"roa": {
"terminologic": "",
"alpha2": "",
"french": "romanes, langues",
"bibliographic": "roa",
"english": "Romance languages"
},
"ven": {
"terminologic": "",
"alpha2": "ve",
"french": "venda",
"bibliographic": "ven",
"english": "Venda"
},
"tg": {
"terminologic": "",
"alpha2": "tg",
"french": "tadjik",
"bibliographic": "tgk",
"english": "Tajik"
},
"te": {
"terminologic": "",
"alpha2": "te",
"french": "t\u00e9lougou",
"bibliographic": "tel",
"english": "Telugu"
},
"uga": {
"terminologic": "",
"alpha2": "",
"french": "ougaritique",
"bibliographic": "uga",
"english": "Ugaritic"
},
"mwl": {
"terminologic": "",
"alpha2": "",
"french": "mirandais",
"bibliographic": "mwl",
"english": "Mirandese"
},
"ty": {
"terminologic": "",
"alpha2": "ty",
"french": "tahitien",
"bibliographic": "tah",
"english": "Tahitian"
},
"fas": {
"terminologic": "fas",
"alpha2": "fa",
"french": "persan",
"bibliographic": "per",
"english": "Persian"
},
"fat": {
"terminologic": "",
"alpha2": "",
"french": "fanti",
"bibliographic": "fat",
"english": "Fanti"
},
"qaa-qtz": {
"terminologic": "",
"alpha2": "",
"french": "r\u00e9serv\u00e9e \u00e0 l'usage local",
"bibliographic": "qaa-qtz",
"english": "Reserved for local use"
},
"ay": {
"terminologic": "",
"alpha2": "ay",
"french": "aymara",
"bibliographic": "aym",
"english": "Aymara"
},
"fan": {
"terminologic": "",
"alpha2": "",
"french": "fang",
"bibliographic": "fan",
"english": "Fang"
},
"fao": {
"terminologic": "",
"alpha2": "fo",
"french": "f\u00e9ro\u00efen",
"bibliographic": "fao",
"english": "Faroese"
},
"wo": {
"terminologic": "",
"alpha2": "wo",
"french": "wolof",
"bibliographic": "wol",
"english": "Wolof"
},
"rm": {
"terminologic": "",
"alpha2": "rm",
"french": "romanche",
"bibliographic": "roh",
"english": "Romansh"
},
"sme": {
"terminologic": "",
"alpha2": "se",
"french": "sami du Nord",
"bibliographic": "sme",
"english": "Northern Sami"
},
"din": {
"terminologic": "",
"alpha2": "",
"french": "dinka",
"bibliographic": "din",
"english": "Dinka"
},
"hye": {
"terminologic": "hye",
"alpha2": "hy",
"french": "arm\u00e9nien",
"bibliographic": "arm",
"english": "Armenian"
},
"guj": {
"terminologic": "",
"alpha2": "gu",
"french": "goudjrati",
"bibliographic": "guj",
"english": "Gujarati"
},
"cmc": {
"terminologic": "",
"alpha2": "",
"french": "chames, langues",
"bibliographic": "cmc",
"english": "Chamic languages"
},
"srd": {
"terminologic": "",
"alpha2": "sc",
"french": "sarde",
"bibliographic": "srd",
"english": "Sardinian"
},
"mdr": {
"terminologic": "",
"alpha2": "",
"french": "mandar",
"bibliographic": "mdr",
"english": "Mandar"
},
"car": {
"terminologic": "",
"alpha2": "",
"french": "karib; galibi; carib",
"bibliographic": "car",
"english": "Galibi Carib"
},
"div": {
"terminologic": "",
"alpha2": "dv",
"french": "maldivien",
"bibliographic": "div",
"english": "Divehi; Dhivehi; Maldivian"
},
"zh": {
"terminologic": "zho",
"alpha2": "zh",
"french": "chinois",
"bibliographic": "chi",
"english": "Chinese"
},
"tem": {
"terminologic": "",
"alpha2": "",
"french": "temne",
"bibliographic": "tem",
"english": "Timne"
},
"xho": {
"terminologic": "",
"alpha2": "xh",
"french": "xhosa",
"bibliographic": "xho",
"english": "Xhosa"
},
"nwc": {
"terminologic": "",
"alpha2": "",
"french": "newari classique",
"bibliographic": "nwc",
"english": "Classical Newari; Old Newari; Classical Nepal Bhasa"
},
"za": {
"terminologic": "",
"alpha2": "za",
"french": "zhuang; chuang",
"bibliographic": "zha",
"english": "Zhuang; Chuang"
},
"mh": {
"terminologic": "",
"alpha2": "mh",
"french": "marshall",
"bibliographic": "mah",
"english": "Marshallese"
},
"mk": {
"terminologic": "mkd",
"alpha2": "mk",
"french": "mac\u00e9donien",
"bibliographic": "mac",
"english": "Macedonian"
},
"nbl": {
"terminologic": "",
"alpha2": "nr",
"french": "nd\u00e9b\u00e9l\u00e9 du Sud",
"bibliographic": "nbl",
"english": "Ndebele, South; South Ndebele"
},
"zu": {
"terminologic": "",
"alpha2": "zu",
"french": "zoulou",
"bibliographic": "zul",
"english": "Zulu"
},
"ter": {
"terminologic": "",
"alpha2": "",
"french": "tereno",
"bibliographic": "ter",
"english": "Tereno"
},
"tet": {
"terminologic": "",
"alpha2": "",
"french": "tetum",
"bibliographic": "tet",
"english": "Tetum"
},
"mnc": {
"terminologic": "",
"alpha2": "",
"french": "mandchou",
"bibliographic": "mnc",
"english": "Manchu"
},
"sun": {
"terminologic": "",
"alpha2": "su",
"french": "soundanais",
"bibliographic": "sun",
"english": "Sundanese"
},
"abk": {
"terminologic": "",
"alpha2": "ab",
"french": "abkhaze",
"bibliographic": "abk",
"english": "Abkhazian"
},
"suk": {
"terminologic": "",
"alpha2": "",
"french": "sukuma",
"bibliographic": "suk",
"english": "Sukuma"
},
"kur": {
"terminologic": "",
"alpha2": "ku",
"french": "kurde",
"bibliographic": "kur",
"english": "Kurdish"
},
"kum": {
"terminologic": "",
"alpha2": "",
"french": "koumyk",
"bibliographic": "kum",
"english": "Kumyk"
},
"slo": {
"terminologic": "slk",
"alpha2": "sk",
"french": "slovaque",
"bibliographic": "slo",
"english": "Slovak"
},
"sus": {
"terminologic": "",
"alpha2": "",
"french": "soussou",
"bibliographic": "sus",
"english": "Susu"
},
"new": {
"terminologic": "",
"alpha2": "",
"french": "nepal bhasa; newari",
"bibliographic": "new",
"english": "Nepal Bhasa; Newari"
},
"kua": {
"terminologic": "",
"alpha2": "kj",
"french": "kuanyama; kwanyama",
"bibliographic": "kua",
"english": "Kuanyama; Kwanyama"
},
"sux": {
"terminologic": "",
"alpha2": "",
"french": "sum\u00e9rien",
"bibliographic": "sux",
"english": "Sumerian"
},
"ms": {
"terminologic": "msa",
"alpha2": "ms",
"french": "malais",
"bibliographic": "may",
"english": "Malay"
},
"men": {
"terminologic": "",
"alpha2": "",
"french": "mend\u00e9",
"bibliographic": "men",
"english": "Mende"
},
"mul": {
"terminologic": "",
"alpha2": "",
"french": "multilingue",
"bibliographic": "mul",
"english": "Multiple languages"
},
"lez": {
"terminologic": "",
"alpha2": "",
"french": "lezghien",
"bibliographic": "lez",
"english": "Lezghian"
},
"gla": {
"terminologic": "",
"alpha2": "gd",
"french": "ga\u00e9lique; ga\u00e9lique \u00e9cossais",
"bibliographic": "gla",
"english": "Gaelic; Scottish Gaelic"
},
"bos": {
"terminologic": "",
"alpha2": "bs",
"french": "bosniaque",
"bibliographic": "bos",
"english": "Bosnian"
},
"gle": {
"terminologic": "",
"alpha2": "ga",
"french": "irlandais",
"bibliographic": "gle",
"english": "Irish"
},
"eka": {
"terminologic": "",
"alpha2": "",
"french": "ekajuk",
"bibliographic": "eka",
"english": "Ekajuk"
},
"glg": {
"terminologic": "",
"alpha2": "gl",
"french": "galicien",
"bibliographic": "glg",
"english": "Galician"
},
"akk": {
"terminologic": "",
"alpha2": "",
"french": "akkadien",
"bibliographic": "akk",
"english": "Akkadian"
},
"uzb": {
"terminologic": "",
"alpha2": "uz",
"french": "ouszbek",
"bibliographic": "uzb",
"english": "Uzbek"
},
"dra": {
"terminologic": "",
"alpha2": "",
"french": "dravidiennes, langues",
"bibliographic": "dra",
"english": "Dravidian languages"
},
"aka": {
"terminologic": "",
"alpha2": "ak",
"french": "akan",
"bibliographic": "aka",
"english": "Akan"
},
"bod": {
"terminologic": "bod",
"alpha2": "bo",
"french": "tib\u00e9tain",
"bibliographic": "tib",
"english": "Tibetan"
},
"glv": {
"terminologic": "",
"alpha2": "gv",
"french": "manx; mannois",
"bibliographic": "glv",
"english": "Manx"
},
"jrb": {
"terminologic": "",
"alpha2": "",
"french": "jud\u00e9o-arabe",
"bibliographic": "jrb",
"english": "Judeo-Arabic"
},
"vie": {
"terminologic": "",
"alpha2": "vi",
"french": "vietnamien",
"bibliographic": "vie",
"english": "Vietnamese"
},
"ipk": {
"terminologic": "",
"alpha2": "ik",
"french": "inupiaq",
"bibliographic": "ipk",
"english": "Inupiaq"
},
"rum": {
"terminologic": "ron",
"alpha2": "ro",
"french": "roumain; moldave",
"bibliographic": "rum",
"english": "Romanian; Moldavian; Moldovan"
},
"sgn": {
"terminologic": "",
"alpha2": "",
"french": "langues des signes",
"bibliographic": "sgn",
"english": "Sign Languages"
},
"sga": {
"terminologic": "",
"alpha2": "",
"french": "irlandais ancien (jusqu'\u00e0 900)",
"bibliographic": "sga",
"english": "Irish, Old (to 900)"
},
"afa": {
"terminologic": "",
"alpha2": "",
"french": "afro-asiatiques, langues",
"bibliographic": "afa",
"english": "Afro-Asiatic languages"
},
"bre": {
"terminologic": "",
"alpha2": "br",
"french": "breton",
"bibliographic": "bre",
"english": "Breton"
},
"apa": {
"terminologic": "",
"alpha2": "",
"french": "apaches, langues",
"bibliographic": "apa",
"english": "Apache languages"
},
"bra": {
"terminologic": "",
"alpha2": "",
"french": "braj",
"bibliographic": "bra",
"english": "Braj"
},
"aym": {
"terminologic": "",
"alpha2": "ay",
"french": "aymara",
"bibliographic": "aym",
"english": "Aymara"
},
"cha": {
"terminologic": "",
"alpha2": "ch",
"french": "chamorro",
"bibliographic": "cha",
"english": "Chamorro"
},
"chb": {
"terminologic": "",
"alpha2": "",
"french": "chibcha",
"bibliographic": "chb",
"english": "Chibcha"
},
"che": {
"terminologic": "",
"alpha2": "ce",
"french": "tch\u00e9tch\u00e8ne",
"bibliographic": "che",
"english": "Chechen"
},
"chg": {
"terminologic": "",
"alpha2": "",
"french": "djaghata\u00ef",
"bibliographic": "chg",
"english": "Chagatai"
},
"chi": {
"terminologic": "zho",
"alpha2": "zh",
"french": "chinois",
"bibliographic": "chi",
"english": "Chinese"
},
"chk": {
"terminologic": "",
"alpha2": "",
"french": "chuuk",
"bibliographic": "chk",
"english": "Chuukese"
},
"chm": {
"terminologic": "",
"alpha2": "",
"french": "mari",
"bibliographic": "chm",
"english": "Mari"
},
"chn": {
"terminologic": "",
"alpha2": "",
"french": "chinook, jargon",
"bibliographic": "chn",
"english": "Chinook jargon"
},
"cho": {
"terminologic": "",
"alpha2": "",
"french": "choctaw",
"bibliographic": "cho",
"english": "Choctaw"
},
"chp": {
"terminologic": "",
"alpha2": "",
"french": "chipewyan",
"bibliographic": "chp",
"english": "Chipewyan; Dene Suline"
},
"chr": {
"terminologic": "",
"alpha2": "",
"french": "cherokee",
"bibliographic": "chr",
"english": "Cherokee"
},
"chu": {
"terminologic": "",
"alpha2": "cu",
"french": "slavon d'\u00e9glise; vieux slave; slavon liturgique; vieux bulgare",
"bibliographic": "chu",
"english": "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"
},
"chv": {
"terminologic": "",
"alpha2": "cv",
"french": "tchouvache",
"bibliographic": "chv",
"english": "Chuvash"
},
"chy": {
"terminologic": "",
"alpha2": "",
"french": "cheyenne",
"bibliographic": "chy",
"english": "Cheyenne"
},
"msa": {
"terminologic": "msa",
"alpha2": "ms",
"french": "malais",
"bibliographic": "may",
"english": "Malay"
},
"ti": {
"terminologic": "",
"alpha2": "ti",
"french": "tigrigna",
"bibliographic": "tir",
"english": "Tigrinya"
},
"iii": {
"terminologic": "",
"alpha2": "ii",
"french": "yi de Sichuan",
"bibliographic": "iii",
"english": "Sichuan Yi; Nuosu"
},
"ml": {
"terminologic": "",
"alpha2": "ml",
"french": "malayalam",
"bibliographic": "mal",
"english": "Malayalam"
},
"vot": {
"terminologic": "",
"alpha2": "",
"french": "vote",
"bibliographic": "vot",
"english": "Votic"
},
"mg": {
"terminologic": "",
"alpha2": "mg",
"french": "malgache",
"bibliographic": "mlg",
"english": "Malagasy"
},
"ndo": {
"terminologic": "",
"alpha2": "ng",
"french": "ndonga",
"bibliographic": "ndo",
"english": "Ndonga"
},
"ibo": {
"terminologic": "",
"alpha2": "ig",
"french": "igbo",
"bibliographic": "ibo",
"english": "Igbo"
},
"iba": {
"terminologic": "",
"alpha2": "",
"french": "iban",
"bibliographic": "iba",
"english": "Iban"
},
"mn": {
"terminologic": "",
"alpha2": "mn",
"french": "mongol",
"bibliographic": "mon",
"english": "Mongolian"
},
"mi": {
"terminologic": "mri",
"alpha2": "mi",
"french": "maori",
"bibliographic": "mao",
"english": "Maori"
},
"deu": {
"terminologic": "deu",
"alpha2": "de",
"french": "allemand",
"bibliographic": "ger",
"english": "German"
},
"cau": {
"terminologic": "",
"alpha2": "",
"french": "caucasiennes, langues",
"bibliographic": "cau",
"english": "Caucasian languages"
},
"cat": {
"terminologic": "",
"alpha2": "ca",
"french": "catalan; valencien",
"bibliographic": "cat",
"english": "Catalan; Valencian"
},
"mt": {
"terminologic": "",
"alpha2": "mt",
"french": "maltais",
"bibliographic": "mlt",
"english": "Maltese"
},
"cai": {
"terminologic": "",
"alpha2": "",
"french": "am\u00e9rindiennes de L'Am\u00e9rique centrale, langues",
"bibliographic": "cai",
"english": "Central American Indian languages"
},
"del": {
"terminologic": "",
"alpha2": "",
"french": "delaware",
"bibliographic": "del",
"english": "Delaware"
},
"den": {
"terminologic": "",
"alpha2": "",
"french": "esclave (athapascan)",
"bibliographic": "den",
"english": "Slave (Athapascan)"
},
"mr": {
"terminologic": "",
"alpha2": "mr",
"french": "marathe",
"bibliographic": "mar",
"english": "Marathi"
},
"ta": {
"terminologic": "",
"alpha2": "ta",
"french": "tamoul",
"bibliographic": "tam",
"english": "Tamil"
},
"my": {
"terminologic": "mya",
"alpha2": "my",
"french": "birman",
"bibliographic": "bur",
"english": "Burmese"
},
"cad": {
"terminologic": "",
"alpha2": "",
"french": "caddo",
"bibliographic": "cad",
"english": "Caddo"
},
"tat": {
"terminologic": "",
"alpha2": "tt",
"french": "tatar",
"bibliographic": "tat",
"english": "Tatar"
},
"oc": {
"terminologic": "",
"alpha2": "oc",
"french": "occitan (apr\u00e8s 1500); proven\u00e7al",
"bibliographic": "oci",
"english": "Occitan (post 1500); Proven\u00e7al"
},
"tam": {
"terminologic": "",
"alpha2": "ta",
"french": "tamoul",
"bibliographic": "tam",
"english": "Tamil"
},
"spa": {
"terminologic": "",
"alpha2": "es",
"french": "espagnol; castillan",
"bibliographic": "spa",
"english": "Spanish; Castilian"
},
"tah": {
"terminologic": "",
"alpha2": "ty",
"french": "tahitien",
"bibliographic": "tah",
"english": "Tahitian"
},
"tai": {
"terminologic": "",
"alpha2": "",
"french": "tai, langues",
"bibliographic": "tai",
"english": "Tai languages"
},
"cze": {
"terminologic": "ces",
"alpha2": "cs",
"french": "tch\u00e8que",
"bibliographic": "cze",
"english": "Czech"
},
"afh": {
"terminologic": "",
"alpha2": "",
"french": "afrihili",
"bibliographic": "afh",
"english": "Afrihili"
},
"eng": {
"terminologic": "",
"alpha2": "en",
"french": "anglais",
"bibliographic": "eng",
"english": "English"
},
"enm": {
"terminologic": "",
"alpha2": "",
"french": "anglais moyen (1100-1500)",
"bibliographic": "enm",
"english": "English, Middle (1100-1500)"
},
"ava": {
"terminologic": "",
"alpha2": "av",
"french": "avar",
"bibliographic": "ava",
"english": "Avaric"
},
"nyn": {
"terminologic": "",
"alpha2": "",
"french": "nyankol\u00e9",
"bibliographic": "nyn",
"english": "Nyankole"
},
"nyo": {
"terminologic": "",
"alpha2": "",
"french": "nyoro",
"bibliographic": "nyo",
"english": "Nyoro"
},
"gez": {
"terminologic": "",
"alpha2": "",
"french": "gu\u00e8ze",
"bibliographic": "gez",
"english": "Geez"
},
"nya": {
"terminologic": "",
"alpha2": "ny",
"french": "chichewa; chewa; nyanja",
"bibliographic": "nya",
"english": "Chichewa; Chewa; Nyanja"
},
"sio": {
"terminologic": "",
"alpha2": "",
"french": "sioux, langues",
"bibliographic": "sio",
"english": "Siouan languages"
},
"sin": {
"terminologic": "",
"alpha2": "si",
"french": "singhalais",
"bibliographic": "sin",
"english": "Sinhala; Sinhalese"
},
"afr": {
"terminologic": "",
"alpha2": "af",
"french": "afrikaans",
"bibliographic": "afr",
"english": "Afrikaans"
},
"lam": {
"terminologic": "",
"alpha2": "",
"french": "lamba",
"bibliographic": "lam",
"english": "Lamba"
},
"fr": {
"terminologic": "fra",
"alpha2": "fr",
"french": "fran\u00e7ais",
"bibliographic": "fre",
"english": "French"
},
"lao": {
"terminologic": "",
"alpha2": "lo",
"french": "lao",
"bibliographic": "lao",
"english": "Lao"
},
"lah": {
"terminologic": "",
"alpha2": "",
"french": "lahnda",
"bibliographic": "lah",
"english": "Lahnda"
},
"nym": {
"terminologic": "",
"alpha2": "",
"french": "nyamwezi",
"bibliographic": "nym",
"english": "Nyamwezi"
},
"sna": {
"terminologic": "",
"alpha2": "sn",
"french": "shona",
"bibliographic": "sna",
"english": "Shona"
},
"lad": {
"terminologic": "",
"alpha2": "",
"french": "jud\u00e9o-espagnol",
"bibliographic": "lad",
"english": "Ladino"
},
"fy": {
"terminologic": "",
"alpha2": "fy",
"french": "frison occidental",
"bibliographic": "fry",
"english": "Western Frisian"
},
"snk": {
"terminologic": "",
"alpha2": "",
"french": "sonink\u00e9",
"bibliographic": "snk",
"english": "Soninke"
},
"fa": {
"terminologic": "fas",
"alpha2": "fa",
"french": "persan",
"bibliographic": "per",
"english": "Persian"
},
"mac": {
"terminologic": "mkd",
"alpha2": "mk",
"french": "mac\u00e9donien",
"bibliographic": "mac",
"english": "Macedonian"
},
"mad": {
"terminologic": "",
"alpha2": "",
"french": "madourais",
"bibliographic": "mad",
"english": "Madurese"
},
"ff": {
"terminologic": "",
"alpha2": "ff",
"french": "peul",
"bibliographic": "ful",
"english": "Fulah"
},
"lat": {
"terminologic": "",
"alpha2": "la",
"french": "latin",
"bibliographic": "lat",
"english": "Latin"
},
"fi": {
"terminologic": "",
"alpha2": "fi",
"french": "finnois",
"bibliographic": "fin",
"english": "Finnish"
},
"fj": {
"terminologic": "",
"alpha2": "fj",
"french": "fidjien",
"bibliographic": "fij",
"english": "Fijian"
},
"mal": {
"terminologic": "",
"alpha2": "ml",
"french": "malayalam",
"bibliographic": "mal",
"english": "Malayalam"
},
"mao": {
"terminologic": "mri",
"alpha2": "mi",
"french": "maori",
"bibliographic": "mao",
"english": "Maori"
},
"fo": {
"terminologic": "",
"alpha2": "fo",
"french": "f\u00e9ro\u00efen",
"bibliographic": "fao",
"english": "Faroese"
},
"mak": {
"terminologic": "",
"alpha2": "",
"french": "makassar",
"bibliographic": "mak",
"english": "Makasar"
},
"egy": {
"terminologic": "",
"alpha2": "",
"french": "\u00e9gyptien",
"bibliographic": "egy",
"english": "Egyptian (Ancient)"
},
"znd": {
"terminologic": "",
"alpha2": "",
"french": "zand\u00e9, langues",
"bibliographic": "znd",
"english": "Zande languages"
},
"ss": {
"terminologic": "",
"alpha2": "ss",
"french": "swati",
"bibliographic": "ssw",
"english": "Swati"
},
"sr": {
"terminologic": "",
"alpha2": "sr",
"french": "serbe",
"bibliographic": "srp",
"english": "Serbian"
},
"sq": {
"terminologic": "sqi",
"alpha2": "sq",
"french": "albanais",
"bibliographic": "alb",
"english": "Albanian"
},
"sit": {
"terminologic": "",
"alpha2": "",
"french": "sino-tib\u00e9taines, langues",
"bibliographic": "sit",
"english": "Sino-Tibetan languages"
},
"sw": {
"terminologic": "",
"alpha2": "sw",
"french": "swahili",
"bibliographic": "swa",
"english": "Swahili"
},
"sv": {
"terminologic": "",
"alpha2": "sv",
"french": "su\u00e9dois",
"bibliographic": "swe",
"english": "Swedish"
},
"su": {
"terminologic": "",
"alpha2": "su",
"french": "soundanais",
"bibliographic": "sun",
"english": "Sundanese"
},
"st": {
"terminologic": "",
"alpha2": "st",
"french": "sotho du Sud",
"bibliographic": "sot",
"english": "Sotho, Southern"
},
"sk": {
"terminologic": "slk",
"alpha2": "sk",
"french": "slovaque",
"bibliographic": "slo",
"english": "Slovak"
},
"si": {
"terminologic": "",
"alpha2": "si",
"french": "singhalais",
"bibliographic": "sin",
"english": "Sinhala; Sinhalese"
},
"so": {
"terminologic": "",
"alpha2": "so",
"french": "somali",
"bibliographic": "som",
"english": "Somali"
},
"sn": {
"terminologic": "",
"alpha2": "sn",
"french": "shona",
"bibliographic": "sna",
"english": "Shona"
},
"sm": {
"terminologic": "",
"alpha2": "sm",
"french": "samoan",
"bibliographic": "smo",
"english": "Samoan"
},
"sl": {
"terminologic": "",
"alpha2": "sl",
"french": "slov\u00e8ne",
"bibliographic": "slv",
"english": "Slovenian"
},
"sc": {
"terminologic": "",
"alpha2": "sc",
"french": "sarde",
"bibliographic": "srd",
"english": "Sardinian"
},
"sa": {
"terminologic": "",
"alpha2": "sa",
"french": "sanskrit",
"bibliographic": "san",
"english": "Sanskrit"
},
"sg": {
"terminologic": "",
"alpha2": "sg",
"french": "sango",
"bibliographic": "sag",
"english": "Sango"
},
"se": {
"terminologic": "",
"alpha2": "se",
"french": "sami du Nord",
"bibliographic": "sme",
"english": "Northern Sami"
},
"sd": {
"terminologic": "",
"alpha2": "sd",
"french": "sindhi",
"bibliographic": "snd",
"english": "Sindhi"
},
"zen": {
"terminologic": "",
"alpha2": "",
"french": "zenaga",
"bibliographic": "zen",
"english": "Zenaga"
},
"kbd": {
"terminologic": "",
"alpha2": "",
"french": "kabardien",
"bibliographic": "kbd",
"english": "Kabardian"
},
"ita": {
"terminologic": "",
"alpha2": "it",
"french": "italien",
"bibliographic": "ita",
"english": "Italian"
},
"vai": {
"terminologic": "",
"alpha2": "",
"french": "va\u00ef",
"bibliographic": "vai",
"english": "Vai"
},
"csb": {
"terminologic": "",
"alpha2": "",
"french": "kachoube",
"bibliographic": "csb",
"english": "Kashubian"
},
"tsn": {
"terminologic": "",
"alpha2": "tn",
"french": "tswana",
"bibliographic": "tsn",
"english": "Tswana"
},
"lg": {
"terminologic": "",
"alpha2": "lg",
"french": "ganda",
"bibliographic": "lug",
"english": "Ganda"
},
"pt": {
"terminologic": "",
"alpha2": "pt",
"french": "portugais",
"bibliographic": "por",
"english": "Portuguese"
},
"lb": {
"terminologic": "",
"alpha2": "lb",
"french": "luxembourgeois",
"bibliographic": "ltz",
"english": "Luxembourgish; Letzeburgesch"
},
"fiu": {
"terminologic": "",
"alpha2": "",
"french": "finno-ougriennes, langues",
"bibliographic": "fiu",
"english": "Finno-Ugrian languages"
},
"ln": {
"terminologic": "",
"alpha2": "ln",
"french": "lingala",
"bibliographic": "lin",
"english": "Lingala"
},
"geo": {
"terminologic": "kat",
"alpha2": "ka",
"french": "g\u00e9orgien",
"bibliographic": "geo",
"english": "Georgian"
},
"li": {
"terminologic": "",
"alpha2": "li",
"french": "limbourgeois",
"bibliographic": "lim",
"english": "Limburgan; Limburger; Limburgish"
},
"byn": {
"terminologic": "",
"alpha2": "",
"french": "blin; bilen",
"bibliographic": "byn",
"english": "Blin; Bilin"
},
"lt": {
"terminologic": "",
"alpha2": "lt",
"french": "lituanien",
"bibliographic": "lit",
"english": "Lithuanian"
},
"lu": {
"terminologic": "",
"alpha2": "lu",
"french": "luba-katanga",
"bibliographic": "lub",
"english": "Luba-Katanga"
},
"gem": {
"terminologic": "",
"alpha2": "",
"french": "germaniques, langues",
"bibliographic": "gem",
"english": "Germanic languages"
},
"fij": {
"terminologic": "",
"alpha2": "fj",
"french": "fidjien",
"bibliographic": "fij",
"english": "Fijian"
},
"fin": {
"terminologic": "",
"alpha2": "fi",
"french": "finnois",
"bibliographic": "fin",
"english": "Finnish"
},
"eus": {
"terminologic": "eus",
"alpha2": "eu",
"french": "basque",
"bibliographic": "baq",
"english": "Basque"
},
"yi": {
"terminologic": "",
"alpha2": "yi",
"french": "yiddish",
"bibliographic": "yid",
"english": "Yiddish"
},
"non": {
"terminologic": "",
"alpha2": "",
"french": "norrois, vieux",
"bibliographic": "non",
"english": "Norse, Old"
},
"ceb": {
"terminologic": "",
"alpha2": "",
"french": "cebuano",
"bibliographic": "ceb",
"english": "Cebuano"
},
"yo": {
"terminologic": "",
"alpha2": "yo",
"french": "yoruba",
"bibliographic": "yor",
"english": "Yoruba"
},
"dan": {
"terminologic": "",
"alpha2": "da",
"french": "danois",
"bibliographic": "dan",
"english": "Danish"
},
"cel": {
"terminologic": "",
"alpha2": "",
"french": "celtiques, langues; celtes, langues",
"bibliographic": "cel",
"english": "Celtic languages"
},
"bat": {
"terminologic": "",
"alpha2": "",
"french": "baltes, langues",
"bibliographic": "bat",
"english": "Baltic languages"
},
"nob": {
"terminologic": "",
"alpha2": "nb",
"french": "norv\u00e9gien bokm\u00e5l",
"bibliographic": "nob",
"english": "Bokm\u00e5l, Norwegian; Norwegian Bokm\u00e5l"
},
"dak": {
"terminologic": "",
"alpha2": "",
"french": "dakota",
"bibliographic": "dak",
"english": "Dakota"
},
"ces": {
"terminologic": "ces",
"alpha2": "cs",
"french": "tch\u00e8que",
"bibliographic": "cze",
"english": "Czech"
},
"dar": {
"terminologic": "",
"alpha2": "",
"french": "dargwa",
"bibliographic": "dar",
"english": "Dargwa"
},
"qu": {
"terminologic": "",
"alpha2": "qu",
"french": "quechua",
"bibliographic": "que",
"english": "Quechua"
},
"day": {
"terminologic": "",
"alpha2": "",
"french": "dayak, langues",
"bibliographic": "day",
"english": "Land Dayak languages"
},
"nor": {
"terminologic": "",
"alpha2": "no",
"french": "norv\u00e9gien",
"bibliographic": "nor",
"english": "Norwegian"
},
"gba": {
"terminologic": "",
"alpha2": "",
"french": "gbaya",
"bibliographic": "gba",
"english": "Gbaya"
},
"ssa": {
"terminologic": "",
"alpha2": "",
"french": "nilo-sahariennes, langues",
"bibliographic": "ssa",
"english": "Nilo-Saharan languages"
},
"kpe": {
"terminologic": "",
"alpha2": "",
"french": "kpell\u00e9",
"bibliographic": "kpe",
"english": "Kpelle"
},
"man": {
"terminologic": "",
"alpha2": "",
"french": "mandingue",
"bibliographic": "man",
"english": "Mandingo"
},
"wel": {
"terminologic": "cym",
"alpha2": "cy",
"french": "gallois",
"bibliographic": "wel",
"english": "Welsh"
},
"el": {
"terminologic": "ell",
"alpha2": "el",
"french": "grec moderne (apr\u00e8s 1453)",
"bibliographic": "gre",
"english": "Greek, Modern (1453-)"
},
"eo": {
"terminologic": "",
"alpha2": "eo",
"french": "esp\u00e9ranto",
"bibliographic": "epo",
"english": "Esperanto"
},
"en": {
"terminologic": "",
"alpha2": "en",
"french": "anglais",
"bibliographic": "eng",
"english": "English"
},
"map": {
"terminologic": "",
"alpha2": "",
"french": "austron\u00e9siennes, langues",
"bibliographic": "map",
"english": "Austronesian languages"
},
"ee": {
"terminologic": "",
"alpha2": "ee",
"french": "\u00e9w\u00e9",
"bibliographic": "ewe",
"english": "Ewe"
},
"tpi": {
"terminologic": "",
"alpha2": "",
"french": "tok pisin",
"bibliographic": "tpi",
"english": "Tok Pisin"
},
"mdf": {
"terminologic": "",
"alpha2": "",
"french": "moksa",
"bibliographic": "mdf",
"english": "Moksha"
},
"mas": {
"terminologic": "",
"alpha2": "",
"french": "massa\u00ef",
"bibliographic": "mas",
"english": "Masai"
},
"mar": {
"terminologic": "",
"alpha2": "mr",
"french": "marathe",
"bibliographic": "mar",
"english": "Marathi"
},
"eu": {
"terminologic": "eus",
"alpha2": "eu",
"french": "basque",
"bibliographic": "baq",
"english": "Basque"
},
"et": {
"terminologic": "",
"alpha2": "et",
"french": "estonien",
"bibliographic": "est",
"english": "Estonian"
},
"es": {
"terminologic": "",
"alpha2": "es",
"french": "espagnol; castillan",
"bibliographic": "spa",
"english": "Spanish; Castilian"
},
"ru": {
"terminologic": "",
"alpha2": "ru",
"french": "russe",
"bibliographic": "rus",
"english": "Russian"
},
"rw": {
"terminologic": "",
"alpha2": "rw",
"french": "rwanda",
"bibliographic": "kin",
"english": "Kinyarwanda"
},
"goh": {
"terminologic": "",
"alpha2": "",
"french": "allemand, vieux haut (ca. 750-1050)",
"bibliographic": "goh",
"english": "German, Old High (ca.750-1050)"
},
"sms": {
"terminologic": "",
"alpha2": "",
"french": "sami skolt",
"bibliographic": "sms",
"english": "Skolt Sami"
},
"tiv": {
"terminologic": "",
"alpha2": "",
"french": "tiv",
"bibliographic": "tiv",
"english": "Tiv"
},
"smo": {
"terminologic": "",
"alpha2": "sm",
"french": "samoan",
"bibliographic": "smo",
"english": "Samoan"
},
"may": {
"terminologic": "msa",
"alpha2": "ms",
"french": "malais",
"bibliographic": "may",
"english": "Malay"
},
"smj": {
"terminologic": "",
"alpha2": "",
"french": "sami de Lule",
"bibliographic": "smj",
"english": "Lule Sami"
},
"smi": {
"terminologic": "",
"alpha2": "",
"french": "sames, langues",
"bibliographic": "smi",
"english": "Sami languages"
},
"nic": {
"terminologic": "",
"alpha2": "",
"french": "nig\u00e9ro-kordofaniennes, langues",
"bibliographic": "nic",
"english": "Niger-Kordofanian languages"
},
"got": {
"terminologic": "",
"alpha2": "",
"french": "gothique",
"bibliographic": "got",
"english": "Gothic"
},
"rn": {
"terminologic": "",
"alpha2": "rn",
"french": "rundi",
"bibliographic": "run",
"english": "Rundi"
},
"ro": {
"terminologic": "ron",
"alpha2": "ro",
"french": "roumain; moldave",
"bibliographic": "rum",
"english": "Romanian; Moldavian; Moldovan"
},
"dsb": {
"terminologic": "",
"alpha2": "",
"french": "bas-sorabe",
"bibliographic": "dsb",
"english": "Lower Sorbian"
},
"sma": {
"terminologic": "",
"alpha2": "",
"french": "sami du Sud",
"bibliographic": "sma",
"english": "Southern Sami"
},
"gor": {
"terminologic": "",
"alpha2": "",
"french": "gorontalo",
"bibliographic": "gor",
"english": "Gorontalo"
},
"ast": {
"terminologic": "",
"alpha2": "",
"french": "asturien; bable; l\u00e9onais; asturol\u00e9onais",
"bibliographic": "ast",
"english": "Asturian; Bable; Leonese; Asturleonese"
},
"orm": {
"terminologic": "",
"alpha2": "om",
"french": "galla",
"bibliographic": "orm",
"english": "Oromo"
},
"que": {
"terminologic": "",
"alpha2": "qu",
"french": "quechua",
"bibliographic": "que",
"english": "Quechua"
},
"ori": {
"terminologic": "",
"alpha2": "or",
"french": "oriya",
"bibliographic": "ori",
"english": "Oriya"
},
"crh": {
"terminologic": "",
"alpha2": "",
"french": "tatar de Crim\u00e9",
"bibliographic": "crh",
"english": "Crimean Tatar; Crimean Turkish"
},
"asm": {
"terminologic": "",
"alpha2": "as",
"french": "assamais",
"bibliographic": "asm",
"english": "Assamese"
},
"pus": {
"terminologic": "",
"alpha2": "ps",
"french": "pachto",
"bibliographic": "pus",
"english": "Pushto; Pashto"
},
"dgr": {
"terminologic": "",
"alpha2": "",
"french": "dogrib",
"bibliographic": "dgr",
"english": "Dogrib"
},
"ltz": {
"terminologic": "",
"alpha2": "lb",
"french": "luxembourgeois",
"bibliographic": "ltz",
"english": "Luxembourgish; Letzeburgesch"
},
"ath": {
"terminologic": "",
"alpha2": "",
"french": "athapascanes, langues",
"bibliographic": "ath",
"english": "Athapascan languages"
},
"wln": {
"terminologic": "",
"alpha2": "wa",
"french": "wallon",
"bibliographic": "wln",
"english": "Walloon"
},
"isl": {
"terminologic": "isl",
"alpha2": "is",
"french": "islandais",
"bibliographic": "ice",
"english": "Icelandic"
},
"xh": {
"terminologic": "",
"alpha2": "xh",
"french": "xhosa",
"bibliographic": "xho",
"english": "Xhosa"
},
"mag": {
"terminologic": "",
"alpha2": "",
"french": "magahi",
"bibliographic": "mag",
"english": "Magahi"
},
"mai": {
"terminologic": "",
"alpha2": "",
"french": "maithili",
"bibliographic": "mai",
"english": "Maithili"
},
"mah": {
"terminologic": "",
"alpha2": "mh",
"french": "marshall",
"bibliographic": "mah",
"english": "Marshallese"
},
"tel": {
"terminologic": "",
"alpha2": "te",
"french": "t\u00e9lougou",
"bibliographic": "tel",
"english": "Telugu"
},
"lav": {
"terminologic": "",
"alpha2": "lv",
"french": "letton",
"bibliographic": "lav",
"english": "Latvian"
},
"zap": {
"terminologic": "",
"alpha2": "",
"french": "zapot\u00e8que",
"bibliographic": "zap",
"english": "Zapotec"
},
"yid": {
"terminologic": "",
"alpha2": "yi",
"french": "yiddish",
"bibliographic": "yid",
"english": "Yiddish"
},
"kok": {
"terminologic": "",
"alpha2": "",
"french": "konkani",
"bibliographic": "kok",
"english": "Konkani"
},
"kom": {
"terminologic": "",
"alpha2": "kv",
"french": "kom",
"bibliographic": "kom",
"english": "Komi"
},
"kon": {
"terminologic": "",
"alpha2": "kg",
"french": "kongo",
"bibliographic": "kon",
"english": "Kongo"
},
"ukr": {
"terminologic": "",
"alpha2": "uk",
"french": "ukrainien",
"bibliographic": "ukr",
"english": "Ukrainian"
},
"ton": {
"terminologic": "",
"alpha2": "to",
"french": "tongan (\u00celes Tonga)",
"bibliographic": "ton",
"english": "Tonga (Tonga Islands)"
},
"zxx": {
"terminologic": "",
"alpha2": "",
"french": "pas de contenu linguistique; non applicable",
"bibliographic": "zxx",
"english": "No linguistic content; Not applicable"
},
"kos": {
"terminologic": "",
"alpha2": "",
"french": "kosrae",
"bibliographic": "kos",
"english": "Kosraean"
},
"kor": {
"terminologic": "",
"alpha2": "ko",
"french": "cor\u00e9en",
"bibliographic": "kor",
"english": "Korean"
},
"tog": {
"terminologic": "",
"alpha2": "",
"french": "tonga (Nyasa)",
"bibliographic": "tog",
"english": "Tonga (Nyasa)"
},
"hun": {
"terminologic": "",
"alpha2": "hu",
"french": "hongrois",
"bibliographic": "hun",
"english": "Hungarian"
},
"hup": {
"terminologic": "",
"alpha2": "",
"french": "hupa",
"bibliographic": "hup",
"english": "Hupa"
},
"lug": {
"terminologic": "",
"alpha2": "lg",
"french": "ganda",
"bibliographic": "lug",
"english": "Ganda"
},
"cym": {
"terminologic": "cym",
"alpha2": "cy",
"french": "gallois",
"bibliographic": "wel",
"english": "Welsh"
},
"udm": {
"terminologic": "",
"alpha2": "",
"french": "oudmourte",
"bibliographic": "udm",
"english": "Udmurt"
},
"bej": {
"terminologic": "",
"alpha2": "",
"french": "bedja",
"bibliographic": "bej",
"english": "Beja; Bedawiyet"
},
"ben": {
"terminologic": "",
"alpha2": "bn",
"french": "bengali",
"bibliographic": "ben",
"english": "Bengali"
},
"bel": {
"terminologic": "",
"alpha2": "be",
"french": "bi\u00e9lorusse",
"bibliographic": "bel",
"english": "Belarusian"
},
"bem": {
"terminologic": "",
"alpha2": "",
"french": "bemba",
"bibliographic": "bem",
"english": "Bemba"
},
"tsi": {
"terminologic": "",
"alpha2": "",
"french": "tsimshian",
"bibliographic": "tsi",
"english": "Tsimshian"
},
"ber": {
"terminologic": "",
"alpha2": "",
"french": "berb\u00e8res, langues",
"bibliographic": "ber",
"english": "Berber languages"
},
"nzi": {
"terminologic": "",
"alpha2": "",
"french": "nzema",
"bibliographic": "nzi",
"english": "Nzima"
},
"sai": {
"terminologic": "",
"alpha2": "",
"french": "indiennes d'Am\u00e9rique du Sud, autres langues",
"bibliographic": "sai",
"english": "South American Indian (Other)"
},
"ang": {
"terminologic": "",
"alpha2": "",
"french": "anglo-saxon (ca.450-1100)",
"bibliographic": "ang",
"english": "English, Old (ca.450-1100)"
},
"pra": {
"terminologic": "",
"alpha2": "",
"french": "pr\u00e2krit, langues",
"bibliographic": "pra",
"english": "Prakrit languages"
},
"san": {
"terminologic": "",
"alpha2": "sa",
"french": "sanskrit",
"bibliographic": "san",
"english": "Sanskrit"
},
"bho": {
"terminologic": "",
"alpha2": "",
"french": "bhojpuri",
"bibliographic": "bho",
"english": "Bhojpuri"
},
"sal": {
"terminologic": "",
"alpha2": "",
"french": "salishennes, langues",
"bibliographic": "sal",
"english": "Salishan languages"
},
"pro": {
"terminologic": "",
"alpha2": "",
"french": "proven\u00e7al ancien (jusqu'\u00e0 1500)",
"bibliographic": "pro",
"english": "Proven\u00e7al, Old (to 1500)"
},
"raj": {
"terminologic": "",
"alpha2": "",
"french": "rajasthani",
"bibliographic": "raj",
"english": "Rajasthani"
},
"sad": {
"terminologic": "",
"alpha2": "",
"french": "sandawe",
"bibliographic": "sad",
"english": "Sandawe"
},
"anp": {
"terminologic": "",
"alpha2": "",
"french": "angika",
"bibliographic": "anp",
"english": "Angika"
},
"rap": {
"terminologic": "",
"alpha2": "",
"french": "rapanui",
"bibliographic": "rap",
"english": "Rapanui"
},
"sas": {
"terminologic": "",
"alpha2": "",
"french": "sasak",
"bibliographic": "sas",
"english": "Sasak"
},
"nqo": {
"terminologic": "",
"alpha2": "",
"french": "n'ko",
"bibliographic": "nqo",
"english": "N'Ko"
},
"sat": {
"terminologic": "",
"alpha2": "",
"french": "santal",
"bibliographic": "sat",
"english": "Santali"
},
"min": {
"terminologic": "",
"alpha2": "",
"french": "minangkabau",
"bibliographic": "min",
"english": "Minangkabau"
},
"lim": {
"terminologic": "",
"alpha2": "li",
"french": "limbourgeois",
"bibliographic": "lim",
"english": "Limburgan; Limburger; Limburgish"
},
"lin": {
"terminologic": "",
"alpha2": "ln",
"french": "lingala",
"bibliographic": "lin",
"english": "Lingala"
},
"lit": {
"terminologic": "",
"alpha2": "lt",
"french": "lituanien",
"bibliographic": "lit",
"english": "Lithuanian"
},
"bur": {
"terminologic": "mya",
"alpha2": "my",
"french": "birman",
"bibliographic": "bur",
"english": "Burmese"
},
"srn": {
"terminologic": "",
"alpha2": "",
"french": "sranan tongo",
"bibliographic": "srn",
"english": "Sranan Tongo"
},
"btk": {
"terminologic": "",
"alpha2": "",
"french": "batak, langues",
"bibliographic": "btk",
"english": "Batak languages"
},
"ypk": {
"terminologic": "",
"alpha2": "",
"french": "yupik, langues",
"bibliographic": "ypk",
"english": "Yupik languages"
},
"mis": {
"terminologic": "",
"alpha2": "",
"french": "langues non cod\u00e9es",
"bibliographic": "mis",
"english": "Uncoded languages"
},
"kac": {
"terminologic": "",
"alpha2": "",
"french": "kachin; jingpho",
"bibliographic": "kac",
"english": "Kachin; Jingpho"
},
"kab": {
"terminologic": "",
"alpha2": "",
"french": "kabyle",
"bibliographic": "kab",
"english": "Kabyle"
},
"kaa": {
"terminologic": "",
"alpha2": "",
"french": "karakalpak",
"bibliographic": "kaa",
"english": "Kara-Kalpak"
},
"kan": {
"terminologic": "",
"alpha2": "kn",
"french": "kannada",
"bibliographic": "kan",
"english": "Kannada"
},
"kam": {
"terminologic": "",
"alpha2": "",
"french": "kamba",
"bibliographic": "kam",
"english": "Kamba"
},
"kal": {
"terminologic": "",
"alpha2": "kl",
"french": "groenlandais",
"bibliographic": "kal",
"english": "Kalaallisut; Greenlandic"
},
"kas": {
"terminologic": "",
"alpha2": "ks",
"french": "kashmiri",
"bibliographic": "kas",
"english": "Kashmiri"
},
"kar": {
"terminologic": "",
"alpha2": "",
"french": "karen, langues",
"bibliographic": "kar",
"english": "Karen languages"
},
"kaw": {
"terminologic": "",
"alpha2": "",
"french": "kawi",
"bibliographic": "kaw",
"english": "Kawi"
},
"kau": {
"terminologic": "",
"alpha2": "kr",
"french": "kanouri",
"bibliographic": "kau",
"english": "Kanuri"
},
"kat": {
"terminologic": "kat",
"alpha2": "ka",
"french": "g\u00e9orgien",
"bibliographic": "geo",
"english": "Georgian"
},
"kaz": {
"terminologic": "",
"alpha2": "kk",
"french": "kazakh",
"bibliographic": "kaz",
"english": "Kazakh"
},
"tyv": {
"terminologic": "",
"alpha2": "",
"french": "touva",
"bibliographic": "tyv",
"english": "Tuvinian"
},
"awa": {
"terminologic": "",
"alpha2": "",
"french": "awadhi",
"bibliographic": "awa",
"english": "Awadhi"
},
"urd": {
"terminologic": "",
"alpha2": "ur",
"french": "ourdou",
"bibliographic": "urd",
"english": "Urdu"
},
"ka": {
"terminologic": "kat",
"alpha2": "ka",
"french": "g\u00e9orgien",
"bibliographic": "geo",
"english": "Georgian"
},
"doi": {
"terminologic": "",
"alpha2": "",
"french": "dogri",
"bibliographic": "doi",
"english": "Dogri"
},
"kg": {
"terminologic": "",
"alpha2": "kg",
"french": "kongo",
"bibliographic": "kon",
"english": "Kongo"
},
"kk": {
"terminologic": "",
"alpha2": "kk",
"french": "kazakh",
"bibliographic": "kaz",
"english": "Kazakh"
},
"kj": {
"terminologic": "",
"alpha2": "kj",
"french": "kuanyama; kwanyama",
"bibliographic": "kua",
"english": "Kuanyama; Kwanyama"
},
"ki": {
"terminologic": "",
"alpha2": "ki",
"french": "kikuyu",
"bibliographic": "kik",
"english": "Kikuyu; Gikuyu"
},
"ko": {
"terminologic": "",
"alpha2": "ko",
"french": "cor\u00e9en",
"bibliographic": "kor",
"english": "Korean"
},
"kn": {
"terminologic": "",
"alpha2": "kn",
"french": "kannada",
"bibliographic": "kan",
"english": "Kannada"
},
"km": {
"terminologic": "",
"alpha2": "km",
"french": "khmer central",
"bibliographic": "khm",
"english": "Central Khmer"
},
"kl": {
"terminologic": "",
"alpha2": "kl",
"french": "groenlandais",
"bibliographic": "kal",
"english": "Kalaallisut; Greenlandic"
},
"ks": {
"terminologic": "",
"alpha2": "ks",
"french": "kashmiri",
"bibliographic": "kas",
"english": "Kashmiri"
},
"kr": {
"terminologic": "",
"alpha2": "kr",
"french": "kanouri",
"bibliographic": "kau",
"english": "Kanuri"
},
"kw": {
"terminologic": "",
"alpha2": "kw",
"french": "cornique",
"bibliographic": "cor",
"english": "Cornish"
},
"kv": {
"terminologic": "",
"alpha2": "kv",
"french": "kom",
"bibliographic": "kom",
"english": "Komi"
},
"ku": {
"terminologic": "",
"alpha2": "ku",
"french": "kurde",
"bibliographic": "kur",
"english": "Kurdish"
},
"ky": {
"terminologic": "",
"alpha2": "ky",
"french": "kirghiz",
"bibliographic": "kir",
"english": "Kirghiz; Kyrgyz"
},
"kut": {
"terminologic": "",
"alpha2": "",
"french": "kutenai",
"bibliographic": "kut",
"english": "Kutenai"
},
"tkl": {
"terminologic": "",
"alpha2": "",
"french": "tokelau",
"bibliographic": "tkl",
"english": "Tokelau"
},
"nld": {
"terminologic": "nld",
"alpha2": "nl",
"french": "n\u00e9erlandais; flamand",
"bibliographic": "dut",
"english": "Dutch; Flemish"
},
"oji": {
"terminologic": "",
"alpha2": "oj",
"french": "ojibwa",
"bibliographic": "oji",
"english": "Ojibwa"
},
"oci": {
"terminologic": "",
"alpha2": "oc",
"french": "occitan (apr\u00e8s 1500); proven\u00e7al",
"bibliographic": "oci",
"english": "Occitan (post 1500); Proven\u00e7al"
},
"bua": {
"terminologic": "",
"alpha2": "",
"french": "bouriate",
"bibliographic": "bua",
"english": "Buriat"
},
"wol": {
"terminologic": "",
"alpha2": "wo",
"french": "wolof",
"bibliographic": "wol",
"english": "Wolof"
},
"jav": {
"terminologic": "",
"alpha2": "jv",
"french": "javanais",
"bibliographic": "jav",
"english": "Javanese"
},
"hrv": {
"terminologic": "",
"alpha2": "hr",
"french": "croate",
"bibliographic": "hrv",
"english": "Croatian"
},
"zza": {
"terminologic": "",
"alpha2": "",
"french": "zaza; dimili; dimli; kirdki; kirmanjki; zazaki",
"bibliographic": "zza",
"english": "Zaza; Dimili; Dimli; Kirdki; Kirmanjki; Zazaki"
},
"ger": {
"terminologic": "deu",
"alpha2": "de",
"french": "allemand",
"bibliographic": "ger",
"english": "German"
},
"mga": {
"terminologic": "",
"alpha2": "",
"french": "irlandais moyen (900-1200)",
"bibliographic": "mga",
"english": "Irish, Middle (900-1200)"
},
"hit": {
"terminologic": "",
"alpha2": "",
"french": "hittite",
"bibliographic": "hit",
"english": "Hittite"
},
"dyu": {
"terminologic": "",
"alpha2": "",
"french": "dioula",
"bibliographic": "dyu",
"english": "Dyula"
},
"ssw": {
"terminologic": "",
"alpha2": "ss",
"french": "swati",
"bibliographic": "ssw",
"english": "Swati"
},
"de": {
"terminologic": "deu",
"alpha2": "de",
"french": "allemand",
"bibliographic": "ger",
"english": "German"
},
"da": {
"terminologic": "",
"alpha2": "da",
"french": "danois",
"bibliographic": "dan",
"english": "Danish"
},
"dz": {
"terminologic": "",
"alpha2": "dz",
"french": "dzongkha",
"bibliographic": "dzo",
"english": "Dzongkha"
},
"lui": {
"terminologic": "",
"alpha2": "",
"french": "luiseno",
"bibliographic": "lui",
"english": "Luiseno"
},
"dv": {
"terminologic": "",
"alpha2": "dv",
"french": "maldivien",
"bibliographic": "div",
"english": "Divehi; Dhivehi; Maldivian"
},
"hil": {
"terminologic": "",
"alpha2": "",
"french": "hiligaynon",
"bibliographic": "hil",
"english": "Hiligaynon"
},
"him": {
"terminologic": "",
"alpha2": "",
"french": "langues himachalis; langues paharis occidentales",
"bibliographic": "him",
"english": "Himachali languages; Western Pahari languages"
},
"hin": {
"terminologic": "",
"alpha2": "hi",
"french": "hindi",
"bibliographic": "hin",
"english": "Hindi"
},
"crp": {
"terminologic": "",
"alpha2": "",
"french": "cr\u00e9oles et pidgins",
"bibliographic": "crp",
        "english": "Creoles and pidgins"
},
"myn": {
"terminologic": "",
"alpha2": "",
"french": "maya, langues",
"bibliographic": "myn",
"english": "Mayan languages"
},
"bas": {
"terminologic": "",
"alpha2": "",
"french": "basa",
"bibliographic": "bas",
"english": "Basa"
},
"baq": {
"terminologic": "eus",
"alpha2": "eu",
"french": "basque",
"bibliographic": "baq",
"english": "Basque"
},
"bad": {
"terminologic": "",
"alpha2": "",
"french": "banda, langues",
"bibliographic": "bad",
"english": "Banda languages"
},
"nep": {
"terminologic": "",
"alpha2": "ne",
"french": "n\u00e9palais",
"bibliographic": "nep",
"english": "Nepali"
},
"cre": {
"terminologic": "",
"alpha2": "cr",
"french": "cree",
"bibliographic": "cre",
"english": "Cree"
},
"ban": {
"terminologic": "",
"alpha2": "",
"french": "balinais",
"bibliographic": "ban",
"english": "Balinese"
},
"bal": {
"terminologic": "",
"alpha2": "",
"french": "baloutchi",
"bibliographic": "bal",
"english": "Baluchi"
},
"bam": {
"terminologic": "",
"alpha2": "bm",
"french": "bambara",
"bibliographic": "bam",
"english": "Bambara"
},
"bak": {
"terminologic": "",
"alpha2": "ba",
"french": "bachkir",
"bibliographic": "bak",
"english": "Bashkir"
},
"shn": {
"terminologic": "",
"alpha2": "",
"french": "chan",
"bibliographic": "shn",
"english": "Shan"
},
"bai": {
"terminologic": "",
"alpha2": "",
"french": "bamil\u00e9k\u00e9, langues",
"bibliographic": "bai",
"english": "Bamileke languages"
},
"arp": {
"terminologic": "",
"alpha2": "",
"french": "arapaho",
"bibliographic": "arp",
"english": "Arapaho"
},
"art": {
"terminologic": "",
"alpha2": "",
"french": "artificielles, langues",
"bibliographic": "art",
"english": "Artificial languages"
},
"arw": {
"terminologic": "",
"alpha2": "",
"french": "arawak",
"bibliographic": "arw",
"english": "Arawak"
},
"ara": {
"terminologic": "",
"alpha2": "ar",
"french": "arabe",
"bibliographic": "ara",
"english": "Arabic"
},
"arc": {
"terminologic": "",
"alpha2": "",
"french": "aram\u00e9en d'empire (700-300 BCE)",
"bibliographic": "arc",
"english": "Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)"
},
"arg": {
"terminologic": "",
"alpha2": "an",
"french": "aragonais",
"bibliographic": "arg",
"english": "Aragonese"
},
"sem": {
"terminologic": "",
"alpha2": "",
"french": "s\u00e9mitiques, langues",
"bibliographic": "sem",
"english": "Semitic languages"
},
"sel": {
"terminologic": "",
"alpha2": "",
"french": "selkoupe",
"bibliographic": "sel",
"english": "Selkup"
},
"nub": {
"terminologic": "",
"alpha2": "",
"french": "nubiennes, langues",
"bibliographic": "nub",
"english": "Nubian languages"
},
"arm": {
"terminologic": "hye",
"alpha2": "hy",
"french": "arm\u00e9nien",
"bibliographic": "arm",
"english": "Armenian"
},
"arn": {
"terminologic": "",
"alpha2": "",
"french": "mapudungun; mapuche; mapuce",
"bibliographic": "arn",
"english": "Mapudungun; Mapuche"
},
"lus": {
"terminologic": "",
"alpha2": "",
"french": "lushai",
"bibliographic": "lus",
"english": "Lushai"
},
"wa": {
"terminologic": "",
"alpha2": "wa",
"french": "wallon",
"bibliographic": "wln",
"english": "Walloon"
},
"mus": {
"terminologic": "",
"alpha2": "",
"french": "muskogee",
"bibliographic": "mus",
"english": "Creek"
},
"lua": {
"terminologic": "",
"alpha2": "",
"french": "luba-lulua",
"bibliographic": "lua",
"english": "Luba-Lulua"
},
"lub": {
"terminologic": "",
"alpha2": "lu",
"french": "luba-katanga",
"bibliographic": "lub",
"english": "Luba-Katanga"
},
"iro": {
"terminologic": "",
"alpha2": "",
"french": "iroquoises, langues",
"bibliographic": "iro",
"english": "Iroquoian languages"
},
"ira": {
"terminologic": "",
"alpha2": "",
"french": "iraniennes, langues",
"bibliographic": "ira",
"english": "Iranian languages"
},
"mun": {
"terminologic": "",
"alpha2": "",
"french": "mounda, langues",
"bibliographic": "mun",
"english": "Munda languages"
},
"tur": {
"terminologic": "",
"alpha2": "tr",
"french": "turc",
"bibliographic": "tur",
"english": "Turkish"
},
"lun": {
"terminologic": "",
"alpha2": "",
"french": "lunda",
"bibliographic": "lun",
"english": "Lunda"
},
"luo": {
"terminologic": "",
"alpha2": "",
"french": "luo (Kenya et Tanzanie)",
"bibliographic": "luo",
"english": "Luo (Kenya and Tanzania)"
},
"iku": {
"terminologic": "",
"alpha2": "iu",
"french": "inuktitut",
"bibliographic": "iku",
"english": "Inuktitut"
},
"tso": {
"terminologic": "",
"alpha2": "ts",
"french": "tsonga",
"bibliographic": "tso",
"english": "Tsonga"
},
"tup": {
"terminologic": "",
"alpha2": "",
"french": "tupi, langues",
"bibliographic": "tup",
"english": "Tupi languages"
},
"jv": {
"terminologic": "",
"alpha2": "jv",
"french": "javanais",
"bibliographic": "jav",
"english": "Javanese"
},
"zbl": {
"terminologic": "",
"alpha2": "",
"french": "symboles Bliss; Bliss",
"bibliographic": "zbl",
"english": "Blissymbols; Blissymbolics; Bliss"
},
"tut": {
"terminologic": "",
"alpha2": "",
"french": "alta\u00efques, langues",
"bibliographic": "tut",
"english": "Altaic languages"
},
"tuk": {
"terminologic": "",
"alpha2": "tk",
"french": "turkm\u00e8ne",
"bibliographic": "tuk",
"english": "Turkmen"
},
"tum": {
"terminologic": "",
"alpha2": "",
"french": "tumbuka",
"bibliographic": "tum",
"english": "Tumbuka"
},
"ja": {
"terminologic": "",
"alpha2": "ja",
"french": "japonais",
"bibliographic": "jpn",
"english": "Japanese"
},
"cop": {
"terminologic": "",
"alpha2": "",
"french": "copte",
"bibliographic": "cop",
"english": "Coptic"
},
"cos": {
"terminologic": "",
"alpha2": "co",
"french": "corse",
"bibliographic": "cos",
"english": "Corsican"
},
"cor": {
"terminologic": "",
"alpha2": "kw",
"french": "cornique",
"bibliographic": "cor",
"english": "Cornish"
},
"ilo": {
"terminologic": "",
"alpha2": "",
"french": "ilocano",
"bibliographic": "ilo",
"english": "Iloko"
},
"la": {
"terminologic": "",
"alpha2": "la",
"french": "latin",
"bibliographic": "lat",
"english": "Latin"
},
"gwi": {
"terminologic": "",
"alpha2": "",
"french": "gwich'in",
"bibliographic": "gwi",
"english": "Gwich'in"
},
"und": {
"terminologic": "",
"alpha2": "",
"french": "ind\u00e9termin\u00e9e",
"bibliographic": "und",
"english": "Undetermined"
},
"lo": {
"terminologic": "",
"alpha2": "lo",
"french": "lao",
"bibliographic": "lao",
"english": "Lao"
},
"tli": {
"terminologic": "",
"alpha2": "",
"french": "tlingit",
"bibliographic": "tli",
"english": "Tlingit"
},
"tlh": {
"terminologic": "",
"alpha2": "",
"french": "klingon",
"bibliographic": "tlh",
"english": "Klingon; tlhIngan-Hol"
},
"nno": {
"terminologic": "",
"alpha2": "nn",
"french": "norv\u00e9gien nynorsk; nynorsk, norv\u00e9gien",
"bibliographic": "nno",
"english": "Norwegian Nynorsk; Nynorsk, Norwegian"
},
"ch": {
"terminologic": "",
"alpha2": "ch",
"french": "chamorro",
"bibliographic": "cha",
"english": "Chamorro"
},
"co": {
"terminologic": "",
"alpha2": "co",
"french": "corse",
"bibliographic": "cos",
"english": "Corsican"
},
"ca": {
"terminologic": "",
"alpha2": "ca",
"french": "catalan; valencien",
"bibliographic": "cat",
"english": "Catalan; Valencian"
},
"por": {
"terminologic": "",
"alpha2": "pt",
"french": "portugais",
"bibliographic": "por",
"english": "Portuguese"
},
"ce": {
"terminologic": "",
"alpha2": "ce",
"french": "tch\u00e9tch\u00e8ne",
"bibliographic": "che",
"english": "Chechen"
},
"pon": {
"terminologic": "",
"alpha2": "",
"french": "pohnpei",
"bibliographic": "pon",
"english": "Pohnpeian"
},
"pol": {
"terminologic": "",
"alpha2": "pl",
"french": "polonais",
"bibliographic": "pol",
"english": "Polish"
},
"sah": {
"terminologic": "",
"alpha2": "",
"french": "iakoute",
"bibliographic": "sah",
"english": "Yakut"
},
"cs": {
"terminologic": "ces",
"alpha2": "cs",
"french": "tch\u00e8que",
"bibliographic": "cze",
"english": "Czech"
},
"cr": {
"terminologic": "",
"alpha2": "cr",
"french": "cree",
"bibliographic": "cre",
"english": "Cree"
},
"bnt": {
"terminologic": "",
"alpha2": "",
"french": "bantoues, autres langues",
"bibliographic": "bnt",
"english": "Bantu (Other)"
},
"cv": {
"terminologic": "",
"alpha2": "cv",
"french": "tchouvache",
"bibliographic": "chv",
"english": "Chuvash"
},
"cu": {
"terminologic": "",
"alpha2": "cu",
"french": "slavon d'\u00e9glise; vieux slave; slavon liturgique; vieux bulgare",
"bibliographic": "chu",
"english": "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"
},
"lv": {
"terminologic": "",
"alpha2": "lv",
"french": "letton",
"bibliographic": "lav",
"english": "Latvian"
},
"fra": {
"terminologic": "fra",
"alpha2": "fr",
"french": "fran\u00e7ais",
"bibliographic": "fre",
"english": "French"
},
"dum": {
"terminologic": "",
"alpha2": "",
"french": "n\u00e9erlandais moyen (ca. 1050-1350)",
"bibliographic": "dum",
"english": "Dutch, Middle (ca.1050-1350)"
},
"fre": {
"terminologic": "fra",
"alpha2": "fr",
"french": "fran\u00e7ais",
"bibliographic": "fre",
"english": "French"
},
"swa": {
"terminologic": "",
"alpha2": "sw",
"french": "swahili",
"bibliographic": "swa",
"english": "Swahili"
},
"dua": {
"terminologic": "",
"alpha2": "",
"french": "douala",
"bibliographic": "dua",
"english": "Duala"
},
"fro": {
"terminologic": "",
"alpha2": "",
"french": "fran\u00e7ais ancien (842-ca.1400)",
"bibliographic": "fro",
"english": "French, Old (842-ca.1400)"
},
"yap": {
"terminologic": "",
"alpha2": "",
"french": "yapois",
"bibliographic": "yap",
"english": "Yapese"
},
"frm": {
"terminologic": "",
"alpha2": "",
"french": "fran\u00e7ais moyen (1400-1600)",
"bibliographic": "frm",
"english": "French, Middle (ca.1400-1600)"
},
"nb": {
"terminologic": "",
"alpha2": "nb",
"french": "norv\u00e9gien bokm\u00e5l",
"bibliographic": "nob",
"english": "Bokm\u00e5l, Norwegian; Norwegian Bokm\u00e5l"
},
"frs": {
"terminologic": "",
"alpha2": "",
"french": "frison oriental",
"bibliographic": "frs",
"english": "Eastern Frisian"
},
"frr": {
"terminologic": "",
"alpha2": "",
"french": "frison septentrional",
"bibliographic": "frr",
"english": "Northern Frisian"
},
"yao": {
"terminologic": "",
"alpha2": "",
"french": "yao",
"bibliographic": "yao",
"english": "Yao"
},
"pa": {
"terminologic": "",
"alpha2": "pa",
"french": "pendjabi",
"bibliographic": "pan",
"english": "Panjabi; Punjabi"
},
"xal": {
"terminologic": "",
"alpha2": "",
"french": "kalmouk; o\u00efrat",
"bibliographic": "xal",
"english": "Kalmyk; Oirat"
},
"fry": {
"terminologic": "",
"alpha2": "fy",
"french": "frison occidental",
"bibliographic": "fry",
"english": "Western Frisian"
},
"pi": {
"terminologic": "",
"alpha2": "pi",
"french": "pali",
"bibliographic": "pli",
"english": "Pali"
},
"dut": {
"terminologic": "nld",
"alpha2": "nl",
"french": "n\u00e9erlandais; flamand",
"bibliographic": "dut",
"english": "Dutch; Flemish"
},
"pl": {
"terminologic": "",
"alpha2": "pl",
"french": "polonais",
"bibliographic": "pol",
"english": "Polish"
},
"gay": {
"terminologic": "",
"alpha2": "",
"french": "gayo",
"bibliographic": "gay",
"english": "Gayo"
},
"oto": {
"terminologic": "",
"alpha2": "",
"french": "otomi, langues",
"bibliographic": "oto",
"english": "Otomian languages"
},
"ota": {
"terminologic": "",
"alpha2": "",
"french": "turc ottoman (1500-1928)",
"bibliographic": "ota",
"english": "Turkish, Ottoman (1500-1928)"
},
"hmn": {
"terminologic": "",
"alpha2": "",
"french": "hmong",
"bibliographic": "hmn",
"english": "Hmong; Mong"
},
"ile": {
"terminologic": "",
"alpha2": "ie",
"french": "interlingue",
"bibliographic": "ile",
"english": "Interlingue; Occidental"
},
"myv": {
"terminologic": "",
"alpha2": "",
"french": "erza",
"bibliographic": "myv",
"english": "Erzya"
},
"gaa": {
"terminologic": "",
"alpha2": "",
"french": "ga",
"bibliographic": "gaa",
"english": "Ga"
},
"fur": {
"terminologic": "",
"alpha2": "",
"french": "frioulan",
"bibliographic": "fur",
"english": "Friulian"
},
"mlg": {
"terminologic": "",
"alpha2": "mg",
"french": "malgache",
"bibliographic": "mlg",
"english": "Malagasy"
},
"slv": {
"terminologic": "",
"alpha2": "sl",
"french": "slov\u00e8ne",
"bibliographic": "slv",
"english": "Slovenian"
},
"ain": {
"terminologic": "",
"alpha2": "",
"french": "a\u00efnou",
"bibliographic": "ain",
"english": "Ainu"
},
"fil": {
"terminologic": "",
"alpha2": "",
"french": "filipino; pilipino",
"bibliographic": "fil",
"english": "Filipino; Pilipino"
},
"mlt": {
"terminologic": "",
"alpha2": "mt",
"french": "maltais",
"bibliographic": "mlt",
"english": "Maltese"
},
"slk": {
"terminologic": "slk",
"alpha2": "sk",
"french": "slovaque",
"bibliographic": "slo",
"english": "Slovak"
},
"rar": {
"terminologic": "",
"alpha2": "",
"french": "rarotonga; maori des \u00eeles Cook",
"bibliographic": "rar",
"english": "Rarotongan; Cook Islands Maori"
},
"ful": {
"terminologic": "",
"alpha2": "ff",
"french": "peul",
"bibliographic": "ful",
"english": "Fulah"
},
"sla": {
"terminologic": "",
"alpha2": "",
"french": "slaves, langues",
"bibliographic": "sla",
"english": "Slavic languages"
},
"ve": {
"terminologic": "",
"alpha2": "ve",
"french": "venda",
"bibliographic": "ven",
"english": "Venda"
},
"jpn": {
"terminologic": "",
"alpha2": "ja",
"french": "japonais",
"bibliographic": "jpn",
"english": "Japanese"
},
"vol": {
"terminologic": "",
"alpha2": "vo",
"french": "volap\u00fck",
"bibliographic": "vol",
"english": "Volap\u00fck"
},
"vi": {
"terminologic": "",
"alpha2": "vi",
"french": "vietnamien",
"bibliographic": "vie",
"english": "Vietnamese"
},
"is": {
"terminologic": "isl",
"alpha2": "is",
"french": "islandais",
"bibliographic": "ice",
"english": "Icelandic"
},
"av": {
"terminologic": "",
"alpha2": "av",
"french": "avar",
"bibliographic": "ava",
"english": "Avaric"
},
"iu": {
"terminologic": "",
"alpha2": "iu",
"french": "inuktitut",
"bibliographic": "iku",
"english": "Inuktitut"
},
"it": {
"terminologic": "",
"alpha2": "it",
"french": "italien",
"bibliographic": "ita",
"english": "Italian"
},
"vo": {
"terminologic": "",
"alpha2": "vo",
"french": "volap\u00fck",
"bibliographic": "vol",
"english": "Volap\u00fck"
},
"ii": {
"terminologic": "",
"alpha2": "ii",
"french": "yi de Sichuan",
"bibliographic": "iii",
"english": "Sichuan Yi; Nuosu"
},
"mya": {
"terminologic": "mya",
"alpha2": "my",
"french": "birman",
"bibliographic": "bur",
"english": "Burmese"
},
"ik": {
"terminologic": "",
"alpha2": "ik",
"french": "inupiaq",
"bibliographic": "ipk",
"english": "Inupiaq"
},
"io": {
"terminologic": "",
"alpha2": "io",
"french": "ido",
"bibliographic": "ido",
"english": "Ido"
},
"ine": {
"terminologic": "",
"alpha2": "",
"french": "indo-europ\u00e9ennes, langues",
"bibliographic": "ine",
"english": "Indo-European languages"
},
"ia": {
"terminologic": "",
"alpha2": "ia",
"french": "interlingua (langue auxiliaire internationale)",
"bibliographic": "ina",
"english": "Interlingua (International Auxiliary Language Association)"
},
"ave": {
"terminologic": "",
"alpha2": "ae",
"french": "avestique",
"bibliographic": "ave",
"english": "Avestan"
},
"jpr": {
"terminologic": "",
"alpha2": "",
"french": "jud\u00e9o-persan",
"bibliographic": "jpr",
"english": "Judeo-Persian"
},
"ie": {
"terminologic": "",
"alpha2": "ie",
"french": "interlingue",
"bibliographic": "ile",
"english": "Interlingue; Occidental"
},
"id": {
"terminologic": "",
"alpha2": "id",
"french": "indon\u00e9sien",
"bibliographic": "ind",
"english": "Indonesian"
},
"ig": {
"terminologic": "",
"alpha2": "ig",
"french": "igbo",
"bibliographic": "ibo",
"english": "Igbo"
},
"pap": {
"terminologic": "",
"alpha2": "",
"french": "papiamento",
"bibliographic": "pap",
"english": "Papiamento"
},
"ewo": {
"terminologic": "",
"alpha2": "",
"french": "\u00e9wondo",
"bibliographic": "ewo",
"english": "Ewondo"
},
"pau": {
"terminologic": "",
"alpha2": "",
"french": "palau",
"bibliographic": "pau",
"english": "Palauan"
},
"ewe": {
"terminologic": "",
"alpha2": "ee",
"french": "\u00e9w\u00e9",
"bibliographic": "ewe",
"english": "Ewe"
},
"zgh": {
"terminologic": "",
"alpha2": "",
"french": "amazighe standard marocain",
"bibliographic": "zgh",
"english": "Standard Moroccan Tamazight"
},
"paa": {
"terminologic": "",
"alpha2": "",
"french": "papoues, langues",
"bibliographic": "paa",
"english": "Papuan languages"
},
"pag": {
"terminologic": "",
"alpha2": "",
"french": "pangasinan",
"bibliographic": "pag",
"english": "Pangasinan"
},
"pal": {
"terminologic": "",
"alpha2": "",
"french": "pahlavi",
"bibliographic": "pal",
"english": "Pahlavi"
},
"pam": {
"terminologic": "",
"alpha2": "",
"french": "pampangan",
"bibliographic": "pam",
"english": "Pampanga; Kapampangan"
},
"pan": {
"terminologic": "",
"alpha2": "pa",
"french": "pendjabi",
"bibliographic": "pan",
"english": "Panjabi; Punjabi"
},
"syc": {
"terminologic": "",
"alpha2": "",
"french": "syriaque classique",
"bibliographic": "syc",
"english": "Classical Syriac"
},
"phi": {
"terminologic": "",
"alpha2": "",
"french": "philippines, langues",
"bibliographic": "phi",
"english": "Philippine languages"
},
"nog": {
"terminologic": "",
"alpha2": "",
"french": "noga\u00ef; nogay",
"bibliographic": "nog",
"english": "Nogai"
},
"phn": {
"terminologic": "",
"alpha2": "",
"french": "ph\u00e9nicien",
"bibliographic": "phn",
"english": "Phoenician"
},
"kir": {
"terminologic": "",
"alpha2": "ky",
"french": "kirghiz",
"bibliographic": "kir",
"english": "Kirghiz; Kyrgyz"
},
"nia": {
"terminologic": "",
"alpha2": "",
"french": "nias",
"bibliographic": "nia",
"english": "Nias"
},
"kik": {
"terminologic": "",
"alpha2": "ki",
"french": "kikuyu",
"bibliographic": "kik",
"english": "Kikuyu; Gikuyu"
},
"syr": {
"terminologic": "",
"alpha2": "",
"french": "syriaque",
"bibliographic": "syr",
"english": "Syriac"
},
"kin": {
"terminologic": "",
"alpha2": "rw",
"french": "rwanda",
"bibliographic": "kin",
"english": "Kinyarwanda"
},
"niu": {
"terminologic": "",
"alpha2": "",
"french": "niu\u00e9",
"bibliographic": "niu",
"english": "Niuean"
},
"gsw": {
"terminologic": "",
"alpha2": "",
"french": "suisse al\u00e9manique; al\u00e9manique; alsacien",
"bibliographic": "gsw",
"english": "Swiss German; Alemannic; Alsatian"
},
"epo": {
"terminologic": "",
"alpha2": "eo",
"french": "esp\u00e9ranto",
"bibliographic": "epo",
"english": "Esperanto"
},
"jbo": {
"terminologic": "",
"alpha2": "",
"french": "lojban",
"bibliographic": "jbo",
"english": "Lojban"
},
"mic": {
"terminologic": "",
"alpha2": "",
"french": "mi'kmaq; micmac",
"bibliographic": "mic",
"english": "Mi'kmaq; Micmac"
},
"tha": {
"terminologic": "",
"alpha2": "th",
"french": "tha\u00ef",
"bibliographic": "tha",
"english": "Thai"
},
"sam": {
"terminologic": "",
"alpha2": "",
"french": "samaritain",
"bibliographic": "sam",
"english": "Samaritan Aramaic"
},
"hai": {
"terminologic": "",
"alpha2": "",
"french": "haida",
"bibliographic": "hai",
"english": "Haida"
},
"gmh": {
"terminologic": "",
"alpha2": "",
"french": "allemand, moyen haut (ca. 1050-1500)",
"bibliographic": "gmh",
"english": "German, Middle High (ca.1050-1500)"
},
"cus": {
"terminologic": "",
"alpha2": "",
"french": "couchitiques, langues",
"bibliographic": "cus",
"english": "Cushitic languages"
},
"ell": {
"terminologic": "ell",
"alpha2": "el",
"french": "grec moderne (apr\u00e8s 1453)",
"bibliographic": "gre",
"english": "Greek, Modern (1453-)"
},
"efi": {
"terminologic": "",
"alpha2": "",
"french": "efik",
"bibliographic": "efi",
"english": "Efik"
},
"wen": {
"terminologic": "",
"alpha2": "",
"french": "sorabes, langues",
"bibliographic": "wen",
"english": "Sorbian languages"
},
"ady": {
"terminologic": "",
"alpha2": "",
"french": "adygh\u00e9",
"bibliographic": "ady",
"english": "Adyghe; Adygei"
},
"elx": {
"terminologic": "",
"alpha2": "",
"french": "\u00e9lamite",
"bibliographic": "elx",
"english": "Elamite"
},
"ada": {
"terminologic": "",
"alpha2": "",
"french": "adangme",
"bibliographic": "ada",
"english": "Adangme"
},
"nav": {
"terminologic": "",
"alpha2": "nv",
"french": "navaho",
"bibliographic": "nav",
"english": "Navajo; Navaho"
},
"hat": {
"terminologic": "",
"alpha2": "ht",
"french": "ha\u00eftien; cr\u00e9ole ha\u00eftien",
"bibliographic": "hat",
"english": "Haitian; Haitian Creole"
},
"hau": {
"terminologic": "",
"alpha2": "ha",
"french": "haoussa",
"bibliographic": "hau",
"english": "Hausa"
},
"haw": {
"terminologic": "",
"alpha2": "",
"french": "hawa\u00efen",
"bibliographic": "haw",
"english": "Hawaiian"
},
"bin": {
"terminologic": "",
"alpha2": "",
"french": "bini; edo",
"bibliographic": "bin",
"english": "Bini; Edo"
},
"amh": {
"terminologic": "",
"alpha2": "am",
"french": "amharique",
"bibliographic": "amh",
"english": "Amharic"
},
"bik": {
"terminologic": "",
"alpha2": "",
"french": "bikol",
"bibliographic": "bik",
"english": "Bikol"
},
"bih": {
"terminologic": "",
"alpha2": "bh",
"french": "langues biharis",
"bibliographic": "bih",
"english": "Bihari languages"
},
"mos": {
"terminologic": "",
"alpha2": "",
"french": "mor\u00e9",
"bibliographic": "mos",
"english": "Mossi"
},
"moh": {
"terminologic": "",
"alpha2": "",
"french": "mohawk",
"bibliographic": "moh",
"english": "Mohawk"
},
"mon": {
"terminologic": "",
"alpha2": "mn",
"french": "mongol",
"bibliographic": "mon",
"english": "Mongolian"
},
"bis": {
"terminologic": "",
"alpha2": "bi",
"french": "bichlamar",
"bibliographic": "bis",
"english": "Bislama"
},
"bla": {
"terminologic": "",
"alpha2": "",
"french": "blackfoot",
"bibliographic": "bla",
"english": "Siksika"
},
"cy": {
"terminologic": "cym",
"alpha2": "cy",
"french": "gallois",
"bibliographic": "wel",
"english": "Welsh"
},
"tib": {
"terminologic": "bod",
"alpha2": "bo",
"french": "tib\u00e9tain",
"bibliographic": "tib",
"english": "Tibetan"
},
"tvl": {
"terminologic": "",
"alpha2": "",
"french": "tuvalu",
"bibliographic": "tvl",
"english": "Tuvalu"
},
"tgk": {
"terminologic": "",
"alpha2": "tg",
"french": "tadjik",
"bibliographic": "tgk",
"english": "Tajik"
},
"ijo": {
"terminologic": "",
"alpha2": "",
"french": "ijo, langues",
"bibliographic": "ijo",
"english": "Ijo languages"
},
"est": {
"terminologic": "",
"alpha2": "et",
"french": "estonien",
"bibliographic": "est",
"english": "Estonian"
},
"kmb": {
"terminologic": "",
"alpha2": "",
"french": "kimbundu",
"bibliographic": "kmb",
"english": "Kimbundu"
},
"ice": {
"terminologic": "isl",
"alpha2": "is",
"french": "islandais",
"bibliographic": "ice",
"english": "Icelandic"
},
"peo": {
"terminologic": "",
"alpha2": "",
"french": "perse, vieux (ca. 600-400 av. J.-C.)",
"bibliographic": "peo",
"english": "Persian, Old (ca.600-400 B.C.)"
},
"tl": {
"terminologic": "",
"alpha2": "tl",
"french": "tagalog",
"bibliographic": "tgl",
"english": "Tagalog"
},
"tgl": {
"terminologic": "",
"alpha2": "tl",
"french": "tagalog",
"bibliographic": "tgl",
"english": "Tagalog"
},
"umb": {
"terminologic": "",
"alpha2": "",
"french": "umbundu",
"bibliographic": "umb",
"english": "Umbundu"
},
"tmh": {
"terminologic": "",
"alpha2": "",
"french": "tamacheq",
"bibliographic": "tmh",
"english": "Tamashek"
},
"fon": {
"terminologic": "",
"alpha2": "",
"french": "fon",
"bibliographic": "fon",
"english": "Fon"
},
"hsb": {
"terminologic": "",
"alpha2": "",
"french": "haut-sorabe",
"bibliographic": "hsb",
"english": "Upper Sorbian"
},
"be": {
"terminologic": "",
"alpha2": "be",
"french": "bi\u00e9lorusse",
"bibliographic": "bel",
"english": "Belarusian"
},
"bg": {
"terminologic": "",
"alpha2": "bg",
"french": "bulgare",
"bibliographic": "bul",
"english": "Bulgarian"
},
"run": {
"terminologic": "",
"alpha2": "rn",
"french": "rundi",
"bibliographic": "run",
"english": "Rundi"
},
"ba": {
"terminologic": "",
"alpha2": "ba",
"french": "bachkir",
"bibliographic": "bak",
"english": "Bashkir"
},
"ps": {
"terminologic": "",
"alpha2": "ps",
"french": "pachto",
"bibliographic": "pus",
"english": "Pushto; Pashto"
},
"bm": {
"terminologic": "",
"alpha2": "bm",
"french": "bambara",
"bibliographic": "bam",
"english": "Bambara"
},
"bn": {
"terminologic": "",
"alpha2": "bn",
"french": "bengali",
"bibliographic": "ben",
"english": "Bengali"
},
"bo": {
"terminologic": "bod",
"alpha2": "bo",
"french": "tib\u00e9tain",
"bibliographic": "tib",
"english": "Tibetan"
},
"bh": {
"terminologic": "",
"alpha2": "bh",
"french": "langues biharis",
"bibliographic": "bih",
"english": "Bihari languages"
},
"bi": {
"terminologic": "",
"alpha2": "bi",
"french": "bichlamar",
"bibliographic": "bis",
"english": "Bislama"
},
"sag": {
"terminologic": "",
"alpha2": "sg",
"french": "sango",
"bibliographic": "sag",
"english": "Sango"
},
"br": {
"terminologic": "",
"alpha2": "br",
"french": "breton",
"bibliographic": "bre",
"english": "Breton"
},
"bs": {
"terminologic": "",
"alpha2": "bs",
"french": "bosniaque",
"bibliographic": "bos",
"english": "Bosnian"
},
"rus": {
"terminologic": "",
"alpha2": "ru",
"french": "russe",
"bibliographic": "rus",
"english": "Russian"
},
"rup": {
"terminologic": "",
"alpha2": "",
"french": "aroumain; mac\u00e9do-roumain",
"bibliographic": "rup",
"english": "Aromanian; Arumanian; Macedo-Romanian"
},
"pli": {
"terminologic": "",
"alpha2": "pi",
"french": "pali",
"bibliographic": "pli",
"english": "Pali"
},
"om": {
"terminologic": "",
"alpha2": "om",
"french": "galla",
"bibliographic": "orm",
"english": "Oromo"
},
"oj": {
"terminologic": "",
"alpha2": "oj",
"french": "ojibwa",
"bibliographic": "oji",
"english": "Ojibwa"
},
"ace": {
"terminologic": "",
"alpha2": "",
"french": "aceh",
"bibliographic": "ace",
"english": "Achinese"
},
"ach": {
"terminologic": "",
"alpha2": "",
"french": "acoli",
"bibliographic": "ach",
"english": "Acoli"
},
"nde": {
"terminologic": "",
"alpha2": "nd",
"french": "nd\u00e9b\u00e9l\u00e9 du Nord",
"bibliographic": "nde",
"english": "Ndebele, North; North Ndebele"
},
"dzo": {
"terminologic": "",
"alpha2": "dz",
"french": "dzongkha",
"bibliographic": "dzo",
"english": "Dzongkha"
},
"kru": {
"terminologic": "",
"alpha2": "",
"french": "kurukh",
"bibliographic": "kru",
"english": "Kurukh"
},
"srr": {
"terminologic": "",
"alpha2": "",
"french": "s\u00e9r\u00e8re",
"bibliographic": "srr",
"english": "Serer"
},
"ido": {
"terminologic": "",
"alpha2": "io",
"french": "ido",
"bibliographic": "ido",
"english": "Ido"
},
"srp": {
"terminologic": "",
"alpha2": "sr",
"french": "serbe",
"bibliographic": "srp",
"english": "Serbian"
},
"kro": {
"terminologic": "",
"alpha2": "",
"french": "krou, langues",
"bibliographic": "kro",
"english": "Kru languages"
},
"krl": {
"terminologic": "",
"alpha2": "",
"french": "car\u00e9lien",
"bibliographic": "krl",
"english": "Karelian"
},
"krc": {
"terminologic": "",
"alpha2": "",
"french": "karatchai balkar",
"bibliographic": "krc",
"english": "Karachay-Balkar"
},
"nds": {
"terminologic": "",
"alpha2": "",
"french": "bas allemand; bas saxon; allemand, bas; saxon, bas",
"bibliographic": "nds",
"english": "Low German; Low Saxon; German, Low; Saxon, Low"
},
"os": {
"terminologic": "",
"alpha2": "os",
"french": "oss\u00e8te",
"bibliographic": "oss",
"english": "Ossetian; Ossetic"
},
"or": {
"terminologic": "",
"alpha2": "or",
"french": "oriya",
"bibliographic": "ori",
"english": "Oriya"
},
"zul": {
"terminologic": "",
"alpha2": "zu",
"french": "zoulou",
"bibliographic": "zul",
"english": "Zulu"
},
"twi": {
"terminologic": "",
"alpha2": "tw",
"french": "twi",
"bibliographic": "twi",
"english": "Twi"
},
"sog": {
"terminologic": "",
"alpha2": "",
"french": "sogdien",
"bibliographic": "sog",
"english": "Sogdian"
},
"nso": {
"terminologic": "",
"alpha2": "",
"french": "pedi; sepedi; sotho du Nord",
"bibliographic": "nso",
"english": "Pedi; Sepedi; Northern Sotho"
},
"swe": {
"terminologic": "",
"alpha2": "sv",
"french": "su\u00e9dois",
"bibliographic": "swe",
"english": "Swedish"
},
"som": {
"terminologic": "",
"alpha2": "so",
"french": "somali",
"bibliographic": "som",
"english": "Somali"
},
"son": {
"terminologic": "",
"alpha2": "",
"french": "songhai, langues",
"bibliographic": "son",
"english": "Songhai languages"
},
"snd": {
"terminologic": "",
"alpha2": "sd",
"french": "sindhi",
"bibliographic": "snd",
"english": "Sindhi"
},
"sot": {
"terminologic": "",
"alpha2": "st",
"french": "sotho du Sud",
"bibliographic": "sot",
"english": "Sotho, Southern"
},
"mkd": {
"terminologic": "mkd",
"alpha2": "mk",
"french": "mac\u00e9donien",
"bibliographic": "mac",
"english": "Macedonian"
},
"wak": {
"terminologic": "",
"alpha2": "",
"french": "wakashanes, langues",
"bibliographic": "wak",
"english": "Wakashan languages"
},
"her": {
"terminologic": "",
"alpha2": "hz",
"french": "herero",
"bibliographic": "her",
"english": "Herero"
},
"lol": {
"terminologic": "",
"alpha2": "",
"french": "mongo",
"bibliographic": "lol",
"english": "Mongo"
},
"mkh": {
"terminologic": "",
"alpha2": "",
"french": "m\u00f4n-khmer, langues",
"bibliographic": "mkh",
"english": "Mon-Khmer languages"
},
"heb": {
"terminologic": "",
"alpha2": "he",
"french": "h\u00e9breu",
"bibliographic": "heb",
"english": "Hebrew"
},
"loz": {
"terminologic": "",
"alpha2": "",
"french": "lozi",
"bibliographic": "loz",
"english": "Lozi"
},
"gil": {
"terminologic": "",
"alpha2": "",
"french": "kiribati",
"bibliographic": "gil",
"english": "Gilbertese"
},
"was": {
"terminologic": "",
"alpha2": "",
"french": "washo",
"bibliographic": "was",
"english": "Washo"
},
"war": {
"terminologic": "",
"alpha2": "",
"french": "waray",
"bibliographic": "war",
"english": "Waray"
},
"hz": {
"terminologic": "",
"alpha2": "hz",
"french": "herero",
"bibliographic": "her",
"english": "Herero"
},
"hy": {
"terminologic": "hye",
"alpha2": "hy",
"french": "arm\u00e9nien",
"bibliographic": "arm",
"english": "Armenian"
},
"sid": {
"terminologic": "",
"alpha2": "",
"french": "sidamo",
"bibliographic": "sid",
"english": "Sidamo"
},
"hr": {
"terminologic": "",
"alpha2": "hr",
"french": "croate",
"bibliographic": "hrv",
"english": "Croatian"
},
"ht": {
"terminologic": "",
"alpha2": "ht",
"french": "ha\u00eftien; cr\u00e9ole ha\u00eftien",
"bibliographic": "hat",
"english": "Haitian; Haitian Creole"
},
"hu": {
"terminologic": "",
"alpha2": "hu",
"french": "hongrois",
"bibliographic": "hun",
"english": "Hungarian"
},
"hi": {
"terminologic": "",
"alpha2": "hi",
"french": "hindi",
"bibliographic": "hin",
"english": "Hindi"
},
"ho": {
"terminologic": "",
"alpha2": "ho",
"french": "hiri motu",
"bibliographic": "hmo",
"english": "Hiri Motu"
},
"bul": {
"terminologic": "",
"alpha2": "bg",
"french": "bulgare",
"bibliographic": "bul",
"english": "Bulgarian"
},
"wal": {
"terminologic": "",
"alpha2": "",
"french": "walamo",
"bibliographic": "wal",
"english": "Walamo"
},
"ha": {
"terminologic": "",
"alpha2": "ha",
"french": "haoussa",
"bibliographic": "hau",
"english": "Hausa"
},
"bug": {
"terminologic": "",
"alpha2": "",
"french": "bugi",
"bibliographic": "bug",
"english": "Buginese"
},
"he": {
"terminologic": "",
"alpha2": "he",
"french": "h\u00e9breu",
"bibliographic": "heb",
"english": "Hebrew"
},
"uz": {
"terminologic": "",
"alpha2": "uz",
"french": "ouszbek",
"bibliographic": "uzb",
"english": "Uzbek"
},
"aze": {
"terminologic": "",
"alpha2": "az",
"french": "az\u00e9ri",
"bibliographic": "aze",
"english": "Azerbaijani"
},
"ur": {
"terminologic": "",
"alpha2": "ur",
"french": "ourdou",
"bibliographic": "urd",
"english": "Urdu"
},
"zha": {
"terminologic": "",
"alpha2": "za",
"french": "zhuang; chuang",
"bibliographic": "zha",
"english": "Zhuang; Chuang"
},
"uk": {
"terminologic": "",
"alpha2": "uk",
"french": "ukrainien",
"bibliographic": "ukr",
"english": "Ukrainian"
},
"ug": {
"terminologic": "",
"alpha2": "ug",
"french": "ou\u00efgour",
"bibliographic": "uig",
"english": "Uighur; Uyghur"
},
"zho": {
"terminologic": "zho",
"alpha2": "zh",
"french": "chinois",
"bibliographic": "chi",
"english": "Chinese"
},
"aa": {
"terminologic": "",
"alpha2": "aa",
"french": "afar",
"bibliographic": "\ufeffaar",
"english": "Afar"
},
"ab": {
"terminologic": "",
"alpha2": "ab",
"french": "abkhaze",
"bibliographic": "abk",
"english": "Abkhazian"
},
"ae": {
"terminologic": "",
"alpha2": "ae",
"french": "avestique",
"bibliographic": "ave",
"english": "Avestan"
},
"uig": {
"terminologic": "",
"alpha2": "ug",
"french": "ou\u00efgour",
"bibliographic": "uig",
"english": "Uighur; Uyghur"
},
"af": {
"terminologic": "",
"alpha2": "af",
"french": "afrikaans",
"bibliographic": "afr",
"english": "Afrikaans"
},
"ak": {
"terminologic": "",
"alpha2": "ak",
"french": "akan",
"bibliographic": "aka",
"english": "Akan"
},
"am": {
"terminologic": "",
"alpha2": "am",
"french": "amharique",
"bibliographic": "amh",
"english": "Amharic"
},
"an": {
"terminologic": "",
"alpha2": "an",
"french": "aragonais",
"bibliographic": "arg",
"english": "Aragonese"
},
"khi": {
"terminologic": "",
"alpha2": "",
"french": "kho\u00efsan, langues",
"bibliographic": "khi",
"english": "Khoisan languages"
},
"as": {
"terminologic": "",
"alpha2": "as",
"french": "assamais",
"bibliographic": "asm",
"english": "Assamese"
},
"ar": {
"terminologic": "",
"alpha2": "ar",
"french": "arabe",
"bibliographic": "ara",
"english": "Arabic"
},
"inh": {
"terminologic": "",
"alpha2": "",
"french": "ingouche",
"bibliographic": "inh",
"english": "Ingush"
},
"khm": {
"terminologic": "",
"alpha2": "km",
"french": "khmer central",
"bibliographic": "khm",
"english": "Central Khmer"
},
"kho": {
"terminologic": "",
"alpha2": "",
"french": "khotanais; sakan",
"bibliographic": "kho",
"english": "Khotanese; Sakan"
},
"ind": {
"terminologic": "",
"alpha2": "id",
"french": "indon\u00e9sien",
"bibliographic": "ind",
"english": "Indonesian"
},
"kha": {
"terminologic": "",
"alpha2": "",
"french": "khasi",
"bibliographic": "kha",
"english": "Khasi"
},
"az": {
"terminologic": "",
"alpha2": "az",
"french": "az\u00e9ri",
"bibliographic": "aze",
"english": "Azerbaijani"
},
"ina": {
"terminologic": "",
"alpha2": "ia",
"french": "interlingua (langue auxiliaire internationale)",
"bibliographic": "ina",
"english": "Interlingua (International Auxiliary Language Association)"
},
"inc": {
"terminologic": "",
"alpha2": "",
"french": "indo-aryennes, langues",
"bibliographic": "inc",
"english": "Indic languages"
},
"nl": {
"terminologic": "nld",
"alpha2": "nl",
"french": "n\u00e9erlandais; flamand",
"bibliographic": "dut",
"english": "Dutch; Flemish"
},
"nn": {
"terminologic": "",
"alpha2": "nn",
"french": "norv\u00e9gien nynorsk; nynorsk, norv\u00e9gien",
"bibliographic": "nno",
"english": "Norwegian Nynorsk; Nynorsk, Norwegian"
},
"no": {
"terminologic": "",
"alpha2": "no",
"french": "norv\u00e9gien",
"bibliographic": "nor",
"english": "Norwegian"
},
"na": {
"terminologic": "",
"alpha2": "na",
"french": "nauruan",
"bibliographic": "nau",
"english": "Nauru"
},
"nah": {
"terminologic": "",
"alpha2": "",
"french": "nahuatl, langues",
"bibliographic": "nah",
"english": "Nahuatl languages"
},
"nai": {
"terminologic": "",
"alpha2": "",
"french": "nord-am\u00e9rindiennes, langues",
"bibliographic": "nai",
"english": "North American Indian languages"
},
"nd": {
"terminologic": "",
"alpha2": "nd",
"french": "nd\u00e9b\u00e9l\u00e9 du Nord",
"bibliographic": "nde",
"english": "Ndebele, North; North Ndebele"
},
"ne": {
"terminologic": "",
"alpha2": "ne",
"french": "n\u00e9palais",
"bibliographic": "nep",
"english": "Nepali"
},
"tir": {
"terminologic": "",
"alpha2": "ti",
"french": "tigrigna",
"bibliographic": "tir",
"english": "Tigrinya"
},
"ng": {
"terminologic": "",
"alpha2": "ng",
"french": "ndonga",
"bibliographic": "ndo",
"english": "Ndonga"
},
"ny": {
"terminologic": "",
"alpha2": "ny",
"french": "chichewa; chewa; nyanja",
"bibliographic": "nya",
"english": "Chichewa; Chewa; Nyanja"
},
"nap": {
"terminologic": "",
"alpha2": "",
"french": "napolitain",
"bibliographic": "nap",
"english": "Neapolitan"
},
"gre": {
"terminologic": "ell",
"alpha2": "el",
"french": "grec moderne (apr\u00e8s 1453)",
"bibliographic": "gre",
"english": "Greek, Modern (1453-)"
},
"grb": {
"terminologic": "",
"alpha2": "",
"french": "grebo",
"bibliographic": "grb",
"english": "Grebo"
},
"grc": {
"terminologic": "",
"alpha2": "",
"french": "grec ancien (jusqu'\u00e0 1453)",
"bibliographic": "grc",
"english": "Greek, Ancient (to 1453)"
},
"nau": {
"terminologic": "",
"alpha2": "na",
"french": "nauruan",
"bibliographic": "nau",
"english": "Nauru"
},
"grn": {
"terminologic": "",
"alpha2": "gn",
"french": "guarani",
"bibliographic": "grn",
"english": "Guarani"
},
"nr": {
"terminologic": "",
"alpha2": "nr",
"french": "nd\u00e9b\u00e9l\u00e9 du Sud",
"bibliographic": "nbl",
"english": "Ndebele, South; South Ndebele"
},
"tig": {
"terminologic": "",
"alpha2": "",
"french": "tigr\u00e9",
"bibliographic": "tig",
"english": "Tigre"
},
"yor": {
"terminologic": "",
"alpha2": "yo",
"french": "yoruba",
"bibliographic": "yor",
"english": "Yoruba"
},
"nv": {
"terminologic": "",
"alpha2": "nv",
"french": "navaho",
"bibliographic": "nav",
"english": "Navajo; Navaho"
},
"mri": {
"terminologic": "mri",
"alpha2": "mi",
"french": "maori",
"bibliographic": "mao",
"english": "Maori"
},
"zun": {
"terminologic": "",
"alpha2": "",
"french": "zuni",
"bibliographic": "zun",
"english": "Zuni"
},
"sqi": {
"terminologic": "sqi",
"alpha2": "sq",
"french": "albanais",
"bibliographic": "alb",
"english": "Albanian"
},
"gon": {
"terminologic": "",
"alpha2": "",
"french": "gond",
"bibliographic": "gon",
"english": "Gondi"
},
"\ufeffaar": {
"terminologic": "",
"alpha2": "aa",
"french": "afar",
"bibliographic": "\ufeffaar",
"english": "Afar"
},
"cpe": {
"terminologic": "",
"alpha2": "",
"french": "cr\u00e9oles et pidgins bas\u00e9s sur l'anglais",
"bibliographic": "cpe",
"english": "Creoles and pidgins, English based"
},
"cpf": {
"terminologic": "",
"alpha2": "",
"french": "cr\u00e9oles et pidgins bas\u00e9s sur le fran\u00e7ais",
"bibliographic": "cpf",
"english": "Creoles and pidgins, French-based "
},
"hmo": {
"terminologic": "",
"alpha2": "ho",
"french": "hiri motu",
"bibliographic": "hmo",
"english": "Hiri Motu"
},
"cpp": {
"terminologic": "",
"alpha2": "",
"french": "cr\u00e9oles et pidgins bas\u00e9s sur le portugais",
"bibliographic": "cpp",
"english": "Creoles and pidgins, Portuguese-based "
}
} | PypiClean |
/DiPAS-2.0.tar.gz/DiPAS-2.0/README.md | <div align="center">
<a href="https://gitlab.com/Dominik1123/dipas">
<img
alt="dipas-logo"
src="https://gitlab.com/Dominik1123/dipas/-/raw/develop/logo/logo.png"
width="30%"
style="display: block; margin-left: auto; margin-right: auto;"
>
</a>
</div>
[](https://gitlab.com/Dominik1123/dipas/-/commits/develop)
[](https://gitlab.com/Dominik1123/dipas/-/commits/develop)
[](https://pypi.org/project/DiPAS/)
[](https://pypi.org/project/dipas/)
-----
**DiPAS** is a program for differentiable simulations of particle accelerators. It acts as a framework and thus
supports a wide range of use cases such as [particle tracking](https://dipas.readthedocs.io/en/stable/usage/tracking.html)
or [optics calculations](https://dipas.readthedocs.io/en/stable/usage/optics.html) such as closed orbit search or
computation of Twiss parameters.
The involved computations are backed by the [PyTorch](https://pytorch.org/) package which also provides the relevant
functionality for differentiation of user-defined quantities as well as a variety of gradient-based optimizers that integrate
with the thus derived quantities.
The DiPAS program can [parse MADX](https://dipas.readthedocs.io/en/stable/usage/building.html#Parsing-MADX-scripts)
lattice definitions and hence allows for zero-overhead importing of existing lattices.
In addition, it supports [custom lattice definitions](https://dipas.readthedocs.io/en/stable/usage/building.html#Using-the-build-API)
from provided element classes.
DiPAS can also be used via command line interface, see [`dipas --help`](https://dipas.readthedocs.io/en/stable/usage/cli.html)
for more information.
## Relevant links
* [Documentation](https://dipas.readthedocs.io/)
* [Examples](https://gitlab.com/Dominik1123/dipas/blob/master/examples)
* [PyPI Project](https://pypi.org/project/dipas/)
## Example usage
Minimizing loss along beamline by tuning quadrupoles:
```py
import numpy
from dipas.build import from_file
from dipas.elements import Quadrupole
import torch
lattice = from_file('example.madx')
for quad in lattice[Quadrupole]:
quad.k1 = torch.nn.Parameter(quad.k1)
optimizer = torch.optim.Adam(lattice.parameters(), lr=1e-3)
particles = torch.from_numpy(numpy.load('particles.npy'))
while True:
tracked, loss_val = lattice.linear(particles, recloss='sum')
lost = 1 - tracked.shape[1] / particles.shape[1]
if lost < 0.01: # Fraction of particles lost less than 1%.
break
optimizer.zero_grad()
loss_val.backward()
optimizer.step()
```
| PypiClean |
/MFD%20Floods-0.1.14.tar.gz/MFD Floods-0.1.14/bin/rgb2pct.py |
import os.path
import sys
from osgeo import gdal
def Usage():
    """Print the command-line synopsis for rgb2pct.py and exit with status 1."""
    message = 'Usage: rgb2pct.py [-n colors | -pct palette_file] [-of format] source_file dest_file'
    print(message)
    sys.exit(1)
def DoesDriverHandleExtension(drv, ext):
    """Return True if GDAL driver *drv* lists *ext* in its DMD_EXTENSIONS metadata."""
    supported = drv.GetMetadataItem(gdal.DMD_EXTENSIONS)
    if supported is None:
        return False
    return ext.lower() in supported.lower()
def GetExtension(filename):
    """Return the extension of *filename* without its leading dot ('' if none)."""
    _, ext = os.path.splitext(filename)
    if ext.startswith('.'):
        return ext[1:]
    return ext
def GetOutputDriversFor(filename):
    """Return the short names of GDAL raster drivers able to create *filename*.

    A driver qualifies if it supports creation (DCAP_CREATE or DCAP_CREATECOPY)
    and rasters (DCAP_RASTER), and either handles the file's extension or has a
    connection prefix (e.g. "PG:") matching the start of *filename*.
    """
    drv_list = []
    ext = GetExtension(filename)
    for i in range(gdal.GetDriverCount()):
        drv = gdal.GetDriver(i)
        if (drv.GetMetadataItem(gdal.DCAP_CREATE) is not None or
                drv.GetMetadataItem(gdal.DCAP_CREATECOPY) is not None) and \
                drv.GetMetadataItem(gdal.DCAP_RASTER) is not None:
            if ext and DoesDriverHandleExtension(drv, ext):
                drv_list.append(drv.ShortName)
            else:
                prefix = drv.GetMetadataItem(gdal.DMD_CONNECTION_PREFIX)
                if prefix is not None and filename.lower().startswith(prefix.lower()):
                    drv_list.append(drv.ShortName)

    # GMT is registered before netCDF for opening reasons, but we want
    # netCDF to be used by default for output.
    # Bug fix: the original condition read "not drv_list and drv_list[0]...",
    # which raised IndexError on an empty list and could never reorder
    # anything; the intent requires at least two matched drivers.
    if ext.lower() == 'nc' and len(drv_list) >= 2 and \
            drv_list[0].upper() == 'GMT' and drv_list[1].upper() == 'NETCDF':
        drv_list = ['NETCDF', 'GMT']

    return drv_list
def GetOutputDriverFor(filename):
    """Pick a single GDAL output driver short name suitable for *filename*.

    Falls back to 'GTiff' when there is no extension at all, raises when the
    extension matches no driver, and warns when several drivers match.
    """
    candidates = GetOutputDriversFor(filename)
    ext = GetExtension(filename)
    if not candidates:
        if ext:
            raise Exception("Cannot guess driver for %s" % filename)
        return 'GTiff'
    if len(candidates) > 1:
        print("Several drivers matching %s extension. Using %s" % (ext if ext else '', candidates[0]))
    return candidates[0]
# =============================================================================
# Mainline
# =============================================================================

# Conversion parameters, possibly overridden from the command line below.
color_count = 256
frmt = None
src_filename = None
dst_filename = None
pct_filename = None

gdal.AllRegister()
argv = gdal.GeneralCmdLineProcessor(sys.argv)
if argv is None:
    sys.exit(0)

# Parse command line arguments.
i = 1
while i < len(argv):
    arg = argv[i]

    if arg == '-of' or arg == '-f':
        i = i + 1
        frmt = argv[i]
    elif arg == '-n':
        i = i + 1
        color_count = int(argv[i])
    elif arg == '-pct':
        i = i + 1
        pct_filename = argv[i]
    elif src_filename is None:
        src_filename = argv[i]
    elif dst_filename is None:
        dst_filename = argv[i]
    else:
        Usage()

    i = i + 1

# Both positional arguments are mandatory.
if src_filename is None or dst_filename is None:
    Usage()

# Open source file
src_ds = gdal.Open(src_filename)
if src_ds is None:
    print('Unable to open %s' % src_filename)
    sys.exit(1)

if src_ds.RasterCount < 3:
    print('%s has %d band(s), need 3 for inputs red, green and blue.'
          % (src_filename, src_ds.RasterCount))
    sys.exit(1)

# Ensure we recognise the driver.
if frmt is None:
    frmt = GetOutputDriverFor(dst_filename)

dst_driver = gdal.GetDriverByName(frmt)
if dst_driver is None:
    print('"%s" driver not registered.' % frmt)
    sys.exit(1)

# Generate palette: either computed over the three source bands with a
# median-cut algorithm, or read from the first band of a palette raster.
ct = gdal.ColorTable()
if pct_filename is None:
    err = gdal.ComputeMedianCutPCT(src_ds.GetRasterBand(1),
                                   src_ds.GetRasterBand(2),
                                   src_ds.GetRasterBand(3),
                                   color_count, ct,
                                   callback=gdal.TermProgress_nocb)
else:
    pct_ds = gdal.Open(pct_filename)
    ct = pct_ds.GetRasterBand(1).GetRasterColorTable().Clone()

# Create the working file. We have to use TIFF since there are few formats
# that allow setting the color table after creation.
# Bug fix: the original tested the *builtin* "format" against 'GTiff'
# (always False), so GTiff output always went through a temporary file.
if frmt == 'GTiff':
    tif_filename = dst_filename
else:
    import tempfile
    tif_filedesc, tif_filename = tempfile.mkstemp(suffix='.tif')
    # Close the descriptor right away: the GTiff driver recreates the file
    # itself, and an open handle would lock the file on Windows.
    os.close(tif_filedesc)

gtiff_driver = gdal.GetDriverByName('GTiff')

tif_ds = gtiff_driver.Create(tif_filename,
                             src_ds.RasterXSize, src_ds.RasterYSize, 1)

tif_ds.GetRasterBand(1).SetRasterColorTable(ct)

# ----------------------------------------------------------------------------
# We should copy projection information and so forth at this point.

tif_ds.SetProjection(src_ds.GetProjection())
tif_ds.SetGeoTransform(src_ds.GetGeoTransform())

if src_ds.GetGCPCount() > 0:
    tif_ds.SetGCPs(src_ds.GetGCPs(), src_ds.GetGCPProjection())

# ----------------------------------------------------------------------------
# Actually transfer and dither the data.

err = gdal.DitherRGB2PCT(src_ds.GetRasterBand(1),
                         src_ds.GetRasterBand(2),
                         src_ds.GetRasterBand(3),
                         tif_ds.GetRasterBand(1),
                         ct,
                         callback=gdal.TermProgress_nocb)

tif_ds = None

# If the requested format was not GTiff, copy the temporary TIFF into the
# final destination with the requested driver, then clean up.
if tif_filename != dst_filename:
    tif_ds = gdal.Open(tif_filename)
    dst_driver.CreateCopy(dst_filename, tif_ds)
    tif_ds = None
    gtiff_driver.Delete(tif_filename)
/Flask-Scaffold-0.5.1.tar.gz/Flask-Scaffold-0.5.1/app/templates/static/node_modules/angular-grid/src/ts/toolPanel/columnSelectionPanel.ts |
module awk.grid {

    var utils = Utils;
    var svgFactory = SvgFactory.getInstance();

    /**
     * Tool-panel widget that lists every grid column, letting the user toggle
     * column visibility via the eye icons and reorder columns by dragging
     * entries within the list. Stays in sync with the ColumnController.
     */
    export class ColumnSelectionPanel {

        private gridOptionsWrapper: GridOptionsWrapper;
        private columnController: ColumnController;
        // AgList instance backing the rendered column list.
        private cColumnList: any;
        // BorderLayout exposing this panel's GUI; read by the host container.
        layout: any;
        // NOTE(review): eRootPanel is never assigned anywhere in this class,
        // so getGui() below looks like it would fail at runtime -- confirm
        // against the rest of the project.
        private eRootPanel: any;

        constructor(columnController: ColumnController, gridOptionsWrapper: GridOptionsWrapper) {
            this.gridOptionsWrapper = gridOptionsWrapper;
            this.setupComponents();
            this.columnController = columnController;
            // Keep the list in sync with any change in the column model.
            this.columnController.addChangeListener(this.columnsChanged.bind(this));
        }

        // Reloads the list model with the current set of columns.
        private columnsChanged() {
            this.cColumnList.setModel(this.columnController.getAllColumns());
        }

        // Identifier used by the drag & drop machinery for this list.
        public getDragSource() {
            return this.cColumnList.getUniqueId();
        }

        // Renders one list cell for a column (params.value): a show icon and a
        // hide icon (only one visible at a time) followed by the column's
        // display name. Clicking the icons toggles the column's visibility.
        private columnCellRenderer(params: any) {
            var column = params.value;
            var colDisplayName = this.columnController.getDisplayNameForCol(column);

            var eResult = document.createElement('span');

            var eVisibleIcons = document.createElement('span');
            utils.addCssClass(eVisibleIcons, 'ag-visible-icons');
            var eShowing = utils.createIcon('columnVisible', this.gridOptionsWrapper, column, svgFactory.createColumnShowingSvg);
            var eHidden = utils.createIcon('columnHidden', this.gridOptionsWrapper, column, svgFactory.createColumnHiddenSvg);
            eVisibleIcons.appendChild(eShowing);
            eVisibleIcons.appendChild(eHidden);
            // Show exactly one of the two icons depending on visibility.
            eShowing.style.display = column.visible ? '' : 'none';
            eHidden.style.display = column.visible ? 'none' : '';
            eResult.appendChild(eVisibleIcons);

            var eValue = document.createElement('span');
            eValue.innerHTML = colDisplayName;
            eResult.appendChild(eValue);

            // Style hidden columns differently in the list.
            if (!column.visible) {
                utils.addCssClass(eResult, 'ag-column-not-visible');
            }

            // change visible if use clicks the visible icon, or if row is double clicked
            eVisibleIcons.addEventListener('click', showEventListener);
            var that = this;

            function showEventListener() {
                that.columnController.setColumnVisible(column, !column.visible);
            }

            return eResult;
        }

        // Builds the AgList and wraps it, under a localized "Columns" title,
        // in the BorderLayout stored in this.layout.
        private setupComponents() {
            this.cColumnList = new AgList();
            this.cColumnList.setCellRenderer(this.columnCellRenderer.bind(this));
            this.cColumnList.addStyles({height: '100%', overflow: 'auto'});
            this.cColumnList.addItemMovedListener(this.onItemMoved.bind(this));
            this.cColumnList.setReadOnly(true);

            var localeTextFunc = this.gridOptionsWrapper.getLocaleTextFunc();
            var columnsLocalText = localeTextFunc('columns', 'Columns');

            var eNorthPanel = document.createElement('div');
            eNorthPanel.innerHTML = '<div style="text-align: center;">' + columnsLocalText + '</div>';

            this.layout = new BorderLayout({
                center: this.cColumnList.getGui(),
                north: eNorthPanel
            });
        }

        // Propagates a drag & drop reorder of the list to the column model.
        private onItemMoved(fromIndex: number, toIndex: number) {
            this.columnController.moveColumn(fromIndex, toIndex);
        }

        public getGui() {
            return this.eRootPanel.getGui();
        }
    }
}
/BioCompass-0.9.2.tar.gz/BioCompass-0.9.2/CONTRIBUTING.rst | .. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/castelao/gene_cluster_network/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
and "help wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
Gene Cluster Network could always use more documentation, whether as part of the
official Gene Cluster Network docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/castelao/gene_cluster_network/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `gene_cluster_network` for local development.
1. Fork the `gene_cluster_network` repo on GitHub.
2. Clone your fork locally::
$ git clone git@github.com:your_name_here/gene_cluster_network.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv gene_cluster_network
$ cd gene_cluster_network/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox::
$ flake8 gene_cluster_network tests
$ python setup.py test or py.test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 2.6, 2.7, 3.3, 3.4 and 3.5, and for PyPy. Check
https://travis-ci.org/castelao/gene_cluster_network/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ py.test tests.test_gene_cluster_network
| PypiClean |
/latchsdk-1.0.0.zip/latch-1.0.0/latch.py | import json
import logging
class Error(object):
    """Wrapper around the "error" member of a Latch API response."""

    def __init__(self, json_data):
        """Keep the 'code' and 'message' fields of *json_data* (a parsed dict)."""
        self.code = json_data['code']
        self.message = json_data['message']

    def get_code(self):
        """Return the numeric error code."""
        return self.code

    def get_message(self):
        """Return the descriptive error message."""
        return self.message

    def to_json(self):
        """Return this error as a plain {"code", "message"} dict."""
        return {"code": self.code, "message": self.message}

    def __repr__(self):
        return json.dumps(self.to_json())

    def __str__(self):
        return repr(self)
class LatchResponse(object):
    '''
    This class models a response from any of the endpoints in the Latch API.

    It consists of a "data" and an "error" elements. Although normally only one of them will be
    present, they are not mutually exclusive, since errors can be non fatal, and therefore a response
    could have valid information in the data field and at the same time inform of an error.

    An empty string is used as the sentinel value for an absent data or error part.
    '''
    def __init__(self, json_string):
        '''
        @param $json_string a json string received from one of the methods of the Latch API
        '''
        json_object = json.loads(json_string)
        if "data" in json_object:
            self.data = json_object["data"]
        else:
            self.data = ""  # empty string is the "no data" sentinel

        if "error" in json_object:
            self.error = Error(json_object["error"])
        else:
            self.error = ""  # empty string is the "no error" sentinel

    def get_data(self):
        '''
        @return JsonObject the data part of the API response ("" if absent)
        '''
        return self.data

    def set_data(self, data):
        '''
        @param $data the data to include in the API response, as a json string (it is parsed here)
        '''
        self.data = json.loads(data)

    def get_error(self):
        '''
        @return Error the error part of the API response, consisting of an error code and an error message ("" if absent)
        '''
        return self.error

    def set_error(self, error):
        '''
        @param $error an error to include in the API response, as a dict with "code" and "message" keys
        '''
        self.error = Error(error)

    def to_json(self):
        '''
        @return a Json object with the data and error parts set if they exist
        '''
        # NOTE(review): both attributes are always set by __init__, so the
        # hasattr() checks only matter if an attribute is deleted elsewhere.
        # Also, "error" is stored as an Error instance rather than a plain
        # dict, so json.dumps() on the returned object will fail -- confirm
        # whether Error.to_json() was intended here.
        json_response = {}
        if hasattr(self, "data"):
            json_response["data"] = self.data
        if hasattr(self, "error"):
            json_response["error"] = self.error
        return json_response;
class Latch(object):
    '''
    Minimal client for the Latch API.

    Holds the application credentials and implements the signed HTTP GET
    requests used by the pairing, unpairing and status endpoints. Connection
    settings (host, port, proxy) are class-level and shared by all instances.
    '''

    API_HOST = "latch.elevenpaths.com"
    API_PORT = 443
    API_HTTPS = True
    API_PROXY = None
    API_PROXY_PORT = None
    API_CHECK_STATUS_URL = "/api/0.6/status"
    API_PAIR_URL = "/api/0.6/pair"
    API_PAIR_WITH_ID_URL = "/api/0.6/pairWithId"
    API_UNPAIR_URL = "/api/0.6/unpair"

    AUTHORIZATION_HEADER_NAME = "Authorization"
    DATE_HEADER_NAME = "X-11Paths-Date"
    AUTHORIZATION_METHOD = "11PATHS"
    AUTHORIZATION_HEADER_FIELD_SEPARATOR = " "

    UTC_STRING_FORMAT = "%Y-%m-%d %H:%M:%S"

    X_11PATHS_HEADER_PREFIX = "X-11paths-"
    X_11PATHS_HEADER_SEPARATOR = ":"

    @staticmethod
    def set_host(host):
        '''
        @param $host The host to be connected with (http://hostname) or (https://hostname)
        '''
        if host.startswith("http://"):
            Latch.API_HOST = host[len("http://"):]
            Latch.API_PORT = 80
            Latch.API_HTTPS = False
        elif host.startswith("https://"):
            Latch.API_HOST = host[len("https://"):]
            Latch.API_PORT = 443
            Latch.API_HTTPS = True

    @staticmethod
    def set_proxy(proxy, port):
        '''
        Enable using a Proxy to connect through
        @param $proxy The proxy server
        @param $port The proxy port number
        '''
        Latch.API_PROXY = proxy
        Latch.API_PROXY_PORT = port

    @staticmethod
    def get_part_from_header(part, header):
        '''
        The custom header consists of three parts, the method, the appId and the signature.
        This method returns the specified part if it exists.
        @param $part The zero indexed part to be returned
        @param $header The HTTP header value from which to extract the part
        @return string the specified part from the header or an empty string if not existent
        '''
        if header:
            parts = header.split(Latch.AUTHORIZATION_HEADER_FIELD_SEPARATOR)
            # Bug fix: the original used ">=", which allowed the out-of-range
            # index parts[len(parts)] and raised IndexError.
            if len(parts) > part:
                return parts[part]
        return ""

    @staticmethod
    def get_auth_method_from_header(authorizationHeader):
        '''
        @param $authorizationHeader Authorization HTTP Header
        @return string the Authorization method. Typical values are "Basic", "Digest" or "11PATHS"
        '''
        return Latch.get_part_from_header(0, authorizationHeader)

    @staticmethod
    def get_app_id_from_header(authorizationHeader):
        '''
        @param $authorizationHeader Authorization HTTP Header
        @return string the requesting application Id. Identifies the application using the API
        '''
        return Latch.get_part_from_header(1, authorizationHeader)

    @staticmethod
    def get_signature_from_header(authorizationHeader):
        '''
        @param $authorizationHeader Authorization HTTP Header
        @return string the signature of the current request. Verifies the identity of the application using the API
        '''
        return Latch.get_part_from_header(2, authorizationHeader)

    def __init__(self, appId, secretKey):
        '''
        Create an instance of the class with the Application ID and secret obtained from Eleven Paths
        @param $appId
        @param $secretKey
        '''
        self.appId = appId
        self.secretKey = secretKey

    def _http_get(self, url, xHeaders=None):
        '''
        HTTP GET Request to the specified API endpoint
        @param $url the endpoint path (query string included)
        @param $xHeaders optional 11-paths specific HTTP headers
        @return LatchResponse (empty on any connection/parsing failure)
        '''
        try:
            import http.client as http  # Python 3
        except ImportError:
            import httplib as http  # Python 2

        authHeaders = self.authentication_headers("GET", url, xHeaders)

        if Latch.API_PROXY is not None:
            if Latch.API_HTTPS:
                conn = http.HTTPSConnection(Latch.API_PROXY, Latch.API_PROXY_PORT)
                conn.set_tunnel(Latch.API_HOST, Latch.API_PORT)
            else:
                conn = http.HTTPConnection(Latch.API_PROXY, Latch.API_PROXY_PORT)
                url = "http://" + Latch.API_HOST + url
        else:
            if Latch.API_HTTPS:
                conn = http.HTTPSConnection(Latch.API_HOST, Latch.API_PORT)
            else:
                conn = http.HTTPConnection(Latch.API_HOST, Latch.API_PORT)

        try:
            conn.request("GET", url, headers=authHeaders)
            response = conn.getresponse()
            responseData = response.read().decode('utf8')
            conn.close()
            ret = LatchResponse(responseData)
        except Exception:
            # Best-effort behaviour kept from the original: return an empty
            # response instead of raising, but log the failure and no longer
            # swallow non-Exception interrupts with a bare "except:".
            logging.exception("Error performing GET request to %s", url)
            ret = LatchResponse("{}")
        return ret

    def pairWithId(self, accountId):
        '''Pair an account using its account id.'''
        return self._http_get(self.API_PAIR_WITH_ID_URL + "/" + accountId)

    def pair(self, token):
        '''Pair an account using a pairing token generated by the user.'''
        return self._http_get(self.API_PAIR_URL + "/" + token)

    def status(self, accountId):
        '''Query the latch status of a paired account.'''
        return self._http_get(self.API_CHECK_STATUS_URL + "/" + accountId)

    def operationStatus(self, accountId, operationId):
        '''Query the latch status of a specific operation of a paired account.'''
        return self._http_get(self.API_CHECK_STATUS_URL + "/" + accountId + "/op/" + operationId)

    def unpair(self, accountId):
        '''Unpair a previously paired account.'''
        return self._http_get(self.API_UNPAIR_URL + "/" + accountId)

    def sign_data(self, data):
        '''
        @param $data the string to sign
        @return string base64 encoding of the HMAC-SHA1 hash of the data parameter using {@code secretKey} as cipher key.
        '''
        from hashlib import sha1
        import hmac
        import binascii

        sha1_hash = hmac.new(self.secretKey.encode(), data.encode(), sha1)
        # b2a_base64 appends a trailing newline; drop it.
        return binascii.b2a_base64(sha1_hash.digest())[:-1].decode('utf8')

    def authentication_headers(self, HTTPMethod, queryString, xHeaders=None, utc=None):
        '''
        Calculate the authentication headers to be sent with a request to the API
        @param $HTTPMethod the HTTP Method, currently only GET is supported
        @param $queryString the urlencoded string including the path (from the first forward slash) and the parameters
        @param $xHeaders HTTP headers specific to the 11-paths API. None if not needed.
        @param $utc the Universal Coordinated Time for the Date HTTP header
        @return dict a map with the Authorization and Date headers needed to sign a Latch API request
        '''
        if not utc:
            utc = Latch.get_current_UTC()
        utc = utc.strip()

        serializedHeaders = self.get_serialized_headers(xHeaders)
        if serializedHeaders is None:
            # Invalid xHeaders: sign without them instead of crashing on
            # string concatenation with None.
            serializedHeaders = ""

        stringToSign = (HTTPMethod.upper().strip() + "\n" +
                        utc + "\n" +
                        serializedHeaders + "\n" +
                        queryString.strip())

        authorizationHeader = (Latch.AUTHORIZATION_METHOD +
                               Latch.AUTHORIZATION_HEADER_FIELD_SEPARATOR +
                               self.appId +
                               Latch.AUTHORIZATION_HEADER_FIELD_SEPARATOR +
                               self.sign_data(stringToSign))

        return {
            Latch.AUTHORIZATION_HEADER_NAME: authorizationHeader,
            Latch.DATE_HEADER_NAME: utc,
        }

    def get_serialized_headers(self, xHeaders):
        '''
        Prepares and returns a string ready to be signed from the 11-paths specific HTTP headers received
        @param $xHeaders a non neccesarily ordered map (dict without duplicates) of the HTTP headers to be ordered.
        @return string The serialized headers, an empty string if no headers are passed, or None if there's a problem such as non 11paths specific headers
        '''
        if not xHeaders:
            return ""
        # Lowercase the header names, then serialize them in lexicographical
        # order as "name:value" pairs separated by single spaces.
        # Bug fix: the original called dict.sort(), str.startsWith() and
        # dict.iteritems(), and unpacked keys as pairs -- none of which works
        # on Python 3 dicts/strings.
        headers = dict((k.lower(), v) for k, v in xHeaders.items())
        serialized = []
        for key in sorted(headers):
            if not key.startswith(Latch.X_11PATHS_HEADER_PREFIX.lower()):
                logging.error("Error serializing headers. Only specific " + Latch.X_11PATHS_HEADER_PREFIX + " headers need to be singed")
                return None
            serialized.append(key + Latch.X_11PATHS_HEADER_SEPARATOR + headers[key])
        return " ".join(serialized)

    @staticmethod
    def get_current_UTC():
        '''
        @return a string representation of the current time in UTC to be used in a Date HTTP Header
        '''
        import time
        return time.strftime(Latch.UTC_STRING_FORMAT, time.gmtime())
/OASYS1_HALF_SRW-0.0.3-py3-none-any.whl/orangecontrib/srw/widgets/gui/ow_srw_optical_element.py | import os, numpy
from numpy import nan
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPalette, QColor, QFont, QPixmap
from PyQt5.QtWidgets import QMessageBox, QDialogButtonBox, QDialog, QLabel, QSizePolicy
import orangecanvas.resources as resources
from orangewidget import gui
from orangewidget import widget
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from oasys.widgets.gui import ConfirmDialog
from oasys.util.oasys_util import TriggerIn, TriggerOut
from syned.widget.widget_decorator import WidgetDecorator
from syned.beamline.element_coordinates import ElementCoordinates
from syned.beamline.beamline_element import BeamlineElement
from wofry.propagator.propagator import PropagationManager, PropagationElements, PropagationParameters
from wofrysrw.propagator.wavefront2D.srw_wavefront import SRWWavefront, PolarizationComponent, WavefrontPropagationParameters, WavefrontPropagationOptionalParameters
from wofrysrw.propagator.propagators2D.srw_propagation_mode import SRWPropagationMode
from wofrysrw.propagator.propagators2D.srw_fresnel_native import FresnelSRWNative, SRW_APPLICATION
from wofrysrw.propagator.propagators2D.srw_fresnel_wofry import FresnelSRWWofry
from wofrysrw.beamline.optical_elements.srw_optical_element import SRWOpticalElementDisplacement
from orangecontrib.srw.util.srw_objects import SRWData
from orangecontrib.srw.widgets.gui.ow_srw_wavefront_viewer import SRWWavefrontViewer
from wofrysrw.beamline.optical_elements.srw_optical_element import Orientation
class OWSRWOpticalElement(SRWWavefrontViewer, WidgetDecorator):
    # Base Orange widget for SRW optical elements.  Concrete elements subclass
    # this and implement draw_specific_box() (GUI) and get_optical_element()
    # (model object used by propagate_wavefront()).

    maintainer = "Luca Rebuffi"
    maintainer_email = "lrebuffi(@at@)anl.gov"
    keywords = ["data", "file", "load", "read"]
    category = "SRW Optical Elements"

    # Output channels: the propagated SRW data, plus a trigger used as a
    # feedback signal to start a new beam simulation upstream.
    outputs = [{"name":"SRWData",
                "type":SRWData,
                "doc":"SRW Optical Element Data",
                "id":"data"},
               {"name":"Trigger",
                "type": TriggerIn,
                "doc":"Feedback signal to start a new beam simulation",
                "id":"Trigger"}]

    # Input channels: incoming SRW data, a trigger (scanning loops), and the
    # standard syned beamline data input provided by WidgetDecorator.
    inputs = [("SRWData", SRWData, "set_input"),
              ("Trigger", TriggerOut, "propagate_new_wavefront"),
              WidgetDecorator.syned_input_data()[0]]

    oe_name = None  # optional element name; propagate_wavefront() falls back to the window title

    # --- element coordinates and orientation (persisted widget Settings) ---
    p = Setting(0.0)                       # distance from previous continuation plane [m]
    q = Setting(0.0)                       # distance to next continuation plane [m]
    angle_radial = Setting(0.0)            # incident angle to the normal [deg]
    angle_azimuthal = Setting(0.0)         # rotation along the beam axis [deg]
    orientation_azimuthal = Setting(0)     # combo index: Up/Down/Left/Right (see check_data)
    invert_tangent_component = Setting(0)  # combo index: No/Yes
    angle_radial_mrad = Setting(0.0)       # same incidence angle, measured from the surface [mrad]
    shape = Setting(0)
    surface_shape = Setting(0)

    # --- SRW wavefront-propagation parameters: drift space BEFORE the element ---
    drift_before_auto_resize_before_propagation = Setting(0)
    drift_before_auto_resize_after_propagation = Setting(0)
    drift_before_relative_precision_for_propagation_with_autoresizing = Setting(1.0)
    drift_before_allow_semianalytical_treatment_of_quadratic_phase_term = Setting(1)
    drift_before_do_any_resizing_on_fourier_side_using_fft = Setting(0)
    drift_before_horizontal_range_modification_factor_at_resizing = Setting(1.0)
    drift_before_horizontal_resolution_modification_factor_at_resizing = Setting(1.0)
    drift_before_vertical_range_modification_factor_at_resizing = Setting(1.0)
    drift_before_vertical_resolution_modification_factor_at_resizing = Setting(1.0)
    drift_before_type_of_wavefront_shift_before_resizing = Setting(0)
    drift_before_new_horizontal_wavefront_center_position_after_shift = Setting(0)
    drift_before_new_vertical_wavefront_center_position_after_shift = Setting(0)
    drift_before_orientation_of_the_output_optical_axis_vector_x = Setting(0.0)
    drift_before_orientation_of_the_output_optical_axis_vector_y = Setting(0.0)
    drift_before_orientation_of_the_output_optical_axis_vector_z = Setting(0.0)
    drift_before_orientation_of_the_horizontal_base_vector_x = Setting(0.0)
    drift_before_orientation_of_the_horizontal_base_vector_y = Setting(0.0)

    # --- SRW wavefront-propagation parameters: drift space AFTER the element ---
    drift_auto_resize_before_propagation = Setting(0)
    drift_auto_resize_after_propagation = Setting(0)
    drift_relative_precision_for_propagation_with_autoresizing = Setting(1.0)
    drift_allow_semianalytical_treatment_of_quadratic_phase_term = Setting(1)
    drift_do_any_resizing_on_fourier_side_using_fft = Setting(0)
    drift_horizontal_range_modification_factor_at_resizing = Setting(1.0)
    drift_horizontal_resolution_modification_factor_at_resizing = Setting(1.0)
    drift_vertical_range_modification_factor_at_resizing = Setting(1.0)
    drift_vertical_resolution_modification_factor_at_resizing = Setting(1.0)
    drift_type_of_wavefront_shift_before_resizing = Setting(0)
    drift_new_horizontal_wavefront_center_position_after_shift = Setting(0)
    drift_new_vertical_wavefront_center_position_after_shift = Setting(0)
    drift_after_orientation_of_the_output_optical_axis_vector_x = Setting(0.0)
    drift_after_orientation_of_the_output_optical_axis_vector_y = Setting(0.0)
    drift_after_orientation_of_the_output_optical_axis_vector_z = Setting(0.0)
    drift_after_orientation_of_the_horizontal_base_vector_x = Setting(0.0)
    drift_after_orientation_of_the_horizontal_base_vector_y = Setting(0.0)

    # --- SRW wavefront-propagation parameters: the optical element itself ---
    oe_auto_resize_before_propagation = Setting(0)
    oe_auto_resize_after_propagation = Setting(0)
    oe_relative_precision_for_propagation_with_autoresizing = Setting(1.0)
    oe_allow_semianalytical_treatment_of_quadratic_phase_term = Setting(0)
    oe_do_any_resizing_on_fourier_side_using_fft = Setting(0)
    oe_horizontal_range_modification_factor_at_resizing = Setting(1.0)
    oe_horizontal_resolution_modification_factor_at_resizing = Setting(1.0)
    oe_vertical_range_modification_factor_at_resizing = Setting(1.0)
    oe_vertical_resolution_modification_factor_at_resizing = Setting(1.0)
    oe_type_of_wavefront_shift_before_resizing = Setting(0)
    oe_new_horizontal_wavefront_center_position_after_shift = Setting(0)
    oe_new_vertical_wavefront_center_position_after_shift = Setting(0)
    oe_orientation_of_the_output_optical_axis_vector_x = Setting(0.0)
    oe_orientation_of_the_output_optical_axis_vector_y = Setting(0.0)
    oe_orientation_of_the_output_optical_axis_vector_z = Setting(0.0)
    oe_orientation_of_the_horizontal_base_vector_x = Setting(0.0)
    oe_orientation_of_the_horizontal_base_vector_y = Setting(0.0)

    # --- optional element displacement (misalignment) ---
    has_displacement = Setting(0)  # combo index: No/Yes
    shift_x = Setting(0.0)         # horizontal shift [m]
    shift_y = Setting(0.0)         # vertical shift [m]
    rotation_x = Setting(0.0)      # rotation around vertical axis [deg] (see __init__ labels)
    rotation_y = Setting(0.0)      # rotation around horizontal axis [deg]

    input_srw_data = None  # last SRWData received on the input channel

    # GUI-configuration flags; class-level defaults, overwritten per instance
    # by the __init__ keyword arguments.
    has_orientation_angles=True
    has_oe_wavefront_propagation_parameters_tab = True
    azimuth_hor_vert=False
    has_p = True
    has_q = True
    check_positive_distances = True
    has_displacement_tab=True

    # Fixed pixel sizes of the control area.
    TABS_AREA_HEIGHT = 555
    CONTROL_AREA_WIDTH = 405
def __init__(self, has_orientation_angles=True, azimuth_hor_vert=False, has_p=True, has_q=True, check_positive_distances=True, has_oe_wavefront_propagation_parameters_tab=True, has_displacement_tab=True):
    """Build the widget GUI.

    Creates the action buttons, the "Coordinates" box, the three wavefront
    propagation parameter tabs (drift before / optical element / drift after)
    and, optionally, the "Displacement" tab.  The boolean flags let subclasses
    hide the parts of the GUI that do not apply to their element.
    """
    super().__init__()

    # Store the GUI-configuration flags on the instance (shadowing the
    # class-level defaults).
    self.has_orientation_angles=has_orientation_angles
    self.azimuth_hor_vert=azimuth_hor_vert
    self.has_p = has_p
    self.has_q = has_q
    self.check_positive_distances = check_positive_distances
    self.has_oe_wavefront_propagation_parameters_tab = has_oe_wavefront_propagation_parameters_tab
    self.has_displacement_tab=has_displacement_tab

    # Canvas menu action to run the propagation.
    self.runaction = widget.OWAction("Propagate Wavefront", self)
    self.runaction.triggered.connect(self.propagate_wavefront)
    self.addAction(self.runaction)

    button_box = oasysgui.widgetBox(self.controlArea, "", addSpace=False, orientation="horizontal")

    # "Propagate Wavefront" button: bold, dark blue.
    button = gui.button(button_box, self, "Propagate Wavefront", callback=self.propagate_wavefront)
    font = QFont(button.font())
    font.setBold(True)
    button.setFont(font)
    palette = QPalette(button.palette()) # make a copy of the palette
    palette.setColor(QPalette.ButtonText, QColor('Dark Blue'))
    button.setPalette(palette) # assign new palette
    button.setFixedHeight(45)

    # "Reset Fields" button: italic, dark red.
    button = gui.button(button_box, self, "Reset Fields", callback=self.callResetSettings)
    font = QFont(button.font())
    font.setItalic(True)
    button.setFont(font)
    palette = QPalette(button.palette()) # make a copy of the palette
    palette.setColor(QPalette.ButtonText, QColor('Dark Red'))
    button.setPalette(palette) # assign new palette
    button.setFixedHeight(45)
    button.setFixedWidth(150)

    gui.separator(self.controlArea)

    self.controlArea.setFixedWidth(self.CONTROL_AREA_WIDTH)

    # Top-level tab widget of the control area.
    self.tabs_setting = oasysgui.tabWidget(self.controlArea)
    self.tabs_setting.setFixedHeight(self.TABS_AREA_HEIGHT)
    self.tabs_setting.setFixedWidth(self.CONTROL_AREA_WIDTH-5)

    self.tab_bas = oasysgui.createTabPage(self.tabs_setting, "Optical Element")
    self.tab_pro = oasysgui.createTabPage(self.tabs_setting, "Wavefront Propagation")
    if self.has_displacement_tab: self.tab_dis = oasysgui.createTabPage(self.tabs_setting, "Displacement")

    # Element coordinates: p/q distances and orientation angles.
    self.coordinates_box = oasysgui.widgetBox(self.tab_bas, "Coordinates", addSpace=True, orientation="vertical")

    if self.has_p:
        oasysgui.lineEdit(self.coordinates_box, self, "p", "Distance from previous Continuation Plane [m]", labelWidth=280, valueType=float, orientation="horizontal",
                          callback=self.set_p)
    if self.has_q:
        oasysgui.lineEdit(self.coordinates_box, self, "q", "Distance to next Continuation Plane [m]", labelWidth=280, valueType=float, orientation="horizontal",
                          callback=self.set_q)

    if self.has_orientation_angles:
        # The two incidence-angle fields are kept in sync via their callbacks:
        # degrees from the normal <-> milliradians from the surface.
        self.le_angle_radial = oasysgui.lineEdit(self.coordinates_box, self, "angle_radial", "Incident Angle (to normal) [deg]", labelWidth=220, valueType=float, orientation="horizontal", callback=self.calculate_angle_radial_mrad)
        self.le_angle_radial_mrad = oasysgui.lineEdit(self.coordinates_box, self, "angle_radial_mrad", "Incident Angle (from surface) [mrad]", labelWidth=220, valueType=float, orientation="horizontal", callback=self.calculate_angle_radial_deg)

        self.calculate_angle_radial_mrad()

        if self.azimuth_hor_vert:
            # Azimuth restricted to the four cardinal orientations
            # (mapped to degrees in check_data()).
            gui.comboBox(self.coordinates_box, self, "orientation_azimuthal", label="Orientation of central normal vector",
                         items=["Up", "Down", "Left", "Right"], labelWidth=300,
                         sendSelectedValue=False, orientation="horizontal")
            gui.comboBox(self.coordinates_box, self, "invert_tangent_component", label="Invert Tangent Component",
                         items=["No", "Yes"], labelWidth=300,
                         sendSelectedValue=False, orientation="horizontal")
        else:
            oasysgui.lineEdit(self.coordinates_box, self, "angle_azimuthal", "Rotation along Beam Axis [deg]", labelWidth=280, valueType=float, orientation="horizontal")

    # Element-specific controls, provided by the subclass.
    self.draw_specific_box()

    # Sub-tabs with the SRW propagation parameters.
    self.tabs_prop_setting = oasysgui.tabWidget(self.tab_pro)
    self.tabs_prop_setting.setFixedWidth(self.CONTROL_AREA_WIDTH-10)

    if self.has_p: self.tab_drift_before = oasysgui.createTabPage(self.tabs_prop_setting, "Drift Space Before")
    if self.has_oe_wavefront_propagation_parameters_tab: self.tab_oe = oasysgui.createTabPage(self.tabs_prop_setting, "Optical Element")
    if self.has_q: self.tab_drift = oasysgui.createTabPage(self.tabs_prop_setting, "Drift Space After")

    # Enable/disable the drift tabs according to the current p/q values.
    if self.has_p: self.set_p()
    if self.has_q: self.set_q()

    # DRIFT SPACE (before the element)
    if self.has_p:
        gui.comboBox(self.tab_drift_before, self, "drift_before_auto_resize_before_propagation", label="Auto Resize Before Propagation",
                     items=["No", "Yes"], labelWidth=300,
                     sendSelectedValue=False, orientation="horizontal")
        gui.comboBox(self.tab_drift_before, self, "drift_before_auto_resize_after_propagation", label="Auto Resize After Propagation",
                     items=["No", "Yes"], labelWidth=300,
                     sendSelectedValue=False, orientation="horizontal")
        oasysgui.lineEdit(self.tab_drift_before, self, "drift_before_relative_precision_for_propagation_with_autoresizing", "Relative precision for propagation with\nautoresizing (1.0 is nominal)", labelWidth=300, valueType=float, orientation="horizontal")

        propagator_box = oasysgui.widgetBox(self.tab_drift_before, "", addSpace=False, orientation="horizontal")

        gui.comboBox(propagator_box, self, "drift_before_allow_semianalytical_treatment_of_quadratic_phase_term", label="Propagator",
                     items=["Standard", "Quadratic Term", "Quadratic Term Special", "From Waist", "To Waist"], labelWidth=200,
                     sendSelectedValue=False, orientation="horizontal")

        gui.button(propagator_box, self, "?", width=20, callback=self.show_propagator_info)

        gui.comboBox(self.tab_drift_before, self, "drift_before_do_any_resizing_on_fourier_side_using_fft", label="Do any resizing on fourier side using fft",
                     items=["No", "Yes"], labelWidth=300,
                     sendSelectedValue=False, orientation="horizontal")
        oasysgui.lineEdit(self.tab_drift_before, self, "drift_before_horizontal_range_modification_factor_at_resizing", "H range modification factor at resizing", labelWidth=300, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(self.tab_drift_before, self, "drift_before_horizontal_resolution_modification_factor_at_resizing", "H resolution modification factor at resizing", labelWidth=300, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(self.tab_drift_before, self, "drift_before_vertical_range_modification_factor_at_resizing", "V range modification factor at resizing", labelWidth=300, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(self.tab_drift_before, self, "drift_before_vertical_resolution_modification_factor_at_resizing", "V resolution modification factor at resizing", labelWidth=300, valueType=float, orientation="horizontal")

        # not yet used by SRW
        #oasysgui.lineEdit(self.tab_drift_before, self, "drift_before_type_of_wavefront_shift_before_resizing", "Type of wavefront shift before resizing", labelWidth=300, valueType=int, orientation="horizontal")
        #oasysgui.lineEdit(self.tab_drift_before, self, "drift_before_new_horizontal_wavefront_center_position_after_shift", "New horizontal wavefront center position [m]", labelWidth=300, valueType=float, orientation="horizontal")
        #oasysgui.lineEdit(self.tab_drift_before, self, "drift_before_new_vertical_wavefront_center_position_after_shift", "New vertical wavefront center position [m]", labelWidth=300, valueType=float, orientation="horizontal")

        drift_before_optional_box = oasysgui.widgetBox(self.tab_drift_before, "Optional", addSpace=False, orientation="vertical")

        oasysgui.lineEdit(drift_before_optional_box, self, "drift_before_orientation_of_the_output_optical_axis_vector_x", "Orientation of the Output Optical Axis vector\nin the Incident Beam Frame: X", labelWidth=290, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(drift_before_optional_box, self, "drift_before_orientation_of_the_output_optical_axis_vector_y", "Orientation of the Output Optical Axis vector\nin the Incident Beam Frame: Y", labelWidth=290, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(drift_before_optional_box, self, "drift_before_orientation_of_the_output_optical_axis_vector_z", "Orientation of the Output Optical Axis vector\nin the Incident Beam Frame: Z", labelWidth=290, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(drift_before_optional_box, self, "drift_before_orientation_of_the_horizontal_base_vector_x" , "Orientation of the Horizontal Base vector of the\nOutput Frame in the Incident Beam Frame: X", labelWidth=290, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(drift_before_optional_box, self, "drift_before_orientation_of_the_horizontal_base_vector_y" , "Orientation of the Horizontal Base vector of the\nOutput Frame in the Incident Beam Frame: Y", labelWidth=290, valueType=float, orientation="horizontal")

    # OE
    if self.has_oe_wavefront_propagation_parameters_tab:
        gui.comboBox(self.tab_oe, self, "oe_auto_resize_before_propagation", label="Auto Resize Before Propagation",
                     items=["No", "Yes"], labelWidth=300,
                     sendSelectedValue=False, orientation="horizontal")
        gui.comboBox(self.tab_oe, self, "oe_auto_resize_after_propagation", label="Auto Resize After Propagation",
                     items=["No", "Yes"], labelWidth=300,
                     sendSelectedValue=False, orientation="horizontal")
        oasysgui.lineEdit(self.tab_oe, self, "oe_relative_precision_for_propagation_with_autoresizing", "Relative precision for propagation with\nautoresizing (1.0 is nominal)", labelWidth=300, valueType=float, orientation="horizontal")

        propagator_box = oasysgui.widgetBox(self.tab_oe, "", addSpace=False, orientation="horizontal")

        gui.comboBox(propagator_box, self, "oe_allow_semianalytical_treatment_of_quadratic_phase_term", label="Propagator",
                     items=["Standard", "Quadratic Term", "Quadratic Term Special", "From Waist", "To Waist"], labelWidth=200,
                     sendSelectedValue=False, orientation="horizontal")

        gui.button(propagator_box, self, "?", width=20, callback=self.show_propagator_info)

        gui.comboBox(self.tab_oe, self, "oe_do_any_resizing_on_fourier_side_using_fft", label="Do any resizing on fourier side using fft",
                     items=["No", "Yes"], labelWidth=300,
                     sendSelectedValue=False, orientation="horizontal")
        oasysgui.lineEdit(self.tab_oe, self, "oe_horizontal_range_modification_factor_at_resizing", "H range modification factor at resizing", labelWidth=300, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(self.tab_oe, self, "oe_horizontal_resolution_modification_factor_at_resizing", "H resolution modification factor at resizing", labelWidth=300, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(self.tab_oe, self, "oe_vertical_range_modification_factor_at_resizing", "V range modification factor at resizing", labelWidth=300, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(self.tab_oe, self, "oe_vertical_resolution_modification_factor_at_resizing", "V resolution modification factor at resizing", labelWidth=300, valueType=float, orientation="horizontal")

        # not yet used by SRW
        #oasysgui.lineEdit(self.tab_oe, self, "oe_type_of_wavefront_shift_before_resizing", "Type of wavefront shift before resizing", labelWidth=300, valueType=int, orientation="horizontal")
        #oasysgui.lineEdit(self.tab_oe, self, "oe_new_horizontal_wavefront_center_position_after_shift", "New horizontal wavefront center position [m]", labelWidth=300, valueType=float, orientation="horizontal")
        #oasysgui.lineEdit(self.tab_oe, self, "oe_new_vertical_wavefront_center_position_after_shift", "New vertical wavefront center position [m]", labelWidth=300, valueType=float, orientation="horizontal")

        oe_optional_box = oasysgui.widgetBox(self.tab_oe, "Optional", addSpace=False, orientation="vertical")

        oasysgui.lineEdit(oe_optional_box, self, "oe_orientation_of_the_output_optical_axis_vector_x", "Orientation of the Output Optical Axis vector\nin the Incident Beam Frame: X", labelWidth=290, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(oe_optional_box, self, "oe_orientation_of_the_output_optical_axis_vector_y", "Orientation of the Output Optical Axis vector\nin the Incident Beam Frame: Y", labelWidth=290, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(oe_optional_box, self, "oe_orientation_of_the_output_optical_axis_vector_z", "Orientation of the Output Optical Axis vector\nin the Incident Beam Frame: Z", labelWidth=290, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(oe_optional_box, self, "oe_orientation_of_the_horizontal_base_vector_x" , "Orientation of the Horizontal Base vector of the\nOutput Frame in the Incident Beam Frame: X", labelWidth=290, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(oe_optional_box, self, "oe_orientation_of_the_horizontal_base_vector_y" , "Orientation of the Horizontal Base vector of the\nOutput Frame in the Incident Beam Frame: Y", labelWidth=290, valueType=float, orientation="horizontal")

    # DRIFT SPACE (after the element)
    if self.has_q:
        gui.comboBox(self.tab_drift, self, "drift_auto_resize_before_propagation", label="Auto Resize Before Propagation",
                     items=["No", "Yes"], labelWidth=300,
                     sendSelectedValue=False, orientation="horizontal")
        gui.comboBox(self.tab_drift, self, "drift_auto_resize_after_propagation", label="Auto Resize After Propagation",
                     items=["No", "Yes"], labelWidth=300,
                     sendSelectedValue=False, orientation="horizontal")
        oasysgui.lineEdit(self.tab_drift, self, "drift_relative_precision_for_propagation_with_autoresizing", "Relative precision for propagation with\nautoresizing (1.0 is nominal)", labelWidth=300, valueType=float, orientation="horizontal")

        propagator_box = oasysgui.widgetBox(self.tab_drift, "", addSpace=False, orientation="horizontal")

        gui.comboBox(propagator_box, self, "drift_allow_semianalytical_treatment_of_quadratic_phase_term", label="Propagator",
                     items=["Standard", "Quadratic Term", "Quadratic Term Special", "From Waist", "To Waist"], labelWidth=200,
                     sendSelectedValue=False, orientation="horizontal")

        gui.button(propagator_box, self, "?", width=20, callback=self.show_propagator_info)

        gui.comboBox(self.tab_drift, self, "drift_do_any_resizing_on_fourier_side_using_fft", label="Do any resizing on fourier side using fft",
                     items=["No", "Yes"], labelWidth=300,
                     sendSelectedValue=False, orientation="horizontal")
        oasysgui.lineEdit(self.tab_drift, self, "drift_horizontal_range_modification_factor_at_resizing", "H range modification factor at resizing", labelWidth=300, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(self.tab_drift, self, "drift_horizontal_resolution_modification_factor_at_resizing", "H resolution modification factor at resizing", labelWidth=300, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(self.tab_drift, self, "drift_vertical_range_modification_factor_at_resizing", "V range modification factor at resizing", labelWidth=300, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(self.tab_drift, self, "drift_vertical_resolution_modification_factor_at_resizing", "V resolution modification factor at resizing", labelWidth=300, valueType=float, orientation="horizontal")

        # not yet used by SRW
        #oasysgui.lineEdit(self.tab_drift, self, "drift_type_of_wavefront_shift_before_resizing", "Type of wavefront shift before resizing", labelWidth=300, valueType=int, orientation="horizontal")
        #oasysgui.lineEdit(self.tab_drift, self, "drift_new_horizontal_wavefront_center_position_after_shift", "New horizontal wavefront center position [m]", labelWidth=300, valueType=float, orientation="horizontal")
        #oasysgui.lineEdit(self.tab_drift, self, "drift_new_vertical_wavefront_center_position_after_shift", "New vertical wavefront center position [m]", labelWidth=300, valueType=float, orientation="horizontal")

        drift_optional_box = oasysgui.widgetBox(self.tab_drift, "Optional", addSpace=False, orientation="vertical")

        oasysgui.lineEdit(drift_optional_box, self, "drift_after_orientation_of_the_output_optical_axis_vector_x", "Orientation of the Output Optical Axis vector\nin the Incident Beam Frame: X", labelWidth=290, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(drift_optional_box, self, "drift_after_orientation_of_the_output_optical_axis_vector_y", "Orientation of the Output Optical Axis vector\nin the Incident Beam Frame: Y", labelWidth=290, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(drift_optional_box, self, "drift_after_orientation_of_the_output_optical_axis_vector_z", "Orientation of the Output Optical Axis vector\nin the Incident Beam Frame: Z", labelWidth=290, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(drift_optional_box, self, "drift_after_orientation_of_the_horizontal_base_vector_x" , "Orientation of the Horizontal Base vector of the\nOutput Frame in the Incident Beam Frame: X", labelWidth=290, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(drift_optional_box, self, "drift_after_orientation_of_the_horizontal_base_vector_y" , "Orientation of the Horizontal Base vector of the\nOutput Frame in the Incident Beam Frame: Y", labelWidth=290, valueType=float, orientation="horizontal")

    #DISPLACEMENTS
    if self.has_displacement_tab:
        gui.comboBox(self.tab_dis, self, "has_displacement", label="Has Displacement",
                     items=["No", "Yes"], labelWidth=280,
                     sendSelectedValue=False, orientation="horizontal", callback=self.set_displacement)

        gui.separator(self.tab_dis)

        # Two stacked boxes; set_displacement() toggles which one is visible.
        self.displacement_box = oasysgui.widgetBox(self.tab_dis, "", addSpace=False, orientation="vertical", height=250)
        self.displacement_box_empty = oasysgui.widgetBox(self.tab_dis, "", addSpace=False, orientation="vertical", height=250)

        shift_box = oasysgui.widgetBox(self.displacement_box, "Shift", addSpace=False, orientation="vertical")

        oasysgui.lineEdit(shift_box, self, "shift_x", "Horizontal [m]", labelWidth=280, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(shift_box, self, "shift_y", "Vertical [m]", labelWidth=280, valueType=float, orientation="horizontal")

        rotation_box = oasysgui.widgetBox(self.displacement_box, "Rotation", addSpace=False, orientation="vertical")

        oasysgui.lineEdit(rotation_box, self, "rotation_y", "Around Horizontal Axis [CCW, deg]", labelWidth=280, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(rotation_box, self, "rotation_x", "Around Vertical Axis [CCW, deg]", labelWidth=280, valueType=float, orientation="horizontal")

        self.set_displacement()
def set_p(self):
    """Enable the 'Drift Space Before' tab only when a non-zero drift distance is set."""
    self.tab_drift_before.setEnabled(self.p != 0.0)
def set_q(self):
    """Enable the 'Drift Space After' tab only when a non-zero drift distance is set."""
    self.tab_drift.setEnabled(self.q != 0.0)
def set_displacement(self):
    """Toggle between the displacement controls and the empty placeholder box."""
    show_controls = self.has_displacement == 1
    self.displacement_box.setVisible(show_controls)
    self.displacement_box_empty.setVisible(self.has_displacement == 0)
def calculate_angle_radial_mrad(self):
    """Sync the mrad field: convert the incidence angle (deg, from the normal)
    into the grazing angle (mrad, from the surface), rounded to 7 decimals."""
    grazing_angle_rad = numpy.radians(90 - self.angle_radial)
    self.angle_radial_mrad = round(grazing_angle_rad * 1000, 7)
def calculate_angle_radial_deg(self):
    """Sync the deg field: convert the grazing angle (mrad, from the surface)
    back into the incidence angle (deg, from the normal), rounded to 10 decimals."""
    grazing_angle_rad = self.angle_radial_mrad / 1000
    self.angle_radial = round(numpy.degrees(0.5 * numpy.pi - grazing_angle_rad), 10)
class PropagatorInfoDialog(QDialog):
    # Small dialog showing an explanatory image for the "Propagator" combo
    # (opened by show_propagator_info()).

    # Path of the bundled explanatory image.
    usage_path = os.path.join(resources.package_dirname("orangecontrib.srw.widgets.gui"), "misc", "propagator_info.png")

    def __init__(self, parent=None):
        """Create the dialog: a centered image label plus an Ok button."""
        QDialog.__init__(self, parent)
        self.setWindowTitle('Propagator Info')
        self.setMinimumHeight(180)
        self.setMinimumWidth(340)

        usage_box = oasysgui.widgetBox(self, "", addSpace=True, orientation="vertical")

        label = QLabel("")
        label.setAlignment(Qt.AlignCenter)
        label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        label.setPixmap(QPixmap(self.usage_path))

        usage_box.layout().addWidget(label)

        bbox = QDialogButtonBox(QDialogButtonBox.Ok)
        bbox.accepted.connect(self.accept)

        usage_box.layout().addWidget(bbox)
def show_propagator_info(self):
    """Open the PropagatorInfoDialog; report any failure in a message box."""
    try:
        OWSRWOpticalElement.PropagatorInfoDialog(parent=self).show()
    except Exception as exception:
        QMessageBox.critical(self, "Error", str(exception), QMessageBox.Ok)

        # In developer mode, surface the original exception as well.
        if self.IS_DEVELOP: raise exception
def draw_specific_box(self):
    """Subclass hook: populate the 'Optical Element' tab with element-specific controls.

    Called by __init__; must be implemented by every concrete widget.
    """
    raise NotImplementedError()
def check_data(self):
    """Validate distances and angles before propagation.

    Uses the congruence checkers (which raise on invalid values).  When the
    azimuth is chosen from the orientation combo, it is translated to degrees;
    when the widget has no orientation angles, both angles are reset to zero.
    """
    # Distances: positive unless the widget explicitly allows negative drifts.
    check_distance = congruence.checkPositiveNumber if self.check_positive_distances else congruence.checkNumber
    if self.has_p: check_distance(self.p, "Distance from previous Continuation Plane")
    if self.has_q: check_distance(self.q, "Distance to next Continuation Plane")

    if self.has_orientation_angles:
        congruence.checkPositiveAngle(self.angle_radial, "Incident Angle (to normal)")

        if self.azimuth_hor_vert:
            # Discrete orientation combo -> azimuthal angle in degrees.
            azimuth_for_orientation = {Orientation.UP:    0.0,
                                       Orientation.DOWN:  180.0,
                                       Orientation.LEFT:  90.0,
                                       Orientation.RIGHT: 270.0}
            if self.orientation_azimuthal in azimuth_for_orientation:
                self.angle_azimuthal = azimuth_for_orientation[self.orientation_azimuthal]
        else:
            congruence.checkPositiveAngle(self.angle_azimuthal, "Rotation along Beam Axis")
    else:
        self.angle_radial = 0.0
        self.angle_azimuthal = 0.0

    if self.has_displacement:
        congruence.checkAngle(self.rotation_x, "Rotation Around Horizontal Axis")
        congruence.checkAngle(self.rotation_y, "Rotation Around Vertical Axis")
def propagate_new_wavefront(self, trigger):
    """Handle a Trigger input: optionally apply scanned variable values, then re-propagate.

    When the trigger carries a "variable_name" parameter (parameter scan),
    the named attribute(s) are set to "variable_value" and the scan metadata
    is attached to the input wavefront before propagating.
    """
    try:
        if trigger and trigger.new_object == True:
            if trigger.has_additional_parameter("variable_name"):
                if self.input_srw_data is None: raise Exception("No Input Data")

                variable_name = trigger.get_additional_parameter("variable_name").strip()
                variable_display_name = trigger.get_additional_parameter("variable_display_name").strip()
                variable_value = trigger.get_additional_parameter("variable_value")
                variable_um = trigger.get_additional_parameter("variable_um")

                def check_options(variable_name):
                    # Scanning a displacement variable implies the displacement
                    # option must be switched on (and the GUI updated).
                    if variable_name in ["shift_x",
                                         "rotation_x",
                                         "shift_y",
                                         "rotation_y"]:
                        self.has_displacement = 1
                        self.set_displacement()

                # "a,b" scans several attributes with the same value.
                if "," in variable_name:
                    variable_names = variable_name.split(",")

                    for variable_name in variable_names:
                        setattr(self, variable_name.strip(), variable_value)
                        check_options(variable_name)
                else:
                    setattr(self, variable_name, variable_value)
                    check_options(variable_name)

                # Record the scan point on the wavefront so downstream widgets can plot it.
                self.input_srw_data.get_srw_wavefront().setScanningData(SRWWavefront.ScanningData(variable_name, variable_value, variable_display_name, variable_um))

            self.propagate_wavefront()
    except Exception as exception:
        QMessageBox.critical(self, "Error", str(exception), QMessageBox.Ok)

        if self.IS_DEVELOP: raise exception
def propagate_wavefront(self):
    """Append this element to the beamline and propagate the input wavefront through it.

    Chooses the native or WOFRY Fresnel handler from the current propagation
    mode.  In WHOLE_BEAMLINE mode, actual propagation happens only on the
    final screen; otherwise the wavefront is propagated step by step through
    this single element.  Sends the resulting SRWData and a Trigger downstream.
    """
    try:
        self.progressBarInit()

        if self.input_srw_data is None: raise Exception("No Input Data")

        self.check_data()

        # propagation to o.e.

        propagator = PropagationManager.Instance()
        propagation_mode = propagator.get_propagation_mode(SRW_APPLICATION)

        # Native SRW propagation for step-by-step and whole-beamline modes,
        # WOFRY-based propagation otherwise.
        handler_name = FresnelSRWNative.HANDLER_NAME if propagation_mode == SRWPropagationMode.STEP_BY_STEP or \
                                                        propagation_mode == SRWPropagationMode.WHOLE_BEAMLINE else \
                       FresnelSRWWofry.HANDLER_NAME

        input_wavefront = self.input_srw_data.get_srw_wavefront()
        srw_beamline = self.input_srw_data.get_srw_beamline().duplicate()
        working_srw_beamline = self.input_srw_data.get_working_srw_beamline().duplicate()

        # Element-specific model object, provided by the subclass.
        optical_element = self.get_optical_element()
        optical_element.name = self.oe_name if not self.oe_name is None else self.windowTitle()

        if self.has_displacement==1:
            # Sign flip: GUI rotations are CCW, SRW expects the opposite sense here.
            optical_element.displacement = SRWOpticalElementDisplacement(shift_x=self.shift_x,
                                                                        shift_y=self.shift_y,
                                                                        rotation_x=numpy.radians(-self.rotation_x),
                                                                        rotation_y=numpy.radians(-self.rotation_y))

        beamline_element = BeamlineElement(optical_element=optical_element,
                                           coordinates=ElementCoordinates(p=self.p,
                                                                          q=self.q,
                                                                          angle_radial=numpy.radians(self.angle_radial),
                                                                          angle_azimuthal=numpy.radians(self.angle_azimuthal)))

        srw_beamline.append_beamline_element(beamline_element)
        working_srw_beamline.append_beamline_element(beamline_element)

        self.progressBarSet(20)

        if propagation_mode == SRWPropagationMode.WHOLE_BEAMLINE:
            # Attach this element's propagation parameters to both beamlines;
            # propagation itself is deferred to the final screen widget.
            self.set_additional_parameters(beamline_element, None, srw_beamline)
            self.set_additional_parameters(beamline_element, None, working_srw_beamline)

            if hasattr(self, "is_final_screen") and self.is_final_screen == 1:
                propagation_parameters = PropagationParameters(wavefront=input_wavefront.duplicate(),
                                                               propagation_elements = None)
                propagation_parameters.set_additional_parameters("working_beamline", working_srw_beamline)

                self.setStatusMessage("Begin Propagation")

                output_wavefront = propagator.do_propagation(propagation_parameters=propagation_parameters,
                                                             handler_name=handler_name)

                self.setStatusMessage("Propagation Completed")

                output_srw_data = SRWData(srw_beamline=srw_beamline,
                                          srw_wavefront=output_wavefront)
                output_srw_data.reset_working_srw_beamline()
            else:
                # Not the final screen: just pass the input wavefront along.
                output_wavefront = None

                output_srw_data = SRWData(srw_beamline=srw_beamline,
                                          srw_wavefront=input_wavefront)
        else:
            # Step-by-step (or WOFRY) mode: propagate through this element now.
            propagation_elements = PropagationElements()
            propagation_elements.add_beamline_element(beamline_element)

            propagation_parameters = PropagationParameters(wavefront=input_wavefront.duplicate(),
                                                           propagation_elements = propagation_elements)

            self.set_additional_parameters(beamline_element, propagation_parameters, srw_beamline)

            self.setStatusMessage("Begin Propagation")

            output_wavefront = propagator.do_propagation(propagation_parameters=propagation_parameters,
                                                         handler_name=handler_name)

            self.setStatusMessage("Propagation Completed")

            output_srw_data = SRWData(srw_beamline=srw_beamline,
                                      srw_wavefront=output_wavefront)

        self.progressBarSet(50)

        if not output_wavefront is None:
            # Carry the scan metadata through and refresh the plots.
            output_wavefront.setScanningData(self.input_srw_data.get_srw_wavefront().scanned_variable_data)

            self.output_wavefront = output_wavefront

            self.initializeTabs()

            tickets = []

            self.run_calculation_for_plots(tickets=tickets, progress_bar_value=50)

            self.plot_results(tickets, 80)

        self.progressBarFinished()
        self.setStatusMessage("")

        self.send("SRWData", output_srw_data)
        self.send("Trigger", TriggerIn(new_object=True))
    except Exception as e:
        QMessageBox.critical(self, "Error", str(e.args[0]), QMessageBox.Ok)

        self.setStatusMessage("")
        self.progressBarFinished()

        if self.IS_DEVELOP: raise e
def set_additional_parameters(self, beamline_element, propagation_parameters=None, beamline=None):
    """
    Build the SRW wavefront-propagation parameter sets for the three stages of
    this element (drift space before, the optical element itself, drift space
    after) and attach them to the given containers.

    The widget stores one copy of every SRW propagation setting per stage, as
    attributes named "<stage prefix><setting name>"; each stage section below
    simply gathers those attributes.

    :param beamline_element: element whose coordinates decide whether the
        drift-before (p != 0) and drift-after (q truthy) stages are built.
    :param propagation_parameters: optional wofry PropagationParameters; each
        parameter set actually built is registered on it under a
        "srw_<stage>_wavefront_propagation[_optional]_parameters" key.
    :param beamline: optional SRW beamline; every stage is appended with its
        Where position — possibly as (None, None), so the beamline keeps one
        entry per position even when a stage is absent.
    """
    from wofrysrw.beamline.srw_beamline import Where

    def process_stage(is_active, parameters_prefix, has_optional, optional_prefix, key, where):
        # Build (or skip) one stage and hand the results to the containers.
        parameters = None
        optional_parameters = None
        if is_active:
            parameters = self._build_wavefront_propagation_parameters(parameters_prefix)
            if propagation_parameters is not None:
                propagation_parameters.set_additional_parameters(
                    "srw_" + key + "_wavefront_propagation_parameters", parameters)
            if has_optional():
                optional_parameters = self._build_wavefront_propagation_optional_parameters(optional_prefix)
                if propagation_parameters is not None:
                    propagation_parameters.set_additional_parameters(
                        "srw_" + key + "_wavefront_propagation_optional_parameters", optional_parameters)
        if beamline is not None:
            beamline.append_wavefront_propagation_parameters(parameters, optional_parameters, where)

    # DRIFT BEFORE: only when the element has an entrance arm (p != 0).
    process_stage(beamline_element.get_coordinates().p() != 0,
                  "drift_before_",
                  self.has_drift_before_wavefront_propagation_optional_parameters,
                  "drift_before_",
                  "drift_before",
                  Where.DRIFT_BEFORE)

    # OE: only when the widget exposes the optical-element parameters tab.
    process_stage(self.has_oe_wavefront_propagation_parameters_tab,
                  "oe_",
                  self.has_oe_wavefront_propagation_optional_parameters,
                  "oe_",
                  "oe",
                  Where.OE)

    # DRIFT AFTER: only when the element has an exit arm (q truthy).
    # NOTE(review): the main parameters historically use the "drift_" attribute
    # prefix while the optional ones use "drift_after_" — kept as-is.
    process_stage(bool(beamline_element.get_coordinates().q()),
                  "drift_",
                  self.has_drift_after_wavefront_propagation_optional_parameters,
                  "drift_after_",
                  "drift_after",
                  Where.DRIFT_AFTER)

# Keyword names of WavefrontPropagationParameters; the widget stores each
# stage's value under the attribute "<stage prefix><name>".
_PROPAGATION_PARAMETER_NAMES = (
    "auto_resize_before_propagation",
    "auto_resize_after_propagation",
    "relative_precision_for_propagation_with_autoresizing",
    "allow_semianalytical_treatment_of_quadratic_phase_term",
    "do_any_resizing_on_fourier_side_using_fft",
    "horizontal_range_modification_factor_at_resizing",
    "horizontal_resolution_modification_factor_at_resizing",
    "vertical_range_modification_factor_at_resizing",
    "vertical_resolution_modification_factor_at_resizing",
    "type_of_wavefront_shift_before_resizing",
    "new_horizontal_wavefront_center_position_after_shift",
    "new_vertical_wavefront_center_position_after_shift",
)

# Keyword names of WavefrontPropagationOptionalParameters, same convention.
_OPTIONAL_PARAMETER_NAMES = (
    "orientation_of_the_output_optical_axis_vector_x",
    "orientation_of_the_output_optical_axis_vector_y",
    "orientation_of_the_output_optical_axis_vector_z",
    "orientation_of_the_horizontal_base_vector_x",
    "orientation_of_the_horizontal_base_vector_y",
)

def _build_wavefront_propagation_parameters(self, prefix):
    # Gather the stage's values from the widget attributes "<prefix><name>".
    return WavefrontPropagationParameters(
        **{name: getattr(self, prefix + name) for name in self._PROPAGATION_PARAMETER_NAMES})

def _build_wavefront_propagation_optional_parameters(self, prefix):
    # Gather the stage's optional output-frame orientation values.
    return WavefrontPropagationOptionalParameters(
        **{name: getattr(self, prefix + name) for name in self._OPTIONAL_PARAMETER_NAMES})
def has_drift_before_wavefront_propagation_optional_parameters(self):
    """Return True when any drift-before output-frame orientation component is non-zero."""
    return any(component != 0.0 for component in (
        self.drift_before_orientation_of_the_output_optical_axis_vector_x,
        self.drift_before_orientation_of_the_output_optical_axis_vector_y,
        self.drift_before_orientation_of_the_output_optical_axis_vector_z,
        self.drift_before_orientation_of_the_horizontal_base_vector_x,
        self.drift_before_orientation_of_the_horizontal_base_vector_y))
def has_oe_wavefront_propagation_optional_parameters(self):
    """Return True when any optical-element output-frame orientation component is non-zero."""
    return any(component != 0.0 for component in (
        self.oe_orientation_of_the_output_optical_axis_vector_x,
        self.oe_orientation_of_the_output_optical_axis_vector_y,
        self.oe_orientation_of_the_output_optical_axis_vector_z,
        self.oe_orientation_of_the_horizontal_base_vector_x,
        self.oe_orientation_of_the_horizontal_base_vector_y))
def has_drift_after_wavefront_propagation_optional_parameters(self):
    """Return True when any drift-after output-frame orientation component is non-zero."""
    return any(component != 0.0 for component in (
        self.drift_after_orientation_of_the_output_optical_axis_vector_x,
        self.drift_after_orientation_of_the_output_optical_axis_vector_y,
        self.drift_after_orientation_of_the_output_optical_axis_vector_z,
        self.drift_after_orientation_of_the_horizontal_base_vector_x,
        self.drift_after_orientation_of_the_horizontal_base_vector_y))
def get_optical_element(self):
    """Build and return the SRW optical element for this widget.

    Abstract hook: every concrete element widget must override this.
    """
    raise NotImplementedError()
def set_input(self, srw_data):
    """Store incoming SRWData and, when automatic execution is on, propagate it.

    A None payload is ignored and the previous input is kept.
    """
    if srw_data is None:
        return
    self.input_srw_data = srw_data
    if self.is_automatic_run:
        self.propagate_wavefront()
def run_calculation_for_plots(self, tickets, progress_bar_value):
    """Append 2D plot tickets for the propagated wavefront.

    view_type == 2: polarization-resolved view — sigma/pi intensity followed
    by sigma/pi phase. view_type == 1: total intensity and phase. Any other
    value appends nothing. The progress bar is advanced after the first
    intensity ticket and again at the end.
    """
    wavefront = self.output_wavefront
    add_plot = SRWWavefrontViewer.add_2D_wavefront_plot
    if self.view_type == 2:
        energy, horizontal, vertical, intensity = wavefront.get_intensity(
            multi_electron=False,
            polarization_component_to_be_extracted=PolarizationComponent.LINEAR_HORIZONTAL)
        add_plot(energy, horizontal, vertical, intensity, tickets)
        self.progressBarSet(progress_bar_value)
        energy, horizontal, vertical, intensity = wavefront.get_intensity(
            multi_electron=False,
            polarization_component_to_be_extracted=PolarizationComponent.LINEAR_VERTICAL)
        add_plot(energy, horizontal, vertical, intensity, tickets)
        energy, horizontal, vertical, phase = wavefront.get_phase(
            polarization_component_to_be_extracted=PolarizationComponent.LINEAR_HORIZONTAL)
        add_plot(energy, horizontal, vertical, phase, tickets, int_phase=1)
        energy, horizontal, vertical, phase = wavefront.get_phase(
            polarization_component_to_be_extracted=PolarizationComponent.LINEAR_VERTICAL)
        add_plot(energy, horizontal, vertical, phase, tickets, int_phase=1)
        self.progressBarSet(progress_bar_value + 10)
    elif self.view_type == 1:
        energy, horizontal, vertical, intensity = wavefront.get_intensity(multi_electron=False)
        add_plot(energy, horizontal, vertical, intensity, tickets)
        self.progressBarSet(progress_bar_value)
        energy, horizontal, vertical, phase = wavefront.get_phase()
        add_plot(energy, horizontal, vertical, phase, tickets, int_phase=1)
        self.progressBarSet(progress_bar_value + 10)
def receive_syned_data(self, data):
    """Populate the widget fields from the last element of a syned beamline.

    Copies name, p and q from the element; when the widget exposes
    orientation angles, converts them from radians to degrees and — in
    horizontal/vertical azimuth mode — maps the azimuthal angle onto an
    Orientation constant (0=UP, 180=DOWN, 90=LEFT, 270=RIGHT). Any error is
    reported through a message box instead of propagating.
    """
    if data is None:
        return
    try:
        beamline_element = data.get_beamline_element_at(-1)
        if beamline_element is None:
            raise Exception("Syned Data not correct: Empty Beamline Element")

        self.oe_name = beamline_element._optical_element._name
        self.p = beamline_element._coordinates._p
        self.q = beamline_element._coordinates._q

        if self.has_orientation_angles:
            self.angle_azimuthal = numpy.degrees(beamline_element._coordinates._angle_azimuthal)
            self.angle_radial = numpy.degrees(beamline_element._coordinates._angle_radial)
            if self.azimuth_hor_vert:
                if self.angle_azimuthal == 0.0:
                    self.orientation_azimuthal = Orientation.UP
                elif self.angle_azimuthal == 180.0:
                    self.orientation_azimuthal = Orientation.DOWN
                elif self.angle_azimuthal == 90.0:
                    self.orientation_azimuthal = Orientation.LEFT
                elif self.angle_azimuthal == 270.0:
                    # BUG FIX: this line used "==" (a no-op comparison), so the
                    # RIGHT orientation was never actually stored.
                    self.orientation_azimuthal = Orientation.RIGHT
                else:
                    raise Exception("Syned Data not correct: Orientation of central normal vector not recognized")
        else:
            self.angle_azimuthal = 0.0
            self.angle_radial = 0.0

        self.receive_specific_syned_data(beamline_element._optical_element)
    except Exception as exception:
        QMessageBox.critical(self, "Error", str(exception), QMessageBox.Ok)
def receive_specific_syned_data(self, optical_element):
    """Apply the element-specific part of incoming syned data to the widget.

    Abstract hook called by receive_syned_data; subclasses must override.
    """
    raise NotImplementedError()
def callResetSettings(self):
    """Ask the user for confirmation, then restore all fields to defaults."""
    if ConfirmDialog.confirmed(parent=self, message="Confirm Reset of the Fields?"):
        try:
            self.resetSettings()
        except Exception:
            # Best-effort: resetting a partially initialized widget may fail;
            # keep the original silent-swallow, but no longer trap
            # SystemExit/KeyboardInterrupt as the previous bare "except" did.
            pass
def getVariablesToPlot(self):
    """Column pair (h, v) to plot for each ticket; 4 tickets in full view, else 2."""
    plot_count = 4 if self.view_type == 2 else 2
    return [[1, 2] for _ in range(plot_count)]
def getWeightedPlots(self):
    """Weight flag per plot: intensity plots unweighted, phase plots weighted."""
    if self.view_type == 2:
        return [False] * 2 + [True] * 2
    return [False, True]
def getWeightTickets(self):
    """Index of the intensity ticket weighting each plot; nan means unweighted."""
    full_view = self.view_type == 2
    return [nan, nan, 0, 1] if full_view else [nan, 0]
def getTitles(self, with_um=False):
    """Plot titles for the current view mode; append unit suffixes when with_um."""
    if self.view_type == 2:
        # Polarization-resolved view: sigma/pi intensity, then sigma/pi phase.
        base = ["Intensity SE \u03c3", "Intensity SE \u03c0",
                "Phase SE \u03c3", "Phase SE \u03c0"]
        units = [" [ph/s/.1%bw/mm\u00b2]", " [ph/s/.1%bw/mm\u00b2]",
                 " [rad]", " [rad]"]
    else:
        base = ["Intensity SE", "Phase SE"]
        units = [" [ph/s/.1%bw/mm\u00b2]", " [rad]"]
    if with_um:
        return [title + unit for title, unit in zip(base, units)]
    return base
def getXTitles(self):
    """Horizontal-axis label for each plot (4 in full view, otherwise 2)."""
    return ["X [\u03bcm]"] * (4 if self.view_type == 2 else 2)
def getYTitles(self):
    """Vertical-axis label for each plot (4 in full view, otherwise 2)."""
    return ["Y [\u03bcm]"] * (4 if self.view_type == 2 else 2)
def getXUM(self):
    """Horizontal-axis unit label per plot; mirrors getXTitles."""
    return ["X [\u03bcm]"] * (4 if self.view_type == 2 else 2)
def getYUM(self):
    """Vertical-axis unit label per plot; mirrors getYTitles."""
    return ["Y [\u03bcm]"] * (4 if self.view_type == 2 else 2)
/3d-converter-0.9.0.tar.gz/3d-converter-0.9.0/models_converter/formats/wavefront/writer.py | from models_converter.formats.universal import Scene
from models_converter.interfaces import WriterInterface
class Writer(WriterInterface):
    """Serializes a universal Scene into Wavefront OBJ text.

    The OBJ output accumulates in ``self.writen``; each call to ``write``
    appends to it.
    """

    def __init__(self):
        # Accumulated OBJ output.  NOTE(review): the attribute name "writen"
        # is a historical misspelling of "written" -- kept because callers
        # read it as the public result.
        self.writen = ''

        # Running totals of points emitted so far, per vertex stream type,
        # updated while each geometry's vertices are written.
        self.temp_vertices_offsets = {
            'POSITION': 0,
            'TEXCOORD': 0,
            'NORMAL': 0
        }

        # Offsets valid for the geometry currently being written: a snapshot
        # of the running totals taken before its vertices are appended.  OBJ
        # face indices are global and 1-based, so every face index of a
        # geometry must be shifted by the point counts of earlier geometries.
        self.vertices_offsets = {
            'POSITION': 0,
            'TEXCOORD': 0,
            'NORMAL': 0
        }

    def write(self, scene: Scene):
        """Append the OBJ representation of *scene* to ``self.writen``."""
        for geometry in scene.get_geometries():
            # Freeze the offsets accumulated by previous geometries; this
            # geometry's face indices are rebased with these values below.
            for key in self.vertices_offsets.keys():
                self.vertices_offsets[key] = self.temp_vertices_offsets[key]

            prefix = ''

            for vertex in geometry.get_vertices():
                # OBJ statement prefix for this vertex stream type.
                if vertex.get_type() == 'POSITION':
                    prefix = 'v '
                elif vertex.get_type() == 'NORMAL':
                    prefix = 'vn '
                elif vertex.get_type() == 'TEXCOORD':
                    prefix = 'vt '

                # Advance the running totals for the next geometry.
                self.temp_vertices_offsets[vertex.get_type()] += len(vertex.get_points())

                # NOTE(review): despite the loop-variable name, each item of
                # get_points() appears to be one point's coordinate tuple,
                # written scaled on a single OBJ line -- confirm upstream.
                for triangle in vertex.get_points():
                    temp_string = prefix
                    for point in triangle:
                        temp_string += str(point * vertex.get_scale()) + ' '

                    self.writen += f'{temp_string}\n'

            self.writen += '\n\n'

            for material in geometry.get_materials():
                # One OBJ object ("o") per geometry/material pair.
                self.writen += f'o {geometry.get_name()}|{material.get_name()}\n\n'

                for triangle in material.get_triangles():
                    temp_string = 'f '
                    for point in triangle:
                        # OBJ face vertices are "v/vt/vn" with 1-based global
                        # indices, hence the per-stream offset plus one.
                        temp_list = [
                            str(point[0] + self.vertices_offsets['POSITION'] + 1),  # POSITION
                            str(point[2] + self.vertices_offsets['TEXCOORD'] + 1),  # TEXCOORD
                            str(point[1] + self.vertices_offsets['NORMAL'] + 1)  # NORMAL
                        ]
                        temp_string += '/'.join(temp_list) + ' '

                    self.writen += f'{temp_string}\n'

                self.writen += '\n\n'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.