id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
/HavNegpy-1.2.tar.gz/HavNegpy-1.2/docs/_build/html/_build/doctrees/nbsphinx/_build/html/_build/html/_build/html/_build/html/hn_module_tutorial.ipynb | # Tutorial for the HN module of HavNegpy package
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import HavNegpy as dd
%matplotlib qt
os.chdir(r'M:\Marshall_Data\mohamed_data\mohamed_data\n44')
def create_dataframe(f):
    """Read a dielectric-spectroscopy export and reshape it into one
    log10(eps'') column per temperature sweep.

    The raw file holds consecutive frequency sweeps of 63 points each,
    preceded by 4 header rows.

    Args:
        f: path to the whitespace-separated data file.

    Returns:
        (df, df2, Cooling, Heating): the raw table, the reshaped table,
        and the two module-level scan groups (first 25 reshaped columns
        vs. the remainder).
    """
    points_per_sweep = 63
    raw = pd.read_csv(f, sep=r"\s+", index_col=False, usecols=[0, 1, 2, 3],
                      names=['Freq', 'T', 'Eps1', 'Eps2'], header=None,
                      skiprows=4, encoding='unicode_escape', engine='python')

    # One reshaped-column label per sweep: the temperature at the sweep's
    # first row, preceded by the shared frequency axis label.
    labels = ['log f']
    labels += [raw['T'][offset] for offset in range(0, len(raw), points_per_sweep)]

    reshaped = pd.DataFrame()
    log_freq = np.log10(raw['Freq'][0:points_per_sweep].values)
    reshaped['log f'] = pd.concat([pd.DataFrame(log_freq)], axis=1, ignore_index=True)

    # Kept for backward compatibility: the tutorial also reads these two
    # groups from module scope.
    global Cooling, Heating
    for offset in range(0, len(raw), points_per_sweep):
        log_eps2 = np.log10(raw['Eps2'][offset:offset + points_per_sweep].values)
        reshaped[offset] = pd.concat([pd.DataFrame(log_eps2)], axis=1, ignore_index=True)
    reshaped.columns = labels

    # Hard-coded split between the cooling and heating ramps: the first 25
    # columns (including 'log f') form Cooling, the rest Heating.
    Cooling = reshaped.iloc[:, 0:25]
    Heating = reshaped.iloc[:, 25:]
    return raw, reshaped, Cooling, Heating
# Build the dataframes from the raw dielectric data file.
df,df2,cool,heat = create_dataframe('EPS.TXT')
# Fitting data: shared log-frequency axis vs. one heating-scan column,
# skipping the first 9 low-frequency points.
# NOTE(review): 40 is a column label (presumably a temperature) — confirm
# against the dataset before reusing.
x,y = df2['log f'][9:], heat[40][9:]
plt.figure()
plt.scatter(x,y,label='data for fitting')
plt.xlabel('log f [Hz]')
plt.ylabel('log $\epsilon$"')
plt.legend()
plt.title('Example for HN fitting')
```
image of the plot we are using in this tutorial

```
''' instantiate the HN module from HavgNegpy'''
hn = dd.HN()
''' select range to perform hn fitting'''
''' the select range functions pops in a separate window and allows you two clicks to select the region of interest (ROI)'''
''' In this tutorial, I'll plot the ROI and append as an image in the next cell'''
x1,y1 = hn.select_range(x,y)
''' view the data from select range'''
plt.scatter(x1,y1,label = 'Data for fitting')
plt.xlabel('log f [Hz]')
plt.ylabel('log $\epsilon$"')
plt.legend()
plt.title('ROI selected from HN module')
```
image of the ROI from HN module
```
''' dump the initial guess parameters using dump parameters method (varies for each fn), which dumps the parameters in a json file'''
''' this is required before performing the first fitting as it takes the initial guess from the json file created'''
hn.dump_parameters_hn()
''' view the initial guess for the ROI using initial_view method'''
''' I'll append the image in the next cell'''
hn.initial_view_hn(x1,y1)
```
image of the initial guess
```
''' perform least squares fitting'''
''' The image of the curve fit is added in the next cell '''
hn.fit(x1,y1)
```
Example of the fit performed using single HN function
the procedure is similar for double HN and HN with conductivity

```
'''create a file to save fit results using create_analysis file method'''
''' before saving fit results an analysis file has to be created '''
hn.create_analysis_file()
''' save the fit results using save_fit method of the corresponding fit function'''
''' takes one argument, read more on the documentation'''
hn.save_fit_hn(1)
```
| PypiClean |
/125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/embeddings/bert_embedding.py |
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_embedding.py
# time: 2019-05-25 17:40
import os
os.environ['TF_KERAS'] = '1'
import codecs
import logging
from typing import Union, Optional, Any, List, Tuple
import numpy as np
import pysoftNLP.kashgari as kashgari
import tensorflow as tf
from pysoftNLP.kashgari.layers import NonMaskingLayer
from pysoftNLP.kashgari.embeddings.base_embedding import Embedding
from pysoftNLP.kashgari.processors.base_processor import BaseProcessor
import keras_bert
class BERTEmbedding(Embedding):
"""Pre-trained BERT embedding"""
# NOTE(review): indentation was lost in this text dump; the comments below
# annotate the intended structure without changing any code token.
def info(self):
# Extend the base embedding's info dict with BERT-specific config so the
# embedding can be re-created when a saved model is restored.
info = super(BERTEmbedding, self).info()
info['config'] = {
'model_folder': self.model_folder,
'sequence_length': self.sequence_length
}
return info
def __init__(self,
model_folder: str,
layer_nums: int = 4,
trainable: bool = False,
task: str = None,
sequence_length: Union[str, int] = 'auto',
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Args:
task: task identifier, passed through to the base Embedding
model_folder: folder containing vocab.txt, bert_config.json and bert_model.ckpt
layer_nums: number of layers whose outputs will be concatenated into a single tensor,
default `4`, output the last 4 hidden layers as the thesis suggested
trainable: whether if the model is trainable, default `False` and set it to `True`
for fine-tune this embedding layer during your training
sequence_length: fixed sequence length as an int, or 'auto' ('variable'/tuple rejected)
processor: optional pre-built text processor, passed to the base Embedding
from_saved_model: True when restoring a saved model (skips vocab/model build here)
"""
self.trainable = trainable
# Do not need to train the whole bert model if just to use its feature output
self.training = False
self.layer_nums = layer_nums
# BERT requires one fixed sequence length: reject tuple/'variable' inputs.
if isinstance(sequence_length, tuple):
raise ValueError('BERT embedding only accept `int` type `sequence_length`')
if sequence_length == 'variable':
raise ValueError('BERT embedding only accept sequences in equal length')
super(BERTEmbedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=0,
processor=processor,
from_saved_model=from_saved_model)
# Align the processor's special tokens with BERT's vocabulary conventions.
self.processor.token_pad = '[PAD]'
self.processor.token_unk = '[UNK]'
self.processor.token_bos = '[CLS]'
self.processor.token_eos = '[SEP]'
self.processor.add_bos_eos = True
self.model_folder = model_folder
if not from_saved_model:
self._build_token2idx_from_bert()
self._build_model()
def _build_token2idx_from_bert(self):
# Build the token -> index mapping from BERT's vocab.txt: one token per
# line, index given by file order.
dict_path = os.path.join(self.model_folder, 'vocab.txt')
token2idx = {}
with codecs.open(dict_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
token2idx[token] = len(token2idx)
self.bert_token2idx = token2idx
self._tokenizer = keras_bert.Tokenizer(token2idx)
# Share the mapping (and its inverse) with the processor.
self.processor.token2idx = self.bert_token2idx
self.processor.idx2token = dict([(value, key) for key, value in token2idx.items()])
def _build_model(self, **kwargs):
# Lazily build the TF model once the sequence length is a concrete int.
if self.embed_model is None:
seq_len = self.sequence_length
if isinstance(seq_len, tuple):
seq_len = seq_len[0]
if isinstance(seq_len, str):
# 'auto' not yet resolved: defer until analyze_corpus determines it.
# NOTE(review): message likely means "will NOT be built until ...".
logging.warning(f"Model will be built until sequence length is determined")
return
config_path = os.path.join(self.model_folder, 'bert_config.json')
check_point_path = os.path.join(self.model_folder, 'bert_model.ckpt')
bert_model = keras_bert.load_trained_model_from_checkpoint(config_path,
check_point_path,
seq_len=seq_len,
output_layer_num=self.layer_nums,
training=self.training,
trainable=self.trainable)
self._model = tf.keras.Model(bert_model.inputs, bert_model.output)
# The checkpoint may cap the usable sequence length; honour that cap.
bert_seq_len = int(bert_model.output.shape[1])
if bert_seq_len < seq_len:
logging.warning(f"Sequence length limit set to {bert_seq_len} by pre-trained model")
self.sequence_length = bert_seq_len
self.embedding_size = int(bert_model.output.shape[-1])
# NonMaskingLayer strips the mask from BERT's output before re-wrapping.
output_features = NonMaskingLayer()(bert_model.output)
self.embed_model = tf.keras.Model(bert_model.inputs, output_features)
logging.warning(f'seq_len: {self.sequence_length}')
def analyze_corpus(self,
x: Union[Tuple[List[List[str]], ...], List[List[str]]],
y: Union[List[List[Any]], List[Any]]):
"""
Prepare embedding layer and pre-processor for labeling task
Args:
x: corpus features (token lists, or a tuple of token-list sets)
y: corpus labels
Returns:
"""
# Make sure the BERT vocabulary is loaded before the base-class analysis.
if len(self.processor.token2idx) == 0:
self._build_token2idx_from_bert()
super(BERTEmbedding, self).analyze_corpus(x, y)
def embed(self,
sentence_list: Union[Tuple[List[List[str]], ...], List[List[str]]],
debug: bool = False) -> np.ndarray:
"""
batch embed sentences
Args:
sentence_list: Sentence list to embed
debug: show debug log
Returns:
vectorized sentence list
"""
if self.embed_model is None:
raise ValueError('need to build model for embed sentence')
tensor_x = self.process_x_dataset(sentence_list)
if debug:
logging.debug(f'sentence tensor: {tensor_x}')
embed_results = self.embed_model.predict(tensor_x)
return embed_results
def process_x_dataset(self,
data: Union[Tuple[List[List[str]], ...], List[List[str]]],
subset: Optional[List[int]] = None) -> Tuple[np.ndarray, ...]:
"""
batch process feature data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
x1 = None
if isinstance(data, tuple):
if len(data) == 2:
# Sentence-pair input: vectorize both segments.
x0 = self.processor.process_x_dataset(data[0], self.sequence_length, subset)
x1 = self.processor.process_x_dataset(data[1], self.sequence_length, subset)
else:
x0 = self.processor.process_x_dataset(data[0], self.sequence_length, subset)
else:
x0 = self.processor.process_x_dataset(data, self.sequence_length, subset)
if x1 is None:
# BERT also expects a segment-id input; default to all zeros.
x1 = np.zeros(x0.shape, dtype=np.int32)
return x0, x1
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
# bert_model_path = os.path.join(utils.get_project_path(), 'tests/test-data/bert')
b = BERTEmbedding(task=kashgari.CLASSIFICATION,
model_folder='/Users/brikerman/.kashgari/embedding/bert/chinese_L-12_H-768_A-12',
sequence_length=12)
from kashgari.corpus import SMP2018ECDTCorpus
test_x, test_y = SMP2018ECDTCorpus.load_data('valid')
b.analyze_corpus(test_x, test_y)
data1 = 'all work and no play makes'.split(' ')
data2 = '你 好 啊'.split(' ')
r = b.embed([data1], True)
tokens = b.process_x_dataset([['语', '言', '模', '型']])[0]
target_index = [101, 6427, 6241, 3563, 1798, 102]
target_index = target_index + [0] * (12 - len(target_index))
assert list(tokens[0]) == list(target_index)
print(tokens)
print(r)
print(r.shape) | PypiClean |
/Mastodon.py-1.8.1.tar.gz/Mastodon.py-1.8.1/docs/04_auth.rst | App registration, authentication and preferences
================================================
.. py:module:: mastodon
.. py:class:: Mastodon
Before you can use the Mastodon API, you have to register your
application (which gets you a client key and client secret)
and then log in (which gets you an access token) and out (revoking
the access token you are logged in with). These functions
allow you to do those things. Additionally, it is also possible
to programmatically register a new user.
For convenience, once you have a client id, secret and access token,
you can simply pass them to the constructor of the class, too!
Note that while it is perfectly reasonable to log back in whenever
your app starts, registering a new application on every
startup is not, so don't do that - instead, register an application
once, and then persist your client id and secret. A convenient method
for this is provided by the functions dealing with registering the app,
logging in and the Mastodon classes constructor.
App registration and information
--------------------------------
.. automethod:: Mastodon.create_app
.. automethod:: Mastodon.app_verify_credentials
Authentication
--------------
.. automethod:: Mastodon.__init__
.. _log_in():
.. automethod:: Mastodon.log_in
.. _auth_request_url():
.. automethod:: Mastodon.auth_request_url
.. _set_language():
.. automethod:: Mastodon.set_language
.. automethod:: Mastodon.revoke_access_token
.. automethod:: Mastodon.create_account
.. automethod:: Mastodon.email_resend_confirmation
User preferences
----------------
.. automethod:: Mastodon.preferences | PypiClean |
/OASYS1-SYNED-1.0.45.tar.gz/OASYS1-SYNED-1.0.45/orangecontrib/syned/widgets/scanning/multiple_height_profile_simulator_S.py |
import os
import orangecanvas.resources as resources
try:
from mpl_toolkits.mplot3d import Axes3D # necessario per caricare i plot 3D
except:
pass
from oasys.util.oasys_objects import OasysPreProcessorData, OasysErrorProfileData, OasysSurfaceData
import oasys.util.oasys_util as OU
from oasys.widgets.abstract.error_profile.abstract_multiple_height_profile_simulator_T import OWAbstractMultipleHeightProfileSimulatorT
class OWMultipleHeightProfileSimulatorS(OWAbstractMultipleHeightProfileSimulatorT):
# Oasys widget metadata: how this node appears in the canvas.
name = "Multiple Height Profile Simulator (S)"
id = "height_profile_simulator_s"
icon = "icons/simulator_S.png"
description = "Calculation of mirror surface height profile"
author = "Luca Rebuffi"
maintainer_email = "lrebuffi@anl.gov"
priority = 2
category = ""
keywords = ["height_profile_simulator"]
# Output channels: the preprocessor payload and the list of written files.
outputs = [{"name": "PreProcessor_Data",
"type": OasysPreProcessorData,
"doc": "PreProcessor Data",
"id": "PreProcessor_Data"},
{"name":"Files",
"type":list,
"doc":"Files",
"id":"Files"}]
# NOTE(review): the usage image is resolved from the *shadow* add-on package;
# confirm that dependency is intended for a syned widget.
usage_path = os.path.join(resources.package_dirname("orangecontrib.shadow.widgets.gui"), "misc", "height_error_profile_usage.png")
def __init__(self):
super().__init__()
def get_usage_path(self):
# Path of the usage/help image displayed by the abstract base widget.
return self.usage_path
def write_error_profile_file(self, zz, xx, yy, outFile):
# Persist the simulated height profile as an Oasys surface file.
OU.write_surface_file(zz, xx, yy, outFile)
def send_data(self, height_profile_file_names, dimension_x, dimension_y):
# Emit the surface data (xx/yy grids + zz heights) wrapped in the
# preprocessor containers, then the generated file names.
self.send("PreProcessor_Data", OasysPreProcessorData(error_profile_data=OasysErrorProfileData(surface_data=OasysSurfaceData(xx=self.xx,
yy=self.yy,
zz=self.zz,
surface_data_file=height_profile_file_names),
error_profile_x_dim=dimension_x,
error_profile_y_dim=dimension_y)))
self.send("Files", height_profile_file_names)
/LovelyPlots-0.0.27.tar.gz/LovelyPlots-0.0.27/README.md | # LovelyPlots
  [](
http://doi.org/10.5281/zenodo.6903937)
LovelyPlots is a repository containing ``matplotlib`` style sheets to nicely format figures for scientific papers, theses and presentations while keeping them fully editable in ``Adobe Illustrator``. Additionally, ``.svg`` export options allow figures to automatically adapt their font to your document's font. For example, ``.svg`` figures imported in a ``.tex`` file will automatically be generated with the text font used in your ``.tex`` file.
<p align="center" width="50%">
<img width="70%" src="figs/plots/ipynb+use_mathtext.svg">
</p>
# Installation
```bash
# to install latest PyPI release
pip install LovelyPlots
# to install latest GitHub commit
pip install --upgrade git+https://github.com/killiansheriff/LovelyPlots
```
The pip installation will move all of the ``matplotlib`` style files ``*.mplstyle`` into the appropriate ``matplotlib`` directory.
# Usage
LovelyPlots main style is called ``ipynb``. To use it, add the following lines to the beginning of your Python scripts:
```python
import matplotlib.pyplot as plt
plt.style.use('ipynb')
```
Styles can be combined:
```python
import matplotlib.pyplot as plt
plt.style.use(['ipynb','colorsblind34'])
```
In the above case, the ``ipynb`` default color cycle will be overwritten by a 34 colors colorblind safe color cycle called ``colorsblind34``.
If you only wish to apply a style on a specific plot, this can be achieved using:
```python
import matplotlib.pyplot as plt
with plt.style.context('ipynb'):
fig, ax = plt.subplots()
ax.plot(x, y)
```
# Examples
A few styles are presented here, please see [Styles](#Styles) for a list of all implemented styles. The script used to generate these plots can be found [here](examples/plot.py).
:warning: If you use the ``use_mathtext`` style, ``Adobe Illustrator`` might fail to nicely detect text objects. Please see [here](#Tips-and-Tricks) for work arounds.
The ``['ipynb', 'use_mathtext']`` style:

The ``['ipynb', 'use_mathtext','colors10-markers']`` style:

The ``['ipynb', 'use_mathtext','colors5-light']`` style:

The ``['ipynb', 'use_mathtext', 'colors10-ls']`` style:

The ``['ipynb']`` style:

The ``['paper', 'use_mathtext']`` style:

# Styles
LovelyPlots main style is called ``ipynb``. The latter by default sets the figure size to ``(4.5, 3.46) inches``, uses the default ``matplotlib`` font, activate scientific notation and makes sure your ``matplotlib`` exports will be editable in ``Adobe Illustrator``. Its default color cycle was set to ``colors10``.
## Color cycles
A multitude of color cycles were implemented:
``colors5-light``

``colors5``

``colors10``

``colorsblind10``

``colorsblind34``
Can be seen [here](figs/colors/colorsblind34.png).
## Lines styles, markers and combinations styles
Line styles, markers styles and combinations can be set using the following styles: `ls5`, `marker7`, `colors10-ls`, `colors10-markers`.
## Color maps
Default ``matplotlib`` colormaps were implemented and can be used by adding the following styles: ``cmap-viridis``, ``cmap-inferno``, ``cmap-cividis``, ``cmap-magma``, ``cmap-plasma``.
## Utils
Specific ``matplotlibrc`` parameters can be turned ``on/off`` using the following utilities styles: ``svg_no_fonttype``, ``use_mathtex``, ``use_tex``.
## Fonts
By default the ``ipynb`` style uses the default ``matplotlib`` font. However, one can set its favorite font from a TIFF file:
```python
import matplotlib.pyplot as plt
import LovelyPlots.utils as lp
plt.style.use('ipynb')
lp.set_font('my_font.tiff')
```
# Tips and Tricks
## Adobe Illustrator
Unfortunately, ``mathtext`` (and thus nicely formatted scientific notation) will mess up ``Adobe Illustrator``'s ability to detect text objects, and is thus not activated by default. If you wish to use it, please add the style ``use_mathtext``.
## Latex and SVG files
By default, the ``ipynb`` style sets ``svg.fonttype: none``. This allows for plots saved as ``.svg`` not to carry font information. Consequently, when opened in another environment, the plot will be generated with the default system font.
For example, this allows ``.svg`` plots imported inside a ``Latex`` file to directly be generated with the proper document font, without you having to manually edit the fonts to match your document's font. Additionally, you can open the ``.svg`` file as a text file, find the ugly ``1e10`` scientific notation and replace it with ``$10^{10}$`` so that it is nicely formatted when included in your ``.tex`` file.
An example of how to show an svg in a ``.tex`` file:
```tex
\usepackage{svg}
\begin{figure}[htbp]
\centering
\includesvg{myfig.svg}
\end{figure}
```
## Retina displays
For those using ``IPython`` notebooks, you can set retina display support by adding the following lines to the beginning of your Python script:
```python
import LovelyPlots.utils as lp
lp.set_retina()
```
## Use in Google Colab
To use on Google Colab, you will need to run the following code:
```python
!pip install LovelyPlots
plt.style.reload_library()
plt.style.use('ipynb')
```
# Acknowledgements
This repository was inspired by [SciencePlots](https://github.com/garrettj403/SciencePlots), but adds different styles and crucial functionalities for use in ``.tex`` files and ``Adobe Illustrator``.
# Citing
If you use LovelyPlots, please consider citing:
@software{killian_sheriff_2022_6916993,
author = {Killian Sheriff},
title = {{LovelyPlots, a collection of matplotlib
stylesheets for scientific figures}},
month = jul,
year = 2022,
publisher = {Zenodo},
version = {v0.0.26\_LovelyPlots},
doi = {10.5281/zenodo.6916993},
url = {https://doi.org/10.5281/zenodo.6916993}
}
| PypiClean |
/MICC-0.1.21.tar.gz/MICC-0.1.21/README.rst | Metric in the Curve Complex: MICC
=================================
.. image:: https://travis-ci.org/MICC/MICC.svg?branch=master
:target: https://travis-ci.org/MICC/MICC
The curve complex is a simplicial complex composed of vertices representing equivalency classes of isotopic
simple closed curves on a surface of fixed genus and of edges drawn between vertices if classes contain a disjoint
representative. MICC is a tool designed to compute short distances between these disjoint representatives, based
on an intuitive disk-with-handles representation of a surface.
Installation
------------
Installing through pip is recommended to use the programmatic interface:
::
$ pip install micc
Otherwise, the command line interface for MICC is available `here <http://micc.github.io/>`_.
Getting Started
---------------
Example usage of MICC:
.. code-block:: python
from micc.curvepair import CurvePair
top = [21,7,8,9,10,11,22,23,24,0,1,2,3,4,5,6,12,13,14,15,16,17,18,19,20]
bottom = [9,10,11,12,13,14,15,1,2,3,4,5,16,17,18,19,20,21,22,23,24,0,6,7,8]
test = CurvePair(top, bottom)
print test.distance
Documentation
-------------
TODO
License
-------
Copyright 2014 Matt Morse and Paul Glenn.
MICC is licensed under the `MIT License <https://github.com/MICC/MICC/blob/master/LICENSE>`_. | PypiClean |
/OCAICM-0.0.2.tar.gz/OCAICM-0.0.2/script/data_pre.py | import os.path
import pandas as pd
from rdkit.Chem import AllChem
import numpy as np
from sklearn import preprocessing
def saltremover(i):
    """Strip salts/counter-ions from a SMILES string.

    Keeps only the longest '.'-separated fragment; when several fragments
    share the maximum length, the last one wins (matching the original
    dict-overwrite behaviour).
    """
    longest_by_len = {}
    for fragment in i.split('.'):
        longest_by_len[len(fragment)] = fragment
    return longest_by_len[max(longest_by_len)]
def stand_smiles(smiles):
    """Canonicalize a SMILES string via RDKit; return '' when parsing fails."""
    try:
        canonical = AllChem.MolToSmiles(AllChem.MolFromSmiles(smiles))
    except:  # RDKit can raise a variety of exceptions on bad input
        canonical = ''
    return canonical
def process(file,content,cpu=10):
# Run one preprocessing step selected by `content`:
#   'cano'       -> canonicalize SMILES (salt removal + RDKit canonical form)
#   'descriptor' -> compute PaDEL 2D+3D descriptors
#   'pubchem'    -> compute PaDEL PubChem fingerprints
#   'adj_23d'    -> clean and min-max scale the 2D+3D descriptor table
# Each step is skipped when its output file already exists.
# NOTE(review): `cpu` is unused; PaDEL thread counts are hard-coded below.
if content == 'cano':
data = pd.read_csv(file)
start =len(data)
data['Smiles'] = data['Smiles'].apply(saltremover)
data['Smiles'] = data['Smiles'].apply(stand_smiles)
output = file.split('.csv')[0] + '_pro.csv'
if os.path.exists(output):
pass
else:
data.to_csv(output,index=False)
print('we meet some smiles which cannot revert to cano_smiles and the number is',start-len(data))
if content == 'descriptor':
from padelpy import padeldescriptor
output = file.split('.csv')[0] + '_pro.csv'
# Reads the '_pro' output of the 'cano' step, so that step must run first.
data = pd.read_csv(output)
# Reuse the SMILES as the second (name) column expected by the .smi format.
data['activity'] = data['Smiles']
data=data[['Smiles','activity']]
smi = file.split('.csv')[0] +'.smi'
des = file.split('.csv')[0] +'_23d.csv'
if os.path.exists(des):
pass
else:
data.to_csv(smi,index=False,sep='\t',header=None)
padeldescriptor(mol_dir=smi, d_2d=True, d_3d=True, d_file=des,threads=50)
# print('done 2d3d',end=' ')
if content == 'pubchem':
from padelpy import padeldescriptor
output = file.split('.csv')[0] + '_pro.csv'
data = pd.read_csv(output)
data['activity'] = data['Smiles']
data=data[['Smiles','activity']]
smi = file.split('.csv')[0] +'.smi'
des = file.split('.csv')[0] +'_pubchem.csv'
if os.path.exists(des):
pass
else:
data.to_csv(smi,index=False,sep='\t',header=None)
padeldescriptor(mol_dir=smi, fingerprints=True, d_file=des,threads=30)
# print('done punchem',end=' ')
if content == 'adj_23d':
des = file.split('.csv')[0] + '_23d.csv'
name = file.split('.csv')[0] + '_23d_adj.csv'
if os.path.exists(name):
pass
else:
des = pd.read_csv(des).iloc[:,:1445] # save pre 1444 features
des = des.replace([np.inf,-np.inf],np.nan) # trans inf to nan
# Drop rows/columns that are more than half missing.
des = des.dropna(thresh=int(des.shape[1]/2),axis =0) #drop that row all 0 how ='all'
des = des.dropna(thresh=int(des.shape[0]/2),axis =1)
des = des.fillna(0) # fill nan by mean col
min_max_scaler = preprocessing.MinMaxScaler()
adj = min_max_scaler.fit_transform(des.drop(['Name'], axis=1)) # scaler
adj = pd.DataFrame(adj,columns=list(des.columns)[1:])
adj['Name']=des['Name']
adj.to_csv(name,index=False)
# print('done scaler',end =' ')
def start(file, des=None):
    """Run the preprocessing pipeline on `file`.

    Args:
        file: path to the input CSV (must contain a 'Smiles' column).
        des: descriptor selection; canonicalization always runs, and the
            substrings '2d-3d' / 'pubchem' enable the extra steps.
            Defaults to canonicalization only.
    """
    # Guard: the historical default (None) crashed on the `in` checks below
    # with a TypeError; an empty string keeps the call backward-compatible.
    des = des or ''
    content = ['cano']
    if '2d-3d' in des:
        # 2D/3D descriptors plus their cleaning/scaling pass.
        content.extend(['descriptor', 'adj_23d'])
    if 'pubchem' in des:
        content.append('pubchem')
    for pro in content:
        process(file, pro)
/CaptainSoul-0.1.9.tar.gz/CaptainSoul-0.1.9/cptsoul/netsoul/__init__.py |
import logging
from time import time
from hashlib import md5
from collections import deque
from twisted.internet.protocol import connectionDone
from twisted.protocols.basic import LineOnlyReceiver
from cptsoul.common import CptCommon
from cptsoul.netsoul.tools import Rea, ReaList, NsUserCmdInfo, NsWhoResult, NsWhoEntry, urlEncode, urlDecode
class NsProtocol(LineOnlyReceiver, CptCommon):
# Twisted line-based protocol implementing the Netsoul chat protocol:
# incoming lines are matched against regex tables (ReaList) and dispatched
# to the *Hook methods; outgoing commands are the send* methods at the end.
# NOTE(review): indentation was lost in this dump; comments annotate the
# intended structure without changing any code token.
delimiter = '\n'
def __init__(self, factory):
self.factory = factory
self.factory.setProtocol(self)
# FIFO of callbacks awaiting a "rep NNN" server response, oldest first.
self._responseQueue = deque()
# FIFO of NsWhoResult accumulators for pending "who" requests.
self._whoQueue = deque()
# Top-level dispatch table for raw server lines.
self._realist = ReaList(
Rea(r"^rep (?P<no>\d+) -- .*$", self._responseHook),
Rea(r"^ping (?P<t>\d+)\s?$", self._pingHook),
Rea(r"^salut (?P<num>\d+) (?P<md5_hash>[0-9a-fA-F]{32}) (?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"
r" (?P<port>\d{1,5}) (?P<timestamp>\d+)$", self._salutHook),
Rea(r"^user_cmd (?P<no>\d+):\w+:\d+/\d+:(?P<login>.+)@(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"
r":.+:(?P<loc>.+):.+ \| (?P<cmd>.*)$", self._userCmdHook))
# Second-level dispatch table for the payload of "user_cmd" lines.
self._cmd_realist = ReaList(
Rea(r"^who (?P<no>\d+) (?P<login>.+) (?P<ip>[\d\.]{7,15}) \d+ \d+ \d+ \d+ .+ (?P<loc>.+)"
r" .+ (?P<state>\w+)(:\d+)? (?P<res>.+)$", self._cmdWhoHook),
Rea(r"^who rep 002 -- cmd end$", self._cmdWhoEndHook),
Rea(r"^msg (?P<msg>.+) dst=(?P<dest>.*)$", self._cmdMsgHook),
Rea(r"^state (?P<state>\w+?)(:\d+)?\s?$", self._cmdStateHook),
Rea(r"^login\s?$", self._cmdLoginHook),
Rea(r"^logout\s?$", self._cmdLogoutHook),
Rea(r"^dotnetSoul_UserTyping null dst=.*$", self._cmdIsTypingHook),
Rea(r"^dotnetSoul_UserCancelledTyping null dst=.*$", self._cmdCancelTypingHook),
Rea(r"^file_ask (?P<data>.+) dst=.*$", self._cmdFileAskHook),
Rea(r"^file_start (?P<data>.+) dst=.*$", self._cmdFileStartHook)
)
def lineReceived(self, line):
# Entry point for every server line: log, notify listeners, then dispatch.
logging.debug('Netsoul : << : "%s"' % line)
self.factory.rawHook(line)
if not self._realist.found_match(line):
logging.warning('Netsoul : Unknown line : "%s"' % line)
def connectionLost(self, reason=connectionDone):
pass
def connectionMade(self):
self.factory.connectionMadeHook()
def sendLine(self, line):
# Send a line to the server, mirroring it to the debug log and listeners.
super(NsProtocol, self).sendLine(str(line))
logging.debug('Netsoul : >> : "%s"' % line)
self.factory.sendRawHook(line)
# HOOKS
def _userCmdHook(self, no, login, ip, loc, cmd):
# Route the inner command of a user_cmd line through the cmd table.
if not self._cmd_realist.found_match_cmd(cmd, NsUserCmdInfo(int(no), login, ip, loc)):
logging.warning('Netsoul : Unknown cmd from %s@%s : "%s"' % (login, ip, cmd))
def _responseHook(self, no):
# Deliver a server response to the oldest waiting callback.
no = int(no)
if self._responseQueue:
logging.info('Netsoul : Got response %d' % no)
self._responseQueue.popleft()(no)
else:
logging.warning('Netsoul : No response expected')
def _pingHook(self, t):
# Keep-alive: echo the ping token straight back.
logging.info('Netsoul : Got ping %d' % int(t))
self.sendLine('ping %s' % t)
def _salutHook(self, num, md5_hash, ip, port, timestamp):
# Server greeting: store the challenge data, then start authentication.
logging.info('Netsoul : Got salut %s %s:%s' % (md5_hash, ip, port))
self.info['hash'] = md5_hash
self.info['host'] = ip
self.info['port'] = port
self.sendLine('auth_ag ext_user none none')
self._responseQueue.append(self._responseSalutHook)
# CMD HOOKS
def _cmdWhoHook(self, info, no, login, ip, loc, state, res):
# Accumulate one "who" entry into the oldest pending who request.
if self._whoQueue:
self._whoQueue[0].add(NsWhoEntry(no, login, ip, loc, state, res))
else:
logging.warning("Netsoul : No who expected")
def _cmdWhoEndHook(self, info):
# End marker: hand the completed who result to the factory.
if self._whoQueue:
self.factory.cmdWhoHook(self._whoQueue.popleft())
else:
logging.warning("Netsoul : No who expected")
def _cmdMsgHook(self, info, msg, dest):
self.factory.cmdMsgHook(info, urlDecode(msg), dest.split(','))
def _cmdLoginHook(self, info):
self.factory.cmdLoginHook(info)
def _cmdLogoutHook(self, info):
self.factory.cmdLogoutHook(info)
def _cmdStateHook(self, info, state):
self.factory.cmdStateHook(info, state)
def _cmdIsTypingHook(self, info):
self.factory.cmdIsTypingHook(info)
def _cmdCancelTypingHook(self, info):
self.factory.cmdCancelTypingHook(info)
def _cmdFileAskHook(self, info, data):
# data is url-encoded: "name size desc passive"
name, size, desc, pas = urlDecode(data).split(' ', 4)
self.factory.cmdFileAskHook(info, urlDecode(name), int(size), urlDecode(desc))
def _cmdFileStartHook(self, info, data):
# data is url-encoded: "name ip port"
name, ip, port = urlDecode(data).split(' ', 3)
self.factory.cmdFileStartHook(info, urlDecode(name), urlDecode(ip), int(port))
# RESPONSE HOOKS
def _responseSalutHook(self, no):
if no == 2:
# Success: answer the challenge with md5("hash-host/port" + password).
md5_hash = md5('%s-%s/%s%s' % (
self.info['hash'], self.info['host'], self.info['port'], self.config['password'])).hexdigest()
self.sendLine('ext_user_log %s %s %s %s' % (
self.config['login'], md5_hash, urlEncode(self.config['location']), 'CaptainSoul'))
self._responseQueue.append(self._responseLogHook)
else:
logging.warning('Netsoul : Salut response unknown %d' % no)
def _responseLogHook(self, no):
# 2 = logged in; 33 = bad credentials; 131 = permission denied.
if no == 2:
self.factory.loggedHook()
elif no == 33:
self.factory.loginFailedHook()
elif no == 131:
# permission denied
self.factory.loginFailedHook()
else:
logging.warning('Netsoul : Log response unknown %d' % no)
self.factory.loginFailedHook()
# COMMANDS
def sendState(self, state):
# Publish a presence state stamped with the current time.
if state:
self.sendLine('state %s:%d' % (state, time()))
def sendWatch(self, sendWho=True):
# Subscribe to login/logout notifications for the configured watchlist.
self.sendLine('user_cmd watch_log_user {%s}' % ','.join(self.config['watchlist']))
if sendWho:
self.sendWho(self.config['watchlist'])
def sendWho(self, logins):
# Query the status of `logins`; results arrive via the _cmdWho* hooks.
if logins:
self._whoQueue.append(NsWhoResult(logins))
self.sendLine('user_cmd who {%s}' % ','.join(logins))
def sendExit(self):
self.sendLine('exit')
def sendCmdUser(self, cmd, data, dests):
# Generic fan-out: send a url-encoded command to multiple recipients.
if cmd and data and dests:
self.sendLine('user_cmd msg_user {%s} %s %s' % (','.join(dests), cmd, urlEncode(data)))
def sendMsg(self, msg, dests):
self.sendCmdUser('msg', msg, dests)
def sendStartTyping(self, dests):
self.sendCmdUser('dotnetSoul_UserTyping', 'null', dests)
def sendCancelTyping(self, dests):
self.sendCmdUser('dotnetSoul_UserCancelledTyping', 'null', dests)
def sendFileAsk(self, name, size, desc, dests):
self.sendCmdUser('file_ask', '%s %d %s passive' % (urlEncode(name), size, urlEncode(desc)), dests)
def sendFileStart(self, name, ip, port, dests):
self.sendCmdUser('file_start', '%s %s %d' % (urlEncode(name), ip, port), dests)
/CAMELS_library-0.3.tar.gz/CAMELS_library-0.3/plots/autoencoder/plot.py | from pylab import *
import numpy as np
from matplotlib.ticker import ScalarFormatter
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from matplotlib.ticker import AutoMinorLocator
from matplotlib.colors import LogNorm
from matplotlib.patches import Ellipse
rcParams["mathtext.fontset"]='cm'
############################### figure ###########################
#fig=figure(figsize=(15,10)) #give dimensions to the figure
##################################################################
################################ INPUT #######################################
#axes range
##############################################################################
############################ subplots ############################
#gs = gridspec.GridSpec(2,1,height_ratios=[5,2])
#ax1=plt.subplot(gs[0])
#ax2=plt.subplot(gs[1])
#make a subplot at a given position and with some given dimensions
#ax2=axes([0.4,0.55,0.25,0.1])
#gs.update(hspace=0.0,wspace=0.4,bottom=0.6,top=1.05)
#subplots_adjust(left=None, bottom=None, right=None, top=None,
# wspace=0.5, hspace=0.5)
#set minor ticks
#ax1.xaxis.set_minor_locator(AutoMinorLocator(4))
#ax1.yaxis.set_minor_locator(AutoMinorLocator(4))
#ax1.xaxis.set_major_formatter( NullFormatter() ) #unset x label
#ax1.yaxis.set_major_formatter( NullFormatter() ) #unset y label
# custom xticks
#ax1.set_xticks([0.25, 0.5, 1.0])
#ax1.set_yticks([0.25, 0.5, 1.0])
#ax1.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) #for log
#ax1.get_yaxis().set_label_coords(-0.2,0.5) #align y-axis for multiple plots
##################################################################
##################### special behaviour stuff ####################
#to show error missing error bars in log scale
#ax1.set_yscale('log',nonposy='clip') #set log scale for the y-axis
#set the x-axis in %f format instead of %e
#ax1.xaxis.set_major_formatter(ScalarFormatter())
#set size of ticks
#ax1.tick_params(axis='both', which='major', labelsize=10)
#ax1.tick_params(axis='both', which='minor', labelsize=8)
#set the position of the ylabel
#ax1.yaxis.set_label_coords(-0.2, 0.4)
#set yticks in scientific notation
#ax1.ticklabel_format(axis='y',style='sci',scilimits=(1,4))
#set the x-axis in %f format instead of %e
#formatter = matplotlib.ticker.FormatStrFormatter('$%.2e$')
#ax1.yaxis.set_major_formatter(formatter)
#add two legends in the same plot
#ax5 = ax1.twinx()
#ax5.yaxis.set_major_formatter( NullFormatter() ) #unset y label
#ax5.legend([p1,p2],['0.0 eV','0.3 eV'],loc=3,prop={'size':14},ncol=1)
#set points to show in the yaxis
#ax1.set_yticks([0,1,2])
#highlight a zoomed region
#mark_inset(ax1, ax2, loc1=2, loc2=4, fc="none",edgecolor='purple')
##################################################################
############################ plot type ###########################
#standard plot
#p1,=ax1.plot(x,y,linestyle='-',marker='None')
#error bar plot with the minimum and maximum values of the error bar interval
#p1=ax1.errorbar(r,xi,yerr=[delta_xi_min,delta_xi_max],lw=1,fmt='o',ms=2,
# elinewidth=1,capsize=5,linestyle='-')
#filled area
#p1=ax1.fill_between([x_min,x_max],[1.02,1.02],[0.98,0.98],color='k',alpha=0.2)
#hatch area
#ax1.fill([x_min,x_min,x_max,x_max],[y_min,3.0,3.0,y_min],#color='k',
# hatch='X',fill=False,alpha=0.5)
#scatter plot
#p1=ax1.scatter(k1,Pk1,c='b',edgecolor='none',s=8,marker='*')
#plot with markers
#pl4,=ax1.plot(ke3,Pk3/Pke3,marker='.',markevery=2,c='r',linestyle='None')
#set size of dashed lines
#ax.plot([0, 1], [0, 1], linestyle='--', dashes=(5, 1)) #length of 5, space of 1
#image plot
#cax = ax1.imshow(densities,cmap=get_cmap('jet'),origin='lower',
# extent=[x_min, x_max, y_min, y_max],
# #vmin=min_density,vmax=max_density)
# norm = LogNorm(vmin=min_density,vmax=max_density))
#cbar = fig.colorbar(cax, ax2, ax=ax1, ticks=[-1, 0, 1]) #in ax2 colorbar of ax1
#cbar.set_label(r"$M_{\rm CSF}\/[h^{-1}M_\odot]$",fontsize=14,labelpad=-50)
#cbar.ax.tick_params(labelsize=10) #to change size of ticks
#make a polygon
#polygon = Rectangle((0.4,50.0), 20.0, 20.0, edgecolor='purple',lw=0.5,
# fill=False)
#ax1.add_artist(polygon)
####################################################################
# --- log-log plot of the reconstruction-error histograms -------------------
# Axis limits for the figure (both axes are logarithmic below).
x_min, x_max = 2e-6, 7e-2
y_min, y_max = 1e-6, 1.0

# NOTE(review): figure(), savefig() and close() are bare pylab-style names;
# this assumes an earlier `from pylab import *` (or equivalent) that is not
# visible in this excerpt -- confirm before refactoring.
fig = figure()
ax1 = fig.add_subplot(111)
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.set_xlim([x_min,x_max])
ax1.set_ylim([y_min,y_max])
ax1.set_xlabel(r'${\rm reconstruction\,\,error}$',fontsize=18)
ax1.set_ylabel(r'${\rm fraction\,\,of\,\,maps}$',fontsize=18)
# Output file and the three input histograms (two-column text: error, pdf).
f_out = 'reconstruction_loss.pdf'
f1 = '../../../Results/autoencoder/results/Errors_train_on_fiducialTNG_test_on_allfiducialTNG.txt'
f2 = '../../../Results/autoencoder/results/Errors_train_on_fiducialTNG_test_on_allTNG.txt'
f3 = '../../../Results/autoencoder/results/Error_CAMEL.txt'
error1,pdf1 = np.loadtxt(f1,unpack=True)
error2,pdf2 = np.loadtxt(f2,unpack=True)
error3,pdf3 = np.loadtxt(f3,unpack=True)
# One curve per dataset: CV set (blue), LH set (red), CAMELS logo (green).
p1,=ax1.plot(error1,pdf1,linestyle='-',marker='None',c='b')
p2,=ax1.plot(error2,pdf2,linestyle='-',marker='None',c='r')
p3,=ax1.plot(error3,pdf3,linestyle='-',marker='None',c='g')
#place a label in the plot
#ax1.text(0.2,0.1, r"$z=4.0$", fontsize=22, color='k',transform=ax1.transAxes)
#legend
ax1.legend([p1,p2,p3],
           [r"${\rm IllustrisTNG\,\,CV\,\,set}$",
            r"${\rm IllustrisTNG\,\,LH\,\,set}$",
            r"${\rm CAMELS\,\,logo}$"],
           loc=1,prop={'size':13},ncol=1,frameon=True)
           #columnspacing=2,labelspacing=2)
#ax1.set_title(r'$\sum m_\nu=0.0\/{\rm eV}$',position=(0.5,1.02),size=18)
#title('About as simple as it gets, folks')
#suptitle('About as simple as it gets, folks')  #for title with several panels
#grid(True)
#show()
# Write the figure to disk and release it.
savefig(f_out, bbox_inches='tight')
close(fig)
###############################################################################
#some useful colors:
#'darkseagreen'
#'yellow'
#"hotpink"
#"gold"
#"fuchsia"
#"lime"
#"brown"
#"silver"
#"cyan"
#"dodgerblue"
#"darkviolet"
#"magenta"
#"deepskyblue"
#"orchid"
#"aqua"
#"darkorange"
#"coral"
#"lightgreen"
#"salmon"
#"bisque" | PypiClean |
/KratosSwimmingDEMApplication-9.2.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/KratosMultiphysics/SwimmingDEMApplication/derivative_recovery/pouliot_2012_recoverer.py | import KratosMultiphysics as Kratos
from . import recoverer
from . import L2_projection_recoverer
import KratosMultiphysics.SwimmingDEMApplication.parameters_tools as PT
import sys
class Pouliot2012GradientRecoverer(L2_projection_recoverer.L2ProjectionGradientRecoverer):
    """Velocity-gradient recoverer using the Pouliot (2012) elements.

    Specializes the L2-projection gradient recoverer by filling the auxiliary
    recovery model part with the Pouliot-2012 gradient element and registering
    the gradient-component DOFs on its nodes.
    """

    def __init__(self, project_parameters, model_part):
        """
        Args:
            project_parameters: Kratos ``Parameters`` object with the
                swimming-DEM settings (queried for vorticity options below).
            model_part: the fluid model part on which recovery is performed.
        """
        L2_projection_recoverer.L2ProjectionGradientRecoverer.__init__(self, project_parameters, model_part)
        # Element/condition names must match the ones registered in the
        # SwimmingDEMApplication C++ layer.
        self.element_type = "ComputeGradientPouliot20123D"
        self.condition_type = "ComputeLaplacianSimplexCondition3D"
        # Populate the auxiliary recovery model part (inherited helper).
        self.FillUpModelPart(self.element_type, self.condition_type)
        self.DOFs = (Kratos.VELOCITY_COMPONENT_GRADIENT_X, Kratos.VELOCITY_COMPONENT_GRADIENT_Y, Kratos.VELOCITY_COMPONENT_GRADIENT_Z)
        self.AddDofs(self.DOFs)
        # Vorticity is needed either when explicitly requested or when any
        # property block declares vorticity-induced lift parameters.
        self.calculate_vorticity = (project_parameters["vorticity_calculation_type"].GetInt() > 0
                                    or PT.RecursiveFindParametersWithCondition(project_parameters["properties"],
                                                                               'vorticity_induced_lift_parameters'))
class Pouliot2012MaterialAccelerationRecoverer(Pouliot2012GradientRecoverer, L2_projection_recoverer.L2ProjectionMaterialAccelerationRecoverer):
    """Material-acceleration recoverer built on the Pouliot-2012 gradient
    recovery, solved with a dedicated derivative-recovery strategy."""

    def __init__(self, model_part, project_parameters, do_pre_recovery = False):
        """
        Args:
            model_part: the fluid model part.
            project_parameters: Kratos ``Parameters`` with the settings.
            do_pre_recovery: if True, an extra warm-up solve is performed
                before the actual recovery solve (see ``Solve`` below).

        NOTE(review): the positional order here is (model_part,
        project_parameters), the reverse of the sibling recoverers in this
        module -- the two parent ``__init__`` calls below compensate by
        swapping the arguments back.  Confirm call sites before changing.
        """
        L2_projection_recoverer.L2ProjectionMaterialAccelerationRecoverer.__init__(self, project_parameters, model_part)
        Pouliot2012GradientRecoverer.__init__(self, project_parameters, model_part)
        self.do_pre_recovery = do_pre_recovery
        # Linear-solver and strategy set-up for the recovery system.
        scheme = Kratos.ResidualBasedIncrementalUpdateStaticScheme()
        amgcl_smoother = Kratos.AMGCLSmoother.SPAI0
        amgcl_krylov_type = Kratos.AMGCLIterativeSolverType.BICGSTAB_WITH_GMRES_FALLBACK
        tolerance = 1e-12
        max_iterations = 200
        verbosity = 2 # 0->shows no information, 1->some information, 2->all the information
        gmres_size = 400
        # use_lumped_mass_matrix is set by the L2-projection base class; a
        # lumped mass matrix yields a well-conditioned SPD system, so plain CG
        # suffices; otherwise fall back to the AMGCL solver.
        if self.use_lumped_mass_matrix:
            linear_solver = Kratos.CGSolver()
        else:
            linear_solver = Kratos.AMGCLSolver(amgcl_smoother, amgcl_krylov_type, tolerance, max_iterations, verbosity,gmres_size)
        self.recovery_strategy = Kratos.ResidualBasedDerivativeRecoveryStrategy(self.recovery_model_part, scheme, linear_solver, False, True, False, False)
        self.recovery_strategy.SetEchoLevel(0)
class Pouliot2012LaplacianRecoverer(L2_projection_recoverer.L2ProjectionDerivativesRecoverer, recoverer.LaplacianRecoverer):
    """Velocity-Laplacian recoverer using the simplex Laplacian elements."""

    def __init__(self, project_parameters, model_part):
        recoverer.LaplacianRecoverer.__init__(self, project_parameters, model_part)
        # Element/condition names registered in the application's C++ layer.
        self.element_type = "ComputeLaplacianSimplex3D"
        self.condition_type = "ComputeLaplacianSimplexCondition3D"
        self.FillUpModelPart(self.element_type, self.condition_type)
        self.DOFs = (Kratos.VELOCITY_LAPLACIAN_X, Kratos.VELOCITY_LAPLACIAN_Y, Kratos.VELOCITY_LAPLACIAN_Z)
        self.AddDofs(self.DOFs)

    def RecoverVectorLaplacian(self, vector_variable, laplacian_variable):
        # NOTE(review): both arguments are ignored -- the target variable is
        # hard-coded to VELOCITY_LAPLACIAN.  Presumably the signature matches
        # the recoverer.LaplacianRecoverer interface; confirm before relying
        # on per-variable behaviour.
        self.SetToZero(Kratos.VELOCITY_LAPLACIAN)
        self.recovery_strategy.Solve()

    def Solve(self):
        # NOTE(review): this method zeroes VELOCITY_COMPONENT_GRADIENT and
        # reads self.do_pre_recovery, both of which are set up by
        # Pouliot2012MaterialAccelerationRecoverer, not by this class --
        # verify it is only reached through that class.
        Kratos.Logger.PrintInfo("SwimmingDEM", "\nSolving for the fluid acceleration...")
        sys.stdout.flush()
        self.SetToZero(Kratos.VELOCITY_COMPONENT_GRADIENT)
        if self.do_pre_recovery:
            # Warm-up solve to improve the initial guess of the second solve.
            self.recovery_strategy.Solve()
        self.recovery_strategy.Solve()
/Kallithea-0.7.0.tar.gz/Kallithea-0.7.0/kallithea/lib/vcs/subprocessio.py | import collections
import os
import subprocess
import threading
class StreamFeeder(threading.Thread):
    """
    Normal writing into pipe-like is blocking once the buffer is filled.
    This thread allows a thread to seep data from a file-like into a pipe
    without blocking the main thread.
    We close inpipe once the end of the source stream is reached.

    The ``source`` may be:

    - ``bytes``/``bytearray``: fed verbatim;
    - ``str``: encoded as UTF-8 first (the previous ``bytes(source)`` call
      raised ``TypeError`` on Python 3 because no encoding was given);
    - an ``int`` file descriptor: wrapped with ``os.fdopen``;
    - any object with a ``read`` method: streamed in 4 KiB chunks.
    """

    def __init__(self, source):
        super(StreamFeeder, self).__init__()
        self.daemon = True
        filelike = False
        self.bytes = b''
        if isinstance(source, str):
            # Bug fix: bytes(<str>) without an encoding raises TypeError on
            # Python 3; encode explicitly instead.
            self.bytes = source.encode('utf-8')
        elif isinstance(source, (bytes, bytearray)):
            self.bytes = bytes(source)
        else:  # can be either file pointer or file-like
            if isinstance(source, int):  # file pointer it is
                # converting file descriptor (int) stdin into file-like
                source = os.fdopen(source, 'rb', 16384)
            # let's see if source is file-like by now
            filelike = hasattr(source, 'read')
            if not filelike:
                raise TypeError("StreamFeeder's source object must be a readable "
                                "file-like, a file descriptor, or a string-like.")
        self.source = source
        # readiface is handed to the consumer (see ``output``); writeiface is
        # written to and closed by run() to signal EOF downstream.
        self.readiface, self.writeiface = os.pipe()

    def run(self):
        t = self.writeiface
        try:
            if self.bytes:
                os.write(t, self.bytes)
            else:
                s = self.source
                b = s.read(4096)
                while b:
                    os.write(t, b)
                    b = s.read(4096)
        finally:
            # Always close the write end, even if a write fails, so readers
            # of ``output`` see EOF instead of blocking forever.
            os.close(t)

    @property
    def output(self):
        """Read end (int fd) of the pipe being fed by this thread."""
        return self.readiface
class InputStreamChunker(threading.Thread):
    """Background thread that reads ``source`` in fixed-size chunks and
    appends them to ``target`` (a deque shared with BufferedGenerator).

    Flow control is done with four events:
      * ``data_added``   -- set whenever a chunk lands in ``target``;
      * ``keep_reading`` -- cleared by this thread when the buffer is full,
                            set again by the consumer after it pops a chunk;
      * ``EOF``          -- set once the source is exhausted (or on stop());
      * ``go``           -- cleared by stop() to make the read loop exit.
    """

    def __init__(self, source, target, buffer_size, chunk_size):
        super(InputStreamChunker, self).__init__()
        self.daemon = True  # die die die.
        self.source = source
        self.target = target
        # +1 so at least one chunk is always allowed, even when
        # buffer_size < chunk_size.
        self.chunk_count_max = int(buffer_size / chunk_size) + 1
        self.chunk_size = chunk_size
        self.data_added = threading.Event()
        self.data_added.clear()
        self.keep_reading = threading.Event()
        self.keep_reading.set()
        self.EOF = threading.Event()
        self.EOF.clear()
        self.go = threading.Event()
        self.go.set()

    def stop(self):
        """Ask the reader loop to terminate and mark the stream as ended."""
        self.go.clear()
        self.EOF.set()
        try:
            # this is not proper, but is done to force the reader thread let
            # go of the input because, if successful, .close() will send EOF
            # down the pipe.
            self.source.close()
        except:
            pass

    def run(self):
        # Local aliases to keep the hot loop terse.
        s = self.source
        t = self.target
        cs = self.chunk_size
        ccm = self.chunk_count_max
        kr = self.keep_reading
        da = self.data_added
        go = self.go
        try:
            b = s.read(cs)
        except ValueError:
            b = ''
        while b and go.is_set():
            if len(t) > ccm:
                # Buffer full: pause until the consumer signals keep_reading
                # again.  A short wait followed by a longer one gives the
                # consumer ~12 s in total before we give up.
                kr.clear()
                kr.wait(2)
                if not kr.wait(10):
                    raise IOError(
                        "Timed out while waiting for input from subprocess.")
            t.append(b)
            da.set()
            try:
                b = s.read(cs)
            except ValueError:  # probably "I/O operation on closed file"
                b = ''
        self.EOF.set()
        da.set()  # for cases when done but there was no input.
class BufferedGenerator(object):
    """
    Class behaves as a non-blocking, buffered pipe reader.
    Reads chunks of data (through a thread)
    from a blocking pipe, and attaches these to an array (Deque) of chunks.
    Reading is halted in the thread when max chunks is internally buffered.
    The .next() may operate in blocking or non-blocking fashion by yielding
    '' if no data is ready
    to be sent or by not returning until there is some data to send
    When we get EOF from underlying source pipe we raise the marker to raise
    StopIteration after the last chunk of data is yielded.
    """

    def __init__(self, source, buffer_size=65536, chunk_size=4096,
                 starting_values=None, bottomless=False):
        """Spawn an InputStreamChunker reading ``source`` into this buffer.

        :param source: file-like object to read from (blocking is fine).
        :param buffer_size: approximate max number of buffered bytes.
        :param chunk_size: max size of one read/chunk.
        :param starting_values: chunks to pre-seed the buffer with.
        :param bottomless: if True, the deque is bounded and silently drops
            the oldest chunks instead of pausing the reader.
        """
        starting_values = starting_values or []
        if bottomless:
            maxlen = int(buffer_size / chunk_size)
        else:
            maxlen = None

        self.data = collections.deque(starting_values, maxlen)
        self.worker = InputStreamChunker(source, self.data, buffer_size,
                                         chunk_size)
        if starting_values:
            self.worker.data_added.set()
        self.worker.start()

    ####################
    # Generator's methods
    ####################

    def __iter__(self):
        return self

    def __next__(self):
        # Poll (in 0.2 s slices) until a chunk is available or EOF is set.
        while not len(self.data) and not self.worker.EOF.is_set():
            self.worker.data_added.clear()
            self.worker.data_added.wait(0.2)
        if len(self.data):
            # Consumed a chunk, so the reader may resume if it was paused.
            self.worker.keep_reading.set()
            return bytes(self.data.popleft())
        elif self.worker.EOF.is_set():
            raise StopIteration

    def throw(self, type, value=None, traceback=None):
        if not self.worker.EOF.is_set():
            raise type(value)

    def start(self):
        # NOTE: the worker is already started in __init__; calling this on a
        # running worker raises RuntimeError.  Kept for interface parity.
        self.worker.start()

    def stop(self):
        self.worker.stop()

    def close(self):
        try:
            self.worker.stop()
            self.throw(GeneratorExit)
        except (GeneratorExit, StopIteration):
            pass

    ####################
    # Threaded reader's infrastructure.
    ####################
    @property
    def input(self):
        # Bug fix: this used to return self.worker.w, but InputStreamChunker
        # defines no attribute 'w', so any access raised AttributeError.
        # Expose the worker's underlying source stream instead.
        return self.worker.source

    @property
    def data_added_event(self):
        return self.worker.data_added

    @property
    def data_added(self):
        return self.worker.data_added.is_set()

    @property
    def reading_paused(self):
        return not self.worker.keep_reading.is_set()

    @property
    def done_reading(self):
        """
        Done_reading does not mean that the iterator's buffer is empty.
        Iterator might have done reading from underlying source, but the read
        chunks might still be available for serving through .next() method.

        :returns: An Bool value.
        """
        return self.worker.EOF.is_set()

    @property
    def length(self):
        """
        returns int.

        This is the length of the queue of chunks, not the length of
        the combined contents in those chunks.

        __len__() cannot be meaningfully implemented because this
        reader is just flying through a bottomless pit content and
        can only know the length of what it already saw.

        If __len__() on WSGI server per PEP 3333 returns a value,
        the response's length will be set to that. In order not to
        confuse WSGI PEP3333 servers, we will not implement __len__
        at all.
        """
        return len(self.data)

    def prepend(self, x):
        self.data.appendleft(x)

    def append(self, x):
        self.data.append(x)

    def extend(self, o):
        self.data.extend(o)

    def __getitem__(self, i):
        return self.data[i]
class SubprocessIOChunker(object):
    """
    Processor class wrapping handling of subprocess IO.

    In a way, this is a "communicate()" replacement with a twist.

    - We are multithreaded. Writing in and reading out, err are all sep threads.
    - We support concurrent (in and out) stream processing.
    - The output is not a stream. It's a queue of read string (bytes, not str)
      chunks. The object behaves as an iterable. You can "for chunk in obj:" us.
    - We are non-blocking in more respects than communicate()
      (reading from subprocess out pauses when internal buffer is full, but
      does not block the parent calling code. On the flip side, reading from
      slow-yielding subprocess may block the iteration until data shows up. This
      does not block the parallel inpipe reading occurring parallel thread.)

    The purpose of the object is to allow us to wrap subprocess interactions into
    an iterable that can be passed to a WSGI server as the application's return
    value. Because of stream-processing-ability, WSGI does not have to read ALL
    of the subprocess's output and buffer it, before handing it to WSGI server for
    HTTP response. Instead, the class initializer reads just a bit of the stream
    to figure out if error occurred or likely to occur and if not, just hands the
    further iteration over subprocess output to the server for completion of HTTP
    response.

    The real or perceived subprocess error is trapped and raised as one of
    EnvironmentError family of exceptions

    Example usage:
    #    try:
    #        answer = SubprocessIOChunker(
    #            cmd,
    #            input,
    #            buffer_size = 65536,
    #            chunk_size = 4096
    #            )
    #    except (EnvironmentError) as e:
    #        print str(e)
    #        raise e
    #
    #    return answer
    """

    def __init__(self, cmd, inputstream=None, buffer_size=65536,
                 chunk_size=4096, starting_values=None, **kwargs):
        """
        Initializes SubprocessIOChunker

        :param cmd: A Subprocess.Popen style "cmd". Can be string or array of strings
        :param inputstream: (Default: None) A file-like, string, or file pointer.
        :param buffer_size: (Default: 65536) A size of total buffer per stream in bytes.
        :param chunk_size: (Default: 4096) A max size of a chunk. Actual chunk may be smaller.
        :param starting_values: (Default: []) An array of strings to put in front of output queue.
        """
        starting_values = starting_values or []
        if inputstream:
            # Feed stdin from a background thread so we never deadlock on a
            # full pipe buffer while also trying to read stdout/stderr.
            input_streamer = StreamFeeder(inputstream)
            input_streamer.start()
            inputstream = input_streamer.output

        # Note: fragile cmd mangling has been removed for use in Kallithea
        assert isinstance(cmd, list), cmd

        _p = subprocess.Popen(cmd, bufsize=-1,
                              stdin=inputstream,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              **kwargs)

        bg_out = BufferedGenerator(_p.stdout, buffer_size, chunk_size,
                                   starting_values)
        # stderr buffer is "bottomless": old chunks are dropped so a chatty
        # stderr can never stall the process.
        bg_err = BufferedGenerator(_p.stderr, 16000, 1, bottomless=True)

        while not bg_out.done_reading and not bg_out.reading_paused:
            # doing this until we reach either end of file, or end of buffer.
            bg_out.data_added_event.wait(1)
            bg_out.data_added_event.clear()

        # at this point it's still ambiguous if we are done reading or just full buffer.
        # Either way, if error (returned by ended process, or implied based on
        # presence of stuff in stderr output) we error out.
        # Else, we are happy.
        returncode = _p.poll()
        if (returncode is not None  # process has terminated
                and returncode != 0
                ):  # and it failed
            bg_out.stop()
            out = b''.join(bg_out)
            bg_err.stop()
            err = b''.join(bg_err)
            if (err.strip() == b'fatal: The remote end hung up unexpectedly' and
                    out.startswith(b'0034shallow ')
                    ):
                # hack inspired by https://github.com/schacon/grack/pull/7
                bg_out = iter([out])
                _p = None
            elif err:
                raise EnvironmentError("Subprocess exited due to an error: %s" % err)
            else:
                raise EnvironmentError(
                    "Subprocess exited with non 0 ret code: %s" % returncode)
        self.process = _p
        self.output = bg_out
        self.error = bg_err
        self.inputstream = inputstream

    def __iter__(self):
        return self

    def __next__(self):
        # self.process is None in the "grack hack" case above -- the whole
        # output was already materialized, so just drain self.output.
        if self.process:
            returncode = self.process.poll()
            if (returncode is not None  # process has terminated
                    and returncode != 0
                    ):  # and it failed
                getattr(self.output, 'stop', lambda: None)()
                self.error.stop()
                # Bug fix: self.error yields bytes chunks, so joining them
                # with a str separator ('') raised TypeError here.  Join as
                # bytes and decode for the exception message.
                err = b''.join(self.error).decode('utf-8', 'replace')
                raise EnvironmentError("Subprocess exited due to an error:\n" + err)
        return next(self.output)

    def throw(self, type, value=None, traceback=None):
        if getattr(self.output, 'length') or not getattr(self.output, 'done_reading'):
            raise type(value)

    def close(self):
        # Best-effort teardown: each resource is released independently.
        try:
            getattr(self.process, 'terminate', lambda: None)()
        except:
            pass
        try:
            getattr(self.output, 'close', lambda: None)()
        except:
            pass
        try:
            self.error.close()
        except:
            pass
        try:
            os.close(self.inputstream)
        except:
            pass
/Django_Persistent_Message-1.2-py3-none-any.whl/persistent_message/views/api.py |
import unicodedata
from persistent_message.models import Message, TagGroup, Tag
from persistent_message.decorators import message_admin_required
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.views import View
from django.utils.decorators import method_decorator
from logging import getLogger
import dateutil.parser
import json
logger = getLogger(__name__)
@method_decorator(message_admin_required, name='dispatch')
class MessageAPI(View):
    """JSON API for managing persistent messages.

    GET    (with 'message_id' kwarg)  -> one message
    GET    (without)                  -> all messages, active/recent first
    POST                              -> create a message
    PUT    (with 'message_id' kwarg)  -> update a message
    DELETE (with 'message_id' kwarg)  -> delete a message
    """

    def get(self, request, *args, **kwargs):
        """Return one message by pk, or all messages when no pk is given."""
        try:
            message_id = kwargs['message_id']
            try:
                message = Message.objects.get(pk=message_id)
                return self.json_response({'message': message.to_json()})
            except Message.DoesNotExist:
                return self.error_response(
                    404, 'Message {} not found'.format(message_id))
        except KeyError:
            # No message_id in the URL: list everything, sorted so active
            # and recently-modified messages come first.
            messages = []
            for message in sorted(Message.objects.all(), key=lambda m: (
                    m.is_active(), m.modified), reverse=True):
                messages.append(message.to_json())
            return self.json_response({'messages': messages})

    def put(self, request, *args, **kwargs):
        """Update an existing message from the JSON request body."""
        try:
            message_id = kwargs['message_id']
            self.message = Message.objects.get(pk=message_id)
        except Message.DoesNotExist:
            return self.error_response(
                404, 'Message {} not found'.format(message_id))
        except KeyError:
            return self.error_response(400, 'Missing message ID')

        try:
            self._deserialize(request)
            self.message.save()
            if self.tags is not None:
                # Tags were provided: replace the existing set wholesale.
                self.message.tags.clear()
                self.message.tags.add(*self.tags)
        except ValidationError as ex:
            return self.error_response(400, ex)

        logger.info('Message ({}) updated'.format(self.message.pk))
        return self.json_response({'message': self.message.to_json()})

    def post(self, request, *args, **kwargs):
        """Create a new message from the JSON request body."""
        self.message = Message()
        try:
            self._deserialize(request)
            self.message.save()
            if self.tags is not None:
                self.message.tags.add(*self.tags)
        except ValidationError as ex:
            return self.error_response(400, ex)

        logger.info('Message ({}) created'.format(self.message.pk))
        return self.json_response({'message': self.message.to_json()})

    def delete(self, request, *args, **kwargs):
        """Delete a message by pk."""
        try:
            message_id = kwargs['message_id']
            message = Message.objects.get(pk=message_id)
        except Message.DoesNotExist:
            return self.error_response(
                404, 'Message {} not found'.format(message_id))
        except KeyError:
            return self.error_response(400, 'Missing message ID')

        # Bug fix: capture the pk before delete() -- Django sets the
        # instance's pk to None after deletion, so logging it afterwards
        # recorded 'Message (None) deleted'.
        message_pk = message.pk
        message.delete()
        logger.info('Message ({}) deleted'.format(message_pk))
        return self.json_response({})

    def error_response(self, status, message='', content=None):
        """Return an error payload as JSON with the given HTTP status."""
        # Bug fix: the default used to be a mutable dict (content={}),
        # which is shared across calls and was mutated below.
        if content is None:
            content = {}
        content['error'] = '{}'.format(message)
        return HttpResponse(json.dumps(content),
                            status=status,
                            content_type='application/json')

    def json_response(self, content='', status=200):
        """Serialize ``content`` as JSON with the given HTTP status."""
        return HttpResponse(json.dumps(content),
                            status=status,
                            content_type='application/json')

    def _deserialize(self, request):
        """Populate self.message (and self.tags) from the request body.

        Raises ValidationError on malformed JSON, on a payload without any
        recognized key, or on an unknown tag name.
        """
        self.tags = None
        try:
            json_data = json.loads(request.body)['message']
            if not any(key in json_data for key in [
                    'content', 'level', 'begins', 'expires', 'tags']):
                raise ValidationError()
        except Exception:
            raise ValidationError('Invalid JSON: {}'.format(request.body))

        if 'content' in json_data:
            # Normalize Unicode so visually-identical content compares equal.
            self.message.content = unicodedata.normalize(
                "NFKD", json_data['content']).strip()

        if 'level' in json_data:
            self.message.level = json_data['level']

        if 'begins' in json_data:
            begins = json_data['begins']
            self.message.begins = dateutil.parser.parse(begins) if (
                begins is not None) else None

        if 'expires' in json_data:
            expires = json_data['expires']
            self.message.expires = dateutil.parser.parse(expires) if (
                expires is not None) else None

        if 'tags' in json_data:
            self.tags = []
            for name in json_data['tags']:
                try:
                    self.tags.append(Tag.objects.get(name=name))
                except Tag.DoesNotExist:
                    raise ValidationError('Invalid tag: {}'.format(name))

        self.message.modified_by = request.user.username
@method_decorator(message_admin_required, name='dispatch')
class TagGroupAPI(View):
    """Read-only JSON endpoint listing every tag group."""

    def get(self, request, *args, **kwargs):
        """Return all tag groups serialized as JSON."""
        payload = {
            'tag_groups': [group.to_json() for group in TagGroup.objects.all()]
        }
        return HttpResponse(json.dumps(payload),
                            content_type='application/json')
/netket-3.9.2.tar.gz/netket-3.9.2/netket/vqs/base.py |
import abc
from typing import Optional, Tuple
import numpy as np
import jax
import jax.numpy as jnp
from jax.nn.initializers import normal
import flax
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import CollectionFilter, DenyList # noqa: F401
import netket.jax as nkjax
from netket.operator import AbstractOperator, Squared
from netket.hilbert import AbstractHilbert
from netket.utils.types import PyTree, PRNGKeyT, NNInitFunc
from netket.utils.dispatch import dispatch, TrueT, FalseT
from netket.stats import Stats
class VariationalState(abc.ABC):
    """Abstract class for variational states representing either pure states
    or mixed quantum states.

    A variational state is a quantum state depending on a set of
    parameters, and that supports operations such
    as computing quantum expectation values and their gradients.

    A variational state can be serialized using flax's msgpack machinery.
    See `their docs <https://flax.readthedocs.io/en/latest/flax.serialization.html>`_.
    """

    def __init__(self, hilbert: AbstractHilbert):
        """Initialize the Abstract base class of a Variational State defined
        on an hilbert space.

        Args:
            hilbert: The hilbert space upon which this state is defined.
        """
        self._hilbert = hilbert  # type: AbstractHilbert
        # Mutable model state (e.g. batch statistics) and trainable parameters
        # are kept separate, mirroring flax's variable-collection split.
        self._model_state = {}  # type: PyTree
        self._parameters = {}  # type: PyTree

    @property
    def hilbert(self) -> AbstractHilbert:
        r"""The descriptor of the Hilbert space
        on which this variational state is defined.
        """
        return self._hilbert

    @property
    def parameters(self) -> PyTree:
        r"""The pytree of the parameters of the model."""
        return self._parameters

    @property
    def n_parameters(self) -> int:
        r"""The total number of parameters in the model."""
        return nkjax.tree_size(self.parameters)

    @parameters.setter
    def parameters(self, pars: PyTree):
        # Freeze plain dicts so parameters are always an immutable pytree;
        # lists/tuples are left as-is.
        if not isinstance(pars, FrozenDict):
            if not isinstance(pars, list) and not isinstance(pars, tuple):
                pars = flax.core.freeze(pars)
        self._parameters = pars
        # Invalidate any cached quantities that depend on the parameters.
        self.reset()

    @property
    def model_state(self) -> Optional[PyTree]:
        r"""The optional pytree with the mutable state of the model."""
        return self._model_state

    @model_state.setter
    def model_state(self, state: PyTree):
        if not isinstance(state, FrozenDict):
            if not isinstance(state, list) and not isinstance(state, tuple):
                state = flax.core.freeze(state)
        self._model_state = state
        self.reset()

    @property
    def variables(self) -> PyTree:
        r"""The PyTree containing the parameters and state of the model,
        used when evaluating it.
        """
        return flax.core.freeze({"params": self.parameters, **self.model_state})

    @variables.setter
    def variables(self, var: PyTree):
        if not isinstance(var, FrozenDict):
            var = flax.core.freeze(var)
        # FrozenDict.pop returns (dict-without-key, popped-value).
        self.model_state, self.parameters = var.pop("params")

    def init_parameters(
        self, init_fun: Optional[NNInitFunc] = None, *, seed: Optional[PRNGKeyT] = None
    ):
        r"""
        Re-initializes all the parameters with the provided initialization function,
        defaulting to the normal distribution of standard deviation 0.01.

        .. warning::

            The init function will not change the dtype of the parameters, which is
            determined by the model. DO NOT SPECIFY IT INSIDE THE INIT FUNCTION

        Args:
            init_fun: a jax initializer such as :func:`jax.nn.initializers.normal`.
                Must be a Callable taking 3 inputs, the jax PRNG key, the shape and the
                dtype, and outputting an array with the valid dtype and shape. If left
                unspecified, defaults to :code:`jax.nn.initializers.normal(stddev=0.01)`
            seed: Optional seed to be used. The seed is synced across all MPI processes.
                If unspecified, uses a random seed.
        """
        if init_fun is None:
            init_fun = normal(stddev=0.01)

        rng = nkjax.PRNGSeq(nkjax.PRNGKey(seed))

        def new_pars(par):
            # One fresh key per leaf; cast back to the leaf's original dtype.
            return jnp.asarray(
                init_fun(rng.take(1)[0], shape=par.shape, dtype=par.dtype),
                dtype=par.dtype,
            )

        self.parameters = jax.tree_map(new_pars, self.parameters)

    def reset(self):
        r"""Resets the internal cache of the variational state.
        Called automatically when the parameters/state is updated.
        """

    def expect(self, Ô: AbstractOperator) -> Stats:
        r"""Estimates the quantum expectation value for a given operator O.
        In the case of a pure state $\psi$, this is $<O>= <Psi|O|Psi>/<Psi|Psi>$
        otherwise for a mixed state $\rho$, this is $<O> = \Tr[\rho \hat{O}/\Tr[\rho]$.

        Args:
            Ô: the operator O.

        Returns:
            An estimation of the quantum expectation value <O>.
        """
        # Delegates to the module-level multiple-dispatch function.
        return expect(self, Ô)

    def grad(
        self,
        Ô,
        *,
        use_covariance: Optional[bool] = None,
        mutable: Optional[CollectionFilter] = None,
    ) -> PyTree:
        r"""Estimates the gradient of the quantum expectation value of a given operator O.

        Args:
            Ô: the operator O.
            use_covariance: optional override for whether to use the covariance
                formula. By default it's automatically detected from hermiticity.
            mutable: which collections in the model_state are treated as mutable.

        Returns:
            array: An estimation of the average gradient of the quantum expectation value <O>.
        """
        r = self.expect_and_grad(Ô, use_covariance=use_covariance, mutable=mutable)
        return r[1]

    def expect_and_grad(
        self,
        Ô: AbstractOperator,
        *,
        mutable: Optional[CollectionFilter] = None,
        use_covariance: Optional[bool] = None,
    ) -> Tuple[Stats, PyTree]:
        r"""Estimates the quantum expectation value and its gradient for a given operator O.

        Args:
            Ô: The operator Ô for which expectation value and gradient are computed.
            mutable: Can be bool, str, or list. Specifies which collections in the model_state should
                     be treated as  mutable: bool: all/no collections are mutable. str: The name of a
                     single mutable  collection. list: A list of names of mutable collections.
                     This is used to mutate the state of the model while you train it (for example
                     to implement BatchNorm. Consult
                     `Flax's Module.apply documentation <https://flax.readthedocs.io/en/latest/_modules/flax/linen/module.html#Module.apply>`_
                     for a more in-depth explanation).
            use_covariance: whether to use the covariance formula, usually reserved for
                hermitian operators, ⟨∂logψ Oˡᵒᶜ⟩ - ⟨∂logψ⟩⟨Oˡᵒᶜ⟩

        Returns:
            An estimate of the quantum expectation value <O>.
            An estimate of the gradient of the quantum expectation value <O>.
        """
        # NOTE(review): self.mutable is not defined on this base class; it is
        # presumably provided by concrete subclasses (e.g. MCState) -- confirm.
        if mutable is None:
            mutable = self.mutable

        return expect_and_grad(self, Ô, use_covariance, mutable=mutable)

    def expect_and_forces(
        self,
        Ô: AbstractOperator,
        *,
        mutable: Optional[CollectionFilter] = None,
    ) -> Tuple[Stats, PyTree]:
        r"""Estimates the quantum expectation value and corresponding force vector for a given operator O.

        The force vector F_j is defined as the covariance of log-derivative of the trial wave function
        and the local estimators of the operator. For complex holomorphic states, this is
        equivalent to the expectation gradient d<O>/d(θ_j)* = F_j. For real-parameter states,
        the gradient is given by d<O>/dθ_j = 2 Re[F_j].

        Args:
            Ô: The operator Ô for which expectation value and force are computed.
            mutable: Can be bool, str, or list. Specifies which collections in the model_state should
                     be treated as  mutable: bool: all/no collections are mutable. str: The name of a
                     single mutable  collection. list: A list of names of mutable collections.
                     This is used to mutate the state of the model while you train it (for example
                     to implement BatchNorm. Consult
                     `Flax's Module.apply documentation <https://flax.readthedocs.io/en/latest/_modules/flax/linen/module.html#Module.apply>`_
                     for a more in-depth explanation).

        Returns:
            An estimate of the quantum expectation value <O>.
            An estimate of the force vector F_j = cov[dlog(ψ)/dx_j, O_loc].
        """
        if mutable is None:
            mutable = self.mutable

        return expect_and_forces(self, Ô, mutable=mutable)

    # @abc.abstractmethod
    def quantum_geometric_tensor(self, qgt_type):
        r"""Computes an estimate of the quantum geometric tensor G_ij.

        This function returns a linear operator that can be used to apply G_ij to a
        given vector or can be converted to a full matrix.

        Args:
            qgt_type: the optional type of the quantum geometric tensor. By default it
                is automatically selected.

        Returns:
            nk.optimizer.LinearOperator: A linear operator representing the quantum
            geometric tensor.
        """
        raise NotImplementedError  # pragma: no cover

    def to_array(self, normalize: bool = True) -> jnp.ndarray:
        """
        Returns the dense-vector representation of this state.

        Args:
            normalize: If True, the vector is normalized to have L2-norm 1.

        Returns:
            An exponentially large vector representing the state in the computational
            basis.
        """
        # Concrete subclasses are expected to override this.
        return NotImplemented  # pragma: no cover

    def to_qobj(self):  # -> "qutip.Qobj"
        r"""Convert the variational state to a qutip's ket Qobj.

        Returns:
            A :class:`qutip.Qobj` object.
        """
        from qutip import Qobj

        # Ket dimensions: one row block per site, column dimension 1.
        q_dims = [list(self.hilbert.shape), [1 for i in range(self.hilbert.size)]]
        return Qobj(np.asarray(self.to_array()), dims=q_dims)
class VariationalMixedState(VariationalState):
    """Abstract base for variational mixed (density-matrix) states.

    The state lives on the doubled Hilbert space; ``hilbert_physical`` keeps
    track of the underlying physical space.
    """

    def __init__(self, hilbert, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._hilbert_physical = hilbert

    @property
    def hilbert_physical(self) -> AbstractHilbert:
        """The physical Hilbert space over which this density matrix acts."""
        return self._hilbert_physical

    def to_matrix(self, normalize: bool = True) -> jnp.ndarray:
        """
        Returns the dense-matrix representation of this operator.

        Args:
            normalize: If True, the matrix is normalized to have trace 1.

        Returns:
            An exponentially large matrix representing the state in the computational
            basis.
        """
        # Concrete subclasses are expected to override this.
        return NotImplemented  # pragma: no cover

    def to_qobj(self):  # -> "qutip.Qobj"
        r"""Convert this mixed state to a qutip density matrix Qobj.

        Returns:
            A :class:`qutip.Qobj` object.
        """
        from qutip import Qobj

        # Density-matrix dimensions: physical space on both rows and columns.
        q_dims = [list(self.hilbert_physical.shape), list(self.hilbert_physical.shape)]
        return Qobj(np.asarray(self.to_matrix()), dims=q_dims)
@dispatch.abstract
def expect(vstate: VariationalState, operator: AbstractOperator):
    """
    Computes the expectation value of the given operator over the
    variational state.

    Additional Information:
        To implement `vstate.expect` for a custom operator, implement
        the multiple-dispatch (plum-dispatch) based method according

        .. code:

            @nk.vqs.expect.register
            expect(vstate : VStateType, operator: OperatorType):
                return ...

    Args:
        vstate: The VariationalState
        operator: The Operator or SuperOperator.

    Returns:
        The expectation value wrapped in a `Stats` object.
    """
# default dispatch where use_covariance is not specified
# Give it an higher precedence so this is always executed first, no matter what, if there
# is a dispatch ambiguity.
# This is not needed, but makes the dispatch logic work fine even if the users write weak
# signatures (eg: if an users defines `expect_grad(vs: MCState, op: MyOperator, use_cov: Any)`
# instead of `expect_grad(vs: MCState, op: MyOperator, use_cov: bool)`
# there would be a resolution error because the signature defined by the user is stricter
# for some arguments, but the one below here is stricter for `use_covariance` which is
# set to bool. Since this signature below, in the worst case, does nothing, this ensures
# that `expect_and_grad` is more user-friendly.
@dispatch(precedence=10)
def expect_and_grad(
    vstate: VariationalState,
    operator: AbstractOperator,
    use_covariance: Optional[bool],
    *args,
    mutable: CollectionFilter,
    **kwargs,
):
    r"""Estimates the quantum expectation value and its gradient for a given operator O.

    See `VariationalState.expect_and_grad` docstring for more information.

    Additional Information:
        To implement `vstate.expect` for a custom operator, implement
        the multiple-dispatch (plum-dispatch) based method according to the signature below.

        .. code:

            @nk.vqs.expect_and_grad.register
            expect_and_grad(vstate : VStateType, operator: OperatorType,
                            use_covariance : bool/TrueT/FalseT, * mutable)
                return ...
    """
    # convert to type-static True/False
    if isinstance(use_covariance, bool):
        use_covariance = TrueT() if use_covariance else FalseT()

    if use_covariance is None:
        # Auto-detect: Squared operators never use the covariance formula;
        # otherwise it is used exactly for hermitian operators.
        if isinstance(operator, Squared):
            use_covariance = FalseT()
        else:
            use_covariance = TrueT() if operator.is_hermitian else FalseT()

    # Re-dispatch with a type-static use_covariance so a more specific
    # implementation is selected.
    return expect_and_grad(
        vstate, operator, use_covariance, *args, mutable=mutable, **kwargs
    )
@dispatch.abstract
def expect_and_forces(
    vstate: VariationalState,
    operator: AbstractOperator,
    *args,
    mutable: CollectionFilter,
    **kwargs,
):
    r"""Estimates the quantum expectation value and corresponding force vector for a given operator O.

    See `VariationalState.expect_and_forces` docstring for more information.

    Additional Information:
        To implement `vstate.expect_and_forces` for a custom operator,
        implement the multiple-dispatch (plum-dispatch) based method
        according to the signature below.

        .. code::

            @nk.vqs.expect_and_forces.register
            def expect_and_forces(vstate: VStateType, operator: OperatorType,
                                  *, mutable):
                return ...
    """
// Jasmine specs for the bootstrap-slider `ticks_tooltip` option:
// hovering over ticks should move/show the main tooltip.
describe("'ticks_tooltip' Option tests", function() {
    var testSlider;
    var mouseEventArguments;

    beforeEach(function() {
        // Set up default set of mouse event arguments.
        // These are the positional arguments of the (deprecated)
        // MouseEvent.initMouseEvent(); index 7 is clientX, index 8 is clientY.
        mouseEventArguments = [
            'mousemove', // type
            true, // canBubble
            true, // cancelable
            document, // view,
            0, // detail
            0, // screenX
            0, // screenY
            undefined, // clientX
            undefined, // clientY,
            false, // ctrlKey
            false, // altKey
            false, // shiftKey
            false, // metaKey,
            0, // button
            null // relatedTarget
        ];
    });

    describe("ticks_tooltip states", function() {
        it("should have the tooltip above the last hovered over element", function() {
            testSlider = new Slider(document.getElementById("testSlider1"), {
                ticks: [0, 1, 2, 3, 4, 5, 6],
                ticks_positions: [0, 19, 29, 39, 49, 95, 100],
                step: 1,
                value: 4,
                ticks_tooltip: true,
                orientation: 'horizontal'
            });
            mouseEventArguments[8] = testSlider.sliderElem.offsetTop; // clientY
            // Build one synthetic event per tick position of interest,
            // named after the tick's percentage position.
            var mouse49 = document.createEvent('MouseEvents');
            mouseEventArguments[7] = testSlider.ticks[4].offsetLeft + testSlider.sliderElem.offsetLeft; // clientX
            mouse49.initMouseEvent.apply(mouse49, mouseEventArguments);
            var mouse95 = document.createEvent('MouseEvents');
            mouseEventArguments[7] = testSlider.ticks[5].offsetLeft + testSlider.sliderElem.offsetLeft; // clientX
            mouse95.initMouseEvent.apply(mouse95, mouseEventArguments);
            var mouse100 = document.createEvent('MouseEvents');
            mouseEventArguments[7] = testSlider.ticks[6].offsetLeft + testSlider.sliderElem.offsetLeft; // clientX
            mouse100.initMouseEvent.apply(mouse100, mouseEventArguments);
            var mouseStart = document.createEvent('MouseEvents');
            mouseEventArguments[7] = testSlider.ticks[0].offsetLeft + testSlider.sliderElem.offsetLeft; // clientX
            mouseStart.initMouseEvent.apply(mouseStart, mouseEventArguments);
            //Simulate random movements
            testSlider.mousedown(mouse49);
            testSlider.mousemove(mouse95);
            // FIXME: Use 'mouseup' event type
            testSlider.mouseup(mouse95);
            testSlider.mousedown(mouse49);
            testSlider.mousemove(mouse100);
            testSlider.mousemove(mouse95);
            testSlider.mousemove(mouse95);
            testSlider.mousemove(mouseStart);
            // The last hover was over the first tick (position 0%), so the
            // tooltip must have followed it there.
            expect(testSlider.tooltip.style.left).toBe("0%");
        });
    });

    describe("Always show the tooltip", function() {
        it("Should always display the tooltip after hovering over a tick", function(done) {
            testSlider = new Slider(document.getElementById("testSlider1"), {
                id: 'mySlider',
                min: 0,
                max: 10,
                step: 1,
                value: 1,
                ticks: [0, 5, 10],
                tooltip: 'always',
                ticks_tooltip: true,
                orientation: 'horizontal'
            });
            // An offset far outside the slider, used to simulate leaving it.
            var bigOffset = 100000;
            var isTooltipVisible = $('#mySlider').find('.tooltip.tooltip-main').hasClass('show');
            expect(isTooltipVisible).toBe(true);
            var mouseenter = document.createEvent('MouseEvent');
            mouseEventArguments[0] = 'mouseenter';
            mouseEventArguments[7] =
                testSlider.ticks[1].offsetLeft + testSlider.sliderElem.offsetLeft; // clientX
            mouseenter.initMouseEvent.apply(mouseenter, mouseEventArguments);
            var mouseleave = document.createEvent('MouseEvent');
            mouseEventArguments[0] = 'mouseleave';
            mouseEventArguments[7] = testSlider.sliderElem.offsetLeft + bigOffset;
            mouseleave.initMouseEvent.apply(mouseleave, mouseEventArguments);
            // With tooltip:'always' the tooltip must still be visible after
            // the pointer leaves the tick.
            testSlider.ticks[1].addEventListener('mouseleave', function() {
                isTooltipVisible = $('#mySlider').find('.tooltip.tooltip-main').hasClass('show');
                expect(isTooltipVisible).toBe(true);
                done();
            });
            testSlider.ticks[1].dispatchEvent(mouseenter);
            testSlider.ticks[1].dispatchEvent(mouseleave);
        });
    });

    afterEach(function() {
        // Tear down the slider created by each spec.
        if(testSlider) {
            if(testSlider instanceof Slider) { testSlider.destroy(); }
            testSlider = null;
        }
    });
});
/**
* The mouse navigation tests are based on the following slider properties:
*
* initial value: 3 or [3, 7]
* ticks: [0, 3, 5, 7, 10]
* step: 1
*
* When the `ticks_tooltip` option is set to `true`, hovering over the ticks or handles
* should show the tooltip above it with the value of the tick/handle.
*
* The test logic for sliders:
* 1. Hover over the 1st tick
* 2. Check if the tooltip is positioned correctly (left, top, right)
* 3. Check if the tooltip should be showing
* 4. Check if the tooltip contains the correct value
* 5. Check if the slider value(s) haven't changed
*
*/
// Parameterised specs: for every combination of slider type (single/range),
// orientation, RTL and reversed, hovering a tick or handle must position the
// tooltip over it, show it, display the right value, and leave the slider
// value untouched.
describe("`ticks_tooltip: true` mouse navigation test cases", function() {
    var initialValue = 3;
    var initialRangeValues = [3, 7];
    var tickValues = [0, 3, 5, 7, 10];
    var stepValue = 1;
    var orientations = ['horizontal', 'vertical'];
    var reversed = [false, true];
    var sliderTypes = ['single', 'range'];
    var rtl = [false, true];
    var testCases = [];
    var mouseEventArguments;

    // Returns the client coordinates of an element's top-left corner, for
    // use as synthetic-event coordinates.
    function calcMouseEventCoords(element) {
        var elementBB = element.getBoundingClientRect();
        return {
            clientX: elementBB.left,
            clientY: elementBB.top
        };
    }

    // Builds a synthetic MouseEvent of the given type at (clientX, clientY),
    // reusing the shared positional argument template (7 = clientX,
    // 8 = clientY).
    function createMouseEvent(type, clientX, clientY) {
        var mouseEvent = document.createEvent('MouseEvent');
        mouseEventArguments[0] = type;
        mouseEventArguments[7] = clientX;
        mouseEventArguments[8] = clientY;
        mouseEvent.initMouseEvent.apply(mouseEvent, mouseEventArguments);
        return mouseEvent;
    }

    beforeEach(function() {
        // Set up default set of mouse event arguments
        mouseEventArguments = [
            'mousemove', // type
            true, // canBubble
            true, // cancelable
            document, // view,
            0, // detail
            0, // screenX
            0, // screenY
            undefined, // clientX
            undefined, // clientY,
            false, // ctrlKey
            false, // altKey
            false, // shiftKey
            false, // metaKey,
            0, // button
            null // relatedTarget
        ];
    });

    // Build the cartesian product of all slider configurations up front;
    // each entry drives one nested `describe` below.
    sliderTypes.forEach(function(sliderType) {
        orientations.forEach(function(orientation) {
            rtl.forEach(function(isRTL) {
                reversed.forEach(function(isReversed) {
                    var isHorizontal = orientation === 'horizontal';
                    var isVertical = orientation === 'vertical';
                    var isRange = sliderType === 'range';
                    // Which inline style property positions elements for
                    // this configuration: 'right' for horizontal RTL,
                    // 'left' for horizontal LTR, 'top' for vertical.
                    var whichStyle;
                    if (isHorizontal) {
                        if (isRTL) {
                            whichStyle = 'right';
                        }
                        else {
                            whichStyle = 'left';
                        }
                    }
                    else if (isVertical) {
                        whichStyle = 'top';
                    }
                    testCases.push({
                        value: isRange ? initialRangeValues : initialValue,
                        step: stepValue,
                        range: isRange,
                        orientation: orientation,
                        reversed: isReversed,
                        rtl: 'auto',
                        isRTL: isRTL,
                        inputId: isRTL ? 'rtlSlider' : 'testSlider1',
                        expectedValue: isRange ? initialRangeValues : initialValue,
                        stylePos: whichStyle
                    });
                });
            });
        });
    });

    testCases.forEach(function(testCase) {
        describe("range=" + testCase.range + ", orientation=" + testCase.orientation +
            ", rtl=" + testCase.isRTL + ", reversed=" + testCase.reversed, function() {

            var $testSlider;
            var sliderElem;
            var $handle1;
            var $handle2;
            var $ticks;
            var sliderId;
            var sliderOptions;
            var $tooltip;
            var $tooltipInner;
            var lastTickIndex;
            var mouseEventType = 'mouseenter';

            beforeEach(function() {
                sliderId = testCase.range ? 'myRangeSlider' : 'mySlider';
                sliderOptions = {
                    id: sliderId,
                    step: testCase.step,
                    orientation: testCase.orientation,
                    value: testCase.value,
                    range: testCase.range,
                    reversed: testCase.reversed,
                    rtl: 'auto',
                    ticks: tickValues,
                    ticks_tooltip: true
                };
                $testSlider = $('#'+testCase.inputId).slider(sliderOptions);
                sliderElem = $('#'+sliderId)[0];
                $ticks = $(sliderElem).find('.slider-tick');
                $handle1 = $(sliderElem).find('.slider-handle:first');
                $handle2 = $(sliderElem).find('.slider-handle:last');
                $tooltip = $(sliderElem).find('.tooltip.tooltip-main');
                $tooltipInner = $tooltip.find('.tooltip-inner');
                lastTickIndex = sliderOptions.ticks.length - 1;
            });

            afterEach(function() {
                // Clean up any associated event listeners
                $ticks = null;
                $handle1 = null;
                $handle2 = null;
                if ($testSlider) {
                    $testSlider.slider('destroy');
                    $testSlider = null;
                }
            });

            if (!testCase.range) {
                it("Should position the tooltip correctly when hovering over each tick (single only)", function(done) {
                    $ticks.each(function(index, tickElem) {
                        var coords = calcMouseEventCoords(tickElem);
                        var tickCallback = function() {
                            // Check position: the tooltip must share the
                            // tick's positioning style (left/right/top).
                            var tooltip_pos = $tooltip[0].style[testCase.stylePos];
                            var tick_pos = tickElem.style[testCase.stylePos];
                            expect(tooltip_pos).toBe(tick_pos);
                            // Only finish the async spec after the last tick.
                            if (index === lastTickIndex) {
                                done();
                            }
                        };
                        // Set up listener and dispatch event
                        this.addEventListener(mouseEventType, tickCallback, false);
                        this.dispatchEvent(createMouseEvent(mouseEventType, coords.clientX, coords.clientY));
                    });
                });
            }

            it("Should show the tooltip", function(done) {
                $ticks.each(function(index, tickElem) {
                    var coords = calcMouseEventCoords(tickElem);
                    var tickCallback = function() {
                        // Check that tooltip shows
                        expect($tooltip.hasClass('show')).toBe(true);
                        if (index === lastTickIndex) {
                            done();
                        }
                    };
                    this.addEventListener(mouseEventType, tickCallback, false);
                    this.dispatchEvent(createMouseEvent(mouseEventType, coords.clientX, coords.clientY));
                });
            });

            it("Should not show the tooltip", function(done) {
                // Dispatch 'mouseleave' far away from each tick; the tooltip
                // must then be hidden.
                var bigOffset = 100000;
                $ticks.each(function(index, tickElem) {
                    var coords = calcMouseEventCoords(tickElem);
                    var tickCallback = function() {
                        // Check that tooltip is hidden
                        expect($tooltip.hasClass('show')).toBe(false);
                        if (index === lastTickIndex) {
                            done();
                        }
                    };
                    this.addEventListener('mouseleave', tickCallback, false);
                    this.dispatchEvent(createMouseEvent('mouseleave', coords.clientX + bigOffset, coords.clientY));
                });
            });

            it("Should contain the correct value for the tooltip", function(done) {
                $ticks.each(function(index, tickElem) {
                    var coords = calcMouseEventCoords(tickElem);
                    var tickCallback = function() {
                        // Check value of tooltip
                        expect($tooltipInner.text()).toBe(''+sliderOptions.ticks[index]);
                        if (index === lastTickIndex) {
                            done();
                        }
                    };
                    this.addEventListener(mouseEventType, tickCallback, false);
                    this.dispatchEvent(createMouseEvent(mouseEventType, coords.clientX, coords.clientY));
                });
            });

            it("Should not modify the value(s) of the slider when displaying the tooltip", function(done) {
                $ticks.each(function(index, tickElem) {
                    var coords = calcMouseEventCoords(tickElem);
                    var tickCallback = function() {
                        var value = $testSlider.slider('getValue');
                        // Check value of slider: hovering must not move it.
                        expect(value).toEqual(testCase.expectedValue);
                        if (index === lastTickIndex) {
                            done();
                        }
                    };
                    this.addEventListener(mouseEventType, tickCallback, false);
                    this.dispatchEvent(createMouseEvent(mouseEventType, coords.clientX, coords.clientY));
                });
            });

            describe("Test position and values of the tooltip when hovering over the handle(s)", function() {
                if (testCase.range) {
                    it("Should position for the tooltip correctly (range)", function(done) {
                        var handleElems = [$handle1[0], $handle2[0]];
                        $.each(handleElems, function(index, handleElem) {
                            // NOTE(review): calcMouseEventCoords only takes one
                            // parameter; the extra orientation argument is ignored.
                            var coords = calcMouseEventCoords(handleElem, testCase.orientation);
                            var handleCallback = function() {
                                // Check position
                                var tooltip_pos = $tooltip[0].style[testCase.stylePos];
                                var handle_pos = handleElem.style[testCase.stylePos];
                                expect(tooltip_pos).toBe(handle_pos);
                                if (index === 1) {
                                    done();
                                }
                            };
                            handleElem.addEventListener(mouseEventType, handleCallback, false);
                            handleElem.dispatchEvent(createMouseEvent(mouseEventType, coords.clientX, coords.clientY));
                        });
                    });

                    it("Should contain the correct values for the tooltip (range)", function(done) {
                        var handleElems = [$handle1[0], $handle2[0]];
                        $.each(handleElems, function(index, handleElem) {
                            var coords = calcMouseEventCoords(handleElem, testCase.orientation);
                            var handleCallback = function() {
                                // Check value of tooltip
                                expect($tooltipInner.text()).toBe(''+testCase.expectedValue[index]);
                                if (index === 1) {
                                    done();
                                }
                            };
                            handleElem.addEventListener(mouseEventType, handleCallback, false);
                            handleElem.dispatchEvent(createMouseEvent(mouseEventType, coords.clientX, coords.clientY));
                        });
                    });
                }
                else {
                    it("Should position for the tooltip correctly (single)", function(done) {
                        var handleElem = $handle1[0];
                        var coords = calcMouseEventCoords(handleElem, testCase.orientation);
                        var handleCallback = function() {
                            // Check position
                            var tooltip_pos = $tooltip[0].style[testCase.stylePos];
                            var handle_pos = handleElem.style[testCase.stylePos];
                            expect(tooltip_pos).toBe(handle_pos);
                            done();
                        };
                        handleElem.addEventListener(mouseEventType, handleCallback, false);
                        handleElem.dispatchEvent(createMouseEvent(mouseEventType, coords.clientX, coords.clientY));
                    });

                    it("Should contain the correct value for the tooltip (single)", function(done) {
                        var handleElem = $handle1[0];
                        var coords = calcMouseEventCoords(handleElem, testCase.orientation);
                        var handleCallback = function() {
                            // Check value of tooltip
                            expect($tooltipInner.text()).toBe(''+testCase.expectedValue);
                            done();
                        };
                        handleElem.addEventListener(mouseEventType, handleCallback, false);
                        handleElem.dispatchEvent(createMouseEvent(mouseEventType, coords.clientX, coords.clientY));
                    });
                }
            });
        });
    });
});
/DevContest-0.4.tar.gz/DevContest-0.4/devcontest/controllers/user.py | import logging
from pylons import request, response, session, tmpl_context as c
from pylons.controllers.util import abort, redirect_to
from devcontest.lib.base import BaseController, render
from pylons.i18n import get_lang, set_lang, _
log = logging.getLogger(__name__)
from sqlalchemy import not_
from devcontest.model import *
from devcontest.model.meta import Session
class UserController(BaseController):
    """Pylons controller for user-facing pages: profile, submitted sources,
    the scoreboard, and the user-administration views.

    All actions require authentication via ``self.auth()``; the admin
    views additionally require ``self.auth(admin=True)``.
    """

    def index(self):
        """Render the profile page of the logged-in user."""
        self.auth()
        c.user = self.user
        return render('user.mako')

    def sources(self):
        """List every source submission of the logged-in user."""
        self.auth()
        c.sources = Session.query(Source).filter_by(user_id=self.user.id).all()
        c.getTaskName = self._getTaskName
        return render('sources.mako')

    def source(self, id=0):
        """Show a single submission owned by the logged-in user.

        Redirects back to the sources list when the submission does not
        exist or belongs to another user.
        """
        self.auth()
        c.source = Session.query(Source).filter_by(id=id, user_id=self.user.id).first()
        if not c.source:
            return redirect_to(action="sources", id=None)
        c.task_name = self._getTaskName(c.source.task_id)
        return render('source.mako')

    def save(self):
        """Persist profile changes from the request and refresh the
        session-cached user object."""
        self.auth()
        user = Session.query(User).filter_by(id=self.user.id).first()
        l = request.params
        # Change the password only when both fields match and are non-empty.
        if l['password'] == l['cpassword'] and l['password'] != "":
            # NOTE(review): `hash` presumably comes from devcontest.model's
            # star import (a password hash helper), not the builtin - confirm.
            user.password = hash(l['password'])
        if l['mail'] != "":
            user.mail = l['mail']
        user.fname = l['fname']
        user.lname = l['lname']
        user.cls = l['cls']
        Session.commit()
        # Replace the stale copy of the user kept in the session/environ.
        del session['user']
        request.environ['REMOTE_USER'] = user
        request.environ['REMOTE_USER_NAME'] = session.get('user_name')
        self.user = user
        session['user'] = user
        session.save()
        return self.index()

    def top(self, id=10):
        """Render the top-``id`` scoreboard.

        Accepted sources belonging to currently running contests are
        excluded from the counts.  Admins see all users ranked; everyone
        else sees only non-admin users.
        """
        self.auth()
        countShow = int(id)
        users = None
        if self.user:
            if self.user.admin:
                users = Session.query(User).all()
        if not users:
            users = Session.query(User).filter_by(admin=False).all()
        # Collect ids of running contests so their solutions are ignored.
        runningContests = [contest.id for contest in
                           Session.query(Contest).filter_by(is_running=True).all()]
        c.users = []
        for user in users:
            count = Session.query(Source).filter_by(user_id=user.id, status=True).filter(not_(Source.contest_id.in_(runningContests))).count()
            if count:
                c.users.append({'user': user, 'count': count})
        c.users = sorted(c.users, key=lambda rank: (int(rank['count']), rank['user']), reverse=True)[:countShow]
        return render('top.mako')

    def admin(self, id, param):
        """Dispatch the admin sub-actions selected by ``id``:

        * ``remove`` - delete user ``param``
        * ``save`` - persist the edit form for user ``param``
        * ``edit`` - render the edit form for user ``param``
        * ``source_rerun`` - re-judge source ``param``
        * ``source_view`` - show source ``param``
        * ``sources`` - list sources of user ``param``
        * ``last`` - list the most recent sources of everybody

        Falls through to the user list when no sub-action matches.
        """
        self.auth(admin=True)
        if id == "remove" and param:
            Session.execute(users_table.delete().where(users_table.c.id == int(param)))
            Session.commit()
        if id == "save" and param:
            params = request.params
            self._adminSave(param, params)
            return redirect_to(id="edit")
        if id == "edit" and param:
            c.user = Session.query(User).filter_by(id=int(param)).first()
            return render("admin/userEdit.mako")
        if id == "source_rerun" and param:
            c.source = Session.query(Source).filter_by(id=int(param)).first()
            contest = Session.query(Contest).filter_by(id=c.source.contest_id).first()
            result = c.source.run(contest, Runner, Judge)
            c.source.status = result['status']
            c.source.points = result['points']
            # Rebuild the per-judge error report as an HTML list.
            c.source.errors = ''
            judge_results = result['judges']
            total = len(judge_results)
            for i, judge_result in enumerate(judge_results):
                c.source.errors += '<li>%s/%s: %s</li>' % (i + 1, total, judge_result)
            Session.commit()
            return redirect_to(id="source_view")
        if id == "source_view" and param:
            c.source = Session.query(Source).filter_by(id=int(param)).first()
            c.user = Session.query(User).filter_by(id=c.source.user_id).first()
            c.task_name = self._getTaskName(c.source.task_id)
            return render("admin/viewSource.mako")
        if id == "sources" and param:
            c.user = Session.query(User).filter_by(id=int(param)).first()
            c.sources = Session.query(Source).filter_by(user_id=int(param)).order_by(sources_table.c.datetime.desc()).all()
            c.getTaskName = self._getTaskName
            c.taskExists = self._taskExists
            return render("admin/userSources.mako")
        if id == "last":
            c.sources = Session.query(Source).order_by(sources_table.c.datetime.desc()).all()
            c.getTaskName = self._getTaskName
            c.getUser = self._getUser
            return render("admin/lastSources.mako")
        c.users = Session.query(User).order_by(users_table.c.lname, users_table.c.fname).all()
        return render('admin/user.mako')

    def _taskExists(self, id):
        """Return True when a task with the given id exists.

        Bug fix: the old code executed ``print task.name`` *before* the
        None-check, crashing with AttributeError for missing tasks.
        """
        task = Session.query(Task).filter_by(id=id).first()
        return task is not None

    def _getUser(self, id):
        """Return "fname lname" of the user with the given id, or None."""
        user = Session.query(User).filter_by(id=id).first()
        if user:
            return user.fname + " " + user.lname

    def _getTaskName(self, id):
        """Return the name of the task with the given id, or None."""
        task = Session.query(Task).filter_by(id=id).first()
        if task:
            return task.name

    def _adminSave(self, id, params):
        """Apply the admin edit form ``params`` to the user with the given id."""
        user = Session.query(User).filter_by(id=id).first()
        user.fname = params['fname']
        user.lname = params['lname']
        user.mail = params['mail']
        user.cls = params['cls']
        if params['password'] != '' and params['password'] == params['cpassword']:
            user.password = hash(params['password'])
        # Checkbox is only present in the form data when checked.
        user.admin = 'admin' in params
        Session.commit()
/Evmlab-0.3.0.0.1-py3-none-any.whl/evmlab/opcodes.py | from copy import copy
import collections
import binascii
from . import parse_int_or_hex,decode_hex,remove_0x_head,bytearray_to_bytestr,encode_hex
# Taken from https://github.com/ethereum/pyethereum/blob/develop/ethereum/opcodes.py
# Done this way to reduce dependencies a bit
# schema: [opcode, ins, outs, gas]
# EVM opcode table. Schema: opcode byte -> [name, stack inputs, stack
# outputs, base gas cost].
opcodes = {
    0x00: ['STOP', 0, 0, 0],
    0x01: ['ADD', 2, 1, 3],
    0x02: ['MUL', 2, 1, 5],
    0x03: ['SUB', 2, 1, 3],
    0x04: ['DIV', 2, 1, 5],
    0x05: ['SDIV', 2, 1, 5],
    0x06: ['MOD', 2, 1, 5],
    0x07: ['SMOD', 2, 1, 5],
    0x08: ['ADDMOD', 3, 1, 8],
    0x09: ['MULMOD', 3, 1, 8],
    0x0a: ['EXP', 2, 1, 10],
    0x0b: ['SIGNEXTEND', 2, 1, 5],
    0x10: ['LT', 2, 1, 3],
    0x11: ['GT', 2, 1, 3],
    0x12: ['SLT', 2, 1, 3],
    0x13: ['SGT', 2, 1, 3],
    0x14: ['EQ', 2, 1, 3],
    0x15: ['ISZERO', 1, 1, 3],
    0x16: ['AND', 2, 1, 3],
    0x17: ['OR', 2, 1, 3],
    0x18: ['XOR', 2, 1, 3],
    0x19: ['NOT', 1, 1, 3],
    0x1a: ['BYTE', 2, 1, 3],
    0x20: ['SHA3', 2, 1, 30],
    0x30: ['ADDRESS', 0, 1, 2],
    0x31: ['BALANCE', 1, 1, 20],
    0x32: ['ORIGIN', 0, 1, 2],
    0x33: ['CALLER', 0, 1, 2],
    0x34: ['CALLVALUE', 0, 1, 2],
    0x35: ['CALLDATALOAD', 1, 1, 3],
    0x36: ['CALLDATASIZE', 0, 1, 2],
    0x37: ['CALLDATACOPY', 3, 0, 3],
    0x38: ['CODESIZE', 0, 1, 2],
    0x39: ['CODECOPY', 3, 0, 3],
    0x3a: ['GASPRICE', 0, 1, 2],
    0x3b: ['EXTCODESIZE', 1, 1, 20],
    0x3c: ['EXTCODECOPY', 4, 0, 20],
    0x3d: ['RETURNDATASIZE', 0, 1, 2],
    0x3e: ['RETURNDATACOPY', 3, 0, 3],
    0x40: ['BLOCKHASH', 1, 1, 20],
    0x41: ['COINBASE', 0, 1, 2],
    0x42: ['TIMESTAMP', 0, 1, 2],
    0x43: ['NUMBER', 0, 1, 2],
    0x44: ['DIFFICULTY', 0, 1, 2],
    0x45: ['GASLIMIT', 0, 1, 2],
    0x50: ['POP', 1, 0, 2],
    0x51: ['MLOAD', 1, 1, 3],
    0x52: ['MSTORE', 2, 0, 3],
    0x53: ['MSTORE8', 2, 0, 3],
    0x54: ['SLOAD', 1, 1, 50],
    0x55: ['SSTORE', 2, 0, 0],
    0x56: ['JUMP', 1, 0, 8],
    0x57: ['JUMPI', 2, 0, 10],
    0x58: ['PC', 0, 1, 2],
    0x59: ['MSIZE', 0, 1, 2],
    0x5a: ['GAS', 0, 1, 2],
    0x5b: ['JUMPDEST', 0, 0, 1],
    0xa0: ['LOG0', 2, 0, 375],
    0xa1: ['LOG1', 3, 0, 750],
    0xa2: ['LOG2', 4, 0, 1125],
    0xa3: ['LOG3', 5, 0, 1500],
    0xa4: ['LOG4', 6, 0, 1875],
    0xf0: ['CREATE', 3, 1, 32000],
    0xf1: ['CALL', 7, 1, 40],
    0xf2: ['CALLCODE', 7, 1, 40],
    0xf3: ['RETURN', 2, 0, 0],
    0xf4: ['DELEGATECALL', 6, 0, 40],
    0xfa: ['STATICCALL', 6, 1, 40],
    0xfd: ['REVERT', 2, 0, 0],
    0xff: ['SUICIDE', 1, 0, 0],
}
# Opcodes that only exist from the Metropolis hard fork onwards.
opcodesMetropolis = { 0x3d, 0x3e, 0xfa, 0xfd }
# Generate the PUSH1..PUSH32 entries (0x60..0x7f).
for i in range(1, 33):
    opcodes[0x5f + i] = ['PUSH' + str(i), 0, 1, 3]
# Generate DUP1..DUP16 (0x80..0x8f) and SWAP1..SWAP16 (0x90..0x9f).
for i in range(1, 17):
    opcodes[0x7f + i] = ['DUP' + str(i), i, i + 1, 3]
    opcodes[0x8f + i] = ['SWAP' + str(i), i + 1, i + 1, 3]
# Build name -> opcode-byte lookup, and export every opcode entry as a
# module-level constant (e.g. ADD, MSTORE) via the vars() hack.
reverse_opcodes = {}
for o in opcodes:
    vars()[opcodes[o][0]] = opcodes[o]
    reverse_opcodes[opcodes[o][0]] = o
# Non-opcode gas prices
GDEFAULT = 1
GMEMORY = 3
GQUADRATICMEMDENOM = 512  # 1 gas per 512 quadwords
GSTORAGEREFUND = 15000
GSTORAGEKILL = 5000
GSTORAGEMOD = 5000
GSTORAGEADD = 20000
GEXPONENTBYTE = 10  # cost of EXP exponent per byte
GCOPY = 3  # cost to copy one 32 byte word
GCONTRACTBYTE = 200  # one byte of code in contract creation
GCALLVALUETRANSFER = 9000  # non-zero-valued call
GLOGBYTE = 8  # cost of a byte of logdata
GTXCOST = 21000  # TX BASE GAS COST
GTXDATAZERO = 4  # TX DATA ZERO BYTE GAS COST
GTXDATANONZERO = 68  # TX DATA NON ZERO BYTE GAS COST
GSHA3WORD = 6  # Cost of SHA3 per word
GSHA256BASE = 60  # Base cost of SHA256
GSHA256WORD = 12  # Cost of SHA256 per word
GRIPEMD160BASE = 600  # Base cost of RIPEMD160
GRIPEMD160WORD = 120  # Cost of RIPEMD160 per word
GIDENTITYBASE = 15  # Base cost of identity
GIDENTITYWORD = 3  # Cost of identity per word
GECRECOVER = 3000  # Cost of ecrecover op
GSTIPEND = 2300
GCALLNEWACCOUNT = 25000
GSUICIDEREFUND = 24000
# Anti-DoS HF changes
SLOAD_SUPPLEMENTAL_GAS = 150
CALL_SUPPLEMENTAL_GAS = 660
EXTCODELOAD_SUPPLEMENTAL_GAS = 680
BALANCE_SUPPLEMENTAL_GAS = 380
CALL_CHILD_LIMIT_NUM = 63
CALL_CHILD_LIMIT_DENOM = 64
SUICIDE_SUPPLEMENTAL_GAS = 5000
def parseCode(code):
    """Disassemble EVM bytecode into an OrderedDict mapping pc -> instruction.

    Each instruction is a list ``[name, ins, outs, gas]``; PUSHn
    instructions carry a fifth element with the hex-encoded push data
    (``"0x..."``).  Unknown opcode bytes become ``['INVALID', 0, 0, 0]``.

    :param code: hex string of the bytecode, with or without ``0x`` prefix.
    :raises Exception: when the string is not valid hex (typically caused
        by unresolved solidity library placeholders).
    """
    code = code[2:] if code[:2] == '0x' else code
    try:
        codes = [c for c in decode_hex(code)]
    except ValueError as e:
        print(code)
        raise Exception("Did you forget to link any libraries?") from e
    instructions = collections.OrderedDict()
    pc = 0
    while pc < len(codes):
        # Bug fix: copy *every* table entry, not only PUSH entries.  The old
        # code stored the shared mutable lists from the global `opcodes`
        # table, so mutating a parsed instruction corrupted the table.
        try:
            opcode = copy(opcodes[codes[pc]])
        except KeyError:
            opcode = ['INVALID', 0, 0, 0]
        if opcode[0][:4] == 'PUSH':
            # PUSHn: the push width is encoded in the opcode byte itself.
            length = codes[pc] - 0x5f
            pushData = codes[pc + 1 : pc + length + 1]
            pushData = "0x" + encode_hex(bytearray_to_bytestr(pushData))
            if type(pushData) is not str:
                pushData = pushData.decode()
            opcode.append(pushData)
            instructions[pc] = opcode
            # Skip over the inline push data.
            pc += length + 1
        else:
            instructions[pc] = opcode
            pc += 1
    return instructions
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/docs/modules/tensor/tensor.rst | .. _tensor-tensor:
Tensor
======
.. module:: diofant.tensor.tensor
.. autoclass:: _TensorManager
:members:
.. autoclass:: TensorIndexType
:members:
.. autoclass:: TensorIndex
:members:
.. autofunction:: tensor_indices
.. autoclass:: TensorSymmetry
:members:
.. autofunction:: tensorsymmetry
.. autoclass:: TensorType
:members:
.. autoclass:: TensorHead
:members:
.. autoclass:: TensExpr
:members:
.. autoclass:: TensAdd
:members:
.. autoclass:: TensMul
:members:
.. autofunction:: canon_bp
.. autofunction:: tensor_mul
.. autofunction:: riemann_cyclic_replace
.. autofunction:: riemann_cyclic
| PypiClean |
/Euphorie-15.0.2.tar.gz/Euphorie-15.0.2/src/euphorie/client/resources/oira/script/chunks/78529.112119b9875822ce8a6f.min.js | (self.webpackChunk_patternslib_patternslib=self.webpackChunk_patternslib_patternslib||[]).push([[78529],{78529:function(e){function n(e){return e?"string"==typeof e?e:e.source:null}function t(...e){return e.map((e=>n(e))).join("")}function s(...e){return"("+e.map((e=>n(e))).join("|")+")"}e.exports=function(e){const n=/[dualxmsipngr]{0,12}/,r={$pattern:/[\w.]+/,keyword:["abs","accept","alarm","and","atan2","bind","binmode","bless","break","caller","chdir","chmod","chomp","chop","chown","chr","chroot","close","closedir","connect","continue","cos","crypt","dbmclose","dbmopen","defined","delete","die","do","dump","each","else","elsif","endgrent","endhostent","endnetent","endprotoent","endpwent","endservent","eof","eval","exec","exists","exit","exp","fcntl","fileno","flock","for","foreach","fork","format","formline","getc","getgrent","getgrgid","getgrnam","gethostbyaddr","gethostbyname","gethostent","getlogin","getnetbyaddr","getnetbyname","getnetent","getpeername","getpgrp","getpriority","getprotobyname","getprotobynumber","getprotoent","getpwent","getpwnam","getpwuid","getservbyname","getservbyport","getservent","getsockname","getsockopt","given","glob","gmtime","goto","grep","gt","hex","if","index","int","ioctl","join","keys","kill","last","lc","lcfirst","length","link","listen","local","localtime","log","lstat","lt","ma","map","mkdir","msgctl","msgget","msgrcv","msgsnd","my","ne","next","no","not","oct","open","opendir","or","ord","our","pack","package","pipe","pop","pos","print","printf","prototype","push","q|0","qq","quotemeta","qw","qx","rand","read","readdir","readline","readlink","readpipe","recv","redo","ref","rename","require","reset","return","reverse","rewinddir","rindex","rmdir","say","scalar","seek","seekdir","select","semctl","semget","semop","send","setgrent","sethostent","setnetent","setpgrp","setpriority","setprotoent","setpwent"
,"setservent","setsockopt","shift","shmctl","shmget","shmread","shmwrite","shutdown","sin","sleep","socket","socketpair","sort","splice","split","sprintf","sqrt","srand","stat","state","study","sub","substr","symlink","syscall","sysopen","sysread","sysseek","system","syswrite","tell","telldir","tie","tied","time","times","tr","truncate","uc","ucfirst","umask","undef","unless","unlink","unpack","unshift","untie","until","use","utime","values","vec","wait","waitpid","wantarray","warn","when","while","write","x|0","xor","y|0"].join(" ")},i={className:"subst",begin:"[$@]\\{",end:"\\}",keywords:r},a={begin:/->\{/,end:/\}/},o={variants:[{begin:/\$\d/},{begin:t(/[$%@](\^\w\b|#\w+(::\w+)*|\{\w+\}|\w+(::\w*)*)/,"(?![A-Za-z])(?![@$%])")},{begin:/[$%@][^\s\w{]/,relevance:0}]},c=[e.BACKSLASH_ESCAPE,i,o],l=[/!/,/\//,/\|/,/\?/,/'/,/"/,/#/],g=(e,s,r="\\1")=>{const i="\\1"===r?r:t(r,s);return t(t("(?:",e,")"),s,/(?:\\.|[^\\\/])*?/,i,/(?:\\.|[^\\\/])*?/,r,n)},d=(e,s,r)=>t(t("(?:",e,")"),s,/(?:\\.|[^\\\/])*?/,r,n),p=[o,e.HASH_COMMENT_MODE,e.COMMENT(/^=\w/,/=cut/,{endsWithParent:!0}),a,{className:"string",contains:c,variants:[{begin:"q[qwxr]?\\s*\\(",end:"\\)",relevance:5},{begin:"q[qwxr]?\\s*\\[",end:"\\]",relevance:5},{begin:"q[qwxr]?\\s*\\{",end:"\\}",relevance:5},{begin:"q[qwxr]?\\s*\\|",end:"\\|",relevance:5},{begin:"q[qwxr]?\\s*<",end:">",relevance:5},{begin:"qw\\s+q",end:"q",relevance:5},{begin:"'",end:"'",contains:[e.BACKSLASH_ESCAPE]},{begin:'"',end:'"'},{begin:"`",end:"`",contains:[e.BACKSLASH_ESCAPE]},{begin:/\{\w+\}/,relevance:0},{begin:"-?\\w+\\s*=>",relevance:0}]},{className:"number",begin:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b",relevance:0},{begin:"(\\/\\/|"+e.RE_STARTERS_RE+"|\\b(split|return|print|reverse|grep)\\b)\\s*",keywords:"split return print reverse 
grep",relevance:0,contains:[e.HASH_COMMENT_MODE,{className:"regexp",variants:[{begin:g("s|tr|y",s(...l))},{begin:g("s|tr|y","\\(","\\)")},{begin:g("s|tr|y","\\[","\\]")},{begin:g("s|tr|y","\\{","\\}")}],relevance:2},{className:"regexp",variants:[{begin:/(m|qr)\/\//,relevance:0},{begin:d("(?:m|qr)?",/\//,/\//)},{begin:d("m|qr",s(...l),/\1/)},{begin:d("m|qr",/\(/,/\)/)},{begin:d("m|qr",/\[/,/\]/)},{begin:d("m|qr",/\{/,/\}/)}]}]},{className:"function",beginKeywords:"sub",end:"(\\s*\\(.*?\\))?[;{]",excludeEnd:!0,relevance:5,contains:[e.TITLE_MODE]},{begin:"-\\w\\b",relevance:0},{begin:"^__DATA__$",end:"^__END__$",subLanguage:"mojolicious",contains:[{begin:"^@@.*",end:"$",className:"comment"}]}];return i.contains=p,a.contains=p,{name:"Perl",aliases:["pl","pm"],keywords:r,contains:p}}}}]);
//# sourceMappingURL=78529.112119b9875822ce8a6f.min.js.map | PypiClean |
/Lab-Automation-1.1.tar.gz/Lab-Automation-1.1/lab/tutor_tab.py | from Tkinter import *
import json
import tkMessageBox
from functools import partial
head_font = ("Helvetica", 22, "bold")
text_font = ("Times New Roman", 18)
class assignment(Frame):
    """Tab listing the assignments of the tutor's course.

    Each assignment becomes a button that navigates to the `question`
    frame for that assignment.
    """

    def __init__(self, parent, controller):
        # `controller` is the enclosing tab container; its own `.controller`
        # carries the authenticated HTTP client (requests session).
        Frame.__init__(self, parent)
        self.controller = controller
        self.enter = enter = Entry(self, bd=5)
        enter.pack(side=TOP)

    def load(self):
        """(Re)fetch the assignment list from the server and rebuild the tab.

        NOTE(review): winfo_children() includes the Entry created in
        __init__, so reloading also destroys it - confirm this is intended.
        """
        response = json.loads(self.controller.controller.client.get("http://172.16.115.106:8080/assignments/", params = {'coursename': self.controller.course}, proxies={}).content)
        for child in self.winfo_children():
            child.destroy()
        # One navigation button per assignment returned by the server.
        for assign in response:
            but = Button(self, text=assign,height=3,width=50, command = partial(self.controller.show_frame, "question", assign))
            but.pack(side=TOP,pady=1)
class student(Frame):
    """Tab where a tutor (TA) picks students to supervise.

    Shows a menubutton with all course students not yet assigned to this
    TA, plus one button per already-assigned student that opens their
    profile frame.
    """

    def __init__(self, parent, controller):
        Frame.__init__(self, parent)
        self.controller = controller

    def add_selection(self, roll_no, studd):
        """Assign student `roll_no` to this TA on the server, then reload.

        `studd` is the menu entry label ("name:roll_no") to remove from
        the selection menu.
        """
        self.students.menu.delete(studd)
        client = self.controller.controller.client
        url = "http://172.16.115.106:8080/addtastud/"
        # GET first to obtain a fresh CSRF cookie for the POST below.
        client.get(url)
        csrf = dict(client.cookies)['csrftoken']
        response = json.loads(client.post(url, data = {'coursename': self.controller.course, 'roll_no': roll_no, 'csrfmiddlewaretoken': csrf}).content)
        self.load()

    def load(self):
        """Rebuild the tab: fetch all course students and the TA's own
        students, then render the selection menu and student buttons."""
        for child in self.winfo_children():
            child.destroy()
        client = self.controller.controller.client
        url = "http://172.16.115.106:8080/coursestudentlist/"
        all_list = json.loads(client.get(url, params = {'coursename': self.controller.course}).content)
        url = "http://172.16.115.106:8080/tastudents/"
        # GET first to obtain a fresh CSRF cookie for the POST below.
        client.get(url)
        csrf = dict(client.cookies)['csrftoken']
        ta_students = json.loads(client.post(url, data = {'coursename': self.controller.course, 'csrfmiddlewaretoken': csrf}).content)
        # Dropdown of students that can still be added to this TA.
        self.students = students = Menubutton(self, text = "Select Students",width=15 )
        students.grid()
        students.menu = Menu(students)
        students["menu"] = students.menu
        for stud in all_list:
            if stud not in ta_students:
                students.menu.add_checkbutton(label = all_list[stud]["name"] + ":" + all_list[stud]["roll_no"], variable = all_list[stud]["roll_no"], command = partial(self.add_selection, all_list[stud]["roll_no"], all_list[stud]["name"] + ":" + all_list[stud]["roll_no"]))
        students.pack()
        # One profile button per student already assigned to this TA.
        for key in ta_students:
            stud = ta_students[key]
            but = Button(self, fg = "black", text = stud["name"] + ":" + stud["roll_no"], height = 2,width = 15, command = partial(self.controller.show_frame, "stud_pro", key))
            but.pack(pady=1)
class marks(Frame):
    """Tab showing a marks table for every student assigned to this tutor."""

    def __init__(self, parent, controller):
        Frame.__init__(self, parent)
        self.controller = controller

    def load(self):
        """Fetch the tutor's students from the server and rebuild the
        roll-number / name / marks grid."""
        # Clear out whatever a previous load() placed on this frame.
        for widget in self.winfo_children():
            widget.destroy()
        client = self.controller.controller.client
        url = "http://172.16.115.106:8080/tastudents/"
        # GET first so the client holds a fresh CSRF cookie for the POST.
        client.get(url)
        token = dict(client.cookies)['csrftoken']
        payload = {'coursename': self.controller.course, 'csrfmiddlewaretoken': token}
        students = json.loads(client.post(url, data=payload).content)
        # Columns 0 and 4 act as flexible padding around the 3 data columns.
        for column, weight in ((0, 4), (1, 1), (2, 1), (3, 1), (4, 4)):
            self.grid_columnconfigure(column, weight=weight)
        # Header row.
        for column, caption in enumerate(("Roll Number", "Name", "Marks"), start=1):
            header = Label(self, text=caption, font=head_font)
            header.grid(row=0, column=column, sticky="nsew")
        # One row per student record.
        row = 1
        for key in students:
            record = students[key]
            for column, field in enumerate(("roll_no", "name", "marks"), start=1):
                cell = Label(self, text=record[field], font=text_font)
                cell.grid(row=row, column=column, sticky="nsew")
            row = row + 1
class question(Frame):
    """Frame where a tutor views the questions of an assignment.

    The question editors live in an inner frame embedded in a
    scrollable canvas.
    """
    def __init__(self, parent, controller):
        Frame.__init__(self, parent)
        self.controller = controller
        # Scrollable area: widgets are packed into ``frame``, which sits
        # inside ``canvas``; the scrollbar drives the canvas view.
        self.canvas = canvas = Canvas(self)
        self.frame = frame = Frame(canvas)
        self.myscrollbar=myscrollbar=Scrollbar(self,orient="vertical",command=canvas.yview)
        canvas.configure(yscrollcommand=myscrollbar.set)
        myscrollbar.pack(side="right",fill="y")
        canvas.pack(side="left",fill = X)
        canvas.create_window((0,0),window=frame,anchor='nw')
        # Keep the scrollregion in sync when the inner frame resizes.
        frame.bind("<Configure>", self.myfunction)
        # Start with a single empty question editor.
        label = Label(frame,text = "Question 1")
        label.pack(side = LEFT)
        text = Text(frame)
        text.pack()
        text.insert(INSERT, "")
        self.q = 1  # number of questions currently shown
        self.ques = []  # Text widgets, one per question
        self.ques.append(text)
    def myfunction(self, event):
        """<Configure> callback: resize the canvas scrollregion."""
        self.canvas.configure(scrollregion=self.canvas.bbox("all"),width=600,height=600)
    def load(self, assign):
        """Fetch and display the questions of assignment ``assign``."""
        self.assign = assign
        response = json.loads((((self.controller.controller)).client).get("http://172.16.115.106:8080/assignments/", params = {'coursename': self.controller.course}, proxies={}).content)
        i = 0
        # Drop widgets from a previous load before rebuilding.
        for child in self.frame.winfo_children():
            child.destroy()
        self.ques = []
        # response[assign] presumably is a list of {'question': ...}
        # records -- TODO confirm against the server API.
        for _ques in response[assign]:
            i = i + 1
            label = Label(self.frame, text = "Question " + str(i))
            label.pack()
            text = Text(self.frame)
            text.pack()
            text.insert(INSERT, _ques["question"])
            self.ques.append(text)
        self.q = i
/EnergyCapSdk-8.2304.4743.tar.gz/EnergyCapSdk-8.2304.4743/energycap/sdk/models/calculated_bill_details_response.py |
from msrest.serialization import Model
class CalculatedBillDetailsResponse(Model):
    """Full calculated-bill setup for a meter.

    Bundles the distribution version with the use, cost and demand
    configuration, plus any extra meter-level and account-level
    bodylines (each list already in display order).

    :param version:
    :type version: ~energycap.sdk.models.DistributionVersionResponse
    :param use:
    :type use: ~energycap.sdk.models.CalculatedBillUseResponse
    :param cost:
    :type cost: ~energycap.sdk.models.CalculatedBillCostResponse
    :param demand:
    :type demand: ~energycap.sdk.models.CalculatedBillDemandResponse
    :param meter_line_items: Additional meter bodylines for the bill,
     returned in display order
    :type meter_line_items:
     list[~energycap.sdk.models.GenericBodylineResponse]
    :param account_line_items: Additional account bodylines for the
     bill, returned in display order
    :type account_line_items:
     list[~energycap.sdk.models.GenericBodylineResponse]
    """

    _attribute_map = {
        'version': {'key': 'version', 'type': 'DistributionVersionResponse'},
        'use': {'key': 'use', 'type': 'CalculatedBillUseResponse'},
        'cost': {'key': 'cost', 'type': 'CalculatedBillCostResponse'},
        'demand': {'key': 'demand', 'type': 'CalculatedBillDemandResponse'},
        'meter_line_items': {'key': 'meterLineItems', 'type': '[GenericBodylineResponse]'},
        'account_line_items': {'key': 'accountLineItems', 'type': '[GenericBodylineResponse]'},
    }

    def __init__(self, **kwargs):
        super(CalculatedBillDetailsResponse, self).__init__(**kwargs)
        # Every mapped attribute defaults to None when not supplied.
        for attr_name in self._attribute_map:
            setattr(self, attr_name, kwargs.get(attr_name, None))
/EasyModeler-2.2.6.zip/EasyModeler-2.2.6/README.rst | EasyModeler is a package for calibration and
validation of Ordinary Differential Equations ODEs to sample data.
Requirements
------------
* Python 2.6 or 2.7
* SciPy and NumPy 2.6 or 2.7
* Matplotlib 2.6 or 2.7
* sas7bdat
Features
--------
* ODEINT Wrapper Intelligent non-invasive wrapper to SciPy's integrator
* ODE Calibration Auto-calibrate a series of ODEs to data
* TimeSeries Files Handling of dtInput
* Model Validation Validate using Goodness of Fit statistics
* Graphical Plotting Basic plotting via matplotlib
* Graphical Interface Coming in version 2.3
Documentation and Userguide
---------------------------
* https://dl.dropboxusercontent.com/u/66459905/site/index.html
* Supports comprehensive autodocs with example usage inside source!
* Looking for a permanent document home online *please suggest ideas to me!*
Install as python module
------------------------
from internet
~~~~~~~~~~~~~
::
$ easy_install easymodeler
from archive
~~~~~~~~~~~~
::
$ unzip easymodeler-x.x.x.zip
$ cd easymodeler-x.x.x
$ python setup.py install
Change Log
----------
2.2.6 (2016-3-29)
~~~~~~~~~~~~~~~~~
* bugfixes
* added RMSD GOF parameter
2.2.5 (2015-4-23)
~~~~~~~~~~~~~~~~~
* bugfixes
2.2.4 (2015-4-22)
~~~~~~~~~~~~~~~~~
* bugfixes
2.2.3 (2015-4-1)
~~~~~~~~~~~~~~~~
* bugfixes
* dtinput fixes
* example dataset inclusion
2.2.2 (2015-3-31)
~~~~~~~~~~~~~~~~~
* SAS filetype support
* fixes to calibration
* autodocs continue to update
2.2.1 (2015-3-27)
~~~~~~~~~~~~~~~~~
* Additions to Calibration object
* GraphOpt object creation
2.2 (2015-3-26)
~~~~~~~~~~~~~~~~
* Rollout of simple plotting interface
2.1.9 (2015-3-25)
~~~~~~~~~~~~~~~~~
* autodocs continue to update
2.1.4 - 2.1.8 (2015-3-10)
~~~~~~~~~~~~~~~~~~~~~~~~~
* trying yet again to fix the pypi readme
* autodocs continue to update
* rename functions to naming conventions
2.0.0 - 2.1.3 (2015-3-6)
~~~~~~~~~~~~~~~~~~~~~~~~
* autodocs continue to update
* README change
* Sample Example
* LICENSE
Acknowledgements
----------------
Support for this project was made possible by grant number NA11NOS0120024 from NOAA
to support the Gulf of Mexico Coastal Ocean Observing System (GCOOS) via subcontract
S120015 from the TAMU Research Foundation.
Sample Usage
------------
Here is a snippet of the userguide available at https://dl.dropboxusercontent.com/u/66459905/site/index.html
Example 1
---------
Lotka Volterra Predator Prey Interaction
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Lotka Volterra system is a simple model of predator-prey dynamics and consists of two coupled differentials. http://en.wikipedia.org/wiki/Lotka%E2%80%93Volterra_equation
This is a simple example highlighting **EasyModeler's** ability to integrate ODEs without complication! At a minimum to integrate we require:
1. A defined ODE function
2. A set of initial conditions as a list
3. Number of times to run the integrator
Declare an ODE_INT function in your source code. This will be passed to the **scipy.integrate.odeint** integrator
::
def LV_int(t,initial,dtinput,coefficients):
x = initial[0]
y = initial[1]
A = 1
B = 1
C = 1
D = 1
x_dot = (A * x) - (B * x *y)
y_dot = (D * x * y) - (C * y)
return [x_dot, y_dot]
Pass the ODE function to **emlib.Model** as
::
>>> import emlib
>>> LV = emlib.Model(LV_int)
INFO -512- New Model(1): LV_int
INFO -524- No algorithm supplied assuming vode/bfd O12 Nsteps3000
Now lets integrate our LV function for 200 timesteps!
::
>>> LV.Integrate([1,1],maxdt=200)
DEBUG -541- ODEINT Initials:11
DEBUG -579- Ending in 200 runs
DEBUG -600- Integration dT:0 of 200 Remaining:200
DEBUG -612- Completed Integration, created np.array shape:(200, 2)
The model output is stored in the **emlib.Model** object as arrays *computedT* and *computed*
::
>>> print LV.computed
[[ 0.37758677 2.93256414]
[ 0.13075395 1.32273451]
[ 0.14707288 0.55433421]
[ 0.27406944 0.24884565]
**EasyModeler** is organized where time is stored separately from data.
This is a design feature to aid processing timeseries data.
| PypiClean |
/ESMValTool-2.9.0-py3-none-any.whl/esmvaltool/cmorizers/mip_convert/esmvt_mipconv_setup.py | import argparse
import configparser
import datetime
import logging
import os
import shutil
import subprocess
import socket
import yaml
####################
# global variables #
####################
# the tool uses a specially tailored mip_convert Rose suite
# locations of the suite depends on the host
host_name = socket.gethostname().split('.')
if len(host_name) > 1:
    if host_name[1] == 'ceda':
        # default location for mip_convert suite on JASMIN:
        # previous suite: u-ak283_esmvt; new one u-bd681
        # DEFAULT_SUITE_LOCATION = "/home/users/valeriu/roses/u-ak283_esmvt"
        DEFAULT_SUITE_LOCATION = "/home/users/valeriu/roses/u-bd681"
        # note that you can fcm checkout it straight from the MOSRS
        # NOTE(review): DEFAULT_SUITE_LOCATION is only bound on JASMIN
        # ('*.ceda.*') hosts; on any other machine the references in
        # write_rose_conf/_edit_mip_convert_config/main raise NameError.
        # Consider a fallback value or an explicit early error.
# stream mapping; taken from hadsdk.streams
# these are used to set defaults if not overrides
STREAM_MAP = {
'CMIP5': {
'3hr': 'apk',
'6hrPlev': 'apc',
'6hrlev': 'apg',
'Amon': 'apm',
'Lmon': 'apm',
'LImon': 'apm',
'Oday': 'opa',
'Omon': 'opm',
'Oyr': 'opy',
'CF3hr': 'apk',
'CFday': 'apa',
'CFmon': 'apm',
'CFsubhr': 'ape',
'day': 'apa'
},
'CMIP6': {
'3hr': 'ap8',
'6hrLev': 'ap7',
'6hrPlev': 'ap7',
'6hrPlevPt': 'ap7',
'AERday': 'ap6',
'AERhr': 'ap9',
'AERmon': 'ap4',
'AERmonZ': 'ap4',
'Amon': 'ap5',
'CF3hr': 'ap8',
'CFday': 'ap6',
'CFmon': 'ap5',
'E1hr': 'ap9',
'E1hrClimMon': 'ap9',
'E3hr': 'ap8',
'E3hrPt': 'ap8',
'E6hrZ': 'ap7',
'Eday': 'ap6',
'EdayZ': 'ap6',
'Efx': 'ancil',
'Emon': 'ap5',
'EmonZ': 'ap5',
'Esubhr': 'ap8',
'Eyr': 'ap5',
'LImon': 'ap5',
'Lmon': 'ap5',
'Oday': 'ond',
'Ofx': 'ancil',
'Omon': 'onm',
'SIday': 'ind',
'SImon': 'inm',
'day': 'ap6',
'fx': 'ancil',
'prim1hrpt': 'ap9',
'prim3hr': 'ap8',
'prim3hrpt': 'ap8',
'prim6hr': 'ap7',
'prim6hrpt': 'ap7',
'primDay': 'ap6',
'primMon': 'ap5',
'primSIday': 'ap6'
}
}
# set up logging
logger = logging.getLogger(__name__)
# print the header
HEADER = r"""
______________________________________________________________________
ESMValTool + mip_convert: linking mip_convert to ESMValTool
______________________________________________________________________
""" + __doc__
def get_args():
    """Parse and return the command-line arguments.

    :return: ``argparse.Namespace`` with ``config_file``,
        ``recipe_files``, ``mode`` and ``log_level`` attributes.
    """
    # parse command line args
    parser = argparse.ArgumentParser(
        description=HEADER,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '-c',
        '--config-file',
        # defaults to the config-user.yml shipped next to this script
        default=os.path.join(os.path.dirname(__file__), 'config-user.yml'),
        help='Configuration file')
    parser.add_argument(
        '-r',
        '--recipe-files',
        type=str,
        nargs='+',
        help='Recipe files (list or single file)')
    parser.add_argument(
        '-m',
        '--mode',
        default='setup-only',
        choices=['setup-only', 'setup-run-suites', 'postproc'],
        help='How to run: setup: sets up mipconvert suites only;\n' +
        'or setup-run-suites: sets up suites and runs them as well;\n' +
        'or postproc: grab the output from mip_convert and use it.')
    parser.add_argument(
        '-l',
        '--log-level',
        default='info',
        choices=['debug', 'info', 'warning', 'error'])
    args = parser.parse_args()
    return args
def _set_logger(logging, out_dir, log_file, log_level):
    """Configure logging to both a file and the console.

    The file handler (``out_dir/log_file``, append mode) records at
    DEBUG; the root logger level -- and hence console verbosity -- is
    set from the user-supplied ``log_level`` string.
    NOTE(review): the ``logging`` module is passed in as a parameter,
    shadowing the module-level import of the same name.
    """
    # set logging for screen and file output
    root_logger = logging.getLogger()
    out_fmt = "%(asctime)s %(levelname)-8s %(name)s,%(lineno)s\t%(message)s"
    logging.basicConfig(
        filename=os.path.join(out_dir, log_file),
        filemode='a',
        format=out_fmt,
        datefmt='%H:%M:%S',
        level=logging.DEBUG)
    root_logger.setLevel(log_level.upper())
    logfmt = logging.Formatter(out_fmt)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logfmt)
    root_logger.addHandler(console_handler)
def read_yaml_file(yaml_file):
    """Parse the YAML file at ``yaml_file`` and return its contents."""
    with open(yaml_file, 'r') as yaml_handle:
        return yaml.safe_load(yaml_handle)
def map_var_to_stream(diagnostics, stream_map):
    """Map the variables of every diagnostic to their model streams.

    :param diagnostics: the ``diagnostics`` section of a recipe; each
        diagnostic holds a ``variables`` mapping keyed by variable name.
    :param stream_map: mapping from variable standard name to stream.
    :return: list of unique stream strings (order not significant).
    """
    # A set comprehension de-duplicates the streams in a single pass.
    streams = {
        stream_map[var]
        for diag in diagnostics.values()
        for var in diag['variables']
    }
    return list(streams)
def write_rose_conf(rose_config_template, recipe_file, config_file, log_level):
    """Write a filled-in rose-suite.conf for every dataset in the recipe.

    For each dataset that has a suite mapping in the user config, copy
    the default mip_convert suite, fill in the jinja2 template fields,
    and edit the per-component mip_convert configuration files.

    :param rose_config_template: path to the rose-suite.conf template.
    :param recipe_file: ESMValTool recipe (YAML) listing the datasets.
    :param config_file: user configuration (YAML) with suite mappings.
    :param log_level: logging verbosity string.
    :return: list of the rose suite directories that were set up.
    """
    # Build the ConfigParser object
    config = configparser.ConfigParser()
    config.optionxform = str  # keep option names case-sensitive
    config.read(rose_config_template)
    recipe_object = read_yaml_file(recipe_file)
    conf_file = read_yaml_file(config_file)
    datasets = recipe_object['datasets']
    # check if dataset needs analysis
    datasets_to_analyze = []
    for dataset in datasets:
        if dataset['dataset'] not in conf_file['DATASET_TO_SUITE']:
            logger.warning("Dataset %s has no mapping to suite",
                           dataset['dataset'])
            logger.warning("Assuming data retrival from elsewhere.")
        else:
            datasets_to_analyze.append(dataset)
    diagnostics = recipe_object['diagnostics']
    active_streams = map_var_to_stream(diagnostics, conf_file['STREAM_MAP'])
    # set stream overrides to None and set components
    # also set CYCLING_FREQUENCIES to P1Y overall
    stream_overrides = {}
    stream_components = {}
    cycling_frequencies = {}
    for stream in active_streams:
        stream_overrides[stream] = 'None'
        stream_components[stream] = conf_file['STREAM_COMPONENTS'][stream]
        cycling_frequencies[stream] = 'P1Y'
    # set the logger to start outputting
    if not os.path.exists(conf_file['ROSES_OUTPUT']):
        os.makedirs(conf_file['ROSES_OUTPUT'])
    _set_logger(logging, conf_file['ROSES_OUTPUT'], 'rose_suites_setup.log',
                log_level)
    logger.info(HEADER)
    # store the rose suite locations
    rose_suite_locations = []
    # loop through datasets (different suites for different datasets)
    for dataset in datasets_to_analyze:
        # set correct paths
        rose_suite = os.path.join(
            conf_file['ROSES_ROOT'],
            conf_file['DATASET_TO_SUITE'][dataset['dataset']])
        rose_suite_locations.append(rose_suite)
        rose_output = os.path.join(
            conf_file['ROSES_OUTPUT'],
            conf_file['DATASET_TO_SUITE'][dataset['dataset']])
        # always start from a fresh copy of the default suite
        if os.path.exists(rose_suite):
            shutil.rmtree(rose_suite)
        if os.path.exists(DEFAULT_SUITE_LOCATION):
            shutil.copytree(DEFAULT_SUITE_LOCATION, rose_suite)
        else:
            logger.error("Default Suite Location not found: %s",
                         DEFAULT_SUITE_LOCATION)
            break
        if not os.path.exists(rose_output):
            os.makedirs(rose_output)
        new_mipconv_config = os.path.join(rose_suite, 'mip_convert_config')
        # start logging
        logger.info("Working on dataset: %s", dataset)
        logger.info("Mapping dataset to suite: %s", rose_suite)
        logger.info("Output and logs written to: %s", rose_output)
        logger.info("Creating rose suite directories...")
        logger.info("Use rose-suite.conf template %s", rose_config_template)
        logger.info("Use user config file %s", config_file)
        # write the file; values are quoted because the suite.rc jinja2
        # fields expect string literals
        config.set('jinja2:suite.rc', 'INPUT_DIR',
                   '"' + conf_file['INPUT_DIR'] + '"')
        config.set('jinja2:suite.rc', 'OUTPUT_DIR', '"' + rose_output + '"')
        config.set('jinja2:suite.rc', 'CDDS_DIR',
                   '"' + DEFAULT_SUITE_LOCATION + '"')
        config.set('jinja2:suite.rc', 'MIP_CONVERT_CONFIG_DIR',
                   '"' + new_mipconv_config + '"')
        config.set('jinja2:suite.rc', 'ACTIVE_STREAMS', str(active_streams))
        config.set('jinja2:suite.rc', 'STREAM_TIME_OVERRIDES',
                   str(stream_overrides))
        config.set('jinja2:suite.rc', 'FIRST_YEAR', str(dataset['start_year']))
        config.set('jinja2:suite.rc', 'REF_YEAR', str(dataset['start_year']))
        config.set('jinja2:suite.rc', 'FINAL_YEAR', str(dataset['end_year']))
        config.set('jinja2:suite.rc', 'STREAM_COMPONENTS',
                   str(stream_components))
        config.set('jinja2:suite.rc', 'CYCLING_FREQUENCIES',
                   str(cycling_frequencies))
        config.set(
            'jinja2:suite.rc', 'TARGET_SUITE_NAME',
            '"' + conf_file['DATASET_TO_SUITE'][dataset['dataset']] + '"')
        with open(os.path.join(rose_suite, 'rose-suite.conf'), 'w') as r_c:
            logger.info("Writing rose-suite.conf file %s",
                        os.path.join(rose_suite, 'rose-suite.conf'))
            config.write(r_c)
        # now that we have to conf file set up we need to
        # edit the mip_convert configuration file with the correct data
        for key, values in conf_file['STREAM_COMPONENTS'].items():
            for comp in values:
                mipconv_config = os.path.join(new_mipconv_config,
                                              'mip_convert.cfg.' + comp)
                _edit_mip_convert_config(mipconv_config, conf_file, dataset,
                                         key)
    return rose_suite_locations
def _edit_mip_convert_config(mipconv_config, conf_file, dataset, stream):
    """Edit one mip_convert configuration file for the given stream.

    Sets the COMMON/request fields (CDDS dir, base date, suite id) and
    fills the ``stream_<stream>`` section with the space-separated list
    of variables to convert for each ``<project>_<mip>`` table.

    :param mipconv_config: path of the mip_convert config file to edit.
    :param conf_file: user configuration dict (``STREAM_MAP`` etc.).
    :param dataset: one recipe dataset dict; ``mip`` key is optional.
    :param stream: stream string for the section being filled.
    """
    # set the correct variables
    base_date = str(dataset['start_year']) + '-01-01-00-00-00'
    suite_id = conf_file['DATASET_TO_SUITE'][dataset['dataset']]
    cdds_dir = os.path.join(DEFAULT_SUITE_LOCATION, 'mip_convert_aux')
    # Build the ConfigParser object
    config = configparser.ConfigParser()
    config.optionxform = str  # mip_convert option names are case-sensitive
    config.read(mipconv_config)
    # set the correct fields
    config.set('COMMON', 'cdds_dir', cdds_dir)
    config.set('request', 'base_date', base_date)
    config.set('request', 'suite_id', suite_id)
    stream_section = '_'.join(['stream', stream])
    # add the section if not there already
    if not config.has_section(stream_section):
        config.add_section(stream_section)
    if 'mip' not in dataset:
        # can work without any mip in dataset:
        # assemble every possible mip-table mapping from the default
        # per-project stream map instead of taking it from a diagnostic
        logger.warning("No mip in the recipe dataset section.")
        logger.warning("Assigning mapping from default dictionary.")
        stream_map_default = STREAM_MAP[dataset['project']]
        variables = []
        cmip_types = []
        for var_name, var_stream in conf_file['STREAM_MAP'].items():
            for mip_table, default_stream in stream_map_default.items():
                if var_stream == default_stream:
                    cmip_types.append(
                        '_'.join([dataset['project'], mip_table]))
                    variables.append(var_name)
        # de-duplicate; a variable can match several mip tables
        str_variables = ' '.join(set(variables))
        if variables:
            for cmip_type in cmip_types:
                config.set(stream_section, cmip_type, str_variables)
    else:
        cmip_type = '_'.join([dataset['project'], dataset['mip']])
        all_vars = conf_file['STREAM_MAP'].keys()
        str_variables = ' '.join(
            [v for v in all_vars if conf_file['STREAM_MAP'][v] == stream])
        config.set(stream_section, cmip_type, str_variables)
    # write to file
    with open(mipconv_config, 'w') as r_c:
        logger.info("Writing mip_convert config file %s", mipconv_config)
        config.write(r_c)
def _put_in_env(env_script):
    """Make ``env_script`` executable and export its PATH/PYTHONPATH.

    Parses ``export PATH=...`` and ``export PYTHONPATH=...`` lines from
    the shell script and applies them to ``os.environ`` so that rose
    and mip_convert are reachable by subsequent subprocess calls.
    """
    logger.info("Setting environment for suite submission...")
    # First make it executable.
    chmod_command = ["chmod", "+x", env_script]
    proc = subprocess.Popen(chmod_command, stdout=subprocess.PIPE)
    proc.communicate()
    logger.info("Script %s is now executable.", env_script)
    # set the environment
    for line in open(env_script, 'r'):
        if line.split("=")[0] == 'export PATH':
            logger.info("Appending %s to path...",
                        line.split("=")[1].strip("\n"))
            # BUGFIX: the previous .strip(":$PATH") stripped any of the
            # characters ':$PATH' from both ends, mangling real paths
            # (e.g. ones ending in 'H' or starting with 'T'); remove the
            # literal ':$PATH' suffix instead.
            add_path = line.split("=")[1].strip("\n")
            if add_path.endswith(":$PATH"):
                add_path = add_path[:-len(":$PATH")]
            os.environ["PATH"] += os.pathsep + add_path
        elif line.split("=")[0] == 'export PYTHONPATH':
            logger.info("Exporting %s as PYTHONPATH...",
                        line.split("=")[1].strip("\n"))
            os.environ["PYTHONPATH"] = line.split("=")[1].strip("\n")
    # print and check
    logger.info("New path: %s", str(os.environ["PATH"]))
    logger.info("mip_convert PYTHONPATH: %s", str(os.environ["PYTHONPATH"]))
    proc = subprocess.Popen(["which", "rose"], stdout=subprocess.PIPE)
    out, err = proc.communicate()
    logger.info("rose: %s %s", out, err)
    proc = subprocess.Popen(["which", "mip_convert"], stdout=subprocess.PIPE)
    out, err = proc.communicate()
    logger.info("mip_convert: %s %s", out, err)
def _source_envs(suite):
    """Source the suite-specific and Met Office rose/cylc environments."""
    # suite-specific command-line environment, then the metomi one
    suite_env = os.path.join(suite, 'env_setup_command_line.sh')
    env_file_mo = os.path.join(suite, 'sourcepaths.sh')
    for env_script in (suite_env, env_file_mo):
        _put_in_env(env_script)
def _run_suite(suite):
    """Submit the mip_convert rose suite located at ``suite``."""
    os.chdir(suite)
    logger.info("Submitting suite from %s", suite)
    process = subprocess.Popen(["rose", "suite-run"], stdout=subprocess.PIPE)
    stdout, stderr = process.communicate()
    logger.info("Rose communications: %s %s", str(stdout), str(stderr))
def symlink_data(recipe_file, config_file, log_level):
    """Grab the mip_converted output and manage it for ESMValTool.

    Creates a timestamped directory tree of symlinks, one subdirectory
    per dataset, pointing at the netCDF files produced by the
    mip_convert suites so ESMValTool can consume them without copying.
    """
    # get configuration and recipe
    recipe_object = read_yaml_file(recipe_file)
    conf_file = read_yaml_file(config_file)
    datasets = recipe_object['datasets']
    # create directory that stores all the output netCDF files
    now = datetime.datetime.utcnow().strftime("%Y%m%d_%H%M%S")
    # BUGFIX: the previous recipe_file.strip('.yml') stripped any of the
    # characters '.yml' from both ends (e.g. 'my_recipe.yml' became
    # '_recipe'); drop the literal '.yml' suffix instead.
    if recipe_file.endswith('.yml'):
        recipe_base = recipe_file[:-len('.yml')]
    else:
        recipe_base = recipe_file
    new_subdir = '_'.join((recipe_base, now))
    sym_output_dir = os.path.join(conf_file['ROSES_OUTPUT'],
                                  'mip_convert_symlinks', new_subdir)
    if not os.path.exists(sym_output_dir):
        os.makedirs(sym_output_dir)
    # set the logger to start outputting
    _set_logger(logging, conf_file['ROSES_OUTPUT'], 'file_simlink.log',
                log_level)
    logger.info(HEADER)
    # loop through all datasets to symlink output
    for dataset in datasets:
        rose_output = os.path.join(
            conf_file['ROSES_OUTPUT'],
            conf_file['DATASET_TO_SUITE'][dataset['dataset']])
        logger.info("Working on dataset: %s", dataset)
        logger.info("Output and logs written to: %s", rose_output)
        # create the dataset dir (fresh on every run)
        dataset_output = os.path.join(sym_output_dir, dataset['dataset'])
        if os.path.exists(dataset_output):
            shutil.rmtree(dataset_output)
        os.makedirs(dataset_output)
        # walk the suite output and symlink every matching netCDF file;
        # file names are '_'-separated with the dataset name third
        for root, _, files in os.walk(rose_output):
            for xfile in files:
                real_file = os.path.join(root, xfile)
                imag_file = os.path.join(dataset_output, xfile)
                # symlink it if nc file
                if real_file.endswith('.nc') and \
                        xfile.split('_')[2] == dataset['dataset']:
                    if not os.path.islink(imag_file):
                        logger.info("File to symlink: %s", real_file)
                        logger.info("Symlinked file: %s", imag_file)
                        os.symlink(real_file, imag_file)
                    else:
                        logger.info("Symlinked file exists...")
                        logger.info("Original file: %s", real_file)
                        logger.info("Symlinked file: %s", imag_file)
def main():
    """Entry point: set up, run, or post-process mip_convert suites.

    Dispatches on the ``--mode`` command-line flag for every recipe
    file given on the command line.
    """
    logger.info("Running main function...")
    args = get_args()
    rose_config_template = os.path.join(
        os.path.dirname(__file__), "rose-suite-template.conf")
    # make sure the file is retrieved nonetheless
    # NOTE(review): DEFAULT_SUITE_LOCATION is only defined on JASMIN
    # hosts (see module top); this fallback raises NameError elsewhere.
    if not os.path.isfile(rose_config_template):
        logger.info("Fetching rose template config from suite %s",
                    DEFAULT_SUITE_LOCATION)
        rose_config_template = os.path.join(DEFAULT_SUITE_LOCATION,
                                            "rose-suite-template.conf")
    recipe_files = args.recipe_files
    config_file = args.config_file
    log_level = args.log_level
    for recipe_file in recipe_files:
        if args.mode == 'setup-only':
            # set up the rose suites
            write_rose_conf(rose_config_template, recipe_file, config_file,
                            log_level)
        elif args.mode == 'setup-run-suites':
            # set up the rose suites, then source envs and submit each
            roses = write_rose_conf(rose_config_template, recipe_file,
                                    config_file, log_level)
            # set up the environment and submit
            for rose in roses:
                _source_envs(rose)
                _run_suite(rose)
        elif args.mode == 'postproc':
            symlink_data(recipe_file, config_file, log_level)
# Script entry point.
if __name__ == '__main__':
    main()
/NlvWxPython-4.2.0-cp37-cp37m-win_amd64.whl/wx/aui.py | from ._aui import *
import wx
def _AuiPaneInfoArray___repr__(self):
    """Readable repr for the wrapped array: class name plus item list."""
    return f"AuiPaneInfoArray: {list(self)!r}"
AuiPaneInfoArray.__repr__ = _AuiPaneInfoArray___repr__
del _AuiPaneInfoArray___repr__
# One wx.PyEventBinder per AUI manager/pane event type, so handlers can
# be attached with the usual widget.Bind(EVT_..., handler) idiom.
EVT_AUI_PANE_BUTTON = wx.PyEventBinder( wxEVT_AUI_PANE_BUTTON )
EVT_AUI_PANE_CLOSE = wx.PyEventBinder( wxEVT_AUI_PANE_CLOSE )
EVT_AUI_PANE_MAXIMIZE = wx.PyEventBinder( wxEVT_AUI_PANE_MAXIMIZE )
EVT_AUI_PANE_RESTORE = wx.PyEventBinder( wxEVT_AUI_PANE_RESTORE )
EVT_AUI_PANE_ACTIVATED = wx.PyEventBinder( wxEVT_AUI_PANE_ACTIVATED )
EVT_AUI_RENDER = wx.PyEventBinder( wxEVT_AUI_RENDER )
EVT_AUI_FIND_MANAGER = wx.PyEventBinder( wxEVT_AUI_FIND_MANAGER )
# Monkey-patch a readable __repr__ onto each wrapped array type, then
# delete the temporary helper name so it does not leak from the module.
def _AuiDockInfoArray___repr__(self):
    return "AuiDockInfoArray: " + repr(list(self))
AuiDockInfoArray.__repr__ = _AuiDockInfoArray___repr__
del _AuiDockInfoArray___repr__
def _AuiDockUIPartArray___repr__(self):
    return "AuiDockUIPartArray: " + repr(list(self))
AuiDockUIPartArray.__repr__ = _AuiDockUIPartArray___repr__
del _AuiDockUIPartArray___repr__
def _AuiPaneInfoPtrArray___repr__(self):
    return "AuiPaneInfoPtrArray: " + repr(list(self))
AuiPaneInfoPtrArray.__repr__ = _AuiPaneInfoPtrArray___repr__
del _AuiPaneInfoPtrArray___repr__
def _AuiDockInfoPtrArray___repr__(self):
    return "AuiDockInfoPtrArray: " + repr(list(self))
AuiDockInfoPtrArray.__repr__ = _AuiDockInfoPtrArray___repr__
del _AuiDockInfoPtrArray___repr__
# AUI toolbar event binders; the trailing 1 means each binder expects
# one window identifier when used with Bind.
EVT_AUITOOLBAR_TOOL_DROPDOWN = wx.PyEventBinder( wxEVT_AUITOOLBAR_TOOL_DROPDOWN, 1 )
EVT_AUITOOLBAR_OVERFLOW_CLICK = wx.PyEventBinder( wxEVT_AUITOOLBAR_OVERFLOW_CLICK, 1 )
EVT_AUITOOLBAR_RIGHT_CLICK = wx.PyEventBinder( wxEVT_AUITOOLBAR_RIGHT_CLICK, 1 )
EVT_AUITOOLBAR_MIDDLE_CLICK = wx.PyEventBinder( wxEVT_AUITOOLBAR_MIDDLE_CLICK, 1 )
EVT_AUITOOLBAR_BEGIN_DRAG = wx.PyEventBinder( wxEVT_AUITOOLBAR_BEGIN_DRAG, 1 )
# Same __repr__ monkey-patch pattern for the toolbar/notebook arrays.
def _AuiToolBarItemArray___repr__(self):
    return "AuiToolBarItemArray: " + repr(list(self))
AuiToolBarItemArray.__repr__ = _AuiToolBarItemArray___repr__
del _AuiToolBarItemArray___repr__
def _AuiNotebookPageArray___repr__(self):
    return "AuiNotebookPageArray: " + repr(list(self))
AuiNotebookPageArray.__repr__ = _AuiNotebookPageArray___repr__
del _AuiNotebookPageArray___repr__
def _AuiTabContainerButtonArray___repr__(self):
    return "AuiTabContainerButtonArray: " + repr(list(self))
AuiTabContainerButtonArray.__repr__ = _AuiTabContainerButtonArray___repr__
del _AuiTabContainerButtonArray___repr__
# AUI notebook event binders; each expects one window identifier.
EVT_AUINOTEBOOK_PAGE_CLOSE = wx.PyEventBinder( wxEVT_AUINOTEBOOK_PAGE_CLOSE, 1 )
EVT_AUINOTEBOOK_PAGE_CLOSED = wx.PyEventBinder( wxEVT_AUINOTEBOOK_PAGE_CLOSED, 1 )
EVT_AUINOTEBOOK_PAGE_CHANGED = wx.PyEventBinder( wxEVT_AUINOTEBOOK_PAGE_CHANGED, 1 )
EVT_AUINOTEBOOK_PAGE_CHANGING = wx.PyEventBinder( wxEVT_AUINOTEBOOK_PAGE_CHANGING, 1 )
EVT_AUINOTEBOOK_BUTTON = wx.PyEventBinder( wxEVT_AUINOTEBOOK_BUTTON, 1 )
EVT_AUINOTEBOOK_BEGIN_DRAG = wx.PyEventBinder( wxEVT_AUINOTEBOOK_BEGIN_DRAG, 1 )
EVT_AUINOTEBOOK_END_DRAG = wx.PyEventBinder( wxEVT_AUINOTEBOOK_END_DRAG, 1 )
EVT_AUINOTEBOOK_DRAG_MOTION = wx.PyEventBinder( wxEVT_AUINOTEBOOK_DRAG_MOTION, 1 )
EVT_AUINOTEBOOK_ALLOW_DND = wx.PyEventBinder( wxEVT_AUINOTEBOOK_ALLOW_DND, 1 )
EVT_AUINOTEBOOK_DRAG_DONE = wx.PyEventBinder( wxEVT_AUINOTEBOOK_DRAG_DONE, 1 )
EVT_AUINOTEBOOK_TAB_MIDDLE_DOWN = wx.PyEventBinder( wxEVT_AUINOTEBOOK_TAB_MIDDLE_DOWN, 1 )
EVT_AUINOTEBOOK_TAB_MIDDLE_UP = wx.PyEventBinder( wxEVT_AUINOTEBOOK_TAB_MIDDLE_UP, 1 )
EVT_AUINOTEBOOK_TAB_RIGHT_DOWN = wx.PyEventBinder( wxEVT_AUINOTEBOOK_TAB_RIGHT_DOWN, 1 )
EVT_AUINOTEBOOK_TAB_RIGHT_UP = wx.PyEventBinder( wxEVT_AUINOTEBOOK_TAB_RIGHT_UP, 1 )
EVT_AUINOTEBOOK_BG_DCLICK = wx.PyEventBinder( wxEVT_AUINOTEBOOK_BG_DCLICK, 1 )
/FlowTutor-0.9.0.tar.gz/FlowTutor-0.9.0/src/flowtutor/flowchart/node.py | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Optional, Union
from uuid import uuid4
import dearpygui.dearpygui as dpg
from shapely.geometry import Polygon
from shapely.geometry import Point
from flowtutor.gui.themes import theme_colors
if TYPE_CHECKING:
from flowtutor.flowchart.connection import Connection
from flowtutor.flowchart.flowchart import Flowchart
# dearpygui tag of the drawing canvas that node draw-calls attach to
FLOWCHART_TAG = 'flowchart'
class Node(ABC):
    """Abstract base of all flowchart nodes.

    A node knows its unique dearpygui ``tag``, its outgoing
    ``connections``, its declaration ``scope``, its canvas position and
    derived geometry, plus debugger bookkeeping (breakpoint flag,
    generated source ``lines``, debug cursor). Concrete subclasses
    provide the shape outline, colour and label.
    """
    def __init__(self) -> None:
        self._tag = str(uuid4())  # unique id, also used as the dearpygui item tag
        self._connections: list[Connection] = []
        self._scope: list[str] = []
        self._pos = (0, 0)  # top-left position on the flowchart canvas
        self._comment = ''
        self._break_point = False  # debugger breakpoint set on this node
        self._is_comment = False  # True -> node is disabled ("commented out")
        self._lines: list[int] = []  # source line numbers generated from this node
        self._has_debug_cursor = False
    def __repr__(self) -> str:
        return f'({self.tag})'
    @property
    def tag(self) -> str:
        """Unique identifier, shared with the dearpygui draw node."""
        return self._tag
    @tag.setter
    def tag(self, tag: str) -> None:
        self._tag = tag
    @property
    def scope(self) -> list[str]:
        """Scope path this node is declared in."""
        return self._scope
    @scope.setter
    def scope(self, scope: list[str]) -> None:
        self._scope = scope
    @property
    def shape(self) -> Polygon: # pragma: no cover
        """Outline polygon at the node's canvas position.

        When the label makes ``width`` exceed ``shape_width``, points
        left/right of the outline's centre are pushed outwards by half
        the difference so the shape stretches horizontally around the
        label; finally all points are translated to ``pos``.
        """
        pos_x, pos_y = self.pos
        delta = self.width - self.shape_width
        if delta > 0:
            points = []
            for p in self.shape_points:
                x, y = p
                if x < self.shape_width / 2:
                    points.append((x - delta//2, y))
                elif x > self.shape_width / 2:
                    points.append((x + delta//2, y))
                else:
                    points.append((x, y))
        else:
            points = self.shape_points.copy()
        return Polygon(list(map(lambda p: (p[0] + pos_x, p[1] + pos_y), points)))
    @property
    def width(self) -> int:
        """Rendered width: at least ``shape_width``, grown to fit the label."""
        label_width, _ = dpg.get_text_size(self.label)
        return int(max(self.shape_width, label_width + 40))
    @property
    @abstractmethod
    def shape_width(self) -> int:
        """Nominal (unstretched) width of the subclass's outline."""
        pass
    @property
    @abstractmethod
    def shape_height(self) -> int:
        """Nominal height of the subclass's outline."""
        pass
    @property
    def pos(self) -> tuple[int, int]:
        """Top-left position on the canvas."""
        return self._pos
    @pos.setter
    def pos(self, pos: tuple[int, int]) -> None:
        self._pos = pos
    @property
    def bounds(self) -> tuple[int, int, int, int]:
        """Bounding box of the positioned shape (minx, miny, maxx, maxy).

        NOTE(review): shapely returns floats here although the
        annotation says int -- confirm callers rely on ints.
        """
        result: tuple[int, int, int, int] = self.shape.bounds
        return result
    @property
    @abstractmethod
    def raw_in_points(self) -> list[tuple[float, float]]:
        """Incoming connection anchor points, relative to the shape."""
        pass
    @property
    @abstractmethod
    def raw_out_points(self) -> list[tuple[float, float]]:
        """Outgoing connection anchor points, relative to the shape."""
        pass
    @property
    def in_points(self) -> list[tuple[float, float]]:
        """Incoming anchors translated to canvas coordinates."""
        pos_x, pos_y = self.pos
        return list(map(lambda p: (p[0] + pos_x, p[1] + pos_y), self.raw_in_points))
    @property
    def out_points(self) -> list[tuple[float, float]]:
        """Outgoing anchors translated to canvas coordinates."""
        pos_x, pos_y = self.pos
        return list(map(lambda p: (p[0] + pos_x, p[1] + pos_y), self.raw_out_points))
    @property
    def lines(self) -> list[int]:
        """Source line numbers generated from this node."""
        return self._lines
    @lines.setter
    def lines(self, lines: list[int]) -> None:
        self._lines = lines
    @property
    def has_debug_cursor(self) -> bool:
        """Whether the debugger cursor arrow is drawn on this node."""
        return self._has_debug_cursor
    @has_debug_cursor.setter
    def has_debug_cursor(self, has_debug_cursor: bool) -> None:
        self._has_debug_cursor = has_debug_cursor
    @property
    @abstractmethod
    def color(self) -> tuple[int, int, int]:
        """Fill colour of the node shape (RGB)."""
        pass
    @property
    @abstractmethod
    def shape_points(self) -> list[tuple[float, float]]:
        """Outline points of the subclass's shape, relative to (0, 0)."""
        pass
    @property
    @abstractmethod
    def label(self) -> str:
        """Text rendered at the centre of the node."""
        pass
    @property
    def connections(self) -> list[Connection]:
        """Outgoing connections to other nodes."""
        return self._connections
    @connections.setter
    def connections(self, connections: list[Connection]) -> None:
        self._connections = connections
    @property
    @abstractmethod
    def is_initialized(self) -> bool:
        """Whether the node has all required user input."""
        pass
    @property
    def comment(self) -> str:
        """User comment text attached to the node."""
        return self._comment
    @comment.setter
    def comment(self, comment: str) -> None:
        self._comment = comment
    @property
    def break_point(self) -> bool:
        """Whether a debugger breakpoint is set on this node."""
        return self._break_point
    @break_point.setter
    def break_point(self, break_point: bool) -> None:
        self._break_point = break_point
    @property
    def is_comment(self) -> bool:
        """Whether the node itself is disabled ("commented out")."""
        return self._is_comment
    @is_comment.setter
    def is_comment(self, is_comment: bool) -> None:
        self._is_comment = is_comment
    def get_disabled_inherited(self, flowchart: Optional[Flowchart]) -> bool:
        """Return True if any enclosing node is commented out."""
        if not flowchart:
            return False
        containing_node = flowchart.find_containing_node(self)
        if not containing_node:
            return False
        # Recurse outwards until a commented ancestor (or none) is found.
        return containing_node.is_comment or containing_node.get_disabled_inherited(flowchart)
    def get_left_x(self) -> int:
        """X offset of the rendered node's left edge relative to ``pos``."""
        return self.shape_width//2 - self.width//2
    def get_right_x(self) -> int:
        """X offset of the rendered node's right edge relative to ``pos``."""
        return (self.width + self.shape_width)//2
    def find_connection(self, index: int) -> Optional[Connection]:
        """Return the connection leaving source anchor ``index``, if any."""
        return next(filter(lambda c: c is not None and c.src_ind == index, self.connections), None)
    def draw(self,
             flowchart: Flowchart,
             mouse_pos: Optional[tuple[int, int]],
             is_selected: bool = False) -> None: # pragma: no cover
        """Draw the node, its label, debug markers and connections."""
        # Disabled nodes (directly or via an ancestor) are greyed out.
        color = (150, 150, 150) if self.is_comment or self.get_disabled_inherited(flowchart) else self.color
        pos_x, pos_y = self.pos
        with dpg.draw_node(
                tag=self.tag,
                parent=FLOWCHART_TAG):
            text_color = theme_colors[(dpg.mvThemeCol_Text, 0)]
            # Outline weight signals state: selected > hovered > normal.
            thickness = 3 if is_selected else 2 if self.is_hovered(
                mouse_pos) else 1
            dpg.draw_polygon(list(self.shape.exterior.coords),
                             fill=color)
            # Outline drawn separately; red marks an active breakpoint.
            dpg.draw_polygon(list(self.shape.exterior.coords),
                             color=(255, 0, 0) if self.break_point else text_color,
                             thickness=thickness)
            text_width, text_height = dpg.get_text_size(self.label)
            if self.__class__.__name__ == 'DoWhileLoop':
                # Do-while loops get an extra circle and an offset label.
                dpg.draw_circle((pos_x + 75, pos_y + 25), 25, fill=self.color)
                dpg.draw_circle((pos_x + 75, pos_y + 25), 25, thickness=2, color=text_color)
                dpg.draw_text((pos_x + self.shape_width / 2 - text_width / 2,
                               pos_y + self.shape_height / 2 + 50 - text_height / 2),
                              self.label, color=(0, 0, 0), size=18)
            else:
                dpg.draw_text((pos_x + self.shape_width / 2 - text_width / 2,
                               pos_y + self.shape_height / 2 - text_height / 2),
                              self.label, color=(0, 0, 0), size=18)
            if self.has_debug_cursor:
                # Green arrow to the left of the node marks the debugger
                # position; drawn twice (fill, then outline).
                cursor_pos = self.pos
                cursor_pos_x, cursor_pos_y = cursor_pos
                dpg.draw_polygon([
                    cursor_pos,
                    (cursor_pos_x - 15, cursor_pos_y + 15),
                    (cursor_pos_x - 15, cursor_pos_y + 5),
                    (cursor_pos_x - 30, cursor_pos_y + 5),
                    (cursor_pos_x - 30, cursor_pos_y - 5),
                    (cursor_pos_x - 15, cursor_pos_y - 5),
                    (cursor_pos_x - 15, cursor_pos_y - 15),
                    cursor_pos
                ],
                    fill=(0, 255, 0))
                dpg.draw_polygon([
                    cursor_pos,
                    (cursor_pos_x - 15, cursor_pos_y + 15),
                    (cursor_pos_x - 15, cursor_pos_y + 5),
                    (cursor_pos_x - 30, cursor_pos_y + 5),
                    (cursor_pos_x - 30, cursor_pos_y - 5),
                    (cursor_pos_x - 15, cursor_pos_y - 5),
                    (cursor_pos_x - 15, cursor_pos_y - 15),
                    cursor_pos
                ],
                    color=text_color)
            for connection in self.connections:
                connection.draw(self)
    def redraw(self, flowchart: Flowchart, mouse_pos: Optional[tuple[int, int]], selected_nodes: list[Node]) -> None:
        '''Deletes the node and draws a new version of it.'''
        self.delete()
        self.draw(flowchart, mouse_pos, self in selected_nodes)
    def is_hovered(self, mouse_pos: Union[tuple[int, int], None]) -> bool:
        """Return True if ``mouse_pos`` lies inside the node's shape."""
        if not mouse_pos:
            return False
        point = Point(*mouse_pos)
        result: bool = self.shape.contains(point)
        return result
    def has_nested_nodes(self) -> bool:
        """Whether the node can contain child nodes (loops override this)."""
        return False
    def delete(self) -> None: # pragma: no cover
        """Remove the node's draw item from the dearpygui canvas."""
        if dpg.does_item_exist(self.tag):
            dpg.delete_item(self.tag)
/MailerLiteSDK-0.0.5.tar.gz/MailerLiteSDK-0.0.5/README.md | This is a very early project for mailer lite
# USAGE
Step 1:
Create a client with your MailerLite API key
```python
from MailerLiteSDK import MailerClient
mailer = MailerClient(api_key=os.getenv('MAILER_API_KEY'))
```
Step 2:
Add the user as a subscriber
```python
mailer.subscribe(email=sample_email)
```
### REF
api document: https://developers.mailerlite.com/reference#update-subscriber
### UPLOAD TO PYPI
```
# change setup.py version
python setup.py sdist
twine upload -r pypi dist/* --verbose
``` | PypiClean |
/Assimulo-3.0.tar.gz/Assimulo-3.0/assimulo/solvers/dasp3.py |
# Copyright (C) 2011 Modelon AB
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as N
import scipy as S
import scipy.linalg as LIN
from assimulo.problem import SingPerturbed_Problem
from assimulo.exception import *
from assimulo.ode import *
from assimulo.explicit_ode import Explicit_ODE
try:
from assimulo.lib import dasp3dp
except ImportError:
pass
class DASP3ODE(Explicit_ODE):
    """
    DASP3 Solver by Gustaf Söderlind (1980-10-22). Originally published
    in,::

        DASP3 - A Program for the Numerical Integration of Partitioned:
        Stiff Ode:s and Differential-Algebraic Systems.

        By, Gustaf Söderlind, Department of Numerical Analysis, and
        Computing Science, The Royal Institute of Technology, 1980.
        Stockholm, Sweden.

    DASP3 solves system on the form,

    .. math::

        \\frac{\mathrm{d}y}{\mathrm{d}t} &= f(t,y,z) \;\;\; \\text{(N equations)} \\\\
        \\varepsilon\\frac{\mathrm{d}z}{\mathrm{d}t} &= G(t,y,z)\;\;\; \\text{(M equations)}

    If is assumed that the first system is non-stiff and that
    the stiffness of the second system is due to the parameter
    epsilon, possibly a diagonal matrix.
    """

    def __init__(self, problem):
        """
        Initiates the solver.

        Parameters::

            problem
                - The problem to be solved. Must be an instance
                  of the 'SingPerturbed_Problem' class.
        """
        Explicit_ODE.__init__(self, problem)  # Calls the base class

        if not isinstance(problem, SingPerturbed_Problem):
            raise Explicit_ODE_Exception('The problem needs to be a subclass of a SingPerturbed_Problem.')
        self.n = self.problem.n  # number of slow (y) equations
        self.m = self.problem.m  # number of fast (z) equations

        # Work-space arrays required by the Fortran code; the leading
        # n (resp. m) entries hold the current state.
        self.wsy = N.empty((10 * self.n,))
        self.wsy[:self.n] = self.problem.yy0
        self.wsz = N.empty((max(9 * self.m, 1),))  # array must be at least 1 element long
        self.wsz[:self.m] = self.problem.zz0

        # - Default values
        self.options["atol"] = 1.0e-6 * N.ones(self.problem_info["dim"])  # Absolute tolerance
        self.options["rtol"] = 1.0e-6  # Relative tolerance

        # Solver-specific statistics counters.
        self.statistics.add_key("nyder", "Number of slow function evaluations (Y)")
        self.statistics.add_key("nzder", "Number of fast function evaluations (Z)")

    def initialize(self):
        """Reset statistics and the stored solution before a simulation."""
        self.statistics.reset()

        self._tlist = []
        self._ylist = []

    def _solout(self, t, wsy, wsz, n, m, jstop):
        """
        Callback invoked by DASP3 after every successful step.

        Either reports the point continuously (possibly requesting a
        stop by returning jstop=-1) or stores it for retrieval after
        the integration. Returns the (possibly updated) jstop flag.
        """
        y = N.hstack((wsy[:n], wsz[:m]))
        if self._opts["report_continuously"]:
            initialize_flag = self.report_solution(t, y, self._opts)
            if initialize_flag:
                jstop = -1
        else:
            # BUGFIX: previously the point was also appended
            # unconditionally before this branch, storing every solution
            # point twice when report_continuously was False.
            self._tlist.append(t)
            self._ylist.append(y)

        return jstop

    def integrate(self, t, y, tf, opts):
        """Run the DASP3 integration from t to tf.

        Returns (flag, tlist, ylist); raises on a non-zero DASP3 flag.
        """
        atol = self.options["atol"]
        tol = self.options["rtol"]
        absrel = atol / tol  # DASP3 expects the absolute/relative ratio

        m = self.problem.m
        n = self.problem.n

        # Fortran work-space arrays (contents managed by dasp3dp).
        a = N.empty((m, m))
        w = N.empty((m, m))
        slu = N.empty((2 * m,))
        ips = N.empty((m,), 'int32')
        ind = N.empty((2 * m,), 'int32')
        eq = N.empty((m,), 'bool')
        wght = N.ones((m + n,))

        # Store the opts (read by _solout).
        self._opts = opts

        t, lflag = dasp3dp.dasp3(self.problem.rhs1, self.problem.rhs2, self._solout, t, tf, self.wsy, self.wsz, n, m, tol,
                                 absrel, wght, self.problem.eps, a, w, slu, ips, eq, ind)

        # Checking return
        if lflag == 0:
            flag = ID_PY_COMPLETE
        else:
            raise Exception("DASP3 failed with flag %d" % lflag)

        # Retrieving statistics
        self.statistics["nsteps"] += dasp3dp.COUNTS.NSTEP
        self.statistics["nyder"] += dasp3dp.COUNTS.NYDER
        self.statistics["nzder"] += dasp3dp.COUNTS.NZDER
        self.statistics["nerrfails"] += dasp3dp.COUNTS.NREJ
        self.statistics["nlus"] += 0  # DASP3 does not report LU factorizations

        return flag, self._tlist, self._ylist

    def print_statistics(self, verbose=NORMAL):
        """
        Prints the run-time statistics for the problem.
        """
        self.log_message('Final Run Statistics: %s \n' % self.problem.name, verbose)

        self.log_message('\nSolver options:\n', verbose)
        self.log_message(' Solver                  : DASP3 ', verbose)
        self.log_message(' Tolerances (absolute)   : ' + str(self._compact_atol()), verbose)
        self.log_message(' Tolerances (relative)   : ' + str(self.options["rtol"]), verbose)
        self.log_message('', verbose)

    def _set_atol(self, atol):
        # NOTE: ``N.float`` (a deprecated alias of the builtin ``float``)
        # was removed in NumPy 1.24 -- use the builtin directly.
        arr = N.array(atol, dtype=float)
        self.options["atol"] = arr if len(arr.shape) > 0 else N.array([atol], dtype=float)

        if len(self.options["atol"]) == 1:
            self.options["atol"] = self.options["atol"] * N.ones(self.problem_info["dim"])
        elif len(self.options["atol"]) != self.problem_info["dim"]:
            raise DASP3_Exception("atol must be of length one or same as the dimension of the problem.")

    def _get_atol(self):
        """
        Defines the absolute tolerance(s) that is to be used by the solver.
        Can be set differently for each variable.

        Parameters::

            atol
                - Default '1.0e-6'.

                - Should be a positive float or a numpy vector
                  of floats.

                    Example:
                        atol = [1.0e-4, 1.0e-6]
        """
        return self.options["atol"]

    atol = property(_get_atol, _set_atol)

    def _set_rtol(self, rtol):
        try:
            self.options["rtol"] = float(rtol)
        except (ValueError, TypeError):
            raise DASP3_Exception('Relative tolerance must be a (scalar) float.')
        if self.options["rtol"] <= 0.0:
            raise DASP3_Exception('Relative tolerance must be a positive (scalar) float.')

    def _get_rtol(self):
        """
        Defines the relative tolerance that is to be used by the solver.

        Parameters::

            rtol
                - Default '1.0e-6'.

                - Should be a positive float.

                    Example:
                        rtol = 1.0e-4
        """
        return self.options["rtol"]

    rtol = property(_get_rtol, _set_rtol)
/FRB-1.1.4.tar.gz/FRB-1.1.4/fred/__init__.py | from fred.clients.categories import CategoriesClient
from fred.clients.releases import ReleasesClient
from fred.clients.tags import TagsClient
from fred.clients.sources import SourcesClient
from fred.clients.eseries import ESeriesClient
import fred.config as c
import weakref
## Establish Federal Reserve Economic Data (Fred) wrapper for Python
class Fred(object):
    """
    Client wrapper for the Federal Reserve Economic Data (FRED) REST API.

    An instance exposes one attribute per API area -- ``category``,
    ``release``, ``series``, ``tag`` and ``source`` -- each backed by the
    matching client class from :mod:`fred.clients`
    (:class:`CategoriesClient`, :class:`ReleasesClient`,
    :class:`ESeriesClient`, :class:`TagsClient`, :class:`SourcesClient`).
    Instantiating this class is the preferred (and only supported) way to
    obtain those clients.

    :arg str api_key: 32 character alpha-numeric lowercase string. Required.
    :arg str response_type: desired response format for API calls.
    """

    def __init__(self, api_key=c.api_key, response_type=c.response_type):
        # Root endpoint shared by every FRED REST request.
        self.url_root = 'https://api.stlouisfed.org/fred'
        # Normalize falsy values to None.
        self.api_key = api_key or None
        self.response_type = response_type or None
        # Hand each sub-client a weak proxy back to this instance so the
        # clients do not keep the Fred object alive through a cycle.
        proxy = weakref.proxy(self)
        shared = (self.api_key, self.url_root, self.response_type)
        self.category = CategoriesClient(proxy, *shared)
        self.release = ReleasesClient(proxy, *shared)
        self.series = ESeriesClient(proxy, *shared)
        self.tag = TagsClient(proxy, *shared)
        self.source = SourcesClient(proxy, *shared)
/CodeIntel-2.0.0b19-cp34-cp34m-macosx_10_12_x86_64.whl/codeintel/codeintel2/database/langlib.py | from __future__ import absolute_import
import sys
import os
from os.path import (join, dirname, exists, expanduser, splitext, basename,
split, abspath, isabs, isdir, isfile, normpath)
import threading
import time
from glob import glob
from pprint import pprint, pformat
import logging
from io import BytesIO
import codecs
import copy
# import apsw # for SQLite symbol database
import ciElementTree as ET
from codeintel2.common import *
from codeintel2 import util
from codeintel2.database.util import rmdir
from codeintel2.database.langlibbase import LangDirsLibBase
import six
#---- globals
log = logging.getLogger("codeintel.db")
#log.setLevel(logging.DEBUG)
#---- Database zone and lib implementations
class LangDirsLib(LangDirsLibBase):
    """A zone providing a view into an ordered list of dirs in a
    db/$lang/... area of the db.

    These are dished out via Database.get_lang_lib(), which indirectly
    then is dished out by the LangZone.get_lib(). Mostly this is just a
    view on the LangZone singleton for this particular language.

    Dev Notes:
    - The goal is to provide quick has_blob() and get_blob() -- i.e.
      some caching is involved (if 'foo' referred to
      'some/path/to/foo.py' a minute ago then it still does). As well,
      scanning/loading is done automatically as necessary. For example,
      if a request for Perl blob 'Bar' is made but there is no 'Bar' in
      the database yet, this code looks for a 'Bar.pm' on the file
      system and will scan it, load it and return the blob for it.
    """
    def __init__(self, lang_zone, lock, lang, name, dirs):
        # lang_zone: the LangZone singleton this lib is a view on.
        # lock: shared re-entrant lock guarding this zone's caches.
        # name: display name of this lib (used in __repr__).
        # dirs: ordered list of directories searched for blobs.
        LangDirsLibBase.__init__(self)
        self.lang_zone = lang_zone
        self._lock = lock
        self.mgr = lang_zone.mgr
        self.lang = lang
        self.name = name
        self.dirs = dirs
        self.import_handler \
            = self.mgr.citadel.import_handler_from_lang(self.lang)
        # Per-prefix cache for get_blob_imports().
        self._blob_imports_from_prefix_cache = {}
        # Per-dir cache for the import handler's find_importables_in_dir().
        self._importables_from_dir_cache = {}
        # We keep a "weak" merged cache of blobname lookup for all dirs
        # in this zone -- where "weak" means that we verify a hit by
        # checking the current real blob_index for that dir (which may
        # have changed). This caching slows down lookup for single-dir
        # LangDirsZones, but should scale better for LangDirsZones with
        # many dirs. (TODO-PERF: test this assertion.)
        self._dir_and_blobbase_from_blobname = {}
    def __repr__(self):
        return "<%s %s>" % (self.lang, self.name)
    def _acquire_lock(self):
        # Guards all caches of this lib and the underlying zone indices.
        self._lock.acquire()
    def _release_lock(self):
        self._lock.release()
    def has_blob(self, blobname, ctlr=None):
        # True if the blob can be resolved -- may scan/load on demand.
        dbsubpath = self._dbsubpath_from_blobname(blobname, ctlr=ctlr)
        return dbsubpath is not None
    def has_blob_in_db(self, blobname, ctlr=None):
        """Return true if the blobname is in the database.

        Typically this method is only used for debugging and .has_blob()
        is what you want.
        """
        dbsubpath = self._dbsubpath_from_blobname(
            blobname, ctlr=ctlr, only_look_in_db=True)
        return dbsubpath is not None
    def get_blob(self, blobname, ctlr=None):
        # Resolve the blob's db subpath (scanning on demand) and load it;
        # returns None if it cannot be found in any of self.dirs.
        self._acquire_lock()
        try:
            dbsubpath = self._dbsubpath_from_blobname(blobname, ctlr=ctlr)
            if dbsubpath is not None:
                return self.lang_zone.load_blob(dbsubpath)
            else:
                return None
        finally:
            self._release_lock()
    def get_blob_imports(self, prefix):
        """Return the set of imports under the given prefix.

            "prefix" is a tuple of import name parts. E.g. ("xml", "sax")
                for "import xml.sax." in Python. Or ("XML", "Parser") for
                "use XML::Parser::" in Perl.

        See description in database.py docstring for details.
        """
        self._acquire_lock()
        try:
            if prefix not in self._blob_imports_from_prefix_cache:
                if prefix:
                    # Find the first dir that provides the prefix root,
                    # then list the importables in its subdir.
                    for dir in self.dirs:
                        importables = self._importables_from_dir(dir)
                        if prefix[0] in importables:
                            sub_importables = self._importables_from_dir(
                                join(dir, *prefix))
                            imports = set(
                                (name, is_dir_import)
                                for name, (_, _, is_dir_import)
                                in sub_importables.items()
                            )
                            break
                    else:
                        imports = set()
                else:
                    # No prefix: union of top-level importables of all dirs.
                    imports = set()
                    for dir in self.dirs:
                        importables = self._importables_from_dir(dir)
                        imports.update(
                            (name, is_dir_import)
                            for name, (_, _, is_dir_import)
                            in importables.items()
                        )
                self._blob_imports_from_prefix_cache[prefix] = imports
            return self._blob_imports_from_prefix_cache[prefix]
        finally:
            self._release_lock()
    def blobs_with_basename(self, basename, ctlr=None):
        """Return all blobs that match the given base path.

        I.e. a filename lookup across all files in the dirs of this lib.

            "basename" is a string, e.g. 'Http.js'
            "ctlr" (optional) is an EvalController instance. If
                specified it should be used in the normal way (logging,
                checking .is_aborted()).

        A "blob" is a global scope-tag hit in all of the blobs for the execution
        set buffers.

        Returns the empty list if no hits.
        """
        self.ensure_all_dirs_scanned(ctlr=ctlr)
        blobs = []
        # we can't use self.get_blob because that only returns one answer; we
        # we need all of them.
        self._acquire_lock()
        try:
            for dir in self.dirs:
                dbfile_from_blobname = self.lang_zone.dfb_from_dir(dir, {})
                blobbase = dbfile_from_blobname.get(basename)
                if blobbase is not None:
                    dhash = self.lang_zone.dhash_from_dir(dir)
                    dbsubpath = join(dhash, blobbase)
                    blobs.append(self.lang_zone.load_blob(dbsubpath))
        finally:
            self._release_lock()
        return blobs
    def hits_from_lpath(self, lpath, ctlr=None, curr_buf=None):
        """Return all hits of the given lookup path.

        I.e. a symbol table lookup across all files in the dirs of this
        lib.

            "lpath" is a lookup name list, e.g. ['Casper', 'Logging']
                or ['dojo', 'animation'].
            "ctlr" (optional) is an EvalController instance. If
                specified it should be used in the normal way (logging,
                checking .is_aborted()).
            "curr_buf" (optional), if specified, is the current buf for
                which this query is being made. Hits from it should be
                skipped (i.e. don't bother searching it).

        A "hit" is (<CIX node>, <scope-ref>).  Each one represent a
        scope-tag or variable-tag hit in all of the blobs for the
        execution set buffers.

        Returns the empty list if no hits.
        """
        assert isinstance(lpath, tuple)  # common mistake to pass in a string
        # Need to have (at least once) scanned all importables.
        # Responsibility for ensuring the scan data is *up-to-date*
        # is elsewhere.
        self.ensure_all_dirs_scanned(ctlr=ctlr)
        if curr_buf:
            curr_blobname = curr_buf.blob_from_lang.get(self.lang, {}).get("name")
            curr_buf_dir = dirname(curr_buf.path)
        # Naive implementation (no caching)
        hits = []
        for dir in self.dirs:
            if ctlr and ctlr.is_aborted():
                log.debug("ctlr aborted")
                break
            toplevelname_index = self.lang_zone.load_index(
                dir, "toplevelname_index", {})
            for blobname in toplevelname_index.get_blobnames(lpath[0], ()):
                if curr_buf and curr_buf_dir == dir and blobname == curr_blobname:
                    continue
                blob = self.get_blob(blobname, ctlr=ctlr)
                try:
                    elem = blob
                    # Walk down the lookup path one name at a time.
                    for p in lpath:
                        #LIMITATION: *Imported* names at each scope are
                        # not being included here. This is fine while we
                        # just care about JavaScript.
                        if curr_buf:
                            if "__file_local__" in elem.get("attributes", "").split():
                                # this is a file-local element in a different blob,
                                # don't look at it
                                raise KeyError
                        elem = elem.names[p]
                except KeyError:
                    continue
                hits.append( (elem, (blob, list(lpath[:-1]))) )
        return hits
    def toplevel_cplns(self, prefix=None, ilk=None, ctlr=None):
        """Return completion info for all top-level names matching the
        given prefix and ilk in all blobs in this lib.

            "prefix" is a 3-character prefix with which to filter top-level
                names. If None (or not specified), results are not filtered
                based on the prefix.
            "ilk" is a symbol type (e.g. "class", "variable", "function")
                with which to filter results. If None (or not specified),
                results of any ilk are returned.
            "ctlr" (optional) is an EvalController instance. If
                specified it should be used in the normal way (logging,
                checking .is_aborted()).

        Returns a list of 2-tuples: (<ilk>, <name>).

        Note: the list is not sorted, because often some special sorting
        is required for the different completion evaluators that might use
        this API.
        """
        self.ensure_all_dirs_scanned(ctlr=ctlr)
        cplns = []
        # Naive implementation (no caching)
        for dir in self.dirs:
            if ctlr and ctlr.is_aborted():
                log.debug("ctlr aborted")
                break
            try:
                toplevelname_index = self.lang_zone.load_index(
                    dir, "toplevelname_index")
            except EnvironmentError:
                # No toplevelname_index for this dir likely indicates that
                # there weren't any files of the current lang in this dir.
                continue
            cplns += toplevelname_index.toplevel_cplns(prefix=prefix, ilk=ilk)
        return cplns
    def _importables_from_dir(self, dir):
        # Cached wrapper around the import handler's directory listing.
        if dir not in self._importables_from_dir_cache:
            self._importables_from_dir_cache[dir] \
                = self.import_handler.find_importables_in_dir(dir)
        return self._importables_from_dir_cache[dir]
    def _dbsubpath_from_blobname(self, blobname, ctlr=None,
                                 only_look_in_db=False):
        """Return the subpath to the dbfile for the given blobname,
        or None if not found.

        Remember that this is complicated by possible multi-level
        imports. E.g. "import foo.bar" or "import foo" where 'foo'
        refers to 'foo/__init__.py'.

        Unless `only_look_in_db` is true, files providing the blob may
        be scanned and loaded into the db on the fly.

        The lock is (re-)acquired here; it is re-entrant so callers
        holding it are fine.
        """
        assert blobname is not None, "'blobname' cannot be None"
        lang_zone = self.lang_zone
        self._acquire_lock()
        try:
            # Use our weak cache to try to return quickly.
            if blobname in self._dir_and_blobbase_from_blobname:
                blobdir, blobbase \
                    = self._dir_and_blobbase_from_blobname[blobname]
                # Check it. The actual info for that dir may have changed.
                dbfile_from_blobname = lang_zone.dfb_from_dir(blobdir)
                if blobbase in dbfile_from_blobname:
                    log.debug("have blob '%s' in '%s'? yes (in weak cache)",
                              blobname, blobdir)
                    return join(lang_zone.dhash_from_dir(blobdir),
                                dbfile_from_blobname[blobbase])
                # Drop from weak cache.
                del self._dir_and_blobbase_from_blobname[blobname]
            # Brute force: look in each dir.
            blobparts = blobname.split(self.import_handler.sep)
            blobbase = blobparts[-1]
            for dir in self.dirs:
                if ctlr and ctlr.is_aborted():
                    log.debug("aborting search for blob '%s' on %s: "
                              "ctlr aborted", blobname, self)
                    return None
                # Is the blob in 'blobdir' (i.e. a non-multi-level import
                # that has been scanned already).
                blobdir = join(dir, *blobparts[:-1])
                dbfile_from_blobname = lang_zone.dfb_from_dir(blobdir, {})
                if self.lang == "Perl":
                    # Perl uses the full blob name - not just the blob base,
                    # see bug 89106 for details.
                    if blobname in dbfile_from_blobname:
                        self._dir_and_blobbase_from_blobname[blobname] \
                            = (blobdir, blobname)
                        log.debug("have blob '%s' in '%s'? yes (in dir index)",
                                  blobname, blobdir)
                        return join(lang_zone.dhash_from_dir(blobdir),
                                    dbfile_from_blobname[blobname])
                if blobbase in dbfile_from_blobname:
                    self._dir_and_blobbase_from_blobname[blobname] \
                        = (blobdir, blobbase)
                    log.debug("have blob '%s' in '%s'? yes (in dir index)",
                              blobname, blobdir)
                    return join(lang_zone.dhash_from_dir(blobdir),
                                dbfile_from_blobname[blobbase])
                importables = self._importables_from_dir(blobdir)
                # 'importables' look like, for Python:
                #   {'foo': ('foo.py', None, False),
                #    'pkg': ('pkg/__init__.py', '__init__', False)}
                # for Perl:
                #   {'LWP': ('LWP.pm', None, True),
                #    'File': (None, None, True)}
                #      |        |     |     `-- is-dir-import
                #      |        |     `-- subdir-blobbase
                #      |        `-- blobfile
                #      `-- blobbase
                if blobbase not in importables:
                    continue
                blobfile, subdir_blobbase, is_dir_import = importables[blobbase]
                if blobfile is None:
                    # There isn't an actual importable file here -- just
                    # a dir prefix to a multidir import.
                    log.debug("have blob '%s' in %s? no", blobname, self)
                    continue
                elif os.sep in blobfile:
                    # This is an import from a subdir. We need to get a new dbf.
                    blobdir = join(blobdir, dirname(blobfile))
                    blobfile = basename(blobfile)
                    blobbase = subdir_blobbase
                    dbfile_from_blobname = lang_zone.dfb_from_dir(blobdir, {})
                    if blobbase in dbfile_from_blobname:
                        self._dir_and_blobbase_from_blobname[blobname] \
                            = (blobdir, blobbase)
                        log.debug("have blob '%s' in '%s'? yes (in dir index)",
                                  blobname, blobdir)
                        return join(lang_zone.dhash_from_dir(blobdir),
                                    dbfile_from_blobname[blobbase])
                # The file isn't loaded.
                if not only_look_in_db:
                    log.debug("%s importables in '%s':\n    %s", self.lang,
                              blobdir, importables)
                    log.debug("'%s' likely provided by '%s' in '%s': "
                              "attempting load", blobname, blobfile, blobdir)
                    try:
                        buf = self.mgr.buf_from_path(
                                join(blobdir, blobfile), self.lang)
                    except (EnvironmentError, CodeIntelError) as ex:
                        # This can occur if the path does not exist, such as a
                        # broken symlink, or we don't have permission to read
                        # the file, or the file does not contain text.
                        continue
                    buf.scan_if_necessary()
                    dbfile_from_blobname = lang_zone.dfb_from_dir(blobdir, {})
                    if self.lang == "Perl":
                        # Perl uses the full blob name - not just the blob base,
                        # see bug 89106 for details.
                        if blobname in dbfile_from_blobname:
                            self._dir_and_blobbase_from_blobname[blobname] \
                                = (blobdir, blobname)
                            log.debug("have blob '%s' in '%s'? yes (in dir index)",
                                      blobname, blobdir)
                            return join(lang_zone.dhash_from_dir(blobdir),
                                        dbfile_from_blobname[blobname])
                    if blobbase in dbfile_from_blobname:
                        self._dir_and_blobbase_from_blobname[blobname] \
                            = (blobdir, blobbase)
                        log.debug("have blob '%s' in '%s'? yes (after load)",
                                  blobname, blobdir)
                        return join(lang_zone.dhash_from_dir(blobdir),
                                    dbfile_from_blobname[blobbase])
            log.debug("have blob '%s' in %s? no", blobname, self)
            return None
        finally:
            self._release_lock()
class LangTopLevelNameIndex(object):
    """A wrapper around the plain-dictionary toplevelname_index for a
    LangZone dir to provide better performance for continual updating
    and some simpler access.

        {ilk -> toplevelname -> blobnames}

    # Problem

    A 'toplevelname_index' is a merge of {blobname -> ilk -> toplevelnames}
    data for all resources in its dir. As those resources are
    continually re-scanned (e.g. as a file is edited in Komodo), it
    would be too expensive to update this index everytime.

    # Solution

    Keep a list of "recent updates" and only merge them into the main
    data when that buf hasn't been updated in "a while" and when needed
    for saving the index. Note: Buffer *removals* are not put on-deck,
    but removed immediately.

    # .get_blobnames(..., ilk=None)

    Originally the toplevelname_index stored {toplevelname -> blobnames}.
    The per-"ilk" level was added afterwards to support occassional ilk
    filtering for PHP (and possible eventually other langs).
    .get_blobnames() still behaves like a {toplevelname -> blobnames}
    mapping, but it provides an optional "ilk" keyword arg to limit the
    results to that ilk.

    # Notes on locking

    This class does not guard its datastructures with locking. It is up
    to the LangZone using this to guard against simultaneous access on
    separate threads.
    """
    def __init__(self, data=None, timeout=90):
        # toplevelname_index data: {ilk -> toplevelname -> blobnames}
        if data is None:
            self._data = {}
        else:
            self._data = data
        # Time (in seconds) to hold a change "on deck".
        # Timed-out changes are merged on .get() and .update().
        self.timeout = timeout
        self._on_deck = {
            # basename                      # the basename of the buf path
            #   -> [timestamp,              # time of the last update
            #       # The dict in res_index, a.k.a. 'res_data'
            #       {blobname -> ilk -> toplevelnames},
            #       # Lazily generated pivot, a.k.a. 'res_data_pivot'
            #       {ilk -> toplevelname -> blobnames}
            #      ]
        }
    def __repr__(self):
        num_toplevelnames = sum(len(v) for v in self._data.values())
        return ("<LangTopLevelNameIndex: %d top-level name(s), "
                "%d update(s) on-deck>"
                % (num_toplevelnames, len(self._on_deck)))
    def _merge_entry(self, base):
        """Fold the single on-deck entry for `base` into self._data and
        drop it from the deck. (Shared by .merge() and .merge_expired().)
        """
        timestamp, res_data, res_data_pivot = self._on_deck[base]
        if res_data_pivot is None:
            res_data_pivot = self._pivot_res_data(res_data)
        # res_data_pivot: {ilk -> toplevelname -> blobnames}
        # "bft" means blobnames_from_toplevelname
        for ilk, bft in res_data_pivot.items():
            data_bft = self._data.setdefault(ilk, {})
            for toplevelname, blobnames in bft.items():
                if toplevelname not in data_bft:
                    data_bft[toplevelname] = blobnames
                else:
                    data_bft[toplevelname].update(blobnames)
        del self._on_deck[base]
    def merge(self):
        """Merge all on-deck changes with `self._data`."""
        for base in list(self._on_deck):
            self._merge_entry(base)
    def merge_expired(self, now):
        """Merge on-deck changes older than `self.timeout` with
        `self._data`. `now` is the current time.
        """
        for base, (timestamp, _res_data, _pivot) in list(self._on_deck.items()):
            if now - timestamp < self.timeout:
                continue
            self._merge_entry(base)
    @property
    def data(self):
        # The fully merged {ilk -> toplevelname -> blobnames} mapping.
        self.merge()
        return self._data
    def update(self, base, old_res_data, new_res_data):
        """Replace `base`'s old scan data with its new data (on-deck)."""
        now = time.time()
        self.remove(base, old_res_data)
        self._on_deck[base] = [now, new_res_data, None]
        self.merge_expired(now)
    def remove(self, base, old_res_data):
        """Remove `base`'s data immediately (removals are not put on-deck)."""
        if base in self._on_deck:
            del self._on_deck[base]
        else:
            # Remove old refs from current data.
            #   old_res_data: {blobname -> ilk -> toplevelnames}
            #   self._data:   {ilk -> toplevelname -> blobnames}
            for blobname, toplevelnames_from_ilk in old_res_data.items():
                for ilk, toplevelnames in toplevelnames_from_ilk.items():
                    for toplevelname in toplevelnames:
                        try:
                            self._data[ilk][toplevelname].remove(blobname)
                        except KeyError:
                            pass  # ignore this for now, might indicate corruption
                        else:
                            if not self._data[ilk][toplevelname]:
                                del self._data[ilk][toplevelname]
                            if not self._data.get(ilk):
                                del self._data[ilk]
    def _pivot_res_data(self, res_data):
        # Pivot {blobname -> ilk -> toplevelnames} into
        # {ilk -> toplevelname -> blobnames}.
        res_data_pivot = {}
        for blobname, toplevelnames_from_ilk in res_data.items():
            for ilk, toplevelnames in toplevelnames_from_ilk.items():
                pivot_bft = res_data_pivot.setdefault(ilk, {})
                for toplevelname in toplevelnames:
                    if toplevelname not in pivot_bft:
                        pivot_bft[toplevelname] = set([blobname])
                    else:
                        pivot_bft[toplevelname].add(blobname)
        return res_data_pivot
    def toplevel_cplns(self, prefix=None, ilk=None):
        """Return completion info for all top-level names matching the
        given prefix and ilk.

            "prefix" is a 3-character prefix with which to filter top-level
                names. If None (or not specified), results are not filtered
                based on the prefix.
            "ilk" is a symbol type (e.g. "class", "variable", "function")
                with which to filter results. If None (or not specified),
                results of any ilk are returned.

        Returns a list of 2-tuples: (<ilk>, <name>).
        """
        self.merge_expired(time.time())
        # Need to check merged and on-deck items:
        cplns = []
        # ...on-deck items
        for base, (timestamp, res_data,
                   res_data_pivot) in list(self._on_deck.items()):
            if res_data_pivot is None:
                # Cache the pivot on the on-deck entry.
                res_data_pivot = self._on_deck[base][2] \
                    = self._pivot_res_data(res_data)
            # res_data_pivot: {ilk -> toplevelname -> blobnames}
            if ilk is None:
                for i, bft in res_data_pivot.items():
                    cplns += [(i, toplevelname) for toplevelname in bft]
            elif ilk in res_data_pivot:
                cplns += [(ilk, toplevelname)
                          for toplevelname in res_data_pivot[ilk]]
        # ...merged data
        # self._data: {ilk -> toplevelname -> blobnames}
        if ilk is None:
            for i, bft in self._data.items():
                cplns += [(i, toplevelname) for toplevelname in bft]
        elif ilk in self._data:
            cplns += [(ilk, toplevelname)
                      for toplevelname in self._data[ilk]]
        # Naive implementation: Instead of maintaining a separate
        # 'toplevelprefix_index' (as we do for StdLibsZone and CatalogsZone)
        # for now we'll just gather all results and filter on the prefix
        # here. Only if this proves to be a perf issue will we add the
        # complexity of another index:
        #   {ilk -> prefix -> toplevelnames}
        if prefix is not None:
            cplns = [(i, t) for i, t in cplns if t.startswith(prefix)]
        return cplns
    def get_blobnames(self, toplevelname, default=None, ilk=None):
        """Return the blobnames defining the given toplevelname.

        If "ilk" is given then only symbols of that ilk will be considered.
        If not match is found the "default" is returned.
        """
        self.merge_expired(time.time())
        blobnames = set()
        # First check on-deck items.
        for base, (timestamp, res_data,
                   res_data_pivot) in list(self._on_deck.items()):
            if res_data_pivot is None:
                # Cache the pivot on the on-deck entry.
                res_data_pivot = self._on_deck[base][2] \
                    = self._pivot_res_data(res_data)
            # res_data_pivot: {ilk -> toplevelname -> blobnames}
            if ilk is None:
                for bft in res_data_pivot.values():
                    if toplevelname in bft:
                        blobnames.update(bft[toplevelname])
            elif ilk in res_data_pivot:
                if toplevelname in res_data_pivot[ilk]:
                    blobnames.update(res_data_pivot[ilk][toplevelname])
        #TODO: Put lookup in merged data ahead of lookup in on-deck -- so
        #      we don't do on-deck work if not necessary.
        # Then, fallback to already merged data.
        # self._data: {ilk -> toplevelname -> blobnames}
        if ilk is None:
            for bft in self._data.values():
                if toplevelname in bft:
                    blobnames.update(bft[toplevelname])
        elif ilk in self._data:
            if toplevelname in self._data[ilk]:
                blobnames.update(self._data[ilk][toplevelname])
        if blobnames:
            return blobnames
        return default
class LangZone(object):
"""Singleton zone managing a particular db/$lang/... area.
# caching and memory control
We cache all retrieved indices and blobs and maintain their latest
access time. To try to manage memory consumption, we rely on a
bookkeeper thread (the indexer) to periodically call .cull_mem() --
which unloads cache items that have not been accessed in a while.
(TODO:
- Get the indexer to actually call .cull_mem() and .save()
periodically.
- Test that .cull_mem() actually results in the process releasing
memory.)
# robustness (TODO)
Work should be done to improve robustness.
- Collect filesystem interactions in one place.
- Rationalize OSError handling.
- Consider a journal system, if necessary/feasible. My hope is to
get away without one and rely on graceful recovery. The db does
not store critical info so can allow some loss of data (it can all
be regenerated).
"""
toplevelname_index_class = LangTopLevelNameIndex
    def __init__(self, mgr, lang):
        """Create the zone for `lang` rooted at db/<safe-lang>/...

        `mgr` is the codeintel Manager; `lang` must match the lang name
        stored in the db exactly (case-sensitive, see _check_lang).
        """
        self.mgr = mgr
        self.db = mgr.db
        self.lang = lang
        self.base_dir = join(self.db.base_dir, "db",
                             util.safe_lang_from_lang(lang))
        self._check_lang(lang)
        self._hook_handlers = self.mgr.hook_handlers_from_lang(lang)
        self._lock = threading.RLock()
        # Caches: dir -> hash, and (name, dirs) -> LangDirsLib.
        self._dhash_from_dir_cache = {}
        self._dirslib_cache = {}
        # We cache the set of recent indeces and blobs in memory.
        #   {db-subpath: [index-object, <atime>]),
        #    ...}
        # For example:
        #   {'7bce640bc48751b128af5c8bf5df8412/res_index':
        #       [<res-index>, 1158289000]),
        #    ...}
        self._index_and_atime_from_dbsubpath = {}
        #TODO-PERF: Use set() object for this? Compare perf.
        self._is_index_dirty_from_dbsubpath = {} # set of dirty indeces
        ##TODO: blob caching and *use* this
        #self._blob_and_atime_from_dbsubpath = {}
        #XXX Need a 'dirty-set' for blobs? No, because currently
        #    .update_buf_data() saves blob changes to disk immediately. Not
        #    sure that is best for perf. Definitely not ideal for the
        #    "editset".
def __repr__(self):
return "<%s lang db>" % self.lang
    def _acquire_lock(self):
        # Guards all in-memory index/cache state of this zone (RLock,
        # so nested acquisition from the same thread is fine).
        self._lock.acquire()
    def _release_lock(self):
        self._lock.release()
def _check_lang(self, lang):
"""Ensure that the given lang matches case exactly with the lang
in the db. If this invariant is broken, then weird things with
caching can result.
"""
if exists(self.base_dir):
lang_path = join(self.base_dir, "lang")
try:
fin = open(lang_path, 'r')
except EnvironmentError as ex:
self.db.corruption("LangZone._check_lang",
"could not open `%s': %s" % (lang_path, ex),
"recover")
fin = open(lang_path, 'w')
try:
fin.write(lang)
finally:
fin.close()
else:
try:
lang_on_disk = fin.read().strip()
finally:
fin.close()
assert lang_on_disk == lang
#TODO: If Database.dhash_from_dir() grows caching, then this
#      shouldn't bother.
def dhash_from_dir(self, dir):
    """Return (and memoize) the db hash-dir name for `dir`."""
    try:
        return self._dhash_from_dir_cache[dir]
    except KeyError:
        dhash = self.db.dhash_from_dir(dir)
        self._dhash_from_dir_cache[dir] = dhash
        return dhash
def dfb_from_dir(self, dir, default=None):
    """Get the {blobname -> dbfile} mapping index for the given dir.

    'dfb' stands for 'dbfile_from_blobname'.
    This must be called with the lock held.

    @param dir {str} the source directory whose blob index to load.
    @param default value returned when the index does not exist; if
        None, load_index propagates the underlying error instead.
    """
    return self.load_index(dir, "blob_index", default)
def get_buf_scan_time(self, buf):
    """Return the recorded scan time for `buf`, or None if the buffer
    is not in the res_index for its directory.
    """
    #TODO Canonicalize path (or assert that it is canonicalized)
    self._acquire_lock()
    try:
        directory, base = split(buf.path)
        entry = self.load_index(directory, "res_index", {}).get(base)
        if entry is None:
            return None
        return entry[0]
    finally:
        self._release_lock()
def get_buf_data(self, buf):
    """Return (scan_time, scan_error, blob_from_lang) for `buf`.

    Raises NotFoundInDatabase if the buffer (or any of its blobs) is
    missing or corrupted; corrupted entries are removed on the way out
    so a later rescan can regenerate them.
    """
    #TODO Canonicalize path (or assert that it is canonicalized)
    #     Should have a Resource object that we pass around that
    #     handles all of this.
    self._acquire_lock()
    try:
        dir, base = split(buf.path)
        res_index = self.load_index(dir, "res_index", {})
        if base not in res_index:
            raise NotFoundInDatabase("%s buffer '%s' not found in database"
                                     % (buf.lang, buf.path))
        scan_time, scan_error, res_data = res_index[base]

        blob_from_lang = {}
        if res_data:
            try:
                dbfile_from_blobname = self.dfb_from_dir(dir)
            except EnvironmentError as ex:
                # DB corruption will be noted in remove_buf_data()
                self.remove_buf_data(buf)
                raise NotFoundInDatabase("%s buffer '%s' not found in database"
                                         % (buf.lang, buf.path))
            dhash = self.dhash_from_dir(dir)
            for blobname in res_data:
                dbsubpath = join(dhash, dbfile_from_blobname[blobname])
                try:
                    blob = self.load_blob(dbsubpath)
                except ET.XMLParserError as ex:
                    # Unparseable blob file: note corruption, drop the
                    # buffer's data so it can be regenerated.
                    self.db.corruption("LangZone.get_buf_data",
                        "could not parse dbfile for '%s' blob: %s"\
                        % (blobname, ex),
                        "recover")
                    self.remove_buf_data(buf)
                    raise NotFoundInDatabase(
                        "`%s' buffer `%s' blob was corrupted in database"
                        % (buf.path, blobname))
                except EnvironmentError as ex:
                    # Missing/unreadable blob file: same recovery path.
                    self.db.corruption("LangZone.get_buf_data",
                        "could not read dbfile for '%s' blob: %s"\
                        % (blobname, ex),
                        "recover")
                    self.remove_buf_data(buf)
                    raise NotFoundInDatabase(
                        "`%s' buffer `%s' blob not found in database"
                        % (buf.path, blobname))
                lang = blob.get("lang")
                assert lang is not None
                blob_from_lang[lang] = blob
        return scan_time, scan_error, blob_from_lang
    finally:
        self._release_lock()
def remove_path(self, path):
    """Remove the given resource from the database.

    Removes the res_index entry, the associated blob files on disk,
    their blob_index entries and (for import-everything langs) the
    toplevelname_index data.
    """
    #TODO Canonicalize path (or assert that it is canonicalized)
    #     Should have a Resource object that we pass around that
    #     handles all of this.
    self._acquire_lock()
    try:
        dir, base = split(path)
        res_index = self.load_index(dir, "res_index", {})
        try:
            scan_time, scan_error, res_data = res_index[base]
        except KeyError:
            # This resource isn't loaded in the db. Nothing to remove.
            return

        try:
            blob_index = self.load_index(dir, "blob_index")
        except EnvironmentError as ex:
            self.db.corruption("LangZone.remove_path",
                "could not read blob_index for '%s' dir: %s" % (dir, ex),
                "recover")
            blob_index = {}
        is_hits_from_lpath_lang = self.lang in self.db.import_everything_langs
        if is_hits_from_lpath_lang:
            try:
                toplevelname_index = self.load_index(dir, "toplevelname_index")
            except EnvironmentError as ex:
                self.db.corruption("LangZone.remove_path",
                    "could not read toplevelname_index for '%s' dir: %s"
                    % (dir, ex),
                    "recover")
                toplevelname_index = self.toplevelname_index_class()

        dhash = self.dhash_from_dir(dir)
        del res_index[base]
        for blobname in res_data:
            try:
                dbfile = blob_index[blobname]
            except KeyError:
                blob_index_path = join(dhash, "blob_index")
                self.db.corruption("LangZone.remove_path",
                    "'%s' blob not in '%s'" \
                    % (blobname, blob_index_path),
                    "ignore")
                continue
            del blob_index[blobname]
            # Fix: the loop variable used to be named `path`, shadowing
            # this method's `path` parameter.
            for blob_path in glob(join(self.base_dir, dhash, dbfile+".*")):
                log.debug("fs-write: remove %s blob file '%s/%s'",
                          self.lang, dhash, basename(blob_path))
                os.remove(blob_path)
        if is_hits_from_lpath_lang:
            toplevelname_index.remove(base, res_data)

        self.changed_index(dir, "res_index")
        self.changed_index(dir, "blob_index")
        if is_hits_from_lpath_lang:
            self.changed_index(dir, "toplevelname_index")
    finally:
        self._release_lock()
#TODO Database.clean() should remove dirs that have no
#     blob_index entries.
def remove_buf_data(self, buf):
    """Remove the given buffer from the database.

    Thin convenience wrapper around remove_path().
    """
    self.remove_path(buf.path)
def _get_symbols_db_conn(self):
"""
Returns a connection to the SQLite3 database that houses known symbols.
If the database does not exist yet, create it.
This database is used for quick "Go to Symbol"-type lookups and is
located in the root of the "codeintel/" directory.
@return apsw.Connection
"""
try:
apsw
except NameError:
return None
symbols_db = join(self.db.base_dir, "symbols.db")
exists = isfile(symbols_db)
try:
# apsw.Connection() requires a unicode filename, which is not
# always the case -- particularly on Windows.
if isinstance(symbols_db, six.binary_type):
symbols_db = symbols_db.decode(sys.getfilesystemencoding())
conn = apsw.Connection(symbols_db)
except:
log.exception("Unable to create/open symbols.db")
return None
cursor = conn.cursor()
if not exists:
cursor.execute("""
CREATE TABLE symbols ('symbol_id' INTEGER PRIMARY KEY,
'symbol' TEXT NOT NULL,
'type' TEXT NOT NULL,
'filepath' TEXT NOT NULL,
'lineno' INTEGER NOT NULL,
'lang' TEXT NOT NULL,
'parent' TEXT)
""")
cursor.execute("CREATE UNIQUE INDEX 'symbol_id' on symbols (symbol_id ASC)")
try:
cursor.execute("COMMIT")
except apsw.SQLError:
log.warn("Unable to create symbols.db: possibly exists?")
return None
return conn
def _update_symbols_db(self, conn, action, blob):
    """
    Update the symbols in the SQLite3 symbol database contained in the given
    blob's filename.
    @param conn The SQLite3 database connection returned by
                `self._get_symbols_db_conn()` (may be None, in which
                case this is a no-op).
    @param action The database action to perform, either "add", "remove", or
                  "update".
    @param blob The blob that contains the file whose symbols are to be
                stored in the database.
    """
    if not conn:
        return

    cursor = conn.cursor()
    cursor.execute("BEGIN")
    # Delete any symbols in the blob's file.
    if action == "update" or action == "remove":
        cursor.execute("DELETE FROM symbols WHERE filepath=?",
                       (blob.get("src"),))
    # Add all of blob's symbols recursively.
    if action == "add" or action == "update":
        def insert_all(elem, parent=None):
            """
            Inserts the given element (assuming it is a valid symbol) and
            all of its valid children into the symbol database.
            @param elem The element to insert.
            @param parent Optional parent of `elem` for context.
            """
            symbol = elem.get("name", "")
            # NOTE: `type` shadows the builtin within this function.
            type = elem.get("ilk", "")
            src = blob.get("src", "")
            lineno = elem.get("line", "")
            lang = blob.get("lang")
            # Skip unnamed/typeless elements and function arguments.
            valid = symbol and type and src and lineno and \
                type != 'argument'
            if valid:
                cursor.execute("INSERT INTO symbols VALUES (?, ?, ?, ?, ?, ?, ?)",
                               (None, symbol, type, src, lineno, lang,
                                parent and parent.get("name", "") or None))
            for n in elem:
                # Children of an invalid element get no parent context.
                insert_all(n, valid and elem)
        insert_all(blob)
    try:
        # NOTE(review): lowercase "commit" vs the "BEGIN" above — works,
        # but inconsistent casing; confirm before normalizing.
        cursor.execute("commit")
    except apsw.SQLError:
        log.error("Unable to save to symbols db.")
def _close_symbols_db_conn(self, conn):
"""
Closes the connection to the SQLite3 symbol database.
@param conn The SQLite3 database connection returned by
`self._get_symbols_db_conn()`.
"""
if conn:
conn.close()
def update_buf_data(self, buf, scan_tree, scan_time, scan_error,
                    skip_scan_time_check=False):
    """Update this LangZone with the buffer data.

    @param buf {CitadelBuffer} the buffer whose data is being added
        to the database.
    @param scan_tree {ciElementTree} the CIX scan data. Might be None
        if there was an early scanning failure.
    @param scan_time {timestamp} the time of the scan, typically the
        mtime of the file
    @param scan_error {str} an error string if scanning failed, or
        None if it was successful.
    @param skip_scan_time_check {boolean} (default False) is a
        boolean indicating if the buffer data should be updated even
        if `scan_time` is <= that in the database.
    """
    self._acquire_lock()
    try:
        #TODO: Canonicalize path (or assert that it is canonicalized)
        dir, base = split(buf.path)

        # Get the current data, if any.
        res_index = self.load_index(dir, "res_index", {})
        res_index_has_changed = False
        blob_index = self.load_index(dir, "blob_index", {})
        blob_index_has_changed = False
        is_hits_from_lpath_lang = self.lang in self.db.import_everything_langs
        if is_hits_from_lpath_lang:
            #TODO: Not sure {} for a default is correct here.
            toplevelname_index = self.load_index(dir, "toplevelname_index", {})
            toplevelname_index_has_changed = False
        try:
            (old_scan_time, old_scan_error, old_res_data) = res_index[base]
        except KeyError:    # adding a new entry
            (old_scan_time, old_scan_error, old_res_data) = None, None, {}
        else:               # updating an existing entry
            # Skip stale updates unless explicitly forced.
            if not skip_scan_time_check and scan_time is not None \
               and scan_time <= old_scan_time:
                log.debug("skipping db update for '%s': %s < %s and "
                          "no 'skip_scan_time_check' option",
                          base, scan_time, old_scan_time)
                return

        log.debug("update from %s buf '%s'", buf.lang, buf.path)

        # Parse the tree and get the list of blobnames.
        # res_data: {blobname -> ilk -> toplevelnames}
        new_res_data = {}
        new_blobnames_and_blobs = []
        if scan_tree:
            for blob in scan_tree[0]:
                lang = blob.get("lang")
                assert blob.get("lang") == self.lang, "'%s' != '%s' (blob %r)" % (blob.get("lang"), self.lang, blob)
                blobname = blob.get("name")
                toplevelnames_from_ilk = new_res_data.setdefault(blobname, {})
                for toplevelname, elem in six.iteritems(blob.names):
                    if "__file_local__" in elem.get("attributes", "").split():
                        # don't put file local things in toplevel names
                        continue
                    ilk = elem.get("ilk") or elem.tag
                    if ilk not in toplevelnames_from_ilk:
                        toplevelnames_from_ilk[ilk] = set([toplevelname])
                    else:
                        toplevelnames_from_ilk[ilk].add(toplevelname)
                new_blobnames_and_blobs.append((blobname, blob))

        # Determine necessary changes to res_index.
        if scan_error:
            if (scan_time != old_scan_time
                or scan_error != old_scan_error):
                # Preserve the last good res_data on scan failure.
                res_index[base] = (scan_time, scan_error,
                                   old_res_data)
                res_index_has_changed = True
                log.error("db scan error on '%s': %s", buf.path, scan_error)
        else:
            # Only consider new blobs if there wasn't a scan error.
            # I.e., we want to preserve the last good scan info.
            if (scan_time != old_scan_time
                or scan_error != old_scan_error
                or new_res_data != old_res_data):
                res_index[base] = (scan_time, scan_error,
                                   new_res_data)
                res_index_has_changed = True
            if is_hits_from_lpath_lang:
                if new_res_data != old_res_data:
                    toplevelname_index.update(base,
                                              old_res_data, new_res_data)
                    toplevelname_index_has_changed = True

        # Determine necessary changes to blob_index and the
        # dbfiles and then make them.
        dbfile_changes = []
        for blobname, blob in new_blobnames_and_blobs:
            if blobname in old_res_data:
                dbfile_changes.append(("update", blobname, blob))
            else:
                dbfile_changes.append(("add", blobname, blob))
        for blobname in old_res_data:
            if blobname not in new_res_data:
                dbfile_changes.append(("remove", blobname, None))

        dhash = self.dhash_from_dir(dir)
        conn = self._get_symbols_db_conn()
        for action, blobname, blob in dbfile_changes:
            if action == "add":
                dbfile = self.db.bhash_from_blob_info(
                    buf.path, self.lang, blobname)
                blob_index[blobname] = dbfile
                blob_index_has_changed = True
                dbdir = join(self.base_dir, dhash)
                if not exists(dbdir):
                    self._mk_dbdir(dbdir, dir)
                #XXX What to do on write failure?
                log.debug("fs-write: %s blob '%s/%s'",
                          self.lang, dhash, dbfile)
                if blob.get("src") is None:
                    blob.set("src", buf.path)   # for defns_from_pos() support
                ET.ElementTree(blob).write(join(dbdir, dbfile+".blob"))
            elif action == "remove":
                dbfile = blob_index[blobname]
                del blob_index[blobname]
                blob_index_has_changed = True
                #XXX What to do on removal failure?
                log.debug("fs-write: remove %s blob '%s/%s'",
                          self.lang, dhash, dbfile)
                os.remove(join(self.base_dir, dhash, dbfile+".blob"))
            elif action == "update":
                # Try to only change the dbfile on disk if it is
                # different.
                s = BytesIO()
                if blob.get("src") is None:
                    blob.set("src", buf.path)   # for defns_from_pos() support
                ET.ElementTree(blob).write(s)
                new_dbfile_content = s.getvalue()
                dbfile = blob_index[blobname]
                dbpath = join(self.base_dir, dhash, dbfile+".blob")
                # PERF: Might be nice to cache the new dbfile
                #       content for the next time this resource is
                #       updated. For files under edit this will be
                #       common. I.e. just for the "editset".
                try:
                    fin = open(dbpath, 'rb')
                except (OSError, IOError) as ex:
                    # Technically if the dbfile doesn't exist, this
                    # is a sign of database corruption. No matter
                    # though (for this blob anyway), we are about to
                    # replace it.
                    old_dbfile_content = None
                else:
                    try:
                        old_dbfile_content = fin.read()
                    finally:
                        fin.close()
                if new_dbfile_content != old_dbfile_content:
                    if not exists(dirname(dbpath)):
                        self._mk_dbdir(dirname(dbpath), dir)
                    #XXX What to do if fail to write out file?
                    log.debug("fs-write: %s blob '%s/%s'",
                              self.lang, dhash, dbfile)
                    fout = open(dbpath, 'wb')
                    try:
                        fout.write(new_dbfile_content)
                    finally:
                        fout.close()
            # Mirror the change into the symbols db for Go-to-Symbol.
            self._update_symbols_db(conn, action, blob)
        self._close_symbols_db_conn(conn)

        if res_index_has_changed:
            self.changed_index(dir, "res_index")
        if blob_index_has_changed:
            self.changed_index(dir, "blob_index")
        if is_hits_from_lpath_lang and toplevelname_index_has_changed:
            self.changed_index(dir, "toplevelname_index")
    finally:
        self._release_lock()
#TODO Database.clean() should remove dirs that have no
#     blob_index entries.
def _mk_zone_skel(self):
    """Create this zone's base dir and its `lang` marker file."""
    log.debug("fs-write: mkdir '%s'", self.base_dir)
    os.makedirs(self.base_dir)
    log.debug("fs-write: create 'lang'")
    with codecs.open(join(self.base_dir, "lang"), 'wb', 'utf-8') as fout:
        fout.write(self.lang)
def _mk_dbdir(self, dbdir, dir):
    # Create the hash-dir `dbdir` for source dir `dir` (creating the
    # zone skeleton first if necessary) and record the source dir in a
    # "path" file so the mapping can be recovered later.
    if not exists(self.base_dir):
        self._mk_zone_skel()
    log.debug("fs-write: mkdir '%s'", dbdir[len(self.base_dir)+1:])
    os.mkdir(dbdir)
    log.debug("fs-write: '%s/path'", dbdir[len(self.base_dir)+1:])
    fout = codecs.open(join(dbdir, "path"), 'wb', 'utf-8')
    try:
        fout.write(dir)
    finally:
        fout.close()
def load_blob(self, dbsubpath):
    """Load and return the blob element at `dbsubpath` (relative to the
    zone base dir, without the ".blob" extension).

    This must be called with the lock held.
    """
    log.debug("TODO: LangZone.load_blob: add blob caching!")
    log.debug("fs-read: load %s blob '%s'", self.lang, dbsubpath)
    dbpath = join(self.base_dir, dbsubpath+".blob")
    blob = ET.parse(dbpath).getroot()
    for hook_handler in self._hook_handlers:
        try:
            hook_handler.post_db_load_blob(blob)
        except:
            # A broken hook must not prevent loading the blob.
            log.exception("error running hook: %r.post_db_load_blob(%r)",
                          hook_handler, blob)
    return blob
def load_index(self, dir, index_name, default=None):
    """Get the indicated index.

    "dir" is the dir path this index represents.
    "index_name" is the name of the index.
    "default" (default None) indicate the value to return for
        the index if the index doesn't exist. If not set (or
        None) then an error is raised if the index doesn't exist.
        (NOTE(review): the original said "OSError"; confirm what
        db.load_pickle actually raises.)

    The index is loaded from a pickle on disk, if necessary, put
    into the cache system, and returned.

    This must be called with the lock held.
    """
    self._acquire_lock()
    try:
        dbsubpath = join(self.db.dhash_from_dir(dir), index_name)

        # If index path is in the cache: return it, update its atime.
        now = time.time()
        if dbsubpath in self._index_and_atime_from_dbsubpath:
            log.debug("cache-read: load %s index '%s'", self.lang, dbsubpath)
            self._index_and_atime_from_dbsubpath[dbsubpath][1] = now
            return self._index_and_atime_from_dbsubpath[dbsubpath][0]

        # Otherwise, load it.
        log.debug("fs-read: load %s index '%s'", self.lang, dbsubpath)
        dbpath = join(self.base_dir, dbsubpath)
        index = self.db.load_pickle(dbpath, default)
        if index_name == "toplevelname_index":
            # Wrap raw pickled data in its index class.
            index = self.toplevelname_index_class(index)
        self._index_and_atime_from_dbsubpath[dbsubpath] = [index, now]
        return index
    finally:
        self._release_lock()
def changed_index(self, dir, index_name):
    """Note that we've changed this index (so it can be saved as
    appropriate). The index must already be loaded in the cache.
    """
    self._acquire_lock()
    try:
        dbsubpath = join(self.db.dhash_from_dir(dir), index_name)
        self._index_and_atime_from_dbsubpath[dbsubpath][1] = time.time()
        self._is_index_dirty_from_dbsubpath[dbsubpath] = True
    finally:
        self._release_lock()
def save_index(self, dbsubpath, index):
    """Pickle `index` to disk under this zone's base dir at `dbsubpath`."""
    data = index
    if isinstance(data, self.toplevelname_index_class):
        # Persist only the raw data, not the wrapper object.
        data = data.data
    self.db.save_pickle(join(self.base_dir, dbsubpath), data)
def save(self):
    """Flush every dirty in-memory index to disk and clear the dirty set."""
    self._acquire_lock()
    try:
        cache = self._index_and_atime_from_dbsubpath
        for dbsubpath in self._is_index_dirty_from_dbsubpath:
            self.save_index(dbsubpath, cache[dbsubpath][0])
        self._is_index_dirty_from_dbsubpath = {}
    finally:
        self._release_lock()
def cull_mem(self):
    """Drop indices from the cache that have not been accessed in over
    5 minutes, saving dirty ones to disk first.

    To attempt to keep memory consumption under control we want to
    ensure we don't keep everything cached from the db in memory
    until process completion. The plan is to have a thread
    periodically cull memory.
    """
    #TOTEST: Does Python/Komodo actually release this memory or
    #        are we kidding ourselves?
    log.debug("LangZone: culling memory")
    TIME_SINCE_ACCESS = 300.0  # 5 minutes since last access
    self._acquire_lock()
    try:
        N = 30
        if len(self._index_and_atime_from_dbsubpath) < N:
            # Too few indices in memory to bother culling.
            return

        now = time.time()
        # Iterate over a snapshot because entries are deleted as we go.
        for dbsubpath, (index, atime) \
                in list(self._index_and_atime_from_dbsubpath.items()):
            if now - atime > TIME_SINCE_ACCESS:
                if dbsubpath in self._is_index_dirty_from_dbsubpath:
                    # Persist dirty indices before dropping them.
                    self.save_index(dbsubpath, index)
                    del self._is_index_dirty_from_dbsubpath[dbsubpath]
                del self._index_and_atime_from_dbsubpath[dbsubpath]
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.
        log.exception("Exception culling memory")
    finally:
        self._release_lock()
#XXX Database.clean(): Go through each $lang/dir/res_index and
#    clean out files in the index but that don't actually exist
#    anymore.
#XXX Database.clean(): drop memory for indeces that are quite
#    old (say haven't been accessed in 20 minutes).
#XXX Database.check(): Shouldn't have too many cached indeces in
#    memory. How old is the oldest one? Estimate memory size
#    used by all loaded indeces?

# TODO: When a directory no longer exists on the filesystem - should we
#   1) remove the db data, or
#   2) mark it as expired.
# Option 2 would work better for (network) mounted filesystems, as it
# could just be an intermittent issue.
def clean(self):
    """Clean out any expired/old codeintel information.

    Drops the db data for any hash-dir whose recorded source
    directory no longer exists on the filesystem.
    """
    base_dir = self.base_dir
    if not exists(base_dir):
        return
    for d in os.listdir(base_dir):
        path_path = join(base_dir, d, "path")
        if not exists(path_path):
            continue
        # Fix: close the marker file (was a leaked
        # `codecs.open(...).read()` with no close).
        with codecs.open(path_path, encoding="utf-8") as f:
            path = f.read()
        if not exists(path):
            # Referenced directory no longer exists - so remove the db info.
            log.debug("clean:: scanned directory no longer exists: %r",
                      path)
            rmdir(join(base_dir, d))
def get_lib(self, name, dirs):
    """Return (and cache) a LangDirsLib for the given sequence of dirs.

    Dev Notes:
    We make a lib for a particular sequence of dirs a singleton because:
    1. The sequence of dirs for a language's import path tends to
       not change, so the same object will tend to get used.
    2. This allows caching of filesystem lookups to be done naturally
       on the LangDirsLib instance.
    To ensure that this cache doesn't grow unboundedly we only allow
    there to be N cached LangDirsLib's. A good value for N is when
    there are relatively few cache misses. Ideally we'd want to
    count the number of cache misses (i.e. LangDirsLib instance
    creations) for a number of "typical" uses of codeintel -- i.e. a
    long running Komodo profile. Failing that we'll just use N=10.
    """
    assert isinstance(dirs, (tuple, list))
    # Fix: `tuple(set(...))` discarded the caller's dir order (import
    # path precedence) and was nondeterministic across processes due
    # to str hash randomization. Dedup while preserving order instead.
    seen = set()
    ordered = []
    for d in dirs:
        canon = abspath(normpath(expanduser(d)))
        if canon not in seen:
            seen.add(canon)
            ordered.append(canon)
    canon_dirs = tuple(ordered)
    if canon_dirs in self._dirslib_cache:
        return self._dirslib_cache[canon_dirs]

    langdirslib = LangDirsLib(self, self._lock, self.lang, name,
                              canon_dirs)
    # Ensure that these directories are all *up-to-date*.
    langdirslib.ensure_all_dirs_scanned()

    self._dirslib_cache[canon_dirs] = langdirslib

    return langdirslib
def reportMemory(self):
    """
    Report on memory usage from this LangZone.
    @returns {dict} memory usage; keys are the paths, values are a dict of
        "amount" -> number
        "units" -> "bytes" | "count"
        "desc" -> str description
    """
    log.debug("%s LangZone: reporting memory", self.lang)
    # memutils is a Komodo-local helper module (imported lazily since
    # this is only called for diagnostics).
    import memutils
    return {
        "komodo\\codeintel/langzone/%s/index-cache" % (self.lang,): {
            "amount": len(self._index_and_atime_from_dbsubpath),
            "units": "count",
            "desc": "Number of cached indices.",
        },
        "explicit/python/codeintel/%s/index-cache" % (self.lang,): {
            "amount": memutils.memusage(self._index_and_atime_from_dbsubpath),
            "units": "bytes",
            "desc": "The number of bytes of %s codeintel index caches." % (self.lang,),
        },
    }
} | PypiClean |
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/methodcalls/groups/leave_group_call.py | import asyncio
from fipper.exception import NoActiveGroupCall
from fipper.exception import NodeJSNotRunning
from fipper.exception import ClientCallsNotSet
from fipper.exception import NotInGroupCallError
from fipper.xdcalls import NotInGroupCall
from fipper.xdcalls.session import Session
from fipper.viper import Viper
class LeaveGroupCall(Viper):
    # Mixin providing the leave_group_call API method.

    async def leave_group_call(
        self,
        chat_id: int,
    ):
        """Leave a group call

        This method allows to leave a Group Call

        Parameters:
            chat_id (``int``):
                Unique identifier (int) of the target chat.

        Raises:
            ClientCallsNotSet: In case you try
                to call this method without any MtProto client
            NodeJSNotRunning: In case you try
                to call this method without do
                :meth:`~fipper.PyTgCalls.start` before
            NoActiveGroupCall: In case you try
                to edit a not started group call
            NotInGroupCallError: In case you try
                to leave a non-joined group call

        Example:
            .. code-block:: python
                :emphasize-lines: 10-12

                from fipper import Client
                from fipper import idle
                ...

                app = PyTgCalls(client)
                app.start()

                ...  # Call API methods

                app.leave_group_call(
                    -1001185324811,
                )

                idle()
        """
        if self.assistant is not None:
            if self._wait_until_run is not None:
                chat_call = await self.assistant.get_full_chat(
                    chat_id,
                )
                if chat_call is not None:
                    # Correlate the async request with its response.
                    solver_id = Session.generate_session_id(24)

                    async def internal_sender():
                        # Wait for the NodeJS bridge to be up before
                        # sending the leave request.
                        if not self._wait_until_run.done():
                            await self._wait_until_run
                        await self._binding.send({
                            'action': 'leave_call',
                            'chat_id': chat_id,
                            'type': 'requested',
                            'solver_id': solver_id,
                        })
                    asyncio.ensure_future(internal_sender())
                    result = await self._wait_result.wait_future_update(
                        solver_id,
                    )
                    if isinstance(result, NotInGroupCall):
                        raise NotInGroupCallError()
                else:
                    raise NoActiveGroupCall()
            else:
                raise NodeJSNotRunning()
        else:
            raise ClientCallsNotSet()
/Nikola-8.2.4-py3-none-any.whl/nikola/plugins/task/listings.py |
# Copyright © 2012-2023 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Render code listings."""
import os
from collections import defaultdict
import natsort
from pygments import highlight
from pygments.lexers import get_lexer_for_filename, guess_lexer, TextLexer
from nikola.plugin_categories import Task
from nikola import utils
class Listings(Task):
"""Render code listings."""
name = "render_listings"
def register_output_name(self, input_folder, rel_name, rel_output_name):
"""Register proper and improper file mappings."""
self.improper_input_file_mapping[rel_name].add(rel_output_name)
self.proper_input_file_mapping[os.path.join(input_folder, rel_name)] = rel_output_name
self.proper_input_file_mapping[rel_output_name] = rel_output_name
def set_site(self, site):
"""Set Nikola site."""
site.register_path_handler('listing', self.listing_path)
site.register_path_handler('listing_source', self.listing_source_path)
# We need to prepare some things for the listings path handler to work.
self.kw = {
"default_lang": site.config["DEFAULT_LANG"],
"listings_folders": site.config["LISTINGS_FOLDERS"],
"output_folder": site.config["OUTPUT_FOLDER"],
"index_file": site.config["INDEX_FILE"],
"strip_indexes": site.config['STRIP_INDEXES'],
"filters": site.config["FILTERS"],
}
# Verify that no folder in LISTINGS_FOLDERS appears twice (on output side)
appearing_paths = set()
for source, dest in self.kw['listings_folders'].items():
if source in appearing_paths or dest in appearing_paths:
problem = source if source in appearing_paths else dest
utils.LOGGER.error("The listings input or output folder '{0}' appears in more than one entry in LISTINGS_FOLDERS, exiting.".format(problem))
continue
appearing_paths.add(source)
appearing_paths.add(dest)
# improper_input_file_mapping maps a relative input file (relative to
# its corresponding input directory) to a list of the output files.
# Since several input directories can contain files of the same name,
# a list is needed. This is needed for compatibility to previous Nikola
# versions, where there was no need to specify the input directory name
# when asking for a link via site.link('listing', ...).
self.improper_input_file_mapping = defaultdict(set)
# proper_input_file_mapping maps relative input file (relative to CWD)
# to a generated output file. Since we don't allow an input directory
# to appear more than once in LISTINGS_FOLDERS, we can map directly to
# a file name (and not a list of files).
self.proper_input_file_mapping = {}
for input_folder, output_folder in self.kw['listings_folders'].items():
for root, _, files in os.walk(input_folder, followlinks=True):
# Compute relative path; can't use os.path.relpath() here as it returns "." instead of ""
rel_path = root[len(input_folder):]
if rel_path[:1] == os.sep:
rel_path = rel_path[1:]
for f in files + [self.kw['index_file']]:
rel_name = os.path.join(rel_path, f)
rel_output_name = os.path.join(output_folder, rel_path, f)
# Register file names in the mapping.
self.register_output_name(input_folder, rel_name, rel_output_name)
return super().set_site(site)
def gen_tasks(self):
"""Render pretty code listings."""
# Things to ignore in listings
ignored_extensions = (".pyc", ".pyo")
def render_listing(in_name, out_name, input_folder, output_folder, folders=[], files=[]):
needs_ipython_css = False
if in_name and in_name.endswith('.ipynb'):
# Special handling: render ipynbs in listings (Issue #1900)
ipynb_plugin = self.site.plugin_manager.getPluginByName("ipynb", "PageCompiler")
if ipynb_plugin is None:
msg = "To use .ipynb files as listings, you must set up the Jupyter compiler in COMPILERS and POSTS/PAGES."
utils.LOGGER.error(msg)
raise ValueError(msg)
ipynb_compiler = ipynb_plugin.plugin_object
with open(in_name, "r", encoding="utf-8-sig") as in_file:
nb_json = ipynb_compiler._nbformat_read(in_file)
code = ipynb_compiler._compile_string(nb_json)
title = os.path.basename(in_name)
needs_ipython_css = True
elif in_name:
with open(in_name, 'r', encoding='utf-8-sig') as fd:
try:
lexer = get_lexer_for_filename(in_name)
except Exception:
try:
lexer = guess_lexer(fd.read())
except Exception:
lexer = TextLexer()
fd.seek(0)
code = highlight(
fd.read(), lexer,
utils.NikolaPygmentsHTML(in_name, linenos='table'))
title = os.path.basename(in_name)
else:
code = ''
title = os.path.split(os.path.dirname(out_name))[1]
crumbs = utils.get_crumbs(os.path.relpath(out_name,
self.kw['output_folder']),
is_file=True)
permalink = self.site.link(
'listing',
os.path.join(
input_folder,
os.path.relpath(
out_name[:-5], # remove '.html'
os.path.join(
self.kw['output_folder'],
output_folder))))
if in_name:
source_link = permalink[:-5] # remove '.html'
else:
source_link = None
context = {
'code': code,
'title': title,
'crumbs': crumbs,
'permalink': permalink,
'lang': self.kw['default_lang'],
'folders': natsort.natsorted(
folders, alg=natsort.ns.F | natsort.ns.IC),
'files': natsort.natsorted(
files, alg=natsort.ns.F | natsort.ns.IC),
'description': title,
'source_link': source_link,
'pagekind': ['listing'],
}
if needs_ipython_css:
# If someone does not have ipynb posts and only listings, we
# need to enable ipynb CSS for ipynb listings.
context['needs_ipython_css'] = True
self.site.render_template('listing.tmpl', out_name, context)
yield self.group_task()
template_deps = self.site.template_system.template_deps('listing.tmpl', self.site.GLOBAL_CONTEXT)
for input_folder, output_folder in self.kw['listings_folders'].items():
for root, dirs, files in os.walk(input_folder, followlinks=True):
files = [f for f in files if os.path.splitext(f)[-1] not in ignored_extensions]
uptodate = {'c': self.site.GLOBAL_CONTEXT}
for k, v in self.site.GLOBAL_CONTEXT['template_hooks'].items():
uptodate['||template_hooks|{0}||'.format(k)] = v.calculate_deps()
for k in self.site._GLOBAL_CONTEXT_TRANSLATABLE:
uptodate[k] = self.site.GLOBAL_CONTEXT[k](self.kw['default_lang'])
# save navigation links as dependencies
uptodate['navigation_links'] = uptodate['c']['navigation_links'](self.kw['default_lang'])
uptodate['kw'] = self.kw
uptodate2 = uptodate.copy()
uptodate2['f'] = files
uptodate2['d'] = dirs
# Compute relative path; can't use os.path.relpath() here as it returns "." instead of ""
rel_path = root[len(input_folder):]
if rel_path[:1] == os.sep:
rel_path = rel_path[1:]
rel_name = os.path.join(rel_path, self.kw['index_file'])
rel_output_name = os.path.join(output_folder, rel_path, self.kw['index_file'])
# Render all files
out_name = os.path.join(self.kw['output_folder'], rel_output_name)
yield utils.apply_filters({
'basename': self.name,
'name': out_name,
'file_dep': template_deps,
'targets': [out_name],
'actions': [(render_listing, [None, out_name, input_folder, output_folder, dirs, files])],
# This is necessary to reflect changes in blog title,
# sidebar links, etc.
'uptodate': [utils.config_changed(uptodate2, 'nikola.plugins.task.listings:folder')],
'clean': True,
}, self.kw["filters"])
for f in files:
if f == '.DS_Store':
continue
ext = os.path.splitext(f)[-1]
if ext in ignored_extensions:
continue
in_name = os.path.join(root, f)
# Record file names
rel_name = os.path.join(rel_path, f + '.html')
rel_output_name = os.path.join(output_folder, rel_path, f + '.html')
self.register_output_name(input_folder, rel_name, rel_output_name)
# Set up output name
out_name = os.path.join(self.kw['output_folder'], rel_output_name)
# Yield task
yield utils.apply_filters({
'basename': self.name,
'name': out_name,
'file_dep': template_deps + [in_name],
'targets': [out_name],
'actions': [(render_listing, [in_name, out_name, input_folder, output_folder])],
# This is necessary to reflect changes in blog title,
# sidebar links, etc.
'uptodate': [utils.config_changed(uptodate, 'nikola.plugins.task.listings:source')],
'clean': True,
}, self.kw["filters"])
rel_name = os.path.join(rel_path, f)
rel_output_name = os.path.join(output_folder, rel_path, f)
self.register_output_name(input_folder, rel_name, rel_output_name)
out_name = os.path.join(self.kw['output_folder'], rel_output_name)
yield utils.apply_filters({
'basename': self.name,
'name': out_name,
'file_dep': [in_name],
'targets': [out_name],
'actions': [(utils.copy_file, [in_name, out_name])],
'clean': True,
}, self.kw["filters"])
def listing_source_path(self, name, lang):
    """Return a link to the source code for a listing.

    It will try to use the file name if it's not ambiguous, or the file path.

    Example:

    link://listing_source/hello.py => /listings/tutorial/hello.py

    link://listing_source/tutorial/hello.py => /listings/tutorial/hello.py
    """
    parts = self.listing_path(name, lang)
    last = parts[-1]
    # Listings are rendered to *.html; the source link points at the raw
    # file, so strip that suffix from the final path component.
    if last.endswith('.html'):
        parts[-1] = last[:-5]
    return parts
def listing_path(self, namep, lang):
    """Return a link to a listing.

    It will try to use the file name if it's not ambiguous, or the file path.

    Example:

    link://listing/hello.py => /listings/tutorial/hello.py.html

    link://listing/tutorial/hello.py => /listings/tutorial/hello.py.html
    """
    # Normalize to OS separators and try both the bare name and its
    # rendered ``.html`` counterpart against the two lookup tables.
    namep = namep.replace('/', os.sep)
    nameh = namep + '.html'
    for name in (namep, nameh):
        if name in self.proper_input_file_mapping:
            # If the name shows up in this dict, everything's fine.
            name = self.proper_input_file_mapping[name]
            break
        elif name in self.improper_input_file_mapping:
            # If the name shows up in this dict, we have to check for
            # ambiguities.
            if len(self.improper_input_file_mapping[name]) > 1:
                utils.LOGGER.error("Using non-unique listing name '{0}', which maps to more than one listing name ({1})!".format(name, str(self.improper_input_file_mapping[name])))
                return ["ERROR"]
            if len(self.site.config['LISTINGS_FOLDERS']) > 1:
                utils.LOGGER.warning("Using listings names in site.link() without input directory prefix while configuration's LISTINGS_FOLDERS has more than one entry.")
            name = list(self.improper_input_file_mapping[name])[0]
            break
    else:
        # Neither candidate matched any known listing.
        utils.LOGGER.error("Unknown listing name {0}!".format(namep))
        return ["ERROR"]
    # Rendered listings carry an extra .html suffix; directory index files
    # keep their configured name as-is.
    if not name.endswith(os.sep + self.site.config["INDEX_FILE"]):
        name += '.html'
    path_parts = name.split(os.sep)
    # Drop empty components (e.g. from a leading separator).
    return [_f for _f in path_parts if _f]
/CAL-1.4.5.tar.gz/CAL-1.4.5/Ordinal.py |
from __future__ import print_function
"""
>>> from _CAL.Date import *
>>> d1 = Date (1958, 1, 30)
>>> d2 = Date (1960, 4, 11)
>>> d3 = Date (1959, 9, 26)
>>> d4 = Date (1997, 11, 16)
>>> d5 = Date (2007, 1, 25) ### week 4
>>> d6 = Date (2007, 4, 25) ### week 17
>>> d7 = Date (2007, 7, 4) ### week 27
>>> d8 = Date (2007, 12, 24) ### week 52
>>> for U in Month, Quarter, Week, Year :
... print (U.__name__)
... for d in (d1, d2, d3, d4, d5, d6, d7, d8) :
... o = U.to_ordinal (d)
... print (" %s %8d %s" % (d, o, U.to_date (o)))
...
Month
1958-01-30 23497 1958-01-01
1960-04-11 23524 1960-04-01
1959-09-26 23517 1959-09-01
1997-11-16 23975 1997-11-01
2007-01-25 24085 2007-01-01
2007-04-25 24088 2007-04-01
2007-07-04 24091 2007-07-01
2007-12-24 24096 2008-12-01
Quarter
1958-01-30 7833 1958-01-01
1960-04-11 7842 1960-04-01
1959-09-26 7839 1959-07-01
1997-11-16 7992 1998-10-01
2007-01-25 8029 2007-01-01
2007-04-25 8030 2007-04-01
2007-07-04 8031 2007-07-01
2007-12-24 8032 2008-10-01
Week
1958-01-30 102115 1958-01-30
1960-04-11 102230 1960-04-14
1959-09-26 102201 1959-09-24
1997-11-16 104191 1997-11-13
2007-01-25 104671 2007-01-25
2007-04-25 104684 2007-04-26
2007-07-04 104694 2007-07-05
2007-12-24 104719 2007-12-27
Year
1958-01-30 1958 1958-01-01
1960-04-11 1960 1960-01-01
1959-09-26 1959 1959-01-01
1997-11-16 1997 1997-01-01
2007-01-25 2007 2007-01-01
2007-04-25 2007 2007-01-01
2007-07-04 2007 2007-01-01
2007-12-24 2007 2007-01-01
"""
from _TFL import TFL
from _CAL import CAL
import _CAL.Date
import _TFL._Meta.Object
class Month (TFL.Meta.Object) :
    """Ordinal numbers for months (ordinal = year * 12 + month, 1-based)."""

    @classmethod
    def is_first (cls, mo) :
        """Return True if month ordinal `mo` denotes a January."""
        # January of year y has ordinal y*12 + 1, i.e. remainder 1.
        y, m = divmod (mo, 12)
        return m == 1
    # end def is_first

    @classmethod
    def to_date (cls, mo) :
        """Return date corresponding to month ordinal `mo`."""
        # NOTE(review): for December, divmod yields m == 0 and a year that is
        # one too high (the module doctest shows ordinal 24096 for 2007-12
        # mapping back to 2008-12-01).  The doctest records this behavior;
        # confirm whether the off-by-one is intentional before changing it.
        y, m = divmod (mo, 12)
        return CAL.Date (y, m or 12, 1)
    # end def to_date

    @classmethod
    def to_ordinal (cls, d) :
        """Return month ordinal for date `d`."""
        return d.year * 12 + d.month
    # end def to_ordinal

    @classmethod
    def to_year (cls, mo) :
        """Return year corresponding to month ordinal `mo`."""
        # Same December caveat as `to_date` applies here (m == 0 case).
        y, m = divmod (mo, 12)
        return y
    # end def to_year

# end class Month
class Quarter (TFL.Meta.Object) :
    """Ordinal numbers for quarters (ordinal = year * 4 + quarter, 1-based)."""

    @classmethod
    def is_first (cls, qo) :
        """Return True if quarter ordinal `qo` denotes a first quarter."""
        # Q1 of year y has ordinal y*4 + 1, i.e. remainder 1.
        y, q = divmod (qo, 4)
        return q == 1
    # end def is_first

    @classmethod
    def to_date (cls, qo) :
        """Return date corresponding to quarter ordinal `qo`."""
        # NOTE(review): for Q4, divmod yields q == 0 and a year that is one
        # too high (the module doctest shows 2007-Q4 mapping back to
        # 2008-10-01).  The doctest records this behavior; confirm whether
        # the off-by-one is intentional before changing it.
        y, q = divmod (qo, 4)
        return CAL.Date (y, ((q or 4) - 1) * 3 + 1, 1)
    # end def to_date

    @classmethod
    def to_ordinal (cls, d) :
        """Return quarter ordinal for date `d`."""
        return d.year * 4 + d.quarter
    # end def to_ordinal

    @classmethod
    def to_year (cls, qo) :
        """Return year corresponding to quarter ordinal `qo`."""
        # Same Q4 caveat as `to_date` applies here (q == 0 case).
        y, q = divmod (qo, 4)
        return y
    # end def to_year

# end class Quarter
class Week (TFL.Meta.Object) :
    """Ordinal numbers for weeks."""

    # Number of weeks at the start of a year that count as "first".
    tick_delta = 1

    @classmethod
    def is_first (cls, wo) :
        """Return True if week ordinal `wo` falls in the first week(s) of its year."""
        d = cls.to_date (wo)
        return 1 <= d.week <= cls.tick_delta
    # end def is_first

    @classmethod
    def to_date (cls, wo) :
        """Return date corresponding to week ordinal `wo`."""
        # Presumably `wo * 7 + 4` picks a mid-week day-ordinal (the doctest
        # shows Thursdays being returned) — confirm against the definition
        # of `CAL.Date.wk_ordinal` before relying on the exact weekday.
        return CAL.Date.from_ordinal (wo * 7 + 4)
    # end def to_date

    @classmethod
    def to_ordinal (cls, d) :
        """Return week ordinal for date `d`."""
        return d.wk_ordinal
    # end def to_ordinal

    @classmethod
    def to_year (cls, wo) :
        """Return year corresponding to week ordinal `wo`."""
        d = cls.to_date (wo)
        return d.year
    # end def to_year

# end class Week
class Year (TFL.Meta.Object) :
    """Ordinal numbers for years (the ordinal is the year itself)."""

    @classmethod
    def is_first (cls, yo) :
        """Return False: years are the coarsest unit, so none is "first"."""
        return False
    # end def is_first

    @classmethod
    def to_date (cls, yo) :
        """Return date corresponding to year ordinal `yo` (January 1st)."""
        return CAL.Date (yo, 1, 1)
    # end def to_date

    @classmethod
    def to_ordinal (cls, d) :
        """Return year ordinal for date `d`."""
        return d.year
    # end def to_ordinal

    @classmethod
    def to_year (cls, yo) :
        """Return year corresponding to year ordinal `yo` (identity)."""
        return yo
    # end def to_year

# end class Year
if __name__ == "__main__" :
    # Export this module through the CAL package namespace.
    CAL._Export_Module ()
### __END__ CAL.Ordinal
/NJU_jiaowu_helper-0.3.1.tar.gz/NJU_jiaowu_helper-0.3.1/jiaowu/core/function/upload/upload_functions.py | import re
from scrapy import Selector
from jiaowu.core.function.utils.others import get_values
from jiaowu.data.constants.status_code import StatusCode as Code
from jiaowu.core.model.spider_model import LoginSpider
def apply_for_exam_only(spider: LoginSpider, args):
    # Apply for "exam-only" exemption (免修不免考): skip attending classes for
    # a course but still take its final exam.
    # `args` must supply "course_name" and "course_id"; the course is matched
    # against the exemption course list scraped from the portal.
    values = get_values(args, ["course_name", "course_id"])
    course_name, course_id = tuple(values)
    spider.update_header("Referer", 'http://elite.nju.edu.cn/jiaowu/student/elective/index.do')
    response = spider.task.get(
        url="http://elite.nju.edu.cn/jiaowu/student/teachinginfo/courseList.do?method=exemptionBMKList")
    selector = Selector(text=response.text)
    trs = selector.xpath("//tr[@align='left']")
    class_id = ""
    for tr in trs:
        # Match either by course id (1st cell) or course name (2nd cell); the
        # class id is embedded in parentheses inside the 4th cell's link href.
        if tr.xpath("./td/text()")[0].extract() == course_id or tr.xpath("./td/text()")[1].extract() == course_name:
            class_id = re.search(re.compile("\((\d+)\)"),
                                 tr.xpath("./td")[3].xpath("./a/@href")[0].extract()).group(1)
    if len(class_id) > 0:
        # Check that the number of already-submitted applications is still
        # within the allowed limit before submitting a new one.
        if len(selector.xpath("//div[@id='courseList']")[0].css("table tr[align='left']")) >= 2:
            print(Code.APPLICATION_BEYOND_MAX_LIMITS.get_msg())
            return
        spider.task.get(
            url="http://elite.nju.edu.cn/jiaowu/student/teachinginfo/courseList.do?method=exemptionBMKApply&classId=%s" % class_id)
    else:
        print(Code.COURSE_NOT_FOUND.get_msg())
def cancel_exam_only_application(spider: LoginSpider, args):
    # Cancel a previously submitted "exam-only" exemption (免修不免考)
    # application.  `args` must supply "course_name" and "course_id".
    values = get_values(args, ["course_name", "course_id"])
    course_name, course_id = tuple(values)
    spider.update_header("Referer", 'http://elite.nju.edu.cn/jiaowu/student/elective/index.do')
    response = spider.task.get(
        url="http://elite.nju.edu.cn/jiaowu/student/teachinginfo/courseList.do?method=exemptionBMKList")
    selector = Selector(text=response.text)
    trs = selector.xpath("//tr[@align='left']")
    class_id = ""
    for tr in trs:
        # Match either by course id (1st cell) or course name (2nd cell); the
        # class id is embedded in parentheses inside the 4th cell's link href.
        if tr.xpath("./td/text()")[0].extract() == course_id or tr.xpath("./td/text()")[1].extract() == course_name:
            class_id = re.search(re.compile("\((\d+)\)"),
                                 tr.xpath("./td")[3].xpath("./a/@href")[0].extract()).group(1)
    if len(class_id) > 0:
        # Verify the course actually appears in the pending-review list
        # before attempting to delete the application.
        application_list = selector.xpath("//div[@id='courseList']/table")[0].css("tr[align='left']")
        flag = False
        for application in application_list:
            tds_text = application.xpath("./td/text()").extract()
            if tds_text[0] == course_id or tds_text[1] == course_name:
                flag = True
        if not flag:
            print(Code.COURSE_NOT_EXIST_IN_LIST.get_msg())
            return
        spider.task.get(
            url="http://elite.nju.edu.cn/jiaowu/student/teachinginfo/courseList.do?method=exemptionBMKDelete&classId=%s" % class_id)
    else:
        print(Code.COURSE_NOT_FOUND.get_msg())
def update_password(spider: LoginSpider, args):
    """Change the student's portal password via the ModifyPassword form."""
    old_pwd, new_pwd = tuple(get_values(args, ["old_pwd", "new_pwd"]))
    spider.update_header(
        "Referer",
        'http://elite.nju.edu.cn/jiaowu/student/basicinfo/ModifyPassword.do?method=editStudentPassword')
    # The portal expects the new password twice (confirmation field).
    payload = {
        'OldPassword': old_pwd,
        'NewPassword': new_pwd,
        'NewPassword_d': new_pwd,
    }
    response = spider.task.post(
        url="http://elite.nju.edu.cn/jiaowu/student/basicinfo/ModifyPassword.do?method=editStudentPassword",
        data=payload)
    # The success page contains the success phrase at least twice.
    succeeded = len(re.findall("密码修改成功", response.text)) >= 2
    if succeeded:
        print(Code.PWD_UPDATE_SUCCESS.get_msg())
    else:
        print(Code.PWD_UPDATE_FAILURE.get_msg())
/MAA_PyQT5_messenger_server_app-0.0.4.tar.gz/MAA_PyQT5_messenger_server_app-0.0.4/server/server/remove_user.py | from PyQt5.QtWidgets import QDialog, QLabel, QComboBox, QPushButton, QApplication
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QStandardItemModel, QStandardItem
class DelUserDialog(QDialog):
    '''
    Dialog for selecting a user to remove from the server.
    '''

    def __init__(self, database, server):
        super().__init__()
        # Server-side storage backend and the live message processor
        # (which holds the sockets of currently connected clients).
        self.database = database
        self.server = server

        # Fixed-size modal dialog, destroyed (not just hidden) on close.
        self.setFixedSize(350, 120)
        self.setWindowTitle('Удаление пользователя')
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setModal(True)

        self.selector_label = QLabel(
            'Выберите пользователя для удаления:', self)
        self.selector_label.setFixedSize(200, 20)
        self.selector_label.move(10, 0)

        # Drop-down populated with all known user names.
        self.selector = QComboBox(self)
        self.selector.setFixedSize(200, 20)
        self.selector.move(10, 30)

        self.btn_ok = QPushButton('Удалить', self)
        self.btn_ok.setFixedSize(100, 30)
        self.btn_ok.move(230, 20)
        self.btn_ok.clicked.connect(self.remove_user)

        self.btn_cancel = QPushButton('Отмена', self)
        self.btn_cancel.setFixedSize(100, 30)
        self.btn_cancel.move(230, 60)
        self.btn_cancel.clicked.connect(self.close)

        self.all_users_fill()

    def all_users_fill(self):
        '''Populate the selector with all user names from the database.'''
        self.selector.addItems([item[0]
                                for item in self.database.users_list()])

    def remove_user(self):
        '''Handler that removes the selected user and disconnects them if online.'''
        self.database.remove_user(self.selector.currentText())
        # If the user is currently connected, drop their socket as well.
        if self.selector.currentText() in self.server.names:
            sock = self.server.names[self.selector.currentText()]
            del self.server.names[self.selector.currentText()]
            self.server.remove_client(sock)
        # Broadcast to clients that they need to refresh their user lists.
        self.server.service_update_lists()
        self.close()
if __name__ == '__main__':
    # Ad-hoc manual test: spin up a real database and message processor
    # and show the dialog standalone.
    app = QApplication([])
    from server_base import Server_db
    database = Server_db('../server_database.db3')
    import os
    import sys
    # Make the parent directory importable so msg_processor can be found.
    path1 = os.path.join(os.getcwd(), '..')
    sys.path.insert(0, path1)
    from msg_processor import MessageProcessor
    server = MessageProcessor('127.0.0.1', 7777, database)
    dial = DelUserDialog(database, server)
    dial.show()
    app.exec_()
/Comet-3.1.0.tar.gz/Comet-3.1.0/comet/plugins/eventwriter.py |
import os
import string
from contextlib import contextmanager
from zope.interface import implementer
from twisted.plugin import IPlugin
from twisted.python import lockfile
from comet.icomet import IHandler, IHasOptions
import comet.log as log
# Used when building filenames to avoid over-writing.
FILENAME_PAD = "_"
def string_to_filename(input_string):
    """Return *input_string* sanitized for safe use as a filename.

    Path separators ("/" and "\\") are replaced with "_" for readability;
    every other character that is not alphanumeric, "_" or "." is dropped.
    "." is allowed, but not as the first character (avoids hidden files).
    An empty input yields an empty string.
    """
    # Fix: the original indexed input_string[0] unconditionally and raised
    # IndexError on an empty string; startswith() is safe for "".
    if input_string.startswith("."):
        input_string = input_string[1:]
    return "".join(
        x for x in input_string.replace("/", "_").replace("\\", "_")
        if x in string.digits + string.ascii_letters + "_."
    )
@contextmanager
def event_file(ivoid, dirname=None):
    # Return a file object into which we can write an event.
    # If a directory is specified, write into that; otherwise, use the cwd.
    # We use a lock to ensure we don't clobber other files with the same name:
    # while holding the lock, pad the target name until it is unused.
    # NOTE(review): the lock file is created relative to the current working
    # directory even when `dirname` is given — confirm whether it should
    # instead live in `dirname` alongside the event file.
    if not dirname:
        dirname=os.getcwd()
    fname = os.path.join(dirname, string_to_filename(ivoid))
    lock = lockfile.FilesystemLock(string_to_filename(ivoid) + "-lock")
    lock.lock()
    try:
        # Pad with "_" until the name is free, so concurrent writers of the
        # same IVOID each get a distinct file.
        while os.path.exists(fname):
            fname += FILENAME_PAD
        with open(fname, 'w') as f:
            yield f
    finally:
        lock.unlock()
# Event handlers must implement IPlugin and IHandler.
# Implementing IHasOptions enables us to use command line options.
@implementer(IPlugin, IHandler, IHasOptions)
class EventWriter(object):
    """Event handler plugin that saves received events to disk.

    The target directory defaults to the current working directory and can
    be overridden via the ``directory`` option.
    """

    # The name attribute enables the user to specify plugins they want on the
    # command line.
    name = "save-event"

    def __init__(self):
        # Default output directory; may be replaced through set_option().
        self.directory = os.getcwd()

    # When the handler is called, it is passed an instance of
    # comet.utility.xml.xml_document.
    def __call__(self, event):
        """
        Save an event to disk.
        """
        # Create the target directory on demand (first event received).
        if not os.path.exists(self.directory):
            os.makedirs(self.directory)
        with event_file(event.element.attrib['ivorn'], self.directory) as f:
            log.debug("Writing to %s" % (f.name,))
            f.write(event.raw_bytes.decode(event.encoding))

    def get_options(self):
        """Return the configurable options as (name, default, description) tuples."""
        return [('directory', self.directory, 'Target directory')]

    def set_option(self, name, value):
        """Apply a command-line option; only 'directory' is recognized."""
        if name == "directory":
            self.directory = value

# This instance of the handler is what actually constitutes our plugin.
save_event = EventWriter()
/CausalInference-0.1.3-py3-none-any.whl/causalinference/causal.py | from __future__ import division
import numpy as np
from itertools import combinations_with_replacement
from .core import Data, Summary, Propensity, PropensitySelect, Strata
from .estimators import OLS, Blocking, Weighting, Matching, Estimators
class CausalModel(object):

    """
    Class that provides the main tools of Causal Inference.
    """

    def __init__(self, Y, D, X):
        # Keep an untouched copy of the inputs so reset() can restore the
        # model after destructive operations such as trimming.
        self.old_data = Data(Y, D, X)
        self.reset()

    def reset(self):
        """
        Reinitializes data to original inputs, and drops any estimated
        results.
        """
        Y, D, X = self.old_data['Y'], self.old_data['D'], self.old_data['X']
        self.raw_data = Data(Y, D, X)
        self.summary_stats = Summary(self.raw_data)
        self.propensity = None
        self.cutoff = None
        self.blocks = None
        self.strata = None
        self.estimates = Estimators()

    def est_propensity(self, lin='all', qua=None):
        """
        Estimates the propensity scores given list of covariates to
        include linearly or quadratically.

        The propensity score is the conditional probability of
        receiving the treatment given the observed covariates.
        Estimation is done via a logistic regression.

        Parameters
        ----------
        lin: string or list, optional
            Column numbers (zero-based) of variables of
            the original covariate matrix X to include
            linearly. Defaults to the string 'all', which
            uses whole covariate matrix.
        qua: list, optional
            Tuples indicating which columns of the original
            covariate matrix to multiply and include. E.g.,
            [(1,1), (2,3)] indicates squaring the 2nd column
            and including the product of the 3rd and 4th
            columns. Default is to not include any
            quadratic terms.
        """
        lin_terms = parse_lin_terms(self.raw_data['K'], lin)
        qua_terms = parse_qua_terms(self.raw_data['K'], qua)

        self.propensity = Propensity(self.raw_data, lin_terms, qua_terms)
        # Expose the fitted scores alongside the raw data for later steps
        # (trimming, stratification, weighting).
        self.raw_data._dict['pscore'] = self.propensity['fitted']
        self._post_pscore_init()

    def est_propensity_s(self, lin_B=None, C_lin=1, C_qua=2.71):
        """
        Estimates the propensity score with covariates selected using
        the algorithm suggested by [1]_.

        The propensity score is the conditional probability of
        receiving the treatment given the observed covariates.
        Estimation is done via a logistic regression.

        The covariate selection algorithm is based on a sequence
        of likelihood ratio tests.

        Parameters
        ----------
        lin_B: list, optional
            Column numbers (zero-based) of variables of
            the original covariate matrix X to include
            linearly. Defaults to empty list, meaning
            every column of X is subjected to the
            selection algorithm.
        C_lin: scalar, optional
            Critical value used in likelihood ratio tests
            to decide whether candidate linear terms should
            be included. Defaults to 1 as in [1]_.
        C_qua: scalar, optional
            Critical value used in likelihood ratio tests
            to decide whether candidate quadratic terms
            should be included. Defaults to 2.71 as in
            [1]_.

        References
        ----------
        .. [1] Imbens, G. & Rubin, D. (2015). Causal Inference in
            Statistics, Social, and Biomedical Sciences: An
            Introduction.
        """
        lin_basic = parse_lin_terms(self.raw_data['K'], lin_B)

        self.propensity = PropensitySelect(self.raw_data, lin_basic,
                                           C_lin, C_qua)
        self.raw_data._dict['pscore'] = self.propensity['fitted']
        self._post_pscore_init()

    def trim(self):
        """
        Trims data based on propensity score to create a subsample with
        better covariate balance.

        The default cutoff value is set to 0.1. To set a custom cutoff
        value, modify the object attribute named cutoff directly.

        This method should only be executed after the propensity score
        has been estimated.
        """
        # Symmetric trimming: keep only units whose propensity score lies
        # in [cutoff, 1-cutoff]; a cutoff of 0 is an explicit no-op.
        if 0 < self.cutoff <= 0.5:
            pscore = self.raw_data['pscore']
            keep = (pscore >= self.cutoff) & (pscore <= 1-self.cutoff)
            Y_trimmed = self.raw_data['Y'][keep]
            D_trimmed = self.raw_data['D'][keep]
            X_trimmed = self.raw_data['X'][keep]
            self.raw_data = Data(Y_trimmed, D_trimmed, X_trimmed)
            self.raw_data._dict['pscore'] = pscore[keep]
            self.summary_stats = Summary(self.raw_data)
            # Strata and estimates computed on the untrimmed sample are
            # invalidated by trimming.
            self.strata = None
            self.estimates = Estimators()
        elif self.cutoff == 0:
            pass
        else:
            raise ValueError('Invalid cutoff.')

    def trim_s(self):
        """
        Trims data based on propensity score using the cutoff
        selection algorithm suggested by [1]_.

        This method should only be executed after the propensity score
        has been estimated.

        References
        ----------
        .. [1] Crump, R., Hotz, V., Imbens, G., & Mitnik, O. (2009).
            Dealing with Limited Overlap in Estimation of
            Average Treatment Effects. Biometrika, 96, 187-199.
        """
        pscore = self.raw_data['pscore']
        g = 1.0/(pscore*(1-pscore))  # 1 over Bernoulli variance

        self.cutoff = select_cutoff(g)
        self.trim()

    def stratify(self):
        """
        Stratifies the sample based on propensity score.

        By default the sample is divided into five equal-sized bins.
        The number of bins can be set by modifying the object
        attribute named blocks. Alternatively, custom-sized bins can
        be created by setting blocks equal to a sorted list of numbers
        between 0 and 1 indicating the bin boundaries.

        This method should only be executed after the propensity score
        has been estimated.
        """
        Y, D, X = self.raw_data['Y'], self.raw_data['D'], self.raw_data['X']
        pscore = self.raw_data['pscore']

        # `blocks` is either a bin count (int) or an explicit, sorted list
        # of bin boundaries in (0, 1].
        if isinstance(self.blocks, int):
            blocks = split_equal_bins(pscore, self.blocks)
        else:
            blocks = self.blocks[:]  # make a copy; should be sorted
            blocks[0] = 0  # avoids always dropping 1st unit

        # Bins are half-open on the left: (p_low, p_high].
        def subset(p_low, p_high):
            return (p_low < pscore) & (pscore <= p_high)
        subsets = [subset(*ps) for ps in zip(blocks, blocks[1:])]
        # Each stratum is itself a CausalModel over its within-bin sample.
        strata = [CausalModel(Y[s], D[s], X[s]) for s in subsets]
        self.strata = Strata(strata, subsets, pscore)

    def stratify_s(self):
        """
        Stratifies the sample based on propensity score using the
        bin selection procedure suggested by [1]_.

        The bin selection algorithm is based on a sequence of
        two-sample t tests performed on the log-odds ratio.

        This method should only be executed after the propensity score
        has been estimated.

        References
        ----------
        .. [1] Imbens, G. & Rubin, D. (2015). Causal Inference in
            Statistics, Social, and Biomedical Sciences: An
            Introduction.
        """
        # select_blocks expects observations sorted by propensity score.
        pscore_order = self.raw_data['pscore'].argsort()
        pscore = self.raw_data['pscore'][pscore_order]
        D = self.raw_data['D'][pscore_order]
        logodds = np.log(pscore / (1-pscore))
        K = self.raw_data['K']

        # The recursion returns boundaries with duplicates; dedupe and sort.
        blocks_uniq = set(select_blocks(pscore, logodds, D, K, 0, 1))
        self.blocks = sorted(blocks_uniq)
        self.stratify()

    def est_via_ols(self, adj=2):
        """
        Estimates average treatment effects using least squares.

        Parameters
        ----------
        adj: int (0, 1, or 2)
            Indicates how covariate adjustments are to be
            performed. Set adj = 0 to not include any
            covariates. Set adj = 1 to include treatment
            indicator D and covariates X separately. Set
            adj = 2 to additionally include interaction
            terms between D and X. Defaults to 2.
        """
        self.estimates['ols'] = OLS(self.raw_data, adj)

    def est_via_blocking(self, adj=1):
        """
        Estimates average treatment effects using regression within
        blocks.

        This method should only be executed after the sample has been
        stratified.

        Parameters
        ----------
        adj: int (0, 1, or 2)
            Indicates how covariate adjustments are to be
            performed for each within-bin regression.
            Set adj = 0 to not include any covariates.
            Set adj = 1 to include treatment indicator D
            and covariates X separately. Set adj = 2 to
            additionally include interaction terms between
            D and X. Defaults to 1.
        """
        self.estimates['blocking'] = Blocking(self.strata, adj)

    def est_via_weighting(self):
        """
        Estimates average treatment effects using doubly-robust
        version of the Horvitz-Thompson weighting estimator.
        """
        self.estimates['weighting'] = Weighting(self.raw_data)

    def est_via_matching(self, weights='inv', matches=1, bias_adj=False):
        """
        Estimates average treatment effects using nearest-
        neighborhood matching.

        Matching is done with replacement. Method supports multiple
        matching. Correcting bias that arise due to imperfect matches
        is also supported. For details on methodology, see [1]_.

        Parameters
        ----------
        weights: str or positive definite square matrix
            Specifies weighting matrix used in computing
            distance measures. Defaults to string 'inv',
            which does inverse variance weighting. String
            'maha' gives the weighting matrix used in the
            Mahalanobis metric.
        matches: int
            Number of matches to use for each subject.
        bias_adj: bool
            Specifies whether bias adjustments should be
            attempted.

        References
        ----------
        .. [1] Imbens, G. & Rubin, D. (2015). Causal Inference in
            Statistics, Social, and Biomedical Sciences: An
            Introduction.
        """
        X, K = self.raw_data['X'], self.raw_data['K']
        X_c, X_t = self.raw_data['X_c'], self.raw_data['X_t']
        if weights == 'inv':
            # Inverse-variance weighting: one weight per covariate.
            W = 1/X.var(0)
        elif weights == 'maha':
            # Mahalanobis metric: inverse of the pooled covariance matrix.
            V_c = np.cov(X_c, rowvar=False, ddof=0)
            V_t = np.cov(X_t, rowvar=False, ddof=0)
            if K == 1:
                W = 1/np.array([[(V_c+V_t)/2]])  # matrix form
            else:
                W = np.linalg.inv((V_c+V_t)/2)
        else:
            # Caller-supplied positive definite weighting matrix.
            W = weights
        self.estimates['matching'] = Matching(self.raw_data, W,
                                              matches, bias_adj)

    def _post_pscore_init(self):
        # Defaults applied after each propensity estimation: trim at 0.1
        # and stratify into 5 equal-sized bins unless overridden.
        self.cutoff = 0.1
        self.blocks = 5
def parse_lin_terms(K, lin):
    """Normalize the linear-terms spec into a sequence of column indices.

    'all' selects every one of the K columns; None selects nothing; any
    other value is passed through unchanged.
    """
    if lin == 'all':
        return range(K)
    return [] if lin is None else lin
def parse_qua_terms(K, qua):
    """Normalize the quadratic-terms spec into a list of column-index pairs.

    'all' selects every unordered pair (with repetition) of the K columns;
    None selects nothing; any other value is passed through unchanged.
    """
    if qua == 'all':
        return list(combinations_with_replacement(range(K), 2))
    return [] if qua is None else qua
def sumlessthan(g, sorted_g, cumsum):
    """For each element of g, look up the cumulative total over all values <= it.

    `sorted_g` is g in ascending order and `cumsum` the matching running
    totals; for duplicate values, zip keeps the *last* paired total, i.e.
    the sum over every element less than or equal to that value.
    """
    lookup = {value: total for value, total in zip(sorted_g, cumsum)}
    return np.array([lookup[x] for x in g])
def select_cutoff(g):
    """Select the optimal symmetric trimming cutoff as in Crump et al. (2009).

    `g` is 1/(p*(1-p)), the inverse Bernoulli variance of the estimated
    propensity scores.  Returns 0 when no trimming is beneficial.
    """
    # If no score is extreme enough (max <= twice the mean), keep everyone.
    if g.max() <= 2*g.mean():
        cutoff = 0
    else:
        sorted_g = np.sort(g)
        cumsum_1 = range(1, len(g)+1)
        # LHS/RHS implement the moment condition that pins down gamma, the
        # largest g-value still satisfying the optimality inequality.
        LHS = g * sumlessthan(g, sorted_g, cumsum_1)
        cumsum_g = np.cumsum(sorted_g)
        RHS = 2 * sumlessthan(g, sorted_g, cumsum_g)
        gamma = np.max(g[LHS <= RHS])
        # Invert g = 1/(p*(1-p)) at gamma to recover the score cutoff.
        cutoff = 0.5 - np.sqrt(0.25 - 1./gamma)
    return cutoff
def split_equal_bins(pscore, blocks):
    """Return bin boundaries that split pscore into `blocks` equal-sized bins.

    Interior boundaries are the empirical centiles; the endpoints are
    pinned to 0 and 1.
    """
    qs = np.linspace(0, 100, blocks+1)[1:-1]  # interior qth centiles
    boundaries = [np.percentile(pscore, q) for q in qs]
    return [0] + boundaries + [1]
def calc_tstat(sample_c, sample_t):
    """Welch t-statistic for the difference in means (treated minus control)."""
    n_c, n_t = sample_c.shape[0], sample_t.shape[0]
    # Unpooled standard error with sample (ddof=1) variances.
    se = np.sqrt(sample_c.var(ddof=1)/n_c + sample_t.var(ddof=1)/n_t)
    return (sample_t.mean() - sample_c.mean()) / se
def calc_sample_sizes(D):
    """Treated/control counts in each half of the treatment indicator D.

    D is split at its midpoint; returns the tuple
    (Nleft_c, Nleft_t, Nright_c, Nright_t).
    """
    mid = D.shape[0] // 2
    left, right = D[:mid], D[mid:]
    left_t = left.sum()
    right_t = right.sum()
    return (mid - left_t, left_t, len(right) - right_t, right_t)
def select_blocks(pscore, logodds, D, K, p_low, p_high):
    """Recursively split [p_low, p_high] into propensity-score bins.

    A bin is split at its median score whenever the two-sample t-test on
    the log-odds ratio between treated and control units is significant
    (t > 1.96) and each half would retain at least K+1 units per arm.
    Returns a flat list of boundaries (with duplicates; caller dedupes).
    Assumes `pscore` is sorted ascending (stratify_s sorts before calling).
    """
    scope = (pscore >= p_low) & (pscore <= p_high)
    c, t = (scope & (D==0)), (scope & (D==1))

    # Stop if any half-bin would be too small for within-bin regressions.
    Nleft_c, Nleft_t, Nright_c, Nright_t = calc_sample_sizes(D[scope])
    if min(Nleft_c, Nleft_t, Nright_c, Nright_t) < K+1:
        return [p_low, p_high]

    # Stop if treated/control log-odds are already balanced in this bin.
    tstat = calc_tstat(logodds[c], logodds[t])
    if tstat <= 1.96:
        return [p_low, p_high]

    # Split at the median score and recurse into both halves.
    low = pscore[scope][0]
    mid = pscore[scope][scope.sum() // 2]
    high = pscore[scope][-1]

    return select_blocks(pscore, logodds, D, K, low, mid) + \
           select_blocks(pscore, logodds, D, K, mid, high)
/IdracRedfishSupport-0.0.8.tar.gz/IdracRedfishSupport-0.0.8/BiosResetToDefaultsREDFISH.py |
import argparse
import getpass
import json
import logging
import re
import requests
import sys
import time
import warnings
from datetime import datetime
from pprint import pprint
warnings.filterwarnings("ignore")
# Command-line interface: connection/auth arguments plus behavior flags.
# Either -u/-p credentials or an -x X-Auth token must be supplied.
parser=argparse.ArgumentParser(description="Python script using Redfish API DMTF action to reset BIOS to default settings")
parser.add_argument('-ip',help='iDRAC IP address', required=False)
parser.add_argument('-u', help='iDRAC username', required=False)
parser.add_argument('-p', help='iDRAC password. If you do not pass in argument -p, script will prompt to enter user password which will not be echoed to the screen.', required=False)
parser.add_argument('-x', help='Pass in X-Auth session token for executing Redfish calls. All Redfish calls will use X-Auth token instead of username/password', required=False)
parser.add_argument('--ssl', help='SSL cert verification for all Redfish calls, pass in value \"true\" or \"false\". By default, this argument is not required and script ignores validating SSL cert for all Redfish calls.', required=False)
parser.add_argument('--script-examples', help='Get executing script examples', action="store_true", dest="script_examples", required=False)
parser.add_argument('--noreboot', help='Pass in this argument to not reboot the server now to reset BIOS to default settings. Flag will still be set and reset to defaults will happen on next server reboot.', action="store_true", required=False)
args=vars(parser.parse_args())

# All script output goes through logging at INFO level on stdout.
logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.INFO)
def script_examples():
    """Print example command lines for this script, then exit."""
    print("""\n- BiosResetToDefaultsREDFISH.py -ip 192.168.0.120 -u root -p calvin, this example will reboot the server now to perform BIOS reset to defaults operation.
    \n- BiosResetToDefaultsREDFISH.py -ip 192.168.0.120 -x 7041fd6528bc5d9d88a34cdc14bf133a, this example using iDRAC X-auth token session will reboot the server now to perform BIOS reset to defaults operation.""")
    sys.exit(0)
def check_supported_idrac_version():
    """Probe the Bios resource to validate credentials and iDRAC support.

    Exits the script on 401 (bad credentials or insufficient privilege) or
    any other non-200 response (feature not supported by this firmware).
    Relies on module globals: args, idrac_ip, idrac_username, idrac_password,
    verify_cert.
    """
    # Use the X-Auth token when given, otherwise basic auth.
    if args["x"]:
        response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Bios' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
    else:
        response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Bios' % idrac_ip, verify=verify_cert,auth=(idrac_username, idrac_password))
    # Body is parsed but unused; only the status code decides support.
    data = response.json()
    if response.status_code == 401:
        logging.warning("\n- WARNING, status code %s returned. Incorrect iDRAC username/password or invalid privilege detected." % response.status_code)
        sys.exit(0)
    if response.status_code != 200:
        logging.warning("\n- WARNING, iDRAC version installed does not support this feature using Redfish API")
        sys.exit(0)
def reset_bios():
    """POST the DMTF Bios.ResetBios action to flag BIOS reset to defaults.

    The reset itself only takes effect on the next server reboot. Logs
    success on HTTP 200; on any other status, logs the failure details and
    exits the script. Relies on module globals: args, idrac_ip,
    idrac_username, idrac_password, verify_cert.
    """
    url = "https://%s/redfish/v1/Systems/System.Embedded.1/Bios/Actions/Bios.ResetBios" % idrac_ip
    payload = {}
    # Use the X-Auth token when given, otherwise basic auth.
    if args["x"]:
        headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
        response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
    else:
        headers = {'content-type': 'application/json'}
        response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert, auth=(idrac_username, idrac_password))
    if response.status_code == 200:
        logging.info("\n- PASS: status code %s returned for POST command to reset BIOS to default settings" % response.status_code)
    else:
        logging.error("\n- FAIL, Command failed, status code is %s" % response.status_code)
        # Dump the full response object for debugging the failure.
        logging.error(str(response.__dict__))
        sys.exit(0)
def reboot_server():
    """Reboot (or power on) the server via the Redfish ComputerSystem.Reset action.

    If the server is ON: attempt a GracefulShutdown first, poll the power
    state, fall back to ForceOff after a 5 minute timeout, then power the
    server back ON. If the server is already OFF: just power it ON.
    Exits the script on any failed POST or an unknown power state.
    """
    if args["x"]:
        response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
    else:
        response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert,auth=(idrac_username, idrac_password))
    data = response.json()
    logging.info("\n- INFO, Current server power state is: %s" % data['PowerState'])
    if data['PowerState'] == "On":
        url = 'https://%s/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset' % idrac_ip
        # Graceful shutdown is attempted first so the OS can close cleanly.
        payload = {'ResetType': 'GracefulShutdown'}
        if args["x"]:
            headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
            response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
        else:
            headers = {'content-type': 'application/json'}
            response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
        if response.status_code == 204:
            logging.info("- PASS, POST command passed to gracefully power OFF server, status code return is %s" % response.status_code)
            logging.info("- INFO, script will now verify the server was able to perform a graceful shutdown. If the server was unable to perform a graceful shutdown, forced shutdown will be invoked in 5 minutes")
            time.sleep(15)
            start_time = datetime.now()
        else:
            logging.error("\n- FAIL, Command failed to gracefully power OFF server, status code is: %s\n" % response.status_code)
            logging.error("Extended Info Message: {0}".format(response.json()))
            sys.exit(0)
        # Poll power state until OFF, or force the shutdown after 5 minutes.
        while True:
            if args["x"]:
                response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
            else:
                response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert,auth=(idrac_username, idrac_password))
            data = response.json()
            # "H:MM:SS" prefix of the elapsed timedelta; lexical comparison
            # below works because the format is fixed-width and zero-padded.
            current_time = str(datetime.now() - start_time)[0:7]
            if data['PowerState'] == "Off":
                logging.info("- PASS, GET command passed to verify graceful shutdown was successful and server is in OFF state")
                break
            elif current_time >= "0:05:00":
                logging.info("- INFO, unable to perform graceful shutdown, server will now perform forced shutdown")
                payload = {'ResetType': 'ForceOff'}
                if args["x"]:
                    headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
                    response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
                else:
                    headers = {'content-type': 'application/json'}
                    response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
                if response.status_code == 204:
                    logging.info("- PASS, POST command passed to perform forced shutdown, status code return is %s" % response.status_code)
                    time.sleep(15)
                    if args["x"]:
                        response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
                    else:
                        response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert,auth=(idrac_username, idrac_password))
                    data = response.json()
                    if data['PowerState'] == "Off":
                        logging.info("- PASS, GET command passed to verify forced shutdown was successful and server is in OFF state")
                        break
                    else:
                        logging.error("- FAIL, server not in OFF state, current power status is %s" % data['PowerState'])
                        sys.exit(0)
            else:
                continue
        # Server is now OFF; power it back ON to apply the BIOS reset flag.
        payload = {'ResetType': 'On'}
        if args["x"]:
            headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
            response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
        else:
            headers = {'content-type': 'application/json'}
            response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
        if response.status_code == 204:
            logging.info("- PASS, Command passed to power ON server, status code return is %s" % response.status_code)
        else:
            logging.error("\n- FAIL, Command failed to power ON server, status code is: %s\n" % response.status_code)
            logging.error("Extended Info Message: {0}".format(response.json()))
            sys.exit(0)
    elif data['PowerState'] == "Off":
        url = 'https://%s/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset' % idrac_ip
        payload = {'ResetType': 'On'}
        if args["x"]:
            headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
            response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
        else:
            headers = {'content-type': 'application/json'}
            response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
        if response.status_code == 204:
            logging.info("- PASS, Command passed to power ON server, code return is %s" % response.status_code)
        else:
            logging.error("\n- FAIL, Command failed to power ON server, status code is: %s\n" % response.status_code)
            logging.error("Extended Info Message: {0}".format(response.json()))
            sys.exit(0)
    else:
        logging.error("- FAIL, unable to get current server power state to perform either reboot or power on")
        sys.exit(0)
if __name__ == "__main__":
    if args["script_examples"]:
        script_examples()
    # NOTE: due to and/or precedence this also accepts -u/-p/-x without -ip;
    # kept as-is for backward compatibility with existing invocations.
    if args["ip"] and args["ssl"] or args["u"] or args["p"] or args["x"]:
        idrac_ip = args["ip"]
        idrac_username = args["u"]
        if args["p"]:
            idrac_password = args["p"]
        if not args["p"] and not args["x"] and args["u"]:
            # No password and no token: prompt interactively.
            idrac_password = getpass.getpass("\n- Argument -p not detected, pass in iDRAC user %s password: " % args["u"])
        # Certificate verification is enabled only by an explicit "-ssl true";
        # any other value (or absence) disables it.
        if args["ssl"]:
            verify_cert = args["ssl"].lower() == "true"
        else:
            verify_cert = False
        check_supported_idrac_version()
    else:
        logging.error("\n- FAIL, invalid argument values or not all required parameters passed in. See help text or argument --script-examples for more details.")
        sys.exit(0)
    reset_bios()
    if args["noreboot"]:
        # Typo fixed in message below: "Rest" -> "Reset".
        logging.info("- INFO, --noreboot argument detected. Reset to defaults flag is still set and will happen on next server manual reboot")
    else:
        logging.info("- INFO, rebooting server to perform BIOS reset to defaults operation")
        reboot_server()
/ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/bbsplit.sh |
usage(){
# Print the BBSplit help/usage text (tool description, indexing, mapping,
# output and JVM options) to stdout.
echo "
BBSplit
Written by Brian Bushnell, from Dec. 2010 - present
Last modified June 11, 2018
Description:  Maps reads to multiple references simultaneously.
Outputs reads to a file for the reference they best match, with multiple options for dealing with ambiguous mappings.
To index:     bbsplit.sh build=<1> ref_x=<reference fasta> ref_y=<another reference fasta>
To map:       bbsplit.sh build=<1> in=<reads> out_x=<output file> out_y=<another output file>
To be concise, and do everything in one command:
bbsplit.sh ref=x.fa,y.fa in=reads.fq basename=o%.fq
that is equivalent to
bbsplit.sh build=1 in=reads.fq ref_x=x.fa ref_y=y.fa out_x=ox.fq out_y=oy.fq
By default paired reads will yield interleaved output, but you can use the # symbol to produce twin output files.
For example, basename=o%_#.fq will produce ox_1.fq, ox_2.fq, oy_1.fq, and oy_2.fq.
Indexing Parameters (required when building the index):
ref=<file,file>     A list of references, or directories containing fasta files.
ref_<name>=<ref.fa> Alternate, longer way to specify references. e.g., ref_ecoli=ecoli.fa
                    These can also be comma-delimited lists of files; e.g., ref_a=a1.fa,a2.fa,a3.fa
build=<1>           If multiple references are indexed in the same directory, each needs a unique build ID.
path=<.>            Specify the location to write the index, if you don't want it in the current working directory.
Input Parameters:
build=<1>           Designate index to use.  Corresponds to the number specified when building the index.
in=<reads.fq>       Primary reads input; required parameter.
in2=<reads2.fq>     For paired reads in two files.
qin=<auto>          Set to 33 or 64 to specify input quality value ASCII offset.
interleaved=<auto>  True forces paired/interleaved input; false forces single-ended mapping.
                    If not specified, interleaved status will be autodetected from read names.
Mapping Parameters:
maxindel=<20>       Don't look for indels longer than this.  Lower is faster.  Set to >=100k for RNA-seq.
minratio=<0.56>     Fraction of max alignment score required to keep a site.  Higher is faster.
minhits=<1>         Minimum number of seed hits required for candidate sites.  Higher is faster.
ambiguous=<best>    Set behavior on ambiguously-mapped reads (with multiple top-scoring mapping locations).
                       best    (use the first best site)
                       toss    (consider unmapped)
                       random  (select one top-scoring site randomly)
                       all     (retain all top-scoring sites.  Does not work yet with SAM output)
ambiguous2=<best>   Set behavior only for reads that map ambiguously to multiple different references.
                    Normal 'ambiguous=' controls behavior on all ambiguous reads;
                    Ambiguous2 excludes reads that map ambiguously within a single reference.
                       best    (use the first best site)
                       toss    (consider unmapped)
                       all     (write a copy to the output for each reference to which it maps)
                       split   (write a copy to the AMBIGUOUS_ output for each reference to which it maps)
qtrim=<true>        Quality-trim ends to Q5 before mapping.  Options are 'l' (left), 'r' (right), and 'lr' (both).
untrim=<true>       Undo trimming after mapping.  Untrimmed bases will be soft-clipped in cigar strings.
Output Parameters:
out_<name>=<file>   Output reads that map to the reference <name> to <file>.
basename=prefix%suffix     Equivalent to multiple out_%=prefix%suffix expressions, in which each % is replaced by the name of a reference file.
bs=<file>           Write a shell script to 'file' that will turn the sam output into a sorted, indexed bam file.
scafstats=<file>    Write statistics on how many reads mapped to which scaffold to this file.
refstats=<file>     Write statistics on how many reads were assigned to which reference to this file.
                    Unmapped reads whose mate mapped to a reference are considered assigned and will be counted.
nzo=t               Only print lines with nonzero coverage.
***** Notes *****
Almost all BBMap parameters can be used; run bbmap.sh for more details.
Exceptions include the 'nodisk' flag, which BBSplit does not support.
BBSplit is recommended for fastq and fasta output, not for sam/bam output.
When the reference sequences are shorter than read length, use Seal instead of BBSplit.
Java Parameters:
-Xmx                This will set Java's memory usage, overriding autodetection.
                    -Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
                    The max is typically 85% of physical memory.
-eoom               This flag will cause the process to exit if an
                    out-of-memory exception occurs.  Requires Java 8u92+.
-da                 Disable assertions.
This list is not complete.  For more information, please consult $DIRdocs/readme.txt
Please contact Brian Bushnell at bbushnell@lbl.gov if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
# It resolves any chain of symlinks so DIR ends up as the real directory
# containing this script, then restores the caller's working directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
  cd "$(dirname "$DIR")"
  DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Java classpath points at the 'current' directory next to this script.
CP="$DIR""current/"
JNI="-Djava.library.path=""$DIR""jni/"
# NOTE: JNI is deliberately cleared here, disabling the native-library path.
JNI=""
# Default JVM heap flags; calcXmx below may replace them based on free RAM.
z="-Xmx1g"
z2="-Xms1g"
set=0
# With no arguments, or -h/--help, print usage and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
	usage
	exit
fi
# Compute JVM heap sizes: honor an explicit -Xmx from the user (parseXmx
# sets set=1), otherwise size the heap from available RAM via freeRam.
calcXmx () {
	source "$DIR""/calcmem.sh"
	setEnvironment
	parseXmx "$@"
	if [[ $set == 1 ]]; then
		return
	fi
	# Request up to 84% of free RAM, with a 3200 MB floor.
	freeRam 3200m 84
	z="-Xmx${RAM}m"
	z2="-Xms${RAM}m"
}
calcXmx "$@"
# Assemble and run the BBSplitter java command, passing through all user
# arguments after the tool's default mapping parameters.
function bbsplit() {
	local CMD="java $EA $EOOM $z $z2 $JNI -cp $CP align2.BBSplitter ow=t fastareadlen=500 minhits=1 minratio=0.56 maxindel=20 qtrim=rl untrim=t trimq=6 $@"
	# Echo the full command to stderr for reproducibility, then execute it.
	echo $CMD >&2
	eval $CMD
}
bbsplit "$@"
/CodeIntel-2.0.0b19-cp34-cp34m-macosx_10_12_x86_64.whl/codeintel/codeintel2/lang_go.py | from __future__ import absolute_import
import os
import sys
import json
import logging
import process
import time
try:
from codeintel2.common import LazyClassAttribute
except ImportError:
import warnings
warnings.warn("Unable to import zope.cachedescriptors.property")
# Fallback to regular properties.
LazyClassAttribute = property
import ciElementTree as ET
import which
import SilverCity
from SilverCity.Lexer import Lexer
from SilverCity import ScintillaConstants
from codeintel2.accessor import AccessorCache
from codeintel2.citadel import CitadelLangIntel, CitadelBuffer
from codeintel2.common import Trigger, TRG_FORM_CALLTIP, TRG_FORM_CPLN, TRG_FORM_DEFN, CILEDriver, Definition, CodeIntelError
from codeintel2.langintel import ParenStyleCalltipIntelMixin, ProgLangTriggerIntelMixin, PythonCITDLExtractorMixin
from codeintel2.udl import UDLBuffer
from codeintel2.tree import tree_from_cix
from SilverCity.ScintillaConstants import (
SCE_C_COMMENT, SCE_C_COMMENTDOC, SCE_C_COMMENTDOCKEYWORD,
SCE_C_COMMENTDOCKEYWORDERROR, SCE_C_COMMENTLINE,
SCE_C_COMMENTLINEDOC, SCE_C_DEFAULT, SCE_C_IDENTIFIER, SCE_C_NUMBER,
SCE_C_OPERATOR, SCE_C_STRING, SCE_C_CHARACTER, SCE_C_STRINGEOL, SCE_C_WORD)
try:
from xpcom.server import UnwrapObject
_xpcom_ = True
except ImportError:
_xpcom_ = False
#---- globals
lang = "Go"
log = logging.getLogger("codeintel.go")
# log.setLevel(logging.DEBUG)
try:
sys.path.append(os.path.dirname(__file__))
from langinfo_go import GoLangInfo
except:
class GoLangInfo:
reserved_keywords = set([])
predeclared_identifiers = set([])
predeclared_functions = set([])
default_encoding = "utf-8"
import styles
if lang not in styles.StateMap:
map = styles.StateMap['C++'].copy()
styles.addSharedStyles(map)
styles.StateMap[lang] = map
finally:
sys.path.pop()
#---- Lexer class
class GoLexer(Lexer):
    """SilverCity lexer for Go source, reusing the Scintilla C++ lexer
    (SCLEX_CPP) with Go keyword lists."""
    lang = lang
    def __init__(self):
        self._properties = SilverCity.PropertySet()
        # Go has no dedicated Scintilla lexer here; the C++ one is close enough.
        self._lexer = SilverCity.find_lexer_module_by_id(ScintillaConstants.SCLEX_CPP)
        # Keyword list 0: reserved words; list 1: predeclared identifiers/functions.
        self._keyword_lists = [
            SilverCity.WordList(' '.join(sorted(GoLangInfo.reserved_keywords))),
            SilverCity.WordList(' '.join(
                sorted(GoLangInfo.predeclared_identifiers.
                    union(GoLangInfo.predeclared_functions)))),
        ]
#---- LangIntel class
class GoLangIntel(CitadelLangIntel,
                  ParenStyleCalltipIntelMixin,
                  ProgLangTriggerIntelMixin,
                  PythonCITDLExtractorMixin):
    """Code-intelligence engine for Go: trigger detection plus completion,
    calltip and go-to-definition evaluation, backed by the external
    'gocode', 'godef' and 'go' command-line tools."""
    lang = lang
    citdl_from_literal_type = {"string": "string"}
    calltip_trg_chars = tuple('(')
    trg_chars = tuple(' (."')
    # Maps gocode completion classes to codeintel completion-type names
    # (controls which icon the completion UI shows).
    completion_name_mapping = {
        'var': 'variable',
        'func': 'function',
        'package': 'module',
        'type': 'class',
        'const': 'constant',
    }
    def codeintel_type_from_completion_data(self, completion_entry):
        """Given a dictionary containing 'class' and 'type' keys return a
        codeintel type. Used for selecting icon in completion list.
        """
        completion_type = self.completion_name_mapping.get(completion_entry['class']) or completion_entry['class']
        # Slices and maps get dedicated icons regardless of their class.
        if completion_entry['type'].startswith('[]'):
            completion_type = '@variable'
        elif completion_entry['type'].startswith('map['):
            completion_type = '%variable'
        return completion_type
    def trg_from_pos(self, buf, pos, implicit=True,
                     lang=None):
        """Return a completion/calltip Trigger for the char left of *pos*,
        or a falsy value when nothing should trigger."""
        log.debug("trg_from_pos(pos=%r)", pos)
        if pos < 2:
            return None
        accessor = buf.accessor
        last_pos = pos - 1
        last_char = accessor.char_at_pos(last_pos)
        if last_char == '.': # must be "complete-object-members" or None
            log.debug(" triggered 'complete-object-members'")
            return Trigger(self.lang, TRG_FORM_CPLN,
                           "object-members", pos, implicit)
        elif last_char == '(':
            log.debug(" triggered 'calltip-call-signature'")
            return Trigger(self.lang, TRG_FORM_CALLTIP, "call-signature", pos, implicit)
        elif last_char in '\'"`' or last_char == "@":
            # Check if it's an import
            log.debug(" checking for import statement")
            ac = AccessorCache(accessor, pos, fetchsize=100)
            prev_style = accessor.style_at_pos(last_pos-1)
            if prev_style == SCE_C_STRING:
                # It's the end of a string then, not what we are looking for.
                return False
            # Bug in Komodo 8 - peek at the previous style to ensure the cache
            # is primed. Not needed in Komodo 9 onwards.
            p, ch, style = ac.peekPrevPosCharStyle()
            log.debug(" p1 %r, ch %r, style %r", p, ch, style)
            # Scan backwards (bounded to 100 tokens) looking for the
            # 'import' keyword that would make this an import completion.
            loops_left = 100
            while loops_left:
                loops_left -= 1
                p, ch, style = ac.getPrecedingPosCharStyle(style)
                log.debug(" p %r, ch %r, style %r", p, ch, style)
                if p is None:
                    break
                if style == SCE_C_WORD:
                    p, text = ac.getTextBackWithStyle(style)
                    log.debug(" p %r, text %r", p, text)
                    if text == "import":
                        log.debug(" triggered 'complete-imports'")
                        return Trigger(self.lang, TRG_FORM_CPLN, "imports", pos, implicit)
                    break
                elif style not in (SCE_C_DEFAULT, SCE_C_OPERATOR, SCE_C_STRING,
                                   SCE_C_COMMENT, SCE_C_COMMENTDOC, SCE_C_COMMENTLINE):
                    break
        log.debug(" triggered 'complete-any'")
        return Trigger(self.lang, TRG_FORM_CPLN, "any", pos, implicit)
    def preceding_trg_from_pos(self, buf, pos, curr_pos, preceding_trg_terminators=None, DEBUG=False):
        """Find the trigger point preceding *pos*, preferring a 3-char
        "names" trigger over other trigger types when both apply."""
        #DEBUG = True
        if DEBUG:
            log.debug("pos: %d", pos)
            log.debug("ch: %r", buf.accessor.char_at_pos(pos))
            log.debug("curr_pos: %d", curr_pos)
        if pos != curr_pos and self._last_trg_type == "names":
            # The last trigger type was a 3-char trigger "names", we must try
            # triggering from the same point as before to get other available
            # trigger types defined at the same position or before.
            trg = ProgLangTriggerIntelMixin.preceding_trg_from_pos(
                self, buf, pos+2, curr_pos, preceding_trg_terminators,
                DEBUG=DEBUG)
        else:
            trg = ProgLangTriggerIntelMixin.preceding_trg_from_pos(
                self, buf, pos, curr_pos, preceding_trg_terminators,
                DEBUG=DEBUG)
        names_trigger = None
        style = None
        if pos > 0:
            accessor = buf.accessor
            if pos == curr_pos:
                # We actually care about whats left of the cursor.
                pos -= 1
            style = accessor.style_at_pos(pos)
            if DEBUG:
                style_names = buf.style_names_from_style_num(style)
                log.debug(" style: %s (%s)", style, ", ".join(style_names))
            if style in (1,2):
                ac = AccessorCache(accessor, pos)
                prev_pos, prev_ch, prev_style = ac.getPrecedingPosCharStyle(style)
                if prev_style is not None and (pos - prev_pos) > 3:
                    # We need at least 3 character for proper completion handling.
                    names_trigger = self.trg_from_pos(buf, prev_pos + 4, implicit=False)
        if DEBUG:
            log.debug("trg: %r", trg)
            log.debug("names_trigger: %r", names_trigger)
            log.debug("last_trg_type: %r", self._last_trg_type)
        if names_trigger:
            if not trg:
                trg = names_trigger
            # Two triggers, choose the best one.
            elif trg.pos == names_trigger.pos:
                if self._last_trg_type != "names":
                    # The names trigger gets priority over the other trigger
                    # types, unless the previous trigger was also a names trg.
                    trg = names_trigger
            elif trg.pos < names_trigger.pos:
                trg = names_trigger
        if trg:
            self._last_trg_type = trg.type
        return trg
    def async_eval_at_trg(self, buf, trg, ctlr):
        """Dispatch trigger evaluation: definitions via godef, import lists
        via 'go list std', everything else via gocode."""
        # if a definition lookup, use godef
        log.debug('async_eval_at_trg:: %r' % (trg, ))
        if trg.form == TRG_FORM_DEFN:
            return self.lookup_defn(buf, trg, ctlr)
        elif trg.name == "go-complete-imports":
            return self.available_imports(buf, trg, ctlr)
        # otherwise use gocode
        return self.invoke_gocode(buf, trg, ctlr)
    def lookup_defn(self, buf, trg, ctlr):
        """Resolve a go-to-definition request by piping the buffer to godef
        and parsing its "path:line[:col]" plus declaration output."""
        env = buf.env
        godef_path = self._get_gotool('godef', env)
        # We can't store the path and watch prefs because this isn't a
        # true xpcom object.
        if godef_path is None:
            godef_path = 'godef'
        cmd = [godef_path, '-i=true', '-t=true', '-f=%s' % buf.path, '-o=%s' % trg.pos]
        log.debug("running [%s]", cmd)
        p = process.ProcessOpen(cmd, env=buf.env.get_all_envvars())
        output, error = p.communicate(buf.accessor.text)
        if error:
            log.debug("'gocode' stderr: [%s]", error)
            raise CodeIntelError(error)
        lines = output.splitlines()
        log.debug(output)
        defparts = lines[0].rsplit(":",2)
        if len(defparts) == 2:
            # current file
            path = buf.path
            line = defparts[0]
        else:
            # other file
            path = defparts[0]
            line = defparts[1]
        # Second output line is "<name> <type description>".
        name, typeDesc = lines[1].split(' ', 1)
        d = Definition("Go",path,
                blobname=None,
                lpath=None,
                name=name,
                line=line,
                ilk='function' if typeDesc.startswith('func') else typeDesc,
                citdl=None,
                signature=typeDesc,
                doc='\n'.join(lines[1:]),
                )
        log.debug(d)
        ctlr.start(buf, trg)
        ctlr.set_defns([d])
        ctlr.done("success")
    # Cache of stdlib package names keyed by go executable path.
    _packages_from_exe = {}
    def available_imports(self, buf, trg, ctlr):
        """Complete import paths with the Go standard library package list
        obtained from 'go list std' (cached per go executable)."""
        env = buf.env
        go_exe = self.get_go_exe(env)
        if not go_exe:
            raise CodeIntelError("Unable to locate go executable")
        if go_exe not in self._packages_from_exe:
            cmd = [go_exe, 'list', 'std']
            cwd = None
            if buf.path != "<Unsaved>":
                cwd = os.path.dirname(buf.path)
            env_vars = env.get_all_envvars()
            log.debug("running cmd %r", cmd)
            try:
                p = process.ProcessOpen(cmd, cwd=cwd, env=env_vars)
            except OSError as e:
                raise CodeIntelError("Error executing '%s': %s" % (cmd, e))
            output, error = p.communicate()
            if error:
                log.warn("cmd %r error [%s]", cmd, error)
                raise CodeIntelError(error)
            package_names = [x.strip() for x in output.splitlines() if x]
            log.debug("retrieved %d package names", len(package_names))
            self._packages_from_exe[go_exe] = package_names
        stdlib_imports = self._packages_from_exe.get(go_exe, [])
        ctlr.start(buf, trg)
        ctlr.set_cplns([("import", name) for name in stdlib_imports])
        ctlr.done("success")
    def invoke_gocode(self, buf, trg, ctlr):
        """Run gocode autocomplete on the buffer and translate its JSON
        output into codeintel completions or calltips."""
        pos = trg.pos
        if trg.type == "call-signature":
            pos = pos - 1
        env = buf.env
        gocode_path = self._get_gotool('gocode', buf.env)
        # We can't store the path and watch prefs because this isn't a
        # true xpcom object.
        if gocode_path is None:
            gocode_path = 'gocode'
        cmd = [gocode_path, '-f=json', 'autocomplete', buf.path, '%s' % pos]
        log.debug("running [%s]", cmd)
        try:
            p = process.ProcessOpen(cmd, env=env.get_all_envvars())
        except OSError as e:
            log.error("Error executing '%s': %s", cmd[0], e)
            return
        output, error = p.communicate(buf.accessor.text)
        if error:
            log.warn("'%s' stderr: [%s]", cmd[0], error)
        try:
            completion_data = json.loads(output)
            log.debug('full completion_data: %r', completion_data)
            # gocode output is [offset, [entries...]]; keep just the entries.
            completion_data = completion_data[1]
        except IndexError:
            # exit on empty gocode output
            return
        except ValueError as e:
            log.exception('Exception while parsing json')
            return
        ctlr.start(buf, trg)
        completion_data = [x for x in completion_data if x['class'] != 'PANIC'] # remove PANIC entries if present
        if not completion_data:
            # Only contained PANIC entries.
            ctlr.error("no valid response from gocode: check gocode is running")
            ctlr.done("error")
            return
        if trg.type == "object-members":
            ctlr.set_cplns([(self.codeintel_type_from_completion_data(entry), entry['name']) for entry in completion_data])
            ctlr.done("success")
        elif trg.type == "call-signature":
            entry = completion_data[0]
            ctlr.set_calltips(['%s %s' % (entry['name'], entry['type'])])
            ctlr.done("success")
        elif trg.type == "any" and trg.implicit == False:
            ctlr.set_cplns([(self.codeintel_type_from_completion_data(entry), entry['name']) for entry in completion_data])
            ctlr.done("success")
    def get_go_exe(self, env):
        """Return the go executable path from prefs, PATH, or a bare 'go'."""
        golang_path = env.get_pref("golangDefaultLocation", "")
        if golang_path and golang_path != "":
            return golang_path
        path = [d.strip()
                for d in env.get_envvar("PATH", "").split(os.pathsep)
                if d.strip()]
        try:
            return which.which('go', path=path)
        except which.WhichError:
            return 'go' # Might as well go for broke
    def _get_gotool(self, tool_name, env):
        """Locate a Go helper tool (e.g. gocode, godef) or return None."""
        # First try the pref
        # Then try which
        # Finally try relative to the golang executable
        tool_path = env.get_pref(tool_name + "DefaultLocation", "")
        if tool_path and os.path.exists(tool_path):
            return tool_path
        path = [d.strip()
                for d in env.get_envvar("PATH", "").split(os.pathsep)
                if d.strip()]
        try:
            return which.which(tool_name, path=path)
        except which.WhichError:
            pass
        go_exe = self.get_go_exe(env)
        if go_exe:
            ext = sys.platform.startswith("win") and ".exe" or ""
            tool_path = os.path.join(os.path.dirname(go_exe), tool_name + ext)
            if os.path.exists(tool_path):
                return tool_path
        return None
#---- Buffer class
class GoBuffer(CitadelBuffer):
    """Citadel buffer for Go documents; defines completion fill-up/stop
    character sets and the Scintilla style prefix."""
    lang = lang
    # '/' removed as import packages use that
    cpln_fillup_chars = "~`!@#$%^&()-=+{}[]|\\;:'\",.<>? "
    cpln_stop_chars = "~`!@#$%^&*()-=+{}[]|\\;:'\",.<>? "
    ssl_lang = lang
    # The ScintillaConstants all start with this prefix:
    sce_prefixes = ["SCE_C_"]
#---- CILE Driver class
class GoCILEDriver(CILEDriver):
    """CILE (scanner) driver for Go: compiles the bundled outline.go helper
    with the user's go toolchain and runs it to produce CIX code outlines."""
    lang = lang
    # Cached (go_exe, outline_exe_path_or_None, error_or_None) triple.
    _gooutline_executable_and_error = None
    @LazyClassAttribute
    def golib_dir(self):
        # Directory holding the bundled Go helper sources (outline.go).
        ext_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
        return os.path.join(ext_path, "golib")
    def compile_gooutline(self, buf):
        """Build (once per go executable) the 'outline' helper binary in the
        temp directory and return its path; raise CodeIntelError on failure."""
        go_exe = buf.langintel.get_go_exe(buf.env)
        if not go_exe:
            raise CodeIntelError("Unable to locate go executable")
        # Recompile when the go executable changed since the last build.
        if self._gooutline_executable_and_error is None or go_exe != self._gooutline_executable_and_error[0]:
            self._gooutline_executable_and_error = (go_exe, None, "Unknown Error")
            import tempfile
            tempdir = tempfile.gettempdir()
            outline_exe = os.path.join(tempdir, "outline")
            if sys.platform.startswith("win"):
                outline_exe += ".exe"
            outline_src = os.path.join(self.golib_dir, "outline.go")
            cmd = [go_exe, "build", outline_src]
            cwd = tempdir
            env = buf.env.get_all_envvars()
            try:
                # Compile the executable.
                p = process.ProcessOpen(cmd, cwd=cwd, env=env, stdin=None)
                output, error = p.communicate()
                if error:
                    log.warn("'%s' stderr: [%s]", cmd, error)
                # Remember the executable.
                self._gooutline_executable_and_error = (go_exe, outline_exe, None)
            except Exception as ex:
                error_message = "Unable to compile 'outline.go'" + str(ex)
                self._gooutline_executable_and_error = (go_exe, None, error_message)
        if self._gooutline_executable_and_error[1]:
            return self._gooutline_executable_and_error[1]
        raise CodeIntelError(self._gooutline_executable_and_error[2])
    def scan_purelang(self, buf, mtime=None, lang="Go"):
        """Scan the given GoBuffer return an ElementTree (conforming
        to the CIX schema) giving a summary of its code elements.
        @param buf {GoBuffer} is the Go buffer to scan
        @param mtime {int} is a modified time for the file (in seconds since
            the "epoch"). If it is not specified the _current_ time is used.
            Note that the default is not to stat() the file and use that
            because the given content might not reflect the saved file state.
        """
        # Dev Notes:
        # - This stub implementation of the Go CILE return an "empty"
        #   summary for the given content, i.e. CIX content that says "there
        #   are no code elements in this Go content".
        # - Use the following command (in the extension source dir) to
        #   debug/test your scanner:
        #       codeintel scan -p -l Go <example-Go-file>
        #   "codeintel" is a script available in the Komodo SDK.
        log.info("scan '%s'", buf.path)
        if mtime is None:
            mtime = int(time.time())
        # The 'path' attribute must use normalized dir separators.
        if sys.platform.startswith("win"):
            path = buf.path.replace('\\', '/')
        else:
            path = buf.path
        try:
            gooutline_exe_path = self.compile_gooutline(buf)
        except Exception as e:
            log.error("Error compiling outline: %s", e)
            raise
        cmd = [gooutline_exe_path, buf.path]
        env = buf.env.get_all_envvars()
        log.debug("running [%s]", cmd)
        try:
            p = process.ProcessOpen(cmd, env=env)
        except OSError as e:
            log.error("Error executing '%s': %s", cmd, e)
            return
        output, error = p.communicate()
        if error:
            log.warn("'%s' stderr: [%s]", cmd[0], error)
        # Wrap the helper's raw CIX fragment in the codeintel root element.
        xml = '<codeintel version="2.0">\n' + output + '</codeintel>'
        return tree_from_cix(xml)
#---- registration
def register(mgr):
    """Register the Go language service classes with the codeintel Manager.

    Called by the codeintel framework at extension load time; wires the
    lexer, buffer, langintel and CILE driver classes for the Go language.
    """
    lang_info = dict(
        silvercity_lexer=GoLexer(),
        buf_class=GoBuffer,
        langintel_class=GoLangIntel,
        import_handler_class=None,
        cile_driver_class=GoCILEDriver,
        is_cpln_lang=True,
    )
    mgr.set_lang_info(lang, **lang_info)
/Beeswarm-0.7.18.tar.gz/Beeswarm-0.7.18/beeswarm/server/webapp/static/js/modernizr-2.6.2-respond-1.1.0.min.js | ;window.Modernizr=function(a,b,c){function D(a){j.cssText=a}function E(a,b){return D(n.join(a+";")+(b||""))}function F(a,b){return typeof a===b}function G(a,b){return!!~(""+a).indexOf(b)}function H(a,b){for(var d in a){var e=a[d];if(!G(e,"-")&&j[e]!==c)return b=="pfx"?e:!0}return!1}function I(a,b,d){for(var e in a){var f=b[a[e]];if(f!==c)return d===!1?a[e]:F(f,"function")?f.bind(d||b):f}return!1}function J(a,b,c){var d=a.charAt(0).toUpperCase()+a.slice(1),e=(a+" "+p.join(d+" ")+d).split(" ");return F(b,"string")||F(b,"undefined")?H(e,b):(e=(a+" "+q.join(d+" ")+d).split(" "),I(e,b,c))}function K(){e.input=function(c){for(var d=0,e=c.length;d<e;d++)u[c[d]]=c[d]in k;return u.list&&(u.list=!!b.createElement("datalist")&&!!a.HTMLDataListElement),u}("autocomplete autofocus list placeholder max min multiple pattern required step".split(" ")),e.inputtypes=function(a){for(var d=0,e,f,h,i=a.length;d<i;d++)k.setAttribute("type",f=a[d]),e=k.type!=="text",e&&(k.value=l,k.style.cssText="position:absolute;visibility:hidden;",/^range$/.test(f)&&k.style.WebkitAppearance!==c?(g.appendChild(k),h=b.defaultView,e=h.getComputedStyle&&h.getComputedStyle(k,null).WebkitAppearance!=="textfield"&&k.offsetHeight!==0,g.removeChild(k)):/^(search|tel)$/.test(f)||(/^(url|email)$/.test(f)?e=k.checkValidity&&k.checkValidity()===!1:e=k.value!=l)),t[a[d]]=!!e;return t}("search tel url email datetime date month week time datetime-local number range color".split(" "))}var d="2.6.2",e={},f=!0,g=b.documentElement,h="modernizr",i=b.createElement(h),j=i.style,k=b.createElement("input"),l=":)",m={}.toString,n=" -webkit- -moz- -o- -ms- ".split(" "),o="Webkit Moz O ms",p=o.split(" "),q=o.toLowerCase().split(" "),r={svg:"http://www.w3.org/2000/svg"},s={},t={},u={},v=[],w=v.slice,x,y=function(a,c,d,e){var 
f,i,j,k,l=b.createElement("div"),m=b.body,n=m||b.createElement("body");if(parseInt(d,10))while(d--)j=b.createElement("div"),j.id=e?e[d]:h+(d+1),l.appendChild(j);return f=["­",'<style id="s',h,'">',a,"</style>"].join(""),l.id=h,(m?l:n).innerHTML+=f,n.appendChild(l),m||(n.style.background="",n.style.overflow="hidden",k=g.style.overflow,g.style.overflow="hidden",g.appendChild(n)),i=c(l,a),m?l.parentNode.removeChild(l):(n.parentNode.removeChild(n),g.style.overflow=k),!!i},z=function(b){var c=a.matchMedia||a.msMatchMedia;if(c)return c(b).matches;var d;return y("@media "+b+" { #"+h+" { position: absolute; } }",function(b){d=(a.getComputedStyle?getComputedStyle(b,null):b.currentStyle)["position"]=="absolute"}),d},A=function(){function d(d,e){e=e||b.createElement(a[d]||"div"),d="on"+d;var f=d in e;return f||(e.setAttribute||(e=b.createElement("div")),e.setAttribute&&e.removeAttribute&&(e.setAttribute(d,""),f=F(e[d],"function"),F(e[d],"undefined")||(e[d]=c),e.removeAttribute(d))),e=null,f}var a={select:"input",change:"input",submit:"form",reset:"form",error:"img",load:"img",abort:"img"};return d}(),B={}.hasOwnProperty,C;!F(B,"undefined")&&!F(B.call,"undefined")?C=function(a,b){return B.call(a,b)}:C=function(a,b){return b in a&&F(a.constructor.prototype[b],"undefined")},Function.prototype.bind||(Function.prototype.bind=function(b){var c=this;if(typeof c!="function")throw new TypeError;var d=w.call(arguments,1),e=function(){if(this instanceof e){var a=function(){};a.prototype=c.prototype;var f=new a,g=c.apply(f,d.concat(w.call(arguments)));return Object(g)===g?g:f}return c.apply(b,d.concat(w.call(arguments)))};return e}),s.flexbox=function(){return J("flexWrap")},s.canvas=function(){var a=b.createElement("canvas");return!!a.getContext&&!!a.getContext("2d")},s.canvastext=function(){return!!e.canvas&&!!F(b.createElement("canvas").getContext("2d").fillText,"function")},s.webgl=function(){return!!a.WebGLRenderingContext},s.touch=function(){var c;return"ontouchstart"in 
a||a.DocumentTouch&&b instanceof DocumentTouch?c=!0:y(["@media (",n.join("touch-enabled),("),h,")","{#modernizr{top:9px;position:absolute}}"].join(""),function(a){c=a.offsetTop===9}),c},s.geolocation=function(){return"geolocation"in navigator},s.postmessage=function(){return!!a.postMessage},s.websqldatabase=function(){return!!a.openDatabase},s.indexedDB=function(){return!!J("indexedDB",a)},s.hashchange=function(){return A("hashchange",a)&&(b.documentMode===c||b.documentMode>7)},s.history=function(){return!!a.history&&!!history.pushState},s.draganddrop=function(){var a=b.createElement("div");return"draggable"in a||"ondragstart"in a&&"ondrop"in a},s.websockets=function(){return"WebSocket"in a||"MozWebSocket"in a},s.rgba=function(){return D("background-color:rgba(150,255,150,.5)"),G(j.backgroundColor,"rgba")},s.hsla=function(){return D("background-color:hsla(120,40%,100%,.5)"),G(j.backgroundColor,"rgba")||G(j.backgroundColor,"hsla")},s.multiplebgs=function(){return D("background:url(https://),url(https://),red url(https://)"),/(url\s*\(.*?){3}/.test(j.background)},s.backgroundsize=function(){return J("backgroundSize")},s.borderimage=function(){return J("borderImage")},s.borderradius=function(){return J("borderRadius")},s.boxshadow=function(){return J("boxShadow")},s.textshadow=function(){return b.createElement("div").style.textShadow===""},s.opacity=function(){return E("opacity:.55"),/^0.55$/.test(j.opacity)},s.cssanimations=function(){return J("animationName")},s.csscolumns=function(){return J("columnCount")},s.cssgradients=function(){var a="background-image:",b="gradient(linear,left top,right bottom,from(#9f9),to(white));",c="linear-gradient(left top,#9f9, white);";return D((a+"-webkit- ".split(" ").join(b+a)+n.join(c+a)).slice(0,-a.length)),G(j.backgroundImage,"gradient")},s.cssreflections=function(){return J("boxReflect")},s.csstransforms=function(){return!!J("transform")},s.csstransforms3d=function(){var a=!!J("perspective");return a&&"webkitPerspective"in 
g.style&&y("@media (transform-3d),(-webkit-transform-3d){#modernizr{left:9px;position:absolute;height:3px;}}",function(b,c){a=b.offsetLeft===9&&b.offsetHeight===3}),a},s.csstransitions=function(){return J("transition")},s.fontface=function(){var a;return y('@font-face {font-family:"font";src:url("https://")}',function(c,d){var e=b.getElementById("smodernizr"),f=e.sheet||e.styleSheet,g=f?f.cssRules&&f.cssRules[0]?f.cssRules[0].cssText:f.cssText||"":"";a=/src/i.test(g)&&g.indexOf(d.split(" ")[0])===0}),a},s.generatedcontent=function(){var a;return y(["#",h,"{font:0/0 a}#",h,':after{content:"',l,'";visibility:hidden;font:3px/1 a}'].join(""),function(b){a=b.offsetHeight>=3}),a},s.video=function(){var a=b.createElement("video"),c=!1;try{if(c=!!a.canPlayType)c=new Boolean(c),c.ogg=a.canPlayType('video/ogg; codecs="theora"').replace(/^no$/,""),c.h264=a.canPlayType('video/mp4; codecs="avc1.42E01E"').replace(/^no$/,""),c.webm=a.canPlayType('video/webm; codecs="vp8, vorbis"').replace(/^no$/,"")}catch(d){}return c},s.audio=function(){var a=b.createElement("audio"),c=!1;try{if(c=!!a.canPlayType)c=new Boolean(c),c.ogg=a.canPlayType('audio/ogg; codecs="vorbis"').replace(/^no$/,""),c.mp3=a.canPlayType("audio/mpeg;").replace(/^no$/,""),c.wav=a.canPlayType('audio/wav; codecs="1"').replace(/^no$/,""),c.m4a=(a.canPlayType("audio/x-m4a;")||a.canPlayType("audio/aac;")).replace(/^no$/,"")}catch(d){}return c},s.localstorage=function(){try{return localStorage.setItem(h,h),localStorage.removeItem(h),!0}catch(a){return!1}},s.sessionstorage=function(){try{return sessionStorage.setItem(h,h),sessionStorage.removeItem(h),!0}catch(a){return!1}},s.webworkers=function(){return!!a.Worker},s.applicationcache=function(){return!!a.applicationCache},s.svg=function(){return!!b.createElementNS&&!!b.createElementNS(r.svg,"svg").createSVGRect},s.inlinesvg=function(){var a=b.createElement("div");return 
a.innerHTML="<svg/>",(a.firstChild&&a.firstChild.namespaceURI)==r.svg},s.smil=function(){return!!b.createElementNS&&/SVGAnimate/.test(m.call(b.createElementNS(r.svg,"animate")))},s.svgclippaths=function(){return!!b.createElementNS&&/SVGClipPath/.test(m.call(b.createElementNS(r.svg,"clipPath")))};for(var L in s)C(s,L)&&(x=L.toLowerCase(),e[x]=s[L](),v.push((e[x]?"":"no-")+x));return e.input||K(),e.addTest=function(a,b){if(typeof a=="object")for(var d in a)C(a,d)&&e.addTest(d,a[d]);else{a=a.toLowerCase();if(e[a]!==c)return e;b=typeof b=="function"?b():b,typeof f!="undefined"&&f&&(g.className+=" "+(b?"":"no-")+a),e[a]=b}return e},D(""),i=k=null,function(a,b){function k(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x<style>"+b+"</style>",d.insertBefore(c.lastChild,d.firstChild)}function l(){var a=r.elements;return typeof a=="string"?a.split(" "):a}function m(a){var b=i[a[g]];return b||(b={},h++,a[g]=h,i[h]=b),b}function n(a,c,f){c||(c=b);if(j)return c.createElement(a);f||(f=m(c));var g;return f.cache[a]?g=f.cache[a].cloneNode():e.test(a)?g=(f.cache[a]=f.createElem(a)).cloneNode():g=f.createElem(a),g.canHaveChildren&&!d.test(a)?f.frag.appendChild(g):g}function o(a,c){a||(a=b);if(j)return a.createDocumentFragment();c=c||m(a);var d=c.frag.cloneNode(),e=0,f=l(),g=f.length;for(;e<g;e++)d.createElement(f[e]);return d}function p(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return r.shivMethods?n(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+l().join().replace(/\w+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(r,b.frag)}function q(a){a||(a=b);var c=m(a);return 
r.shivCSS&&!f&&!c.hasCSS&&(c.hasCSS=!!k(a,"article,aside,figcaption,figure,footer,header,hgroup,nav,section{display:block}mark{background:#FF0;color:#000}")),j||p(a,c),a}var c=a.html5||{},d=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,e=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,f,g="_html5shiv",h=0,i={},j;(function(){try{var a=b.createElement("a");a.innerHTML="<xyz></xyz>",f="hidden"in a,j=a.childNodes.length==1||function(){b.createElement("a");var a=b.createDocumentFragment();return typeof a.cloneNode=="undefined"||typeof a.createDocumentFragment=="undefined"||typeof a.createElement=="undefined"}()}catch(c){f=!0,j=!0}})();var r={elements:c.elements||"abbr article aside audio bdi canvas data datalist details figcaption figure footer header hgroup mark meter nav output progress section summary time video",shivCSS:c.shivCSS!==!1,supportsUnknownElements:j,shivMethods:c.shivMethods!==!1,type:"default",shivDocument:q,createElement:n,createDocumentFragment:o};a.html5=r,q(b)}(this,b),e._version=d,e._prefixes=n,e._domPrefixes=q,e._cssomPrefixes=p,e.mq=z,e.hasEvent=A,e.testProp=function(a){return H([a])},e.testAllProps=J,e.testStyles=y,e.prefixed=function(a,b,c){return b?J(a,b,c):J(a,"pfx")},g.className=g.className.replace(/(^|\s)no-js(\s|$)/,"$1$2")+(f?" 
js "+v.join(" "):""),e}(this,this.document),function(a,b,c){function d(a){return"[object Function]"==o.call(a)}function e(a){return"string"==typeof a}function f(){}function g(a){return!a||"loaded"==a||"complete"==a||"uninitialized"==a}function h(){var a=p.shift();q=1,a?a.t?m(function(){("c"==a.t?B.injectCss:B.injectJs)(a.s,0,a.a,a.x,a.e,1)},0):(a(),h()):q=0}function i(a,c,d,e,f,i,j){function k(b){if(!o&&g(l.readyState)&&(u.r=o=1,!q&&h(),l.onload=l.onreadystatechange=null,b)){"img"!=a&&m(function(){t.removeChild(l)},50);for(var d in y[c])y[c].hasOwnProperty(d)&&y[c][d].onload()}}var j=j||B.errorTimeout,l=b.createElement(a),o=0,r=0,u={t:d,s:c,e:f,a:i,x:j};1===y[c]&&(r=1,y[c]=[]),"object"==a?l.data=c:(l.src=c,l.type=a),l.width=l.height="0",l.onerror=l.onload=l.onreadystatechange=function(){k.call(this,r)},p.splice(e,0,u),"img"!=a&&(r||2===y[c]?(t.insertBefore(l,s?null:n),m(k,j)):y[c].push(l))}function j(a,b,c,d,f){return q=0,b=b||"j",e(a)?i("c"==b?v:u,a,b,this.i++,c,d,f):(p.splice(this.i++,0,a),1==p.length&&h()),this}function k(){var a=B;return a.loader={load:j,i:0},a}var l=b.documentElement,m=a.setTimeout,n=b.getElementsByTagName("script")[0],o={}.toString,p=[],q=0,r="MozAppearance"in l.style,s=r&&!!b.createRange().compareNode,t=s?l:n.parentNode,l=a.opera&&"[object Opera]"==o.call(a.opera),l=!!b.attachEvent&&!l,u=r?"object":l?"script":"img",v=l?"script":u,w=Array.isArray||function(a){return"[object Array]"==o.call(a)},x=[],y={},z={timeout:function(a,b){return b.length&&(a.timeout=b[0]),a}},A,B;B=function(a){function b(a){var a=a.split("!"),b=x.length,c=a.pop(),d=a.length,c={url:c,origUrl:c,prefixes:a},e,f,g;for(f=0;f<d;f++)g=a[f].split("="),(e=z[g.shift()])&&(c=e(c,g));for(f=0;f<b;f++)c=x[f](c);return c}function g(a,e,f,g,h){var 
i=b(a),j=i.autoCallback;i.url.split(".").pop().split("?").shift(),i.bypass||(e&&(e=d(e)?e:e[a]||e[g]||e[a.split("/").pop().split("?")[0]]),i.instead?i.instead(a,e,f,g,h):(y[i.url]?i.noexec=!0:y[i.url]=1,f.load(i.url,i.forceCSS||!i.forceJS&&"css"==i.url.split(".").pop().split("?").shift()?"c":c,i.noexec,i.attrs,i.timeout),(d(e)||d(j))&&f.load(function(){k(),e&&e(i.origUrl,h,g),j&&j(i.origUrl,h,g),y[i.url]=2})))}function h(a,b){function c(a,c){if(a){if(e(a))c||(j=function(){var a=[].slice.call(arguments);k.apply(this,a),l()}),g(a,j,b,0,h);else if(Object(a)===a)for(n in m=function(){var b=0,c;for(c in a)a.hasOwnProperty(c)&&b++;return b}(),a)a.hasOwnProperty(n)&&(!c&&!--m&&(d(j)?j=function(){var a=[].slice.call(arguments);k.apply(this,a),l()}:j[n]=function(a){return function(){var b=[].slice.call(arguments);a&&a.apply(this,b),l()}}(k[n])),g(a[n],j,b,n,h))}else!c&&l()}var h=!!a.test,i=a.load||a.both,j=a.callback||f,k=j,l=a.complete||f,m,n;c(h?a.yep:a.nope,!!i),i&&c(i)}var i,j,l=this.yepnope.loader;if(e(a))g(a,0,l,0);else if(w(a))for(i=0;i<a.length;i++)j=a[i],e(j)?g(j,0,l,0):w(j)?B(j):Object(j)===j&&h(j,l);else Object(a)===a&&h(a,l)},B.addPrefix=function(a,b){z[a]=b},B.addFilter=function(a){x.push(a)},B.errorTimeout=1e4,null==b.readyState&&b.addEventListener&&(b.readyState="loading",b.addEventListener("DOMContentLoaded",A=function(){b.removeEventListener("DOMContentLoaded",A,0),b.readyState="complete"},0)),a.yepnope=k(),a.yepnope.executeStack=h,a.yepnope.injectJs=function(a,c,d,e,i,j){var k=b.createElement("script"),l,o,e=e||B.errorTimeout;k.src=a;for(o in d)k.setAttribute(o,d[o]);c=j?h:c||f,k.onreadystatechange=k.onload=function(){!l&&g(k.readyState)&&(l=1,c(),k.onload=k.onreadystatechange=null)},m(function(){l||(l=1,c(1))},e),i?k.onload():n.parentNode.insertBefore(k,n)},a.yepnope.injectCss=function(a,c,d,e,g,i){var e=b.createElement("link"),j,c=i?h:c||f;e.href=a,e.rel="stylesheet",e.type="text/css";for(j in 
d)e.setAttribute(j,d[j]);g||(n.parentNode.insertBefore(e,n),m(c,0))}}(this,document),Modernizr.load=function(){yepnope.apply(window,[].slice.call(arguments,0))};
/*! matchMedia() polyfill - Test a CSS media type/query in JS. Authors & copyright (c) 2012: Scott Jehl, Paul Irish, Nicholas Zakas. Dual MIT/BSD license */
/*! NOTE: If you're already including a window.matchMedia polyfill via Modernizr or otherwise, you don't need this part */
window.matchMedia=window.matchMedia||(function(e,f){var c,a=e.documentElement,b=a.firstElementChild||a.firstChild,d=e.createElement("body"),g=e.createElement("div");g.id="mq-test-1";g.style.cssText="position:absolute;top:-100em";d.style.background="none";d.appendChild(g);return function(h){g.innerHTML='­<style media="'+h+'"> #mq-test-1 { width: 42px; }</style>';a.insertBefore(d,b);c=g.offsetWidth==42;a.removeChild(d);return{matches:c,media:h}}})(document);
/*! Respond.js v1.1.0: min/max-width media query polyfill. (c) Scott Jehl. MIT/GPLv2 Lic. j.mp/respondjs */
(function(e){e.respond={};respond.update=function(){};respond.mediaQueriesSupported=e.matchMedia&&e.matchMedia("only all").matches;if(respond.mediaQueriesSupported){return}var w=e.document,s=w.documentElement,i=[],k=[],q=[],o={},h=30,f=w.getElementsByTagName("head")[0]||s,g=w.getElementsByTagName("base")[0],b=f.getElementsByTagName("link"),d=[],a=function(){var D=b,y=D.length,B=0,A,z,C,x;for(;B<y;B++){A=D[B],z=A.href,C=A.media,x=A.rel&&A.rel.toLowerCase()==="stylesheet";if(!!z&&x&&!o[z]){if(A.styleSheet&&A.styleSheet.rawCssText){m(A.styleSheet.rawCssText,z,C);o[z]=true}else{if((!/^([a-zA-Z:]*\/\/)/.test(z)&&!g)||z.replace(RegExp.$1,"").split("/")[0]===e.location.host){d.push({href:z,media:C})}}}}u()},u=function(){if(d.length){var x=d.shift();n(x.href,function(y){m(y,x.href,x.media);o[x.href]=true;u()})}},m=function(I,x,z){var G=I.match(/@media[^\{]+\{([^\{\}]*\{[^\}\{]*\})+/gi),J=G&&G.length||0,x=x.substring(0,x.lastIndexOf("/")),y=function(K){return K.replace(/(url\()['"]?([^\/\)'"][^:\)'"]+)['"]?(\))/g,"$1"+x+"$2$3")},A=!J&&z,D=0,C,E,F,B,H;if(x.length){x+="/"}if(A){J=1}for(;D<J;D++){C=0;if(A){E=z;k.push(y(I))}else{E=G[D].match(/@media *([^\{]+)\{([\S\s]+?)$/)&&RegExp.$1;k.push(RegExp.$2&&y(RegExp.$2))}B=E.split(",");H=B.length;for(;C<H;C++){F=B[C];i.push({media:F.split("(")[0].match(/(only\s+)?([a-zA-Z]+)\s?/)&&RegExp.$2||"all",rules:k.length-1,hasquery:F.indexOf("(")>-1,minw:F.match(/\(min\-width:[\s]*([\s]*[0-9\.]+)(px|em)[\s]*\)/)&&parseFloat(RegExp.$1)+(RegExp.$2||""),maxw:F.match(/\(max\-width:[\s]*([\s]*[0-9\.]+)(px|em)[\s]*\)/)&&parseFloat(RegExp.$1)+(RegExp.$2||"")})}}j()},l,r,v=function(){var z,A=w.createElement("div"),x=w.body,y=false;A.style.cssText="position:absolute;font-size:1em;width:1em";if(!x){x=y=w.createElement("body");x.style.background="none"}x.appendChild(A);s.insertBefore(x,s.firstChild);z=A.offsetWidth;if(y){s.removeChild(x)}else{x.removeChild(A)}z=p=parseFloat(z);return z},p,j=function(I){var 
x="clientWidth",B=s[x],H=w.compatMode==="CSS1Compat"&&B||w.body[x]||B,D={},G=b[b.length-1],z=(new Date()).getTime();if(I&&l&&z-l<h){clearTimeout(r);r=setTimeout(j,h);return}else{l=z}for(var E in i){var K=i[E],C=K.minw,J=K.maxw,A=C===null,L=J===null,y="em";if(!!C){C=parseFloat(C)*(C.indexOf(y)>-1?(p||v()):1)}if(!!J){J=parseFloat(J)*(J.indexOf(y)>-1?(p||v()):1)}if(!K.hasquery||(!A||!L)&&(A||H>=C)&&(L||H<=J)){if(!D[K.media]){D[K.media]=[]}D[K.media].push(k[K.rules])}}for(var E in q){if(q[E]&&q[E].parentNode===f){f.removeChild(q[E])}}for(var E in D){var M=w.createElement("style"),F=D[E].join("\n");M.type="text/css";M.media=E;f.insertBefore(M,G.nextSibling);if(M.styleSheet){M.styleSheet.cssText=F}else{M.appendChild(w.createTextNode(F))}q.push(M)}},n=function(x,z){var y=c();if(!y){return}y.open("GET",x,true);y.onreadystatechange=function(){if(y.readyState!=4||y.status!=200&&y.status!=304){return}z(y.responseText)};if(y.readyState==4){return}y.send(null)},c=(function(){var x=false;try{x=new XMLHttpRequest()}catch(y){x=new ActiveXObject("Microsoft.XMLHTTP")}return function(){return x}})();a();respond.update=a;function t(){j(true)}if(e.addEventListener){e.addEventListener("resize",t,false)}else{if(e.attachEvent){e.attachEvent("onresize",t)}}})(this); | PypiClean |
/OASYS1-APS-Extensions-1.0.87.tar.gz/OASYS1-APS-Extensions-1.0.87/orangecontrib/aps/shadow/widgets/extension/ow_power_plot_xy.py |
import os, sys
import time
import numpy
import scipy.ndimage.filters as filters
import scipy.ndimage.interpolation as interpolation
import scipy.ndimage.fourier as fourier
from PyQt5.QtWidgets import QMessageBox, QFileDialog, QInputDialog
from PyQt5.QtGui import QTextCursor
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from oasys.widgets.gui import ConfirmDialog
from oasys.util.oasys_util import EmittingStream
from orangecontrib.shadow.util.shadow_objects import ShadowBeam
from orangecontrib.shadow.util.shadow_util import ShadowCongruence, ShadowPlot
from orangecontrib.shadow.widgets.gui.ow_automatic_element import AutomaticElement
from orangecontrib.aps.shadow.util.gui import PowerPlotXYWidget
class PowerPlotXY(AutomaticElement):
    """OASYS widget that accumulates Shadow beams into a 2-D power-density
    plot, with optional autosave to HDF5 and post-processing (rebin/smooth)."""
    # --- Orange/OASYS widget metadata -------------------------------------
    name = "Power Plot XY"
    description = "Display Data Tools: Power Plot XY"
    icon = "icons/plot_xy_power.png"
    maintainer = "Luca Rebuffi"
    maintainer_email = "lrebuffi(@at@)anl.gov"
    priority = 5.1
    category = "Display Data Tools"
    keywords = ["data", "file", "load", "read"]
    # Input channel: beams arrive through setBeam().
    inputs = [("Input Beam", ShadowBeam, "setBeam")]
    # Fixed pixel size of the plot area in the main pane.
    IMAGE_WIDTH = 878
    IMAGE_HEIGHT = 570
    want_main_area=1
    plot_canvas=None
    input_beam=None
    # --- persisted GUI settings (Setting = saved with the workspace) ------
    # Screen position: 0 = on image plane, 1 = retraced to a new position.
    image_plane=Setting(0)
    image_plane_new_position=Setting(10.0)
    image_plane_rel_abs_position=Setting(0)
    # Histogram axes: indices into the X/Y column combos (0=X, 1=Y, 2=Z).
    x_column_index=Setting(0)
    y_column_index=Setting(2)
    # Axis ranges: 0 = default, 1 = manual min/max below.
    x_range=Setting(0)
    x_range_min=Setting(0.0)
    x_range_max=Setting(0.0)
    y_range=Setting(0)
    y_range_min=Setting(0.0)
    y_range_max=Setting(0.0)
    # Which power to plot: 0=transmitted, 1=absorbed (lost), 2=absorbed (good).
    rays=Setting(1)
    number_of_bins=Setting(100)
    title=Setting("X,Z")
    # Incremental accumulation and autosave options.
    keep_result=Setting(1)
    autosave_partial_results = Setting(0)
    autosave = Setting(0)
    autosave_file_name = Setting("autosave_power_density.hdf5")
    # Kind of calculation: 0=from rays, 1=flat, 2=Gaussian, 3=Lorentzian;
    # analytical distributions may replace poor-statistics histograms.
    kind_of_calculation = Setting(0)
    replace_poor_statistic = Setting(0)
    good_rays_limit = Setting(100)
    center_x = Setting(0.0)
    center_y = Setting(0.0)
    sigma_x = Setting(0.0)
    sigma_y = Setting(0.0)
    gamma = Setting(0.0)
    # Post-processing: file to reload, rebin targets, and smoothing filter
    # (index into the filter combo: 0=Gaussian ... 6=Fill Holes).
    loaded_plot_file_name = "<load hdf5 file>"
    new_nbins_h = Setting(25)
    new_nbins_v = Setting(25)
    filter = Setting(3)
    filter_sigma_h = Setting(1.0)
    filter_sigma_v = Setting(1.0)
    filter_mode = Setting(0)
    filter_cval = Setting(0.0)
    filter_spline_order = Setting(2)
    masking_level = Setting(1e-3)
    # --- runtime accumulation state (not persisted) -----------------------
    cumulated_ticket=None
    plotted_ticket = None
    energy_min = None
    energy_max = None
    energy_step = None
    total_power = None
    cumulated_total_power = None
    plotted_ticket_original = None
    view_type=Setting(1)
    autosave_file = None
    def __init__(self):
        """Build the widget GUI: control area with three settings tabs
        (Plot / Histogram / Post Processing) and a main area holding the
        plot canvas plus a captured-stdout pane."""
        super().__init__(show_automatic_box=False)
        button_box = oasysgui.widgetBox(self.controlArea, "", addSpace=False, orientation="horizontal")
        gui.button(button_box, self, "Plot Data", callback=self.plot_cumulated_data, height=45)
        gui.button(button_box, self, "Save Plot", callback=self.save_cumulated_data, height=45)
        gui.separator(self.controlArea, 10)
        self.tabs_setting = oasysgui.tabWidget(self.controlArea)
        self.tabs_setting.setFixedWidth(self.CONTROL_AREA_WIDTH-5)
        # graph tab
        tab_set = oasysgui.createTabPage(self.tabs_setting, "Plot Settings")
        tab_gen = oasysgui.createTabPage(self.tabs_setting, "Histogram Settings")
        tab_post = oasysgui.createTabPage(self.tabs_setting, "Post Processing")
        # Screen position: choose image plane vs. retraced position.
        screen_box = oasysgui.widgetBox(tab_set, "Screen Position Settings", addSpace=True, orientation="vertical", height=120)
        self.image_plane_combo = gui.comboBox(screen_box, self, "image_plane", label="Position of the Image",
                                              items=["On Image Plane", "Retraced"], labelWidth=260,
                                              callback=self.set_ImagePlane, sendSelectedValue=False, orientation="horizontal")
        self.image_plane_box = oasysgui.widgetBox(screen_box, "", addSpace=False, orientation="vertical", height=50)
        self.image_plane_box_empty = oasysgui.widgetBox(screen_box, "", addSpace=False, orientation="vertical", height=50)
        oasysgui.lineEdit(self.image_plane_box, self, "image_plane_new_position", "Image Plane new Position", labelWidth=220, valueType=float, orientation="horizontal")
        gui.comboBox(self.image_plane_box, self, "image_plane_rel_abs_position", label="Position Type", labelWidth=250,
                     items=["Absolute", "Relative"], sendSelectedValue=False, orientation="horizontal")
        self.set_ImagePlane()
        # Axis variables and manual ranges.
        general_box = oasysgui.widgetBox(tab_set, "Variables Settings", addSpace=True, orientation="vertical", height=350)
        self.x_column = gui.comboBox(general_box, self, "x_column_index", label="X Column",labelWidth=70,
                                     items=["1: X",
                                            "2: Y",
                                            "3: Z",
                                     ],
                                     sendSelectedValue=False, orientation="horizontal")
        gui.comboBox(general_box, self, "x_range", label="X Range", labelWidth=250,
                     items=["<Default>",
                            "Set.."],
                     callback=self.set_XRange, sendSelectedValue=False, orientation="horizontal")
        self.xrange_box = oasysgui.widgetBox(general_box, "", addSpace=True, orientation="vertical", height=100)
        self.xrange_box_empty = oasysgui.widgetBox(general_box, "", addSpace=True, orientation="vertical", height=100)
        oasysgui.lineEdit(self.xrange_box, self, "x_range_min", "X min", labelWidth=220, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(self.xrange_box, self, "x_range_max", "X max", labelWidth=220, valueType=float, orientation="horizontal")
        self.set_XRange()
        self.y_column = gui.comboBox(general_box, self, "y_column_index", label="Y Column",labelWidth=70,
                                     items=["1: X",
                                            "2: Y",
                                            "3: Z",
                                     ],
                                     sendSelectedValue=False, orientation="horizontal")
        gui.comboBox(general_box, self, "y_range", label="Y Range",labelWidth=250,
                     items=["<Default>",
                            "Set.."],
                     callback=self.set_YRange, sendSelectedValue=False, orientation="horizontal")
        self.yrange_box = oasysgui.widgetBox(general_box, "", addSpace=True, orientation="vertical", height=100)
        self.yrange_box_empty = oasysgui.widgetBox(general_box, "", addSpace=True, orientation="vertical", height=100)
        oasysgui.lineEdit(self.yrange_box, self, "y_range_min", "Y min", labelWidth=220, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(self.yrange_box, self, "y_range_max", "Y max", labelWidth=220, valueType=float, orientation="horizontal")
        self.set_YRange()
        self.cb_rays = gui.comboBox(general_box, self, "rays", label="Power", labelWidth=250,
                                    items=["Transmitted", "Absorbed (Lost)", "Absorbed (Still Good)"],
                                    sendSelectedValue=False, orientation="horizontal")
        # Autosave of the accumulated plot to an HDF5 file.
        autosave_box = oasysgui.widgetBox(tab_gen, "Autosave", addSpace=True, orientation="vertical", height=85)
        gui.comboBox(autosave_box, self, "autosave", label="Save automatically plot into file", labelWidth=250,
                     items=["No", "Yes"],
                     sendSelectedValue=False, orientation="horizontal", callback=self.set_autosave)
        self.autosave_box_1 = oasysgui.widgetBox(autosave_box, "", addSpace=False, orientation="horizontal", height=25)
        self.autosave_box_2 = oasysgui.widgetBox(autosave_box, "", addSpace=False, orientation="horizontal", height=25)
        self.le_autosave_file_name = oasysgui.lineEdit(self.autosave_box_1, self, "autosave_file_name", "File Name", labelWidth=100, valueType=str, orientation="horizontal")
        gui.button(self.autosave_box_1, self, "...", callback=self.selectAutosaveFile)
        # Incremental accumulation of successive beams.
        incremental_box = oasysgui.widgetBox(tab_gen, "Incremental Result", addSpace=True, orientation="vertical", height=120)
        gui.comboBox(incremental_box, self, "keep_result", label="Keep Result", labelWidth=250,
                     items=["No", "Yes"], sendSelectedValue=False, orientation="horizontal", callback=self.set_autosave)
        self.cb_autosave_partial_results = gui.comboBox(incremental_box, self, "autosave_partial_results", label="Save partial plots into file", labelWidth=250,
                                                        items=["No", "Yes"], sendSelectedValue=False, orientation="horizontal")
        gui.button(incremental_box, self, "Clear", callback=self.clearResults)
        self.set_autosave()
        # Histogram binning and optional analytical replacement distribution.
        histograms_box = oasysgui.widgetBox(tab_gen, "Histograms settings", addSpace=True, orientation="vertical", height=270)
        oasysgui.lineEdit(histograms_box, self, "number_of_bins", "Number of Bins", labelWidth=250, valueType=int, orientation="horizontal")
        gui.separator(histograms_box)
        gui.comboBox(histograms_box, self, "kind_of_calculation", label="Kind of Calculation", labelWidth=200,
                     items=["From Rays", "Flat Distribution", "Gaussian Distribution", "Lorentzian Distribution"], sendSelectedValue=False, orientation="horizontal", callback=self.set_kind_of_calculation)
        self.poor_statics_cb = gui.comboBox(histograms_box, self, "replace_poor_statistic", label="Activate on Poor Statistics", labelWidth=250,
                                            items=["No", "Yes"], sendSelectedValue=False, orientation="horizontal", callback=self.set_manage_poor_statistics)
        self.poor_statistics_box_1 = oasysgui.widgetBox(histograms_box, "", addSpace=False, orientation="vertical", height=30)
        self.poor_statistics_box_2 = oasysgui.widgetBox(histograms_box, "", addSpace=False, orientation="vertical", height=30)
        # NOTE(review): this rebinds self.le_autosave_file_name (already used
        # above for the autosave file field) — looks like a copy-paste slip;
        # a name such as le_good_rays_limit was probably intended. Confirm no
        # other method relies on the attribute before renaming.
        self.le_autosave_file_name = oasysgui.lineEdit(self.poor_statistics_box_1, self, "good_rays_limit", "Good Rays Limit", labelWidth=100, valueType=int, orientation="horizontal")
        self.kind_of_calculation_box_1 = oasysgui.widgetBox(histograms_box, "", addSpace=False, orientation="vertical", height=110)
        self.kind_of_calculation_box_2 = oasysgui.widgetBox(histograms_box, "", addSpace=False, orientation="vertical", height=110)
        self.kind_of_calculation_box_3 = oasysgui.widgetBox(histograms_box, "", addSpace=False, orientation="vertical", height=110)
        # Gaussian parameters (box 2) and Lorentzian parameters (box 3).
        self.le_g_sigma_x = oasysgui.lineEdit(self.kind_of_calculation_box_2, self, "sigma_x", "Sigma H", labelWidth=100, valueType=float, orientation="horizontal")
        self.le_g_sigma_y = oasysgui.lineEdit(self.kind_of_calculation_box_2, self, "sigma_y", "Sigma V", labelWidth=100, valueType=float, orientation="horizontal")
        self.le_g_center_x = oasysgui.lineEdit(self.kind_of_calculation_box_2, self, "center_x", "Center H", labelWidth=100, valueType=float, orientation="horizontal")
        self.le_g_center_y = oasysgui.lineEdit(self.kind_of_calculation_box_2, self, "center_y", "Center V", labelWidth=100, valueType=float, orientation="horizontal")
        self.le_l_gamma = oasysgui.lineEdit(self.kind_of_calculation_box_3, self, "gamma", "Gamma", labelWidth=100, valueType=float, orientation="horizontal")
        self.le_l_center_x = oasysgui.lineEdit(self.kind_of_calculation_box_3, self, "center_x", "Center H", labelWidth=100, valueType=float, orientation="horizontal")
        self.le_l_center_y = oasysgui.lineEdit(self.kind_of_calculation_box_3, self, "center_y", "Center V", labelWidth=100, valueType=float, orientation="horizontal")
        self.set_kind_of_calculation()
        # post processing
        post_box = oasysgui.widgetBox(tab_post, "Post Processing Setting", addSpace=False, orientation="vertical", height=500)
        post_box_1 = oasysgui.widgetBox(post_box, "", addSpace=False, orientation="horizontal", height=25)
        self.le_loaded_plot_file_name = oasysgui.lineEdit(post_box_1, self, "loaded_plot_file_name", "Loaded File", labelWidth=100, valueType=str, orientation="horizontal")
        gui.button(post_box_1, self, "...", callback=self.selectPlotFile)
        gui.separator(post_box)
        button_box = oasysgui.widgetBox(post_box, "", addSpace=False, orientation="vertical")
        gui.button(button_box, self, "Reset", callback=self.reloadPlot, height=35)
        gui.separator(button_box)
        gui.button(button_box, self, "Invert", callback=self.invertPlot, height=35)
        gui.separator(post_box)
        button_box = oasysgui.widgetBox(post_box, "", addSpace=False, orientation="horizontal")
        gui.button(button_box, self, "Rebin Plot", callback=self.rebinPlot, height=35)
        post_box_0 = oasysgui.widgetBox(post_box, "", addSpace=False, orientation="vertical", height=60)
        oasysgui.lineEdit(post_box_0, self, "new_nbins_h", "Nr. Bins H", labelWidth=200, valueType=int, orientation="horizontal")
        oasysgui.lineEdit(post_box_0, self, "new_nbins_v", "Nr. Bins V", labelWidth=200, valueType=int, orientation="horizontal")
        button_box = oasysgui.widgetBox(post_box, "", addSpace=False, orientation="horizontal")
        gui.button(button_box, self, "Smooth Plot", callback=self.smoothPlot, height=35)
        gui.separator(post_box)
        # Smoothing filter selection; parameter panels are toggled per filter.
        gui.comboBox(post_box, self, "filter", label="Filter", labelWidth=200,
                     items=["Gaussian",
                            "Spline",
                            "Uniform",
                            "Fourier-Gaussian",
                            "Fourier-Ellipsoid",
                            "Fourier-Uniform",
                            "Fill Holes"
                     ], sendSelectedValue=False, orientation="horizontal", callback=self.set_Filter)
        self.post_box_1 = oasysgui.widgetBox(post_box, "", addSpace=False, orientation="vertical", height=110)
        self.post_box_2 = oasysgui.widgetBox(post_box, "", addSpace=False, orientation="vertical", height=110)
        self.post_box_3 = oasysgui.widgetBox(post_box, "", addSpace=False, orientation="vertical", height=110)
        self.post_box_4 = oasysgui.widgetBox(post_box, "", addSpace=False, orientation="vertical", height=110)
        oasysgui.lineEdit(self.post_box_1, self, "filter_sigma_h", "Sigma/Size H", labelWidth=200, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(self.post_box_1, self, "filter_sigma_v", "Sigma/Size V", labelWidth=200, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(self.post_box_2, self, "filter_sigma_h", "Sigma/Size H", labelWidth=200, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(self.post_box_2, self, "filter_sigma_v", "Sigma/Size V", labelWidth=200, valueType=float, orientation="horizontal")
        self.cb_filter_mode = gui.comboBox(self.post_box_2, self, "filter_mode", label="Mode", labelWidth=200,
                                           items=["reflect", "constant", "nearest", "mirror", "wrap"],
                                           sendSelectedValue=False, orientation="horizontal", callback=self.set_FilterMode)
        self.le_filter_cval = oasysgui.lineEdit(self.post_box_2, self, "filter_cval", "Constant Value", labelWidth=250, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(self.post_box_3, self, "filter_spline_order", "Spline Order", labelWidth=250, valueType=int, orientation="horizontal")
        gui.separator(post_box)
        oasysgui.lineEdit(post_box, self, "masking_level", "Mask if < factor of max value", labelWidth=250, valueType=float, orientation="horizontal")
        self.set_Filter()
        # Main area: plot tab + captured-output tab.
        self.main_tabs = oasysgui.tabWidget(self.mainArea)
        plot_tab = oasysgui.createTabPage(self.main_tabs, "Plots")
        out_tab = oasysgui.createTabPage(self.main_tabs, "Output")
        view_box = oasysgui.widgetBox(plot_tab, "Plotting", addSpace=False, orientation="vertical", width=self.IMAGE_WIDTH)
        view_box_1 = oasysgui.widgetBox(view_box, "", addSpace=False, orientation="vertical", width=350)
        gui.comboBox(view_box_1, self, "view_type", label="Plot Accumulated Results", labelWidth=320,
                     items=["No", "Yes"], sendSelectedValue=False, orientation="horizontal")
        self.image_box = gui.widgetBox(plot_tab, "Plot Result", addSpace=True, orientation="vertical")
        self.image_box.setFixedHeight(self.IMAGE_HEIGHT)
        self.image_box.setFixedWidth(self.IMAGE_WIDTH)
        self.shadow_output = oasysgui.textArea(height=580, width=800)
        out_box = gui.widgetBox(out_tab, "System Output", addSpace=True, orientation="horizontal")
        out_box.layout().addWidget(self.shadow_output)
def clearResults(self, interactive=True):
if not interactive: proceed = True
else: proceed = ConfirmDialog.confirmed(parent=self)
if proceed:
self.input_beam = None
self.cumulated_ticket = None
self.plotted_ticket = None
self.energy_min = None
self.energy_max = None
self.energy_step = None
self.total_power = None
self.cumulated_total_power = None
if not self.autosave_file is None:
self.autosave_file.close()
self.autosave_file = None
if not self.plot_canvas is None:
self.plot_canvas.clear()
def set_kind_of_calculation(self):
self.kind_of_calculation_box_1.setVisible(self.kind_of_calculation<=1)
self.kind_of_calculation_box_2.setVisible(self.kind_of_calculation==2)
self.kind_of_calculation_box_3.setVisible(self.kind_of_calculation==3)
if self.kind_of_calculation > 0:
self.poor_statics_cb.setEnabled(True)
else:
self.poor_statics_cb.setEnabled(False)
self.replace_poor_statistic = 0
self.set_manage_poor_statistics()
def set_manage_poor_statistics(self):
self.poor_statistics_box_1.setVisible(self.replace_poor_statistic==1)
self.poor_statistics_box_2.setVisible(self.replace_poor_statistic==0)
def set_autosave(self):
self.autosave_box_1.setVisible(self.autosave==1)
self.autosave_box_2.setVisible(self.autosave==0)
self.cb_autosave_partial_results.setEnabled(self.autosave==1 and self.keep_result==1)
def set_ImagePlane(self):
self.image_plane_box.setVisible(self.image_plane==1)
self.image_plane_box_empty.setVisible(self.image_plane==0)
def set_XRange(self):
self.xrange_box.setVisible(self.x_range == 1)
self.xrange_box_empty.setVisible(self.x_range == 0)
def set_YRange(self):
self.yrange_box.setVisible(self.y_range == 1)
self.yrange_box_empty.setVisible(self.y_range == 0)
def set_Filter(self):
self.post_box_1.setVisible(3<=self.filter<=5)
self.post_box_2.setVisible(self.filter==0 or self.filter==2)
self.post_box_3.setVisible(self.filter==1 )
self.post_box_4.setVisible(self.filter==6)
if self.filter==0 or self.filter==2: self.set_FilterMode()
    def set_FilterMode(self):
        # The constant border value is only used by the "constant" mode (combo index 1).
        self.le_filter_cval.setEnabled(self.filter_mode==1)
    def selectAutosaveFile(self):
        # Open a file dialog and store the chosen HDF5 autosave path in the line edit.
        self.le_autosave_file_name.setText(oasysgui.selectFileFromDialog(self, self.autosave_file_name, "Select File", file_extension_filter="HDF5 Files (*.hdf5 *.h5 *.hdf)"))
    def selectPlotFile(self):
        # Load a previously saved power-density plot from an HDF5 file and display it,
        # optionally merging (sum, then optional average) with the currently shown plot.
        file_name = oasysgui.selectFileFromDialog(self, None, "Select File", file_extension_filter="HDF5 Files (*.hdf5 *.h5 *.hdf)")
        if not file_name is None:
            self.le_loaded_plot_file_name.setText(os.path.basename(os.path.normpath(file_name)))
            plot_file = ShadowPlot.PlotXYHdf5File(congruence.checkDir(file_name), mode="r")
            # Rebuild a plot "ticket" (histogram + coordinates + metadata) from the file.
            ticket = {}
            ticket["histogram"], ticket["histogram_h"], ticket["histogram_v"], attributes = plot_file.get_last_plot(dataset_name="power_density")
            ticket["bin_h_center"], ticket["bin_v_center"], ticket["h_label"], ticket["v_label"] = plot_file.get_coordinates()
            ticket["intensity"] = attributes["intensity"]
            ticket["nrays"] = attributes["total_rays"]
            ticket["good_rays"] = attributes["good_rays"]
            if self.plot_canvas is None:
                self.plot_canvas = PowerPlotXYWidget()
                self.image_box.layout().addWidget(self.plot_canvas)
            else:
                if not self.plotted_ticket is None:
                    if QMessageBox.question(self, "Load Plot", "Merge with current Plot?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No) == QMessageBox.Yes:
                        # Merging requires identical binning and identical ranges on both plots.
                        if ticket["histogram"].shape == self.plotted_ticket["histogram"].shape and \
                                ticket["bin_h_center"].shape == self.plotted_ticket["bin_h_center"].shape and \
                                ticket["bin_v_center"].shape == self.plotted_ticket["bin_v_center"].shape and \
                                ticket["bin_h_center"][0] == self.plotted_ticket["bin_h_center"][0] and \
                                ticket["bin_h_center"][-1] == self.plotted_ticket["bin_h_center"][-1] and \
                                ticket["bin_v_center"][0] == self.plotted_ticket["bin_v_center"][0] and \
                                ticket["bin_v_center"][-1] == self.plotted_ticket["bin_v_center"][-1]:
                            ticket["histogram"] += self.plotted_ticket["histogram"]
                            if QMessageBox.question(self, "Load Plot", "Average with current Plot?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No) == QMessageBox.Yes:
                                ticket["histogram"] *= 0.5
                        else:
                            raise ValueError("The plots cannot be merged: the should have same dimensions and ranges")
            # Total power = integral of the power density over the plot area (pixel area x sum).
            cumulated_power_plot = numpy.sum(ticket["histogram"])*(ticket["bin_h_center"][1]-ticket["bin_h_center"][0])*(ticket["bin_v_center"][1]-ticket["bin_v_center"][0])
            try:
                # Energies are unknown for a loaded plot, so they are zeroed.
                energy_min=0.0
                energy_max=0.0
                energy_step=0.0
                self.plot_canvas.cumulated_power_plot = cumulated_power_plot
                self.plot_canvas.plot_power_density_ticket(ticket,
                                                           ticket["h_label"],
                                                           ticket["v_label"],
                                                           cumulated_total_power=0.0,
                                                           energy_min=energy_min,
                                                           energy_max=energy_max,
                                                           energy_step=energy_step)
                self.cumulated_ticket = None
                self.plotted_ticket = ticket
                # Keep a pristine copy so reloadPlot() can restore the unfiltered data.
                self.plotted_ticket_original = ticket.copy()
            except Exception as e:
                QMessageBox.critical(self, "Error", str(e), QMessageBox.Ok)
                if self.IS_DEVELOP: raise e
    def reloadPlot(self):
        # Restore and replot the pristine (pre-filter/pre-rebin) ticket saved in
        # plotted_ticket_original, discarding any post-processing of the display.
        if not self.plotted_ticket_original is None:
            ticket = self.plotted_ticket_original.copy()
            if self.plot_canvas is None:
                self.plot_canvas = PowerPlotXYWidget()
                self.image_box.layout().addWidget(self.plot_canvas)
            # Total power = integral of the power density over the plot area.
            cumulated_power_plot = numpy.sum(ticket["histogram"])*(ticket["bin_h_center"][1]-ticket["bin_h_center"][0])*(ticket["bin_v_center"][1]-ticket["bin_v_center"][0])
            try:
                energy_min=0.0
                energy_max=0.0
                energy_step=0.0
                self.plot_canvas.cumulated_power_plot = cumulated_power_plot
                self.plot_canvas.plot_power_density_ticket(ticket,
                                                           ticket["h_label"],
                                                           ticket["v_label"],
                                                           cumulated_total_power=0.0,
                                                           energy_min=energy_min,
                                                           energy_max=energy_max,
                                                           energy_step=energy_step)
                self.plotted_ticket = ticket
            except Exception as e:
                QMessageBox.critical(self, "Error", str(e), QMessageBox.Ok)
                if self.IS_DEVELOP: raise e
    def invertPlot(self):
        # Swap the horizontal and vertical axes of the currently plotted
        # power density (transpose the histogram) and replot.
        if not self.plotted_ticket is None:
            try:
                ticket = self.plotted_ticket.copy()
                histogram = ticket["histogram"]
                h_coord = ticket["bin_h_center"]
                v_coord = ticket["bin_v_center"]
                h_coord, v_coord, histogram = self.invert(h_coord, v_coord, histogram)
                ticket["histogram"] = histogram
                ticket["bin_h_center"] = h_coord
                ticket["bin_v_center"] = v_coord
                pixel_area = (h_coord[1]-h_coord[0])*(v_coord[1]-v_coord[0])
                if self.plot_canvas is None:
                    self.plot_canvas = PowerPlotXYWidget()
                    self.image_box.layout().addWidget(self.plot_canvas)
                cumulated_power_plot = numpy.sum(histogram)*pixel_area
                energy_min = 0.0
                energy_max = 0.0
                energy_step = 0.0
                self.plot_canvas.cumulated_power_plot = cumulated_power_plot
                # Note: labels are passed swapped (v first) on purpose,
                # since the axes have been exchanged.
                self.plot_canvas.plot_power_density_ticket(ticket,
                                                           ticket["v_label"],
                                                           ticket["h_label"],
                                                           cumulated_total_power=0.0,
                                                           energy_min=energy_min,
                                                           energy_max=energy_max,
                                                           energy_step=energy_step)
                self.plotted_ticket = ticket
            except Exception as e:
                QMessageBox.critical(self, "Error", str(e), QMessageBox.Ok)
                if self.IS_DEVELOP: raise e
    def rebinPlot(self):
        # Rebin the currently plotted power density to the user-requested
        # number of bins (block averaging) and replot.
        if not self.plotted_ticket is None:
            try:
                congruence.checkStrictlyPositiveNumber(self.new_nbins_h, "Nr. Bins H")
                congruence.checkStrictlyPositiveNumber(self.new_nbins_v, "Nr. Bins V")
                ticket = self.plotted_ticket.copy()
                histogram = ticket["histogram"]
                h_coord = ticket["bin_h_center"]
                v_coord = ticket["bin_v_center"]
                h_coord, v_coord, histogram = self.rebin(h_coord, v_coord, histogram, (int(self.new_nbins_h), int(self.new_nbins_v)))
                ticket["histogram"] = histogram
                ticket["bin_h_center"] = h_coord
                ticket["bin_v_center"] = v_coord
                # Pixel area of the new, coarser grid.
                pixel_area = (h_coord[1]-h_coord[0])*(v_coord[1]-v_coord[0])
                if self.plot_canvas is None:
                    self.plot_canvas = PowerPlotXYWidget()
                    self.image_box.layout().addWidget(self.plot_canvas)
                cumulated_power_plot = numpy.sum(histogram)*pixel_area
                energy_min = 0.0
                energy_max = 0.0
                energy_step = 0.0
                self.plot_canvas.cumulated_power_plot = cumulated_power_plot
                self.plot_canvas.plot_power_density_ticket(ticket,
                                                           ticket["h_label"],
                                                           ticket["v_label"],
                                                           cumulated_total_power=0.0,
                                                           energy_min=energy_min,
                                                           energy_max=energy_max,
                                                           energy_step=energy_step)
                self.plotted_ticket = ticket
            except Exception as e:
                QMessageBox.critical(self, "Error", str(e), QMessageBox.Ok)
                if self.IS_DEVELOP: raise e
    def smoothPlot(self):
        # Apply the selected smoothing filter (scipy.ndimage spatial/Fourier filters,
        # or morphological hole filling) to the plotted power density, re-mask the
        # near-zero region, renormalize to conserve total power, and replot.
        if not self.plotted_ticket is None:
            try:
                if self.filter==0 or 2<=self.filter<=5:
                    congruence.checkStrictlyPositiveNumber(self.filter_sigma_h, "Sigma/Size H")
                    congruence.checkStrictlyPositiveNumber(self.filter_sigma_v, "Sigma/Size V")
                if self.filter == 1: congruence.checkStrictlyPositiveNumber(self.filter_spline_order, "Spline Order")
                ticket = self.plotted_ticket.copy()
                # Pixels at or below masking_level * max are zeroed again after
                # filtering, so smoothing cannot leak power into masked regions.
                mask = numpy.where(self.plotted_ticket["histogram"] <= self.plotted_ticket["histogram"].max()*self.masking_level)
                histogram = ticket["histogram"]
                h_coord = ticket["bin_h_center"]
                v_coord = ticket["bin_v_center"]
                # Remember the total before filtering to renormalize afterwards.
                norm = histogram.sum()
                pixel_area = (h_coord[1]-h_coord[0])*(v_coord[1]-v_coord[0])
                filter_mode = self.cb_filter_mode.currentText()
                if self.filter == 0:
                    histogram = filters.gaussian_filter(histogram, sigma=(self.filter_sigma_h, self.filter_sigma_v), mode=filter_mode, cval=self.filter_cval)
                elif self.filter == 1:
                    histogram = interpolation.spline_filter(histogram, order=int(self.filter_spline_order))
                elif self.filter == 2:
                    histogram = filters.uniform_filter(histogram, size=(int(self.filter_sigma_h), int(self.filter_sigma_v)), mode=filter_mode, cval=self.filter_cval)
                elif self.filter == 3:
                    # Filters 3-5 operate in Fourier space; only the real part is kept.
                    histogram = numpy.real(numpy.fft.ifft2(fourier.fourier_gaussian(numpy.fft.fft2(histogram), sigma=(self.filter_sigma_h, self.filter_sigma_v))))
                elif self.filter == 4:
                    histogram = numpy.real(numpy.fft.ifft2(fourier.fourier_ellipsoid(numpy.fft.fft2(histogram), size=(self.filter_sigma_h, self.filter_sigma_v))))
                elif self.filter == 5:
                    histogram = numpy.real(numpy.fft.ifft2(fourier.fourier_uniform(numpy.fft.fft2(histogram), size=(self.filter_sigma_h, self.filter_sigma_v))))
                elif self.filter == 6:
                    histogram = self.apply_fill_holes(histogram)
                histogram[mask] = 0.0
                # Rescale so the filtered histogram keeps the original total power.
                # NOTE(review): divides by histogram.sum() — fails if the filter
                # plus masking zeroes everything; presumably never happens in practice.
                norm /= histogram.sum()
                ticket["histogram"] = histogram*norm
                if self.plot_canvas is None:
                    self.plot_canvas = PowerPlotXYWidget()
                    self.image_box.layout().addWidget(self.plot_canvas)
                cumulated_power_plot = numpy.sum(histogram)*pixel_area
                energy_min=0.0
                energy_max=0.0
                energy_step=0.0
                self.plot_canvas.cumulated_power_plot = cumulated_power_plot
                self.plot_canvas.plot_power_density_ticket(ticket,
                                                           ticket["h_label"],
                                                           ticket["v_label"],
                                                           cumulated_total_power=0.0,
                                                           energy_min=energy_min,
                                                           energy_max=energy_max,
                                                           energy_step=energy_step)
                self.plotted_ticket = ticket
            except Exception as e:
                QMessageBox.critical(self, "Error", str(e), QMessageBox.Ok)
                if self.IS_DEVELOP: raise e
def rebin(self, x, y, z, new_shape):
shape = (new_shape[0], z.shape[0] // new_shape[0], new_shape[1], z.shape[1] // new_shape[1])
return numpy.linspace(x[0], x[-1], new_shape[0]), \
numpy.linspace(y[0], y[-1], new_shape[1]), \
z.reshape(shape).mean(-1).mean(1)
def invert(self, x, y, data):
return y, x, data.T
    def apply_fill_holes(self, histogram):
        # Fill internal holes (zero regions) of the histogram by morphological
        # reconstruction by erosion, then rescale so the total power is conserved.
        from skimage.morphology import reconstruction
        seed = numpy.copy(histogram)
        # Seed equals the maximum everywhere except on the border, so erosion
        # reconstructs the image with its interior holes filled.
        seed[1:-1, 1:-1] = histogram.max()
        filled = reconstruction(seed=seed, mask=histogram, method='erosion')
        return filled*(histogram.sum()/filled.sum())
    def save_cumulated_data(self):
        # Ask for an output file and format, then export the current plot
        # as HDF5 and/or ASCII text.
        file_name, _ = QFileDialog.getSaveFileName(self, "Save Current Plot", filter="HDF5 Files (*.hdf5 *.h5 *.hdf);;Text Files (*.dat *.txt)")
        if not file_name is None and not file_name.strip()=="":
            items = ("Hdf5 only", "Text only", "Hdf5 and Text")
            item, ok = QInputDialog.getItem(self, "Select Output Format", "Formats: ", items, 2, False)
            if ok and item:
                if item == "Hdf5 only" or item == "Hdf5 and Text":
                    self.save_cumulated_data_hdf5(file_name)
                if item == "Text only" or item == "Hdf5 and Text":
                    self.save_cumulated_data_txt(file_name)
    def save_cumulated_data_hdf5(self, file_name):
        # Write the currently plotted ticket to <file_name>.hdf5 as a
        # "power_density" dataset; errors are shown in a dialog.
        if not self.plotted_ticket is None:
            try:
                save_file = ShadowPlot.PlotXYHdf5File(congruence.checkDir(os.path.splitext(file_name)[0] + ".hdf5"))
                save_file.write_coordinates(self.plotted_ticket)
                save_file.add_plot_xy(self.plotted_ticket, dataset_name="power_density")
                save_file.close()
            except Exception as exception:
                QMessageBox.critical(self, "Error", str(exception), QMessageBox.Ok)
                if self.IS_DEVELOP: raise exception
def save_cumulated_data_txt(self, file_name):
if not self.plotted_ticket is None:
try:
save_file = open(os.path.splitext(file_name)[0] + ".dat", "w")
x_values = self.plotted_ticket["bin_h_center"]
y_values = self.plotted_ticket["bin_v_center"]
z_values = self.plotted_ticket["histogram"]
for i in range(len(x_values)):
for j in range(len(y_values)):
row = str(x_values[i]) + " " + str(y_values[j]) + " " + str(z_values[i, j])
if i+j > 0: row = "\n" + row
save_file.write(row)
save_file.flush()
save_file.close()
except Exception as exception:
QMessageBox.critical(self, "Error", str(exception), QMessageBox.Ok)
if self.IS_DEVELOP: raise exception
    def replace_fig(self, shadow_beam, var_x, var_y, xrange, yrange, nbins, nolost):
        # Plot the power density of shadow_beam on the canvas, either
        # accumulating into the running ticket (keep_result) or standalone,
        # and optionally autosave the (partial) results to an HDF5 file.
        if self.plot_canvas is None:
            self.plot_canvas = PowerPlotXYWidget()
            self.image_box.layout().addWidget(self.plot_canvas)
        try:
            if self.autosave == 1:
                # (Re)open the autosave file if missing or if its name changed.
                if self.autosave_file is None:
                    self.autosave_file = ShadowPlot.PlotXYHdf5File(congruence.checkDir(self.autosave_file_name))
                elif self.autosave_file.filename != congruence.checkFileName(self.autosave_file_name):
                    self.autosave_file.close()
                    self.autosave_file = ShadowPlot.PlotXYHdf5File(congruence.checkDir(self.autosave_file_name))
            if self.keep_result == 1:
                # Accumulate this beam into the cumulated ticket.
                self.cumulated_ticket, last_ticket = self.plot_canvas.plot_power_density(shadow_beam, var_x, var_y,
                                                                                        self.total_power, self.cumulated_total_power,
                                                                                        self.energy_min, self.energy_max, self.energy_step,
                                                                                        nbins=nbins, xrange=xrange, yrange=yrange, nolost=nolost,
                                                                                        ticket_to_add=self.cumulated_ticket,
                                                                                        to_mm=self.workspace_units_to_mm,
                                                                                        show_image=self.view_type==1,
                                                                                        kind_of_calculation=self.kind_of_calculation,
                                                                                        replace_poor_statistic=self.replace_poor_statistic,
                                                                                        good_rays_limit=self.good_rays_limit,
                                                                                        center_x=self.center_x,
                                                                                        center_y=self.center_y,
                                                                                        sigma_x=self.sigma_x,
                                                                                        sigma_y=self.sigma_y,
                                                                                        gamma=self.gamma)
                self.plotted_ticket = self.cumulated_ticket
                self.plotted_ticket_original = self.plotted_ticket.copy()
                if self.autosave == 1:
                    self.autosave_file.write_coordinates(self.cumulated_ticket)
                    dataset_name = "power_density"
                    self.autosave_file.add_plot_xy(self.cumulated_ticket, dataset_name=dataset_name)
                    if self.autosave_partial_results == 1:
                        # Also store this energy step's contribution separately.
                        if last_ticket is None:
                            self.autosave_file.add_plot_xy(self.cumulated_ticket,
                                                           plot_name="Energy Range: " + str(round(self.energy_max-self.energy_step, 2)) + "-" + str(round(self.energy_max, 2)),
                                                           dataset_name=dataset_name)
                        else:
                            self.autosave_file.add_plot_xy(last_ticket,
                                                           plot_name="Energy Range: " + str(round(self.energy_max-self.energy_step, 2)) + "-" + str(round(self.energy_max, 2)),
                                                           dataset_name=dataset_name)
                    self.autosave_file.flush()
            else:
                # No accumulation: each beam produces an independent plot.
                ticket, _ = self.plot_canvas.plot_power_density(shadow_beam, var_x, var_y,
                                                                self.total_power, self.cumulated_total_power,
                                                                self.energy_min, self.energy_max, self.energy_step,
                                                                nbins=nbins, xrange=xrange, yrange=yrange, nolost=nolost,
                                                                to_mm=self.workspace_units_to_mm,
                                                                show_image=self.view_type==1,
                                                                kind_of_calculation=self.kind_of_calculation,
                                                                replace_poor_statistic=self.replace_poor_statistic,
                                                                good_rays_limit=self.good_rays_limit,
                                                                center_x=self.center_x,
                                                                center_y=self.center_y,
                                                                sigma_x=self.sigma_x,
                                                                sigma_y=self.sigma_y,
                                                                gamma=self.gamma)
                self.cumulated_ticket = None
                self.plotted_ticket = ticket
                self.plotted_ticket_original = self.plotted_ticket.copy()
                if self.autosave == 1:
                    self.autosave_file.write_coordinates(ticket)
                    self.autosave_file.add_plot_xy(ticket, dataset_name="power_density")
                    self.autosave_file.flush()
        except Exception as e:
            if not self.IS_DEVELOP:
                raise Exception("Data not plottable: No good rays or bad content")
            else:
                raise e
    def plot_xy(self, var_x, var_y):
        # Plot power density of the input beam for Shadow columns var_x/var_y,
        # optionally after retracing the beam to a different image plane.
        beam_to_plot = self.input_beam
        if self.image_plane == 1:
            new_shadow_beam = self.input_beam.duplicate(history=False)
            if self.image_plane_rel_abs_position == 1:  # relative
                dist = self.image_plane_new_position
            else: # absolute
                # Retrieve the image distance of the last optical element,
                # so the retrace distance can be computed from it.
                if self.input_beam.historySize() == 0:
                    historyItem = None
                else:
                    historyItem = self.input_beam.getOEHistory(oe_number=self.input_beam._oe_number)
                if historyItem is None: image_plane = 0.0
                elif self.input_beam._oe_number == 0: image_plane = 0.0
                else: image_plane = historyItem._shadow_oe_end._oe.T_IMAGE
                dist = self.image_plane_new_position - image_plane
            self.retrace_beam(new_shadow_beam, dist)
            beam_to_plot = new_shadow_beam
        xrange, yrange = self.get_ranges()
        # nolost = rays+1 maps the combo index to Shadow's lost-ray flag.
        self.replace_fig(beam_to_plot, var_x, var_y, xrange=xrange, yrange=yrange, nbins=int(self.number_of_bins), nolost=self.rays+1)
def get_ranges(self):
xrange = None
yrange = None
factor1 = self.workspace_units_to_mm
factor2 = self.workspace_units_to_mm
if self.x_range == 1:
congruence.checkLessThan(self.x_range_min, self.x_range_max, "X range min", "X range max")
xrange = [self.x_range_min / factor1, self.x_range_max / factor1]
if self.y_range == 1:
congruence.checkLessThan(self.y_range_min, self.y_range_max, "Y range min", "Y range max")
yrange = [self.y_range_min / factor2, self.y_range_max / factor2]
return xrange, yrange
    def plot_cumulated_data(self):
        # Replot the accumulated ticket (if any) with the current energy
        # range metadata, and refresh the pristine copy used by reloadPlot().
        if not self.cumulated_ticket is None:
            self.plot_canvas.plot_power_density_ticket(ticket=self.cumulated_ticket,
                                                       var_x=self.x_column_index+1,
                                                       var_y=self.y_column_index+1,
                                                       cumulated_total_power=self.cumulated_total_power,
                                                       energy_min=self.energy_min,
                                                       energy_max=self.energy_max,
                                                       energy_step=self.energy_step,
                                                       show_image=self.view_type==1)
            self.plotted_ticket_original = self.cumulated_ticket.copy()
    def plot_results(self):
        # Redirect stdout to the widget's output console, then plot the
        # selected Shadow columns of the current beam; errors go to a dialog.
        try:
            sys.stdout = EmittingStream(textWritten=self.writeStdOut)
            if ShadowCongruence.checkEmptyBeam(self.input_beam):
                self.number_of_bins = congruence.checkStrictlyPositiveNumber(self.number_of_bins, "Number of Bins")
                self.plot_xy(self.x_column_index+1, self.y_column_index+1)
            time.sleep(0.1) # prevents a misterious dead lock in the Orange cycle when refreshing the histogram
        except Exception as exception:
            QMessageBox.critical(self, "Error",
                                 str(exception),
                                 QMessageBox.Ok)
            if self.IS_DEVELOP: raise exception
    def setBeam(self, input_beam):
        # Input slot: receive a new beam carrying scanned-variable metadata,
        # update the running energy range / cumulated power, then plot.
        self.cb_rays.setEnabled(True)
        if not input_beam is None:
            if not input_beam.scanned_variable_data is None and input_beam.scanned_variable_data.has_additional_parameter("total_power"):
                self.input_beam = input_beam
                self.total_power = self.input_beam.scanned_variable_data.get_additional_parameter("total_power")
                if self.energy_min is None:
                    # First beam of a scan: initialize range and cumulated power.
                    self.energy_min = self.input_beam.scanned_variable_data.get_scanned_variable_value()
                    self.cumulated_total_power = self.total_power
                else:
                    self.cumulated_total_power += self.total_power
                self.energy_step = self.input_beam.scanned_variable_data.get_additional_parameter("photon_energy_step")
                self.energy_max = self.input_beam.scanned_variable_data.get_scanned_variable_value()
                if self.input_beam.scanned_variable_data.has_additional_parameter("is_footprint"):
                    if self.input_beam.scanned_variable_data.get_additional_parameter("is_footprint"):
                        self.cb_rays.setEnabled(False)
                        self.rays = 0  # transmitted, absorbed doesn't make sense since is precalculated by footprint object
                    else:
                        self.cb_rays.setEnabled(True)
                if ShadowCongruence.checkEmptyBeam(input_beam):
                    if ShadowCongruence.checkGoodBeam(input_beam):
                        self.plot_results()
    def writeStdOut(self, text):
        # Append captured stdout text to the "System Output" area and keep
        # the newest text visible.
        cursor = self.shadow_output.textCursor()
        cursor.movePosition(QTextCursor.End)
        cursor.insertText(text)
        self.shadow_output.setTextCursor(cursor)
        self.shadow_output.ensureCursorVisible()
    def retrace_beam(self, new_shadow_beam, dist):
        # Propagate the beam in free space by dist (workspace units), in place.
        new_shadow_beam._beam.retrace(dist)
/GALFITools-1.0.0.tar.gz/GALFITools-1.0.0/src/galfitools/galin/xy2fits.py |
import numpy as np
import os
import subprocess as sp
import os.path
from astropy.io import fits
import argparse
# code to convert ASCII xy positions to FTIS mask
class xy2fits:
    """Convert an ASCII list of (x, y) pixel positions into a FITS mask image."""

    def MakeFits(self, ImageFile, AsciiFile, Value):
        """Create a FITS mask named after *AsciiFile*, with the same pixel
        size as *ImageFile*, where every listed position is set to *Value*.

        Positions in the ASCII file are 1-based (FITS convention).
        Returns the name of the created mask file.
        """
        root_ext = os.path.splitext(AsciiFile)
        namefile = root_ext[0]
        maskfits = namefile + ".fits"
        (ncol, nrow) = self.GetAxis(ImageFile)
        self.MakeImage(maskfits, ncol, nrow)
        X, Y = np.genfromtxt(AsciiFile, delimiter="", unpack=True)
        # atleast_1d guards against a one-line ASCII file, where genfromtxt
        # returns 0-d scalars instead of arrays.
        X = np.atleast_1d(X).astype(int)
        Y = np.atleast_1d(Y).astype(int)
        # Convert from 1-based FITS positions to 0-based array indices.
        X = X - 1
        Y = Y - 1
        self.PutPix(X, Y, Value, maskfits)
        return maskfits

    def GetAxis(self, Image):
        """Return (ncol, nrow) of the first HDU of *Image*."""
        hdu = fits.open(Image)
        ncol = hdu[0].header["NAXIS1"]  # for hubble images
        nrow = hdu[0].header["NAXIS2"]
        hdu.close()
        return ncol, nrow

    def MakeImage(self, newfits, sizex, sizey):
        """Create a blank float64 FITS image of sizex x sizey pixels."""
        if os.path.isfile(newfits):
            print("{} deleted; a new one is created ".format(newfits))
            # os.remove is portable; the original shelled out to "rm",
            # which fails on non-POSIX systems (and was redundant anyway,
            # since writeto(overwrite=True) replaces the file).
            os.remove(newfits)
        hdu = fits.PrimaryHDU()
        hdu.data = np.zeros((sizey, sizex), dtype=np.float64)
        hdu.writeto(newfits, overwrite=True)
        return True

    def PutPix(self, X, Y, Value, ImageFits):
        """Set the pixels at positions (X, Y) of *ImageFits* to *Value*."""
        hdu = fits.open(ImageFits)
        Image = hdu[0].data
        # FITS data is stored row-major: the first index is the row (Y).
        Image[Y, X] = Value
        hdu[0].data = Image
        hdu.writeto(ImageFits, overwrite=True)
        hdu.close()
######################
#############################################################################
######################### End of program ###################################
# ______________________________________________________________________
# /___/___/___/___/___/___/___/___/___/___/___/___/___/___/___/___/___/_/|
# |___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|__/|
# |_|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|/|
# |___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|__/|
# |_|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|/|
# |___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|__/|
# |_|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|/
##############################################################################
if __name__ == '__main__':
    # NOTE(review): mainxy2fits() is not defined or imported in this module,
    # so running the file directly raises NameError — presumably the entry
    # point lives elsewhere in the package; TODO confirm the intended call.
    mainxy2fits()
/aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/src/realtime_plotter.py |
__version__='2.0.0'
from sys import stdin,stderr,exit,argv
from os.path import basename
from time import time,sleep,strftime,localtime
import numpy
from random import random
import argparse
from matplotlib import animation,use as matplotlib_use
from matplotlib import pyplot as plt
try:
from matplotlib import __version__ as matplotlib_version
from matplotlib.patches import ConnectionPatch
from matplotlib.lines import Line2D
matplotlib_use('Qt5Agg') # TkAgg or wxAgg or Qt5Agg or Qt4Agg
except:
pass
_t0=time()
_second_call=False
def fig_timestamp(fig,author='',brand='AIMM Sim —',fontsize=6,color='blue',alpha=0.7,rotation=0,prespace='  '):
  # Keith Briggs 2020-01-07
  '''Stamp the figure with a brand/author string and the current
  date-time, anchored near the bottom-left corner.'''
  timestamp=strftime('%Y-%m-%d %H:%M',localtime())
  label=(prespace+'%s %s')%(brand+' '+author,timestamp,)
  fig.text( # position text relative to Figure
    0.01,0.005,label,
    ha='left',va='bottom',fontsize=fontsize,color=color,
    rotation=rotation,
    transform=fig.transFigure,alpha=alpha)
class Animate:
  '''Incrementally plot columns of tab-separated data rows on stacked
  matplotlib axes using FuncAnimation; optionally save the final plot
  to PNG/PDF or the whole animation to mp4.'''
  def __init__(s,getter,naxes,nplots,xlim=(0,1),ylims={},xlabel='',ylabels={},legends={},title='',lw=2,image_fnbase='',tmax=None,figscale=1.5,final_sleep_time=5,author='',extra='',inputfile='',column_to_axis_map={},xlabel_fontsize=10,ylabel_fontsize=10,title_fontsize=12,cmap_type='hsv'):
    # https://matplotlib.org/stable/tutorials/colors/colormaps.html
    '''getter: generator yielding rows (t, y0, y1, ...), or None at end of data.
    naxes: number of stacked axes; nplots: number of plotted data columns.
    column_to_axis_map: optional {column: axis} dict; unmapped extra columns
    go to the last axis.  ylims/ylabels/legends are dicts keyed by axis index.
    NOTE(review): the mutable default arguments ({}) are shared between
    calls — harmless here since they are only read, but do not mutate them.'''
    s.getter=getter
    s.naxes=naxes
    s.nplots=nplots
    s.xlim,s.ylims=xlim,ylims
    s.lw=lw
    s.xlabel,s.ylabels,s.title=xlabel,ylabels,title
    s.image_fnbase=image_fnbase
    s.legends=legends
    s.tmax=tmax
    s.lines=[]
    s.final_sleep_time=final_sleep_time
    s.x=[]
    s.ys=[[] for i in range(s.nplots)]
    s.fig=plt.figure(figsize=(figscale*6.4,figscale*4.8))
    #s.fig.tight_layout()
    if 0: # old
      if column_to_axis_map:
        s.column_to_axis_map=column_to_axis_map
      else:
        s.column_to_axis_map=dict((i,i) for i in range(s.naxes)) # default
    else:
      if column_to_axis_map:
        s.column_to_axis_map={} # Keith Briggs 2022-08-08
      else:
        s.column_to_axis_map=dict((i,i) for i in range(s.naxes)) # default
      if s.nplots>s.naxes: # map extra plots to last axis (so we don't lose any)
        for x in range(s.naxes,s.nplots):
          s.column_to_axis_map[x]=s.naxes-1
      for x,y in column_to_axis_map.items():
        s.column_to_axis_map[x]=y # overwrite defaults with passed argument
    s.ax=[s.fig.add_subplot(s.naxes,1,1+i) for i in range(s.naxes)]
    s.fig.align_ylabels(s.ax)
    print(f'  naxes={s.naxes} nplots={s.nplots}',file=stderr)
    print(f'  column_to_axis_map={s.column_to_axis_map}',file=stderr)
    print(f'  ylims={s.ylims}',file=stderr)
    s.transfigure=s.fig.transFigure.inverted()
    s.ax.reverse() # ax[0] at bottom
    s.extra=extra
    s.inputfile=inputfile
    s.anim=None # gets created later (in run())
    if 0: # old
      s.colors=('r','g','b','c','y','k',)
      s.ncolors=len(s.colors)
    else: # better
      # One colormap-derived color per plotted column.
      s.ncolors=nplots
      s.cmap=plt.get_cmap(cmap_type)
      s.colors=tuple(s.cmap(0.9*i/s.ncolors) for i in range(s.ncolors))
    props=dict(boxstyle='round',facecolor='white',alpha=0.8)
    # Configure each axis: only the bottom one gets x tick labels and label.
    for i in range(s.naxes):
      if i==0: # bottom plot
        if s.xlabel: s.ax[i].set_xlabel(s.xlabel,fontsize=xlabel_fontsize)
      else: # other plots
        s.ax[i].xaxis.set_ticklabels([])
      if i in ylims: s.ax[i].set_ylim(*ylims[i])
      if i in ylabels: s.ax[i].set_ylabel(s.ylabels[i],fontsize=ylabel_fontsize)
      s.ax[i].grid(lw=0.5,alpha=0.5,color='gray')
      s.ax[i].set_xlim(*xlim)
      s.ax[i].xaxis.set_major_locator(plt.MaxNLocator(10))
      if s.naxes<4: # set number of ticks on y axes...
        s.ax[i].yaxis.set_major_locator(plt.MaxNLocator(6))
      else:
        s.ax[i].yaxis.set_major_locator(plt.MaxNLocator(4))
      if i in s.legends: # FIXME
        try:
          # Legend entries are "x<tab>y<tab>text" strings.
          lx,ly,lt=s.legends[i].split('\t')
          lx,ly=float(lx),float(ly) # legend position
          s.ax[i].text(lx,ly,lt,fontsize=8,verticalalignment='top',horizontalalignment='right',bbox=props)
        except:
          print('legend must have format "x<tab>y<tab>text"',file=stderr)
    if s.title: s.ax[-1].set_title(s.title,fontsize=title_fontsize)
    s.pdf_saved=False
    fig_timestamp(s.fig,author=author,rotation=0,fontsize=8)
  def init(s):
    # FuncAnimation init callback: start every line empty.
    for line in s.lines: line.set_data([],[])
    return s.lines
  def animate(s,k,dbg=True):
    # FuncAnimation frame callback: pull one data row from the getter and
    # extend the lines.  When the getter is exhausted (yields None), replot
    # everything, run the "extra" code, save images, and exit the process.
    global _second_call
    xy=next(s.getter)
    if xy is None or len(xy)==0: # no more data
      if dbg: print(f'{basename(__file__)}: input data exhausted.',file=stderr)
      if not _second_call:
        #for i in range(s.nplots): # replot; it gets deleted when show() returns
        #  s.ax[i].plot(s.x,s.ys[i],lw=s.lw,color=s.colors[i%5],alpha=1)
        try: # Keith Briggs 2022-08-08 FIXME why is this needed?
          #print(f'not _second_call: s.column_to_axis_map={s.column_to_axis_map}',file=stderr)
          for i,j in s.column_to_axis_map.items(): # 2021-12-17 replot
            #print(f'not _second_call: i={i} j={j}',file=stderr)
            if i<len(s.ys): s.ax[j].plot(s.x,s.ys[i],lw=s.lw,color=s.colors[i%s.ncolors],alpha=1) # line
            #s.ax[j].plot(s.x,s.ys[i],lw=0.5,marker='o',markersize=0.5,color=s.colors[i%s.ncolors]) # dot only
        except:
          print(f'not _second_call: plot failed!',file=stderr)
        if s.extra: # plot "extra" again to make sure it's on top!
          s.transfigure=s.fig.transFigure.inverted() # this needs updating!
          try:
            # NOTE(review): exec of caller-supplied code — trusted input only.
            exec(s.extra)
            print(f'"extra" executed at t={time()-_t0:.2f}',file=stderr)
            s.extra=None # make sure it's only done once
          except Exception as e:
            print(f'extra="{s.extra}" failed with message "{str(e)}"!',file=stderr)
        if s.image_fnbase:
          print(f'animate: saving final image files at t={time()-_t0:.2f}...',file=stderr,end='')
          s.fig.savefig(s.image_fnbase+'.png')
          s.fig.savefig(s.image_fnbase+'.pdf')
          print('done.',file=stderr)
          print('eog '+s.image_fnbase+'.png &',file=stderr)
          print('evince '+s.image_fnbase+'.pdf &',file=stderr)
      _second_call=True
      sleep(s.final_sleep_time)
      exit(0)
    # else (xy is not None)...
    s.x.append(xy[0]) # time
    if 1: # old way
      for j in range(s.nplots): s.ys[j].append(xy[1+j])
    else: # FIXME
      for i,j in s.column_to_axis_map.items():
        print(f'{i}->{j}',file=stderr)
        s.ys[j].append(xy[1+i])
      #print(f'{s.ys}',file=stderr)
      #exit()
    for i,ysi in enumerate(s.ys):
      s.lines[i].set_data(s.x,ysi)
      #s.lines[s.column_to_axis_map[i]].set_data(s.x,ysi)
    return s.lines
  def run_OLD(s,nframes=1000):
    # Superseded by run(): one line per axis instead of per column.
    plt.ion()
    for i in range(s.naxes):
      lobj=s.ax[i].plot([],[],lw=s.lw,color=s.colors[i%s.ncolors])[0]
      s.lines.append(lobj)
    s.anim=animation.FuncAnimation(s.fig,s.animate,init_func=s.init,frames=nframes,interval=0.01,blit=True,save_count=1000) #,repeat=False)
    plt.show(block=True)
  def run(s,nframes=1000):
    # create a plot object for each plot, and map them to axes
    plt.ion()
    for i,j in s.column_to_axis_map.items():
      print(f'run: column[{i}] is mapped to axis [{j}].',file=stderr)
      s.lines.append(s.ax[j].plot([],[],lw=s.lw,color=s.colors[i%s.ncolors],alpha=1)[0])
      #s.lines.append(s.ax[j].plot(s.x,s.ys[i],lw=0.0,marker='o',markersize=0.5,color=s.colors[i%s.ncolors])[0]) # dot only
    s.anim=animation.FuncAnimation(s.fig,s.animate,init_func=s.init,frames=nframes,interval=0.01,blit=True,save_count=1000) #,repeat=False)
    plt.show(block=True)
  def run_noshow(s,nframes=2*5000):
    # FIXME need a good way to set nframes
    # Headless variant of run(): render the animation and save it as mp4.
    for i in range(s.naxes):
      axi=s.ax[i]
      axi.plot([],[],lw=s.lw)
      lobj=axi.plot([],[],lw=s.lw,color=s.colors[i%s.ncolors])[0]
      s.lines.append(lobj)
    s.anim=animation.FuncAnimation(s.fig,s.animate,init_func=s.init,frames=nframes,interval=0.01,blit=True,save_count=nframes)
    plt.draw()
    s.save_mp4()
  def save_mp4(s):
    # Write the animation to <inputfile>.mp4 via ffmpeg.
    print(f's.anim={s.anim}',file=stderr)
    writervideo=animation.FFMpegWriter(fps=30,bitrate=2000)
    #print(f'writervideo={writervideo} ...',file=stderr)
    filename_mp4=f'{s.inputfile}.mp4'
    print(f'Writing {filename_mp4} ...',end='',file=stderr); stderr.flush()
    s.anim.save(filename_mp4,writer=writervideo)
    print('done',file=stderr)
  def add_line_betweenaxes(s,xy0,xy1,ax0,ax1,color='r',lw=1,arrowstyle='-',shrinkB=0): # 2021-10-28
    # Draw an arrow between two points in data coordinates, possibly
    # in different axes.
    s.fig.add_artist(ConnectionPatch(
      xyA=xy0, coordsA=s.ax[ax0].transData,
      xyB=xy1, coordsB=s.ax[ax1].transData,
      arrowstyle=arrowstyle,shrinkB=shrinkB,color=color,lw=lw)
    )
def _getter_random(n):
  # Self-test data source: yield rows (k, y0, y1, 10*y2) of a smoothed
  # random walk, and yield None (end-of-data marker) once k exceeds n.
  # Uses the module globals _k and last set up by main()'s selftest branch.
  global _k,last
  while True:
    if _k>n: yield None
    _k+=1
    x=numpy.random.random(3)
    nxt=0.2*x+0.8*last  # exponential smoothing of the raw random samples
    last=nxt
    yield _k,nxt[0],nxt[1],10*nxt[2]
def getter_stdin(nrowsmax=None):
  # Data source reading tab-separated rows from stdin.  Yields a numpy
  # array per row, None at EOF or after nrowsmax rows (the end-of-data
  # marker expected by Animate.animate), and skips '#' comment lines.
  k=0
  while True:
    if nrowsmax and k>nrowsmax: yield None
    k+=1
    line=stdin.readline()
    if not line: yield None  # EOF: signal end-of-data to the animator
    if line and line[0]=='#':
      continue # 2021-10-29
    else:
      yield numpy.fromstring(line,sep='\t') # 2021-12-15
      #yield numpy.array(list(map(float,line.split()))) # 2021-07-15
def getter_tsv(tsv,skip=10):
  # Keith Briggs 2021-07-19 - return rows of a pre-loaded tsv file
  '''Yield every skip-th row of the pre-loaded array tsv, then yield
  None as the end-of-data marker expected by Animate.animate.'''
  nrows=tsv.shape[0]
  row=0
  while row<nrows:
    yield tsv[row]
    row+=skip
  print('getter_tsv done',file=stderr)
  yield None
def test_01(n=100,naxes=3):
  '''Self-test: animate n frames of smoothed random data on naxes axes.

  Fixes over the original call: Animate.__init__ has no ncols parameter
  and requires nplots, so the original raised TypeError; and its
  ylims/ylabels/legends lookups use "axis_index in container", so lists
  were silently ignored — dicts keyed by axis index are required.'''
  animate=Animate(_getter_random(n),naxes=naxes,nplots=naxes,xlim=(0,n),
    ylims={0:(0,1),1:(0,1),2:(0,10)},xlabel='time',
    ylabels={i:'random' for i in range(naxes)},
    legends={i:'90\t0.9\trandom' for i in range(naxes)})
  animate.run(nframes=n)
def main():
  '''Parse command-line options, build an Animate object, and either run
  the interactive plot (input from stdin) or render an mp4 (input from a
  tsv file).  Returns the ArgumentParser (useful for doc generation).'''
  parser=argparse.ArgumentParser()
  parser.add_argument('--selftest', help='self-test',action='store_true')
  parser.add_argument('-naxes',type=int, help='number of axes',default=0)
  parser.add_argument('-nplots',type=int, help='number of plots',default=1)
  parser.add_argument('-tmax',type=float, help='t_max',default=100.0)
  parser.add_argument('-xlabel',type=str, help='x axis label',default='time')
  parser.add_argument('-fst',type=float, help='final sleep time',default=5.0)
  parser.add_argument('-fnb',type=str, help='filename base',default='')
  parser.add_argument('-ylims',type=str, help='y limits (dict)',default='')
  parser.add_argument('-ylabels',type=str,help='ylabels (dict)',default='')
  parser.add_argument('-title',type=str, help='figure title',default='')
  parser.add_argument('-lw',type=str, help='linewidth',default=2)
  parser.add_argument('-author',type=str, help='author name for plot bottom margin',default='')
  parser.add_argument('-extra',type=str, help='extra features to be added to the plot; raw python code',default='')
  parser.add_argument('-inputfile',type=str, help='file to read input from instead of stdin; in this case the plot is not displayed, but written to an mp4 file',default='')
  parser.add_argument('-column_to_axis_map',type=str, help='column_to_axis_map',default='{}')
  args=parser.parse_args()
  if args.selftest:
    global _k,last,nplots
    _k=0; last=numpy.zeros(3); test_01(); exit()
  if args.naxes==0: # default: one axis per plotted column
    args.naxes=args.nplots
  # Bug fix: the original had a trailing comma here, making xlim a 1-tuple
  # ((0.0,tmax),) which only worked because set_xlim(*xlim) accepts a tuple.
  xlim=(0.0,args.tmax)
  ylims={i: (0.0,20.0) for i in range(args.naxes)} # default ylims
  if args.ylims:
    try:
      # NOTE(review): eval of a command-line string; trusted input only.
      d=eval(args.ylims)
      if type(d) is dict:
        for q in d: ylims[q]=d[q]
      elif type(d) in (tuple,list):
        for i,q in enumerate(d): ylims[i]=q
    except:
      print(f'Could not parse -ylims="{args.ylims}"',file=stderr)
  ylabels={i: f'$y_{{{i}}}$' for i in range(args.naxes)}
  if args.ylabels:
    try:
      d=eval(args.ylabels)
      if type(d) is dict:
        for q in d: ylabels[q]=d[q]
      elif type(d) is list: # 2021-11-09 allow list of labels
        for i,q in enumerate(d): ylabels[i]=q
      elif type(d) is str:
        for q in range(args.naxes): ylabels[q]=f'{d}$_{{{q}}}$'
    except:
      print(f'Could not parse -ylabels="{args.ylabels}"',file=stderr)
  if args.inputfile and args.inputfile not in ('stdin','-',):
    try:
      tsv=numpy.loadtxt(args.inputfile)
      nrows=tsv.shape[0]
      print(f'Loaded tsv file "{args.inputfile}", {nrows} rows',file=stderr)
    except:
      print(f'Could not load tsv file "{args.inputfile}", quitting',file=stderr)
      exit(1)
    getter=getter_tsv(tsv)
  else:
    getter=getter_stdin()
  if args.naxes>4: plt.rcParams.update({'font.size': 6}) # smaller fonts for many axes
  try:
    column_to_axis_map=eval(args.column_to_axis_map)
  except:
    # Bug fix: refer to the raw argument here — the local name is unbound
    # when eval fails, so the original print itself raised NameError.
    print(f'{basename(__file__)}: could not parse column_to_axis_map={args.column_to_axis_map}, using default',file=stderr)
    column_to_axis_map={}
  animate=Animate(
    getter,
    naxes=args.naxes,
    nplots=args.nplots,
    xlim=xlim,
    title=args.title,
    lw=args.lw,
    ylims=ylims,
    xlabel=args.xlabel,
    ylabels=ylabels,
    legends=[],
    final_sleep_time=args.fst,
    image_fnbase=args.fnb,
    author=args.author,
    extra=args.extra,
    inputfile=args.inputfile,
    column_to_axis_map=column_to_axis_map
  )
  if args.inputfile in ('','stdin','-',):
    animate.run(nframes=100)
  else:
    animate.run_noshow()
  return parser
if __name__=='__main__':
  # Report library version and script name on stderr before any plotting.
  print(f'matplotlib version={matplotlib_version}',file=stderr)
  print(f'{basename(__file__)} starting...',file=stderr)
  # Global figure defaults: readable font size and automatic layout.
  plt.rcParams.update({'font.size': 12})
  plt.rcParams.update({'figure.autolayout': True})
  # https://matplotlib.org/stable/tutorials/intermediate/constrainedlayout_guide.html
  # 'figure.autolayout': True,
  # 'figure.constrained_layout.use': True
  main()
/GQCMS-0.0.4-py3-none-any.whl/gqcms/basis.py | from gqcms import Determinant
def seniorityBasis(seniority_number, sites, nalpha, nbeta):
    """
    Create a basis of the seniority number.

    Starting from the reference Determinant, pair (alpha+beta) excitations
    are generated over contiguous windows of occupied and virtual sites.
    NOTE(review): only contiguous site windows are excited here, not all
    combinations — confirm this matches the intended seniority-zero space.
    """
    if seniority_number == 0 and (nalpha+nbeta)%2 == 1:
        raise ValueError("seniority number zero not possible with odd number of electrons")
    reference = Determinant(nalpha=nalpha, nbeta=nbeta, sites=sites)
    determinants = [reference]
    if seniority_number != 0:
        raise NotImplementedError("Only seniority zero basis is implemented currently.")
    # Seniority zero: every electron stays paired, so each excitation moves
    # an (alpha, beta) pair together from an occupied site to a virtual one.
    occupied = reference.alpha_occ
    virtual = [site for site in range(sites) if site not in reference.alpha_occ]
    # npairs is how many pairs are moved at once; it is bounded by the
    # shorter of the occupied/virtual site lists.
    for npairs in range(1, min(len(occupied), len(virtual))+1):
        for i in range(len(occupied)):
            src_window = occupied[i:i+npairs]
            # Skip windows truncated at the end of the list.
            if len(src_window) != npairs:
                continue
            for j in range(len(virtual)):
                dst_window = virtual[j:j+npairs]
                if len(dst_window) != npairs:
                    continue
                excited = reference.copy()
                # Annihilate the alpha/beta pair on each source site ...
                for src in src_window:
                    excited.remove_alpha_orbital(src)
                    excited.remove_beta_orbital(src)
                # ... and recreate it on each destination site.
                for dst in dst_window:
                    excited.add_alpha_orbital(dst)
                    excited.add_beta_orbital(dst)
                determinants.append(excited)
    return determinants
/GenIce-1.0.11.tar.gz/GenIce-1.0.11/genice/lattices/ice2rect.py |
# Metadata consumed by the GenIce lattice plugin loader.
desc = {
    "ref": {
        "IId": 'Nakamura, Tatsuya et al. “Thermodynamic Stability of Ice II and Its Hydrogen-Disordered Counterpart: Role of Zero-Point Energy.” The Journal of Physical Chemistry B 120.8 (2015): 1843–1848.',
    },
    "usage": "No options available.",
    "brief": "Orthogonalized Ice II.",
}

# Hydrogen-bond detection threshold, in the same length unit as the
# absolute coordinates listed in `waters` below (presumably Angstrom).
bondlen = 3

# The `waters` table holds absolute Cartesian positions (not fractional).
coord = "absolute"
waters="""
1.8266 2.4421 0.9143
1.8266 10.0920 5.0426
8.4516 6.2671 2.9784
10.7561 0.4625 0.9143
10.7561 8.1123 5.0426
4.1311 4.2874 2.9784
8.0058 9.1855 0.9143
1.3808 5.3605 5.0426
8.0058 1.5356 2.9784
5.2740 9.4428 0.2553
11.8990 5.6179 4.3836
5.2740 1.7930 2.3194
9.5945 11.4225 0.2553
2.9695 7.5976 4.3836
9.5945 3.7726 2.3194
12.3448 2.6995 0.2553
12.3448 10.3494 4.3836
5.7198 6.5245 2.3194
8.4516 13.9170 0.9143
8.4516 21.5668 5.0426
1.8266 17.7419 2.9784
4.1311 11.9373 0.9143
4.1311 19.5872 5.0426
10.7561 15.7622 2.9784
1.3808 20.6603 0.9143
8.0058 16.8353 5.0426
1.3808 13.0104 2.9784
11.8990 20.9177 0.2553
5.2740 17.0927 4.3836
11.8990 13.2678 2.3194
2.9695 22.8973 0.2553
9.5945 19.0724 4.3836
2.9695 15.2474 2.3194
5.7198 14.1743 0.2553
5.7198 21.8242 4.3836
12.3448 17.9993 2.3194
11.6473 2.3563 3.7643
5.0223 13.8312 3.7643
11.6473 10.0062 1.7001
5.0223 6.1813 5.8284
5.9200 9.0104 3.7643
12.5450 5.1854 1.7001
5.9200 1.3605 5.8284
3.0211 0.7233 3.7643
3.0211 8.3732 1.7001
9.6461 4.5483 5.8284
8.7032 9.5286 3.5978
2.0782 5.7037 1.5336
8.7032 1.8787 5.6619
1.1806 2.8746 3.5978
1.1806 10.5245 1.5336
7.8056 6.6995 5.6619
4.0795 11.1616 3.5978
10.7045 7.3367 1.5336
4.0795 3.5118 5.6619
5.0223 21.4810 1.7001
11.6473 17.6561 5.8284
12.5450 20.4852 3.7643
5.9200 16.6603 1.7001
12.5450 12.8353 5.8284
9.6461 12.1981 3.7643
9.6461 19.8480 1.7001
3.0211 16.0231 5.8284
2.0782 21.0035 3.5978
8.7032 17.1785 1.5336
2.0782 13.3536 5.6619
7.8056 14.3494 3.5978
7.8056 21.9993 1.5336
1.1806 18.1744 5.6619
10.7045 22.6365 3.5978
4.0795 18.8115 1.5336
10.7045 14.9866 5.6619
"""
fixed="""
64 61
61 67
69 19
57 35
31 61
27 57
61 27
69 57
27 57
61 27
69 57
27 57
61 27
69 57
3 27
3 69
36 69
27 24
57 63
1 16
50 38
13 1
13 44
47 44
1 50
52 1
44 50
1 50
52 1
44 50
1 50
52 1
44 50
50 26
21 52
44 9
52 40
37 52
48 45
45 51
53 2
41 15
14 45
10 41
45 10
53 41
10 41
45 10
53 41
10 41
45 10
53 41
4 10
4 53
38 53
10 7
41 47
46 60
60 66
71 18
59 16
12 60
29 59
60 29
71 59
29 59
60 29
71 59
29 59
60 29
71 59
23 29
23 71
56 71
29 26
59 65
39 54
17 5
39 13
2 17
2 51
17 39
51 39
2 17
2 51
17 39
51 39
2 17
2 51
17 39
51 39
40 17
51 6
14 2
43 11
54 42
5 11
67 8
42 34
11 8
8 48
11 42
42 48
11 8
8 48
11 42
42 48
11 8
8 48
11 42
42 48
8 14
48 3
64 23
25 31
31 19
35 23
56 31
35 56
35 23
56 31
35 56
35 23
56 31
35 56
20 35
68 56
55 70
34 22
55 30
19 34
19 67
34 55
67 55
19 34
19 67
34 55
67 55
19 34
19 67
34 55
67 55
46 4
6 12
12 18
16 4
38 12
16 38
16 4
38 12
16 38
16 4
38 12
16 38
65 21
26 32
26 65
65 21
26 32
26 65
65 21
26 32
26 65
32 20
32 62
65 62
21 9
33 21
37 32
9 6
6 46
9 40
40 46
9 6
6 46
9 40
40 46
9 6
6 46
9 40
40 46
0 15
49 36
30 0
30 43
63 43
0 49
54 0
43 49
0 49
54 0
43 49
0 49
54 0
43 49
49 7
5 54
22 70
68 24
63 22
24 30
24 63
63 22
24 30
24 63
63 22
24 30
24 63
22 28
62 28
70 58
66 25
58 33
28 25
25 64
28 58
58 64
28 25
25 64
28 58
58 64
28 25
25 64
28 58
58 64
47 5
7 13
7 47
47 5
7 13
7 47
47 5
7 13
7 47
20 68
70 20
62 68
20 68
70 20
62 68
20 68
70 20
62 68
18 33
18 66
33 37
66 37
18 33
18 66
33 37
66 37
18 33
18 66
33 37
66 37
15 3
36 14
15 36
15 3
36 14
15 36
15 3
36 14
15 36
"""
# set pairs in this way for hydrogen-ordered ices: the explicit donor ->
# acceptor list in `fixed` becomes the bond graph, so GenIce does not
# search for hydrogen-bond pairs by distance.
pairs = fixed
from genice.cell import cellvectors
# Orthorhombic cell from the three edge lengths a, b, c; units match the
# absolute coordinates in `waters` above (presumably Angstrom).
cell = cellvectors(a=13.25,
                   b=22.9496665056,
                   c=6.19244244367)
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/mmdet/models/dense_heads/detr_head.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import Conv2d, Linear, build_activation_layer
from mmcv.cnn.bricks.transformer import FFN, build_positional_encoding
from mmcv.runner import force_fp32
from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh,
build_assigner, build_sampler, multi_apply,
reduce_mean)
from mmdet.models.utils import build_transformer
from ..builder import HEADS, build_loss
from .anchor_free_head import AnchorFreeHead
@HEADS.register_module()
class DETRHead(AnchorFreeHead):
"""Implements the DETR transformer head.
See `paper: End-to-End Object Detection with Transformers
<https://arxiv.org/pdf/2005.12872>`_ for details.
Args:
num_classes (int): Number of categories excluding the background.
in_channels (int): Number of channels in the input feature map.
num_query (int): Number of query in Transformer.
num_reg_fcs (int, optional): Number of fully-connected layers used in
`FFN`, which is then used for the regression head. Default 2.
transformer (obj:`mmcv.ConfigDict`|dict): Config for transformer.
Default: None.
sync_cls_avg_factor (bool): Whether to sync the avg_factor of
all ranks. Default to False.
positional_encoding (obj:`mmcv.ConfigDict`|dict):
Config for position encoding.
loss_cls (obj:`mmcv.ConfigDict`|dict): Config of the
classification loss. Default `CrossEntropyLoss`.
loss_bbox (obj:`mmcv.ConfigDict`|dict): Config of the
regression loss. Default `L1Loss`.
loss_iou (obj:`mmcv.ConfigDict`|dict): Config of the
regression iou loss. Default `GIoULoss`.
        train_cfg (obj:`mmcv.ConfigDict`|dict): Training config of
transformer head.
test_cfg (obj:`mmcv.ConfigDict`|dict): Testing config of
transformer head.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
_version = 2
    def __init__(self,
                 num_classes,
                 in_channels,
                 num_query=100,
                 num_reg_fcs=2,
                 transformer=None,
                 sync_cls_avg_factor=False,
                 positional_encoding=dict(
                     type='SinePositionalEncoding',
                     num_feats=128,
                     normalize=True),
                 loss_cls=dict(
                     type='CrossEntropyLoss',
                     bg_cls_weight=0.1,
                     use_sigmoid=False,
                     loss_weight=1.0,
                     class_weight=1.0),
                 loss_bbox=dict(type='L1Loss', loss_weight=5.0),
                 loss_iou=dict(type='GIoULoss', loss_weight=2.0),
                 train_cfg=dict(
                     assigner=dict(
                         type='HungarianAssigner',
                         cls_cost=dict(type='ClassificationCost', weight=1.),
                         reg_cost=dict(type='BBoxL1Cost', weight=5.0),
                         iou_cost=dict(
                             type='IoUCost', iou_mode='giou', weight=2.0))),
                 test_cfg=dict(max_per_img=100),
                 init_cfg=None,
                 **kwargs):
        """Initialize the head; arguments are documented in the class docstring."""
        # NOTE here use `AnchorFreeHead` instead of `TransformerHead`,
        # since it brings inconvenience when the initialization of
        # `AnchorFreeHead` is called.
        super(AnchorFreeHead, self).__init__(init_cfg)
        self.bg_cls_weight = 0
        self.sync_cls_avg_factor = sync_cls_avg_factor
        class_weight = loss_cls.get('class_weight', None)
        # Per-class weighting only applies to this exact class (subclasses
        # may handle class weights differently).
        if class_weight is not None and (self.__class__ is DETRHead):
            assert isinstance(class_weight, float), 'Expected ' \
                'class_weight to have type float. Found ' \
                f'{type(class_weight)}.'
            # NOTE following the official DETR repo, bg_cls_weight means
            # relative classification weight of the no-object class.
            bg_cls_weight = loss_cls.get('bg_cls_weight', class_weight)
            assert isinstance(bg_cls_weight, float), 'Expected ' \
                'bg_cls_weight to have type float. Found ' \
                f'{type(bg_cls_weight)}.'
            class_weight = torch.ones(num_classes + 1) * class_weight
            # set background class as the last index
            class_weight[num_classes] = bg_cls_weight
            loss_cls.update({'class_weight': class_weight})
            # bg_cls_weight is not a valid key for the loss constructor.
            if 'bg_cls_weight' in loss_cls:
                loss_cls.pop('bg_cls_weight')
            self.bg_cls_weight = bg_cls_weight
        if train_cfg:
            # The Hungarian matcher's costs must mirror the loss weights,
            # otherwise assignment and optimization would diverge.
            assert 'assigner' in train_cfg, 'assigner should be provided '\
                'when train_cfg is set.'
            assigner = train_cfg['assigner']
            assert loss_cls['loss_weight'] == assigner['cls_cost']['weight'], \
                'The classification weight for loss and matcher should be' \
                'exactly the same.'
            assert loss_bbox['loss_weight'] == assigner['reg_cost'][
                'weight'], 'The regression L1 weight for loss and matcher ' \
                'should be exactly the same.'
            assert loss_iou['loss_weight'] == assigner['iou_cost']['weight'], \
                'The regression iou weight for loss and matcher should be' \
                'exactly the same.'
            self.assigner = build_assigner(assigner)
            # DETR sampling=False, so use PseudoSampler
            sampler_cfg = dict(type='PseudoSampler')
            self.sampler = build_sampler(sampler_cfg, context=self)
        self.num_query = num_query
        self.num_classes = num_classes
        self.in_channels = in_channels
        self.num_reg_fcs = num_reg_fcs
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.fp16_enabled = False
        self.loss_cls = build_loss(loss_cls)
        self.loss_bbox = build_loss(loss_bbox)
        self.loss_iou = build_loss(loss_iou)
        # The softmax (non-sigmoid) variant reserves one extra logit for
        # the background / no-object class.
        if self.loss_cls.use_sigmoid:
            self.cls_out_channels = num_classes
        else:
            self.cls_out_channels = num_classes + 1
        self.act_cfg = transformer.get('act_cfg',
                                       dict(type='ReLU', inplace=True))
        self.activate = build_activation_layer(self.act_cfg)
        self.positional_encoding = build_positional_encoding(
            positional_encoding)
        self.transformer = build_transformer(transformer)
        self.embed_dims = self.transformer.embed_dims
        # Sine positional encoding contributes num_feats per spatial axis,
        # so 2 * num_feats must equal the transformer embedding size.
        assert 'num_feats' in positional_encoding
        num_feats = positional_encoding['num_feats']
        assert num_feats * 2 == self.embed_dims, 'embed_dims should' \
            f' be exactly 2 times of num_feats. Found {self.embed_dims}' \
            f' and {num_feats}.'
        self._init_layers()
    def _init_layers(self):
        """Initialize layers of the transformer head."""
        # NOTE: nn.Module registers submodules in assignment order, so
        # reordering these lines would change the state_dict layout.
        # 1x1 conv projecting backbone channels to the transformer dim.
        self.input_proj = Conv2d(
            self.in_channels, self.embed_dims, kernel_size=1)
        # Classification branch: single linear layer over decoder outputs.
        self.fc_cls = Linear(self.embed_dims, self.cls_out_channels)
        # Regression branch: FFN followed by a linear layer producing the
        # 4 box parameters (cx, cy, w, h, before sigmoid).
        self.reg_ffn = FFN(
            self.embed_dims,
            self.embed_dims,
            self.num_reg_fcs,
            self.act_cfg,
            dropout=0.0,
            add_residual=False)
        self.fc_reg = Linear(self.embed_dims, 4)
        # Learnable object-query embeddings, one per query slot.
        self.query_embedding = nn.Embedding(self.num_query, self.embed_dims)
    def init_weights(self):
        """Initialize weights of the transformer head."""
        # The initialization for transformer is important; only the
        # transformer gets a custom init here — the remaining layers keep
        # their default initialization.
        self.transformer.init_weights()
    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        """Load checkpoints, migrating pre-v2 parameter names in place."""
        # NOTE here use `AnchorFreeHead` instead of `TransformerHead`,
        # since `AnchorFreeHead._load_from_state_dict` should not be
        # called here. Invoking the default `Module._load_from_state_dict`
        # is enough.
        # Names of some parameters have been changed between versions;
        # old checkpoints (version < 2) need their keys rewritten.
        version = local_metadata.get('version', None)
        if (version is None or version < 2) and self.__class__ is DETRHead:
            # old substring -> new substring inside parameter names
            convert_dict = {
                '.self_attn.': '.attentions.0.',
                '.ffn.': '.ffns.0.',
                '.multihead_attn.': '.attentions.1.',
                '.decoder.norm.': '.decoder.post_norm.'
            }
            # Iterate over a snapshot of the keys because entries are
            # inserted/deleted from state_dict inside the loop.
            state_dict_keys = list(state_dict.keys())
            for k in state_dict_keys:
                for ori_key, convert_key in convert_dict.items():
                    if ori_key in k:
                        convert_key = k.replace(ori_key, convert_key)
                        state_dict[convert_key] = state_dict[k]
                        del state_dict[k]
        super(AnchorFreeHead,
              self)._load_from_state_dict(state_dict, prefix, local_metadata,
                                          strict, missing_keys,
                                          unexpected_keys, error_msgs)
def forward(self, feats, img_metas):
"""Forward function.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
img_metas (list[dict]): List of image information.
Returns:
tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels.
- all_cls_scores_list (list[Tensor]): Classification scores \
for each scale level. Each is a 4D-tensor with shape \
[nb_dec, bs, num_query, cls_out_channels]. Note \
`cls_out_channels` should includes background.
- all_bbox_preds_list (list[Tensor]): Sigmoid regression \
outputs for each scale level. Each is a 4D-tensor with \
normalized coordinate format (cx, cy, w, h) and shape \
[nb_dec, bs, num_query, 4].
"""
num_levels = len(feats)
img_metas_list = [img_metas for _ in range(num_levels)]
return multi_apply(self.forward_single, feats, img_metas_list)
    def forward_single(self, x, img_metas):
        """Forward function for a single feature level.

        Args:
            x (Tensor): Input feature from backbone's single stage, shape
                [bs, c, h, w].
            img_metas (list[dict]): List of image information.

        Returns:
            all_cls_scores (Tensor): Outputs from the classification head,
                shape [nb_dec, bs, num_query, cls_out_channels]. Note
                cls_out_channels should includes background.
            all_bbox_preds (Tensor): Sigmoid outputs from the regression
                head with normalized coordinate format (cx, cy, w, h).
                Shape [nb_dec, bs, num_query, 4].
        """
        # construct binary masks which are used for the transformer.
        # NOTE following the official DETR repo, non-zero values represent
        # ignored (padded) positions, while zero values mean valid positions.
        batch_size = x.size(0)
        input_img_h, input_img_w = img_metas[0]['batch_input_shape']
        masks = x.new_ones((batch_size, input_img_h, input_img_w))
        for img_id in range(batch_size):
            img_h, img_w, _ = img_metas[img_id]['img_shape']
            # zero out the真valid image area, leaving padding marked as 1
            masks[img_id, :img_h, :img_w] = 0
        # project backbone channels down/up to the transformer embed dim
        x = self.input_proj(x)
        # interpolate masks to have the same spatial shape with x
        masks = F.interpolate(
            masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1)
        # position encoding
        pos_embed = self.positional_encoding(masks)  # [bs, embed_dim, h, w]
        # outs_dec: [nb_dec, bs, num_query, embed_dim]
        outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight,
                                       pos_embed)
        all_cls_scores = self.fc_cls(outs_dec)
        all_bbox_preds = self.fc_reg(self.activate(
            self.reg_ffn(outs_dec))).sigmoid()
        return all_cls_scores, all_bbox_preds
@force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))
def loss(self,
all_cls_scores_list,
all_bbox_preds_list,
gt_bboxes_list,
gt_labels_list,
img_metas,
gt_bboxes_ignore=None):
""""Loss function.
Only outputs from the last feature level are used for computing
losses by default.
Args:
all_cls_scores_list (list[Tensor]): Classification outputs
for each feature level. Each is a 4D-tensor with shape
[nb_dec, bs, num_query, cls_out_channels].
all_bbox_preds_list (list[Tensor]): Sigmoid regression
outputs for each feature level. Each is a 4D-tensor with
normalized coordinate format (cx, cy, w, h) and shape
[nb_dec, bs, num_query, 4].
gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels_list (list[Tensor]): Ground truth class indices for each
image with shape (num_gts, ).
img_metas (list[dict]): List of image meta information.
gt_bboxes_ignore (list[Tensor], optional): Bounding boxes
which can be ignored for each image. Default None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
# NOTE defaultly only the outputs from the last feature scale is used.
all_cls_scores = all_cls_scores_list[-1]
all_bbox_preds = all_bbox_preds_list[-1]
assert gt_bboxes_ignore is None, \
'Only supports for gt_bboxes_ignore setting to None.'
num_dec_layers = len(all_cls_scores)
all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)]
all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)]
all_gt_bboxes_ignore_list = [
gt_bboxes_ignore for _ in range(num_dec_layers)
]
img_metas_list = [img_metas for _ in range(num_dec_layers)]
losses_cls, losses_bbox, losses_iou = multi_apply(
self.loss_single, all_cls_scores, all_bbox_preds,
all_gt_bboxes_list, all_gt_labels_list, img_metas_list,
all_gt_bboxes_ignore_list)
loss_dict = dict()
# loss from the last decoder layer
loss_dict['loss_cls'] = losses_cls[-1]
loss_dict['loss_bbox'] = losses_bbox[-1]
loss_dict['loss_iou'] = losses_iou[-1]
# loss from other decoder layers
num_dec_layer = 0
for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1],
losses_bbox[:-1],
losses_iou[:-1]):
loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i
loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i
loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i
num_dec_layer += 1
return loss_dict
    def loss_single(self,
                    cls_scores,
                    bbox_preds,
                    gt_bboxes_list,
                    gt_labels_list,
                    img_metas,
                    gt_bboxes_ignore_list=None):
        """Loss function for outputs from a single decoder layer of a single
        feature level.

        Args:
            cls_scores (Tensor): Box score logits from a single decoder layer
                for all images. Shape [bs, num_query, cls_out_channels].
            bbox_preds (Tensor): Sigmoid outputs from a single decoder layer
                for all images, with normalized coordinate (cx, cy, w, h) and
                shape [bs, num_query, 4].
            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels_list (list[Tensor]): Ground truth class indices for each
                image with shape (num_gts, ).
            img_metas (list[dict]): List of image meta information.
            gt_bboxes_ignore_list (list[Tensor], optional): Bounding
                boxes which can be ignored for each image. Default None.

        Returns:
            dict[str, Tensor]: A dictionary of loss components for outputs from
                a single decoder layer.
        """
        # Split the batched tensors into per-image lists for target assignment.
        num_imgs = cls_scores.size(0)
        cls_scores_list = [cls_scores[i] for i in range(num_imgs)]
        bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)]
        cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list,
                                           gt_bboxes_list, gt_labels_list,
                                           img_metas, gt_bboxes_ignore_list)
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
         num_total_pos, num_total_neg) = cls_reg_targets
        # Flatten per-image targets so each loss is computed in a single call.
        labels = torch.cat(labels_list, 0)
        label_weights = torch.cat(label_weights_list, 0)
        bbox_targets = torch.cat(bbox_targets_list, 0)
        bbox_weights = torch.cat(bbox_weights_list, 0)

        # classification loss
        cls_scores = cls_scores.reshape(-1, self.cls_out_channels)
        # construct weighted avg_factor to match with the official DETR repo
        cls_avg_factor = num_total_pos * 1.0 + \
            num_total_neg * self.bg_cls_weight
        if self.sync_cls_avg_factor:
            # average the normalization factor across ranks for DDP parity
            cls_avg_factor = reduce_mean(
                cls_scores.new_tensor([cls_avg_factor]))
        cls_avg_factor = max(cls_avg_factor, 1)

        loss_cls = self.loss_cls(
            cls_scores, labels, label_weights, avg_factor=cls_avg_factor)

        # Compute the average number of gt boxes across all gpus, for
        # normalization purposes
        num_total_pos = loss_cls.new_tensor([num_total_pos])
        num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item()

        # construct factors used for rescale bboxes
        factors = []
        for img_meta, bbox_pred in zip(img_metas, bbox_preds):
            img_h, img_w, _ = img_meta['img_shape']
            factor = bbox_pred.new_tensor([img_w, img_h, img_w,
                                           img_h]).unsqueeze(0).repeat(
                                               bbox_pred.size(0), 1)
            factors.append(factor)
        factors = torch.cat(factors, 0)

        # DETR regress the relative position of boxes (cxcywh) in the image,
        # thus the learning target is normalized by the image size. So here
        # we need to re-scale them for calculating IoU loss
        bbox_preds = bbox_preds.reshape(-1, 4)
        bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors
        bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors

        # regression IoU loss, defaultly GIoU loss
        loss_iou = self.loss_iou(
            bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos)

        # regression L1 loss
        loss_bbox = self.loss_bbox(
            bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos)
        return loss_cls, loss_bbox, loss_iou
    def get_targets(self,
                    cls_scores_list,
                    bbox_preds_list,
                    gt_bboxes_list,
                    gt_labels_list,
                    img_metas,
                    gt_bboxes_ignore_list=None):
        """Compute regression and classification targets for a batch image.

        Outputs from a single decoder layer of a single feature level are used.

        Args:
            cls_scores_list (list[Tensor]): Box score logits from a single
                decoder layer for each image with shape [num_query,
                cls_out_channels].
            bbox_preds_list (list[Tensor]): Sigmoid outputs from a single
                decoder layer for each image, with normalized coordinate
                (cx, cy, w, h) and shape [num_query, 4].
            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels_list (list[Tensor]): Ground truth class indices for each
                image with shape (num_gts, ).
            img_metas (list[dict]): List of image meta information.
            gt_bboxes_ignore_list (list[Tensor], optional): Bounding
                boxes which can be ignored for each image. Default None.

        Returns:
            tuple: a tuple containing the following targets.

                - labels_list (list[Tensor]): Labels for all images.
                - label_weights_list (list[Tensor]): Label weights for all \
                    images.
                - bbox_targets_list (list[Tensor]): BBox targets for all \
                    images.
                - bbox_weights_list (list[Tensor]): BBox weights for all \
                    images.
                - num_total_pos (int): Number of positive samples in all \
                    images.
                - num_total_neg (int): Number of negative samples in all \
                    images.
        """
        assert gt_bboxes_ignore_list is None, \
            'Only supports for gt_bboxes_ignore setting to None.'
        num_imgs = len(cls_scores_list)
        # Broadcast the (asserted-None) ignore spec to a per-image list so it
        # can be passed through multi_apply alongside the other lists.
        gt_bboxes_ignore_list = [
            gt_bboxes_ignore_list for _ in range(num_imgs)
        ]
        (labels_list, label_weights_list, bbox_targets_list,
         bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply(
             self._get_target_single, cls_scores_list, bbox_preds_list,
             gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list)
        # Totals are summed over the batch for loss normalization.
        num_total_pos = sum((inds.numel() for inds in pos_inds_list))
        num_total_neg = sum((inds.numel() for inds in neg_inds_list))
        return (labels_list, label_weights_list, bbox_targets_list,
                bbox_weights_list, num_total_pos, num_total_neg)
    def _get_target_single(self,
                           cls_score,
                           bbox_pred,
                           gt_bboxes,
                           gt_labels,
                           img_meta,
                           gt_bboxes_ignore=None):
        """Compute regression and classification targets for one image.

        Outputs from a single decoder layer of a single feature level are used.

        Args:
            cls_score (Tensor): Box score logits from a single decoder layer
                for one image. Shape [num_query, cls_out_channels].
            bbox_pred (Tensor): Sigmoid outputs from a single decoder layer
                for one image, with normalized coordinate (cx, cy, w, h) and
                shape [num_query, 4].
            gt_bboxes (Tensor): Ground truth bboxes for one image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (Tensor): Ground truth class indices for one image
                with shape (num_gts, ).
            img_meta (dict): Meta information for one image.
            gt_bboxes_ignore (Tensor, optional): Bounding boxes
                which can be ignored. Default None.

        Returns:
            tuple[Tensor]: a tuple containing the following for one image.

                - labels (Tensor): Labels of each image.
                - label_weights (Tensor]): Label weights of each image.
                - bbox_targets (Tensor): BBox targets of each image.
                - bbox_weights (Tensor): BBox weights of each image.
                - pos_inds (Tensor): Sampled positive indices for each image.
                - neg_inds (Tensor): Sampled negative indices for each image.
        """
        num_bboxes = bbox_pred.size(0)
        # assigner and sampler: Hungarian matching between the num_query
        # predictions and the ground-truth boxes of this image.
        assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes,
                                             gt_labels, img_meta,
                                             gt_bboxes_ignore)
        sampling_result = self.sampler.sample(assign_result, bbox_pred,
                                              gt_bboxes)
        pos_inds = sampling_result.pos_inds
        neg_inds = sampling_result.neg_inds

        # label targets: unmatched queries are labeled with num_classes,
        # i.e. the background / no-object class.
        labels = gt_bboxes.new_full((num_bboxes, ),
                                    self.num_classes,
                                    dtype=torch.long)
        labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
        label_weights = gt_bboxes.new_ones(num_bboxes)

        # bbox targets: regression is only supervised on matched queries
        # (bbox_weights stays 0 elsewhere).
        bbox_targets = torch.zeros_like(bbox_pred)
        bbox_weights = torch.zeros_like(bbox_pred)
        bbox_weights[pos_inds] = 1.0
        img_h, img_w, _ = img_meta['img_shape']

        # DETR regress the relative position of boxes (cxcywh) in the image.
        # Thus the learning target should be normalized by the image size, also
        # the box format should be converted from defaultly x1y1x2y2 to cxcywh.
        factor = bbox_pred.new_tensor([img_w, img_h, img_w,
                                       img_h]).unsqueeze(0)
        pos_gt_bboxes_normalized = sampling_result.pos_gt_bboxes / factor
        pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized)
        bbox_targets[pos_inds] = pos_gt_bboxes_targets
        return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
                neg_inds)
# over-write because img_metas are needed as inputs for bbox_head.
def forward_train(self,
x,
img_metas,
gt_bboxes,
gt_labels=None,
gt_bboxes_ignore=None,
proposal_cfg=None,
**kwargs):
"""Forward function for training mode.
Args:
x (list[Tensor]): Features from backbone.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes (Tensor): Ground truth bboxes of the image,
shape (num_gts, 4).
gt_labels (Tensor): Ground truth labels of each box,
shape (num_gts,).
gt_bboxes_ignore (Tensor): Ground truth bboxes to be
ignored, shape (num_ignored_gts, 4).
proposal_cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert proposal_cfg is None, '"proposal_cfg" must be None'
outs = self(x, img_metas)
if gt_labels is None:
loss_inputs = outs + (gt_bboxes, img_metas)
else:
loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)
losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
return losses
@force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))
def get_bboxes(self,
all_cls_scores_list,
all_bbox_preds_list,
img_metas,
rescale=False):
"""Transform network outputs for a batch into bbox predictions.
Args:
all_cls_scores_list (list[Tensor]): Classification outputs
for each feature level. Each is a 4D-tensor with shape
[nb_dec, bs, num_query, cls_out_channels].
all_bbox_preds_list (list[Tensor]): Sigmoid regression
outputs for each feature level. Each is a 4D-tensor with
normalized coordinate format (cx, cy, w, h) and shape
[nb_dec, bs, num_query, 4].
img_metas (list[dict]): Meta information of each image.
rescale (bool, optional): If True, return boxes in original
image space. Default False.
Returns:
list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \
The first item is an (n, 5) tensor, where the first 4 columns \
are bounding box positions (tl_x, tl_y, br_x, br_y) and the \
5-th column is a score between 0 and 1. The second item is a \
(n,) tensor where each item is the predicted class label of \
the corresponding box.
"""
# NOTE defaultly only using outputs from the last feature level,
# and only the outputs from the last decoder layer is used.
cls_scores = all_cls_scores_list[-1][-1]
bbox_preds = all_bbox_preds_list[-1][-1]
result_list = []
for img_id in range(len(img_metas)):
cls_score = cls_scores[img_id]
bbox_pred = bbox_preds[img_id]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = self._get_bboxes_single(cls_score, bbox_pred,
img_shape, scale_factor,
rescale)
result_list.append(proposals)
return result_list
    def _get_bboxes_single(self,
                           cls_score,
                           bbox_pred,
                           img_shape,
                           scale_factor,
                           rescale=False):
        """Transform outputs from the last decoder layer into bbox predictions
        for each image.

        Args:
            cls_score (Tensor): Box score logits from the last decoder layer
                for each image. Shape [num_query, cls_out_channels].
            bbox_pred (Tensor): Sigmoid outputs from the last decoder layer
                for each image, with coordinate format (cx, cy, w, h) and
                shape [num_query, 4].
            img_shape (tuple[int]): Shape of input image, (height, width, 3).
            scale_factor (ndarray, optional): Scale factor of the image arange
                as (w_scale, h_scale, w_scale, h_scale).
            rescale (bool, optional): If True, return boxes in original image
                space. Default False.

        Returns:
            tuple[Tensor]: Results of detected bboxes and labels.

                - det_bboxes: Predicted bboxes with shape [num_query, 5], \
                    where the first 4 columns are bounding box positions \
                    (tl_x, tl_y, br_x, br_y) and the 5-th column are scores \
                    between 0 and 1.
                - det_labels: Predicted labels of the corresponding box with \
                    shape [num_query].
        """
        assert len(cls_score) == len(bbox_pred)
        max_per_img = self.test_cfg.get('max_per_img', self.num_query)
        # exclude background
        if self.loss_cls.use_sigmoid:
            cls_score = cls_score.sigmoid()
            # topk over the flattened query x class matrix selects both the
            # query index and its class label in a single call
            scores, indexes = cls_score.view(-1).topk(max_per_img)
            det_labels = indexes % self.num_classes
            bbox_index = indexes // self.num_classes
            bbox_pred = bbox_pred[bbox_index]
        else:
            # softmax variant: drop the trailing background column, then
            # take each query's best foreground class before topk
            scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1)
            scores, bbox_index = scores.topk(max_per_img)
            bbox_pred = bbox_pred[bbox_index]
            det_labels = det_labels[bbox_index]

        # convert the normalized (cx, cy, w, h) outputs to absolute xyxy and
        # clip to the (possibly padded) input image bounds
        det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred)
        det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1]
        det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0]
        det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1])
        det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0])
        if rescale:
            # map back to the original (pre-resize) image space
            det_bboxes /= det_bboxes.new_tensor(scale_factor)
        det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(1)), -1)

        return det_bboxes, det_labels
def simple_test_bboxes(self, feats, img_metas, rescale=False):
    """Test det bboxes without test-time augmentation.

    Args:
        feats (tuple[torch.Tensor]): Multi-level features from the
            upstream network, each is a 4D-tensor.
        img_metas (list[dict]): List of image information.
        rescale (bool, optional): Whether to rescale the results.
            Defaults to False.

    Returns:
        list[tuple[Tensor, Tensor]]: One 2-tuple per image: ``bboxes``
        of shape (n, 5) as (tl_x, tl_y, br_x, br_y, score), and
        ``labels`` of shape (n,).
    """
    # The forward pass of this head needs the image meta information.
    head_outputs = self.forward(feats, img_metas)
    return self.get_bboxes(*head_outputs, img_metas, rescale=rescale)
def forward_onnx(self, feats, img_metas):
    """Forward function for exporting to ONNX.

    Over-writes `forward` because `masks` is created directly with zeros
    (valid-position tag) at the same spatial size as `x`, unlike the
    construction used in `forward`.

    Args:
        feats (tuple[Tensor]): Features from the upstream network, each is
            a 4D-tensor.
        img_metas (list[dict]): List of image information.

    Returns:
        tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels.

            - all_cls_scores_list (list[Tensor]): Classification scores \
                for each scale level. Each is a 4D-tensor with shape \
                [nb_dec, bs, num_query, cls_out_channels]. Note \
                `cls_out_channels` should includes background.
            - all_bbox_preds_list (list[Tensor]): Sigmoid regression \
                outputs for each scale level. Each is a 4D-tensor with \
                normalized coordinate format (cx, cy, w, h) and shape \
                [nb_dec, bs, num_query, 4].
    """
    level_count = len(feats)
    # every level receives the same (shared) img_metas list
    metas_per_level = [img_metas] * level_count
    return multi_apply(self.forward_single_onnx, feats, metas_per_level)
def forward_single_onnx(self, x, img_metas):
    """Forward one feature level, specialised for ONNX exportation.

    Args:
        x (Tensor): Input feature from backbone's single stage, shape
            [bs, c, h, w].
        img_metas (list[dict]): List of image information.

    Returns:
        all_cls_scores (Tensor): Outputs from the classification head,
            shape [nb_dec, bs, num_query, cls_out_channels]. Note
            cls_out_channels should includes background.
        all_bbox_preds (Tensor): Sigmoid outputs from the regression
            head with normalized coordinate format (cx, cy, w, h).
            Shape [nb_dec, bs, num_query, 4].
    """
    # `img_shape` is not dynamically traceable to ONNX because the related
    # augmentation ran in numpy on CPU; the padding mask is therefore
    # created as all-zero (everything valid) at the spatial size of `x`.
    # The torch-vs-ONNX difference is negligible in practice
    # (e.g. 40.1 vs 40.1 for DETR).
    bs = x.size(0)
    feat_h, feat_w = x.size()[-2:]
    padding_mask = x.new_zeros((bs, feat_h, feat_w))  # [B,h,w]
    x = self.input_proj(x)
    # resize the mask to the projected feature's spatial shape
    padding_mask = F.interpolate(
        padding_mask.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1)
    pos_embed = self.positional_encoding(padding_mask)
    decoder_out, _ = self.transformer(x, padding_mask,
                                      self.query_embedding.weight, pos_embed)
    cls_scores = self.fc_cls(decoder_out)
    bbox_preds = self.fc_reg(self.activate(
        self.reg_ffn(decoder_out))).sigmoid()
    return cls_scores, bbox_preds
def onnx_export(self, all_cls_scores_list, all_bbox_preds_list, img_metas):
    """Transform network outputs into bbox predictions, with ONNX
    exportation.

    Args:
        all_cls_scores_list (list[Tensor]): Classification outputs
            for each feature level. Each is a 4D-tensor with shape
            [nb_dec, bs, num_query, cls_out_channels].
        all_bbox_preds_list (list[Tensor]): Sigmoid regression
            outputs for each feature level. Each is a 4D-tensor with
            normalized coordinate format (cx, cy, w, h) and shape
            [nb_dec, bs, num_query, 4].
        img_metas (list[dict]): Meta information of each image.

    Returns:
        tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
            and class labels of shape [N, num_det].
    """
    assert len(img_metas) == 1, \
        'Only support one input image while in exporting to ONNX'
    # keep only the last decoder layer of the last feature level
    cls_scores = all_cls_scores_list[-1][-1]
    bbox_preds = all_bbox_preds_list[-1][-1]
    # Note `img_shape` is not dynamically traceable to ONNX,
    # here `img_shape_for_onnx` (padded shape of image tensor)
    # is used.
    img_shape = img_metas[0]['img_shape_for_onnx']
    max_per_img = self.test_cfg.get('max_per_img', self.num_query)
    batch_size = cls_scores.size(0)
    # `batch_index_offset` is used for the gather of concatenated tensor
    batch_index_offset = torch.arange(batch_size).to(
        cls_scores.device) * max_per_img
    batch_index_offset = batch_index_offset.unsqueeze(1).expand(
        batch_size, max_per_img)
    # supports dynamical batch inference
    if self.loss_cls.use_sigmoid:
        cls_scores = cls_scores.sigmoid()
        # rank all (query, class) pairs jointly per image
        scores, indexes = cls_scores.view(batch_size, -1).topk(
            max_per_img, dim=1)
        det_labels = indexes % self.num_classes
        bbox_index = indexes // self.num_classes
        bbox_index = (bbox_index + batch_index_offset).view(-1)
        bbox_preds = bbox_preds.view(-1, 4)[bbox_index]
        bbox_preds = bbox_preds.view(batch_size, -1, 4)
    else:
        # softmax path: drop background channel, then per-image topk
        scores, det_labels = F.softmax(
            cls_scores, dim=-1)[..., :-1].max(-1)
        scores, bbox_index = scores.topk(max_per_img, dim=1)
        bbox_index = (bbox_index + batch_index_offset).view(-1)
        bbox_preds = bbox_preds.view(-1, 4)[bbox_index]
        det_labels = det_labels.view(-1)[bbox_index]
        bbox_preds = bbox_preds.view(batch_size, -1, 4)
        det_labels = det_labels.view(batch_size, -1)
    det_bboxes = bbox_cxcywh_to_xyxy(bbox_preds)
    # use `img_shape_tensor` for dynamically exporting to ONNX
    img_shape_tensor = img_shape.flip(0).repeat(2)  # [w,h,w,h]
    img_shape_tensor = img_shape_tensor.unsqueeze(0).unsqueeze(0).expand(
        batch_size, det_bboxes.size(1), 4)
    det_bboxes = det_bboxes * img_shape_tensor
    # dynamically clip bboxes
    x1, y1, x2, y2 = det_bboxes.split((1, 1, 1, 1), dim=-1)
    from mmdet.core.export import dynamic_clip_for_onnx
    x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, img_shape)
    det_bboxes = torch.cat([x1, y1, x2, y2], dim=-1)
    det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(-1)), -1)
    return det_bboxes, det_labels
/Flask-Whiteprint-0.0.1.tar.gz/Flask-Whiteprint-0.0.1/whiteprint/exceptions.py | from werkzeug.exceptions import HTTPException
def abort(exception, *args, **kwargs):
    """Instantiate *exception* with the given arguments and raise it."""
    raise exception(*args, **kwargs)
class HTTPExceptionBase(HTTPException):
    """Base class for the API's HTTP errors.

    Subclasses override ``status`` (HTTP status code), ``code``
    (application-level error code) and ``message`` (a %-style template
    formatted with the keyword arguments given to the constructor).
    """
    # no prebuilt response; subclasses inherit these defaults
    response = None
    status = 500
    code = 1000
    message = "Internal server error."
    def __init__(self, **kwargs):
        # NOTE(review): werkzeug's HTTPException.__init__ is not called
        # here -- confirm the framework reads `status`/`message` rather
        # than werkzeug's own `code`/`description` attributes.
        if kwargs:
            self.message = self.message % kwargs
class NotAuthorizedError(HTTPExceptionBase):
    """401 -- the request is not authenticated."""
    status = 401
    code = 1001
    message = "Not authorized."
class PageNotFoundError(HTTPExceptionBase):
    """404 -- the requested page does not exist."""
    status = 404
    code = 1002
    message = "Page not found."
class ResourceNotFoundError(HTTPExceptionBase):
    """404 -- a named resource is missing; format with ``resource=...``."""
    status = 404
    code = 1003
    message = "%(resource)s not found."
class NoPermissionError(HTTPExceptionBase):
    """403 -- the authenticated user lacks permission."""
    status = 403
    code = 1004
    message = "No permission."
class MissingParamError(HTTPExceptionBase):
    """400 -- a required parameter is absent; format with ``param=...``."""
    status = 400
    # NOTE(review): `code = 0` is shared by every error below -- confirm
    # whether distinct application error codes were intended.
    code = 0
    message = "A required parameter '%(param)s' is missing."
class InvalidParamError(HTTPExceptionBase):
    """400 -- a parameter value is invalid; format with ``param=...``."""
    status = 400
    code = 0
    message = "A value of parameter '%(param)s' is invalid."
class AlreadySignedupError(HTTPExceptionBase):
    """401 -- the account already exists."""
    status = 401
    code = 0
    message = "Already signed up."
class WrongPasswordError(HTTPExceptionBase):
    """401 -- password check failed."""
    status = 401
    code = 0
    message = "Wrong password."
class FacebookAuthError(HTTPExceptionBase):
    """401 -- Facebook OAuth failed; format with ``description=...``."""
    status = 401
    code = 0
    message = "Facebook auth failed: %(description)s"
class TwitterAuthError(HTTPExceptionBase):
    """401 -- Twitter OAuth failed; format with ``description=...``."""
    status = 401
    code = 0
    message = "Twitter auth failed: %(description)s"
class FriendMyselfError(HTTPExceptionBase):
    """400 -- a user tried to befriend themselves."""
    status = 400
    code = 0
    message = "Cannot add myself as a friend."
class AlreadyFriendError(HTTPExceptionBase):
    """400 -- the friendship already exists."""
    status = 400
    code = 0
    message = "Already a friend."
class NotFriendError(HTTPExceptionBase):
    """400 -- the target user is not a friend."""
    status = 400
    code = 0
    message = "Not a friend."
class AlreadyExistingVenueError(HTTPExceptionBase):
    """400 -- the venue already exists."""
    status = 400
    code = 0
    message = "Venue already exists."
/Captcha-Impulse-0.0.9.tar.gz/Captcha-Impulse-0.0.9/src/impulse/yolov5/utils/loss.py | import torch
import torch.nn as nn
from utils.metrics import bbox_iou
from utils.torch_utils import de_parallel
def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    """Return label-smoothed BCE targets ``(positive, negative)`` for
    smoothing factor *eps*."""
    half = 0.5 * eps
    return 1.0 - half, half
class BCEBlurWithLogitsLoss(nn.Module):
    """BCE-with-logits loss that damps the contribution of likely
    missing-label examples (confident positive prediction, zero target)."""

    def __init__(self, alpha=0.05):
        super().__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()
        self.alpha = alpha

    def forward(self, pred, true):
        """Return the mean damped BCE loss for logits *pred* vs *true*."""
        raw = self.loss_fcn(pred, true)
        prob = torch.sigmoid(pred)  # prob from logits
        # dx > 0 where the model is positive but the label is 0, i.e. a
        # plausible missing annotation; those elements are down-weighted.
        dx = prob - true
        damping = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
        return (raw * damping).mean()
class FocalLoss(nn.Module):
    """Focal-loss wrapper around an existing BCE-with-logits criterion,
    e.g. ``FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)``."""

    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super().__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma  # focusing parameter
        self.alpha = alpha  # class-balance weight
        # Remember the caller's reduction, then force per-element output so
        # the focal weighting can be applied before reducing.
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'

    def forward(self, pred, true):
        """Apply focal modulation to the wrapped per-element BCE loss."""
        # TF reference implementation:
        # https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        raw = self.loss_fcn(pred, true)
        prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * prob + (1 - true) * (1 - prob)
        alpha_t = true * self.alpha + (1 - true) * (1 - self.alpha)
        focal = raw * alpha_t * (1.0 - p_t) ** self.gamma
        if self.reduction == 'mean':
            return focal.mean()
        if self.reduction == 'sum':
            return focal.sum()
        return focal
class QFocalLoss(nn.Module):
    """Quality focal-loss wrapper around an existing BCE-with-logits
    criterion, e.g. ``QFocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)``."""

    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super().__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma  # focusing parameter
        self.alpha = alpha  # class-balance weight
        # Remember the caller's reduction, then force per-element output so
        # the quality-focal weighting can be applied before reducing.
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'

    def forward(self, pred, true):
        """Apply quality-focal modulation to the per-element BCE loss."""
        raw = self.loss_fcn(pred, true)
        prob = torch.sigmoid(pred)  # prob from logits
        alpha_t = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulation = torch.abs(true - prob) ** self.gamma
        weighted = raw * alpha_t * modulation
        if self.reduction == 'mean':
            return weighted.mean()
        if self.reduction == 'sum':
            return weighted.sum()
        return weighted
class ComputeLoss:
    """Compute the YOLOv5 training loss as a weighted sum of box
    regression, objectness and classification terms."""

    def __init__(self, model, autobalance=False):
        """Build the loss criteria and cache detection-head metadata.

        Args:
            model: YOLOv5 model; its ``hyp`` dict supplies the loss
                hyperparameters and its last module is the Detect() head.
            autobalance (bool): If True, adapt the per-level objectness
                weights during training.
        """
        self.sort_obj_iou = False
        device = next(model.parameters()).device  # get model device
        h = model.hyp  # hyperparameters
        # Define criteria
        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
        self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets
        # Focal loss (only when fl_gamma > 0)
        g = h['fl_gamma']  # focal loss gamma
        if g > 0:
            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
        det = de_parallel(model).model[-1]  # Detect() module
        self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, 0.02])  # P3-P7
        self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
        # copy anchor/class counts and anchors from the Detect() head
        for k in 'na', 'nc', 'nl', 'anchors':
            setattr(self, k, getattr(det, k))

    def __call__(self, p, targets):  # predictions, targets, model
        """Return ``(total_loss * batch_size, detached [lbox, lobj, lcls])``."""
        device = targets.device
        lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
        tcls, tbox, indices, anchors = self.build_targets(p, targets)  # targets
        # Losses
        for i, pi in enumerate(p):  # layer index, layer predictions
            b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
            tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj
            n = b.shape[0]  # number of targets
            if n:
                ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets
                # Regression
                pxy = ps[:, :2].sigmoid() * 2 - 0.5
                pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
                pbox = torch.cat((pxy, pwh), 1)  # predicted box
                iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss
                # Objectness: target is the (clamped, detached) IoU
                score_iou = iou.detach().clamp(0).type(tobj.dtype)
                if self.sort_obj_iou:
                    sort_id = torch.argsort(score_iou)
                    b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id]
                tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * score_iou  # iou ratio
                # Classification
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(ps[:, 5:], self.cn, device=device)  # targets
                    t[range(n), tcls[i]] = self.cp
                    lcls += self.BCEcls(ps[:, 5:], t)  # BCE
                # Append targets to text file
                # with open('targets.txt', 'a') as file:
                #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
            obji = self.BCEobj(pi[..., 4], tobj)
            lobj += obji * self.balance[i]  # obj loss
            if self.autobalance:
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()
        if self.autobalance:
            # renormalize so the stride-16 level keeps weight 1.0
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        lbox *= self.hyp['box']
        lobj *= self.hyp['obj']
        lcls *= self.hyp['cls']
        bs = tobj.shape[0]  # batch size
        return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach()

    def build_targets(self, p, targets):
        """Match ground-truth boxes to anchors and grid cells per layer.

        Args:
            p (list[Tensor]): per-layer predictions (used for shapes only).
            targets (Tensor): ground truth rows of
                (image, class, x, y, w, h) in normalized coordinates.

        Returns:
            tuple: ``(tcls, tbox, indices, anchors)`` lists, one entry
            per detection layer.
        """
        # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        tcls, tbox, indices, anch = [], [], [], []
        gain = torch.ones(7, device=targets.device)  # normalized to gridspace gain
        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices
        g = 0.5  # bias
        off = torch.tensor([[0, 0],
                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                            ], device=targets.device).float() * g  # offsets
        for i in range(self.nl):
            anchors = self.anchors[i]
            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain
            # Match targets to anchors
            t = targets * gain
            if nt:
                # Matches: keep targets whose w/h ratio to the anchor is bounded
                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter
                # Offsets: also assign the two nearest neighbouring cells
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1 < g) & (gxy > 1)).T
                l, m = ((gxi % 1 < g) & (gxi > 1)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
            else:
                t = targets[0]
                offsets = 0
            # Define
            b, c = t[:, :2].long().T  # image, class
            gxy = t[:, 2:4]  # grid xy
            gwh = t[:, 4:6]  # grid wh
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid xy indices
            # Append
            a = t[:, 6].long()  # anchor indices
            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
            tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
            anch.append(anchors[a])  # anchors
            tcls.append(c)  # class
        return tcls, tbox, indices, anch
/Congo-0.0.1.tar.gz/Congo-0.0.1/portfolio/component/static/portfolio/vendor/mdeditor/bower_components/codemirror/mode/haskell/haskell.js | CodeMirror.defineMode("haskell", function() {
function switchState(source, setState, f) {
setState(f);
return f(source, setState);
}
// These should all be Unicode extended, as per the Haskell 2010 report
var smallRE = /[a-z_]/;
var largeRE = /[A-Z]/;
var digitRE = /[0-9]/;
var hexitRE = /[0-9A-Fa-f]/;
var octitRE = /[0-7]/;
var idRE = /[a-z_A-Z0-9']/;
var symbolRE = /[-!#$%&*+.\/<=>?@\\^|~:]/;
var specialRE = /[(),;[\]`{}]/;
var whiteCharRE = /[ \t\v\f]/; // newlines are handled in tokenizer
function normal(source, setState) {
if (source.eatWhile(whiteCharRE)) {
return null;
}
var ch = source.next();
if (specialRE.test(ch)) {
if (ch == '{' && source.eat('-')) {
var t = "comment";
if (source.eat('#')) {
t = "meta";
}
return switchState(source, setState, ncomment(t, 1));
}
return null;
}
if (ch == '\'') {
if (source.eat('\\')) {
source.next(); // should handle other escapes here
}
else {
source.next();
}
if (source.eat('\'')) {
return "string";
}
return "error";
}
if (ch == '"') {
return switchState(source, setState, stringLiteral);
}
if (largeRE.test(ch)) {
source.eatWhile(idRE);
if (source.eat('.')) {
return "qualifier";
}
return "variable-2";
}
if (smallRE.test(ch)) {
source.eatWhile(idRE);
return "variable";
}
if (digitRE.test(ch)) {
if (ch == '0') {
if (source.eat(/[xX]/)) {
source.eatWhile(hexitRE); // should require at least 1
return "integer";
}
if (source.eat(/[oO]/)) {
source.eatWhile(octitRE); // should require at least 1
return "number";
}
}
source.eatWhile(digitRE);
var t = "number";
if (source.eat('.')) {
t = "number";
source.eatWhile(digitRE); // should require at least 1
}
if (source.eat(/[eE]/)) {
t = "number";
source.eat(/[-+]/);
source.eatWhile(digitRE); // should require at least 1
}
return t;
}
if (symbolRE.test(ch)) {
if (ch == '-' && source.eat(/-/)) {
source.eatWhile(/-/);
if (!source.eat(symbolRE)) {
source.skipToEnd();
return "comment";
}
}
var t = "variable";
if (ch == ':') {
t = "variable-2";
}
source.eatWhile(symbolRE);
return t;
}
return "error";
}
function ncomment(type, nest) {
if (nest == 0) {
return normal;
}
return function(source, setState) {
var currNest = nest;
while (!source.eol()) {
var ch = source.next();
if (ch == '{' && source.eat('-')) {
++currNest;
}
else if (ch == '-' && source.eat('}')) {
--currNest;
if (currNest == 0) {
setState(normal);
return type;
}
}
}
setState(ncomment(type, currNest));
return type;
};
}
function stringLiteral(source, setState) {
while (!source.eol()) {
var ch = source.next();
if (ch == '"') {
setState(normal);
return "string";
}
if (ch == '\\') {
if (source.eol() || source.eat(whiteCharRE)) {
setState(stringGap);
return "string";
}
if (source.eat('&')) {
}
else {
source.next(); // should handle other escapes here
}
}
}
setState(normal);
return "error";
}
function stringGap(source, setState) {
if (source.eat('\\')) {
return switchState(source, setState, stringLiteral);
}
source.next();
setState(normal);
return "error";
}
var wellKnownWords = (function() {
var wkw = {};
function setType(t) {
return function () {
for (var i = 0; i < arguments.length; i++)
wkw[arguments[i]] = t;
};
}
setType("keyword")(
"case", "class", "data", "default", "deriving", "do", "else", "foreign",
"if", "import", "in", "infix", "infixl", "infixr", "instance", "let",
"module", "newtype", "of", "then", "type", "where", "_");
setType("keyword")(
"\.\.", ":", "::", "=", "\\", "\"", "<-", "->", "@", "~", "=>");
setType("builtin")(
"!!", "$!", "$", "&&", "+", "++", "-", ".", "/", "/=", "<", "<=", "=<<",
"==", ">", ">=", ">>", ">>=", "^", "^^", "||", "*", "**");
setType("builtin")(
"Bool", "Bounded", "Char", "Double", "EQ", "Either", "Enum", "Eq",
"False", "FilePath", "Float", "Floating", "Fractional", "Functor", "GT",
"IO", "IOError", "Int", "Integer", "Integral", "Just", "LT", "Left",
"Maybe", "Monad", "Nothing", "Num", "Ord", "Ordering", "Rational", "Read",
"ReadS", "Real", "RealFloat", "RealFrac", "Right", "Show", "ShowS",
"String", "True");
setType("builtin")(
"abs", "acos", "acosh", "all", "and", "any", "appendFile", "asTypeOf",
"asin", "asinh", "atan", "atan2", "atanh", "break", "catch", "ceiling",
"compare", "concat", "concatMap", "const", "cos", "cosh", "curry",
"cycle", "decodeFloat", "div", "divMod", "drop", "dropWhile", "either",
"elem", "encodeFloat", "enumFrom", "enumFromThen", "enumFromThenTo",
"enumFromTo", "error", "even", "exp", "exponent", "fail", "filter",
"flip", "floatDigits", "floatRadix", "floatRange", "floor", "fmap",
"foldl", "foldl1", "foldr", "foldr1", "fromEnum", "fromInteger",
"fromIntegral", "fromRational", "fst", "gcd", "getChar", "getContents",
"getLine", "head", "id", "init", "interact", "ioError", "isDenormalized",
"isIEEE", "isInfinite", "isNaN", "isNegativeZero", "iterate", "last",
"lcm", "length", "lex", "lines", "log", "logBase", "lookup", "map",
"mapM", "mapM_", "max", "maxBound", "maximum", "maybe", "min", "minBound",
"minimum", "mod", "negate", "not", "notElem", "null", "odd", "or",
"otherwise", "pi", "pred", "print", "product", "properFraction",
"putChar", "putStr", "putStrLn", "quot", "quotRem", "read", "readFile",
"readIO", "readList", "readLn", "readParen", "reads", "readsPrec",
"realToFrac", "recip", "rem", "repeat", "replicate", "return", "reverse",
"round", "scaleFloat", "scanl", "scanl1", "scanr", "scanr1", "seq",
"sequence", "sequence_", "show", "showChar", "showList", "showParen",
"showString", "shows", "showsPrec", "significand", "signum", "sin",
"sinh", "snd", "span", "splitAt", "sqrt", "subtract", "succ", "sum",
"tail", "take", "takeWhile", "tan", "tanh", "toEnum", "toInteger",
"toRational", "truncate", "uncurry", "undefined", "unlines", "until",
"unwords", "unzip", "unzip3", "userError", "words", "writeFile", "zip",
"zip3", "zipWith", "zipWith3");
return wkw;
})();
  // CodeMirror mode interface: per-line state holds the current
  // tokenizer function `f`; known lexemes get their registered style.
  return {
    startState: function () { return { f: normal }; },
    copyState: function (s) { return { f: s.f }; },
    token: function(stream, state) {
      var t = state.f(stream, function(s) { state.f = s; });
      var w = stream.current();
      // keyword/builtin lookup overrides the tokenizer's generic style
      return (w in wellKnownWords) ? wellKnownWords[w] : t;
    },
    blockCommentStart: "{-",
    blockCommentEnd: "-}",
    lineComment: "--"
  };
});
CodeMirror.defineMIME("text/x-haskell", "haskell");
semver(1) -- The semantic versioner for npm
===========================================
## Install
```bash
npm install semver
```
## Usage
As a node module:
```js
const semver = require('semver')
semver.valid('1.2.3') // '1.2.3'
semver.valid('a.b.c') // null
semver.clean(' =v1.2.3 ') // '1.2.3'
semver.satisfies('1.2.3', '1.x || >=2.5.0 || 5.0.0 - 7.2.3') // true
semver.gt('1.2.3', '9.8.7') // false
semver.lt('1.2.3', '9.8.7') // true
semver.minVersion('>=1.0.0') // '1.0.0'
semver.valid(semver.coerce('v2')) // '2.0.0'
semver.valid(semver.coerce('42.6.7.9.3-alpha')) // '42.6.7'
```
As a command-line utility:
```
$ semver -h
A JavaScript implementation of the https://semver.org/ specification
Copyright Isaac Z. Schlueter
Usage: semver [options] <version> [<version> [...]]
Prints valid versions sorted by SemVer precedence
Options:
-r --range <range>
Print versions that match the specified range.
-i --increment [<level>]
Increment a version by the specified level. Level can
be one of: major, minor, patch, premajor, preminor,
prepatch, or prerelease. Default level is 'patch'.
Only one version may be specified.
--preid <identifier>
Identifier to be used to prefix premajor, preminor,
prepatch or prerelease version increments.
-l --loose
Interpret versions and ranges loosely
-p --include-prerelease
Always include prerelease versions in range matching
-c --coerce
Coerce a string into SemVer if possible
(does not imply --loose)
--rtl
Coerce version strings right to left
--ltr
Coerce version strings left to right (default)
Program exits successfully if any valid version satisfies
all supplied ranges, and prints all satisfying versions.
If no satisfying versions are found, then exits failure.
Versions are printed in ascending order, so supplying
multiple versions to the utility will just sort them.
```
## Versions
A "version" is described by the `v2.0.0` specification found at
<https://semver.org/>.
A leading `"="` or `"v"` character is stripped off and ignored.
## Ranges
A `version range` is a set of `comparators` which specify versions
that satisfy the range.
A `comparator` is composed of an `operator` and a `version`. The set
of primitive `operators` is:
* `<` Less than
* `<=` Less than or equal to
* `>` Greater than
* `>=` Greater than or equal to
* `=` Equal. If no operator is specified, then equality is assumed,
so this operator is optional, but MAY be included.
For example, the comparator `>=1.2.7` would match the versions
`1.2.7`, `1.2.8`, `2.5.3`, and `1.3.9`, but not the versions `1.2.6`
or `1.1.0`.
Comparators can be joined by whitespace to form a `comparator set`,
which is satisfied by the **intersection** of all of the comparators
it includes.
A range is composed of one or more comparator sets, joined by `||`. A
version matches a range if and only if every comparator in at least
one of the `||`-separated comparator sets is satisfied by the version.
For example, the range `>=1.2.7 <1.3.0` would match the versions
`1.2.7`, `1.2.8`, and `1.2.99`, but not the versions `1.2.6`, `1.3.0`,
or `1.1.0`.
The range `1.2.7 || >=1.2.9 <2.0.0` would match the versions `1.2.7`,
`1.2.9`, and `1.4.6`, but not the versions `1.2.8` or `2.0.0`.
### Prerelease Tags
If a version has a prerelease tag (for example, `1.2.3-alpha.3`) then
it will only be allowed to satisfy comparator sets if at least one
comparator with the same `[major, minor, patch]` tuple also has a
prerelease tag.
For example, the range `>1.2.3-alpha.3` would be allowed to match the
version `1.2.3-alpha.7`, but it would *not* be satisfied by
`3.4.5-alpha.9`, even though `3.4.5-alpha.9` is technically "greater
than" `1.2.3-alpha.3` according to the SemVer sort rules. The version
range only accepts prerelease tags on the `1.2.3` version. The
version `3.4.5` *would* satisfy the range, because it does not have a
prerelease flag, and `3.4.5` is greater than `1.2.3-alpha.7`.
The purpose for this behavior is twofold. First, prerelease versions
frequently are updated very quickly, and contain many breaking changes
that are (by the author's design) not yet fit for public consumption.
Therefore, by default, they are excluded from range matching
semantics.
Second, a user who has opted into using a prerelease version has
clearly indicated the intent to use *that specific* set of
alpha/beta/rc versions. By including a prerelease tag in the range,
the user is indicating that they are aware of the risk. However, it
is still not appropriate to assume that they have opted into taking a
similar risk on the *next* set of prerelease versions.
Note that this behavior can be suppressed (treating all prerelease
versions as if they were normal versions, for the purpose of range
matching) by setting the `includePrerelease` flag on the options
object to any
[functions](https://github.com/npm/node-semver#functions) that do
range matching.
#### Prerelease Identifiers
The method `.inc` takes an additional `identifier` string argument that
will append the value of the string as a prerelease identifier:
```javascript
semver.inc('1.2.3', 'prerelease', 'beta')
// '1.2.4-beta.0'
```
command-line example:
```bash
$ semver 1.2.3 -i prerelease --preid beta
1.2.4-beta.0
```
Which then can be used to increment further:
```bash
$ semver 1.2.4-beta.0 -i prerelease
1.2.4-beta.1
```
### Advanced Range Syntax
Advanced range syntax desugars to primitive comparators in
deterministic ways.
Advanced ranges may be combined in the same way as primitive
comparators using white space or `||`.
#### Hyphen Ranges `X.Y.Z - A.B.C`
Specifies an inclusive set.
* `1.2.3 - 2.3.4` := `>=1.2.3 <=2.3.4`
If a partial version is provided as the first version in the inclusive
range, then the missing pieces are replaced with zeroes.
* `1.2 - 2.3.4` := `>=1.2.0 <=2.3.4`
If a partial version is provided as the second version in the
inclusive range, then all versions that start with the supplied parts
of the tuple are accepted, but nothing that would be greater than the
provided tuple parts.
* `1.2.3 - 2.3` := `>=1.2.3 <2.4.0`
* `1.2.3 - 2` := `>=1.2.3 <3.0.0`
#### X-Ranges `1.2.x` `1.X` `1.2.*` `*`
Any of `X`, `x`, or `*` may be used to "stand in" for one of the
numeric values in the `[major, minor, patch]` tuple.
* `*` := `>=0.0.0` (Any version satisfies)
* `1.x` := `>=1.0.0 <2.0.0` (Matching major version)
* `1.2.x` := `>=1.2.0 <1.3.0` (Matching major and minor versions)
A partial version range is treated as an X-Range, so the special
character is in fact optional.
* `""` (empty string) := `*` := `>=0.0.0`
* `1` := `1.x.x` := `>=1.0.0 <2.0.0`
* `1.2` := `1.2.x` := `>=1.2.0 <1.3.0`
#### Tilde Ranges `~1.2.3` `~1.2` `~1`
Allows patch-level changes if a minor version is specified on the
comparator. Allows minor-level changes if not.
* `~1.2.3` := `>=1.2.3 <1.(2+1).0` := `>=1.2.3 <1.3.0`
* `~1.2` := `>=1.2.0 <1.(2+1).0` := `>=1.2.0 <1.3.0` (Same as `1.2.x`)
* `~1` := `>=1.0.0 <(1+1).0.0` := `>=1.0.0 <2.0.0` (Same as `1.x`)
* `~0.2.3` := `>=0.2.3 <0.(2+1).0` := `>=0.2.3 <0.3.0`
* `~0.2` := `>=0.2.0 <0.(2+1).0` := `>=0.2.0 <0.3.0` (Same as `0.2.x`)
* `~0` := `>=0.0.0 <(0+1).0.0` := `>=0.0.0 <1.0.0` (Same as `0.x`)
* `~1.2.3-beta.2` := `>=1.2.3-beta.2 <1.3.0` Note that prereleases in
the `1.2.3` version will be allowed, if they are greater than or
equal to `beta.2`. So, `1.2.3-beta.4` would be allowed, but
`1.2.4-beta.2` would not, because it is a prerelease of a
different `[major, minor, patch]` tuple.
#### Caret Ranges `^1.2.3` `^0.2.5` `^0.0.4`
Allows changes that do not modify the left-most non-zero element in the
`[major, minor, patch]` tuple. In other words, this allows patch and
minor updates for versions `1.0.0` and above, patch updates for
versions `0.X >=0.1.0`, and *no* updates for versions `0.0.X`.
Many authors treat a `0.x` version as if the `x` were the major
"breaking-change" indicator.
Caret ranges are ideal when an author may make breaking changes
between `0.2.4` and `0.3.0` releases, which is a common practice.
However, it presumes that there will *not* be breaking changes between
`0.2.4` and `0.2.5`. It allows for changes that are presumed to be
additive (but non-breaking), according to commonly observed practices.
* `^1.2.3` := `>=1.2.3 <2.0.0`
* `^0.2.3` := `>=0.2.3 <0.3.0`
* `^0.0.3` := `>=0.0.3 <0.0.4`
* `^1.2.3-beta.2` := `>=1.2.3-beta.2 <2.0.0` Note that prereleases in
the `1.2.3` version will be allowed, if they are greater than or
equal to `beta.2`. So, `1.2.3-beta.4` would be allowed, but
`1.2.4-beta.2` would not, because it is a prerelease of a
different `[major, minor, patch]` tuple.
* `^0.0.3-beta` := `>=0.0.3-beta <0.0.4` Note that prereleases in the
`0.0.3` version *only* will be allowed, if they are greater than or
equal to `beta`. So, `0.0.3-pr.2` would be allowed.
When parsing caret ranges, a missing `patch` value desugars to the
number `0`, but will allow flexibility within that value, even if the
major and minor versions are both `0`.
* `^1.2.x` := `>=1.2.0 <2.0.0`
* `^0.0.x` := `>=0.0.0 <0.1.0`
* `^0.0` := `>=0.0.0 <0.1.0`
A missing `minor` and `patch` values will desugar to zero, but also
allow flexibility within those values, even if the major version is
zero.
* `^1.x` := `>=1.0.0 <2.0.0`
* `^0.x` := `>=0.0.0 <1.0.0`
### Range Grammar
Putting all this together, here is a Backus-Naur grammar for ranges,
for the benefit of parser authors:
```bnf
range-set ::= range ( logical-or range ) *
logical-or ::= ( ' ' ) * '||' ( ' ' ) *
range ::= hyphen | simple ( ' ' simple ) * | ''
hyphen ::= partial ' - ' partial
simple ::= primitive | partial | tilde | caret
primitive ::= ( '<' | '>' | '>=' | '<=' | '=' ) partial
partial ::= xr ( '.' xr ( '.' xr qualifier ? )? )?
xr ::= 'x' | 'X' | '*' | nr
nr ::= '0' | ['1'-'9'] ( ['0'-'9'] ) *
tilde ::= '~' partial
caret ::= '^' partial
qualifier ::= ( '-' pre )? ( '+' build )?
pre ::= parts
build ::= parts
parts ::= part ( '.' part ) *
part ::= nr | [-0-9A-Za-z]+
```
## Functions
All methods and classes take a final `options` object argument. All
options in this object are `false` by default. The options supported
are:
- `loose` Be more forgiving about not-quite-valid semver strings.
(Any resulting output will always be 100% strict compliant, of
course.) For backwards compatibility reasons, if the `options`
argument is a boolean value instead of an object, it is interpreted
to be the `loose` param.
- `includePrerelease` Set to suppress the [default
behavior](https://github.com/npm/node-semver#prerelease-tags) of
excluding prerelease tagged versions from ranges unless they are
explicitly opted into.
Strict-mode Comparators and Ranges will be strict about the SemVer
strings that they parse.
* `valid(v)`: Return the parsed version, or null if it's not valid.
* `inc(v, release)`: Return the version incremented by the release
type (`major`, `premajor`, `minor`, `preminor`, `patch`,
`prepatch`, or `prerelease`), or null if it's not valid
* `premajor` in one call will bump the version up to the next major
version and down to a prerelease of that major version.
`preminor`, and `prepatch` work the same way.
* If called from a non-prerelease version, the `prerelease` will work the
same as `prepatch`. It increments the patch version, then makes a
prerelease. If the input version is already a prerelease it simply
increments it.
* `prerelease(v)`: Returns an array of prerelease components, or null
if none exist. Example: `prerelease('1.2.3-alpha.1') -> ['alpha', 1]`
* `major(v)`: Return the major version number.
* `minor(v)`: Return the minor version number.
* `patch(v)`: Return the patch version number.
* `intersects(r1, r2, loose)`: Return true if the two supplied ranges
or comparators intersect.
* `parse(v)`: Attempt to parse a string as a semantic version, returning either
a `SemVer` object or `null`.
### Comparison
* `gt(v1, v2)`: `v1 > v2`
* `gte(v1, v2)`: `v1 >= v2`
* `lt(v1, v2)`: `v1 < v2`
* `lte(v1, v2)`: `v1 <= v2`
* `eq(v1, v2)`: `v1 == v2` This is true if they're logically equivalent,
even if they're not the exact same string. You already know how to
compare strings.
* `neq(v1, v2)`: `v1 != v2` The opposite of `eq`.
* `cmp(v1, comparator, v2)`: Pass in a comparison string, and it'll call
the corresponding function above. `"==="` and `"!=="` do simple
string comparison, but are included for completeness. Throws if an
invalid comparison string is provided.
* `compare(v1, v2)`: Return `0` if `v1 == v2`, or `1` if `v1` is greater, or `-1` if
`v2` is greater. Sorts in ascending order if passed to `Array.sort()`.
* `rcompare(v1, v2)`: The reverse of compare. Sorts an array of versions
in descending order when passed to `Array.sort()`.
* `compareBuild(v1, v2)`: The same as `compare` but considers `build` when two versions
  are equal. Sorts in ascending order if passed to `Array.sort()`.
* `diff(v1, v2)`: Returns difference between two versions by the release type
(`major`, `premajor`, `minor`, `preminor`, `patch`, `prepatch`, or `prerelease`),
or null if the versions are the same.
### Comparators
* `intersects(comparator)`: Return true if the comparators intersect
### Ranges
* `validRange(range)`: Return the valid range or null if it's not valid
* `satisfies(version, range)`: Return true if the version satisfies the
range.
* `maxSatisfying(versions, range)`: Return the highest version in the list
that satisfies the range, or `null` if none of them do.
* `minSatisfying(versions, range)`: Return the lowest version in the list
that satisfies the range, or `null` if none of them do.
* `minVersion(range)`: Return the lowest version that can possibly match
the given range.
* `gtr(version, range)`: Return `true` if version is greater than all the
versions possible in the range.
* `ltr(version, range)`: Return `true` if version is less than all the
versions possible in the range.
* `outside(version, range, hilo)`: Return true if the version is outside
the bounds of the range in either the high or low direction. The
`hilo` argument must be either the string `'>'` or `'<'`. (This is
the function called by `gtr` and `ltr`.)
* `intersects(range)`: Return true if any of the ranges comparators intersect
Note that, since ranges may be non-contiguous, a version might not be
greater than a range, less than a range, *or* satisfy a range! For
example, the range `1.2 <1.2.9 || >2.0.0` would have a hole from `1.2.9`
until `2.0.0`, so the version `1.2.10` would not be greater than the
range (because `2.0.1` satisfies, which is higher), nor less than the
range (since `1.2.8` satisfies, which is lower), and it also does not
satisfy the range.
If you want to know if a version satisfies or does not satisfy a
range, use the `satisfies(version, range)` function.
### Coercion
* `coerce(version, options)`: Coerces a string to semver if possible
This aims to provide a very forgiving translation of a non-semver string to
semver. It looks for the first digit in a string, and consumes all
remaining characters which satisfy at least a partial semver (e.g., `1`,
`1.2`, `1.2.3`) up to the max permitted length (256 characters). Longer
versions are simply truncated (`4.6.3.9.2-alpha2` becomes `4.6.3`). All
surrounding text is simply ignored (`v3.4 replaces v3.3.1` becomes
`3.4.0`). Only text which lacks digits will fail coercion (`version one`
is not valid). The maximum length for any semver component considered for
coercion is 16 characters; longer components will be ignored
(`10000000000000000.4.7.4` becomes `4.7.4`). The maximum value for any
semver component is `Integer.MAX_SAFE_INTEGER || (2**53 - 1)`; higher value
components are invalid (`9999999999999999.4.7.4` is likely invalid).
If the `options.rtl` flag is set, then `coerce` will return the right-most
coercible tuple that does not share an ending index with a longer coercible
tuple. For example, `1.2.3.4` will return `2.3.4` in rtl mode, not
`4.0.0`. `1.2.3/4` will return `4.0.0`, because the `4` is not a part of
any other overlapping SemVer tuple.
### Clean
* `clean(version)`: Clean a string to be a valid semver if possible
This will return a cleaned and trimmed semver version. If the provided version is not valid a null will be returned. This does not work for ranges.
ex.
* `s.clean(' = v 2.1.5foo')`: `null`
* `s.clean(' = v 2.1.5foo', { loose: true })`: `'2.1.5-foo'`
* `s.clean(' = v 2.1.5-foo')`: `null`
* `s.clean(' = v 2.1.5-foo', { loose: true })`: `'2.1.5-foo'`
* `s.clean('=v2.1.5')`: `'2.1.5'`
* `s.clean(' =v2.1.5')`: `'2.1.5'`
* `s.clean(' 2.1.5 ')`: `'2.1.5'`
* `s.clean('~1.0.0')`: `null`
| PypiClean |
/BigJob2-0.54.post73.tar.gz/BigJob2-0.54.post73/examples/pilot-api/example-pilot-api-multiple.py | import sys
import os
import time
import logging
logging.basicConfig(level=logging.DEBUG)
sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
sys.path.insert(0, os.getcwd() + "/../")
from pilot import PilotComputeService, ComputeDataService, State
COORDINATION_URL = "redis://localhost:6379"


if __name__ == "__main__":
    # Start a pilot-compute service coordinated through the local Redis server.
    pilot_compute_service = PilotComputeService(coordination_url=COORDINATION_URL)

    # Describe the pilots: one local (fork) process working under ./work,
    # tagged with affinity labels matching the compute units below.
    pilot_compute_description = {
        "service_url": 'fork://localhost',
        "number_of_processes": 1,
        "working_directory": os.path.join(os.getcwd(), "work"),
        'affinity_datacenter_label': "eu-de-south",
        'affinity_machine_label': "mymachine",
    }
    # Launch two identical pilots from the same description.
    pilot_a = pilot_compute_service.create_pilot(pilot_compute_description=pilot_compute_description)
    pilot_b = pilot_compute_service.create_pilot(pilot_compute_description=pilot_compute_description)

    compute_data_service = ComputeDataService()
    compute_data_service.add_pilot_compute_service(pilot_compute_service)

    # Describe a trivial compute unit (/bin/date) and submit ten of them.
    compute_unit_description = {
        "executable": "/bin/date",
        "arguments": [""],
        "total_core_count": 1,
        "number_of_processes": 1,
        "output": "stdout.txt",
        "error": "stderr.txt",
        "affinity_datacenter_label": "eu-de-south",
        "affinity_machine_label": "mymachine",
    }
    cus = [compute_data_service.submit_compute_unit(compute_unit_description)
           for _ in range(10)]

    logging.debug("Finished setup. Waiting for scheduling of CU")
    # Block until all submitted compute units have finished.
    compute_data_service.wait()

    logging.debug("Terminate Pilot Compute and Compute Data Service")
    compute_data_service.cancel()
    pilot_compute_service.cancel()
/ChatDocument-2023.4.25.9.50.0-py3-none-any.whl/chatllm/uis/gradio_ui.py |
import gradio as gr
import mdtex2html
########################################################################
# Model bootstrap: load the ChatGLM checkpoint once at import time and
# build the chat function / QA application used by the Gradio callbacks.
from chatllm.utils import llm_load, llm_load4chat
from chatllm.applications import Chat

# Local path to the ChatGLM weights; device='mps' selects the Apple Metal
# backend (machine-specific path — adjust when deploying elsewhere).
MODEL = '/Users/betterme/PycharmProjects/AI/CHAT_MODEL/chatglm'
chat_func = llm_load4chat(MODEL, device='mps')
qa = Chat(chat_func)
########################################################################
"""Override Chatbot.postprocess"""
def postprocess(self, y):
if y is None:
return []
for i, (message, response) in enumerate(y):
y[i] = (
None if message is None else mdtex2html.convert((message)),
None if response is None else mdtex2html.convert(response),
)
return y
gr.Chatbot.postprocess = postprocess
def parse_text(text):
    """Convert plain chat text to HTML for display in the Gradio chatbot.

    Adapted from https://github.com/GaiZhenbiao/ChuanhuChatGPT/. Fenced
    ``` blocks become <pre><code> sections; lines inside a code block have
    markup/markdown characters escaped as HTML entities so they render
    literally. Lines outside code blocks are joined with <br>.
    """
    lines = text.split("\n")
    lines = [line for line in lines if line != ""]
    count = 0
    for i, line in enumerate(lines):
        if "```" in line:
            count += 1
            items = line.split('`')
            if count % 2 == 1:
                # Opening fence: start a highlighted code section; the text
                # after the backticks (if any) is the language tag.
                lines[i] = f'<pre><code class="language-{items[-1]}">'
            else:
                # Closing fence.
                lines[i] = '<br></code></pre>'
        else:
            if i > 0:
                if count % 2 == 1:
                    # Inside a code block: escape special characters as HTML
                    # entities. (The shipped copy had these corrupted into
                    # no-op replacements such as replace("<", "<"); restored
                    # to the upstream entity escapes.)
                    line = line.replace("`", "\`")
                    line = line.replace("<", "&lt;")
                    line = line.replace(">", "&gt;")
                    line = line.replace(" ", "&nbsp;")
                    line = line.replace("*", "&#42;")
                    line = line.replace("_", "&#95;")
                    line = line.replace("-", "&#45;")
                    line = line.replace(".", "&#46;")
                    line = line.replace("!", "&#33;")
                    line = line.replace("(", "&#40;")
                    line = line.replace(")", "&#41;")
                    line = line.replace("$", "&#36;")
                lines[i] = "<br>" + line
    return "".join(lines)
def predict(input, chatbot, max_length, top_p, temperature, history, knowledge_base=''):
    """Stream the model's answer, yielding (chatbot, history) updates.

    Appends the rendered query to the chat display, forwards the sampling
    parameters to the QA application, then replaces the placeholder
    response with each partial generation as it arrives.
    """
    rendered_query = parse_text(input)
    chatbot.append((rendered_query, ""))
    qa.set_chat_kwargs(max_length=max_length, top_p=top_p, temperature=temperature)
    for response, history in qa(query=input, knowledge_base=knowledge_base):
        chatbot[-1] = (rendered_query, parse_text(response))
        yield chatbot, history
def reset_user_input():
    """Clear the user input textbox after a message is submitted."""
    cleared_box = gr.update(value='')
    return cleared_box
def reset_state():
    """Reset the chatbot display and the conversation history to empty."""
    empty_chat, empty_history = [], []
    return empty_chat, empty_history
with gr.Blocks() as demo:
    gr.HTML("""<h1 align="center">LLM4CHAT</h1>""")
    # Conversation display; gr.Chatbot.postprocess is patched above so the
    # history is rendered through mdtex2html.
    chatbot = gr.Chatbot()
    with gr.Row():
        with gr.Column(scale=4):
            # with gr.Column(scale=2):
            #     knowledge_base = gr.Textbox(show_label=False, placeholder="输入知识", lines=10).style(container=False)
            with gr.Column(scale=12):
                user_input = gr.Textbox(show_label=False, placeholder="输入问题", lines=20).style(container=False)
            with gr.Column(min_width=32, scale=1):
                submitBtn = gr.Button("Submit", variant="primary")
        with gr.Column(scale=1):
            emptyBtn = gr.Button("Clear History")
            # Sampling controls, forwarded to qa.set_chat_kwargs in predict().
            max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
            top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
            temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
            # Free-text knowledge passed to the QA chain as extra context.
            knowledge_base = gr.Textbox(label='📚知识库', placeholder="输入知识", lines=10).style(container=False)
    # Conversation history carried between callback invocations.
    history = gr.State([])
    # Submit streams predictions into the chatbot, then clears the textbox.
    submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history, knowledge_base],
                    [chatbot, history],
                    show_progress=True)
    submitBtn.click(reset_user_input, [], [user_input])
    emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True)

demo.queue().launch(share=False, inbrowser=True, debug=True)
/AGouTI-1.0.3.tar.gz/AGouTI-1.0.3/agouti_pkg/simplejson/decoder.py | from __future__ import absolute_import
import re
import sys
import struct
from .compat import PY3, unichr
from .scanner import make_scanner, JSONDecodeError
def _import_c_scanstring():
try:
from ._speedups import scanstring
return scanstring
except ImportError:
return None
c_scanstring = _import_c_scanstring()
# NOTE (3.1.0): JSONDecodeError may still be imported from this module for
# compatibility, but it was never in the __all__
__all__ = ['JSONDecoder']

# Shared regex flags for every scanner pattern in this module.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
if sys.version_info < (2, 6):
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
nan, inf = struct.unpack('>dd', _BYTES)
else:
nan = float('nan')
inf = float('inf')
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
# Non-spec JSON literals mapped to their float values.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}

# Matches a run of ordinary characters followed by the next character that
# needs special handling: '"', a backslash, or a control character.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Single-character JSON backslash escapes and their decoded values.
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}

# Encoding assumed for byte-string input when none is specified.
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match, _join=u''.join,
        _PY3=PY3, _maxunicode=sys.maxunicode):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.

    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote.

    The ``_b``/``_m``/``_join``/``_PY3``/``_maxunicode`` keyword defaults
    bind module globals as locals for speed; callers never pass them.
    """
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not _PY3 and not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at"
                raise JSONDecodeError(msg, s, end)
            else:
                _append(terminator)
            continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\X escape sequence %r"
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            # Unicode escape sequence: four hex digits follow the 'u'.
            msg = "Invalid \\uXXXX escape sequence"
            esc = s[end + 1:end + 5]
            escX = esc[1:2]
            # Reject Python-style '\xNN' sneaking through int(esc, 16).
            if len(esc) != 4 or escX == 'x' or escX == 'X':
                raise JSONDecodeError(msg, s, end - 1)
            try:
                uni = int(esc, 16)
            except ValueError:
                raise JSONDecodeError(msg, s, end - 1)
            end += 5
            # Check for surrogate pair on UCS-4 systems
            # Note that this will join high/low surrogate pairs
            # but will also pass unpaired surrogates through
            if (_maxunicode > 65535 and
                uni & 0xfc00 == 0xd800 and
                s[end:end + 2] == '\\u'):
                esc2 = s[end + 2:end + 6]
                escX = esc2[1:2]
                if len(esc2) == 4 and not (escX == 'x' or escX == 'X'):
                    try:
                        uni2 = int(esc2, 16)
                    except ValueError:
                        raise JSONDecodeError(msg, s, end)
                    if uni2 & 0xfc00 == 0xdc00:
                        uni = 0x10000 + (((uni - 0xd800) << 10) |
                                         (uni2 - 0xdc00))
                        end += 6
            char = unichr(uni)
        # Append the unescaped character
        _append(char)
    return _join(chunks), end
# Use the C speedup when the _speedups extension is available, otherwise
# fall back to the pure-Python scanner above.
scanstring = c_scanstring or py_scanstring

# JSON insignificant whitespace, as a compiled pattern and as a membership
# string for cheap single-character checks.
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject(state, encoding, strict, scan_once, object_hook,
        object_pairs_hook, memo=None,
        _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON object from ``state = (s, end)``, the document and the
    index just after the opening '{'.

    Returns ``(obj, end)`` where ``obj`` is a dict (or the result of
    ``object_hook`` / ``object_pairs_hook``) and ``end`` is the index
    after the closing '}'. Raises JSONDecodeError on malformed input.
    """
    (s, end) = state
    # Backwards compatibility
    if memo is None:
        memo = {}
    memo_get = memo.setdefault
    pairs = []
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise JSONDecodeError(
                "Expecting property name enclosed in double quotes",
                s, end)
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        # Memoize keys so equal key strings share a single object.
        key = memo_get(key, key)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise JSONDecodeError("Expecting ':' delimiter", s, end)
        end += 1
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
        value, end = scan_once(s, end)
        pairs.append((key, value))
        # After the value: expect ',' (more pairs) or '}' (object done).
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar == '}':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting ',' delimiter or '}'", s, end - 1)
        # Skip whitespace before the next key's opening quote.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar != '"':
            raise JSONDecodeError(
                "Expecting property name enclosed in double quotes",
                s, end - 1)
    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray(state, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array from ``state = (s, end)``, the document and the
    index just after the opening '['.

    Returns ``(values, end)`` where ``end`` is the index after the closing
    ']'. Raises JSONDecodeError on malformed input.
    """
    (s, end) = state
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    elif nextchar == '':
        raise JSONDecodeError("Expecting value or ']'", s, end)
    _append = values.append
    while True:
        value, end = scan_once(s, end)
        _append(value)
        # After the value: expect ',' (more items) or ']' (array done).
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting ',' delimiter or ']'", s, end - 1)
        # Skip whitespace after the comma (cheap checks before the regex).
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder

    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | str, unicode      |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """

    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """
        *encoding* determines the encoding used to interpret any
        :class:`str` objects decoded by this instance (``'utf-8'`` by
        default). It has no effect when decoding :class:`unicode` objects.

        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as :class:`unicode`.

        *object_hook*, if specified, will be called with the result of every
        JSON object decoded and its return value will be used in place of the
        given :class:`dict`. This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).

        *object_pairs_hook* is an optional function that will be called with
        the result of any object literal decode with an ordered list of pairs.
        The return value of *object_pairs_hook* will be used instead of the
        :class:`dict`. This feature can be used to implement custom decoders
        that rely on the order that the key and value pairs are decoded (for
        example, :func:`collections.OrderedDict` will remember the order of
        insertion). If *object_hook* is also defined, the *object_pairs_hook*
        takes priority.

        *parse_float*, if specified, will be called with the string of every
        JSON float to be decoded. By default, this is equivalent to
        ``float(num_str)``. This can be used to use another datatype or parser
        for JSON floats (e.g. :class:`decimal.Decimal`).

        *parse_int*, if specified, will be called with the string of every
        JSON int to be decoded. By default, this is equivalent to
        ``int(num_str)``. This can be used to use another datatype or parser
        for JSON integers (e.g. :class:`float`).

        *parse_constant*, if specified, will be called with one of the
        following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
        can be used to raise an exception if invalid JSON numbers are
        encountered.

        *strict* controls the parser's behavior when it encounters an
        invalid control character in a string. The default setting of
        ``True`` means that unescaped control characters are parse errors, if
        ``False`` then control characters will be allowed in strings.
        """
        if encoding is None:
            encoding = DEFAULT_ENCODING
        self.encoding = encoding
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        # Parser callbacks consumed by make_scanner below.
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # Shared memo of object keys so repeated key strings across the
        # document reuse a single object (see JSONObject).
        self.memo = {}
        self.scan_once = make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match, _PY3=PY3):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)
        """
        if _PY3 and isinstance(s, bytes):
            s = str(s, self.encoding)
        obj, end = self.raw_decode(s)
        end = _w(s, end).end()
        # Anything but trailing whitespace after the document is an error.
        if end != len(s):
            raise JSONDecodeError("Extra data", s, end, len(s))
        return obj

    def raw_decode(self, s, idx=0, _w=WHITESPACE.match, _PY3=PY3):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
        beginning with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.
        Optionally, ``idx`` can be used to specify an offset in ``s`` where
        the JSON document begins.

        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.
        """
        if idx < 0:
            # Ensure that raw_decode bails on negative indexes, the regex
            # would otherwise mask this behavior. #98
            raise JSONDecodeError('Expecting value', s, idx)
        if _PY3 and not isinstance(s, str):
            raise TypeError("Input string must be text, not bytes")
        # strip UTF-8 bom
        if len(s) > idx:
            ord0 = ord(s[idx])
            if ord0 == 0xfeff:
                idx += 1
            elif ord0 == 0xef and s[idx:idx + 3] == '\xef\xbb\xbf':
                idx += 3
        return self.scan_once(s, idx=_w(s, idx).end())
/HavNegpy-1.2.tar.gz/HavNegpy-1.2/docs/_build/html/_build/html/_build/doctrees/nbsphinx/_build/doctrees/nbsphinx/_build/html/_build/doctrees/nbsphinx/hn_module_tutorial.ipynb | # Tutorial for the HN module of HavNegpy package
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import HavNegpy as dd
%matplotlib qt
os.chdir(r'M:\Marshall_Data\mohamed_data\mohamed_data\n44')
def create_dataframe(f):
col_names = ['Freq', 'T', 'Eps1', 'Eps2']
#f = input(str("Enter the filename:"))
df = pd.read_csv(f, sep=r"\s+",index_col=False,usecols = [0,1,2,3],names=col_names,header=None,skiprows=4,encoding='unicode_escape',engine='python')
col1 = ['log f']
for start in range(0, len(df), 63):
name = df['T'][start]
#print(name)
col1.append(name)
df2 = pd.DataFrame()
f1 = df['Freq'][0:63].values
x1 = np.log10((f1))
e = pd.DataFrame(x1)
df2['log f'] = pd.concat([e],axis=1,ignore_index=True)
global Cooling,Heating
for start in range(0, len(df), 63):
f = df['Eps2'][start:start+63].values
ep = np.log10(f)
d = pd.DataFrame(ep)
df2[start] = pd.concat([d],axis=1,ignore_index=True)
df2.columns = col1
'''
a = int(len(col1)/3)
b = 2*a
c = int(len(col1)) - b
Heating1 = df2.iloc[8:,0:a+1]
Cooling = df2.iloc[8:,a+1:b+1]
Heating2 = df2.iloc[8:,b+1:]
heat1_col = col1[0:a+1]
cool_col = col1[a+1:b+1]
heat2_col = col1[b+1:]
Cooling.columns = cool_col
Heating1.columns = heat1_col
Heating2.columns = heat2_col
f2 = df['Freq'][8:59].values
x2 = np.log10((f2))
Cooling['Freq'] = x2
Heating1['Freq'] = x2
Heating2['Freq'] = x2
'''
Cooling = df2.iloc[:,0:25]
Heating = df2.iloc[:,25:]
return df,df2,Cooling,Heating #Heating2
df,df2,cool,heat = create_dataframe('EPS.TXT')
x,y = df2['log f'][9:], heat[40][9:]
plt.figure()
plt.scatter(x,y,label='data for fitting')
plt.xlabel('log f [Hz]')
plt.ylabel('log $\epsilon$"')
plt.legend()
plt.title('Example for HN fitting')
```
image of the plot we are using in this tutorial

```
''' instantiate the HN module from HavNegpy'''
hn = dd.HN()
''' select range to perform hn fitting'''
''' the select range functions pops in a separate window and allows you two clicks to select the region of interest (ROI)'''
''' In this tutorial, I'll plot the ROI and append as an image in the next cell'''
x1,y1 = hn.select_range(x,y)
''' view the data from select range'''
plt.scatter(x1,y1,label = 'Data for fitting')
plt.xlabel('log f [Hz]')
plt.ylabel('log $\epsilon$"')
plt.legend()
plt.title('ROI selected from HN module')
```
image of the ROI from HN module
```
''' dump the initial guess parameters using dump parameters method (varies for each fn), which dumps the parameters in a json file'''
''' this is required before performing the first fitting as it takes the initial guess from the json file created'''
hn.dump_parameters_hn()
''' view the initial guess for the ROI using initial_view method'''
''' I'll append the image in the next cell'''
hn.initial_view_hn(x1,y1)
```
image of the initial guess
```
''' perform least squares fitting'''
''' The image of the curve fit is added in the next cell '''
hn.fit(x1,y1)
```
Example of the fit performed using single HN function
the procedure is similar for double HN and HN with conductivity

```
'''create a file to save fit results using create_analysis file method'''
''' before saving fit results an analysis file has to be created '''
hn.create_analysis_file()
''' save the fit results using save_fit method of the corresponding fit function'''
''' takes one argument, read more on the documentation'''
hn.save_fit_hn(1)
```
| PypiClean |
/Data_HandWrite-1.1.0.tar.gz/Data_HandWrite-1.1.0/Data_HandWrite/__init__.py | import os
from PIL import Image
import numpy as np
import bz2#bz2file pypi
import zipfile
import requests
#data
#https://drive.google.com/file/d/1ZzSeuq-9ICZuFpPKM4H_gU8OGwpqKBAs/view?usp=sharing
#paint.exr
#https://drive.google.com/file/d/1EPrPilZ4XK80Cq4n9-ynO0yxZfxYTIhP/view?usp=sharing
def _save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
def _download_file_from_google_drive(id, destination):
    """Download the Google Drive file *id* to *destination*.

    Large files trigger a virus-scan warning page; in that case the
    confirmation token from the first response's cookies is replayed.
    """
    url = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(url, params={'id': id}, stream=True)
    token = _get_confirm_token(response)
    if token:
        response = session.get(url, params={'id': id, 'confirm': token}, stream=True)
    _save_response_content(response, destination)
def dowloadPaintToolKit():
    '''
    download "paint.exe" , paint and save img with label immediately
    '''
    toolkit_file_id = '1EPrPilZ4XK80Cq4n9-ynO0yxZfxYTIhP'
    _download_file_from_google_drive(toolkit_file_id, 'paint.exe')
def dowloadPaintToolKitAndExecute():
    '''
    dowloadPaintToolKit() + autoexecute "paint.exe"
    '''
    dowloadPaintToolKit()
    # launch the freshly downloaded tool (Windows only)
    os.system('paint.exe')
def downloadData(folder='', isPrint=False):
    '''
    download train set and test set to 'folder'
    filename format:
       a_b_c.jpg
       a:Label name (must be integer)
       b:Creator serial
       c:Serial
    source==>
    https://drive.google.com/file/d/1ZzSeuq-9ICZuFpPKM4H_gU8OGwpqKBAs/view?usp=sharing
    '''
    file_id = '1ZzSeuq-9ICZuFpPKM4H_gU8OGwpqKBAs'
    archive_name = '1.zip'
    _download_file_from_google_drive(file_id, archive_name)
    # unpack every member, optionally echoing progress
    with zipfile.ZipFile(archive_name, mode='r') as archive:
        for member in archive.namelist():
            if isPrint:
                print("extract " + member)
            archive.extract(member, folder)
    print('\nfile already downloaded to the folders:')
    print(os.getcwd() + '\\train\\')
    print(os.getcwd() + '\\test\\')
    # the zip archive is no longer needed once extracted
    os.remove(archive_name)
def LoadImgFromFile(filename):
    '''
    return flattened image data which can be fed to predict directly
    '''
    pixels = np.asarray(Image.open(filename))
    # flatten to 1-D and normalize 8-bit values into [0, 1]
    flattened = pixels.flatten() / 255
    return np.array([flattened])
def LoadDataFromWeb():
    '''
    combine downloadData() and LoadData()
    make data loading easier
    '''
    downloadData()
    train_split = LoadData('train')
    test_split = LoadData('test')
    # (X_train, Y_train) + (X_test, Y_test) -> 4-tuple
    return train_split + test_split
def LoadData(sourceFolder):
    '''
    Load data from source Folder
    return X,Y
       X:image row data of each files Loaded from sourceFolder
         (auto divide by 255)
       Y:Labels from filename to integer
    filename format can be found in downloadData(...)
    Files whose name lacks an integer label, or that cannot be opened as
    images, are skipped.
    '''
    X = []
    Y = []
    for fname in os.listdir(sourceFolder):
        try:
            # parse the label and load the image BEFORE appending anything,
            # so a failure cannot leave X and Y with mismatched lengths
            # (the old code appended the label first, then bare-except'ed).
            label = int(fname.split('_')[0])
            pixels = np.asarray(Image.open(sourceFolder + '/' + fname))
        except (ValueError, OSError):
            # ValueError: filename has no integer label prefix;
            # OSError: not a readable image file — skip either way.
            continue
        Y.append(label)
        X.append(pixels.flatten() / 255)  # flatten the image array to 1-D
    X = np.array(X)
    Y = np.array(Y)
    return X, Y
def pickSample(X, Y, maxSize):
    '''
    pick at most maxSize feature vectors for each class
       X:image row data of each sample
       Y:label of each sample
    Returns the selected (X, Y) as numpy arrays, preserving input order.
    '''
    counts = {}
    newX = []
    newY = []
    for features, label in zip(X, Y):
        # Count every occurrence, including the first. (The old sentinel
        # initialization `dic[Y[i]] = 0` silently dropped the first sample
        # of every class.)
        if counts.get(label, 0) < maxSize:
            counts[label] = counts.get(label, 0) + 1
            newX.append(features)
            newY.append(label)
    return np.array(newX), np.array(newY)
def printSampleCode():
    """Print a ready-to-run example script demonstrating this package."""
    # NOTE: the text below is runtime output emitted verbatim (including its
    # Chinese comments), so it is deliberately left untouched.
    print('''import Data_HandWrite
import numpy as np
import matplotlib.pyplot as plt
#下載 1-5 數字圖片並載入至陣列變數
Data_HandWrite.downloadData()
X_train,Y_train = Data_HandWrite.LoadData('train')
X_test,Y_test = Data_HandWrite.LoadData('test')
#=====模型訓練=====
from sklearn.svm import SVC
model = SVC(decision_function_shape='ovo',gamma=0.004,probability=True)
model.fit(X_train,Y_train)
#test資料夾整體測試
result = model.predict(X_test)
print('accuracy=',model.score(X_test,Y_test)*100)
for i in range(len(result)):
    print(Y_test[i],'==>',result[i])
#=====單一檔案測試=====
ans = '3'
testFile = 'test/'+ans+'_100_0000.jpg'
x = Data_HandWrite.LoadImgFromFile(testFile)
#秀出每個類別的對數機率
log_probs = model.predict_log_proba(x)
resultLabel = str(model.predict(x)[0])
#秀出該圖形
plt.xlabel('log probility of each class:\\n'+str(log_probs))
plt.title('testing result:'+resultLabel)
plt.imshow(np.reshape(x,(28,28,3)))
plt.show()
#下載手繪工具程式並執行
Data_HandWrite.dowloadPaintToolKitAndExecute()''')
/GQCMS-0.0.4-py3-none-any.whl/build/lib/build/lib/build/lib/build/lib/build/lib/gqcms/matrices/Hamiltonian.py | import numpy as np
from typing import List
from gqcms import Hubbard
from gqcms.matrices import Determinant
def createHamiltonian(H_core: np.ndarray, I: np.ndarray, determinants: list) -> np.ndarray:
    """
    Create the hamiltonian matrix from a list of determinants
    :param H_core: core Hamiltonian i.e one electron integrals
    :param I: two electron integrals
    :param determinants: list of Determinants objects
    :return: dense symmetric Hamiltonian of shape
        (len(determinants), len(determinants))
    """
    H = np.zeros((len(determinants), len(determinants)))
    # Make use of the fact that H is hermitian and calculate only the
    # upper triangle
    for i, det_i in enumerate(determinants):
        for j, det_j in enumerate(determinants[i:], start=i):
            # Compute how many orbitals are different
            num_diff_orbitals = det_i.num_different_orbitals(det_j)
            output = 0
            # Slater-Condon-style case split: the matrix element depends on
            # how many spin orbitals the two determinants differ in; more
            # than two differing orbitals leaves the element zero.
            if num_diff_orbitals == 0:
                # Get all orbitals in the ONV
                orbital_list = det_i.get_spin_orbitals()
                # One electron integrals
                for u in orbital_list:
                    output += H_core[u, u]
                # Two electron integrals
                for k, p in enumerate(orbital_list):
                    for q in orbital_list[k+1:]:
                        output += I[p, q, p, q]
            elif num_diff_orbitals == 1:
                # Get different orbitals and sign
                diff_spin_orb_i, diff_spin_orb_j, sign = det_i.get_different_orbitals(det_j)
                # print(f"i: {i}\t{diff_spin_orb_i}\t j: {j}\t{diff_spin_orb_j}")
                # One electron term
                output += H_core[diff_spin_orb_i[0],
                                 diff_spin_orb_j[0]]
                # Two electron terms over the orbitals common to both ONVs
                for p in det_i.get_spin_orbitals():
                    if p != diff_spin_orb_i[0]:
                        output += I[p, diff_spin_orb_i[0],
                                    p, diff_spin_orb_j[0]]
                output *= sign
            elif num_diff_orbitals == 2:
                # Get different orbitals and sign
                diff_spin_orb_i, diff_spin_orb_j, sign = det_i.get_different_orbitals(det_j)
                output += sign * \
                    I[diff_spin_orb_i[0], diff_spin_orb_i[1],
                      diff_spin_orb_j[0], diff_spin_orb_j[1]]
            # H is hermitian
            H[i, j] = output
            H[j, i] = H[i, j]
    return H
def createHamiltonianSCI(molecule: Hubbard, result_HF, excitations: List[int] = None, basis=None, return_extra=False) -> np.ndarray:
    """
    Create the selected configuration interaction (SCI) Hamiltonian.

    The one- and two-electron integrals of the Hubbard model are built in the
    site (AO) basis, spin-blocked, transformed to the Hartree-Fock MO basis,
    and the Hamiltonian is then assembled in the chosen determinant basis.

    :param molecule: information of the Hubbard system
    :param result_HF: result of an HF calculation; must expose the alpha and
        beta MO coefficient matrices as C_a and C_b
    :param excitations: list of the selected excitation levels relative to the
        HF reference (e.g. [1, 2] for CISD); if None, `basis` must be given
    :param basis: explicit list of Determinant objects to use as the CI basis
        (only used when `excitations` is None)
    :param return_extra: whether to also return the CI determinant basis
        (default is False)
    :return: the Hamiltonian matrix in the ONV basis; if return_extra is True,
        a tuple (H_onv, basis)
    :raises ValueError: if neither `excitations` nor `basis` is provided
    """

    # Create one-electron integral matrix: hopping term plus on-site potentials
    H_core_ao = -molecule.t*molecule.adj_mat + np.diag([molecule.potential.get(site, 0) for site in range(molecule.sites)])

    # # Transform H_core to HF-MO basis
    # H_core_mo_a = np.einsum('uj,vi,uv', result_HF.C_a, result_HF.C_a, H_core_ao)
    # H_core_mo_b = np.einsum('uj,vi,uv', result_HF.C_b, result_HF.C_b, H_core_ao)

    # # Spin block H_core_mo
    # H_core_mo = np.zeros((2*molecule.sites, 2*molecule.sites))
    # H_core_mo[::2, ::2] = H_core_mo_a
    # H_core_mo[1::2, 1::2] = H_core_mo_b

    # Create spin block coefficient matrix (alpha block first, then beta block)
    C = np.block([
        [result_HF.C_a, np.zeros_like(result_HF.C_b)],
        [np.zeros_like(result_HF.C_a), result_HF.C_b]
    ])

    # Spin block H_core in AO basis (spin-diagonal: same spatial part for both spins)
    H_core_ao_spin_block = np.block([
        [H_core_ao, np.zeros_like(H_core_ao)],
        [np.zeros_like(H_core_ao), H_core_ao]
    ])

    # Transform H_core from AO to HF-MO basis
    H_core_mo = C.T @ H_core_ao_spin_block @ C

    # Sort C and H_core_mo to align with the electron repulsion tensor indices:
    # interleave the blocks so spin orbitals alternate (a0, b0, a1, b1, ...)
    sort_indices = np.asarray([p for pair in zip(range(0, molecule.sites), range(molecule.sites, 2*molecule.sites)) for p in pair])
    C = C[:, sort_indices]
    H_core_mo = H_core_mo[:, sort_indices]
    H_core_mo = H_core_mo[sort_indices, :]

    # Create electron repulsion integrals (eri) tensor: on-site repulsion U only
    eri_ao = np.zeros((molecule.sites, molecule.sites, molecule.sites, molecule.sites))
    for site in range(molecule.sites):
        eri_ao[site, site, site, site] = molecule.U

    # Spin block eri (kron with identity over both index pairs; the transpose
    # in the second kron applies the identity to the remaining pair of indices)
    I = np.eye(2)
    eri_spin_block_ao = np.kron(I, eri_ao)
    eri_spin_block_ao = np.kron(I, eri_spin_block_ao.T)

    # Convert to physicist's notation and antisymmetrize: <pq||rs> = <pq|rs> - <pq|sr>
    eri_spin_block_ao_phys = eri_spin_block_ao.transpose(0, 2, 1, 3)
    gao = eri_spin_block_ao_phys - eri_spin_block_ao_phys.transpose(0, 1, 3, 2)

    # Transform gao from AO to MO basis, one index at a time
    temp = np.einsum('pi,pqrs->iqrs', C, gao)
    temp = np.einsum('qj,iqrs->ijrs', C, temp)
    temp = np.einsum('ijrs,rk->ijks', temp, C)
    eri_mo = np.einsum('ijks,sl->ijkl', temp, C)

    if excitations is not None:
        # Generate requested excitations from the HF reference determinant
        det_ref = Determinant(nalpha=molecule.nalpha, nbeta=molecule.nbeta, sites=molecule.sites)
        basis = [det_ref]
        for excitation in excitations:
            basis.extend(det_ref.n_tuply_excitations(excitation, molecule.sites))
    # Check if a basis is given, else raise an error
    elif basis is None:
        raise ValueError("A list of excitations or a list of determinants should be given.")

    # Create Hamiltonian in ONV basis
    H_onv = createHamiltonian(H_core_mo, eri_mo, basis)

    # Return basis too if asked
    if return_extra:
        return H_onv, basis
    else:
        return H_onv
/MoorPy-1.0.1.tar.gz/MoorPy-1.0.1/moorpy/line.py | import numpy as np
from matplotlib import cm
from moorpy.Catenary import catenary
from moorpy.nonlinear import nonlinear
from moorpy.helpers import LineError, CatenaryError, rotationMatrix, makeTower, read_mooring_file, quiver_data_to_segments
from os import path
class Line():
'''A class for any mooring line that consists of a single material'''
def __init__(self, mooringSys, num, L, lineType, nSegs=100, cb=0, isRod=0, attachments = [0,0]):
'''Initialize Line attributes
Parameters
----------
mooringSys : system object
The system object that contains the point object
num : int
indentifier number
L : float
line unstretched length [m]
lineType : dict
dictionary containing the coefficients needed to describe the line (could reference an entry of System.lineTypes).
nSegs : int, optional
number of segments to split the line into. Used in MoorPy just for plotting. The default is 100.
cb : float, optional
line seabed friction coefficient (will be set negative if line is fully suspended). The default is 0.
isRod : boolean, optional
determines whether the line is a rod or not. The default is 0.
attachments : TYPE, optional
ID numbers of any Points attached to the Line. The default is [0,0]. << consider removing
Returns
-------
None.
'''
self.sys = mooringSys # store a reference to the overall mooring system (instance of System class)
self.number = num
self.isRod = isRod
self.L = L # line unstretched length
self.type = lineType # dictionary of a System.lineTypes entry
self.nNodes = int(nSegs) + 1
self.cb = float(cb) # friction coefficient (will automatically be set negative if line is fully suspended)
self.rA = np.zeros(3) # end coordinates
self.rB = np.zeros(3)
self.fA = np.zeros(3) # end forces
self.fB = np.zeros(3)
#Perhaps this could be made less intrusive by defining it using a line.addpoint() method instead, similar to point.attachline().
self.attached = attachments # ID numbers of the Points at the Line ends [a,b] >>> NOTE: not fully supported <<<<
self.th = 0 # heading of line from end A to B
self.HF = 0 # fairlead horizontal force saved for next solve
self.VF = 0 # fairlead vertical force saved for next solve
self.KA = [] # to be filled with the 2x2 end stiffness matrix from catenary
self.KB = [] # to be filled with the 2x2 end stiffness matrix from catenary
self.info = {} # to hold all info provided by catenary
self.qs = 1 # flag indicating quasi-static analysis (1). Set to 0 for time series data
self.show = True # a flag that will be set to false if we don't want to show the line (e.g. if results missing)
#print("Created Line "+str(self.number))
self.color = 'k'
self.lw=0.5
    def loadData(self, dirname, rootname, sep='.MD.'):
        '''Loads line-specific time series data from a MoorDyn output file.

        Reads node positions and, when the corresponding channels exist,
        segment tensions, node curvatures, fluid velocities, buoyancy, drag,
        and weight forces. Sets self.qs = 0 so later methods use the stored
        time series instead of a quasi-static catenary solution. If the file
        is not found, the line is flagged to not be shown (self.show = False).

        Parameters
        ----------
        dirname : str
            directory containing the MoorDyn output file
        rootname : str
            root name of the MoorDyn output files
        sep : str, optional
            text between the rootname and the Line/Rod identifier in the
            filename. The default is '.MD.'.
        '''

        self.qs = 0 # signals time series data

        if self.isRod==1:
            strtype='Rod'
        elif self.isRod==0:
            strtype='Line'

        filename = dirname+rootname+sep+strtype+str(self.number)+'.out'

        if path.exists(filename):

        # try:

            # load time series data
            data, ch, channels, units = read_mooring_file("", filename) # remember number starts on 1 rather than 0

            # get time info
            if ("Time" in ch):
                self.Tdata = data[:,ch["Time"]]
                self.dt = self.Tdata[1]-self.Tdata[0]   # assumes a uniform output time step
            else:
                raise LineError("loadData: could not find Time channel for mooring line "+str(self.number))

            nT = len(self.Tdata)  # number of time steps

            # check for position data  <<<<<<

            self.xp = np.zeros([nT,self.nNodes])
            self.yp = np.zeros([nT,self.nNodes])
            self.zp = np.zeros([nT,self.nNodes])

            # node positions: one channel triple (px, py, pz) per node
            for i in range(self.nNodes):
                self.xp[:,i] = data[:, ch['Node'+str(i)+'px']]
                self.yp[:,i] = data[:, ch['Node'+str(i)+'py']]
                self.zp[:,i] = data[:, ch['Node'+str(i)+'pz']]

            '''
            if self.isRod==0:
                self.Te = np.zeros([nT,self.nNodes-1])   # read in tension data if available
                if "Seg1Te" in ch:
                    for i in range(self.nNodes-1):
                        self.Te[:,i] = data[:, ch['Seg'+str(i+1)+'Te']]

                self.Ku = np.zeros([nT,self.nNodes])   # read in curvature data if available
                if "Node0Ku" in ch:
                    for i in range(self.nNodes):
                        self.Ku[:,i] = data[:, ch['Node'+str(i)+'Ku']]
            else:
                # read in Rod buoyancy force data if available
                if "Node0Box" in ch:
                    self.Bx = np.zeros([nT,self.nNodes])
                    self.By = np.zeros([nT,self.nNodes])
                    self.Bz = np.zeros([nT,self.nNodes])
                    for i in range(self.nNodes):
                        self.Bx[:,i] = data[:, ch['Node'+str(i)+'Box']]
                        self.By[:,i] = data[:, ch['Node'+str(i)+'Boy']]
                        self.Bz[:,i] = data[:, ch['Node'+str(i)+'Boz']]

            if "Node0Ux" in ch:
                self.Ux = np.zeros([nT,self.nNodes])   # read in fluid velocity data if available
                self.Uy = np.zeros([nT,self.nNodes])
                self.Uz = np.zeros([nT,self.nNodes])
                for i in range(self.nNodes):
                    self.Ux[:,i] = data[:, ch['Node'+str(i)+'Ux']]
                    self.Uy[:,i] = data[:, ch['Node'+str(i)+'Uy']]
                    self.Uz[:,i] = data[:, ch['Node'+str(i)+'Uz']]

            #Read in tension data if available
            if "Seg1Ten" in ch:
                self.Ten = np.zeros([nT,self.nNodes-1])
                for i in range(self.nNodes-1):
                    self.Ten[:,i] = data[:, ch['Seg'+str(i+1)+'Ten']]
            '''

            # --- Read in additional data if available ---
            # each optional channel family gets a boolean flag (e.g. Tendata)
            # so plotting code can check availability without try/except

            # segment tension  <<< to be changed to nodal tensions in future MD versions

            #if "Seg1Te" in ch
            if "Seg1Ten" in ch:
                self.Tendata = True
                self.Ten = np.zeros([nT,self.nNodes-1])
                for i in range(self.nNodes-1):
                    self.Ten[:,i] = data[:, ch['Seg'+str(i+1)+'Ten']]
            else:
                self.Tendata = False

            # curvature at node
            if "Node0Ku" in ch:
                self.Kudata = True
                self.Ku = np.zeros([nT,self.nNodes])
                for i in range(self.nNodes):
                    self.Ku[:,i] = data[:, ch['Node'+str(i)+'Ku']]
            else:
                self.Kudata = False

            # water velocity data
            if "Node0Ux" in ch:
                self.Udata = True
                self.Ux = np.zeros([nT,self.nNodes])
                self.Uy = np.zeros([nT,self.nNodes])
                self.Uz = np.zeros([nT,self.nNodes])
                for i in range(self.nNodes):
                    self.Ux[:,i] = data[:, ch['Node'+str(i)+'Ux']]
                    self.Uy[:,i] = data[:, ch['Node'+str(i)+'Uy']]
                    self.Uz[:,i] = data[:, ch['Node'+str(i)+'Uz']]
            else:
                self.Udata = False

            # buoyancy force data
            if "Node0Box" in ch:
                self.Bdata = True
                self.Bx = np.zeros([nT,self.nNodes])
                self.By = np.zeros([nT,self.nNodes])
                self.Bz = np.zeros([nT,self.nNodes])
                for i in range(self.nNodes):
                    self.Bx[:,i] = data[:, ch['Node'+str(i)+'Box']]
                    self.By[:,i] = data[:, ch['Node'+str(i)+'Boy']]
                    self.Bz[:,i] = data[:, ch['Node'+str(i)+'Boz']]
            else:
                self.Bdata = False

            # hydro drag data
            if "Node0Dx" in ch:
                self.Ddata = True
                self.Dx = np.zeros([nT,self.nNodes])   # read in fluid velocity data if available
                self.Dy = np.zeros([nT,self.nNodes])
                self.Dz = np.zeros([nT,self.nNodes])
                for i in range(self.nNodes):
                    self.Dx[:,i] = data[:, ch['Node'+str(i)+'Dx']]
                    self.Dy[:,i] = data[:, ch['Node'+str(i)+'Dy']]
                    self.Dz[:,i] = data[:, ch['Node'+str(i)+'Dz']]
            else:
                self.Ddata = False

            # weight data
            if "Node0Wx" in ch:
                self.Wdata = True
                self.Wx = np.zeros([nT,self.nNodes])   # read in fluid velocity data if available
                self.Wy = np.zeros([nT,self.nNodes])
                self.Wz = np.zeros([nT,self.nNodes])
                for i in range(self.nNodes):
                    self.Wx[:,i] = data[:, ch['Node'+str(i)+'Wx']]
                    self.Wy[:,i] = data[:, ch['Node'+str(i)+'Wy']]
                    self.Wz[:,i] = data[:, ch['Node'+str(i)+'Wz']]
            else:
                self.Wdata = False

            # initialize positions (is this used?)
            self.xpi= self.xp[0,:]
            self.ypi= self.yp[0,:]
            self.zpi= self.zp[0,:]

            # calculate the dynamic LBot !!!!!!! doesn't work for sloped bathymetry yet !!!!!!!!!!
            # find the first node that ever rises above the anchor-end starting
            # elevation; nodes before it are taken as resting on the seabed
            for i in range(len(self.zp[0])):
                if np.max(self.zp[:,i]) > self.zp[0,0]:
                    inode = i
                    break
                else:
                    inode = i
            self.LBotDyn = (inode-1)*self.L/(self.nNodes-1)

            # get length (constant)
            #self.L = np.sqrt( (self.xpi[-1]-self.xpi[0])**2 + (self.ypi[-1]-self.ypi[0])**2 + (self.zpi[-1]-self.zpi[0])**2 )
            # ^^^^^^^ why are we changing the self.L value to not the unstretched length specified in MoorDyn?
            # moved this below the dynamic LBot calculation because I wanted to use the original self.L
            # >>> this is probably needed for Rods - should look into using for Rods only <<<

            # check for tension data <<<<<<<

            self.show = True

        else:
            self.Tdata = []
            self.show = False
            print(f"Error geting data for {'Rod' if self.isRod else 'Line'} {self.number}: (unknown)")
            print("dirname: {} or rootname: {} is incorrect".format(dirname, rootname))

        # >>> this was another option for handling issues - maybe no longer needed <<<
        #except Exception as e:
        #    # don't fail if there's an issue finding data, just flag that the line shouldn't be shown/plotted
        #    print(f"Error geting data for {'Rod' if self.isRod else 'Line'} {self.number}: ")
        #    print(e)
        #    self.show = False
def getTimestep(self, Time):
'''Get the time step to use for showing time series data'''
if Time < 0:
ts = np.int_(-Time) # negative value indicates passing a time step index
else: # otherwise it's a time in s, so find closest time step
if len(self.Tdata) > 0:
for index, item in enumerate(self.Tdata):
ts = -1
if item > Time:
ts = index
break
if ts==-1:
raise LineError(self.number, "getTimestep: requested time likely out of range")
else:
raise LineError(self.number, "getTimestep: zero time steps are stored")
return ts
    def getLineCoords(self, Time, n=0):    # formerly UpdateLine
        '''Gets the updated line coordinates for drawing and plotting purposes.

        Parameters
        ----------
        Time : float
            time at which to get the coordinates; only used when time series
            data are loaded (self.qs == 0). A negative value is interpreted
            as a time step index (see getTimestep).
        n : int, optional
            number of points to return along the line. The default of 0 uses
            self.nNodes.

        Returns
        -------
        Xs, Ys, Zs : arrays
            global x, y, z coordinates of points along the line (or of the
            cylinder outline for a rod)
        Ts : array or None
            tension at each point (None for rods)
        '''

        if n==0: n = self.nNodes

        # special temporary case to draw a rod for visualization. This assumes the rod end points have already been set somehow
        if self.qs==1 and self.isRod > 0:

            # make points for appropriately sized cylinder
            d = self.type['d_vol']
            Xs, Ys, Zs = makeTower(self.L, np.array([d/2, d/2]))   # add in makeTower method once you start using Rods

            # get unit vector and orientation matrix
            k = (self.rB-self.rA)/self.L
            Rmat = np.array(rotationMatrix(0, np.arctan2(np.hypot(k[0],k[1]), k[2]), np.arctan2(k[1],k[0])))

            # translate and rotate into proper position for Rod
            coords = np.vstack([Xs, Ys, Zs])
            newcoords = np.matmul(Rmat,coords)
            Xs = newcoords[0,:] + self.rA[0]
            Ys = newcoords[1,:] + self.rA[1]
            Zs = newcoords[2,:] + self.rA[2]

            return Xs, Ys, Zs, None

        # if a quasi-static analysis, just call the catenary function to return the line coordinates
        elif self.qs==1:

            depth = self.sys.depth

            dr = self.rB - self.rA
            LH = np.hypot(dr[0], dr[1])     # horizontal spacing of line ends
            LV = dr[2]                      # vertical offset from end A to end B

            if LH >0:
                cosBeta = dr[0]/LH                 # cos of line heading
                sinBeta = dr[1]/LH                 # sin of line heading
                self.th = np.arctan2(dr[1],dr[0])  # line heading
            else:   # special case of vertical line: line heading is undefined - use zero as default
                cosBeta = 0.0
                sinBeta = 0.0
                self.th = 0.0

            if np.min([self.rA[2],self.rB[2]]) > -depth:
                self.cb = -depth - np.min([self.rA[2],self.rB[2]])   # if this line's lower end is off the seabed, set cb negative and to the distance off the seabed
            elif self.cb < 0:   # if a line end is at the seabed, but the cb is still set negative to indicate off the seabed
                self.cb = 0.0   # set to zero so that the line includes seabed interaction.

            # ----- check for linear vs nonlinear line elasticity -----

            #If EA is found in the line properties we will run the original catenary function
            if 'EA' in self.type:
                try:
                    (fAH, fAV, fBH, fBV, info) = catenary(LH, LV, self.L, self.type['EA'], self.type['w'],
                                                          self.cb, HF0=self.HF, VF0=self.VF, nNodes=n, plots=1)
                except CatenaryError as error:
                    raise LineError(self.number, error.message)
                #(fAH, fAV, fBH, fBV, info) = catenary(LH, LV, self.L, self.type['EA'], self.type['w'], CB=self.cb, HF0=self.HF, VF0=self.VF, nNodes=n, plots=1)   # call line model

            #If EA isnt found then we will use the ten-str relationship defined in the input file
            else:
                (fAH, fAV, fBH, fBV, info) = nonlinear(LH, LV, self.L, self.type['Str'], self.type['Ten'],self.type['w'])

            # convert the local in-plane profile from catenary to global coordinates
            Xs = self.rA[0] + info["X"]*cosBeta
            Ys = self.rA[1] + info["X"]*sinBeta
            Zs = self.rA[2] + info["Z"]
            Ts = info["Te"]
            return Xs, Ys, Zs, Ts

        # otherwise, count on read-in time-series data
        else:

            # figure out what time step to use
            ts = self.getTimestep(Time)

            # drawing rods
            if self.isRod > 0:

                k1 = np.array([ self.xp[ts,-1]-self.xp[ts,0], self.yp[ts,-1]-self.yp[ts,0], self.zp[ts,-1]-self.zp[ts,0] ]) / self.L # unit vector
                k = np.array(k1) # make copy

                Rmat = np.array(rotationMatrix(0, np.arctan2(np.hypot(k[0],k[1]), k[2]), np.arctan2(k[1],k[0])))    # <<< should fix this up at some point, MattLib func may be wrong

                # make points for appropriately sized cylinder
                d = self.type['d_vol']
                Xs, Ys, Zs = makeTower(self.L, np.array([d/2, d/2]))   # add in makeTower method once you start using Rods

                # translate and rotate into proper position for Rod
                coords = np.vstack([Xs, Ys, Zs])
                newcoords = np.matmul(Rmat,coords)
                Xs = newcoords[0,:] + self.xp[ts,0]
                Ys = newcoords[1,:] + self.yp[ts,0]
                Zs = newcoords[2,:] + self.zp[ts,0]

                return Xs, Ys, Zs, None

            # drawing lines
            else:

                # handle whether or not there is tension data
                # NOTE(review): relies on self.Te which the active loadData path
                # does not populate (only the commented-out block did); the bare
                # except then falls back to zeros — confirm intended
                try:  # use average to go from segment tension to node tensions <<< can skip this once MD is updated to output node tensions
                    Te = 0.5*(np.append(self.Te[ts,0], self.Te[ts,:]) +np.append(self.Te[ts,:], self.Te[ts,-1]))
                except: # otherwise return zeros to avoid an error (might want a warning in some cases?)
                    Te = np.zeros(self.nNodes)

                return self.xp[ts,:], self.yp[ts,:], self.zp[ts,:], Te
def getCoordinate(self, s, n=100):
'''Returns position and tension at a specific point along the line's unstretched length'''
dr = self.rB - self.rA
LH = np.hypot(dr[0], dr[1])
Ss = np.linspace(0, self.L, n)
Xs, Ys, Zs, Ts = self.getLineCoords(0.0, n=n)
X = np.interp(s, Ss, Xs)*dr[0]/LH
Y = np.interp(s, Ss, Ys)*dr[1]/LH
Z = np.interp(s, Ss, Zs)
T = np.interp(s, Ss, Ts)
return X, Y, Z, T
    def drawLine2d(self, Time, ax, color="k", Xuvec=[1,0,0], Yuvec=[0,0,1], Xoff=0, Yoff=0, colortension=False, cmap='rainbow', plotnodes=[], plotnodesline=[], label="", alpha=1.0):
        '''Draw the line on 2D plot (ax must be 2D)

        Parameters
        ----------
        Time : float
            time value at which to draw the line
        ax : axis
            the axis on which the line is to be drawn
        color : string, optional
            color identifier in one letter (k=black, b=blue,...). The default is "k".
        Xuvec : list, optional
            plane at which the x-axis is desired. The default is [1,0,0].
        Yuvec : list, optional
            plane at which the y-axis is desired. The default is [0,0,1].
        Xoff : float, optional
            offset added to the projected 2D x coordinates. The default is 0.
        Yoff : float, optional
            offset added to the projected 2D y coordinates. The default is 0.
        colortension : bool, optional
            toggle to plot the lines in a colormap based on node tensions. The default is False
        cmap : string, optional
            colormap string type to plot tensions when colortension=True. The default is 'rainbow'
        plotnodes : list, optional
            node indices to mark with a circle. The default is [].
        plotnodesline : list, optional
            line numbers corresponding to each entry of plotnodes. The default is [].
        label : string, optional
            label for the plotted line (used by legends). The default is "".
        alpha : float, optional
            transparency of the plotted line. The default is 1.0.

        Returns
        -------
        linebit : list
            list of axes and points on which the line can be plotted
        '''
        # NOTE(review): the mutable default arguments (Xuvec, Yuvec, plotnodes,
        # plotnodesline) are shared across calls; safe only while never mutated here.

        linebit = []  # make empty list to hold plotted lines, however many there are

        if self.isRod > 0:

            Xs, Ys, Zs, Te = self.getLineCoords(Time)

            # apply any 3D to 2D transformation here to provide desired viewing angle
            Xs2d = Xs*Xuvec[0] + Ys*Xuvec[1] + Zs*Xuvec[2]
            Ys2d = Xs*Yuvec[0] + Ys*Yuvec[1] + Zs*Yuvec[2]

            # rod outline points come in pairs; draw the cylinder edge segments
            for i in range(int(len(Xs)/2-1)):
                linebit.append(ax.plot(Xs2d[2*i:2*i+2]    ,Ys2d[2*i:2*i+2]    , lw=0.5, color=color))  # side edges
                linebit.append(ax.plot(Xs2d[[2*i,2*i+2]]  ,Ys2d[[2*i,2*i+2]]  , lw=0.5, color=color))  # end A edges
                linebit.append(ax.plot(Xs2d[[2*i+1,2*i+3]],Ys2d[[2*i+1,2*i+3]], lw=0.5, color=color))  # end B edges

        # drawing lines...
        else:

            # >>> can probably streamline the next bit of code a fair bit <<<
            if self.qs==1:
                Xs, Ys, Zs, tensions = self.getLineCoords(Time)
            elif self.qs==0:
                Xs, Ys, Zs, Ts = self.getLineCoords(Time)
                self.rA = np.array([Xs[0], Ys[0], Zs[0]])
                self.rB = np.array([Xs[-1], Ys[-1], Zs[-1]])
                tensions = self.getLineTens()

            # apply any 3D to 2D transformation here to provide desired viewing angle
            Xs2d = Xs*Xuvec[0] + Ys*Xuvec[1] + Zs*Xuvec[2] + Xoff
            Ys2d = Xs*Yuvec[0] + Ys*Yuvec[1] + Zs*Yuvec[2] + Yoff

            if colortension:    # if the mooring lines want to be plotted with colors based on node tensions
                maxt = np.max(tensions); mint = np.min(tensions)
                for i in range(len(Xs)-1):          # for each node in the line
                    color_ratio = ((tensions[i] + tensions[i+1])/2 - mint)/(maxt - mint)  # ratio of the node tension in relation to the max and min tension
                    cmap_obj = cm.get_cmap(cmap)    # create a cmap object based on the desired colormap
                    rgba = cmap_obj(color_ratio)    # return the rbga values of the colormap of where the node tension is
                    linebit.append(ax.plot(Xs2d[i:i+2], Ys2d[i:i+2], color=rgba))
            else:
                linebit.append(ax.plot(Xs2d, Ys2d, lw=1, color=color, label=label, alpha=alpha)) # previously had lw=1 (linewidth)

            if len(plotnodes) > 0:
                # mark the requested nodes, but only on the matching line number
                for i,node in enumerate(plotnodes):
                    if self.number==plotnodesline[i]:
                        linebit.append(ax.plot(Xs2d[node], Ys2d[node], 'o', color=color, markersize=5))

        self.linebit = linebit # can we store this internally?

        self.X = np.array([Xs, Ys, Zs])

        return linebit
    def drawLine(self, Time, ax, color="k", endpoints=False, shadow=True, colortension=False, cmap_tension='rainbow'):
        '''Draw the line in 3D

        Parameters
        ----------
        Time : float
            time value at which to draw the line
        ax : axis
            the (3D) axis on which the line is to be drawn
        color : string, optional
            color identifier in one letter (k=black, b=blue,...). The default is "k".
            Pass 'self' to use the line's own stored color and linewidth.
        endpoints : bool, optional
            toggle to plot the end points of the lines. The default is False
        shadow : bool, optional
            toggle to plot the mooring line shadow on the seabed. The default is True
        colortension : bool, optional
            toggle to plot the lines in a colormap based on node tensions. The default is False
        cmap_tension : string, optional
            colormap string type to plot tensions when colortension=True. The default is 'rainbow'

        Returns
        -------
        linebit : list
            list of axes and points on which the line can be plotted
        '''

        if not self.show:  # exit if this line isn't set to be shown
            return 0

        if color == 'self':
            color = self.color  # attempt to allow custom colors
            lw = self.lw
        else:
            lw = 1

        linebit = []  # make empty list to hold plotted lines, however many there are

        if self.isRod > 0:

            if color==None:
                color = [0.3, 0.3, 0.3]  # if no color provided, default to dark grey rather than rainbow rods

            Xs, Ys, Zs, Ts = self.getLineCoords(Time)

            # rod outline points come in pairs; draw the cylinder edge segments
            for i in range(int(len(Xs)/2-1)):
                linebit.append(ax.plot(Xs[2*i:2*i+2],Ys[2*i:2*i+2],Zs[2*i:2*i+2]            , color=color))  # side edges
                linebit.append(ax.plot(Xs[[2*i,2*i+2]],Ys[[2*i,2*i+2]],Zs[[2*i,2*i+2]]      , color=color))  # end A edges
                linebit.append(ax.plot(Xs[[2*i+1,2*i+3]],Ys[[2*i+1,2*i+3]],Zs[[2*i+1,2*i+3]], color=color))  # end B edges

            # scatter points for line ends
            #if endpoints == True:
            #    linebit.append(ax.scatter([Xs[0], Xs[-1]], [Ys[0], Ys[-1]], [Zs[0], Zs[-1]], color = color))

        # drawing lines...
        else:

            # >>> can probably streamline the next bit of code a fair bit <<<
            if self.qs==1:  # returns the node positions and tensions of the line, doesn't matter what time
                Xs, Ys, Zs, tensions = self.getLineCoords(Time)
            elif self.qs==0:  # returns the node positions and time data at the given time
                Xs, Ys, Zs, Ts = self.getLineCoords(Time)
                self.rA = np.array([Xs[0], Ys[0], Zs[0]])
                self.rB = np.array([Xs[-1], Ys[-1], Zs[-1]])
                tensions = self.getLineTens()

            if colortension:    # if the mooring lines want to be plotted with colors based on node tensions
                maxt = np.max(tensions); mint = np.min(tensions)
                for i in range(len(Xs)-1):          # for each node in the line
                    color_ratio = ((tensions[i] + tensions[i+1])/2 - mint)/(maxt - mint)  # ratio of the node tension in relation to the max and min tension
                    cmap_obj = cm.get_cmap(cmap_tension)    # create a cmap object based on the desired colormap
                    rgba = cmap_obj(color_ratio)            # return the rbga values of the colormap of where the node tension is
                    linebit.append(ax.plot(Xs[i:i+2], Ys[i:i+2], Zs[i:i+2], color=rgba, zorder=100))
            else:
                linebit.append(ax.plot(Xs, Ys, Zs, color=color, lw=lw, zorder=100))

            if shadow:
                # draw the line's shadow projected flat onto the seabed
                ax.plot(Xs, Ys, np.zeros_like(Xs)-self.sys.depth, color=[0.5, 0.5, 0.5, 0.2], lw=lw, zorder = 1.5) # draw shadow

            if endpoints == True:
                linebit.append(ax.scatter([Xs[0], Xs[-1]], [Ys[0], Ys[-1]], [Zs[0], Zs[-1]], color = color))

            # draw additional data if available (should make this for rods too eventually - drawn along their axis nodes)
            if self.qs == 0:
                ts = self.getTimestep(Time)
                if self.Tendata:
                    pass
                if self.Kudata:
                    pass
                if self.Udata:
                    self.Ubits = ax.quiver(Xs, Ys, Zs, self.Ux[ts,:], self.Uy[ts,:], self.Uz[ts,:], color="blue")  # make quiver plot and save handle to line object
                if self.Bdata:
                    self.Bbits = ax.quiver(Xs, Ys, Zs, self.Bx[ts,:], self.By[ts,:], self.Bz[ts,:], color="red")
                if self.Ddata:
                    self.Dbits = ax.quiver(Xs, Ys, Zs, self.Dx[ts,:], self.Dy[ts,:], self.Dz[ts,:], color="green")
                if self.Wdata:
                    self.Wbits = ax.quiver(Xs, Ys, Zs, self.Wx[ts,:], self.Wy[ts,:], self.Wz[ts,:], color="orange")

        self.linebit = linebit # can we store this internally?

        self.X = np.array([Xs, Ys, Zs])

        return linebit
    def redrawLine(self, Time, colortension=False, cmap_tension='rainbow', drawU=True):  #, linebit):
        '''Update 3D line drawing based on instantaneous position.

        Mutates the matplotlib artists previously created by drawLine
        (stored in self.linebit and the quiver handles) rather than
        re-plotting, so it is suitable for animation loops.

        Parameters
        ----------
        Time : float
            time value at which to redraw the line
        colortension : bool, optional
            toggle to recolor segments by node tension. The default is False
        cmap_tension : string, optional
            colormap used when colortension=True. The default is 'rainbow'
        drawU : bool, optional
            flag for drawing fluid velocity vectors (currently unused here).
            The default is True.

        Returns
        -------
        linebit : list
            the updated list of plot artists
        '''

        linebit = self.linebit

        if self.isRod > 0:

            Xs, Ys, Zs, Ts = self.getLineCoords(Time)

            # artists were appended three per outline pair in drawLine, hence the 3*i indexing
            for i in range(int(len(Xs)/2-1)):

                linebit[3*i  ][0].set_data(Xs[2*i:2*i+2],Ys[2*i:2*i+2])    # side edges (x and y coordinates)
                linebit[3*i  ][0].set_3d_properties(Zs[2*i:2*i+2])         #            (z coordinates)
                linebit[3*i+1][0].set_data(Xs[[2*i,2*i+2]],Ys[[2*i,2*i+2]])           # end A edges
                linebit[3*i+1][0].set_3d_properties(Zs[[2*i,2*i+2]])
                linebit[3*i+2][0].set_data(Xs[[2*i+1,2*i+3]],Ys[[2*i+1,2*i+3]])       # end B edges
                linebit[3*i+2][0].set_3d_properties(Zs[[2*i+1,2*i+3]])

        # drawing lines...
        else:

            Xs, Ys, Zs, Ts = self.getLineCoords(Time)

            if colortension:
                self.rA = np.array([Xs[0], Ys[0], Zs[0]])       # update the line ends based on the MoorDyn data
                self.rB = np.array([Xs[-1], Ys[-1], Zs[-1]])
                tensions = self.getLineTens()                   # get the tensions of the line calculated quasi-statically
                maxt = np.max(tensions); mint = np.min(tensions)
                cmap_obj = cm.get_cmap(cmap_tension)            # create the colormap object

                for i in range(len(Xs)-1):  # for each node in the line, find the relative tension of the segment based on the max and min tensions
                    color_ratio = ((tensions[i] + tensions[i+1])/2 - mint)/(maxt - mint)
                    rgba = cmap_obj(color_ratio)
                    linebit[i][0]._color = rgba         # set the color of the segment to a new color based on its updated tension
                    linebit[i][0].set_data(Xs[i:i+2],Ys[i:i+2])     # set the x and y coordinates
                    linebit[i][0].set_3d_properties(Zs[i:i+2])      # set the z coorindates

            else:
                linebit[0][0].set_data(Xs,Ys)    # (x and y coordinates)
                linebit[0][0].set_3d_properties(Zs)         # (z coordinates)

            # draw additional data if available (should make this for rods too eventually - drawn along their axis nodes)
            if self.qs == 0:
                ts = self.getTimestep(Time)
                s = 0.0002   # force-vector display scale (forces are much larger than velocities)

                if self.Tendata:
                    pass
                if self.Kudata:
                    pass
                if self.Udata:
                    self.Ubits.set_segments(quiver_data_to_segments(Xs, Ys, Zs, self.Ux[ts,:], self.Uy[ts,:], self.Uz[ts,:], scale=10.))
                if self.Bdata:
                    self.Bbits.set_segments(quiver_data_to_segments(Xs, Ys, Zs, self.Bx[ts,:], self.By[ts,:], self.Bz[ts,:], scale=s))
                if self.Ddata:
                    self.Dbits.set_segments(quiver_data_to_segments(Xs, Ys, Zs, self.Dx[ts,:], self.Dy[ts,:], self.Dz[ts,:], scale=s))
                if self.Wdata:
                    self.Wbits.set_segments(quiver_data_to_segments(Xs, Ys, Zs, self.Wx[ts,:], self.Wy[ts,:], self.Wz[ts,:], scale=s))

        return linebit
def setEndPosition(self, r, endB):
'''Sets the end position of the line based on the input endB value.
Parameters
----------
r : array
x,y,z coorindate position vector of the line end [m].
endB : boolean
An indicator of whether the r array is at the end or beginning of the line
Raises
------
LineError
If the given endB value is not a 1 or 0
Returns
-------
None.
'''
if endB == 1:
self.rB = np.array(r, dtype=np.float_)
elif endB == 0:
self.rA = np.array(r, dtype=np.float_)
else:
raise LineError("setEndPosition: endB value has to be either 1 or 0")
    def staticSolve(self, reset=False, tol=0.0001, profiles=0):
        '''Solves static equilibrium of line. Sets the end forces of the line based on the end points' positions.

        Also stores the end tensions (TA, TB), the 2x2 in-plane stiffness
        matrices from catenary (KA2, KB2), and the rotated 3x3 end stiffness
        matrices (KA, KB, KAB) for use by the system-level solver.

        Parameters
        ----------
        reset : boolean, optional
            Determines if the previous fairlead force values will be used for the catenary iteration. The default is False.
        tol : float
            Convergence tolerance for catenary solver measured as absolute error of x and z values in m.
        profiles : int
            Values greater than 0 signal for line profile data to be saved (used for plotting, getting distributed tensions, etc).

        Raises
        ------
        LineError
            If either line end is below the seabed, or if the horizontal force at the fairlead (HF) is less than 0

        Returns
        -------
        None.
        '''

        depth = self.sys.depth

        dr = self.rB - self.rA
        LH = np.hypot(dr[0], dr[1])     # horizontal spacing of line ends
        LV = dr[2]                      # vertical offset from end A to end B

        if LH >0:
            cosBeta = dr[0]/LH                 # cos of line heading
            sinBeta = dr[1]/LH                 # sin of line heading
            self.th = np.arctan2(dr[1],dr[0])  # line heading
        else:   # special case of vertical line: line heading is undefined - use zero as default
            cosBeta = 0.0
            sinBeta = 0.0
            self.th = 0.0

        if self.rA[2] < -depth:
            raise LineError("Line {} end A is lower than the seabed.".format(self.number))
        elif self.rB[2] < -depth:
            raise LineError("Line {} end B is lower than the seabed.".format(self.number))
        elif np.min([self.rA[2],self.rB[2]]) > -depth:
            self.cb = -depth - np.min([self.rA[2],self.rB[2]])   # if this line's lower end is off the seabed, set cb negative and to the distance off the seabed
        elif self.cb < 0:   # if a line end is at the seabed, but the cb is still set negative to indicate off the seabed
            self.cb = 0.0   # set to zero so that the line includes seabed interaction.

        if self.HF < 0:  # or self.VF < 0:  <<<<<<<<<<< it shouldn't matter if VF is negative - this could happen for buoyant lines, etc.
            raise LineError("Line HF cannot be negative") # this could be a ValueError too...

        if reset==True:   # Indicates not to use previous fairlead force values to start catenary
            self.HF = 0   # iteration with, and insteady use the default values.

        # ----- get line results for linear or nonlinear elasticity -----

        #If EA is found in the line properties we will run the original catenary function
        if 'EA' in self.type:
            try:
                (fAH, fAV, fBH, fBV, info) = catenary(LH, LV, self.L, self.type['EA'], self.type['w'], CB=self.cb, Tol=tol, HF0=self.HF, VF0=self.VF, plots=profiles)   # call line model
            except CatenaryError as error:
                raise LineError(self.number, error.message)
        #If EA isnt found then we will use the ten-str relationship defined in the input file
        else:
            (fAH, fAV, fBH, fBV, info) = nonlinear(LH, LV, self.L, self.type['Str'], self.type['Ten'],self.type['w'])

        # save solver state for warm-starting the next solve, plus all outputs
        self.HF = info["HF"]
        self.VF = info["VF"]
        self.KA2 = info["stiffnessA"]
        self.KB2 = info["stiffnessB"]
        self.LBot = info["LBot"]
        self.info = info

        # resolve the in-plane horizontal force components along the line heading
        self.fA[0] = fAH*cosBeta
        self.fA[1] = fAH*sinBeta
        self.fA[2] = fAV
        self.fB[0] = fBH*cosBeta
        self.fB[1] = fBH*sinBeta
        self.fB[2] = fBV
        self.TA = np.sqrt(fAH*fAH + fAV*fAV)  # end tensions
        self.TB = np.sqrt(fBH*fBH + fBV*fBV)

        # ----- compute 3d stiffness matrix for both line ends (3 DOF + 3 DOF) -----

        # solve for required variables to set up the perpendicular stiffness. Keep it horizontal
        #L_xy = np.linalg.norm(self.rB[:2] - self.rA[:2])
        #T_xy = np.linalg.norm(self.fB[:2])

        # create the rotation matrix based on the heading angle that the line is from the horizontal
        R = rotationMatrix(0,0,self.th)

        # initialize the line's analytic stiffness matrix in the "in-line" plane then rotate the matrix to be about the global frame [K'] = [R][K][R]^T
        def from2Dto3Drotated(K2D, F, L):
            # embed the 2x2 in-plane stiffness in 3D with a transverse term
            # F/L on the out-of-plane diagonal, then rotate to the global frame
            if L > 0:
                Kt = F/L        # transverse stiffness term
            else:
                Kt = 0.0

            K2 = np.array([[K2D[0,0], 0 , K2D[0,1]],
                           [ 0      , Kt,  0      ],
                           [K2D[1,0], 0 , K2D[1,1]]])
            return np.matmul(np.matmul(R, K2), R.T)

        self.KA  = from2Dto3Drotated(info['stiffnessA'], -fBH, LH)   # stiffness matrix describing reaction force on end A due to motion of end A
        self.KB  = from2Dto3Drotated(info['stiffnessB'], -fBH, LH)   # stiffness matrix describing reaction force on end B due to motion of end B
        self.KAB = from2Dto3Drotated(info['stiffnessAB'], fBH, LH)   # stiffness matrix describing reaction force on end B due to motion of end A

        #self.K6 = np.block([[ from2Dto3Drotated(self.KA),  from2Dto3Drotated(self.KAB.T)],
        #                    [ from2Dto3Drotated(self.KAB), from2Dto3Drotated(self.KB)  ]])

        if profiles > 1:
            import matplotlib.pyplot as plt
            plt.plot(info['X'], info['Z'])
            plt.show()
def getEndForce(self, endB):
'''Returns the force of the line at the specified end based on the endB value
Parameters
----------
endB : boolean
An indicator of which end of the line is the force wanted
Raises
------
LineError
If the given endB value is not a 1 or 0
Returns
-------
fA or fB: array
The force vector at the end of the line
'''
if endB == 1:
return self.fB
elif endB == 0:
return self.fA
else:
raise LineError("getEndForce: endB value has to be either 1 or 0")
def getStiffnessMatrix(self):
'''Returns the stiffness matrix of a line derived from analytic terms in the jacobian of catenary
Raises
------
LineError
If a singluar matrix error occurs while taking the inverse of the Line's Jacobian matrix.
Returns
-------
K2_rot : matrix
the analytic stiffness matrix of the line in the rotated frame.
'''
# take the inverse of the Jacobian to get the starting analytic stiffness matrix
'''
if np.isnan(self.jacobian[0,0]): #if self.LBot >= self.L and self.HF==0. and self.VF==0. << handle tricky cases here?
K = np.array([[0., 0.], [0., 1.0/self.jacobian[1,1] ]])
else:
try:
K = np.linalg.inv(self.jacobian)
except:
raise LineError(self.number, f"Check Line Length ({self.L}), it might be too long, or check catenary ProfileType")
'''
# solve for required variables to set up the perpendicular stiffness. Keep it horizontal
L_xy = np.linalg.norm(self.rB[:2] - self.rA[:2])
T_xy = np.linalg.norm(self.fB[:2])
Kt = T_xy/L_xy
# initialize the line's analytic stiffness matrix in the "in-line" plane
KA = np.array([[self.KA2[0,0], 0 , self.KA2[0,1]],
[ 0 , Kt, 0 ],
[self.KA2[1,0], 0 , self.KA2[1,1]]])
KB = np.array([[self.KB2[0,0], 0 , self.KB2[0,1]],
[ 0 , Kt, 0 ],
[self.KB2[1,0], 0 , self.KB2[1,1]]])
# create the rotation matrix based on the heading angle that the line is from the horizontal
R = rotationMatrix(0,0,self.th)
# rotate the matrix to be about the global frame [K'] = [R][K][R]^T
KA_rot = np.matmul(np.matmul(R, KA), R.T)
KB_rot = np.matmul(np.matmul(R, KB), R.T)
return KA_rot, KB_rot
def getLineTens(self):
    '''Calls the catenary function to return the tensions of the Line for a quasi-static analysis.

    Uses the current end positions rA/rB and the system water depth to set up
    the horizontal/vertical span, then solves either the elastic catenary
    (when 'EA' is present in the line type) or the nonlinear tension-stretch
    relationship from the input file.

    Returns
    -------
    Ts : array
        Tension distribution along the line (the "Te" entry of the solver info).
    '''
    # >>> this can probably be done using data already generated by static Solve <<<
    depth = self.sys.depth

    dr = self.rB - self.rA
    LH = np.hypot(dr[0], dr[1])  # horizontal spacing of line ends
    LV = dr[2]                   # vertical offset from end A to end B

    if np.min([self.rA[2], self.rB[2]]) > -depth:
        # This line's lower end is off the seabed: store the clearance as a
        # negative cb to flag that state.
        self.cb = -depth - np.min([self.rA[2], self.rB[2]])
    elif self.cb < 0:
        # A line end is at the seabed but cb still flags "off the seabed":
        # reset to zero so that the line includes seabed interaction.
        self.cb = 0.0

    # NOTE(review): a dead `tol = 0.0001` assignment was removed here — it was
    # always shadowed before use in the EA branch and unused in the else branch.

    # If EA is found in the line properties we run the original catenary function
    if 'EA' in self.type:
        try:
            tol = 0.000001  # TODO figure out why tol and profiles are not defined. These values are hardcoded from defaults in other function calls
            profiles = 1
            (fAH, fAV, fBH, fBV, info) = catenary(LH, LV, self.L, self.type['EA'], self.type['w'], CB=self.cb, Tol=tol, HF0=self.HF, VF0=self.VF, plots=profiles)  # call line model
        except CatenaryError as error:
            raise LineError(self.number, error.message)
    # If EA isn't found then we use the tension-stretch relationship defined in the input file
    else:
        (fAH, fAV, fBH, fBV, info) = nonlinear(LH, LV, self.L, self.type['Str'], self.type['Ten'], self.type['w'])

    Ts = info["Te"]
    return Ts
def getTension(self, s):
    '''Returns tension at a given point along the line.

    Parameters
    ----------
    s : scalar or array-like
        Value or array of values for the arc length along the line from end A
        to end B at which the tension is desired, in m.
        NOTE(review): the original docstring also described negative values as
        relative positions, but that conversion is commented out below — out-of-range
        values are simply clamped by np.interp to the profile ends.

    Returns
    -------
    tension value(s)
    '''
    #if s < 0:
    #    s = -s*self.L
    #if s > self.L:
    #    raise ValueError('Specified arc length is larger than the line unstretched length.')

    # Interpolate the stored tension profile at the requested arc length(s).
    return np.interp(s, self.info['s'], self.info['Te'])
def getPosition(self, s):
    '''Returns position at a given point along the line.

    Parameters
    ----------
    s : scalar or array-like
        Value or array of values for the arc length along the line from end A
        to end B at which the position is desired, in m.

    Returns
    -------
    position vector(s) : 3-row array of x, y, z coordinates
    '''
    # >>> should be merged with getLineCoords and getCoordinate functionality <<<

    # Interpolate the in-plane horizontal (X) and vertical (Z) profile
    # coordinates at the requested arc length(s).
    x = np.interp(s, self.info['s'], self.info['X'])
    z = np.interp(s, self.info['s'], self.info['Z'])

    # Map the in-plane x coordinate onto the line's horizontal heading.
    dr = self.rB - self.rA
    LH = np.hypot(dr[0], dr[1])
    if LH > 0:
        Xs = self.rA[0] + x*dr[0]/LH
        Ys = self.rA[1] + x*dr[1]/LH
    else:
        # Vertical line (ends share the same x,y): no horizontal heading
        # exists; previously this divided by zero. All points sit above end A.
        Xs = self.rA[0] + np.zeros_like(x)
        Ys = self.rA[1] + np.zeros_like(x)
    Zs = self.rA[2] + z
    return np.vstack([Xs, Ys, Zs])
def attachLine(self, lineID, endB):
    '''Placeholder: attaching another line to this line is not implemented (no-op).'''
    pass
/Caroline-presentation-0.2.4.tar.gz/Caroline-presentation-0.2.4/caroline/html_dist/js/mathjax/sre/mathmaps/nemeth/symbols/latin-mathfonts-bold-fraktur.js | [{"locale":"nemeth"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠁"}},"key":"1D56C"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠃"}},"key":"1D56D"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠉"}},"key":"1D56E"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠙"}},"key":"1D56F"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠑"}},"key":"1D570"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠋"}},"key":"1D571"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠛"}},"key":"1D572"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠓"}},"key":"1D573"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠊"}},"key":"1D574"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠚"}},"key":"1D575"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠅"}},"key":"1D576"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠇"}},"key":"1D577"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠍"}},"key":"1D578"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠝"}},"key":"1D579"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠕"}},"key":"1D57A"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠏"}},"key":"1D57B"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠟"}},"key":"1D57C"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠗"}},"key":"1D57D"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠎"}},"key":"1D57E"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠞"}},"key":"1D57F"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠥"}},"key":"1D580"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠧"}},"key":"1D581"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠺"}},"key":"1D582"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠭"}},"key":"1D583"},{"category":"Lu","mappi
ngs":{"default":{"default":"⠸⠀⠸⠠⠽"}},"key":"1D584"},{"category":"Lu","mappings":{"default":{"default":"⠸⠀⠸⠠⠵"}},"key":"1D585"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠁"}},"key":"1D586"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠃"}},"key":"1D587"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠉"}},"key":"1D588"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠙"}},"key":"1D589"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠑"}},"key":"1D58A"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠋"}},"key":"1D58B"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠛"}},"key":"1D58C"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠓"}},"key":"1D58D"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠊"}},"key":"1D58E"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠚"}},"key":"1D58F"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠅"}},"key":"1D590"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠇"}},"key":"1D591"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠍"}},"key":"1D592"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠝"}},"key":"1D593"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠕"}},"key":"1D594"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠏"}},"key":"1D595"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠟"}},"key":"1D596"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠗"}},"key":"1D597"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠎"}},"key":"1D598"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠞"}},"key":"1D599"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠥"}},"key":"1D59A"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠧"}},"key":"1D59B"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠺"}},"key":"1D59C"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠭"}},"key":"1D59D"},{"category":"Ll","mappings":{"default":{"default":"⠸⠀⠸⠽"}},"key":"1D59E"},{"category":"Ll","mappi
ngs":{"default":{"default":"⠸⠀⠸⠵"}},"key":"1D59F"}] | PypiClean |
/AISTLAB_nitrotyper-0.6.10.tar.gz/AISTLAB_nitrotyper-0.6.10/README.md | # AISTLAB_nitrotyper
> https://www.nitrotype.com/race auto typer using python3 and cv2
>
> Only 1920x1080 resolution is currently supported.
>
> winxos, AISTLAB 2017-03-17
## INSTALL:
pip3 install nitrotyper
## USAGE:
1. open https://www.nitrotype.com/race using your web browser
2. open console and run **nitrotyper** command.
3. make sure the web browser is on the top layer of the desktop, enjoy it.
4. The delay parameter can be used to control the typing speed.
*Just for educational purpose, take care of yourself.*
| PypiClean |
/CNVkit-0.9.10-py3-none-any.whl/skgenome/tabio/vcfsimple.py | import logging
import numpy as np
import pandas as pd
from Bio.File import as_handle
# TODO save VCF header (as string, the whole text block) in meta{header=}
def read_vcf_simple(infile):
    """Read VCF file without samples.

    Scans the ``##`` meta-information lines until the ``#CHROM`` column-header
    line, then loads the remaining records from the same handle into a
    DataFrame with 0-based 'start' coordinates and an inferred 'end' column.

    Parameters
    ----------
    infile : str or file-like
        Path or open handle of the VCF file (Bio.File.as_handle accepts both).

    Returns
    -------
    pandas.DataFrame
        Columns: chromosome, start, id, ref, alt, qual, filter, info, format,
        one column per sample ID found in the header, and end.
    """
    # ENH: Make all readers return a tuple (header_string, body_table)
    # ENH: usecols -- need to trim dtypes dict to match?
    # NOTE(review): header_lines is collected but never returned or used here.
    header_lines = []
    with as_handle(infile, "r") as handle:
        for line in handle:
            if line.startswith("##"):
                header_lines.append(line)
            else:
                # First non-## line must be the #CHROM column header.
                # NOTE(review): if the file has no such line, header_line is
                # never bound and the code below raises NameError.
                assert line.startswith("#CHR")
                header_line = line
                header_lines.append(line)
                break

        # Extract sample names from VCF header, keep as column names
        # (fixed VCF columns occupy fields 0-8; samples start at field 9).
        header_fields = header_line.split("\t")
        sample_ids = header_fields[9:]
        colnames = [
            "chromosome",
            "start",
            "id",
            "ref",
            "alt",
            "qual",
            "filter",
            "info",
            "format",
        ] + sample_ids
        dtypes = {c: str for c in colnames}
        dtypes["start"] = int
        # 'qual' is handled by the parse_qual converter ('.' -> NaN), so it
        # must not also appear in the dtype mapping.
        del dtypes["qual"]
        # The handle is positioned just past the header, so read_csv sees
        # only the record lines.
        table = pd.read_csv(
            handle,
            sep="\t",
            header=None,
            na_filter=False,
            names=colnames,
            converters={"qual": parse_qual},
            dtype=dtypes,
        )
    # ENH: do things with filter, info
    # Convert 1-based VCF POS to 0-based half-open start coordinates.
    table["start"] -= 1
    # END= from the INFO field, or -1 when absent (then inferred below).
    table["end"] = table["info"].apply(parse_end_from_info)
    set_ends(table)
    logging.info("Loaded %d plain records", len(table))
    return table
def read_vcf_sites(infile):
    """Read VCF contents into a DataFrame (sites only, samples ignored).

    Header lines are skipped via the '#' comment marker; the 'end' column is
    parsed from the INFO field and, where absent, inferred from allele lengths.
    """
    wanted = ["chromosome", "start", "id", "ref", "alt", "qual", "filter", "end"]
    # All retained columns are strings except the integer 'start'; 'qual' and
    # 'end' are handled by converters instead of dtypes.
    dtypes = {name: str for name in ["chromosome", "id", "ref", "alt", "filter"]}
    dtypes["start"] = int
    table = pd.read_csv(
        infile,
        sep="\t",
        comment="#",
        header=None,
        na_filter=False,
        names=wanted,
        usecols=wanted,
        converters={"end": parse_end_from_info, "qual": parse_qual},
        dtype=dtypes,
    )
    # Convert 1-based VCF POS to 0-based starts; where END was missing (-1),
    # infer it from allele lengths.
    table["start"] -= 1
    set_ends(table)
    logging.info("Loaded %d plain records", len(table))
    return table
def parse_end_from_info(info):
    """Parse the END position, if present, from a VCF INFO field.

    Splits the semicolon-delimited INFO string and matches the END key
    exactly at the start of a field. The previous substring search
    (``info.find("END=")``) also matched inside other keys such as
    ``CIEND=`` or ``SVEND=``, returning wrong values or crashing on
    ``int()`` of a non-integer payload.

    Parameters
    ----------
    info : str
        The raw INFO column value, e.g. "DP=5;END=42;SVTYPE=DEL".

    Returns
    -------
    int
        The END coordinate, or -1 when no END key is present.
    """
    for field in info.split(";"):
        if field.startswith("END="):
            return int(field[4:])
    return -1
def parse_qual(qual):
    """Parse a QUAL value as a float, mapping the VCF missing marker '.' to NaN."""
    # ENH: only apply na_filter to this column
    return np.nan if qual == "." else float(qual)
def set_ends(table):
    """Set the 'end' field according to allele lengths where END was absent.

    Rows whose 'end' is -1 (no END= key in INFO) get
    ``end = start + max(0, longest_alt_length - ref_length)``.
    Multi-allelic records (comma-separated alts) are now handled by taking
    the longest alt allele, implementing the previous in-code TODO.

    Modifies `table` in place; returns None.
    """
    need_end_idx = table.end == -1
    if need_end_idx.any():
        ref_sz = table.loc[need_end_idx, "ref"].str.len()
        # For multi-allelic records, use the length of the longest alt allele.
        alt_sz = (table.loc[need_end_idx, "alt"]
                  .str.split(",")
                  .apply(lambda alleles: max(len(a) for a in alleles)))
        var_sz = alt_sz - ref_sz
        # TODO XXX if end > start, swap 'em?
        # Deletions (alt shorter than ref) contribute no extra span here.
        var_sz = var_sz.clip(lower=0)
        table.loc[need_end_idx, "end"] = table.loc[need_end_idx, "start"] + var_sz
/NanoSplit-0.1.4.tar.gz/NanoSplit-0.1.4/README.rst | NanoSplit
=========
Perform splitting of Oxford Nanopore sequencing data into a fail and a pass
dataset using a user-defined quality cutoff. The script can read
compressed input and will write to gzip-compressed files.
INSTALLATION
~~~~~~~~~~~~
::
pip install NanoSplit
USAGE
~~~~~
::
NanoSplit [-h] [-q QUALITY] [--outdir OUTDIR] fastqfile
Required arguments:
fastqfile Fastq file to split, can be gz compressed.
Optional arguments:
-h, --help show this help message and exit
-q QUALITY, --quality QUALITY
Splitting on this average read quality score
Default: 12
--outdir OUTDIR Specify directory in which output has to be created.
EXAMPLES
~~~~~~~~
::
NanoSplit reads.fastq.gz
NanoSplit -q 6 reads.fastq.gz
NanoSplit -q 10 --outdir /home/user/datasetsplit/ reads.fastq
| PypiClean |
/Dts-OpenFisca-Core-34.8.0.tar.gz/Dts-OpenFisca-Core-34.8.0/openfisca_web_api/handlers.py |
import dpath
from openfisca_core.simulation_builder import SimulationBuilder
from openfisca_core.indexed_enums import Enum
from collections import defaultdict
def calculate(tax_benefit_system, input_data):
    """Compute every requested value in input_data and merge results back in.

    Each leaf path of the form <entities>/<entity_id>/<variable>/<period>
    whose value is None is calculated with a fresh simulation; the computed
    values are written back into input_data at the same paths and the
    (mutated) input_data is returned.
    """
    simulation = SimulationBuilder().build_from_entities(tax_benefit_system, input_data)
    requested_computations = dpath.util.search(
        input_data, '*/*/*/*', afilter = lambda t: t is None, yielded = True)

    computation_results = {}
    for path, _value in requested_computations:
        entity_plural, entity_id, variable_name, period = path.split('/')
        variable = tax_benefit_system.get_variable(variable_name)
        result = simulation.calculate(variable_name, period)
        population = simulation.get_population(entity_plural)
        entity_index = population.get_index(entity_id)

        if variable.value_type == Enum:
            entity_result = result.decode()[entity_index].name
        elif variable.value_type == float:
            # Turn the float32 into a regular float without adding confusing
            # extra decimals. There must be a better way.
            entity_result = float(str(result[entity_index]))
        elif variable.value_type == str:
            entity_result = str(result[entity_index])
        else:
            entity_result = result.tolist()[entity_index]

        dpath.util.new(computation_results, path, entity_result)

    dpath.merge(input_data, computation_results)
    return input_data
def dependencies(tax_benefit_system, input_data):
    """Return a count of the input variables underlying each requested computation.

    Builds the simulation (for validation side effects), then for every
    requested leaf path resolves the variable and recursively tallies the
    input variables it depends on.
    """
    SimulationBuilder().build_from_entities(tax_benefit_system, input_data)
    requested = dpath.util.search(
        input_data, '*/*/*/*', afilter = lambda t: t is None, yielded = True)

    dep_vars = defaultdict(int)
    for path, _value in requested:
        _entities, _entity_id, variable_name, _period = path.split('/')
        variable = tax_benefit_system.get_variable(variable_name)
        variable.entity.set_tax_benefit_system(tax_benefit_system)
        get_dependencies(dep_vars, variable)
    return dep_vars
def get_dependencies(dep_vars, variable):
    """Recursively tally the input variables a variable's formula depends on.

    Walks the dependency graph below `variable`; every input variable
    encountered increments its counter in `dep_vars` (once per path reaching
    it). Mutates `dep_vars` in place; returns None.
    """
    # Iterative traversal with an explicit stack instead of recursion.
    pending = list(variable.dependencies)
    while pending:
        node = pending.pop()
        if node.is_input_variable():
            dep_vars[node.name] += 1
        else:
            pending.extend(node.dependencies)
def trace(tax_benefit_system, input_data):
    """Run the requested computations with tracing enabled and return the trace.

    Returns a dict with the serialized flat trace, a description of the
    simulation's entities, and the list of "variable<period>" strings that
    were requested.
    """
    simulation = SimulationBuilder().build_from_entities(tax_benefit_system, input_data)
    simulation.trace = True

    requested_calculations = []
    for path, _value in dpath.util.search(
            input_data, '*/*/*/*', afilter = lambda t: t is None, yielded = True):
        _entities, _entity_id, variable_name, period = path.split('/')
        requested_calculations.append(f"{variable_name}<{str(period)}>")
        simulation.calculate(variable_name, period)

    return {
        "trace": simulation.tracer.get_serialized_flat_trace(),
        "entitiesDescription": simulation.describe_entities(),
        "requestedCalculations": requested_calculations,
        }
/ApiWrap-0.1.1.tar.gz/ApiWrap-0.1.1/README.md | # ApiWrap
Simple HTTP POST API wrapper for Python classes.
# Example:
Copy the Python code below into a file (eg. `api_server.py`)
```python
#!/usr/bin/env python
from wrapit.api_wrapper import create_app
# The class to be wrapped
class Calculator:
def add(self, x, y):
return x + y
def sub(self, x, y):
return x - y
def mult(self, x, y):
return x * y
def div(self, x, y):
return x / y
# Create an instance of the class
calculator = Calculator()
# Create an app by wrapping this instance
app = create_app(calculator)
# Main program
if __name__ == "__main__":
# Start the app, accepting connections from arbitrary hosts on port 5555
app.run(host="0.0.0.0", port=5555)
```
# Running the API server
```
./api_server.py
```
# Testing the API server
```
curl -s --request POST \
--url http://127.0.0.1:5555/ \
--header 'Content-Type: application/json' \
--data '{"endpoint": "add", "payload": {"x": 8, "y": 5}}'
```
The output should be:
```
{"endpoint": "add", "payload": 13, "success": true}
```
| PypiClean |
/Curare-0.4.5-py3-none-any.whl/curare/report/js/Chart.min.js | !function(t,e){"object"==typeof exports&&"undefined"!=typeof module?module.exports=e():"function"==typeof define&&define.amd?define(e):(t="undefined"!=typeof globalThis?globalThis:t||self).Chart=e()}(this,(function(){"use strict";const t="undefined"==typeof window?function(t){return t()}:window.requestAnimationFrame;function e(e,i,n){const o=n||(t=>Array.prototype.slice.call(t));let s=!1,a=[];return function(...n){a=o(n),s||(s=!0,t.call(window,(()=>{s=!1,e.apply(i,a)})))}}function i(t,e){let i;return function(){return e?(clearTimeout(i),i=setTimeout(t,e)):t(),e}}const n=t=>"start"===t?"left":"end"===t?"right":"center",o=(t,e,i)=>"start"===t?e:"end"===t?i:(e+i)/2,s=(t,e,i)=>"right"===t?i:"center"===t?(e+i)/2:e;var a=new class{constructor(){this._request=null,this._charts=new Map,this._running=!1,this._lastDate=void 0}_notify(t,e,i,n){const o=e.listeners[n],s=e.duration;o.forEach((n=>n({chart:t,initial:e.initial,numSteps:s,currentStep:Math.min(i-e.start,s)})))}_refresh(){const e=this;e._request||(e._running=!0,e._request=t.call(window,(()=>{e._update(),e._request=null,e._running&&e._refresh()})))}_update(t=Date.now()){const e=this;let i=0;e._charts.forEach(((n,o)=>{if(!n.running||!n.items.length)return;const s=n.items;let a,r=s.length-1,l=!1;for(;r>=0;--r)a=s[r],a._active?(a._total>n.duration&&(n.duration=a._total),a.tick(t),l=!0):(s[r]=s[s.length-1],s.pop());l&&(o.draw(),e._notify(o,n,t,"progress")),s.length||(n.running=!1,e._notify(o,n,t,"complete"),n.initial=!1),i+=s.length})),e._lastDate=t,0===i&&(e._running=!1)}_getAnims(t){const e=this._charts;let i=e.get(t);return i||(i={running:!1,initial:!0,items:[],listeners:{complete:[],progress:[]}},e.set(t,i)),i}listen(t,e,i){this._getAnims(t).listeners[e].push(i)}add(t,e){e&&e.length&&this._getAnims(t).items.push(...e)}has(t){return this._getAnims(t).items.length>0}start(t){const 
e=this._charts.get(t);e&&(e.running=!0,e.start=Date.now(),e.duration=e.items.reduce(((t,e)=>Math.max(t,e._duration)),0),this._refresh())}running(t){if(!this._running)return!1;const e=this._charts.get(t);return!!(e&&e.running&&e.items.length)}stop(t){const e=this._charts.get(t);if(!e||!e.items.length)return;const i=e.items;let n=i.length-1;for(;n>=0;--n)i[n].cancel();e.items=[],this._notify(t,e,Date.now(),"complete")}remove(t){return this._charts.delete(t)}};
/*!
* @kurkle/color v0.1.9
* https://github.com/kurkle/color#readme
* (c) 2020 Jukka Kurkela
* Released under the MIT License
*/const r={0:0,1:1,2:2,3:3,4:4,5:5,6:6,7:7,8:8,9:9,A:10,B:11,C:12,D:13,E:14,F:15,a:10,b:11,c:12,d:13,e:14,f:15},l="0123456789ABCDEF",c=t=>l[15&t],h=t=>l[(240&t)>>4]+l[15&t],d=t=>(240&t)>>4==(15&t);function u(t){var e=function(t){return d(t.r)&&d(t.g)&&d(t.b)&&d(t.a)}(t)?c:h;return t?"#"+e(t.r)+e(t.g)+e(t.b)+(t.a<255?e(t.a):""):t}function f(t){return t+.5|0}const g=(t,e,i)=>Math.max(Math.min(t,i),e);function p(t){return g(f(2.55*t),0,255)}function m(t){return g(f(255*t),0,255)}function x(t){return g(f(t/2.55)/100,0,1)}function b(t){return g(f(100*t),0,100)}const _=/^rgba?\(\s*([-+.\d]+)(%)?[\s,]+([-+.e\d]+)(%)?[\s,]+([-+.e\d]+)(%)?(?:[\s,/]+([-+.e\d]+)(%)?)?\s*\)$/;const y=/^(hsla?|hwb|hsv)\(\s*([-+.e\d]+)(?:deg)?[\s,]+([-+.e\d]+)%[\s,]+([-+.e\d]+)%(?:[\s,]+([-+.e\d]+)(%)?)?\s*\)$/;function v(t,e,i){const n=e*Math.min(i,1-i),o=(e,o=(e+t/30)%12)=>i-n*Math.max(Math.min(o-3,9-o,1),-1);return[o(0),o(8),o(4)]}function w(t,e,i){const n=(n,o=(n+t/60)%6)=>i-i*e*Math.max(Math.min(o,4-o,1),0);return[n(5),n(3),n(1)]}function M(t,e,i){const n=v(t,1,.5);let o;for(e+i>1&&(o=1/(e+i),e*=o,i*=o),o=0;o<3;o++)n[o]*=1-e-i,n[o]+=e;return n}function k(t){const e=t.r/255,i=t.g/255,n=t.b/255,o=Math.max(e,i,n),s=Math.min(e,i,n),a=(o+s)/2;let r,l,c;return o!==s&&(c=o-s,l=a>.5?c/(2-o-s):c/(o+s),r=o===e?(i-n)/c+(i<n?6:0):o===i?(n-e)/c+2:(e-i)/c+4,r=60*r+.5),[0|r,l||0,a]}function S(t,e,i,n){return(Array.isArray(e)?t(e[0],e[1],e[2]):t(e,i,n)).map(m)}function P(t,e,i){return S(v,t,e,i)}function D(t){return(t%360+360)%360}function C(t){const e=y.exec(t);let i,n=255;if(!e)return;e[5]!==i&&(n=e[6]?p(+e[5]):m(+e[5]));const o=D(+e[2]),s=+e[3]/100,a=+e[4]/100;return i="hwb"===e[1]?function(t,e,i){return S(M,t,e,i)}(o,s,a):"hsv"===e[1]?function(t,e,i){return S(w,t,e,i)}(o,s,a):P(o,s,a),{r:i[0],g:i[1],b:i[2],a:n}}const 
O={x:"dark",Z:"light",Y:"re",X:"blu",W:"gr",V:"medium",U:"slate",A:"ee",T:"ol",S:"or",B:"ra",C:"lateg",D:"ights",R:"in",Q:"turquois",E:"hi",P:"ro",O:"al",N:"le",M:"de",L:"yello",F:"en",K:"ch",G:"arks",H:"ea",I:"ightg",J:"wh"},T={OiceXe:"f0f8ff",antiquewEte:"faebd7",aqua:"ffff",aquamarRe:"7fffd4",azuY:"f0ffff",beige:"f5f5dc",bisque:"ffe4c4",black:"0",blanKedOmond:"ffebcd",Xe:"ff",XeviTet:"8a2be2",bPwn:"a52a2a",burlywood:"deb887",caMtXe:"5f9ea0",KartYuse:"7fff00",KocTate:"d2691e",cSO:"ff7f50",cSnflowerXe:"6495ed",cSnsilk:"fff8dc",crimson:"dc143c",cyan:"ffff",xXe:"8b",xcyan:"8b8b",xgTMnPd:"b8860b",xWay:"a9a9a9",xgYF:"6400",xgYy:"a9a9a9",xkhaki:"bdb76b",xmagFta:"8b008b",xTivegYF:"556b2f",xSange:"ff8c00",xScEd:"9932cc",xYd:"8b0000",xsOmon:"e9967a",xsHgYF:"8fbc8f",xUXe:"483d8b",xUWay:"2f4f4f",xUgYy:"2f4f4f",xQe:"ced1",xviTet:"9400d3",dAppRk:"ff1493",dApskyXe:"bfff",dimWay:"696969",dimgYy:"696969",dodgerXe:"1e90ff",fiYbrick:"b22222",flSOwEte:"fffaf0",foYstWAn:"228b22",fuKsia:"ff00ff",gaRsbSo:"dcdcdc",ghostwEte:"f8f8ff",gTd:"ffd700",gTMnPd:"daa520",Way:"808080",gYF:"8000",gYFLw:"adff2f",gYy:"808080",honeyMw:"f0fff0",hotpRk:"ff69b4",RdianYd:"cd5c5c",Rdigo:"4b0082",ivSy:"fffff0",khaki:"f0e68c",lavFMr:"e6e6fa",lavFMrXsh:"fff0f5",lawngYF:"7cfc00",NmoncEffon:"fffacd",ZXe:"add8e6",ZcSO:"f08080",Zcyan:"e0ffff",ZgTMnPdLw:"fafad2",ZWay:"d3d3d3",ZgYF:"90ee90",ZgYy:"d3d3d3",ZpRk:"ffb6c1",ZsOmon:"ffa07a",ZsHgYF:"20b2aa",ZskyXe:"87cefa",ZUWay:"778899",ZUgYy:"778899",ZstAlXe:"b0c4de",ZLw:"ffffe0",lime:"ff00",limegYF:"32cd32",lRF:"faf0e6",magFta:"ff00ff",maPon:"800000",VaquamarRe:"66cdaa",VXe:"cd",VScEd:"ba55d3",VpurpN:"9370db",VsHgYF:"3cb371",VUXe:"7b68ee",VsprRggYF:"fa9a",VQe:"48d1cc",VviTetYd:"c71585",midnightXe:"191970",mRtcYam:"f5fffa",mistyPse:"ffe4e1",moccasR:"ffe4b5",navajowEte:"ffdead",navy:"80",Tdlace:"fdf5e6",Tive:"808000",TivedBb:"6b8e23",Sange:"ffa500",SangeYd:"ff4500",ScEd:"da70d6",pOegTMnPd:"eee8aa",pOegYF:"98fb98",pOeQe:"afeeee",pOeviTetYd:"db7093",papayawEp:"ffefd5",pHKpu
ff:"ffdab9",peru:"cd853f",pRk:"ffc0cb",plum:"dda0dd",powMrXe:"b0e0e6",purpN:"800080",YbeccapurpN:"663399",Yd:"ff0000",Psybrown:"bc8f8f",PyOXe:"4169e1",saddNbPwn:"8b4513",sOmon:"fa8072",sandybPwn:"f4a460",sHgYF:"2e8b57",sHshell:"fff5ee",siFna:"a0522d",silver:"c0c0c0",skyXe:"87ceeb",UXe:"6a5acd",UWay:"708090",UgYy:"708090",snow:"fffafa",sprRggYF:"ff7f",stAlXe:"4682b4",tan:"d2b48c",teO:"8080",tEstN:"d8bfd8",tomato:"ff6347",Qe:"40e0d0",viTet:"ee82ee",JHt:"f5deb3",wEte:"ffffff",wEtesmoke:"f5f5f5",Lw:"ffff00",LwgYF:"9acd32"};let A;function L(t){A||(A=function(){const t={},e=Object.keys(T),i=Object.keys(O);let n,o,s,a,r;for(n=0;n<e.length;n++){for(a=r=e[n],o=0;o<i.length;o++)s=i[o],r=r.replace(s,O[s]);s=parseInt(T[a],16),t[r]=[s>>16&255,s>>8&255,255&s]}return t}(),A.transparent=[0,0,0,0]);const e=A[t.toLowerCase()];return e&&{r:e[0],g:e[1],b:e[2],a:4===e.length?e[3]:255}}function R(t,e,i){if(t){let n=k(t);n[e]=Math.max(0,Math.min(n[e]+n[e]*i,0===e?360:1)),n=P(n),t.r=n[0],t.g=n[1],t.b=n[2]}}function E(t,e){return t?Object.assign(e||{},t):t}function I(t){var e={r:0,g:0,b:0,a:255};return Array.isArray(t)?t.length>=3&&(e={r:t[0],g:t[1],b:t[2],a:255},t.length>3&&(e.a=m(t[3]))):(e=E(t,{r:0,g:0,b:0,a:1})).a=m(e.a),e}function z(t){return"r"===t.charAt(0)?function(t){const e=_.exec(t);let i,n,o,s=255;if(e){if(e[7]!==i){const t=+e[7];s=255&(e[8]?p(t):255*t)}return i=+e[1],n=+e[3],o=+e[5],i=255&(e[2]?p(i):i),n=255&(e[4]?p(n):n),o=255&(e[6]?p(o):o),{r:i,g:n,b:o,a:s}}}(t):C(t)}class F{constructor(t){if(t instanceof F)return t;const e=typeof t;let i;var n,o,s;"object"===e?i=I(t):"string"===e&&(s=(n=t).length,"#"===n[0]&&(4===s||5===s?o={r:255&17*r[n[1]],g:255&17*r[n[2]],b:255&17*r[n[3]],a:5===s?17*r[n[4]]:255}:7!==s&&9!==s||(o={r:r[n[1]]<<4|r[n[2]],g:r[n[3]]<<4|r[n[4]],b:r[n[5]]<<4|r[n[6]],a:9===s?r[n[7]]<<4|r[n[8]]:255})),i=o||L(t)||z(t)),this._rgb=i,this._valid=!!i}get valid(){return this._valid}get rgb(){var t=E(this._rgb);return t&&(t.a=x(t.a)),t}set 
rgb(t){this._rgb=I(t)}rgbString(){return this._valid?(t=this._rgb)&&(t.a<255?`rgba(${t.r}, ${t.g}, ${t.b}, ${x(t.a)})`:`rgb(${t.r}, ${t.g}, ${t.b})`):this._rgb;var t}hexString(){return this._valid?u(this._rgb):this._rgb}hslString(){return this._valid?function(t){if(!t)return;const e=k(t),i=e[0],n=b(e[1]),o=b(e[2]);return t.a<255?`hsla(${i}, ${n}%, ${o}%, ${x(t.a)})`:`hsl(${i}, ${n}%, ${o}%)`}(this._rgb):this._rgb}mix(t,e){const i=this;if(t){const n=i.rgb,o=t.rgb;let s;const a=e===s?.5:e,r=2*a-1,l=n.a-o.a,c=((r*l==-1?r:(r+l)/(1+r*l))+1)/2;s=1-c,n.r=255&c*n.r+s*o.r+.5,n.g=255&c*n.g+s*o.g+.5,n.b=255&c*n.b+s*o.b+.5,n.a=a*n.a+(1-a)*o.a,i.rgb=n}return i}clone(){return new F(this.rgb)}alpha(t){return this._rgb.a=m(t),this}clearer(t){return this._rgb.a*=1-t,this}greyscale(){const t=this._rgb,e=f(.3*t.r+.59*t.g+.11*t.b);return t.r=t.g=t.b=e,this}opaquer(t){return this._rgb.a*=1+t,this}negate(){const t=this._rgb;return t.r=255-t.r,t.g=255-t.g,t.b=255-t.b,this}lighten(t){return R(this._rgb,2,t),this}darken(t){return R(this._rgb,2,-t),this}saturate(t){return R(this._rgb,1,t),this}desaturate(t){return R(this._rgb,1,-t),this}rotate(t){return function(t,e){var i=k(t);i[0]=D(i[0]+e),i=P(i),t.r=i[0],t.g=i[1],t.b=i[2]}(this._rgb,t),this}}function V(t){return new F(t)}const B=t=>t instanceof CanvasGradient||t instanceof CanvasPattern;function W(t){return B(t)?t:V(t)}function H(t){return B(t)?t:V(t).saturate(.5).darken(.1).hexString()}function N(){}const j=function(){let t=0;return function(){return t++}}();function $(t){return null==t}function Y(t){if(Array.isArray&&Array.isArray(t))return!0;const e=Object.prototype.toString.call(t);return"[object"===e.substr(0,7)&&"Array]"===e.substr(-6)}function U(t){return null!==t&&"[object Object]"===Object.prototype.toString.call(t)}const X=t=>("number"==typeof t||t instanceof Number)&&isFinite(+t);function q(t,e){return X(t)?t:e}function K(t,e){return void 0===t?e:t}const G=(t,e)=>"string"==typeof 
t&&t.endsWith("%")?parseFloat(t)/100:t/e,Z=(t,e)=>"string"==typeof t&&t.endsWith("%")?parseFloat(t)/100*e:+t;function Q(t,e,i){if(t&&"function"==typeof t.call)return t.apply(i,e)}function J(t,e,i,n){let o,s,a;if(Y(t))if(s=t.length,n)for(o=s-1;o>=0;o--)e.call(i,t[o],o);else for(o=0;o<s;o++)e.call(i,t[o],o);else if(U(t))for(a=Object.keys(t),s=a.length,o=0;o<s;o++)e.call(i,t[a[o]],a[o])}function tt(t,e){let i,n,o,s;if(!t||!e||t.length!==e.length)return!1;for(i=0,n=t.length;i<n;++i)if(o=t[i],s=e[i],o.datasetIndex!==s.datasetIndex||o.index!==s.index)return!1;return!0}function et(t){if(Y(t))return t.map(et);if(U(t)){const e=Object.create(null),i=Object.keys(t),n=i.length;let o=0;for(;o<n;++o)e[i[o]]=et(t[i[o]]);return e}return t}function it(t){return-1===["__proto__","prototype","constructor"].indexOf(t)}function nt(t,e,i,n){if(!it(t))return;const o=e[t],s=i[t];U(o)&&U(s)?ot(o,s,n):e[t]=et(s)}function ot(t,e,i){const n=Y(e)?e:[e],o=n.length;if(!U(t))return t;const s=(i=i||{}).merger||nt;for(let a=0;a<o;++a){if(!U(e=n[a]))continue;const o=Object.keys(e);for(let n=0,a=o.length;n<a;++n)s(o[n],t,e,i)}return t}function st(t,e){return ot(t,e,{merger:at})}function at(t,e,i){if(!it(t))return;const n=e[t],o=i[t];U(n)&&U(o)?st(n,o):Object.prototype.hasOwnProperty.call(e,t)||(e[t]=et(o))}function rt(t,e){const i=t.indexOf(".",e);return-1===i?t.length:i}function lt(t,e){if(""===e)return t;let i=0,n=rt(e,i);for(;t&&n>i;)t=t[e.substr(i,n-i)],i=n+1,n=rt(e,i);return t}function ct(t){return t.charAt(0).toUpperCase()+t.slice(1)}const ht=t=>void 0!==t,dt=t=>"function"==typeof t,ut=(t,e)=>{if(t.size!==e.size)return!1;for(const i of t)if(!e.has(i))return!1;return!0},ft=Object.create(null),gt=Object.create(null);function pt(t,e){if(!e)return t;const i=e.split(".");for(let e=0,n=i.length;e<n;++e){const n=i[e];t=t[n]||(t[n]=Object.create(null))}return t}function mt(t,e,i){return"string"==typeof e?ot(pt(t,e),i):ot(pt(t,""),e)}var xt=new class{constructor(t){this.animation=void 
0,this.backgroundColor="rgba(0,0,0,0.1)",this.borderColor="rgba(0,0,0,0.1)",this.color="#666",this.datasets={},this.devicePixelRatio=t=>t.chart.platform.getDevicePixelRatio(),this.elements={},this.events=["mousemove","mouseout","click","touchstart","touchmove"],this.font={family:"'Helvetica Neue', 'Helvetica', 'Arial', sans-serif",size:12,style:"normal",lineHeight:1.2,weight:null},this.hover={},this.hoverBackgroundColor=(t,e)=>H(e.backgroundColor),this.hoverBorderColor=(t,e)=>H(e.borderColor),this.hoverColor=(t,e)=>H(e.color),this.indexAxis="x",this.interaction={mode:"nearest",intersect:!0},this.maintainAspectRatio=!0,this.onHover=null,this.onClick=null,this.parsing=!0,this.plugins={},this.responsive=!0,this.scale=void 0,this.scales={},this.showLine=!0,this.describe(t)}set(t,e){return mt(this,t,e)}get(t){return pt(this,t)}describe(t,e){return mt(gt,t,e)}override(t,e){return mt(ft,t,e)}route(t,e,i,n){const o=pt(this,t),s=pt(this,i),a="_"+e;Object.defineProperties(o,{[a]:{value:o[e],writable:!0},[e]:{enumerable:!0,get(){const t=this[a],e=s[n];return U(t)?Object.assign({},e,t):K(t,e)},set(t){this[a]=t}}})}}({_scriptable:t=>!t.startsWith("on"),_indexable:t=>"events"!==t,hover:{_fallback:"interaction"},interaction:{_scriptable:!1,_indexable:!1}});const bt=Math.PI,_t=2*bt,yt=_t+bt,vt=Number.POSITIVE_INFINITY,wt=bt/180,Mt=bt/2,kt=bt/4,St=2*bt/3,Pt=Math.log10,Dt=Math.sign;function Ct(t){const e=Math.pow(10,Math.floor(Pt(t))),i=t/e;return(i<=1?1:i<=2?2:i<=5?5:10)*e}function Ot(t){const e=[],i=Math.sqrt(t);let n;for(n=1;n<i;n++)t%n==0&&(e.push(n),e.push(t/n));return i===(0|i)&&e.push(i),e.sort(((t,e)=>t-e)).pop(),e}function Tt(t){return!isNaN(parseFloat(t))&&isFinite(t)}function At(t,e,i){return Math.abs(t-e)<i}function Lt(t,e){const i=Math.round(t);return i-e<=t&&i+e>=t}function Rt(t,e,i){let n,o,s;for(n=0,o=t.length;n<o;n++)s=t[n][i],isNaN(s)||(e.min=Math.min(e.min,s),e.max=Math.max(e.max,s))}function Et(t){return t*(bt/180)}function It(t){return t*(180/bt)}function 
zt(t){if(!X(t))return;let e=1,i=0;for(;Math.round(t*e)/e!==t;)e*=10,i++;return i}function Ft(t,e){const i=e.x-t.x,n=e.y-t.y,o=Math.sqrt(i*i+n*n);let s=Math.atan2(n,i);return s<-.5*bt&&(s+=_t),{angle:s,distance:o}}function Vt(t,e){return Math.sqrt(Math.pow(e.x-t.x,2)+Math.pow(e.y-t.y,2))}function Bt(t,e){return(t-e+yt)%_t-bt}function Wt(t){return(t%_t+_t)%_t}function Ht(t,e,i,n){const o=Wt(t),s=Wt(e),a=Wt(i),r=Wt(s-o),l=Wt(a-o),c=Wt(o-s),h=Wt(o-a);return o===s||o===a||n&&s===a||r>l&&c<h}function Nt(t,e,i){return Math.max(e,Math.min(i,t))}function jt(t){return Nt(t,-32768,32767)}function $t(t){return!t||$(t.size)||$(t.family)?null:(t.style?t.style+" ":"")+(t.weight?t.weight+" ":"")+t.size+"px "+t.family}function Yt(t,e,i,n,o){let s=e[o];return s||(s=e[o]=t.measureText(o).width,i.push(o)),s>n&&(n=s),n}function Ut(t,e,i,n){let o=(n=n||{}).data=n.data||{},s=n.garbageCollect=n.garbageCollect||[];n.font!==e&&(o=n.data={},s=n.garbageCollect=[],n.font=e),t.save(),t.font=e;let a=0;const r=i.length;let l,c,h,d,u;for(l=0;l<r;l++)if(d=i[l],null!=d&&!0!==Y(d))a=Yt(t,o,s,a,d);else if(Y(d))for(c=0,h=d.length;c<h;c++)u=d[c],null==u||Y(u)||(a=Yt(t,o,s,a,u));t.restore();const f=s.length/2;if(f>i.length){for(l=0;l<f;l++)delete o[s[l]];s.splice(0,f)}return a}function Xt(t,e,i){const n=t.currentDevicePixelRatio,o=0!==i?Math.max(i/2,.5):0;return Math.round((e-o)*n)/n+o}function qt(t,e){(e=e||t.getContext("2d")).save(),e.resetTransform(),e.clearRect(0,0,t.width,t.height),e.restore()}function Kt(t,e,i,n){let o,s,a,r,l;const c=e.pointStyle,h=e.rotation,d=e.radius;let u=(h||0)*wt;if(c&&"object"==typeof c&&(o=c.toString(),"[object HTMLImageElement]"===o||"[object HTMLCanvasElement]"===o))return t.save(),t.translate(i,n),t.rotate(u),t.drawImage(c,-c.width/2,-c.height/2,c.width,c.height),void 
t.restore();if(!(isNaN(d)||d<=0)){switch(t.beginPath(),c){default:t.arc(i,n,d,0,_t),t.closePath();break;case"triangle":t.moveTo(i+Math.sin(u)*d,n-Math.cos(u)*d),u+=St,t.lineTo(i+Math.sin(u)*d,n-Math.cos(u)*d),u+=St,t.lineTo(i+Math.sin(u)*d,n-Math.cos(u)*d),t.closePath();break;case"rectRounded":l=.516*d,r=d-l,s=Math.cos(u+kt)*r,a=Math.sin(u+kt)*r,t.arc(i-s,n-a,l,u-bt,u-Mt),t.arc(i+a,n-s,l,u-Mt,u),t.arc(i+s,n+a,l,u,u+Mt),t.arc(i-a,n+s,l,u+Mt,u+bt),t.closePath();break;case"rect":if(!h){r=Math.SQRT1_2*d,t.rect(i-r,n-r,2*r,2*r);break}u+=kt;case"rectRot":s=Math.cos(u)*d,a=Math.sin(u)*d,t.moveTo(i-s,n-a),t.lineTo(i+a,n-s),t.lineTo(i+s,n+a),t.lineTo(i-a,n+s),t.closePath();break;case"crossRot":u+=kt;case"cross":s=Math.cos(u)*d,a=Math.sin(u)*d,t.moveTo(i-s,n-a),t.lineTo(i+s,n+a),t.moveTo(i+a,n-s),t.lineTo(i-a,n+s);break;case"star":s=Math.cos(u)*d,a=Math.sin(u)*d,t.moveTo(i-s,n-a),t.lineTo(i+s,n+a),t.moveTo(i+a,n-s),t.lineTo(i-a,n+s),u+=kt,s=Math.cos(u)*d,a=Math.sin(u)*d,t.moveTo(i-s,n-a),t.lineTo(i+s,n+a),t.moveTo(i+a,n-s),t.lineTo(i-a,n+s);break;case"line":s=Math.cos(u)*d,a=Math.sin(u)*d,t.moveTo(i-s,n-a),t.lineTo(i+s,n+a);break;case"dash":t.moveTo(i,n),t.lineTo(i+Math.cos(u)*d,n+Math.sin(u)*d)}t.fill(),e.borderWidth>0&&t.stroke()}}function Gt(t,e,i){return i=i||.5,t&&t.x>e.left-i&&t.x<e.right+i&&t.y>e.top-i&&t.y<e.bottom+i}function Zt(t,e){t.save(),t.beginPath(),t.rect(e.left,e.top,e.right-e.left,e.bottom-e.top),t.clip()}function Qt(t){t.restore()}function Jt(t,e,i,n,o){if(!e)return t.lineTo(i.x,i.y);if("middle"===o){const n=(e.x+i.x)/2;t.lineTo(n,e.y),t.lineTo(n,i.y)}else"after"===o!=!!n?t.lineTo(e.x,i.y):t.lineTo(i.x,e.y);t.lineTo(i.x,i.y)}function te(t,e,i,n){if(!e)return t.lineTo(i.x,i.y);t.bezierCurveTo(n?e.cp1x:e.cp2x,n?e.cp1y:e.cp2y,n?i.cp2x:i.cp1x,n?i.cp2y:i.cp1y,i.x,i.y)}function ee(t,e,i,n,o,s={}){const a=Y(e)?e:[e],r=s.strokeWidth>0&&""!==s.strokeColor;let 
l,c;for(t.save(),s.translation&&t.translate(s.translation[0],s.translation[1]),$(s.rotation)||t.rotate(s.rotation),t.font=o.string,s.color&&(t.fillStyle=s.color),s.textAlign&&(t.textAlign=s.textAlign),s.textBaseline&&(t.textBaseline=s.textBaseline),l=0;l<a.length;++l){if(c=a[l],r&&(s.strokeColor&&(t.strokeStyle=s.strokeColor),$(s.strokeWidth)||(t.lineWidth=s.strokeWidth),t.strokeText(c,i,n,s.maxWidth)),t.fillText(c,i,n,s.maxWidth),s.strikethrough||s.underline){const e=t.measureText(c),o=i-e.actualBoundingBoxLeft,a=i+e.actualBoundingBoxRight,r=n-e.actualBoundingBoxAscent,l=n+e.actualBoundingBoxDescent,h=s.strikethrough?(r+l)/2:l;t.strokeStyle=t.fillStyle,t.beginPath(),t.lineWidth=s.decorationWidth||2,t.moveTo(o,h),t.lineTo(a,h),t.stroke()}n+=o.lineHeight}t.restore()}function ie(t,e){const{x:i,y:n,w:o,h:s,radius:a}=e;t.arc(i+a.topLeft,n+a.topLeft,a.topLeft,-Mt,bt,!0),t.lineTo(i,n+s-a.bottomLeft),t.arc(i+a.bottomLeft,n+s-a.bottomLeft,a.bottomLeft,bt,Mt,!0),t.lineTo(i+o-a.bottomRight,n+s),t.arc(i+o-a.bottomRight,n+s-a.bottomRight,a.bottomRight,Mt,0,!0),t.lineTo(i+o,n+a.topRight),t.arc(i+o-a.topRight,n+a.topRight,a.topRight,0,-Mt,!0),t.lineTo(i+a.topLeft,n)}function ne(t,e,i){i=i||(i=>t[i]<e);let n,o=t.length-1,s=0;for(;o-s>1;)n=s+o>>1,i(n)?s=n:o=n;return{lo:s,hi:o}}const oe=(t,e,i)=>ne(t,i,(n=>t[n][e]<i)),se=(t,e,i)=>ne(t,i,(n=>t[n][e]>=i));function ae(t,e,i){let n=0,o=t.length;for(;n<o&&t[n]<e;)n++;for(;o>n&&t[o-1]>i;)o--;return n>0||o<t.length?t.slice(n,o):t}const re=["push","pop","shift","splice","unshift"];function le(t,e){t._chartjs?t._chartjs.listeners.push(e):(Object.defineProperty(t,"_chartjs",{configurable:!0,enumerable:!1,value:{listeners:[e]}}),re.forEach((e=>{const i="_onData"+ct(e),n=t[e];Object.defineProperty(t,e,{configurable:!0,enumerable:!1,value(...e){const o=n.apply(this,e);return t._chartjs.listeners.forEach((t=>{"function"==typeof t[i]&&t[i](...e)})),o}})})))}function ce(t,e){const i=t._chartjs;if(!i)return;const 
n=i.listeners,o=n.indexOf(e);-1!==o&&n.splice(o,1),n.length>0||(re.forEach((e=>{delete t[e]})),delete t._chartjs)}function he(t){const e=new Set;let i,n;for(i=0,n=t.length;i<n;++i)e.add(t[i]);if(e.size===n)return t;const o=[];return e.forEach((t=>{o.push(t)})),o}function de(t){let e=t.parentNode;return e&&"[object ShadowRoot]"===e.toString()&&(e=e.host),e}function ue(t,e,i){let n;return"string"==typeof t?(n=parseInt(t,10),-1!==t.indexOf("%")&&(n=n/100*e.parentNode[i])):n=t,n}const fe=t=>window.getComputedStyle(t,null);function ge(t,e){return fe(t).getPropertyValue(e)}const pe=["top","right","bottom","left"];function me(t,e,i){const n={};i=i?"-"+i:"";for(let o=0;o<4;o++){const s=pe[o];n[s]=parseFloat(t[e+"-"+s+i])||0}return n.width=n.left+n.right,n.height=n.top+n.bottom,n}function xe(t,e){const{canvas:i,currentDevicePixelRatio:n}=e,o=fe(i),s="border-box"===o.boxSizing,a=me(o,"padding"),r=me(o,"border","width"),{x:l,y:c,box:h}=function(t,e){const i=t.native||t,n=i.touches,o=n&&n.length?n[0]:i,{offsetX:s,offsetY:a}=o;let r,l,c=!1;if(((t,e,i)=>(t>0||e>0)&&(!i||!i.shadowRoot))(s,a,i.target))r=s,l=a;else{const t=e.getBoundingClientRect();r=o.clientX-t.left,l=o.clientY-t.top,c=!0}return{x:r,y:l,box:c}}(t,i),d=a.left+(h&&r.left),u=a.top+(h&&r.top);let{width:f,height:g}=e;return s&&(f-=a.width+r.width,g-=a.height+r.height),{x:Math.round((l-d)/f*i.width/n),y:Math.round((c-u)/g*i.height/n)}}const be=t=>Math.round(10*t)/10;function _e(t,e,i,n){const o=fe(t),s=me(o,"margin"),a=ue(o.maxWidth,t,"clientWidth")||vt,r=ue(o.maxHeight,t,"clientHeight")||vt,l=function(t,e,i){let n,o;if(void 0===e||void 0===i){const s=de(t);if(s){const t=s.getBoundingClientRect(),a=fe(s),r=me(a,"border","width"),l=me(a,"padding");e=t.width-l.width-r.width,i=t.height-l.height-r.height,n=ue(a.maxWidth,s,"clientWidth"),o=ue(a.maxHeight,s,"clientHeight")}else 
e=t.clientWidth,i=t.clientHeight}return{width:e,height:i,maxWidth:n||vt,maxHeight:o||vt}}(t,e,i);let{width:c,height:h}=l;if("content-box"===o.boxSizing){const t=me(o,"border","width"),e=me(o,"padding");c-=e.width+t.width,h-=e.height+t.height}return c=Math.max(0,c-s.width),h=Math.max(0,n?Math.floor(c/n):h-s.height),c=be(Math.min(c,a,l.maxWidth)),h=be(Math.min(h,r,l.maxHeight)),c&&!h&&(h=be(c/2)),{width:c,height:h}}function ye(t,e,i){const n=e||1,o=Math.floor(t.height*n),s=Math.floor(t.width*n);t.height=o/n,t.width=s/n;const a=t.canvas;return a.style&&(i||!a.style.height&&!a.style.width)&&(a.style.height=`${t.height}px`,a.style.width=`${t.width}px`),(t.currentDevicePixelRatio!==n||a.height!==o||a.width!==s)&&(t.currentDevicePixelRatio=n,a.height=o,a.width=s,t.ctx.setTransform(n,0,0,n,0,0),!0)}const ve=function(){let t=!1;try{const e={get passive(){return t=!0,!1}};window.addEventListener("test",null,e),window.removeEventListener("test",null,e)}catch(t){}return t}();function we(t,e){const i=ge(t,e),n=i&&i.match(/^(\d+)(\.\d+)?px$/);return n?+n[1]:void 0}function Me(t,e){return"native"in t?{x:t.x,y:t.y}:xe(t,e)}function ke(t,e,i,n){const{controller:o,data:s,_sorted:a}=t,r=o._cachedMeta.iScale;if(r&&e===r.axis&&a&&s.length){const t=r._reversePixels?se:oe;if(!n)return t(s,e,i);if(o._sharedOptions){const n=s[0],o="function"==typeof n.getRange&&n.getRange(e);if(o){const n=t(s,e,i-o),a=t(s,e,i+o);return{lo:n.lo,hi:a.hi}}}}return{lo:0,hi:s.length-1}}function Se(t,e,i,n,o){const s=t.getSortedVisibleDatasetMetas(),a=i[e];for(let t=0,i=s.length;t<i;++t){const{index:i,data:r}=s[t],{lo:l,hi:c}=ke(s[t],e,a,o);for(let t=l;t<=c;++t){const e=r[t];e.skip||n(e,i,t)}}}function Pe(t,e,i,n){const o=[];if(!Gt(e,t.chartArea,t._minPadding))return o;return Se(t,i,e,(function(t,i,s){t.inRange(e.x,e.y,n)&&o.push({element:t,datasetIndex:i,index:s})}),!0),o}function De(t,e,i,n,o){const s=function(t){const e=-1!==t.indexOf("x"),i=-1!==t.indexOf("y");return function(t,n){const 
o=e?Math.abs(t.x-n.x):0,s=i?Math.abs(t.y-n.y):0;return Math.sqrt(Math.pow(o,2)+Math.pow(s,2))}}(i);let a=Number.POSITIVE_INFINITY,r=[];if(!Gt(e,t.chartArea,t._minPadding))return r;return Se(t,i,e,(function(i,l,c){if(n&&!i.inRange(e.x,e.y,o))return;const h=i.getCenterPoint(o);if(!Gt(h,t.chartArea,t._minPadding))return;const d=s(e,h);d<a?(r=[{element:i,datasetIndex:l,index:c}],a=d):d===a&&r.push({element:i,datasetIndex:l,index:c})})),r}function Ce(t,e,i,n){const o=Me(e,t),s=[],a=i.axis,r="x"===a?"inXRange":"inYRange";let l=!1;return function(t,e){const i=t.getSortedVisibleDatasetMetas();let n,o,s;for(let t=0,a=i.length;t<a;++t){({index:n,data:o}=i[t]);for(let t=0,i=o.length;t<i;++t)s=o[t],s.skip||e(s,n,t)}}(t,((t,e,i)=>{t[r](o[a],n)&&s.push({element:t,datasetIndex:e,index:i}),t.inRange(o.x,o.y,n)&&(l=!0)})),i.intersect&&!l?[]:s}var Oe={modes:{index(t,e,i,n){const o=Me(e,t),s=i.axis||"x",a=i.intersect?Pe(t,o,s,n):De(t,o,s,!1,n),r=[];return a.length?(t.getSortedVisibleDatasetMetas().forEach((t=>{const e=a[0].index,i=t.data[e];i&&!i.skip&&r.push({element:i,datasetIndex:t.index,index:e})})),r):[]},dataset(t,e,i,n){const o=Me(e,t),s=i.axis||"xy";let a=i.intersect?Pe(t,o,s,n):De(t,o,s,!1,n);if(a.length>0){const e=a[0].datasetIndex,i=t.getDatasetMeta(e).data;a=[];for(let t=0;t<i.length;++t)a.push({element:i[t],datasetIndex:e,index:t})}return a},point:(t,e,i,n)=>Pe(t,Me(e,t),i.axis||"xy",n),nearest:(t,e,i,n)=>De(t,Me(e,t),i.axis||"xy",i.intersect,n),x:(t,e,i,n)=>(i.axis="x",Ce(t,e,i,n)),y:(t,e,i,n)=>(i.axis="y",Ce(t,e,i,n))}};const Te=new RegExp(/^(normal|(\d+(?:\.\d+)?)(px|em|%)?)$/),Ae=new RegExp(/^(normal|italic|initial|inherit|unset|(oblique( -?[0-9]?[0-9]deg)?))$/);function Le(t,e){const i=(""+t).match(Te);if(!i||"normal"===i[1])return 1.2*e;switch(t=+i[2],i[3]){case"px":return t;case"%":t/=100}return e*t}function Re(t,e){const i={},n=U(e),o=n?Object.keys(e):e,s=U(t)?n?i=>K(t[i],t[e[i]]):e=>t[e]:()=>t;for(const t of o)i[t]=+s(t)||0;return i}function Ee(t){return 
Re(t,{top:"y",right:"x",bottom:"y",left:"x"})}function Ie(t){return Re(t,["topLeft","topRight","bottomLeft","bottomRight"])}function ze(t){const e=Ee(t);return e.width=e.left+e.right,e.height=e.top+e.bottom,e}function Fe(t,e){t=t||{},e=e||xt.font;let i=K(t.size,e.size);"string"==typeof i&&(i=parseInt(i,10));let n=K(t.style,e.style);n&&!(""+n).match(Ae)&&(console.warn('Invalid font style specified: "'+n+'"'),n="");const o={family:K(t.family,e.family),lineHeight:Le(K(t.lineHeight,e.lineHeight),i),size:i,style:n,weight:K(t.weight,e.weight),string:""};return o.string=$t(o),o}function Ve(t,e,i,n){let o,s,a,r=!0;for(o=0,s=t.length;o<s;++o)if(a=t[o],void 0!==a&&(void 0!==e&&"function"==typeof a&&(a=a(e),r=!1),void 0!==i&&Y(a)&&(a=a[i%a.length],r=!1),void 0!==a))return n&&!r&&(n.cacheable=!1),a}function Be(t,e){const{min:i,max:n}=t;return{min:i-Math.abs(Z(e,i)),max:n+Z(e,n)}}const We=["left","top","right","bottom"];function He(t,e){return t.filter((t=>t.pos===e))}function Ne(t,e){return t.filter((t=>-1===We.indexOf(t.pos)&&t.box.axis===e))}function je(t,e){return t.sort(((t,i)=>{const n=e?i:t,o=e?t:i;return n.weight===o.weight?n.index-o.index:n.weight-o.weight}))}function $e(t,e,i,n){return Math.max(t[i],e[i])+Math.max(t[n],e[n])}function Ye(t,e){t.top=Math.max(t.top,e.top),t.left=Math.max(t.left,e.left),t.bottom=Math.max(t.bottom,e.bottom),t.right=Math.max(t.right,e.right)}function Ue(t,e,i){const n=i.box,o=t.maxPadding;U(i.pos)||(i.size&&(t[i.pos]-=i.size),i.size=i.horizontal?n.height:n.width,t[i.pos]+=i.size),n.getPadding&&Ye(o,n.getPadding());const s=Math.max(0,e.outerWidth-$e(o,t,"left","right")),a=Math.max(0,e.outerHeight-$e(o,t,"top","bottom")),r=s!==t.w,l=a!==t.h;return t.w=s,t.h=a,i.horizontal?{same:r,other:l}:{same:l,other:r}}function Xe(t,e){const i=e.maxPadding;function n(t){const n={left:0,top:0,right:0,bottom:0};return t.forEach((t=>{n[t]=Math.max(e[t],i[t])})),n}return n(t?["left","right"]:["top","bottom"])}function qe(t,e,i){const n=[];let 
o,s,a,r,l,c;for(o=0,s=t.length,l=0;o<s;++o){a=t[o],r=a.box,r.update(a.width||e.w,a.height||e.h,Xe(a.horizontal,e));const{same:s,other:h}=Ue(e,i,a);l|=s&&n.length,c=c||h,r.fullSize||n.push(a)}return l&&qe(n,e,i)||c}function Ke(t,e,i){const n=i.padding;let o,s,a,r,l=e.x,c=e.y;for(o=0,s=t.length;o<s;++o)a=t[o],r=a.box,a.horizontal?(r.left=r.fullSize?n.left:e.left,r.right=r.fullSize?i.outerWidth-n.right:e.left+e.w,r.top=c,r.bottom=c+r.height,r.width=r.right-r.left,c=r.bottom):(r.left=l,r.right=l+r.width,r.top=r.fullSize?n.top:e.top,r.bottom=r.fullSize?i.outerHeight-n.right:e.top+e.h,r.height=r.bottom-r.top,l=r.right);e.x=l,e.y=c}xt.set("layout",{padding:{top:0,right:0,bottom:0,left:0}});var Ge={addBox(t,e){t.boxes||(t.boxes=[]),e.fullSize=e.fullSize||!1,e.position=e.position||"top",e.weight=e.weight||0,e._layers=e._layers||function(){return[{z:0,draw(t){e.draw(t)}}]},t.boxes.push(e)},removeBox(t,e){const i=t.boxes?t.boxes.indexOf(e):-1;-1!==i&&t.boxes.splice(i,1)},configure(t,e,i){e.fullSize=i.fullSize,e.position=i.position,e.weight=i.weight},update(t,e,i,n){if(!t)return;const o=ze(t.options.layout.padding),s=Math.max(e-o.width,0),a=Math.max(i-o.height,0),r=function(t){const e=function(t){const e=[];let i,n,o;for(i=0,n=(t||[]).length;i<n;++i)o=t[i],e.push({index:i,box:o,pos:o.position,horizontal:o.isHorizontal(),weight:o.weight});return e}(t),i=je(e.filter((t=>t.box.fullSize)),!0),n=je(He(e,"left"),!0),o=je(He(e,"right")),s=je(He(e,"top"),!0),a=je(He(e,"bottom")),r=Ne(e,"x"),l=Ne(e,"y");return{fullSize:i,leftAndTop:n.concat(s),rightAndBottom:o.concat(l).concat(a).concat(r),chartArea:He(e,"chartArea"),vertical:n.concat(o).concat(l),horizontal:s.concat(a).concat(r)}}(t.boxes),l=r.vertical,c=r.horizontal;J(t.boxes,(t=>{"function"==typeof t.beforeLayout&&t.beforeLayout()}));const 
h=l.reduce(((t,e)=>e.box.options&&!1===e.box.options.display?t:t+1),0)||1,d=Object.freeze({outerWidth:e,outerHeight:i,padding:o,availableWidth:s,availableHeight:a,vBoxMaxWidth:s/2/h,hBoxMaxHeight:a/2}),u=Object.assign({},o);Ye(u,ze(n));const f=Object.assign({maxPadding:u,w:s,h:a,x:o.left,y:o.top},o);!function(t,e){let i,n,o;for(i=0,n=t.length;i<n;++i)o=t[i],o.horizontal?(o.width=o.box.fullSize&&e.availableWidth,o.height=e.hBoxMaxHeight):(o.width=e.vBoxMaxWidth,o.height=o.box.fullSize&&e.availableHeight)}(l.concat(c),d),qe(r.fullSize,f,d),qe(l,f,d),qe(c,f,d)&&qe(l,f,d),function(t){const e=t.maxPadding;function i(i){const n=Math.max(e[i]-t[i],0);return t[i]+=n,n}t.y+=i("top"),t.x+=i("left"),i("right"),i("bottom")}(f),Ke(r.leftAndTop,f,d),f.x+=f.w,f.y+=f.h,Ke(r.rightAndBottom,f,d),t.chartArea={left:f.left,top:f.top,right:f.left+f.w,bottom:f.top+f.h,height:f.h,width:f.w},J(r.chartArea,(e=>{const i=e.box;Object.assign(i,t.chartArea),i.update(f.w,f.h)}))}};class Ze{acquireContext(t,e){}releaseContext(t){return!1}addEventListener(t,e,i){}removeEventListener(t,e,i){}getDevicePixelRatio(){return 1}getMaximumSize(t,e,i,n){return e=Math.max(0,e||t.width),i=i||t.height,{width:e,height:Math.max(0,n?Math.floor(e/n):i)}}isAttached(t){return!0}}class Qe extends Ze{acquireContext(t){return t&&t.getContext&&t.getContext("2d")||null}}const Je={touchstart:"mousedown",touchmove:"mousemove",touchend:"mouseup",pointerenter:"mouseenter",pointerdown:"mousedown",pointermove:"mousemove",pointerup:"mouseup",pointerleave:"mouseout",pointerout:"mouseout"},ti=t=>null===t||""===t;const ei=!!ve&&{passive:!0};function ii(t,e,i){t.canvas.removeEventListener(e,i,ei)}function ni(t,e,i){const n=t.canvas,o=n&&de(n)||n,s=new MutationObserver((t=>{const e=de(o);t.forEach((t=>{for(let n=0;n<t.addedNodes.length;n++){const s=t.addedNodes[n];s!==o&&s!==e||i(t.target)}}))}));return s.observe(document,{childList:!0,subtree:!0}),s}function oi(t,e,i){const n=t.canvas,o=n&&de(n);if(!o)return;const s=new 
MutationObserver((t=>{t.forEach((t=>{for(let e=0;e<t.removedNodes.length;e++)if(t.removedNodes[e]===n){i();break}}))}));return s.observe(o,{childList:!0}),s}const si=new Map;let ai=0;function ri(){const t=window.devicePixelRatio;t!==ai&&(ai=t,si.forEach(((e,i)=>{i.currentDevicePixelRatio!==t&&e()})))}function li(t,i,n){const o=t.canvas,s=o&&de(o);if(!s)return;const a=e(((t,e)=>{const i=s.clientWidth;n(t,e),i<s.clientWidth&&n()}),window),r=new ResizeObserver((t=>{const e=t[0],i=e.contentRect.width,n=e.contentRect.height;0===i&&0===n||a(i,n)}));return r.observe(s),function(t,e){si.size||window.addEventListener("resize",ri),si.set(t,e)}(t,a),r}function ci(t,e,i){i&&i.disconnect(),"resize"===e&&function(t){si.delete(t),si.size||window.removeEventListener("resize",ri)}(t)}function hi(t,i,n){const o=t.canvas,s=e((e=>{null!==t.ctx&&n(function(t,e){const i=Je[t.type]||t.type,{x:n,y:o}=xe(t,e);return{type:i,chart:e,native:t,x:void 0!==n?n:null,y:void 0!==o?o:null}}(e,t))}),t,(t=>{const e=t[0];return[e,e.offsetX,e.offsetY]}));return function(t,e,i){t.addEventListener(e,i,ei)}(o,i,s),s}class di extends Ze{acquireContext(t,e){const i=t&&t.getContext&&t.getContext("2d");return i&&i.canvas===t?(function(t,e){const i=t.style,n=t.getAttribute("height"),o=t.getAttribute("width");if(t.$chartjs={initial:{height:n,width:o,style:{display:i.display,height:i.height,width:i.width}}},i.display=i.display||"block",i.boxSizing=i.boxSizing||"border-box",ti(o)){const e=we(t,"width");void 0!==e&&(t.width=e)}if(ti(n))if(""===t.style.height)t.height=t.width/(e||2);else{const e=we(t,"height");void 0!==e&&(t.height=e)}}(t,e),i):null}releaseContext(t){const e=t.canvas;if(!e.$chartjs)return!1;const i=e.$chartjs.initial;["height","width"].forEach((t=>{const n=i[t];$(n)?e.removeAttribute(t):e.setAttribute(t,n)}));const n=i.style||{};return Object.keys(n).forEach((t=>{e.style[t]=n[t]})),e.width=e.width,delete e.$chartjs,!0}addEventListener(t,e,i){this.removeEventListener(t,e);const 
n=t.$proxies||(t.$proxies={}),o={attach:ni,detach:oi,resize:li}[e]||hi;n[e]=o(t,e,i)}removeEventListener(t,e){const i=t.$proxies||(t.$proxies={}),n=i[e];if(!n)return;({attach:ci,detach:ci,resize:ci}[e]||ii)(t,e,n),i[e]=void 0}getDevicePixelRatio(){return window.devicePixelRatio}getMaximumSize(t,e,i,n){return _e(t,e,i,n)}isAttached(t){const e=de(t);return!(!e||!de(e))}}var ui=Object.freeze({__proto__:null,BasePlatform:Ze,BasicPlatform:Qe,DomPlatform:di});const fi=t=>0===t||1===t,gi=(t,e,i)=>-Math.pow(2,10*(t-=1))*Math.sin((t-e)*_t/i),pi=(t,e,i)=>Math.pow(2,-10*t)*Math.sin((t-e)*_t/i)+1,mi={linear:t=>t,easeInQuad:t=>t*t,easeOutQuad:t=>-t*(t-2),easeInOutQuad:t=>(t/=.5)<1?.5*t*t:-.5*(--t*(t-2)-1),easeInCubic:t=>t*t*t,easeOutCubic:t=>(t-=1)*t*t+1,easeInOutCubic:t=>(t/=.5)<1?.5*t*t*t:.5*((t-=2)*t*t+2),easeInQuart:t=>t*t*t*t,easeOutQuart:t=>-((t-=1)*t*t*t-1),easeInOutQuart:t=>(t/=.5)<1?.5*t*t*t*t:-.5*((t-=2)*t*t*t-2),easeInQuint:t=>t*t*t*t*t,easeOutQuint:t=>(t-=1)*t*t*t*t+1,easeInOutQuint:t=>(t/=.5)<1?.5*t*t*t*t*t:.5*((t-=2)*t*t*t*t+2),easeInSine:t=>1-Math.cos(t*Mt),easeOutSine:t=>Math.sin(t*Mt),easeInOutSine:t=>-.5*(Math.cos(bt*t)-1),easeInExpo:t=>0===t?0:Math.pow(2,10*(t-1)),easeOutExpo:t=>1===t?1:1-Math.pow(2,-10*t),easeInOutExpo:t=>fi(t)?t:t<.5?.5*Math.pow(2,10*(2*t-1)):.5*(2-Math.pow(2,-10*(2*t-1))),easeInCirc:t=>t>=1?t:-(Math.sqrt(1-t*t)-1),easeOutCirc:t=>Math.sqrt(1-(t-=1)*t),easeInOutCirc:t=>(t/=.5)<1?-.5*(Math.sqrt(1-t*t)-1):.5*(Math.sqrt(1-(t-=2)*t)+1),easeInElastic:t=>fi(t)?t:gi(t,.075,.3),easeOutElastic:t=>fi(t)?t:pi(t,.075,.3),easeInOutElastic(t){const e=.1125;return fi(t)?t:t<.5?.5*gi(2*t,e,.45):.5+.5*pi(2*t-1,e,.45)},easeInBack(t){const e=1.70158;return t*t*((e+1)*t-e)},easeOutBack(t){const e=1.70158;return(t-=1)*t*((e+1)*t+e)+1},easeInOutBack(t){let e=1.70158;return(t/=.5)<1?t*t*((1+(e*=1.525))*t-e)*.5:.5*((t-=2)*t*((1+(e*=1.525))*t+e)+2)},easeInBounce:t=>1-mi.easeOutBounce(1-t),easeOutBounce(t){const e=7.5625,i=2.75;return 
t<1/i?e*t*t:t<2/i?e*(t-=1.5/i)*t+.75:t<2.5/i?e*(t-=2.25/i)*t+.9375:e*(t-=2.625/i)*t+.984375},easeInOutBounce:t=>t<.5?.5*mi.easeInBounce(2*t):.5*mi.easeOutBounce(2*t-1)+.5},xi="transparent",bi={boolean:(t,e,i)=>i>.5?e:t,color(t,e,i){const n=W(t||xi),o=n.valid&&W(e||xi);return o&&o.valid?o.mix(n,i).hexString():e},number:(t,e,i)=>t+(e-t)*i};class _i{constructor(t,e,i,n){const o=e[i];n=Ve([t.to,n,o,t.from]);const s=Ve([t.from,o,n]);this._active=!0,this._fn=t.fn||bi[t.type||typeof s],this._easing=mi[t.easing]||mi.linear,this._start=Math.floor(Date.now()+(t.delay||0)),this._duration=this._total=Math.floor(t.duration),this._loop=!!t.loop,this._target=e,this._prop=i,this._from=s,this._to=n,this._promises=void 0}active(){return this._active}update(t,e,i){const n=this;if(n._active){n._notify(!1);const o=n._target[n._prop],s=i-n._start,a=n._duration-s;n._start=i,n._duration=Math.floor(Math.max(a,t.duration)),n._total+=s,n._loop=!!t.loop,n._to=Ve([t.to,e,o,t.from]),n._from=Ve([t.from,o,e])}}cancel(){const t=this;t._active&&(t.tick(Date.now()),t._active=!1,t._notify(!1))}tick(t){const e=this,i=t-e._start,n=e._duration,o=e._prop,s=e._from,a=e._loop,r=e._to;let l;if(e._active=s!==r&&(a||i<n),!e._active)return e._target[o]=r,void e._notify(!0);i<0?e._target[o]=s:(l=i/n%2,l=a&&l>1?2-l:l,l=e._easing(Math.min(1,Math.max(0,l))),e._target[o]=e._fn(s,r,l))}wait(){const t=this._promises||(this._promises=[]);return new Promise(((e,i)=>{t.push({res:e,rej:i})}))}_notify(t){const e=t?"res":"rej",i=this._promises||[];for(let t=0;t<i.length;t++)i[t][e]()}}xt.set("animation",{delay:void 0,duration:1e3,easing:"easeOutQuart",fn:void 0,from:void 0,loop:void 0,to:void 0,type:void 0});const 
yi=Object.keys(xt.animation);xt.describe("animation",{_fallback:!1,_indexable:!1,_scriptable:t=>"onProgress"!==t&&"onComplete"!==t&&"fn"!==t}),xt.set("animations",{colors:{type:"color",properties:["color","borderColor","backgroundColor"]},numbers:{type:"number",properties:["x","y","borderWidth","radius","tension"]}}),xt.describe("animations",{_fallback:"animation"}),xt.set("transitions",{active:{animation:{duration:400}},resize:{animation:{duration:0}},show:{animations:{colors:{from:"transparent"},visible:{type:"boolean",duration:0}}},hide:{animations:{colors:{to:"transparent"},visible:{type:"boolean",easing:"linear",fn:t=>0|t}}}});class vi{constructor(t,e){this._chart=t,this._properties=new Map,this.configure(e)}configure(t){if(!U(t))return;const e=this._properties;Object.getOwnPropertyNames(t).forEach((i=>{const n=t[i];if(!U(n))return;const o={};for(const t of yi)o[t]=n[t];(Y(n.properties)&&n.properties||[i]).forEach((t=>{t!==i&&e.has(t)||e.set(t,o)}))}))}_animateOptions(t,e){const i=e.options,n=function(t,e){if(!e)return;let i=t.options;if(!i)return void(t.options=e);i.$shared&&(t.options=i=Object.assign({},i,{$shared:!1,$animations:{}}));return i}(t,i);if(!n)return[];const o=this._createAnimations(n,i);return i.$shared&&function(t,e){const i=[],n=Object.keys(e);for(let e=0;e<n.length;e++){const o=t[n[e]];o&&o.active()&&i.push(o.wait())}return Promise.all(i)}(t.options.$animations,i).then((()=>{t.options=i}),(()=>{})),o}_createAnimations(t,e){const i=this._properties,n=[],o=t.$animations||(t.$animations={}),s=Object.keys(e),a=Date.now();let r;for(r=s.length-1;r>=0;--r){const l=s[r];if("$"===l.charAt(0))continue;if("options"===l){n.push(...this._animateOptions(t,e));continue}const c=e[l];let h=o[l];const d=i.get(l);if(h){if(d&&h.active()){h.update(d,c,a);continue}h.cancel()}d&&d.duration?(o[l]=h=new _i(d,t,l,c),n.push(h)):t[l]=c}return n}update(t,e){if(0===this._properties.size)return void Object.assign(t,e);const i=this._createAnimations(t,e);return 
i.length?(a.add(this._chart,i),!0):void 0}}function wi(t,e){const i=t&&t.options||{},n=i.reverse,o=void 0===i.min?e:0,s=void 0===i.max?e:0;return{start:n?s:o,end:n?o:s}}function Mi(t,e){const i=[],n=t._getSortedDatasetMetas(e);let o,s;for(o=0,s=n.length;o<s;++o)i.push(n[o].index);return i}function ki(t,e,i,n){const o=t.keys,s="single"===n.mode;let a,r,l,c;if(null!==e){for(a=0,r=o.length;a<r;++a){if(l=+o[a],l===i){if(n.all)continue;break}c=t.values[l],X(c)&&(s||0===e||Dt(e)===Dt(c))&&(e+=c)}return e}}function Si(t,e){const i=t&&t.options.stacked;return i||void 0===i&&void 0!==e.stack}function Pi(t,e,i){const n=t[e]||(t[e]={});return n[i]||(n[i]={})}function Di(t,e,i){for(const n of e.getMatchingVisibleMetas("bar").reverse()){const e=t[n.index];if(i&&e>0||!i&&e<0)return n.index}return null}function Ci(t,e){const{chart:i,_cachedMeta:n}=t,o=i._stacks||(i._stacks={}),{iScale:s,vScale:a,index:r}=n,l=s.axis,c=a.axis,h=function(t,e,i){return`${t.id}.${e.id}.${i.stack||i.type}`}(s,a,n),d=e.length;let u;for(let t=0;t<d;++t){const i=e[t],{[l]:n,[c]:s}=i;u=(i._stacks||(i._stacks={}))[c]=Pi(o,h,n),u[r]=s,u._top=Di(u,a,!0),u._bottom=Di(u,a,!1)}}function Oi(t,e){const i=t.scales;return Object.keys(i).filter((t=>i[t].axis===e)).shift()}function Ti(t,e){const i=t.vScale&&t.vScale.axis;if(i){e=e||t._parsed;for(const n of e){const e=n._stacks;if(!e||void 0===e[i]||void 0===e[i][t.index])return;delete e[i][t.index]}}}const Ai=t=>"reset"===t||"none"===t,Li=(t,e)=>e?t:Object.assign({},t);class Ri{constructor(t,e){this.chart=t,this._ctx=t.ctx,this.index=e,this._cachedDataOpts={},this._cachedMeta=this.getMeta(),this._type=this._cachedMeta.type,this.options=void 0,this._parsing=!1,this._data=void 0,this._objectData=void 0,this._sharedOptions=void 0,this._drawStart=void 0,this._drawCount=void 0,this.enableOptionSharing=!1,this.$context=void 0,this._syncList=[],this.initialize()}initialize(){const 
t=this,e=t._cachedMeta;t.configure(),t.linkScales(),e._stacked=Si(e.vScale,e),t.addElements()}updateIndex(t){this.index!==t&&Ti(this._cachedMeta),this.index=t}linkScales(){const t=this,e=t.chart,i=t._cachedMeta,n=t.getDataset(),o=(t,e,i,n)=>"x"===t?e:"r"===t?n:i,s=i.xAxisID=K(n.xAxisID,Oi(e,"x")),a=i.yAxisID=K(n.yAxisID,Oi(e,"y")),r=i.rAxisID=K(n.rAxisID,Oi(e,"r")),l=i.indexAxis,c=i.iAxisID=o(l,s,a,r),h=i.vAxisID=o(l,a,s,r);i.xScale=t.getScaleForId(s),i.yScale=t.getScaleForId(a),i.rScale=t.getScaleForId(r),i.iScale=t.getScaleForId(c),i.vScale=t.getScaleForId(h)}getDataset(){return this.chart.data.datasets[this.index]}getMeta(){return this.chart.getDatasetMeta(this.index)}getScaleForId(t){return this.chart.scales[t]}_getOtherScale(t){const e=this._cachedMeta;return t===e.iScale?e.vScale:e.iScale}reset(){this._update("reset")}_destroy(){const t=this._cachedMeta;this._data&&ce(this._data,this),t._stacked&&Ti(t)}_dataCheck(){const t=this,e=t.getDataset(),i=e.data||(e.data=[]),n=t._data;if(U(i))t._data=function(t){const e=Object.keys(t),i=new Array(e.length);let n,o,s;for(n=0,o=e.length;n<o;++n)s=e[n],i[n]={x:s,y:t[s]};return i}(i);else if(n!==i){if(n){ce(n,t);const e=t._cachedMeta;Ti(e),e._parsed=[]}i&&Object.isExtensible(i)&&le(i,t),t._syncList=[],t._data=i}}addElements(){const t=this,e=t._cachedMeta;t._dataCheck(),t.datasetElementType&&(e.dataset=new t.datasetElementType)}buildOrUpdateElements(t){const e=this,i=e._cachedMeta,n=e.getDataset();let o=!1;e._dataCheck();const s=i._stacked;i._stacked=Si(i.vScale,i),i.stack!==n.stack&&(o=!0,Ti(i),i.stack=n.stack),e._resyncElements(t),(o||s!==i._stacked)&&Ci(e,i._parsed)}configure(){const t=this,e=t.chart.config,i=e.datasetScopeKeys(t._type),n=e.getOptionScopes(t.getDataset(),i,!0);t.options=e.createResolver(n,t.getContext()),t._parsing=t.options.parsing}parse(t,e){const i=this,{_cachedMeta:n,_data:o}=i,{iScale:s,_stacked:a}=n,r=s.axis;let 
l,c,h,d=0===t&&e===o.length||n._sorted,u=t>0&&n._parsed[t-1];if(!1===i._parsing)n._parsed=o,n._sorted=!0,h=o;else{h=Y(o[t])?i.parseArrayData(n,o,t,e):U(o[t])?i.parseObjectData(n,o,t,e):i.parsePrimitiveData(n,o,t,e);const s=()=>null===c[r]||u&&c[r]<u[r];for(l=0;l<e;++l)n._parsed[l+t]=c=h[l],d&&(s()&&(d=!1),u=c);n._sorted=d}a&&Ci(i,h)}parsePrimitiveData(t,e,i,n){const{iScale:o,vScale:s}=t,a=o.axis,r=s.axis,l=o.getLabels(),c=o===s,h=new Array(n);let d,u,f;for(d=0,u=n;d<u;++d)f=d+i,h[d]={[a]:c||o.parse(l[f],f),[r]:s.parse(e[f],f)};return h}parseArrayData(t,e,i,n){const{xScale:o,yScale:s}=t,a=new Array(n);let r,l,c,h;for(r=0,l=n;r<l;++r)c=r+i,h=e[c],a[r]={x:o.parse(h[0],c),y:s.parse(h[1],c)};return a}parseObjectData(t,e,i,n){const{xScale:o,yScale:s}=t,{xAxisKey:a="x",yAxisKey:r="y"}=this._parsing,l=new Array(n);let c,h,d,u;for(c=0,h=n;c<h;++c)d=c+i,u=e[d],l[c]={x:o.parse(lt(u,a),d),y:s.parse(lt(u,r),d)};return l}getParsed(t){return this._cachedMeta._parsed[t]}getDataElement(t){return this._cachedMeta.data[t]}applyStack(t,e,i){const n=this.chart,o=this._cachedMeta,s=e[t.axis];return ki({keys:Mi(n,!0),values:e._stacks[t.axis]},s,o.index,{mode:i})}updateRangeFromParsed(t,e,i,n){const o=i[e.axis];let s=null===o?NaN:o;const a=n&&i._stacks[e.axis];n&&a&&(n.values=a,t.min=Math.min(t.min,s),t.max=Math.max(t.max,s),s=ki(n,o,this._cachedMeta.index,{all:!0})),t.min=Math.min(t.min,s),t.max=Math.max(t.max,s)}getMinMax(t,e){const i=this,n=i._cachedMeta,o=n._parsed,s=n._sorted&&t===n.iScale,a=o.length,r=i._getOtherScale(t),l=e&&n._stacked&&{keys:Mi(i.chart,!0),values:null},c={min:Number.POSITIVE_INFINITY,max:Number.NEGATIVE_INFINITY},{min:h,max:d}=function(t){const{min:e,max:i,minDefined:n,maxDefined:o}=t.getUserBounds();return{min:n?e:Number.NEGATIVE_INFINITY,max:o?i:Number.POSITIVE_INFINITY}}(r);let u,f,g,p;function m(){return 
g=o[u],f=g[t.axis],p=g[r.axis],!X(f)||h>p||d<p}for(u=0;u<a&&(m()||(i.updateRangeFromParsed(c,t,g,l),!s));++u);if(s)for(u=a-1;u>=0;--u)if(!m()){i.updateRangeFromParsed(c,t,g,l);break}return c}getAllParsedValues(t){const e=this._cachedMeta._parsed,i=[];let n,o,s;for(n=0,o=e.length;n<o;++n)s=e[n][t.axis],X(s)&&i.push(s);return i}getMaxOverflow(){return!1}getLabelAndValue(t){const e=this._cachedMeta,i=e.iScale,n=e.vScale,o=this.getParsed(t);return{label:i?""+i.getLabelForValue(o[i.axis]):"",value:n?""+n.getLabelForValue(o[n.axis]):""}}_update(t){const e=this,i=e._cachedMeta;e.configure(),e._cachedDataOpts={},e.update(t||"default"),i._clip=function(t){let e,i,n,o;return U(t)?(e=t.top,i=t.right,n=t.bottom,o=t.left):e=i=n=o=t,{top:e,right:i,bottom:n,left:o}}(K(e.options.clip,function(t,e,i){if(!1===i)return!1;const n=wi(t,i),o=wi(e,i);return{top:o.end,right:n.end,bottom:o.start,left:n.start}}(i.xScale,i.yScale,e.getMaxOverflow())))}update(t){}draw(){const t=this,e=t._ctx,i=t.chart,n=t._cachedMeta,o=n.data||[],s=i.chartArea,a=[],r=t._drawStart||0,l=t._drawCount||o.length-r;let c;for(n.dataset&&n.dataset.draw(e,s,r,l),c=r;c<r+l;++c){const t=o[c];t.active?a.push(t):t.draw(e,s)}for(c=0;c<a.length;++c)a[c].draw(e,s)}getStyle(t,e){const i=e?"active":"default";return void 0===t&&this._cachedMeta.dataset?this.resolveDatasetElementOptions(i):this.resolveDataElementOptions(t||0,i)}getContext(t,e,i){const n=this,o=n.getDataset();let s;if(t>=0&&t<n._cachedMeta.data.length){const e=n._cachedMeta.data[t];s=e.$context||(e.$context=function(t,e,i){return Object.assign(Object.create(t),{active:!1,dataIndex:e,parsed:void 0,raw:void 0,element:i,index:e,mode:"default",type:"data"})}(n.getContext(),t,e)),s.parsed=n.getParsed(t),s.raw=o.data[t],s.index=s.dataIndex=t}else s=n.$context||(n.$context=function(t,e){return Object.assign(Object.create(t),{active:!1,dataset:void 
0,datasetIndex:e,index:e,mode:"default",type:"dataset"})}(n.chart.getContext(),n.index)),s.dataset=o,s.index=s.datasetIndex=n.index;return s.active=!!e,s.mode=i,s}resolveDatasetElementOptions(t){return this._resolveElementOptions(this.datasetElementType.id,t)}resolveDataElementOptions(t,e){return this._resolveElementOptions(this.dataElementType.id,e,t)}_resolveElementOptions(t,e="default",i){const n=this,o="active"===e,s=n._cachedDataOpts,a=t+"-"+e,r=s[a],l=n.enableOptionSharing&&ht(i);if(r)return Li(r,l);const c=n.chart.config,h=c.datasetElementScopeKeys(n._type,t),d=o?[`${t}Hover`,"hover",t,""]:[t,""],u=c.getOptionScopes(n.getDataset(),h),f=Object.keys(xt.elements[t]),g=c.resolveNamedOptions(u,f,(()=>n.getContext(i,o)),d);return g.$shared&&(g.$shared=l,s[a]=Object.freeze(Li(g,l))),g}_resolveAnimations(t,e,i){const n=this,o=n.chart,s=n._cachedDataOpts,a=`animation-${e}`,r=s[a];if(r)return r;let l;if(!1!==o.options.animation){const o=n.chart.config,s=o.datasetAnimationScopeKeys(n._type,e),a=o.getOptionScopes(n.getDataset(),s);l=o.createResolver(a,n.getContext(t,i,e))}const c=new vi(o,l&&l.animations);return l&&l._cacheable&&(s[a]=Object.freeze(c)),c}getSharedOptions(t){if(t.$shared)return this._sharedOptions||(this._sharedOptions=Object.assign({},t))}includeOptions(t,e){return!e||Ai(t)||this.chart._animationsDisabled}updateElement(t,e,i,n){Ai(n)?Object.assign(t,i):this._resolveAnimations(e,n).update(t,i)}updateSharedOptions(t,e,i){t&&!Ai(e)&&this._resolveAnimations(void 0,e).update(t,i)}_setStyle(t,e,i,n){t.active=n;const o=this.getStyle(e,n);this._resolveAnimations(e,i,n).update(t,{options:!n&&this.getSharedOptions(o)||o})}removeHoverStyle(t,e,i){this._setStyle(t,i,"active",!1)}setHoverStyle(t,e,i){this._setStyle(t,i,"active",!0)}_removeDatasetHoverStyle(){const t=this._cachedMeta.dataset;t&&this._setStyle(t,void 0,"active",!1)}_setDatasetHoverStyle(){const t=this._cachedMeta.dataset;t&&this._setStyle(t,void 0,"active",!0)}_resyncElements(t){const 
e=this,i=e._data,n=e._cachedMeta.data;for(const[t,i,n]of e._syncList)e[t](i,n);e._syncList=[];const o=n.length,s=i.length,a=Math.min(s,o);a&&e.parse(0,a),s>o?e._insertElements(o,s-o,t):s<o&&e._removeElements(s,o-s)}_insertElements(t,e,i=!0){const n=this,o=n._cachedMeta,s=o.data,a=t+e;let r;const l=t=>{for(t.length+=e,r=t.length-1;r>=a;r--)t[r]=t[r-e]};for(l(s),r=t;r<a;++r)s[r]=new n.dataElementType;n._parsing&&l(o._parsed),n.parse(t,e),i&&n.updateElements(s,t,e,"reset")}updateElements(t,e,i,n){}_removeElements(t,e){const i=this._cachedMeta;if(this._parsing){const n=i._parsed.splice(t,e);i._stacked&&Ti(i,n)}i.data.splice(t,e)}_onDataPush(){const t=arguments.length;this._syncList.push(["_insertElements",this.getDataset().data.length-t,t])}_onDataPop(){this._syncList.push(["_removeElements",this._cachedMeta.data.length-1,1])}_onDataShift(){this._syncList.push(["_removeElements",0,1])}_onDataSplice(t,e){this._syncList.push(["_removeElements",t,e]),this._syncList.push(["_insertElements",t,arguments.length-2])}_onDataUnshift(){this._syncList.push(["_insertElements",0,arguments.length])}}Ri.defaults={},Ri.prototype.datasetElementType=null,Ri.prototype.dataElementType=null;class Ei{constructor(){this.x=void 0,this.y=void 0,this.active=!1,this.options=void 0,this.$animations=void 0}tooltipPosition(t){const{x:e,y:i}=this.getProps(["x","y"],t);return{x:e,y:i}}hasValue(){return Tt(this.x)&&Tt(this.y)}getProps(t,e){const i=this,n=this.$animations;if(!e||!n)return i;const o={};return t.forEach((t=>{o[t]=n[t]&&n[t].active()?n[t]._to:i[t]})),o}}Ei.defaults={},Ei.defaultRoutes=void 0;const Ii=new Map;function zi(t,e,i){return function(t,e){e=e||{};const i=t+JSON.stringify(e);let n=Ii.get(i);return n||(n=new Intl.NumberFormat(t,e),Ii.set(i,n)),n}(e,i).format(t)}const Fi={values:t=>Y(t)?t:""+t,numeric(t,e,i){if(0===t)return"0";const n=this.chart.options.locale;let o,s=t;if(i.length>1){const 
e=Math.max(Math.abs(i[0].value),Math.abs(i[i.length-1].value));(e<1e-4||e>1e15)&&(o="scientific"),s=function(t,e){let i=e.length>3?e[2].value-e[1].value:e[1].value-e[0].value;Math.abs(i)>=1&&t!==Math.floor(t)&&(i=t-Math.floor(t));return i}(t,i)}const a=Pt(Math.abs(s)),r=Math.max(Math.min(-1*Math.floor(a),20),0),l={notation:o,minimumFractionDigits:r,maximumFractionDigits:r};return Object.assign(l,this.options.ticks.format),zi(t,n,l)},logarithmic(t,e,i){if(0===t)return"0";const n=t/Math.pow(10,Math.floor(Pt(t)));return 1===n||2===n||5===n?Fi.numeric.call(this,t,e,i):""}};var Vi={formatters:Fi};function Bi(t,e){const i=t.options.ticks,n=i.maxTicksLimit||function(t){const e=t.options.offset,i=t._tickSize(),n=t._length/i+(e?0:1),o=t._maxLength/i;return Math.floor(Math.min(n,o))}(t),o=i.major.enabled?function(t){const e=[];let i,n;for(i=0,n=t.length;i<n;i++)t[i].major&&e.push(i);return e}(e):[],s=o.length,a=o[0],r=o[s-1],l=[];if(s>n)return function(t,e,i,n){let o,s=0,a=i[0];for(n=Math.ceil(n),o=0;o<t.length;o++)o===a&&(e.push(t[o]),s++,a=i[s*n])}(e,l,o,s/n),l;const c=function(t,e,i){const n=function(t){const e=t.length;let i,n;if(e<2)return!1;for(n=t[0],i=1;i<e;++i)if(t[i]-t[i-1]!==n)return!1;return n}(t),o=e.length/i;if(!n)return Math.max(o,1);const s=Ot(n);for(let t=0,e=s.length-1;t<e;t++){const e=s[t];if(e>o)return e}return Math.max(o,1)}(o,e,n);if(s>0){let t,i;const n=s>1?Math.round((r-a)/(s-1)):null;for(Wi(e,l,c,$(n)?0:a-n,a),t=0,i=s-1;t<i;t++)Wi(e,l,c,o[t],o[t+1]);return Wi(e,l,c,r,$(n)?e.length:r+n),l}return Wi(e,l,c),l}function Wi(t,e,i,n,o){const s=K(n,0),a=Math.min(K(o,t.length),t.length);let 
r,l,c,h=0;for(i=Math.ceil(i),o&&(r=o-n,i=r/Math.floor(r/i)),c=s;c<0;)h++,c=Math.round(s+h*i);for(l=Math.max(s,0);l<a;l++)l===c&&(e.push(t[l]),h++,c=Math.round(s+h*i))}xt.set("scale",{display:!0,offset:!1,reverse:!1,beginAtZero:!1,bounds:"ticks",grace:0,grid:{display:!0,lineWidth:1,drawBorder:!0,drawOnChartArea:!0,drawTicks:!0,tickLength:8,tickWidth:(t,e)=>e.lineWidth,tickColor:(t,e)=>e.color,offset:!1,borderDash:[],borderDashOffset:0,borderWidth:1},title:{display:!1,text:"",padding:{top:4,bottom:4}},ticks:{minRotation:0,maxRotation:50,mirror:!1,textStrokeWidth:0,textStrokeColor:"",padding:3,display:!0,autoSkip:!0,autoSkipPadding:3,labelOffset:0,callback:Vi.formatters.values,minor:{},major:{},align:"center",crossAlign:"near",showLabelBackdrop:!1,backdropColor:"rgba(255, 255, 255, 0.75)",backdropPadding:2}}),xt.route("scale.ticks","color","","color"),xt.route("scale.grid","color","","borderColor"),xt.route("scale.grid","borderColor","","borderColor"),xt.route("scale.title","color","","color"),xt.describe("scale",{_fallback:!1,_scriptable:t=>!t.startsWith("before")&&!t.startsWith("after")&&"callback"!==t&&"parser"!==t,_indexable:t=>"borderDash"!==t&&"tickBorderDash"!==t}),xt.describe("scales",{_fallback:"scale"}),xt.describe("scale.ticks",{_scriptable:t=>"backdropPadding"!==t&&"callback"!==t,_indexable:t=>"backdropPadding"!==t});const Hi=(t,e,i)=>"top"===e||"left"===e?t[e]+i:t[e]-i;function Ni(t,e){const i=[],n=t.length/e,o=t.length;let s=0;for(;s<o;s+=n)i.push(t[Math.floor(s)]);return i}function ji(t,e,i){const n=t.ticks.length,o=Math.min(e,n-1),s=t._startPixel,a=t._endPixel,r=1e-6;let l,c=t.getPixelForTick(o);if(!(i&&(l=1===n?Math.max(c-s,a-c):0===e?(t.getPixelForTick(1)-c)/2:(c-t.getPixelForTick(o-1))/2,c+=o<e?l:-l,c<s-r||c>a+r)))return c}function $i(t){return t.drawTicks?t.tickLength:0}function Yi(t,e){if(!t.display)return 0;const i=Fe(t.font,e),n=ze(t.padding);return(Y(t.text)?t.text.length:1)*i.lineHeight+n.height}function Ui(t,e,i){let 
o=n(t);return(i&&"right"!==e||!i&&"right"===e)&&(o=(t=>"left"===t?"right":"right"===t?"left":t)(o)),o}class Xi extends Ei{constructor(t){super(),this.id=t.id,this.type=t.type,this.options=void 0,this.ctx=t.ctx,this.chart=t.chart,this.top=void 0,this.bottom=void 0,this.left=void 0,this.right=void 0,this.width=void 0,this.height=void 0,this._margins={left:0,right:0,top:0,bottom:0},this.maxWidth=void 0,this.maxHeight=void 0,this.paddingTop=void 0,this.paddingBottom=void 0,this.paddingLeft=void 0,this.paddingRight=void 0,this.axis=void 0,this.labelRotation=void 0,this.min=void 0,this.max=void 0,this._range=void 0,this.ticks=[],this._gridLineItems=null,this._labelItems=null,this._labelSizes=null,this._length=0,this._maxLength=0,this._longestTextCache={},this._startPixel=void 0,this._endPixel=void 0,this._reversePixels=!1,this._userMax=void 0,this._userMin=void 0,this._suggestedMax=void 0,this._suggestedMin=void 0,this._ticksLength=0,this._borderValue=0,this._cache={},this._dataLimitsCached=!1,this.$context=void 0}init(t){const e=this;e.options=t.setContext(e.getContext()),e.axis=t.axis,e._userMin=e.parse(t.min),e._userMax=e.parse(t.max),e._suggestedMin=e.parse(t.suggestedMin),e._suggestedMax=e.parse(t.suggestedMax)}parse(t,e){return t}getUserBounds(){let{_userMin:t,_userMax:e,_suggestedMin:i,_suggestedMax:n}=this;return t=q(t,Number.POSITIVE_INFINITY),e=q(e,Number.NEGATIVE_INFINITY),i=q(i,Number.POSITIVE_INFINITY),n=q(n,Number.NEGATIVE_INFINITY),{min:q(t,i),max:q(e,n),minDefined:X(t),maxDefined:X(e)}}getMinMax(t){const e=this;let i,{min:n,max:o,minDefined:s,maxDefined:a}=e.getUserBounds();if(s&&a)return{min:n,max:o};const r=e.getMatchingVisibleMetas();for(let l=0,c=r.length;l<c;++l)i=r[l].controller.getMinMax(e,t),s||(n=Math.min(n,i.min)),a||(o=Math.max(o,i.max));return{min:q(n,q(o,n)),max:q(o,q(n,o))}}getPadding(){const t=this;return{left:t.paddingLeft||0,top:t.paddingTop||0,right:t.paddingRight||0,bottom:t.paddingBottom||0}}getTicks(){return 
this.ticks}getLabels(){const t=this.chart.data;return this.options.labels||(this.isHorizontal()?t.xLabels:t.yLabels)||t.labels||[]}beforeLayout(){this._cache={},this._dataLimitsCached=!1}beforeUpdate(){Q(this.options.beforeUpdate,[this])}update(t,e,i){const n=this,o=n.options.ticks,s=o.sampleSize;n.beforeUpdate(),n.maxWidth=t,n.maxHeight=e,n._margins=i=Object.assign({left:0,right:0,top:0,bottom:0},i),n.ticks=null,n._labelSizes=null,n._gridLineItems=null,n._labelItems=null,n.beforeSetDimensions(),n.setDimensions(),n.afterSetDimensions(),n._maxLength=n.isHorizontal()?n.width+i.left+i.right:n.height+i.top+i.bottom,n._dataLimitsCached||(n.beforeDataLimits(),n.determineDataLimits(),n.afterDataLimits(),n._range=Be(n,n.options.grace),n._dataLimitsCached=!0),n.beforeBuildTicks(),n.ticks=n.buildTicks()||[],n.afterBuildTicks();const a=s<n.ticks.length;n._convertTicksToLabels(a?Ni(n.ticks,s):n.ticks),n.configure(),n.beforeCalculateLabelRotation(),n.calculateLabelRotation(),n.afterCalculateLabelRotation(),o.display&&(o.autoSkip||"auto"===o.source)&&(n.ticks=Bi(n,n.ticks),n._labelSizes=null),a&&n._convertTicksToLabels(n.ticks),n.beforeFit(),n.fit(),n.afterFit(),n.afterUpdate()}configure(){const t=this;let e,i,n=t.options.reverse;t.isHorizontal()?(e=t.left,i=t.right):(e=t.top,i=t.bottom,n=!n),t._startPixel=e,t._endPixel=i,t._reversePixels=n,t._length=i-e,t._alignToPixels=t.options.alignToPixels}afterUpdate(){Q(this.options.afterUpdate,[this])}beforeSetDimensions(){Q(this.options.beforeSetDimensions,[this])}setDimensions(){const t=this;t.isHorizontal()?(t.width=t.maxWidth,t.left=0,t.right=t.width):(t.height=t.maxHeight,t.top=0,t.bottom=t.height),t.paddingLeft=0,t.paddingTop=0,t.paddingRight=0,t.paddingBottom=0}afterSetDimensions(){Q(this.options.afterSetDimensions,[this])}_callHooks(t){const 
e=this;e.chart.notifyPlugins(t,e.getContext()),Q(e.options[t],[e])}beforeDataLimits(){this._callHooks("beforeDataLimits")}determineDataLimits(){}afterDataLimits(){this._callHooks("afterDataLimits")}beforeBuildTicks(){this._callHooks("beforeBuildTicks")}buildTicks(){return[]}afterBuildTicks(){this._callHooks("afterBuildTicks")}beforeTickToLabelConversion(){Q(this.options.beforeTickToLabelConversion,[this])}generateTickLabels(t){const e=this,i=e.options.ticks;let n,o,s;for(n=0,o=t.length;n<o;n++)s=t[n],s.label=Q(i.callback,[s.value,n,t],e);for(n=0;n<o;n++)$(t[n].label)&&(t.splice(n,1),o--,n--)}afterTickToLabelConversion(){Q(this.options.afterTickToLabelConversion,[this])}beforeCalculateLabelRotation(){Q(this.options.beforeCalculateLabelRotation,[this])}calculateLabelRotation(){const t=this,e=t.options,i=e.ticks,n=t.ticks.length,o=i.minRotation||0,s=i.maxRotation;let a,r,l,c=o;if(!t._isVisible()||!i.display||o>=s||n<=1||!t.isHorizontal())return void(t.labelRotation=o);const h=t._getLabelSizes(),d=h.widest.width,u=h.highest.height,f=Nt(t.chart.width-d,0,t.maxWidth);a=e.offset?t.maxWidth/n:f/(n-1),d+6>a&&(a=f/(n-(e.offset?.5:1)),r=t.maxHeight-$i(e.grid)-i.padding-Yi(e.title,t.chart.options.font),l=Math.sqrt(d*d+u*u),c=It(Math.min(Math.asin(Math.min((h.highest.height+6)/a,1)),Math.asin(Math.min(r/l,1))-Math.asin(u/l))),c=Math.max(o,Math.min(s,c))),t.labelRotation=c}afterCalculateLabelRotation(){Q(this.options.afterCalculateLabelRotation,[this])}beforeFit(){Q(this.options.beforeFit,[this])}fit(){const t=this,e={width:0,height:0},{chart:i,options:{ticks:n,title:o,grid:s}}=t,a=t._isVisible(),r=t.isHorizontal();if(a){const a=Yi(o,i.options.font);if(r?(e.width=t.maxWidth,e.height=$i(s)+a):(e.height=t.maxHeight,e.width=$i(s)+a),n.display&&t.ticks.length){const{first:i,last:o,widest:s,highest:a}=t._getLabelSizes(),l=2*n.padding,c=Et(t.labelRotation),h=Math.cos(c),d=Math.sin(c);if(r){const i=n.mirror?0:d*s.width+h*a.height;e.height=Math.min(t.maxHeight,e.height+i+l)}else{const 
i=n.mirror?0:h*s.width+d*a.height;e.width=Math.min(t.maxWidth,e.width+i+l)}t._calculatePadding(i,o,d,h)}}t._handleMargins(),r?(t.width=t._length=i.width-t._margins.left-t._margins.right,t.height=e.height):(t.width=e.width,t.height=t._length=i.height-t._margins.top-t._margins.bottom)}_calculatePadding(t,e,i,n){const o=this,{ticks:{align:s,padding:a},position:r}=o.options,l=0!==o.labelRotation,c="top"!==r&&"x"===o.axis;if(o.isHorizontal()){const r=o.getPixelForTick(0)-o.left,h=o.right-o.getPixelForTick(o.ticks.length-1);let d=0,u=0;l?c?(d=n*t.width,u=i*e.height):(d=i*t.height,u=n*e.width):"start"===s?u=e.width:"end"===s?d=t.width:(d=t.width/2,u=e.width/2),o.paddingLeft=Math.max((d-r+a)*o.width/(o.width-r),0),o.paddingRight=Math.max((u-h+a)*o.width/(o.width-h),0)}else{let i=e.height/2,n=t.height/2;"start"===s?(i=0,n=t.height):"end"===s&&(i=e.height,n=0),o.paddingTop=i+a,o.paddingBottom=n+a}}_handleMargins(){const t=this;t._margins&&(t._margins.left=Math.max(t.paddingLeft,t._margins.left),t._margins.top=Math.max(t.paddingTop,t._margins.top),t._margins.right=Math.max(t.paddingRight,t._margins.right),t._margins.bottom=Math.max(t.paddingBottom,t._margins.bottom))}afterFit(){Q(this.options.afterFit,[this])}isHorizontal(){const{axis:t,position:e}=this.options;return"top"===e||"bottom"===e||"x"===t}isFullSize(){return this.options.fullSize}_convertTicksToLabels(t){const e=this;e.beforeTickToLabelConversion(),e.generateTickLabels(t),e.afterTickToLabelConversion()}_getLabelSizes(){const t=this;let e=t._labelSizes;if(!e){const i=t.options.ticks.sampleSize;let n=t.ticks;i<n.length&&(n=Ni(n,i)),t._labelSizes=e=t._computeLabelSizes(n,n.length)}return e}_computeLabelSizes(t,e){const{ctx:i,_longestTextCache:n}=this,o=[],s=[];let 
a,r,l,c,h,d,u,f,g,p,m,x=0,b=0;for(a=0;a<e;++a){if(c=t[a].label,h=this._resolveTickFontOptions(a),i.font=d=h.string,u=n[d]=n[d]||{data:{},gc:[]},f=h.lineHeight,g=p=0,$(c)||Y(c)){if(Y(c))for(r=0,l=c.length;r<l;++r)m=c[r],$(m)||Y(m)||(g=Yt(i,u.data,u.gc,g,m),p+=f)}else g=Yt(i,u.data,u.gc,g,c),p=f;o.push(g),s.push(p),x=Math.max(g,x),b=Math.max(p,b)}!function(t,e){J(t,(t=>{const i=t.gc,n=i.length/2;let o;if(n>e){for(o=0;o<n;++o)delete t.data[i[o]];i.splice(0,n)}}))}(n,e);const _=o.indexOf(x),y=s.indexOf(b),v=t=>({width:o[t]||0,height:s[t]||0});return{first:v(0),last:v(e-1),widest:v(_),highest:v(y),widths:o,heights:s}}getLabelForValue(t){return t}getPixelForValue(t,e){return NaN}getValueForPixel(t){}getPixelForTick(t){const e=this.ticks;return t<0||t>e.length-1?null:this.getPixelForValue(e[t].value)}getPixelForDecimal(t){const e=this;e._reversePixels&&(t=1-t);const i=e._startPixel+t*e._length;return jt(e._alignToPixels?Xt(e.chart,i,0):i)}getDecimalForPixel(t){const e=(t-this._startPixel)/this._length;return this._reversePixels?1-e:e}getBasePixel(){return this.getPixelForValue(this.getBaseValue())}getBaseValue(){const{min:t,max:e}=this;return t<0&&e<0?e:t>0&&e>0?t:0}getContext(t){const e=this,i=e.ticks||[];if(t>=0&&t<i.length){const n=i[t];return n.$context||(n.$context=function(t,e,i){return Object.assign(Object.create(t),{tick:i,index:e,type:"tick"})}(e.getContext(),t,n))}return e.$context||(e.$context=(n=e.chart.getContext(),o=e,Object.assign(Object.create(n),{scale:o,type:"scale"})));var n,o}_tickSize(){const t=this,e=t.options.ticks,i=Et(t.labelRotation),n=Math.abs(Math.cos(i)),o=Math.abs(Math.sin(i)),s=t._getLabelSizes(),a=e.autoSkipPadding||0,r=s?s.widest.width+a:0,l=s?s.highest.height+a:0;return t.isHorizontal()?l*n>r*o?r/n:l/o:l*o<r*n?l/n:r/o}_isVisible(){const t=this.options.display;return"auto"!==t?!!t:this.getMatchingVisibleMetas().length>0}_computeGridLineItems(t){const 
e=this,i=e.axis,n=e.chart,o=e.options,{grid:s,position:a}=o,r=s.offset,l=e.isHorizontal(),c=e.ticks.length+(r?1:0),h=$i(s),d=[],u=s.setContext(e.getContext()),f=u.drawBorder?u.borderWidth:0,g=f/2,p=function(t){return Xt(n,t,f)};let m,x,b,_,y,v,w,M,k,S,P,D;if("top"===a)m=p(e.bottom),v=e.bottom-h,M=m-g,S=p(t.top)+g,D=t.bottom;else if("bottom"===a)m=p(e.top),S=t.top,D=p(t.bottom)-g,v=m+g,M=e.top+h;else if("left"===a)m=p(e.right),y=e.right-h,w=m-g,k=p(t.left)+g,P=t.right;else if("right"===a)m=p(e.left),k=t.left,P=p(t.right)-g,y=m+g,w=e.left+h;else if("x"===i){if("center"===a)m=p((t.top+t.bottom)/2+.5);else if(U(a)){const t=Object.keys(a)[0],i=a[t];m=p(e.chart.scales[t].getPixelForValue(i))}S=t.top,D=t.bottom,v=m+g,M=v+h}else if("y"===i){if("center"===a)m=p((t.left+t.right)/2);else if(U(a)){const t=Object.keys(a)[0],i=a[t];m=p(e.chart.scales[t].getPixelForValue(i))}y=m-g,w=y-h,k=t.left,P=t.right}for(x=0;x<c;++x){const t=s.setContext(e.getContext(x)),i=t.lineWidth,o=t.color,a=s.borderDash||[],c=t.borderDashOffset,h=t.tickWidth,u=t.tickColor,f=t.tickBorderDash||[],g=t.tickBorderDashOffset;b=ji(e,x,r),void 0!==b&&(_=Xt(n,b,i),l?y=w=k=P=_:v=M=S=D=_,d.push({tx1:y,ty1:v,tx2:w,ty2:M,x1:k,y1:S,x2:P,y2:D,width:i,color:o,borderDash:a,borderDashOffset:c,tickWidth:h,tickColor:u,tickBorderDash:f,tickBorderDashOffset:g}))}return e._ticksLength=c,e._borderValue=m,d}_computeLabelItems(t){const e=this,i=e.axis,n=e.options,{position:o,ticks:s}=n,a=e.isHorizontal(),r=e.ticks,{align:l,crossAlign:c,padding:h,mirror:d}=s,u=$i(n.grid),f=u+h,g=d?-h:f,p=-Et(e.labelRotation),m=[];let x,b,_,y,v,w,M,k,S,P,D,C,O="middle";if("top"===o)w=e.bottom-g,M=e._getXAxisLabelAlignment();else if("bottom"===o)w=e.top+g,M=e._getXAxisLabelAlignment();else if("left"===o){const t=e._getYAxisLabelAlignment(u);M=t.textAlign,v=t.x}else if("right"===o){const t=e._getYAxisLabelAlignment(u);M=t.textAlign,v=t.x}else if("x"===i){if("center"===o)w=(t.top+t.bottom)/2+f;else if(U(o)){const 
t=Object.keys(o)[0],i=o[t];w=e.chart.scales[t].getPixelForValue(i)+f}M=e._getXAxisLabelAlignment()}else if("y"===i){if("center"===o)v=(t.left+t.right)/2-f;else if(U(o)){const t=Object.keys(o)[0],i=o[t];v=e.chart.scales[t].getPixelForValue(i)}M=e._getYAxisLabelAlignment(u).textAlign}"y"===i&&("start"===l?O="top":"end"===l&&(O="bottom"));const T=e._getLabelSizes();for(x=0,b=r.length;x<b;++x){_=r[x],y=_.label;const t=s.setContext(e.getContext(x));k=e.getPixelForTick(x)+s.labelOffset,S=e._resolveTickFontOptions(x),P=S.lineHeight,D=Y(y)?y.length:1;const i=D/2,n=t.color,l=t.textStrokeColor,h=t.textStrokeWidth;let u;if(a?(v=k,C="top"===o?"near"===c||0!==p?-D*P+P/2:"center"===c?-T.highest.height/2-i*P+P:-T.highest.height+P/2:"near"===c||0!==p?P/2:"center"===c?T.highest.height/2-i*P:T.highest.height-D*P,d&&(C*=-1)):(w=k,C=(1-D)*P/2),t.showLabelBackdrop){const e=ze(t.backdropPadding),i=T.heights[x],n=T.widths[x];let o=w+C-e.top,s=v-e.left;switch(O){case"middle":o-=i/2;break;case"bottom":o-=i}switch(M){case"center":s-=n/2;break;case"right":s-=n}u={left:s,top:o,width:n+e.width,height:i+e.height,color:t.backdropColor}}m.push({rotation:p,label:y,font:S,color:n,strokeColor:l,strokeWidth:h,textOffset:C,textAlign:M,textBaseline:O,translation:[v,w],backdrop:u})}return m}_getXAxisLabelAlignment(){const{position:t,ticks:e}=this.options;if(-Et(this.labelRotation))return"top"===t?"left":"right";let i="center";return"start"===e.align?i="left":"end"===e.align&&(i="right"),i}_getYAxisLabelAlignment(t){const e=this,{position:i,ticks:{crossAlign:n,mirror:o,padding:s}}=e.options,a=t+s,r=e._getLabelSizes().widest.width;let l,c;return"left"===i?o?(l="left",c=e.right+s):(c=e.right-a,"near"===n?l="right":"center"===n?(l="center",c-=r/2):(l="left",c=e.left)):"right"===i?o?(l="right",c=e.left+s):(c=e.left+a,"near"===n?l="left":"center"===n?(l="center",c+=r/2):(l="right",c=e.right)):l="right",{textAlign:l,x:c}}_computeLabelArea(){const t=this;if(t.options.ticks.mirror)return;const 
e=t.chart,i=t.options.position;return"left"===i||"right"===i?{top:0,left:t.left,bottom:e.height,right:t.right}:"top"===i||"bottom"===i?{top:t.top,left:0,bottom:t.bottom,right:e.width}:void 0}drawBackground(){const{ctx:t,options:{backgroundColor:e},left:i,top:n,width:o,height:s}=this;e&&(t.save(),t.fillStyle=e,t.fillRect(i,n,o,s),t.restore())}getLineWidthForValue(t){const e=this,i=e.options.grid;if(!e._isVisible()||!i.display)return 0;const n=e.ticks.findIndex((e=>e.value===t));if(n>=0){return i.setContext(e.getContext(n)).lineWidth}return 0}drawGrid(t){const e=this,i=e.options.grid,n=e.ctx,o=e._gridLineItems||(e._gridLineItems=e._computeGridLineItems(t));let s,a;const r=(t,e,i)=>{i.width&&i.color&&(n.save(),n.lineWidth=i.width,n.strokeStyle=i.color,n.setLineDash(i.borderDash||[]),n.lineDashOffset=i.borderDashOffset,n.beginPath(),n.moveTo(t.x,t.y),n.lineTo(e.x,e.y),n.stroke(),n.restore())};if(i.display)for(s=0,a=o.length;s<a;++s){const t=o[s];i.drawOnChartArea&&r({x:t.x1,y:t.y1},{x:t.x2,y:t.y2},t),i.drawTicks&&r({x:t.tx1,y:t.ty1},{x:t.tx2,y:t.ty2},{color:t.tickColor,width:t.tickWidth,borderDash:t.tickBorderDash,borderDashOffset:t.tickBorderDashOffset})}}drawBorder(){const t=this,{chart:e,ctx:i,options:{grid:n}}=t,o=n.setContext(t.getContext()),s=n.drawBorder?o.borderWidth:0;if(!s)return;const a=n.setContext(t.getContext(0)).lineWidth,r=t._borderValue;let l,c,h,d;t.isHorizontal()?(l=Xt(e,t.left,s)-s/2,c=Xt(e,t.right,a)+a/2,h=d=r):(h=Xt(e,t.top,s)-s/2,d=Xt(e,t.bottom,a)+a/2,l=c=r),i.save(),i.lineWidth=o.borderWidth,i.strokeStyle=o.borderColor,i.beginPath(),i.moveTo(l,h),i.lineTo(c,d),i.stroke(),i.restore()}drawLabels(t){const e=this;if(!e.options.ticks.display)return;const i=e.ctx,n=e._computeLabelArea();n&&Zt(i,n);const o=e._labelItems||(e._labelItems=e._computeLabelItems(t));let s,a;for(s=0,a=o.length;s<a;++s){const 
t=o[s],e=t.font,n=t.label;t.backdrop&&(i.fillStyle=t.backdrop.color,i.fillRect(t.backdrop.left,t.backdrop.top,t.backdrop.width,t.backdrop.height)),ee(i,n,0,t.textOffset,e,t)}n&&Qt(i)}drawTitle(){const{ctx:t,options:{position:e,title:i,reverse:n}}=this;if(!i.display)return;const s=Fe(i.font),a=ze(i.padding),r=i.align;let l=s.lineHeight/2;"bottom"===e?(l+=a.bottom,Y(i.text)&&(l+=s.lineHeight*(i.text.length-1))):l+=a.top;const{titleX:c,titleY:h,maxWidth:d,rotation:u}=function(t,e,i,n){const{top:s,left:a,bottom:r,right:l}=t;let c,h,d,u=0;return t.isHorizontal()?(h=o(n,a,l),d=Hi(t,i,e),c=l-a):(h=Hi(t,i,e),d=o(n,r,s),u="left"===i?-Mt:Mt),{titleX:h,titleY:d,maxWidth:c,rotation:u}}(this,l,e,r);ee(t,i.text,0,0,s,{color:i.color,maxWidth:d,rotation:u,textAlign:Ui(r,e,n),textBaseline:"middle",translation:[c,h]})}draw(t){const e=this;e._isVisible()&&(e.drawBackground(),e.drawGrid(t),e.drawBorder(),e.drawTitle(),e.drawLabels(t))}_layers(){const t=this,e=t.options,i=e.ticks&&e.ticks.z||0,n=e.grid&&e.grid.z||0;return t._isVisible()&&t.draw===Xi.prototype.draw?[{z:n,draw(e){t.drawBackground(),t.drawGrid(e),t.drawTitle()}},{z:n+1,draw(){t.drawBorder()}},{z:i,draw(e){t.drawLabels(e)}}]:[{z:i,draw(e){t.draw(e)}}]}getMatchingVisibleMetas(t){const e=this,i=e.chart.getSortedVisibleDatasetMetas(),n=e.axis+"AxisID",o=[];let s,a;for(s=0,a=i.length;s<a;++s){const a=i[s];a[n]!==e.id||t&&a.type!==t||o.push(a)}return o}_resolveTickFontOptions(t){return Fe(this.options.ticks.setContext(this.getContext(t)).font)}_maxDigits(){const t=this,e=t._resolveTickFontOptions(0).lineHeight;return(t.isHorizontal()?t.width:t.height)/e}}function qi(t,e=[""],i=t,n,o=(()=>t[0])){ht(n)||(n=an("_fallback",t));const s={[Symbol.toStringTag]:"Object",_cacheable:!0,_scopes:t,_rootScopes:i,_fallback:n,_getTarget:o,override:o=>qi([o,...t],e,i,n)};return new Proxy(s,{deleteProperty:(e,i)=>(delete e[i],delete e._keys,delete t[0][i],!0),get:(i,n)=>Ji(i,n,(()=>function(t,e,i,n){let o;for(const s of 
e)if(o=an(Zi(s,t),i),ht(o))return Qi(t,o)?on(i,n,t,o):o}(n,e,t,i))),getOwnPropertyDescriptor:(t,e)=>Reflect.getOwnPropertyDescriptor(t._scopes[0],e),getPrototypeOf:()=>Reflect.getPrototypeOf(t[0]),has:(t,e)=>rn(t).includes(e),ownKeys:t=>rn(t),set:(t,e,i)=>((t._storage||(t._storage=o()))[e]=i,delete t[e],delete t._keys,!0)})}function Ki(t,e,i,n){const o={_cacheable:!1,_proxy:t,_context:e,_subProxy:i,_stack:new Set,_descriptors:Gi(t,n),setContext:e=>Ki(t,e,i,n),override:o=>Ki(t.override(o),e,i,n)};return new Proxy(o,{deleteProperty:(e,i)=>(delete e[i],delete t[i],!0),get:(t,e,i)=>Ji(t,e,(()=>function(t,e,i){const{_proxy:n,_context:o,_subProxy:s,_descriptors:a}=t;let r=n[e];dt(r)&&a.isScriptable(e)&&(r=function(t,e,i,n){const{_proxy:o,_context:s,_subProxy:a,_stack:r}=i;if(r.has(t))throw new Error("Recursion detected: "+[...r].join("->")+"->"+t);r.add(t),e=e(s,a||n),r.delete(t),U(e)&&(e=on(o._scopes,o,t,e));return e}(e,r,t,i));Y(r)&&r.length&&(r=function(t,e,i,n){const{_proxy:o,_context:s,_subProxy:a,_descriptors:r}=i;if(ht(s.index)&&n(t))e=e[s.index%e.length];else if(U(e[0])){const i=e,n=o._scopes.filter((t=>t!==i));e=[];for(const l of i){const i=on(n,o,t,l);e.push(Ki(i,s,a&&a[t],r))}}return e}(e,r,t,a.isIndexable));Qi(e,r)&&(r=Ki(r,o,s&&s[e],a));return r}(t,e,i))),getOwnPropertyDescriptor:(e,i)=>e._descriptors.allKeys?Reflect.has(t,i)?{enumerable:!0,configurable:!0}:void 0:Reflect.getOwnPropertyDescriptor(t,i),getPrototypeOf:()=>Reflect.getPrototypeOf(t),has:(e,i)=>Reflect.has(t,i),ownKeys:()=>Reflect.ownKeys(t),set:(e,i,n)=>(t[i]=n,delete e[i],!0)})}function Gi(t,e={scriptable:!0,indexable:!0}){const{_scriptable:i=e.scriptable,_indexable:n=e.indexable,_allKeys:o=e.allKeys}=t;return{allKeys:o,scriptable:i,indexable:n,isScriptable:dt(i)?i:()=>i,isIndexable:dt(n)?n:()=>n}}const Zi=(t,e)=>t?t+ct(e):e,Qi=(t,e)=>U(e)&&"adapters"!==t;function Ji(t,e,i){let n=t[e];return ht(n)||(n=i(),ht(n)&&(t[e]=n)),n}function tn(t,e,i){return dt(t)?t(e,i):t}const 
en=(t,e)=>!0===t?e:"string"==typeof t?lt(e,t):void 0;function nn(t,e,i,n){for(const o of e){const e=en(i,o);if(e){t.add(e);const o=tn(e._fallback,i,e);if(ht(o)&&o!==i&&o!==n)return o}else if(!1===e&&ht(n)&&i!==n)return null}return!1}function on(t,e,i,n){const o=e._rootScopes,s=tn(e._fallback,i,n),a=[...t,...o],r=new Set;r.add(n);let l=sn(r,a,i,s||i);return null!==l&&((!ht(s)||s===i||(l=sn(r,a,s,l),null!==l))&&qi([...r],[""],o,s,(()=>function(t,e,i){const n=t._getTarget();e in n||(n[e]={});const o=n[e];if(Y(o)&&U(i))return i;return o}(e,i,n))))}function sn(t,e,i,n){for(;i;)i=nn(t,e,i,n);return i}function an(t,e){for(const i of e){if(!i)continue;const e=i[t];if(ht(e))return e}}function rn(t){let e=t._keys;return e||(e=t._keys=function(t){const e=new Set;for(const i of t)for(const t of Object.keys(i).filter((t=>!t.startsWith("_"))))e.add(t);return[...e]}(t._scopes)),e}const ln=Number.EPSILON||1e-14,cn=(t,e)=>e<t.length&&!t[e].skip&&t[e],hn=t=>"x"===t?"y":"x";function dn(t,e,i,n){const o=t.skip?e:t,s=e,a=i.skip?e:i,r=Vt(s,o),l=Vt(a,s);let c=r/(r+l),h=l/(r+l);c=isNaN(c)?0:c,h=isNaN(h)?0:h;const d=n*c,u=n*h;return{previous:{x:s.x-d*(a.x-o.x),y:s.y-d*(a.y-o.y)},next:{x:s.x+u*(a.x-o.x),y:s.y+u*(a.y-o.y)}}}function un(t,e="x"){const i=hn(e),n=t.length,o=Array(n).fill(0),s=Array(n);let a,r,l,c=cn(t,0);for(a=0;a<n;++a)if(r=l,l=c,c=cn(t,a+1),l){if(c){const t=c[e]-l[e];o[a]=0!==t?(c[i]-l[i])/t:0}s[a]=r?c?Dt(o[a-1])!==Dt(o[a])?0:(o[a-1]+o[a])/2:o[a-1]:o[a]}!function(t,e,i){const n=t.length;let o,s,a,r,l,c=cn(t,0);for(let h=0;h<n-1;++h)l=c,c=cn(t,h+1),l&&c&&(At(e[h],0,ln)?i[h]=i[h+1]=0:(o=i[h]/e[h],s=i[h+1]/e[h],r=Math.pow(o,2)+Math.pow(s,2),r<=9||(a=3/Math.sqrt(r),i[h]=o*a*e[h],i[h+1]=s*a*e[h])))}(t,o,s),function(t,e,i="x"){const n=hn(i),o=t.length;let s,a,r,l=cn(t,0);for(let c=0;c<o;++c){if(a=r,r=l,l=cn(t,c+1),!r)continue;const o=r[i],h=r[n];a&&(s=(o-a[i])/3,r[`cp1${i}`]=o-s,r[`cp1${n}`]=h-s*e[c]),l&&(s=(l[i]-o)/3,r[`cp2${i}`]=o+s,r[`cp2${n}`]=h+s*e[c])}}(t,s,e)}function 
fn(t,e,i){return Math.max(Math.min(t,i),e)}function gn(t,e,i,n,o){let s,a,r,l;if(e.spanGaps&&(t=t.filter((t=>!t.skip))),"monotone"===e.cubicInterpolationMode)un(t,o);else{let i=n?t[t.length-1]:t[0];for(s=0,a=t.length;s<a;++s)r=t[s],l=dn(i,r,t[Math.min(s+1,a-(n?0:1))%a],e.tension),r.cp1x=l.previous.x,r.cp1y=l.previous.y,r.cp2x=l.next.x,r.cp2y=l.next.y,i=r}e.capBezierPoints&&function(t,e){let i,n,o,s,a,r=Gt(t[0],e);for(i=0,n=t.length;i<n;++i)a=s,s=r,r=i<n-1&&Gt(t[i+1],e),s&&(o=t[i],a&&(o.cp1x=fn(o.cp1x,e.left,e.right),o.cp1y=fn(o.cp1y,e.top,e.bottom)),r&&(o.cp2x=fn(o.cp2x,e.left,e.right),o.cp2y=fn(o.cp2y,e.top,e.bottom)))}(t,i)}function pn(t,e,i,n){return{x:t.x+i*(e.x-t.x),y:t.y+i*(e.y-t.y)}}function mn(t,e,i,n){return{x:t.x+i*(e.x-t.x),y:"middle"===n?i<.5?t.y:e.y:"after"===n?i<1?t.y:e.y:i>0?e.y:t.y}}function xn(t,e,i,n){const o={x:t.cp2x,y:t.cp2y},s={x:e.cp1x,y:e.cp1y},a=pn(t,o,i),r=pn(o,s,i),l=pn(s,e,i),c=pn(a,r,i),h=pn(r,l,i);return pn(c,h,i)}function bn(t,e,i){return t?function(t,e){return{x:i=>t+t+e-i,setWidth(t){e=t},textAlign:t=>"center"===t?t:"right"===t?"left":"right",xPlus:(t,e)=>t-e,leftForLtr:(t,e)=>t-e}}(e,i):{x:t=>t,setWidth(t){},textAlign:t=>t,xPlus:(t,e)=>t+e,leftForLtr:(t,e)=>t}}function _n(t,e){let i,n;"ltr"!==e&&"rtl"!==e||(i=t.canvas.style,n=[i.getPropertyValue("direction"),i.getPropertyPriority("direction")],i.setProperty("direction",e,"important"),t.prevTextDirection=n)}function yn(t,e){void 0!==e&&(delete t.prevTextDirection,t.canvas.style.setProperty("direction",e[0],e[1]))}function vn(t){return"angle"===t?{between:Ht,compare:Bt,normalize:Wt}:{between:(t,e,i)=>t>=Math.min(e,i)&&t<=Math.max(i,e),compare:(t,e)=>t-e,normalize:t=>t}}function wn({start:t,end:e,count:i,loop:n,style:o}){return{start:t%i,end:e%i,loop:n&&(e-t+1)%i==0,style:o}}function 
Mn(t,e,i){if(!i)return[t];const{property:n,start:o,end:s}=i,a=e.length,{compare:r,between:l,normalize:c}=vn(n),{start:h,end:d,loop:u,style:f}=function(t,e,i){const{property:n,start:o,end:s}=i,{between:a,normalize:r}=vn(n),l=e.length;let c,h,{start:d,end:u,loop:f}=t;if(f){for(d+=l,u+=l,c=0,h=l;c<h&&a(r(e[d%l][n]),o,s);++c)d--,u--;d%=l,u%=l}return u<d&&(u+=l),{start:d,end:u,loop:f,style:t.style}}(t,e,i),g=[];let p,m,x,b=!1,_=null;const y=()=>b||l(o,x,p)&&0!==r(o,x),v=()=>!b||0===r(s,p)||l(s,x,p);for(let t=h,i=h;t<=d;++t)m=e[t%a],m.skip||(p=c(m[n]),p!==x&&(b=l(p,o,s),null===_&&y()&&(_=0===r(p,o)?t:i),null!==_&&v()&&(g.push(wn({start:_,end:t,loop:u,count:a,style:f})),_=null),i=t,x=p));return null!==_&&g.push(wn({start:_,end:d,loop:u,count:a,style:f})),g}function kn(t,e){const i=[],n=t.segments;for(let o=0;o<n.length;o++){const s=Mn(n[o],t.points,e);s.length&&i.push(...s)}return i}function Sn(t,e){const i=t.points,n=t.options.spanGaps,o=i.length;if(!o)return[];const s=!!t._loop,{start:a,end:r}=function(t,e,i,n){let o=0,s=e-1;if(i&&!n)for(;o<e&&!t[o].skip;)o++;for(;o<e&&t[o].skip;)o++;for(o%=e,i&&(s+=o);s>o&&t[s%e].skip;)s--;return s%=e,{start:o,end:s}}(i,o,s,n);if(!0===n)return Pn([{start:a,end:r,loop:s}],i,e);return Pn(function(t,e,i,n){const o=t.length,s=[];let a,r=e,l=t[e];for(a=e+1;a<=i;++a){const i=t[a%o];i.skip||i.stop?l.skip||(n=!1,s.push({start:e%o,end:(a-1)%o,loop:n}),e=r=i.stop?a:null):(r=a,l.skip&&(e=a)),l=i}return null!==r&&s.push({start:e%o,end:r%o,loop:n}),s}(i,a,r<a?r+o:r,!!t._fullLoop&&0===a&&r===o-1),i,e)}function Pn(t,e,i){return i&&i.setContext&&e?function(t,e,i){const n=e.length,o=[];let s=t[0].start,a=s;for(const r of t){let t,l,c=e[s%n];for(a=s+1;a<=r.end;a++){const h=e[a%n];l=Dn(i.setContext({type:"segment",p0:c,p1:h})),Cn(l,t)&&(o.push({start:s,end:a-1,loop:r.loop,style:t}),t=l,s=a-1),c=h,t=l}s<a-1&&(o.push({start:s,end:a-1,loop:r.loop,style:l}),s=a-1)}return o}(t,e,i):t}function 
Dn(t){return{backgroundColor:t.backgroundColor,borderCapStyle:t.borderCapStyle,borderDash:t.borderDash,borderDashOffset:t.borderDashOffset,borderJoinStyle:t.borderJoinStyle,borderWidth:t.borderWidth,borderColor:t.borderColor}}function Cn(t,e){return e&&JSON.stringify(t)!==JSON.stringify(e)}var On=Object.freeze({__proto__:null,easingEffects:mi,color:W,getHoverColor:H,noop:N,uid:j,isNullOrUndef:$,isArray:Y,isObject:U,isFinite:X,finiteOrDefault:q,valueOrDefault:K,toPercentage:G,toDimension:Z,callback:Q,each:J,_elementsEqual:tt,clone:et,_merger:nt,merge:ot,mergeIf:st,_mergerIf:at,_deprecated:function(t,e,i,n){void 0!==e&&console.warn(t+': "'+i+'" is deprecated. Please use "'+n+'" instead')},resolveObjectKey:lt,_capitalize:ct,defined:ht,isFunction:dt,setsEqual:ut,toFontString:$t,_measureText:Yt,_longestText:Ut,_alignPixel:Xt,clearCanvas:qt,drawPoint:Kt,_isPointInArea:Gt,clipArea:Zt,unclipArea:Qt,_steppedLineTo:Jt,_bezierCurveTo:te,renderText:ee,addRoundedRectPath:ie,_lookup:ne,_lookupByKey:oe,_rlookupByKey:se,_filterBetween:ae,listenArrayEvents:le,unlistenArrayEvents:ce,_arrayUnique:he,_createResolver:qi,_attachContext:Ki,_descriptors:Gi,splineCurve:dn,splineCurveMonotone:un,_updateBezierControlPoints:gn,_getParentNode:de,getStyle:ge,getRelativePosition:xe,getMaximumSize:_e,retinaScale:ye,supportsEventListenerOptions:ve,readUsedSize:we,fontString:function(t,e,i){return e+" "+t+"px 
"+i},requestAnimFrame:t,throttled:e,debounce:i,_toLeftRightCenter:n,_alignStartEnd:o,_textX:s,_pointInLine:pn,_steppedInterpolation:mn,_bezierInterpolation:xn,formatNumber:zi,toLineHeight:Le,_readValueToProps:Re,toTRBL:Ee,toTRBLCorners:Ie,toPadding:ze,toFont:Fe,resolve:Ve,_addGrace:Be,PI:bt,TAU:_t,PITAU:yt,INFINITY:vt,RAD_PER_DEG:wt,HALF_PI:Mt,QUARTER_PI:kt,TWO_THIRDS_PI:St,log10:Pt,sign:Dt,niceNum:Ct,_factorize:Ot,isNumber:Tt,almostEquals:At,almostWhole:Lt,_setMinAndMaxByKey:Rt,toRadians:Et,toDegrees:It,_decimalPlaces:zt,getAngleFromPoint:Ft,distanceBetweenPoints:Vt,_angleDiff:Bt,_normalizeAngle:Wt,_angleBetween:Ht,_limitValue:Nt,_int16Range:jt,getRtlAdapter:bn,overrideTextDirection:_n,restoreTextDirection:yn,_boundSegment:Mn,_boundSegments:kn,_computeSegments:Sn});class Tn{constructor(t,e,i){this.type=t,this.scope=e,this.override=i,this.items=Object.create(null)}isForType(t){return Object.prototype.isPrototypeOf.call(this.type.prototype,t.prototype)}register(t){const e=this,i=Object.getPrototypeOf(t);let n;(function(t){return"id"in t&&"defaults"in t})(i)&&(n=e.register(i));const o=e.items,s=t.id,a=e.scope+"."+s;if(!s)throw new Error("class does not have id: "+t);return s in o||(o[s]=t,function(t,e,i){const n=ot(Object.create(null),[i?xt.get(i):{},xt.get(e),t.defaults]);xt.set(e,n),t.defaultRoutes&&function(t,e){Object.keys(e).forEach((i=>{const n=i.split("."),o=n.pop(),s=[t].concat(n).join("."),a=e[i].split("."),r=a.pop(),l=a.join(".");xt.route(s,o,l,r)}))}(e,t.defaultRoutes);t.descriptors&&xt.describe(e,t.descriptors)}(t,a,n),e.override&&xt.override(t.id,t.overrides)),a}get(t){return this.items[t]}unregister(t){const e=this.items,i=t.id,n=this.scope;i in e&&delete e[i],n&&i in xt[n]&&(delete xt[n][i],this.override&&delete ft[i])}}var An=new class{constructor(){this.controllers=new Tn(Ri,"datasets",!0),this.elements=new Tn(Ei,"elements"),this.plugins=new Tn(Object,"plugins"),this.scales=new 
Tn(Xi,"scales"),this._typedRegistries=[this.controllers,this.scales,this.elements]}add(...t){this._each("register",t)}remove(...t){this._each("unregister",t)}addControllers(...t){this._each("register",t,this.controllers)}addElements(...t){this._each("register",t,this.elements)}addPlugins(...t){this._each("register",t,this.plugins)}addScales(...t){this._each("register",t,this.scales)}getController(t){return this._get(t,this.controllers,"controller")}getElement(t){return this._get(t,this.elements,"element")}getPlugin(t){return this._get(t,this.plugins,"plugin")}getScale(t){return this._get(t,this.scales,"scale")}removeControllers(...t){this._each("unregister",t,this.controllers)}removeElements(...t){this._each("unregister",t,this.elements)}removePlugins(...t){this._each("unregister",t,this.plugins)}removeScales(...t){this._each("unregister",t,this.scales)}_each(t,e,i){const n=this;[...e].forEach((e=>{const o=i||n._getRegistryForType(e);i||o.isForType(e)||o===n.plugins&&e.id?n._exec(t,o,e):J(e,(e=>{const o=i||n._getRegistryForType(e);n._exec(t,o,e)}))}))}_exec(t,e,i){const n=ct(t);Q(i["before"+n],[],i),e[t](i),Q(i["after"+n],[],i)}_getRegistryForType(t){for(let e=0;e<this._typedRegistries.length;e++){const i=this._typedRegistries[e];if(i.isForType(t))return i}return this.plugins}_get(t,e,i){const n=e.get(t);if(void 0===n)throw new Error('"'+t+'" is not a registered '+i+".");return n}};class Ln{constructor(){this._init=[]}notify(t,e,i,n){const o=this;"beforeInit"===e&&(o._init=o._createDescriptors(t,!0),o._notify(o._init,t,"install"));const s=n?o._descriptors(t).filter(n):o._descriptors(t),a=o._notify(s,t,e,i);return"destroy"===e&&(o._notify(s,t,"stop"),o._notify(o._init,t,"uninstall")),a}_notify(t,e,i,n){n=n||{};for(const o of t){const t=o.plugin;if(!1===Q(t[i],[e,n,o.options],t)&&n.cancelable)return!1}return!0}invalidate(){$(this._cache)||(this._oldCache=this._cache,this._cache=void 0)}_descriptors(t){if(this._cache)return this._cache;const 
e=this._cache=this._createDescriptors(t);return this._notifyStateChanges(t),e}_createDescriptors(t,e){const i=t&&t.config,n=K(i.options&&i.options.plugins,{}),o=function(t){const e=[],i=Object.keys(An.plugins.items);for(let t=0;t<i.length;t++)e.push(An.getPlugin(i[t]));const n=t.plugins||[];for(let t=0;t<n.length;t++){const i=n[t];-1===e.indexOf(i)&&e.push(i)}return e}(i);return!1!==n||e?function(t,e,i,n){const o=[],s=t.getContext();for(let a=0;a<e.length;a++){const r=e[a],l=Rn(i[r.id],n);null!==l&&o.push({plugin:r,options:En(t.config,r,l,s)})}return o}(t,o,n,e):[]}_notifyStateChanges(t){const e=this._oldCache||[],i=this._cache,n=(t,e)=>t.filter((t=>!e.some((e=>t.plugin.id===e.plugin.id))));this._notify(n(e,i),t,"stop"),this._notify(n(i,e),t,"start")}}function Rn(t,e){return e||!1!==t?!0===t?{}:t:null}function En(t,e,i,n){const o=t.pluginScopeKeys(e),s=t.getOptionScopes(i,o);return t.createResolver(s,n,[""],{scriptable:!1,indexable:!1,allKeys:!0})}function In(t,e){const i=xt.datasets[t]||{};return((e.datasets||{})[t]||{}).indexAxis||e.indexAxis||i.indexAxis||"x"}function zn(t,e){return"x"===t||"y"===t?t:e.axis||("top"===(i=e.position)||"bottom"===i?"x":"left"===i||"right"===i?"y":void 0)||t.charAt(0).toLowerCase();var i}function Fn(t){const e=t.options||(t.options={});e.plugins=K(e.plugins,{}),e.scales=function(t,e){const i=ft[t.type]||{scales:{}},n=e.scales||{},o=In(t.type,e),s=Object.create(null),a=Object.create(null);return Object.keys(n).forEach((t=>{const e=n[t],r=zn(t,e),l=function(t,e){return t===e?"_index_":"_value_"}(r,o),c=i.scales||{};s[r]=s[r]||t,a[t]=st(Object.create(null),[{axis:r},e,c[r],c[l]])})),t.data.datasets.forEach((i=>{const o=i.type||t.type,r=i.indexAxis||In(o,e),l=(ft[o]||{}).scales||{};Object.keys(l).forEach((t=>{const e=function(t,e){let i=t;return"_index_"===t?i=e:"_value_"===t&&(i="x"===e?"y":"x"),i}(t,r),o=i[e+"AxisID"]||s[e]||e;a[o]=a[o]||Object.create(null),st(a[o],[{axis:e},n[o],l[t]])}))})),Object.keys(a).forEach((t=>{const 
e=a[t];st(e,[xt.scales[e.type],xt.scale])})),a}(t,e)}function Vn(t){return(t=t||{}).datasets=t.datasets||[],t.labels=t.labels||[],t}const Bn=new Map,Wn=new Set;function Hn(t,e){let i=Bn.get(t);return i||(i=e(),Bn.set(t,i),Wn.add(i)),i}const Nn=(t,e,i)=>{const n=lt(e,i);void 0!==n&&t.add(n)};class jn{constructor(t){this._config=function(t){return(t=t||{}).data=Vn(t.data),Fn(t),t}(t),this._scopeCache=new Map,this._resolverCache=new Map}get type(){return this._config.type}set type(t){this._config.type=t}get data(){return this._config.data}set data(t){this._config.data=Vn(t)}get options(){return this._config.options}set options(t){this._config.options=t}get plugins(){return this._config.plugins}update(){const t=this._config;this.clearCache(),Fn(t)}clearCache(){this._scopeCache.clear(),this._resolverCache.clear()}datasetScopeKeys(t){return Hn(t,(()=>[[`datasets.${t}`,""]]))}datasetAnimationScopeKeys(t,e){return Hn(`${t}.transition.${e}`,(()=>[[`datasets.${t}.transitions.${e}`,`transitions.${e}`],[`datasets.${t}`,""]]))}datasetElementScopeKeys(t,e){return Hn(`${t}-${e}`,(()=>[[`datasets.${t}.elements.${e}`,`datasets.${t}`,`elements.${e}`,""]]))}pluginScopeKeys(t){const e=t.id;return Hn(`${this.type}-plugin-${e}`,(()=>[[`plugins.${e}`,...t.additionalOptionScopes||[]]]))}_cachedScopes(t,e){const i=this._scopeCache;let n=i.get(t);return n&&!e||(n=new Map,i.set(t,n)),n}getOptionScopes(t,e,i){const{options:n,type:o}=this,s=this._cachedScopes(t,i),a=s.get(e);if(a)return a;const r=new Set;e.forEach((e=>{t&&(r.add(t),e.forEach((e=>Nn(r,t,e)))),e.forEach((t=>Nn(r,n,t))),e.forEach((t=>Nn(r,ft[o]||{},t))),e.forEach((t=>Nn(r,xt,t))),e.forEach((t=>Nn(r,gt,t)))}));const l=[...r];return Wn.has(e)&&s.set(e,l),l}chartOptionScopes(){const{options:t,type:e}=this;return[t,ft[e]||{},xt.datasets[e]||{},{type:e},xt,gt]}resolveNamedOptions(t,e,i,n=[""]){const o={$shared:!0},{resolver:s,subPrefixes:a}=$n(this._resolverCache,t,n);let 
r=s;if(function(t,e){const{isScriptable:i,isIndexable:n}=Gi(t);for(const o of e)if(i(o)&&dt(t[o])||n(o)&&Y(t[o]))return!0;return!1}(s,e)){o.$shared=!1;r=Ki(s,i=dt(i)?i():i,this.createResolver(t,i,a))}for(const t of e)o[t]=r[t];return o}createResolver(t,e,i=[""],n){const{resolver:o}=$n(this._resolverCache,t,i);return U(e)?Ki(o,e,void 0,n):o}}function $n(t,e,i){let n=t.get(e);n||(n=new Map,t.set(e,n));const o=i.join();let s=n.get(o);if(!s){s={resolver:qi(e,i),subPrefixes:i.filter((t=>!t.toLowerCase().includes("hover")))},n.set(o,s)}return s}const Yn=["top","bottom","left","right","chartArea"];function Un(t,e){return"top"===t||"bottom"===t||-1===Yn.indexOf(t)&&"x"===e}function Xn(t,e){return function(i,n){return i[t]===n[t]?i[e]-n[e]:i[t]-n[t]}}function qn(t){const e=t.chart,i=e.options.animation;e.notifyPlugins("afterRender"),Q(i&&i.onComplete,[t],e)}function Kn(t){const e=t.chart,i=e.options.animation;Q(i&&i.onProgress,[t],e)}function Gn(){return"undefined"!=typeof window&&"undefined"!=typeof document}function Zn(t){return Gn()&&"string"==typeof t?t=document.getElementById(t):t&&t.length&&(t=t[0]),t&&t.canvas&&(t=t.canvas),t}const Qn={},Jn=t=>{const e=Zn(t);return Object.values(Qn).filter((t=>t.canvas===e)).pop()};class to{constructor(t,e){const n=this;this.config=e=new jn(e);const o=Zn(t),s=Jn(o);if(s)throw new Error("Canvas is already in use. 
Chart with ID '"+s.id+"' must be destroyed before the canvas can be reused.");const r=e.createResolver(e.chartOptionScopes(),n.getContext());this.platform=n._initializePlatform(o,e);const l=n.platform.acquireContext(o,r.aspectRatio),c=l&&l.canvas,h=c&&c.height,d=c&&c.width;this.id=j(),this.ctx=l,this.canvas=c,this.width=d,this.height=h,this._options=r,this._aspectRatio=this.aspectRatio,this._layers=[],this._metasets=[],this._stacks=void 0,this.boxes=[],this.currentDevicePixelRatio=void 0,this.chartArea=void 0,this._active=[],this._lastEvent=void 0,this._listeners={},this._responsiveListeners=void 0,this._sortedMetasets=[],this.scales={},this.scale=void 0,this._plugins=new Ln,this.$proxies={},this._hiddenIndices={},this.attached=!1,this._animationsDisabled=void 0,this.$context=void 0,this._doResize=i((()=>this.update("resize")),r.resizeDelay||0),Qn[n.id]=n,l&&c?(a.listen(n,"complete",qn),a.listen(n,"progress",Kn),n._initialize(),n.attached&&n.update()):console.error("Failed to create chart: can't acquire context from the given item")}get aspectRatio(){const{options:{aspectRatio:t,maintainAspectRatio:e},width:i,height:n,_aspectRatio:o}=this;return $(t)?e&&o?o:n?i/n:null:t}get data(){return this.config.data}set data(t){this.config.data=t}get options(){return this._options}set options(t){this.config.options=t}_initialize(){const t=this;return t.notifyPlugins("beforeInit"),t.options.responsive?t.resize():ye(t,t.options.devicePixelRatio),t.bindEvents(),t.notifyPlugins("afterInit"),t}_initializePlatform(t,e){return e.platform?new e.platform:!Gn()||"undefined"!=typeof OffscreenCanvas&&t instanceof OffscreenCanvas?new Qe:new di}clear(){return qt(this.canvas,this.ctx),this}stop(){return a.stop(this),this}resize(t,e){a.running(this)?this._resizeBeforeDraw={width:t,height:e}:this._resize(t,e)}_resize(t,e){const 
i=this,n=i.options,o=i.canvas,s=n.maintainAspectRatio&&i.aspectRatio,a=i.platform.getMaximumSize(o,t,e,s),r=n.devicePixelRatio||i.platform.getDevicePixelRatio();i.width=a.width,i.height=a.height,i._aspectRatio=i.aspectRatio,ye(i,r,!0)&&(i.notifyPlugins("resize",{size:a}),Q(n.onResize,[i,a],i),i.attached&&i._doResize()&&i.render())}ensureScalesHaveIDs(){J(this.options.scales||{},((t,e)=>{t.id=e}))}buildOrUpdateScales(){const t=this,e=t.options,i=e.scales,n=t.scales,o=Object.keys(n).reduce(((t,e)=>(t[e]=!1,t)),{});let s=[];i&&(s=s.concat(Object.keys(i).map((t=>{const e=i[t],n=zn(t,e),o="r"===n,s="x"===n;return{options:e,dposition:o?"chartArea":s?"bottom":"left",dtype:o?"radialLinear":s?"category":"linear"}})))),J(s,(i=>{const s=i.options,a=s.id,r=zn(a,s),l=K(s.type,i.dtype);void 0!==s.position&&Un(s.position,r)===Un(i.dposition)||(s.position=i.dposition),o[a]=!0;let c=null;if(a in n&&n[a].type===l)c=n[a];else{c=new(An.getScale(l))({id:a,type:l,ctx:t.ctx,chart:t}),n[c.id]=c}c.init(s,e)})),J(o,((t,e)=>{t||delete n[e]})),J(n,(e=>{Ge.configure(t,e,e.options),Ge.addBox(t,e)}))}_updateMetasets(){const t=this,e=t._metasets,i=t.data.datasets.length,n=e.length;if(e.sort(((t,e)=>t.index-e.index)),n>i){for(let e=i;e<n;++e)t._destroyDatasetMeta(e);e.splice(i,n-i)}t._sortedMetasets=e.slice(0).sort(Xn("order","index"))}_removeUnreferencedMetasets(){const t=this,{_metasets:e,data:{datasets:i}}=t;e.length>i.length&&delete t._stacks,e.forEach(((e,n)=>{0===i.filter((t=>t===e._dataset)).length&&t._destroyDatasetMeta(n)}))}buildOrUpdateControllers(){const t=this,e=[],i=t.data.datasets;let n,o;for(t._removeUnreferencedMetasets(),n=0,o=i.length;n<o;n++){const o=i[n];let s=t.getDatasetMeta(n);const 
a=o.type||t.config.type;if(s.type&&s.type!==a&&(t._destroyDatasetMeta(n),s=t.getDatasetMeta(n)),s.type=a,s.indexAxis=o.indexAxis||In(a,t.options),s.order=o.order||0,s.index=n,s.label=""+o.label,s.visible=t.isDatasetVisible(n),s.controller)s.controller.updateIndex(n),s.controller.linkScales();else{const i=An.getController(a),{datasetElementType:o,dataElementType:r}=xt.datasets[a];Object.assign(i.prototype,{dataElementType:An.getElement(r),datasetElementType:o&&An.getElement(o)}),s.controller=new i(t,n),e.push(s.controller)}}return t._updateMetasets(),e}_resetElements(){const t=this;J(t.data.datasets,((e,i)=>{t.getDatasetMeta(i).controller.reset()}),t)}reset(){this._resetElements(),this.notifyPlugins("reset")}update(t){const e=this,i=e.config;i.update(),e._options=i.createResolver(i.chartOptionScopes(),e.getContext()),J(e.scales,(t=>{Ge.removeBox(e,t)}));const n=e._animationsDisabled=!e.options.animation;e.ensureScalesHaveIDs(),e.buildOrUpdateScales();const o=new Set(Object.keys(e._listeners)),s=new Set(e.options.events);if(ut(o,s)&&!!this._responsiveListeners===e.options.responsive||(e.unbindEvents(),e.bindEvents()),e._plugins.invalidate(),!1===e.notifyPlugins("beforeUpdate",{mode:t,cancelable:!0}))return;const a=e.buildOrUpdateControllers();e.notifyPlugins("beforeElementsUpdate");let r=0;for(let t=0,i=e.data.datasets.length;t<i;t++){const{controller:i}=e.getDatasetMeta(t),o=!n&&-1===a.indexOf(i);i.buildOrUpdateElements(o),r=Math.max(+i.getMaxOverflow(),r)}e._minPadding=r,e._updateLayout(r),n||J(a,(t=>{t.reset()})),e._updateDatasets(t),e.notifyPlugins("afterUpdate",{mode:t}),e._layers.sort(Xn("z","_idx")),e._lastEvent&&e._eventHandler(e._lastEvent,!0),e.render()}_updateLayout(t){const e=this;if(!1===e.notifyPlugins("beforeLayout",{cancelable:!0}))return;Ge.update(e,e.width,e.height,t);const 
i=e.chartArea,n=i.width<=0||i.height<=0;e._layers=[],J(e.boxes,(t=>{n&&"chartArea"===t.position||(t.configure&&t.configure(),e._layers.push(...t._layers()))}),e),e._layers.forEach(((t,e)=>{t._idx=e})),e.notifyPlugins("afterLayout")}_updateDatasets(t){const e=this,i="function"==typeof t;if(!1!==e.notifyPlugins("beforeDatasetsUpdate",{mode:t,cancelable:!0})){for(let n=0,o=e.data.datasets.length;n<o;++n)e._updateDataset(n,i?t({datasetIndex:n}):t);e.notifyPlugins("afterDatasetsUpdate",{mode:t})}}_updateDataset(t,e){const i=this,n=i.getDatasetMeta(t),o={meta:n,index:t,mode:e,cancelable:!0};!1!==i.notifyPlugins("beforeDatasetUpdate",o)&&(n.controller._update(e),o.cancelable=!1,i.notifyPlugins("afterDatasetUpdate",o))}render(){const t=this;!1!==t.notifyPlugins("beforeRender",{cancelable:!0})&&(a.has(t)?t.attached&&!a.running(t)&&a.start(t):(t.draw(),qn({chart:t})))}draw(){const t=this;let e;if(t._resizeBeforeDraw){const{width:e,height:i}=t._resizeBeforeDraw;t._resize(e,i),t._resizeBeforeDraw=null}if(t.clear(),t.width<=0||t.height<=0)return;if(!1===t.notifyPlugins("beforeDraw",{cancelable:!0}))return;const i=t._layers;for(e=0;e<i.length&&i[e].z<=0;++e)i[e].draw(t.chartArea);for(t._drawDatasets();e<i.length;++e)i[e].draw(t.chartArea);t.notifyPlugins("afterDraw")}_getSortedDatasetMetas(t){const e=this._sortedMetasets,i=[];let n,o;for(n=0,o=e.length;n<o;++n){const o=e[n];t&&!o.visible||i.push(o)}return i}getSortedVisibleDatasetMetas(){return this._getSortedDatasetMetas(!0)}_drawDatasets(){const t=this;if(!1===t.notifyPlugins("beforeDatasetsDraw",{cancelable:!0}))return;const e=t.getSortedVisibleDatasetMetas();for(let i=e.length-1;i>=0;--i)t._drawDataset(e[i]);t.notifyPlugins("afterDatasetsDraw")}_drawDataset(t){const 
e=this,i=e.ctx,n=t._clip,o=e.chartArea,s={meta:t,index:t.index,cancelable:!0};!1!==e.notifyPlugins("beforeDatasetDraw",s)&&(Zt(i,{left:!1===n.left?0:o.left-n.left,right:!1===n.right?e.width:o.right+n.right,top:!1===n.top?0:o.top-n.top,bottom:!1===n.bottom?e.height:o.bottom+n.bottom}),t.controller.draw(),Qt(i),s.cancelable=!1,e.notifyPlugins("afterDatasetDraw",s))}getElementsAtEventForMode(t,e,i,n){const o=Oe.modes[e];return"function"==typeof o?o(this,t,i,n):[]}getDatasetMeta(t){const e=this.data.datasets[t],i=this._metasets;let n=i.filter((t=>t&&t._dataset===e)).pop();return n||(n={type:null,data:[],dataset:null,controller:null,hidden:null,xAxisID:null,yAxisID:null,order:e&&e.order||0,index:t,_dataset:e,_parsed:[],_sorted:!1},i.push(n)),n}getContext(){return this.$context||(this.$context={chart:this,type:"chart"})}getVisibleDatasetCount(){return this.getSortedVisibleDatasetMetas().length}isDatasetVisible(t){const e=this.data.datasets[t];if(!e)return!1;const i=this.getDatasetMeta(t);return"boolean"==typeof i.hidden?!i.hidden:!e.hidden}setDatasetVisibility(t,e){this.getDatasetMeta(t).hidden=!e}toggleDataVisibility(t){this._hiddenIndices[t]=!this._hiddenIndices[t]}getDataVisibility(t){return!this._hiddenIndices[t]}_updateDatasetVisibility(t,e){const i=this,n=e?"show":"hide",o=i.getDatasetMeta(t),s=o.controller._resolveAnimations(void 0,n);i.setDatasetVisibility(t,e),s.update(o,{visible:e}),i.update((e=>e.datasetIndex===t?n:void 0))}hide(t){this._updateDatasetVisibility(t,!1)}show(t){this._updateDatasetVisibility(t,!0)}_destroyDatasetMeta(t){const e=this,i=e._metasets&&e._metasets[t];i&&i.controller&&(i.controller._destroy(),delete e._metasets[t])}destroy(){const t=this,{canvas:e,ctx:i}=t;let n,o;for(t.stop(),a.remove(t),n=0,o=t.data.datasets.length;n<o;++n)t._destroyDatasetMeta(n);t.config.clearCache(),e&&(t.unbindEvents(),qt(e,i),t.platform.releaseContext(i),t.canvas=null,t.ctx=null),t.notifyPlugins("destroy"),delete Qn[t.id]}toBase64Image(...t){return 
this.canvas.toDataURL(...t)}bindEvents(){this.bindUserEvents(),this.options.responsive?this.bindResponsiveEvents():this.attached=!0}bindUserEvents(){const t=this,e=t._listeners,i=t.platform,n=function(e,i,n){e.offsetX=i,e.offsetY=n,t._eventHandler(e)};J(t.options.events,(o=>((n,o)=>{i.addEventListener(t,n,o),e[n]=o})(o,n)))}bindResponsiveEvents(){const t=this;t._responsiveListeners||(t._responsiveListeners={});const e=t._responsiveListeners,i=t.platform,n=(n,o)=>{i.addEventListener(t,n,o),e[n]=o},o=(n,o)=>{e[n]&&(i.removeEventListener(t,n,o),delete e[n])},s=(e,i)=>{t.canvas&&t.resize(e,i)};let a;const r=()=>{o("attach",r),t.attached=!0,t.resize(),n("resize",s),n("detach",a)};a=()=>{t.attached=!1,o("resize",s),n("attach",r)},i.isAttached(t.canvas)?r():a()}unbindEvents(){const t=this;J(t._listeners,((e,i)=>{t.platform.removeEventListener(t,i,e)})),t._listeners={},J(t._responsiveListeners,((e,i)=>{t.platform.removeEventListener(t,i,e)})),t._responsiveListeners=void 0}updateHoverStyle(t,e,i){const n=i?"set":"remove";let o,s,a,r;for("dataset"===e&&(o=this.getDatasetMeta(t[0].datasetIndex),o.controller["_"+n+"DatasetHoverStyle"]()),a=0,r=t.length;a<r;++a){s=t[a];const e=s&&this.getDatasetMeta(s.datasetIndex).controller;e&&e[n+"HoverStyle"](s.element,s.datasetIndex,s.index)}}getActiveElements(){return this._active||[]}setActiveElements(t){const e=this,i=e._active||[],n=t.map((({datasetIndex:t,index:i})=>{const n=e.getDatasetMeta(t);if(!n)throw new Error("No dataset found at index "+t);return{datasetIndex:t,element:n.data[i],index:i}}));!tt(n,i)&&(e._active=n,e._updateHoverStyles(n,i))}notifyPlugins(t,e,i){return this._plugins.notify(this,t,e,i)}_updateHoverStyles(t,e,i){const n=this,o=n.options.hover,s=(t,e)=>t.filter((t=>!e.some((e=>t.datasetIndex===e.datasetIndex&&t.index===e.index)))),a=s(e,t),r=i?t:s(t,e);a.length&&n.updateHoverStyle(a,o.mode,!1),r.length&&o.mode&&n.updateHoverStyle(r,o.mode,!0)}_eventHandler(t,e){const 
i=this,n={event:t,replay:e,cancelable:!0},o=e=>(e.options.events||this.options.events).includes(t.type);if(!1===i.notifyPlugins("beforeEvent",n,o))return;const s=i._handleEvent(t,e);return n.cancelable=!1,i.notifyPlugins("afterEvent",n,o),(s||n.changed)&&i.render(),i}_handleEvent(t,e){const i=this,{_active:n=[],options:o}=i,s=o.hover,a=e;let r=[],l=!1,c=null;return"mouseout"!==t.type&&(r=i.getElementsAtEventForMode(t,s.mode,s,a),c="click"===t.type?i._lastEvent:t),i._lastEvent=null,Gt(t,i.chartArea,i._minPadding)&&(Q(o.onHover,[t,r,i],i),"mouseup"!==t.type&&"click"!==t.type&&"contextmenu"!==t.type||Q(o.onClick,[t,r,i],i)),l=!tt(r,n),(l||e)&&(i._active=r,i._updateHoverStyles(r,n,e)),i._lastEvent=c,l}}const eo=()=>J(to.instances,(t=>t._plugins.invalidate())),io=!0;function no(){throw new Error("This method is not implemented: Check that a complete date adapter is provided.")}Object.defineProperties(to,{defaults:{enumerable:io,value:xt},instances:{enumerable:io,value:Qn},overrides:{enumerable:io,value:ft},registry:{enumerable:io,value:An},version:{enumerable:io,value:"3.3.1"},getChart:{enumerable:io,value:Jn},register:{enumerable:io,value:(...t)=>{An.add(...t),eo()}},unregister:{enumerable:io,value:(...t)=>{An.remove(...t),eo()}}});class oo{constructor(t){this.options=t||{}}formats(){return no()}parse(t,e){return no()}format(t,e){return no()}add(t,e,i){return no()}diff(t,e,i){return no()}startOf(t,e,i){return no()}endOf(t,e){return no()}}oo.override=function(t){Object.assign(oo.prototype,t)};var so={_date:oo};function ao(t){const e=function(t){if(!t._cache.$bar){const e=t.getMatchingVisibleMetas("bar");let i=[];for(let n=0,o=e.length;n<o;n++)i=i.concat(e[n].controller.getAllParsedValues(t));t._cache.$bar=he(i.sort(((t,e)=>t-e)))}return t._cache.$bar}(t);let i,n,o,s,a=t._length;const r=()=>{32767!==o&&-32768!==o&&(ht(s)&&(a=Math.min(a,Math.abs(o-s)||a)),s=o)};for(i=0,n=e.length;i<n;++i)o=t.getPixelForValue(e[i]),r();for(s=void 
0,i=0,n=t.ticks.length;i<n;++i)o=t.getPixelForTick(i),r();return a}function ro(t,e,i,n){return Y(t)?function(t,e,i,n){const o=i.parse(t[0],n),s=i.parse(t[1],n),a=Math.min(o,s),r=Math.max(o,s);let l=a,c=r;Math.abs(a)>Math.abs(r)&&(l=r,c=a),e[i.axis]=c,e._custom={barStart:l,barEnd:c,start:o,end:s,min:a,max:r}}(t,e,i,n):e[i.axis]=i.parse(t,n),e}function lo(t,e,i,n){const o=t.iScale,s=t.vScale,a=o.getLabels(),r=o===s,l=[];let c,h,d,u;for(c=i,h=i+n;c<h;++c)u=e[c],d={},d[o.axis]=r||o.parse(a[c],c),l.push(ro(u,d,s,c));return l}function co(t){return t&&void 0!==t.barStart&&void 0!==t.barEnd}class ho extends Ri{parsePrimitiveData(t,e,i,n){return lo(t,e,i,n)}parseArrayData(t,e,i,n){return lo(t,e,i,n)}parseObjectData(t,e,i,n){const{iScale:o,vScale:s}=t,{xAxisKey:a="x",yAxisKey:r="y"}=this._parsing,l="x"===o.axis?a:r,c="x"===s.axis?a:r,h=[];let d,u,f,g;for(d=i,u=i+n;d<u;++d)g=e[d],f={},f[o.axis]=o.parse(lt(g,l),d),h.push(ro(lt(g,c),f,s,d));return h}updateRangeFromParsed(t,e,i,n){super.updateRangeFromParsed(t,e,i,n);const o=i._custom;o&&e===this._cachedMeta.vScale&&(t.min=Math.min(t.min,o.min),t.max=Math.max(t.max,o.max))}getLabelAndValue(t){const e=this._cachedMeta,{iScale:i,vScale:n}=e,o=this.getParsed(t),s=o._custom,a=co(s)?"["+s.start+", "+s.end+"]":""+n.getLabelForValue(o[n.axis]);return{label:""+i.getLabelForValue(o[i.axis]),value:a}}initialize(){const t=this;t.enableOptionSharing=!0,super.initialize();t._cachedMeta.stack=t.getDataset().stack}update(t){const e=this._cachedMeta;this.updateElements(e.data,0,e.data.length,t)}updateElements(t,e,i,n){const o=this,s="reset"===n,a=o._cachedMeta.vScale,r=a.getBasePixel(),l=a.isHorizontal(),c=o._getRuler(),h=o.resolveDataElementOptions(e,n),d=o.getSharedOptions(h),u=o.includeOptions(n,d);o.updateSharedOptions(d,n,h);for(let h=e;h<e+i;h++){const 
e=o.getParsed(h),i=s||$(e[a.axis])?{base:r,head:r}:o._calculateBarValuePixels(h),f=o._calculateBarIndexPixels(h,c),g=(e._stacks||{})[a.axis],p={horizontal:l,base:i.base,enableBorderRadius:!g||co(e._custom)||o.index===g._top||o.index===g._bottom,x:l?i.head:f.center,y:l?f.center:i.head,height:l?f.size:void 0,width:l?void 0:f.size};u&&(p.options=d||o.resolveDataElementOptions(h,n)),o.updateElement(t[h],h,p,n)}}_getStacks(t,e){const i=this._cachedMeta.iScale,n=i.getMatchingVisibleMetas(this._type),o=i.options.stacked,s=n.length,a=[];let r,l;for(r=0;r<s;++r){if(l=n[r],void 0!==e){const t=l.controller.getParsed(e)[l.controller._cachedMeta.vScale.axis];if($(t)||isNaN(t))continue}if((!1===o||-1===a.indexOf(l.stack)||void 0===o&&void 0===l.stack)&&a.push(l.stack),l.index===t)break}return a.length||a.push(void 0),a}_getStackCount(t){return this._getStacks(void 0,t).length}_getStackIndex(t,e,i){const n=this._getStacks(t,i),o=void 0!==e?n.indexOf(e):-1;return-1===o?n.length-1:o}_getRuler(){const t=this,e=t.options,i=t._cachedMeta,n=i.iScale,o=[];let s,a;for(s=0,a=i.data.length;s<a;++s)o.push(n.getPixelForValue(t.getParsed(s)[n.axis],s));const r=e.barThickness;return{min:r||ao(n),pixels:o,start:n._startPixel,end:n._endPixel,stackCount:t._getStackCount(),scale:n,grouped:e.grouped,ratio:r?1:e.categoryPercentage*e.barPercentage}}_calculateBarValuePixels(t){const e=this,{vScale:i,_stacked:n}=e._cachedMeta,{base:o,minBarLength:s}=e.options,a=e.getParsed(t),r=a._custom,l=co(r);let c,h,d=a[i.axis],u=0,f=n?e.applyStack(i,a,n):d;f!==d&&(u=f-d,f=d),l&&(d=r.barStart,f=r.barEnd-r.barStart,0!==d&&Dt(d)!==Dt(r.barEnd)&&(u=0),u+=d);const g=$(o)||l?u:o;let p=i.getPixelForValue(g);c=this.chart.getDataVisibility(t)?i.getPixelForValue(u+f):p,h=c-p,void 0!==s&&Math.abs(h)<s&&(h=h<0?-s:s,0===d&&(p-=h/2),c=p+h);const m=o||0;if(p===i.getPixelForValue(m)){const t=i.getLineWidthForValue(m)/2;h>0?(p+=t,h-=t):h<0&&(p-=t,h+=t)}return{size:h,base:p,head:c,center:c+h/2}}_calculateBarIndexPixels(t,e){const 
i=this,n=e.scale,o=i.options,s=o.skipNull,a=K(o.maxBarThickness,1/0);let r,l;if(e.grouped){const n=s?i._getStackCount(t):e.stackCount,c="flex"===o.barThickness?function(t,e,i,n){const o=e.pixels,s=o[t];let a=t>0?o[t-1]:null,r=t<o.length-1?o[t+1]:null;const l=i.categoryPercentage;null===a&&(a=s-(null===r?e.end-e.start:r-s)),null===r&&(r=s+s-a);const c=s-(s-Math.min(a,r))/2*l;return{chunk:Math.abs(r-a)/2*l/n,ratio:i.barPercentage,start:c}}(t,e,o,n):function(t,e,i,n){const o=i.barThickness;let s,a;return $(o)?(s=e.min*i.categoryPercentage,a=i.barPercentage):(s=o*n,a=1),{chunk:s/n,ratio:a,start:e.pixels[t]-s/2}}(t,e,o,n),h=i._getStackIndex(i.index,i._cachedMeta.stack,s?t:void 0);r=c.start+c.chunk*h+c.chunk/2,l=Math.min(a,c.chunk*c.ratio)}else r=n.getPixelForValue(i.getParsed(t)[n.axis],t),l=Math.min(a,e.min*e.ratio);return{base:r-l/2,head:r+l/2,center:r,size:l}}draw(){const t=this,e=t.chart,i=t._cachedMeta,n=i.vScale,o=i.data,s=o.length;let a=0;for(Zt(e.ctx,e.chartArea);a<s;++a)null!==t.getParsed(a)[n.axis]&&o[a].draw(t._ctx);Qt(e.ctx)}}ho.id="bar",ho.defaults={datasetElementType:!1,dataElementType:"bar",categoryPercentage:.8,barPercentage:.9,grouped:!0,animations:{numbers:{type:"number",properties:["x","y","base","width","height"]}}},ho.overrides={interaction:{mode:"index"},scales:{_index_:{type:"category",offset:!0,grid:{offset:!0}},_value_:{type:"linear",beginAtZero:!0}}};class uo extends Ri{initialize(){this.enableOptionSharing=!0,super.initialize()}parseObjectData(t,e,i,n){const{xScale:o,yScale:s}=t,{xAxisKey:a="x",yAxisKey:r="y"}=this._parsing,l=[];let c,h,d;for(c=i,h=i+n;c<h;++c)d=e[c],l.push({x:o.parse(lt(d,a),c),y:s.parse(lt(d,r),c),_custom:d&&d.r&&+d.r});return l}getMaxOverflow(){const{data:t,_parsed:e}=this._cachedMeta;let i=0;for(let n=t.length-1;n>=0;--n)i=Math.max(i,t[n].size()/2,e[n]._custom);return i>0&&i}getLabelAndValue(t){const 
e=this._cachedMeta,{xScale:i,yScale:n}=e,o=this.getParsed(t),s=i.getLabelForValue(o.x),a=n.getLabelForValue(o.y),r=o._custom;return{label:e.label,value:"("+s+", "+a+(r?", "+r:"")+")"}}update(t){const e=this._cachedMeta.data;this.updateElements(e,0,e.length,t)}updateElements(t,e,i,n){const o=this,s="reset"===n,{iScale:a,vScale:r}=o._cachedMeta,l=o.resolveDataElementOptions(e,n),c=o.getSharedOptions(l),h=o.includeOptions(n,c),d=a.axis,u=r.axis;for(let l=e;l<e+i;l++){const e=t[l],i=!s&&o.getParsed(l),c={},f=c[d]=s?a.getPixelForDecimal(.5):a.getPixelForValue(i[d]),g=c[u]=s?r.getBasePixel():r.getPixelForValue(i[u]);c.skip=isNaN(f)||isNaN(g),h&&(c.options=o.resolveDataElementOptions(l,n),s&&(c.options.radius=0)),o.updateElement(e,l,c,n)}o.updateSharedOptions(c,n,l)}resolveDataElementOptions(t,e){const i=this.getParsed(t);let n=super.resolveDataElementOptions(t,e);n.$shared&&(n=Object.assign({},n,{$shared:!1}));const o=n.radius;return"active"!==e&&(n.radius=0),n.radius+=K(i&&i._custom,o),n}}uo.id="bubble",uo.defaults={datasetElementType:!1,dataElementType:"point",animations:{numbers:{type:"number",properties:["x","y","borderWidth","radius"]}}},uo.overrides={scales:{x:{type:"linear"},y:{type:"linear"}},plugins:{tooltip:{callbacks:{title:()=>""}}}};class fo extends Ri{constructor(t,e){super(t,e),this.enableOptionSharing=!0,this.innerRadius=void 0,this.outerRadius=void 0,this.offsetX=void 0,this.offsetY=void 0}linkScales(){}parse(t,e){const i=this.getDataset().data,n=this._cachedMeta;let o,s;for(o=t,s=t+e;o<s;++o)n._parsed[o]=+i[o]}_getRotation(){return Et(this.options.rotation-90)}_getCircumference(){return Et(this.options.circumference)}_getRotationExtents(){let t=_t,e=-_t;const i=this;for(let n=0;n<i.chart.data.datasets.length;++n)if(i.chart.isDatasetVisible(n)){const o=i.chart.getDatasetMeta(n).controller,s=o._getRotation(),a=o._getCircumference();t=Math.min(t,s),e=Math.max(e,s+a)}return{rotation:t,circumference:e-t}}update(t){const 
e=this,i=e.chart,{chartArea:n}=i,o=e._cachedMeta,s=o.data,a=e.getMaxBorderWidth()+e.getMaxOffset(s)+e.options.spacing,r=Math.max((Math.min(n.width,n.height)-a)/2,0),l=Math.min(G(e.options.cutout,r),1),c=e._getRingWeight(e.index),{circumference:h,rotation:d}=e._getRotationExtents(),{ratioX:u,ratioY:f,offsetX:g,offsetY:p}=function(t,e,i){let n=1,o=1,s=0,a=0;if(e<_t){const r=t,l=r+e,c=Math.cos(r),h=Math.sin(r),d=Math.cos(l),u=Math.sin(l),f=(t,e,n)=>Ht(t,r,l,!0)?1:Math.max(e,e*i,n,n*i),g=(t,e,n)=>Ht(t,r,l,!0)?-1:Math.min(e,e*i,n,n*i),p=f(0,c,d),m=f(Mt,h,u),x=g(bt,c,d),b=g(bt+Mt,h,u);n=(p-x)/2,o=(m-b)/2,s=-(p+x)/2,a=-(m+b)/2}return{ratioX:n,ratioY:o,offsetX:s,offsetY:a}}(d,h,l),m=(n.width-a)/u,x=(n.height-a)/f,b=Math.max(Math.min(m,x)/2,0),_=Z(e.options.radius,b),y=(_-Math.max(_*l,0))/e._getVisibleDatasetWeightTotal();e.offsetX=g*_,e.offsetY=p*_,o.total=e.calculateTotal(),e.outerRadius=_-y*e._getRingWeightOffset(e.index),e.innerRadius=Math.max(e.outerRadius-y*c,0),e.updateElements(s,0,s.length,t)}_circumference(t,e){const i=this,n=i.options,o=i._cachedMeta,s=i._getCircumference();return e&&n.animation.animateRotate||!this.chart.getDataVisibility(t)||null===o._parsed[t]?0:i.calculateCircumference(o._parsed[t]*s/_t)}updateElements(t,e,i,n){const o=this,s="reset"===n,a=o.chart,r=a.chartArea,l=a.options.animation,c=(r.left+r.right)/2,h=(r.top+r.bottom)/2,d=s&&l.animateScale,u=d?0:o.innerRadius,f=d?0:o.outerRadius,g=o.resolveDataElementOptions(e,n),p=o.getSharedOptions(g),m=o.includeOptions(n,p);let x,b=o._getRotation();for(x=0;x<e;++x)b+=o._circumference(x,s);for(x=e;x<e+i;++x){const e=o._circumference(x,s),i=t[x],a={x:c+o.offsetX,y:h+o.offsetY,startAngle:b,endAngle:b+e,circumference:e,outerRadius:f,innerRadius:u};m&&(a.options=p||o.resolveDataElementOptions(x,n)),b+=e,o.updateElement(i,x,a,n)}o.updateSharedOptions(p,n,g)}calculateTotal(){const t=this._cachedMeta,e=t.data;let i,n=0;for(i=0;i<e.length;i++){const 
e=t._parsed[i];null!==e&&!isNaN(e)&&this.chart.getDataVisibility(i)&&(n+=Math.abs(e))}return n}calculateCircumference(t){const e=this._cachedMeta.total;return e>0&&!isNaN(t)?_t*(Math.abs(t)/e):0}getLabelAndValue(t){const e=this._cachedMeta,i=this.chart,n=i.data.labels||[],o=zi(e._parsed[t],i.options.locale);return{label:n[t]||"",value:o}}getMaxBorderWidth(t){const e=this;let i=0;const n=e.chart;let o,s,a,r,l;if(!t)for(o=0,s=n.data.datasets.length;o<s;++o)if(n.isDatasetVisible(o)){a=n.getDatasetMeta(o),t=a.data,r=a.controller,r!==e&&r.configure();break}if(!t)return 0;for(o=0,s=t.length;o<s;++o)l=r.resolveDataElementOptions(o),"inner"!==l.borderAlign&&(i=Math.max(i,l.borderWidth||0,l.hoverBorderWidth||0));return i}getMaxOffset(t){let e=0;for(let i=0,n=t.length;i<n;++i){const t=this.resolveDataElementOptions(i);e=Math.max(e,t.offset||0,t.hoverOffset||0)}return e}_getRingWeightOffset(t){let e=0;for(let i=0;i<t;++i)this.chart.isDatasetVisible(i)&&(e+=this._getRingWeight(i));return e}_getRingWeight(t){return Math.max(K(this.chart.data.datasets[t].weight,1),0)}_getVisibleDatasetWeightTotal(){return this._getRingWeightOffset(this.chart.data.datasets.length)||1}}fo.id="doughnut",fo.defaults={datasetElementType:!1,dataElementType:"arc",animation:{animateRotate:!0,animateScale:!1},animations:{numbers:{type:"number",properties:["circumference","endAngle","innerRadius","outerRadius","startAngle","x","y","offset","borderWidth","spacing"]}},cutout:"50%",rotation:0,circumference:360,radius:"100%",spacing:0,indexAxis:"r"},fo.descriptors={_scriptable:t=>"spacing"!==t,_indexable:t=>"spacing"!==t},fo.overrides={aspectRatio:1,plugins:{legend:{labels:{generateLabels(t){const e=t.data;return e.labels.length&&e.datasets.length?e.labels.map(((e,i)=>{const 
n=t.getDatasetMeta(0).controller.getStyle(i);return{text:e,fillStyle:n.backgroundColor,strokeStyle:n.borderColor,lineWidth:n.borderWidth,hidden:!t.getDataVisibility(i),index:i}})):[]}},onClick(t,e,i){i.chart.toggleDataVisibility(e.index),i.chart.update()}},tooltip:{callbacks:{title:()=>"",label(t){let e=t.label;const i=": "+t.formattedValue;return Y(e)?(e=e.slice(),e[0]+=i):e+=i,e}}}}};class go extends Ri{initialize(){this.enableOptionSharing=!0,super.initialize()}update(t){const e=this,i=e._cachedMeta,{dataset:n,data:o=[],_dataset:s}=i,a=e.chart._animationsDisabled;let{start:r,count:l}=function(t,e,i){const n=e.length;let o=0,s=n;if(t._sorted){const{iScale:a,_parsed:r}=t,l=a.axis,{min:c,max:h,minDefined:d,maxDefined:u}=a.getUserBounds();d&&(o=Nt(Math.min(oe(r,a.axis,c).lo,i?n:oe(e,l,a.getPixelForValue(c)).lo),0,n-1)),s=u?Nt(Math.max(oe(r,a.axis,h).hi+1,i?0:oe(e,l,a.getPixelForValue(h)).hi+1),o,n)-o:n-o}return{start:o,count:s}}(i,o,a);e._drawStart=r,e._drawCount=l,function(t){const{xScale:e,yScale:i,_scaleRanges:n}=t,o={xmin:e.min,xmax:e.max,ymin:i.min,ymax:i.max};if(!n)return t._scaleRanges=o,!0;const s=n.xmin!==e.min||n.xmax!==e.max||n.ymin!==i.min||n.ymax!==i.max;return Object.assign(n,o),s}(i)&&(r=0,l=o.length),n._decimated=!!s._decimated,n.points=o;const c=e.resolveDatasetElementOptions(t);e.options.showLine||(c.borderWidth=0),c.segment=e.options.segment,e.updateElement(n,void 0,{animated:!a,options:c},t),e.updateElements(o,r,l,t)}updateElements(t,e,i,n){const o=this,s="reset"===n,{iScale:a,vScale:r,_stacked:l}=o._cachedMeta,c=o.resolveDataElementOptions(e,n),h=o.getSharedOptions(c),d=o.includeOptions(n,h),u=a.axis,f=r.axis,g=o.options.spanGaps,p=Tt(g)?g:Number.POSITIVE_INFINITY,m=o.chart._animationsDisabled||s||"none"===n;let x=e>0&&o.getParsed(e-1);for(let c=e;c<e+i;++c){const 
e=t[c],i=o.getParsed(c),g=m?e:{},b=$(i[f]),_=g[u]=a.getPixelForValue(i[u],c),y=g[f]=s||b?r.getBasePixel():r.getPixelForValue(l?o.applyStack(r,i,l):i[f],c);g.skip=isNaN(_)||isNaN(y)||b,g.stop=c>0&&i[u]-x[u]>p,g.parsed=i,d&&(g.options=h||o.resolveDataElementOptions(c,n)),m||o.updateElement(e,c,g,n),x=i}o.updateSharedOptions(h,n,c)}getMaxOverflow(){const t=this,e=t._cachedMeta,i=e.dataset,n=i.options&&i.options.borderWidth||0,o=e.data||[];if(!o.length)return n;const s=o[0].size(t.resolveDataElementOptions(0)),a=o[o.length-1].size(t.resolveDataElementOptions(o.length-1));return Math.max(n,s,a)/2}draw(){const t=this._cachedMeta;t.dataset.updateControlPoints(this.chart.chartArea,t.iScale.axis),super.draw()}}go.id="line",go.defaults={datasetElementType:"line",dataElementType:"point",showLine:!0,spanGaps:!1},go.overrides={scales:{_index_:{type:"category"},_value_:{type:"linear"}}};class po extends Ri{constructor(t,e){super(t,e),this.innerRadius=void 0,this.outerRadius=void 0}getLabelAndValue(t){const e=this._cachedMeta,i=this.chart,n=i.data.labels||[],o=zi(e._parsed[t].r,i.options.locale);return{label:n[t]||"",value:o}}update(t){const e=this._cachedMeta.data;this._updateRadius(),this.updateElements(e,0,e.length,t)}_updateRadius(){const t=this,e=t.chart,i=e.chartArea,n=e.options,o=Math.min(i.right-i.left,i.bottom-i.top),s=Math.max(o/2,0),a=(s-Math.max(n.cutoutPercentage?s/100*n.cutoutPercentage:1,0))/e.getVisibleDatasetCount();t.outerRadius=s-a*t.index,t.innerRadius=t.outerRadius-a}updateElements(t,e,i,n){const o=this,s="reset"===n,a=o.chart,r=o.getDataset(),l=a.options.animation,c=o._cachedMeta.rScale,h=c.xCenter,d=c.yCenter,u=c.getIndexAngle(0)-.5*bt;let f,g=u;const p=360/o.countVisibleElements();for(f=0;f<e;++f)g+=o._computeAngle(f,n,p);for(f=e;f<e+i;f++){const e=t[f];let i=g,m=g+o._computeAngle(f,n,p),x=a.getDataVisibility(f)?c.getDistanceFromCenterForValue(r.data[f]):0;g=m,s&&(l.animateScale&&(x=0),l.animateRotate&&(i=m=u));const 
b={x:h,y:d,innerRadius:0,outerRadius:x,startAngle:i,endAngle:m,options:o.resolveDataElementOptions(f,n)};o.updateElement(e,f,b,n)}}countVisibleElements(){const t=this.getDataset(),e=this._cachedMeta;let i=0;return e.data.forEach(((e,n)=>{!isNaN(t.data[n])&&this.chart.getDataVisibility(n)&&i++})),i}_computeAngle(t,e,i){return this.chart.getDataVisibility(t)?Et(this.resolveDataElementOptions(t,e).angle||i):0}}po.id="polarArea",po.defaults={dataElementType:"arc",animation:{animateRotate:!0,animateScale:!0},animations:{numbers:{type:"number",properties:["x","y","startAngle","endAngle","innerRadius","outerRadius"]}},indexAxis:"r",startAngle:0},po.overrides={aspectRatio:1,plugins:{legend:{labels:{generateLabels(t){const e=t.data;return e.labels.length&&e.datasets.length?e.labels.map(((e,i)=>{const n=t.getDatasetMeta(0).controller.getStyle(i);return{text:e,fillStyle:n.backgroundColor,strokeStyle:n.borderColor,lineWidth:n.borderWidth,hidden:!t.getDataVisibility(i),index:i}})):[]}},onClick(t,e,i){i.chart.toggleDataVisibility(e.index),i.chart.update()}},tooltip:{callbacks:{title:()=>"",label:t=>t.chart.data.labels[t.dataIndex]+": "+t.formattedValue}}},scales:{r:{type:"radialLinear",angleLines:{display:!1},beginAtZero:!0,grid:{circular:!0},pointLabels:{display:!1},startAngle:0}}};class mo extends fo{}mo.id="pie",mo.defaults={cutout:0,rotation:0,circumference:360,radius:"100%"};class xo extends Ri{getLabelAndValue(t){const e=this._cachedMeta.vScale,i=this.getParsed(t);return{label:e.getLabels()[t],value:""+e.getLabelForValue(i[e.axis])}}update(t){const e=this,i=e._cachedMeta,n=i.dataset,o=i.data||[],s=i.iScale.getLabels();if(n.points=o,"resize"!==t){const i=e.resolveDatasetElementOptions(t);e.options.showLine||(i.borderWidth=0);const a={_loop:!0,_fullLoop:s.length===o.length,options:i};e.updateElement(n,void 0,a,t)}e.updateElements(o,0,o.length,t)}updateElements(t,e,i,n){const o=this,s=o.getDataset(),a=o._cachedMeta.rScale,r="reset"===n;for(let l=e;l<e+i;l++){const 
e=t[l],i=o.resolveDataElementOptions(l,n),c=a.getPointPositionForValue(l,s.data[l]),h=r?a.xCenter:c.x,d=r?a.yCenter:c.y,u={x:h,y:d,angle:c.angle,skip:isNaN(h)||isNaN(d),options:i};o.updateElement(e,l,u,n)}}}xo.id="radar",xo.defaults={datasetElementType:"line",dataElementType:"point",indexAxis:"r",showLine:!0,elements:{line:{fill:"start"}}},xo.overrides={aspectRatio:1,scales:{r:{type:"radialLinear"}}};class bo extends go{}bo.id="scatter",bo.defaults={showLine:!1,fill:!1},bo.overrides={interaction:{mode:"point"},plugins:{tooltip:{callbacks:{title:()=>"",label:t=>"("+t.label+", "+t.formattedValue+")"}}},scales:{x:{type:"linear"},y:{type:"linear"}}};var _o=Object.freeze({__proto__:null,BarController:ho,BubbleController:uo,DoughnutController:fo,LineController:go,PolarAreaController:po,PieController:mo,RadarController:xo,ScatterController:bo});function yo(t,e,i){const{startAngle:n,pixelMargin:o,x:s,y:a,outerRadius:r,innerRadius:l}=e;let c=o/r;t.beginPath(),t.arc(s,a,r,n-c,i+c),l>o?(c=o/l,t.arc(s,a,l,i+c,n-c,!0)):t.arc(s,a,o,i+Mt,n-Mt),t.closePath(),t.clip()}function vo(t,e,i,n){const o=Re(t.options.borderRadius,["outerStart","outerEnd","innerStart","innerEnd"]);const s=(i-e)/2,a=Math.min(s,n*e/2),r=t=>{const e=(i-Math.min(s,t))*n/2;return Nt(t,0,Math.min(s,e))};return{outerStart:r(o.outerStart),outerEnd:r(o.outerEnd),innerStart:Nt(o.innerStart,0,a),innerEnd:Nt(o.innerEnd,0,a)}}function wo(t,e,i,n){return{x:i+t*Math.cos(e),y:n+t*Math.sin(e)}}function Mo(t,e,i,n,o){const{x:s,y:a,startAngle:r,pixelMargin:l,innerRadius:c}=e,h=Math.max(e.outerRadius+n+i-l,0),d=c>0?c+n+i+l:0;let u=0;const f=o-r;if(n){const t=((c>0?c-n:0)+(h>0?h-n:0))/2;u=(f-(0!==t?f*t/(t+n):f))/2}const g=(f-Math.max(.001,f*h-i/bt)/h)/2,p=r+g+u,m=o-g-u,{outerStart:x,outerEnd:b,innerStart:_,innerEnd:y}=vo(e,d,h,m-p),v=h-x,w=h-b,M=p+x/v,k=m-b/w,S=d+_,P=d+y,D=p+_/S,C=m-y/P;if(t.beginPath(),t.arc(s,a,h,M,k),b>0){const e=wo(w,k,s,a);t.arc(e.x,e.y,b,k,m+Mt)}const O=wo(P,m,s,a);if(t.lineTo(O.x,O.y),y>0){const 
e=wo(P,C,s,a);t.arc(e.x,e.y,y,m+Mt,C+Math.PI)}if(t.arc(s,a,d,m-y/d,p+_/d,!0),_>0){const e=wo(S,D,s,a);t.arc(e.x,e.y,_,D+Math.PI,p-Mt)}const T=wo(v,p,s,a);if(t.lineTo(T.x,T.y),x>0){const e=wo(v,M,s,a);t.arc(e.x,e.y,x,p-Mt,M)}t.closePath()}function ko(t,e,i,n,o){const{options:s}=e,a="inner"===s.borderAlign;s.borderWidth&&(a?(t.lineWidth=2*s.borderWidth,t.lineJoin="round"):(t.lineWidth=s.borderWidth,t.lineJoin="bevel"),e.fullCircles&&function(t,e,i){const{x:n,y:o,startAngle:s,pixelMargin:a,fullCircles:r}=e,l=Math.max(e.outerRadius-a,0),c=e.innerRadius+a;let h;for(i&&yo(t,e,s+_t),t.beginPath(),t.arc(n,o,c,s+_t,s,!0),h=0;h<r;++h)t.stroke();for(t.beginPath(),t.arc(n,o,l,s,s+_t),h=0;h<r;++h)t.stroke()}(t,e,a),a&&yo(t,e,o),Mo(t,e,i,n,o),t.stroke())}class So extends Ei{constructor(t){super(),this.options=void 0,this.circumference=void 0,this.startAngle=void 0,this.endAngle=void 0,this.innerRadius=void 0,this.outerRadius=void 0,this.pixelMargin=0,this.fullCircles=0,t&&Object.assign(this,t)}inRange(t,e,i){const n=this.getProps(["x","y"],i),{angle:o,distance:s}=Ft(n,{x:t,y:e}),{startAngle:a,endAngle:r,innerRadius:l,outerRadius:c,circumference:h}=this.getProps(["startAngle","endAngle","innerRadius","outerRadius","circumference"],i),d=this.options.spacing/2;return(h>=_t||Ht(o,a,r))&&(s>=l+d&&s<=c+d)}getCenterPoint(t){const{x:e,y:i,startAngle:n,endAngle:o,innerRadius:s,outerRadius:a}=this.getProps(["x","y","startAngle","endAngle","innerRadius","outerRadius","circumference"],t),{offset:r,spacing:l}=this.options,c=(n+o)/2,h=(s+a+l+r)/2;return{x:e+Math.cos(c)*h,y:i+Math.sin(c)*h}}tooltipPosition(t){return this.getCenterPoint(t)}draw(t){const e=this,{options:i,circumference:n}=e,o=(i.offset||0)/2,s=(i.spacing||0)/2;if(e.pixelMargin="inner"===i.borderAlign?.33:0,e.fullCircles=n>_t?Math.floor(n/_t):0,0===n||e.innerRadius<0||e.outerRadius<0)return;t.save();let a=0;if(o){a=o/2;const 
i=(e.startAngle+e.endAngle)/2;t.translate(Math.cos(i)*a,Math.sin(i)*a),e.circumference>=bt&&(a=o)}t.fillStyle=i.backgroundColor,t.strokeStyle=i.borderColor;const r=function(t,e,i,n){const{fullCircles:o,startAngle:s,circumference:a}=e;let r=e.endAngle;if(o){Mo(t,e,i,n,s+_t);for(let e=0;e<o;++e)t.fill();isNaN(a)||(r=s+a%_t,a%_t==0&&(r+=_t))}return Mo(t,e,i,n,r),t.fill(),r}(t,e,a,s);ko(t,e,a,s,r),t.restore()}}function Po(t,e,i=e){t.lineCap=K(i.borderCapStyle,e.borderCapStyle),t.setLineDash(K(i.borderDash,e.borderDash)),t.lineDashOffset=K(i.borderDashOffset,e.borderDashOffset),t.lineJoin=K(i.borderJoinStyle,e.borderJoinStyle),t.lineWidth=K(i.borderWidth,e.borderWidth),t.strokeStyle=K(i.borderColor,e.borderColor)}function Do(t,e,i){t.lineTo(i.x,i.y)}function Co(t,e,i={}){const n=t.length,{start:o=0,end:s=n-1}=i,{start:a,end:r}=e,l=Math.max(o,a),c=Math.min(s,r),h=o<a&&s<a||o>r&&s>r;return{count:n,start:l,loop:e.loop,ilen:c<l&&!h?n+c-l:c-l}}function Oo(t,e,i,n){const{points:o,options:s}=e,{count:a,start:r,loop:l,ilen:c}=Co(o,i,n),h=function(t){return t.stepped?Jt:t.tension||"monotone"===t.cubicInterpolationMode?te:Do}(s);let d,u,f,{move:g=!0,reverse:p}=n||{};for(d=0;d<=c;++d)u=o[(r+(p?c-d:d))%a],u.skip||(g?(t.moveTo(u.x,u.y),g=!1):h(t,f,u,p,s.stepped),f=u);return l&&(u=o[(r+(p?c:0))%a],h(t,f,u,p,s.stepped)),!!l}function To(t,e,i,n){const o=e.points,{count:s,start:a,ilen:r}=Co(o,i,n),{move:l=!0,reverse:c}=n||{};let h,d,u,f,g,p,m=0,x=0;const b=t=>(a+(c?r-t:t))%s,_=()=>{f!==g&&(t.lineTo(m,g),t.lineTo(m,f),t.lineTo(m,p))};for(l&&(d=o[b(0)],t.moveTo(d.x,d.y)),h=0;h<=r;++h){if(d=o[b(h)],d.skip)continue;const e=d.x,i=d.y,n=0|e;n===u?(i<f?f=i:i>g&&(g=i),m=(x*m+e)/++x):(_(),t.lineTo(e,i),u=n,x=0,f=g=i),p=i}_()}function Ao(t){const 
e=t.options,i=e.borderDash&&e.borderDash.length;return!(t._decimated||t._loop||e.tension||"monotone"===e.cubicInterpolationMode||e.stepped||i)?To:Oo}So.id="arc",So.defaults={borderAlign:"center",borderColor:"#fff",borderRadius:0,borderWidth:2,offset:0,spacing:0,angle:void 0},So.defaultRoutes={backgroundColor:"backgroundColor"};const Lo="function"==typeof Path2D;function Ro(t,e,i,n){Lo&&1===e.segments.length?function(t,e,i,n){let o=e._path;o||(o=e._path=new Path2D,e.path(o,i,n)&&o.closePath()),Po(t,e.options),t.stroke(o)}(t,e,i,n):function(t,e,i,n){const{segments:o,options:s}=e,a=Ao(e);for(const r of o)Po(t,s,r.style),t.beginPath(),a(t,e,r,{start:i,end:i+n-1})&&t.closePath(),t.stroke()}(t,e,i,n)}class Eo extends Ei{constructor(t){super(),this.animated=!0,this.options=void 0,this._loop=void 0,this._fullLoop=void 0,this._path=void 0,this._points=void 0,this._segments=void 0,this._decimated=!1,this._pointsUpdated=!1,t&&Object.assign(this,t)}updateControlPoints(t,e){const i=this,n=i.options;if((n.tension||"monotone"===n.cubicInterpolationMode)&&!n.stepped&&!i._pointsUpdated){const o=n.spanGaps?i._loop:i._fullLoop;gn(i._points,n,t,o,e),i._pointsUpdated=!0}}set points(t){const e=this;e._points=t,delete e._segments,delete e._path,e._pointsUpdated=!1}get points(){return this._points}get segments(){return this._segments||(this._segments=Sn(this,this.options.segment))}first(){const t=this.segments,e=this.points;return t.length&&e[t[0].start]}last(){const t=this.segments,e=this.points,i=t.length;return i&&e[t[i-1].end]}interpolate(t,e){const i=this,n=i.options,o=t[e],s=i.points,a=kn(i,{property:e,start:o,end:o});if(!a.length)return;const r=[],l=function(t){return t.stepped?mn:t.tension||"monotone"===t.cubicInterpolationMode?xn:pn}(n);let c,h;for(c=0,h=a.length;c<h;++c){const{start:i,end:h}=a[c],d=s[i],u=s[h];if(d===u){r.push(d);continue}const f=l(d,u,Math.abs((o-d[e])/(u[e]-d[e])),n.stepped);f[e]=t[e],r.push(f)}return 1===r.length?r[0]:r}pathSegment(t,e,i){return 
Ao(this)(t,this,e,i)}path(t,e,i){const n=this,o=n.segments,s=Ao(n);let a=n._loop;e=e||0,i=i||n.points.length-e;for(const r of o)a&=s(t,n,r,{start:e,end:e+i-1});return!!a}draw(t,e,i,n){const o=this,s=o.options||{};(o.points||[]).length&&s.borderWidth&&(t.save(),Ro(t,o,i,n),t.restore(),o.animated&&(o._pointsUpdated=!1,o._path=void 0))}}function Io(t,e,i,n){const o=t.options,{[i]:s}=t.getProps([i],n);return Math.abs(e-s)<o.radius+o.hitRadius}Eo.id="line",Eo.defaults={borderCapStyle:"butt",borderDash:[],borderDashOffset:0,borderJoinStyle:"miter",borderWidth:3,capBezierPoints:!0,cubicInterpolationMode:"default",fill:!1,spanGaps:!1,stepped:!1,tension:0},Eo.defaultRoutes={backgroundColor:"backgroundColor",borderColor:"borderColor"},Eo.descriptors={_scriptable:!0,_indexable:t=>"borderDash"!==t&&"fill"!==t};class zo extends Ei{constructor(t){super(),this.options=void 0,this.parsed=void 0,this.skip=void 0,this.stop=void 0,t&&Object.assign(this,t)}inRange(t,e,i){const n=this.options,{x:o,y:s}=this.getProps(["x","y"],i);return Math.pow(t-o,2)+Math.pow(e-s,2)<Math.pow(n.hitRadius+n.radius,2)}inXRange(t,e){return Io(this,t,"x",e)}inYRange(t,e){return Io(this,t,"y",e)}getCenterPoint(t){const{x:e,y:i}=this.getProps(["x","y"],t);return{x:e,y:i}}size(t){let e=(t=t||this.options||{}).radius||0;e=Math.max(e,e&&t.hoverRadius||0);return 2*(e+(e&&t.borderWidth||0))}draw(t){const e=this,i=e.options;e.skip||i.radius<.1||(t.strokeStyle=i.borderColor,t.lineWidth=i.borderWidth,t.fillStyle=i.backgroundColor,Kt(t,i,e.x,e.y))}getRange(){const t=this.options||{};return t.radius+t.hitRadius}}function Fo(t,e){const{x:i,y:n,base:o,width:s,height:a}=t.getProps(["x","y","base","width","height"],e);let r,l,c,h,d;return t.horizontal?(d=a/2,r=Math.min(i,o),l=Math.max(i,o),c=n-d,h=n+d):(d=s/2,r=i-d,l=i+d,c=Math.min(n,o),h=Math.max(n,o)),{left:r,top:c,right:l,bottom:h}}function Vo(t){let e=t.options.borderSkipped;const i={};return 
e?(e=t.horizontal?Bo(e,"left","right",t.base>t.x):Bo(e,"bottom","top",t.base<t.y),i[e]=!0,i):i}function Bo(t,e,i,n){var o,s,a;return n?(a=i,t=Wo(t=(o=t)===(s=e)?a:o===a?s:o,i,e)):t=Wo(t,e,i),t}function Wo(t,e,i){return"start"===t?e:"end"===t?i:t}function Ho(t,e,i,n){return t?0:Math.max(Math.min(e,n),i)}function No(t){const e=Fo(t),i=e.right-e.left,n=e.bottom-e.top,o=function(t,e,i){const n=t.options.borderWidth,o=Vo(t),s=Ee(n);return{t:Ho(o.top,s.top,0,i),r:Ho(o.right,s.right,0,e),b:Ho(o.bottom,s.bottom,0,i),l:Ho(o.left,s.left,0,e)}}(t,i/2,n/2),s=function(t,e,i){const{enableBorderRadius:n}=t.getProps(["enableBorderRadius"]),o=t.options.borderRadius,s=Ie(o),a=Math.min(e,i),r=Vo(t),l=n||U(o);return{topLeft:Ho(!l||r.top||r.left,s.topLeft,0,a),topRight:Ho(!l||r.top||r.right,s.topRight,0,a),bottomLeft:Ho(!l||r.bottom||r.left,s.bottomLeft,0,a),bottomRight:Ho(!l||r.bottom||r.right,s.bottomRight,0,a)}}(t,i/2,n/2);return{outer:{x:e.left,y:e.top,w:i,h:n,radius:s},inner:{x:e.left+o.l,y:e.top+o.t,w:i-o.l-o.r,h:n-o.t-o.b,radius:{topLeft:Math.max(0,s.topLeft-Math.max(o.t,o.l)),topRight:Math.max(0,s.topRight-Math.max(o.t,o.r)),bottomLeft:Math.max(0,s.bottomLeft-Math.max(o.b,o.l)),bottomRight:Math.max(0,s.bottomRight-Math.max(o.b,o.r))}}}}function jo(t,e,i,n){const o=null===e,s=null===i,a=t&&!(o&&s)&&Fo(t,n);return a&&(o||e>=a.left&&e<=a.right)&&(s||i>=a.top&&i<=a.bottom)}function $o(t,e){t.rect(e.x,e.y,e.w,e.h)}zo.id="point",zo.defaults={borderWidth:1,hitRadius:1,hoverBorderWidth:1,hoverRadius:4,pointStyle:"circle",radius:3,rotation:0},zo.defaultRoutes={backgroundColor:"backgroundColor",borderColor:"borderColor"};class Yo extends Ei{constructor(t){super(),this.options=void 0,this.horizontal=void 0,this.base=void 0,this.width=void 0,this.height=void 0,t&&Object.assign(this,t)}draw(t){const e=this.options,{inner:i,outer:n}=No(this),o=(s=n.radius).topLeft||s.topRight||s.bottomLeft||s.bottomRight?ie:$o;var 
s;t.save(),n.w===i.w&&n.h===i.h||(t.beginPath(),o(t,n),t.clip(),o(t,i),t.fillStyle=e.borderColor,t.fill("evenodd")),t.beginPath(),o(t,i),t.fillStyle=e.backgroundColor,t.fill(),t.restore()}inRange(t,e,i){return jo(this,t,e,i)}inXRange(t,e){return jo(this,t,null,e)}inYRange(t,e){return jo(this,null,t,e)}getCenterPoint(t){const{x:e,y:i,base:n,horizontal:o}=this.getProps(["x","y","base","horizontal"],t);return{x:o?(e+n)/2:e,y:o?i:(i+n)/2}}getRange(t){return"x"===t?this.width/2:this.height/2}}Yo.id="bar",Yo.defaults={borderSkipped:"start",borderWidth:0,borderRadius:0,enableBorderRadius:!0,pointStyle:void 0},Yo.defaultRoutes={backgroundColor:"backgroundColor",borderColor:"borderColor"};var Uo=Object.freeze({__proto__:null,ArcElement:So,LineElement:Eo,PointElement:zo,BarElement:Yo});function Xo(t){if(t._decimated){const e=t._data;delete t._decimated,delete t._data,Object.defineProperty(t,"data",{value:e})}}function qo(t){t.data.datasets.forEach((t=>{Xo(t)}))}var Ko={id:"decimation",defaults:{algorithm:"min-max",enabled:!1},beforeElementsUpdate:(t,e,i)=>{if(!i.enabled)return void qo(t);const n=t.width;t.data.datasets.forEach(((e,o)=>{const{_data:s,indexAxis:a}=e,r=t.getDatasetMeta(o),l=s||e.data;if("y"===Ve([a,t.options.indexAxis]))return;if("line"!==r.type)return;const c=t.scales[r.xAxisID];if("linear"!==c.type&&"time"!==c.type)return;if(t.options.parsing)return;let h,{start:d,count:u}=function(t,e){const i=e.length;let n,o=0;const{iScale:s}=t,{min:a,max:r,minDefined:l,maxDefined:c}=s.getUserBounds();return l&&(o=Nt(oe(e,s.axis,a).lo,0,i-1)),n=c?Nt(oe(e,s.axis,r).hi+1,o,i)-o:i-o,{start:o,count:n}}(r,l);if(u<=4*n)Xo(e);else{switch($(s)&&(e._data=l,delete e.data,Object.defineProperty(e,"data",{configurable:!0,enumerable:!0,get:function(){return this._decimated},set:function(t){this._data=t}})),i.algorithm){case"lttb":h=function(t,e,i,n,o){const s=o.samples||n;if(s>=i)return t.slice(e,e+i);const a=[],r=(i-2)/(s-2);let l=0;const c=e+i-1;let 
h,d,u,f,g,p=e;for(a[l++]=t[p],h=0;h<s-2;h++){let n,o=0,s=0;const c=Math.floor((h+1)*r)+1+e,m=Math.min(Math.floor((h+2)*r)+1,i)+e,x=m-c;for(n=c;n<m;n++)o+=t[n].x,s+=t[n].y;o/=x,s/=x;const b=Math.floor(h*r)+1+e,_=Math.floor((h+1)*r)+1+e,{x:y,y:v}=t[p];for(u=f=-1,n=b;n<_;n++)f=.5*Math.abs((y-o)*(t[n].y-v)-(y-t[n].x)*(s-v)),f>u&&(u=f,d=t[n],g=n);a[l++]=d,p=g}return a[l++]=t[c],a}(l,d,u,n,i);break;case"min-max":h=function(t,e,i,n){let o,s,a,r,l,c,h,d,u,f,g=0,p=0;const m=[],x=e+i-1,b=t[e].x,_=t[x].x-b;for(o=e;o<e+i;++o){s=t[o],a=(s.x-b)/_*n,r=s.y;const e=0|a;if(e===l)r<u?(u=r,c=o):r>f&&(f=r,h=o),g=(p*g+s.x)/++p;else{const i=o-1;if(!$(c)&&!$(h)){const e=Math.min(c,h),n=Math.max(c,h);e!==d&&e!==i&&m.push({...t[e],x:g}),n!==d&&n!==i&&m.push({...t[n],x:g})}o>0&&i!==d&&m.push(t[i]),m.push(s),l=e,p=0,u=f=r,c=h=d=o}}return m}(l,d,u,n);break;default:throw new Error(`Unsupported decimation algorithm '${i.algorithm}'`)}e._decimated=h}}))},destroy(t){qo(t)}};function Go(t,e,i){const n=function(t){const e=t.options,i=e.fill;let n=K(i&&i.target,i);return void 0===n&&(n=!!e.backgroundColor),!1!==n&&null!==n&&(!0===n?"origin":n)}(t);if(U(n))return!isNaN(n.value)&&n;let o=parseFloat(n);return X(o)&&Math.floor(o)===o?("-"!==n[0]&&"+"!==n[0]||(o=e+o),!(o===e||o<0||o>=i)&&o):["origin","start","end","stack"].indexOf(n)>=0&&n}class Zo{constructor(t){this.x=t.x,this.y=t.y,this.radius=t.radius}pathSegment(t,e,i){const{x:n,y:o,radius:s}=this;return e=e||{start:0,end:_t},t.arc(n,o,s,e.end,e.start,!0),!i.bounds}interpolate(t){const{x:e,y:i,radius:n}=this,o=t.angle;return{x:e+Math.cos(o)*n,y:i+Math.sin(o)*n,angle:o}}}function Qo(t){return(t.scale||{}).getPointPositionForValue?function(t){const{scale:e,fill:i}=t,n=e.options,o=e.getLabels().length,s=[],a=n.reverse?e.max:e.min,r=n.reverse?e.min:e.max;let l,c,h;if(h="start"===i?a:"end"===i?r:U(i)?i.value:e.getBaseValue(),n.grid.circular)return c=e.getPointPositionForValue(0,a),new 
Zo({x:c.x,y:c.y,radius:e.getDistanceFromCenterForValue(h)});for(l=0;l<o;++l)s.push(e.getPointPositionForValue(l,h));return s}(t):function(t){const{scale:e={},fill:i}=t;let n,o=null;return"start"===i?o=e.bottom:"end"===i?o=e.top:U(i)?o=e.getPixelForValue(i.value):e.getBasePixel&&(o=e.getBasePixel()),X(o)?(n=e.isHorizontal(),{x:n?o:null,y:n?null:o}):null}(t)}function Jo(t){const{chart:e,scale:i,index:n,line:o}=t,s=[],a=o.segments,r=o.points,l=function(t,e){const i=[],n=t.getSortedVisibleDatasetMetas();for(let t=0;t<n.length;t++){const o=n[t];if(o.index===e)break;ts(o)&&i.unshift(o.dataset)}return i}(e,n);l.push(ns({x:null,y:i.bottom},o));for(let t=0;t<a.length;t++){const e=a[t];for(let t=e.start;t<=e.end;t++)es(s,r[t],l)}return new Eo({points:s,options:{}})}const ts=t=>"line"===t.type&&!t.hidden;function es(t,e,i){const n=[];for(let o=0;o<i.length;o++){const s=i[o],{first:a,last:r,point:l}=is(s,e,"x");if(!(!l||a&&r))if(a)n.unshift(l);else if(t.push(l),!r)break}t.push(...n)}function is(t,e,i){const n=t.interpolate(e,i);if(!n)return{};const o=n[i],s=t.segments,a=t.points;let r=!1,l=!1;for(let t=0;t<s.length;t++){const e=s[t],n=a[e.start][i],c=a[e.end][i];if(o>=n&&o<=c){r=o===n,l=o===c;break}}return{first:r,last:l,point:n}}function ns(t,e){let i=[],n=!1;return Y(t)?(n=!0,i=t):i=function(t,e){const{x:i=null,y:n=null}=t||{},o=e.points,s=[];return e.segments.forEach((t=>{const e=o[t.start],a=o[t.end];null!==n?(s.push({x:e.x,y:n}),s.push({x:a.x,y:n})):null!==i&&(s.push({x:i,y:e.y}),s.push({x:i,y:a.y}))})),s}(t,e),i.length?new Eo({points:i,options:{tension:0},_loop:n,_fullLoop:n}):null}function os(t,e,i){let n=t[e].fill;const o=[e];let s;if(!i)return n;for(;!1!==n&&-1===o.indexOf(n);){if(!X(n))return n;if(s=t[n],!s)return!1;if(s.visible)return n;o.push(n),n=s.fill}return!1}function ss(t,e,i){t.beginPath(),e.path(t),t.lineTo(e.last().x,i),t.lineTo(e.first().x,i),t.closePath(),t.clip()}function as(t,e,i,n){if(n)return;let 
o=e[t],s=i[t];return"angle"===t&&(o=Wt(o),s=Wt(s)),{property:t,start:o,end:s}}function rs(t,e,i,n){return t&&e?n(t[i],e[i]):t?t[i]:e?e[i]:0}function ls(t,e,i){const{top:n,bottom:o}=e.chart.chartArea,{property:s,start:a,end:r}=i||{};"x"===s&&(t.beginPath(),t.rect(a,n,r-a,o-n),t.clip())}function cs(t,e,i,n){const o=e.interpolate(i,n);o&&t.lineTo(o.x,o.y)}function hs(t,e){const{line:i,target:n,property:o,color:s,scale:a}=e,r=function(t,e,i){const n=t.segments,o=t.points,s=e.points,a=[];for(const t of n){const n=as(i,o[t.start],o[t.end],t.loop);if(!e.segments){a.push({source:t,target:n,start:o[t.start],end:o[t.end]});continue}const r=kn(e,n);for(const e of r){const r=as(i,s[e.start],s[e.end],e.loop),l=Mn(t,o,r);for(const t of l)a.push({source:t,target:e,start:{[i]:rs(n,r,"start",Math.max)},end:{[i]:rs(n,r,"end",Math.min)}})}}return a}(i,n,o);for(const{source:e,target:l,start:c,end:h}of r){const{style:{backgroundColor:r=s}={}}=e;t.save(),t.fillStyle=r,ls(t,a,as(o,c,h)),t.beginPath();const d=!!i.pathSegment(t,e);d?t.closePath():cs(t,n,h,o);const u=!!n.pathSegment(t,l,{move:d,reverse:!0}),f=d&&u;f||cs(t,n,c,o),t.closePath(),t.fill(f?"evenodd":"nonzero"),t.restore()}}function ds(t,e,i){const n=function(t){const{chart:e,fill:i,line:n}=t;if(X(i))return function(t,e){const i=t.getDatasetMeta(e);return i&&t.isDatasetVisible(e)?i.dataset:null}(e,i);if("stack"===i)return Jo(t);const o=Qo(t);return o instanceof Zo?o:ns(o,n)}(e),{line:o,scale:s,axis:a}=e,r=o.options,l=r.fill,c=r.backgroundColor,{above:h=c,below:d=c}=l||{};n&&o.points.length&&(Zt(t,i),function(t,e){const{line:i,target:n,above:o,below:s,area:a,scale:r}=e,l=i._loop?"angle":e.axis;t.save(),"x"===l&&s!==o&&(ss(t,n,a.top),hs(t,{line:i,target:n,color:o,scale:r,property:l}),t.restore(),t.save(),ss(t,n,a.bottom)),hs(t,{line:i,target:n,color:s,scale:r,property:l}),t.restore()}(t,{line:o,target:n,above:h,below:d,area:i,scale:s,axis:a}),Qt(t))}var us={id:"filler",afterDatasetsUpdate(t,e,i){const 
n=(t.data.datasets||[]).length,o=[];let s,a,r,l;for(a=0;a<n;++a)s=t.getDatasetMeta(a),r=s.dataset,l=null,r&&r.options&&r instanceof Eo&&(l={visible:t.isDatasetVisible(a),index:a,fill:Go(r,a,n),chart:t,axis:s.controller.options.indexAxis,scale:s.vScale,line:r}),s.$filler=l,o.push(l);for(a=0;a<n;++a)l=o[a],l&&!1!==l.fill&&(l.fill=os(o,a,i.propagate))},beforeDraw(t,e,i){const n="beforeDraw"===i.drawTime,o=t.getSortedVisibleDatasetMetas(),s=t.chartArea;for(let e=o.length-1;e>=0;--e){const i=o[e].$filler;i&&(i.line.updateControlPoints(s,i.axis),n&&ds(t.ctx,i,s))}},beforeDatasetsDraw(t,e,i){if("beforeDatasetsDraw"!==i.drawTime)return;const n=t.getSortedVisibleDatasetMetas();for(let e=n.length-1;e>=0;--e){const i=n[e].$filler;i&&ds(t.ctx,i,t.chartArea)}},beforeDatasetDraw(t,e,i){const n=e.meta.$filler;n&&!1!==n.fill&&"beforeDatasetDraw"===i.drawTime&&ds(t.ctx,n,t.chartArea)},defaults:{propagate:!0,drawTime:"beforeDatasetDraw"}};const fs=(t,e)=>{let{boxHeight:i=e,boxWidth:n=e}=t;return t.usePointStyle&&(i=Math.min(i,e),n=Math.min(n,e)),{boxWidth:n,boxHeight:i,itemHeight:Math.max(e,i)}};class gs extends Ei{constructor(t){super(),this._added=!1,this.legendHitBoxes=[],this._hoveredItem=null,this.doughnutMode=!1,this.chart=t.chart,this.options=t.options,this.ctx=t.ctx,this.legendItems=void 0,this.columnSizes=void 0,this.lineWidths=void 0,this.maxHeight=void 0,this.maxWidth=void 0,this.top=void 0,this.bottom=void 0,this.left=void 0,this.right=void 0,this.height=void 0,this.width=void 0,this._margins=void 0,this.position=void 0,this.weight=void 0,this.fullSize=void 0}update(t,e,i){const n=this;n.maxWidth=t,n.maxHeight=e,n._margins=i,n.setDimensions(),n.buildLabels(),n.fit()}setDimensions(){const t=this;t.isHorizontal()?(t.width=t.maxWidth,t.left=0,t.right=t.width):(t.height=t.maxHeight,t.top=0,t.bottom=t.height)}buildLabels(){const t=this,e=t.options.labels||{};let 
i=Q(e.generateLabels,[t.chart],t)||[];e.filter&&(i=i.filter((i=>e.filter(i,t.chart.data)))),e.sort&&(i=i.sort(((i,n)=>e.sort(i,n,t.chart.data)))),t.options.reverse&&i.reverse(),t.legendItems=i}fit(){const t=this,{options:e,ctx:i}=t;if(!e.display)return void(t.width=t.height=0);const n=e.labels,o=Fe(n.font),s=o.size,a=t._computeTitleHeight(),{boxWidth:r,itemHeight:l}=fs(n,s);let c,h;i.font=o.string,t.isHorizontal()?(c=t.maxWidth,h=t._fitRows(a,s,r,l)+10):(h=t.maxHeight,c=t._fitCols(a,s,r,l)+10),t.width=Math.min(c,e.maxWidth||t.maxWidth),t.height=Math.min(h,e.maxHeight||t.maxHeight)}_fitRows(t,e,i,n){const o=this,{ctx:s,maxWidth:a,options:{labels:{padding:r}}}=o,l=o.legendHitBoxes=[],c=o.lineWidths=[0],h=n+r;let d=t;s.textAlign="left",s.textBaseline="middle";let u=-1,f=-h;return o.legendItems.forEach(((t,o)=>{const g=i+e/2+s.measureText(t.text).width;(0===o||c[c.length-1]+g+2*r>a)&&(d+=h,c[c.length-(o>0?0:1)]=0,f+=h,u++),l[o]={left:0,top:f,row:u,width:g,height:n},c[c.length-1]+=g+r})),d}_fitCols(t,e,i,n){const o=this,{ctx:s,maxHeight:a,options:{labels:{padding:r}}}=o,l=o.legendHitBoxes=[],c=o.columnSizes=[],h=a-t;let d=r,u=0,f=0,g=0,p=0,m=0;return o.legendItems.forEach(((t,o)=>{const a=i+e/2+s.measureText(t.text).width;o>0&&f+e+2*r>h&&(d+=u+r,c.push({width:u,height:f}),g+=u+r,m++,p=0,u=f=0),u=Math.max(u,a),f+=e+r,l[o]={left:g,top:p,col:m,width:a,height:n},p+=n+r})),d+=u,c.push({width:u,height:f}),d}adjustHitBoxes(){const t=this;if(!t.options.display)return;const e=t._computeTitleHeight(),{legendHitBoxes:i,options:{align:n,labels:{padding:s}}}=t;if(this.isHorizontal()){let a=0,r=o(n,t.left+s,t.right-t.lineWidths[a]);for(const l of i)a!==l.row&&(a=l.row,r=o(n,t.left+s,t.right-t.lineWidths[a])),l.top+=t.top+e+s,l.left=r,r+=l.width+s}else{let a=0,r=o(n,t.top+e+s,t.bottom-t.columnSizes[a].height);for(const l of 
i)l.col!==a&&(a=l.col,r=o(n,t.top+e+s,t.bottom-t.columnSizes[a].height)),l.top=r,l.left+=t.left+s,r+=l.height+s}}isHorizontal(){return"top"===this.options.position||"bottom"===this.options.position}draw(){const t=this;if(t.options.display){const e=t.ctx;Zt(e,t),t._draw(),Qt(e)}}_draw(){const t=this,{options:e,columnSizes:i,lineWidths:n,ctx:a}=t,{align:r,labels:l}=e,c=xt.color,h=bn(e.rtl,t.left,t.width),d=Fe(l.font),{color:u,padding:f}=l,g=d.size,p=g/2;let m;t.drawTitle(),a.textAlign=h.textAlign("left"),a.textBaseline="middle",a.lineWidth=.5,a.font=d.string;const{boxWidth:x,boxHeight:b,itemHeight:_}=fs(l,g),y=t.isHorizontal(),v=this._computeTitleHeight();m=y?{x:o(r,t.left+f,t.right-n[0]),y:t.top+f+v,line:0}:{x:t.left+f,y:o(r,t.top+v+f,t.bottom-i[0].height),line:0},_n(t.ctx,e.textDirection);const w=_+f;t.legendItems.forEach(((e,M)=>{a.strokeStyle=e.fontColor||u,a.fillStyle=e.fontColor||u;const k=a.measureText(e.text).width,S=h.textAlign(e.textAlign||(e.textAlign=l.textAlign)),P=x+g/2+k;let D=m.x,C=m.y;h.setWidth(t.width),y?M>0&&D+P+f>t.right&&(C=m.y+=w,m.line++,D=m.x=o(r,t.left+f,t.right-n[m.line])):M>0&&C+w>t.bottom&&(D=m.x=D+i[m.line].width+f,m.line++,C=m.y=o(r,t.top+v+f,t.bottom-i[m.line].height));!function(t,e,i){if(isNaN(x)||x<=0||isNaN(b)||b<0)return;a.save();const n=K(i.lineWidth,1);if(a.fillStyle=K(i.fillStyle,c),a.lineCap=K(i.lineCap,"butt"),a.lineDashOffset=K(i.lineDashOffset,0),a.lineJoin=K(i.lineJoin,"miter"),a.lineWidth=n,a.strokeStyle=K(i.strokeStyle,c),a.setLineDash(K(i.lineDash,[])),l.usePointStyle){const o={radius:x*Math.SQRT2/2,pointStyle:i.pointStyle,rotation:i.rotation,borderWidth:n},s=h.xPlus(t,x/2);Kt(a,o,s,e+p)}else{const 
o=e+Math.max((g-b)/2,0),s=h.leftForLtr(t,x),r=Ie(i.borderRadius);a.beginPath(),Object.values(r).some((t=>0!==t))?ie(a,{x:s,y:o,w:x,h:b,radius:r}):a.rect(s,o,x,b),a.fill(),0!==n&&a.stroke()}a.restore()}(h.x(D),C,e),D=s(S,D+x+p,t.right),function(t,e,i){ee(a,i.text,t,e+_/2,d,{strikethrough:i.hidden,textAlign:i.textAlign})}(h.x(D),C,e),y?m.x+=P+f:m.y+=w})),yn(t.ctx,e.textDirection)}drawTitle(){const t=this,e=t.options,i=e.title,s=Fe(i.font),a=ze(i.padding);if(!i.display)return;const r=bn(e.rtl,t.left,t.width),l=t.ctx,c=i.position,h=s.size/2,d=a.top+h;let u,f=t.left,g=t.width;if(this.isHorizontal())g=Math.max(...t.lineWidths),u=t.top+d,f=o(e.align,f,t.right-g);else{const i=t.columnSizes.reduce(((t,e)=>Math.max(t,e.height)),0);u=d+o(e.align,t.top,t.bottom-i-e.labels.padding-t._computeTitleHeight())}const p=o(c,f,f+g);l.textAlign=r.textAlign(n(c)),l.textBaseline="middle",l.strokeStyle=i.color,l.fillStyle=i.color,l.font=s.string,ee(l,i.text,p,u,s)}_computeTitleHeight(){const t=this.options.title,e=Fe(t.font),i=ze(t.padding);return t.display?e.lineHeight+i.height:0}_getLegendItemAt(t,e){const i=this;let n,o,s;if(t>=i.left&&t<=i.right&&e>=i.top&&e<=i.bottom)for(s=i.legendHitBoxes,n=0;n<s.length;++n)if(o=s[n],t>=o.left&&t<=o.left+o.width&&e>=o.top&&e<=o.top+o.height)return i.legendItems[n];return null}handleEvent(t){const e=this,i=e.options;if(!function(t,e){if("mousemove"===t&&(e.onHover||e.onLeave))return!0;if(e.onClick&&("click"===t||"mouseup"===t))return!0;return!1}(t.type,i))return;const n=e._getLegendItemAt(t.x,t.y);if("mousemove"===t.type){const a=e._hoveredItem,r=(s=n,null!==(o=a)&&null!==s&&o.datasetIndex===s.datasetIndex&&o.index===s.index);a&&!r&&Q(i.onLeave,[t,a,e],e),e._hoveredItem=n,n&&!r&&Q(i.onHover,[t,n,e],e)}else n&&Q(i.onClick,[t,n,e],e);var o,s}}var ps={id:"legend",_element:gs,start(t,e,i){const n=t.legend=new gs({ctx:t.ctx,options:i,chart:t});Ge.configure(t,n,i),Ge.addBox(t,n)},stop(t){Ge.removeBox(t,t.legend),delete t.legend},beforeUpdate(t,e,i){const 
n=t.legend;Ge.configure(t,n,i),n.options=i},afterUpdate(t){const e=t.legend;e.buildLabels(),e.adjustHitBoxes()},afterEvent(t,e){e.replay||t.legend.handleEvent(e.event)},defaults:{display:!0,position:"top",align:"center",fullSize:!0,reverse:!1,weight:1e3,onClick(t,e,i){const n=e.datasetIndex,o=i.chart;o.isDatasetVisible(n)?(o.hide(n),e.hidden=!0):(o.show(n),e.hidden=!1)},onHover:null,onLeave:null,labels:{color:t=>t.chart.options.color,boxWidth:40,padding:10,generateLabels(t){const e=t.data.datasets,{labels:{usePointStyle:i,pointStyle:n,textAlign:o,color:s}}=t.legend.options;return t._getSortedDatasetMetas().map((t=>{const a=t.controller.getStyle(i?0:void 0),r=ze(a.borderWidth);return{text:e[t.index].label,fillStyle:a.backgroundColor,fontColor:s,hidden:!t.visible,lineCap:a.borderCapStyle,lineDash:a.borderDash,lineDashOffset:a.borderDashOffset,lineJoin:a.borderJoinStyle,lineWidth:(r.width+r.height)/4,strokeStyle:a.borderColor,pointStyle:n||a.pointStyle,rotation:a.rotation,textAlign:o||a.textAlign,borderRadius:0,datasetIndex:t.index}}),this)}},title:{color:t=>t.chart.options.color,display:!1,position:"center",text:""}},descriptors:{_scriptable:t=>!t.startsWith("on"),labels:{_scriptable:t=>!["generateLabels","filter","sort"].includes(t)}}};class ms extends Ei{constructor(t){super(),this.chart=t.chart,this.options=t.options,this.ctx=t.ctx,this._padding=void 0,this.top=void 0,this.bottom=void 0,this.left=void 0,this.right=void 0,this.width=void 0,this.height=void 0,this.position=void 0,this.weight=void 0,this.fullSize=void 0}update(t,e){const i=this,n=i.options;if(i.left=0,i.top=0,!n.display)return void(i.width=i.height=i.right=i.bottom=0);i.width=i.right=t,i.height=i.bottom=e;const o=Y(n.text)?n.text.length:1;i._padding=ze(n.padding);const s=o*Fe(n.font).lineHeight+i._padding.height;i.isHorizontal()?i.height=s:i.width=s}isHorizontal(){const t=this.options.position;return"top"===t||"bottom"===t}_drawArgs(t){const{top:e,left:i,bottom:n,right:s,options:a}=this,r=a.align;let 
l,c,h,d=0;return this.isHorizontal()?(c=o(r,i,s),h=e+t,l=s-i):("left"===a.position?(c=i+t,h=o(r,n,e),d=-.5*bt):(c=s-t,h=o(r,e,n),d=.5*bt),l=n-e),{titleX:c,titleY:h,maxWidth:l,rotation:d}}draw(){const t=this,e=t.ctx,i=t.options;if(!i.display)return;const o=Fe(i.font),s=o.lineHeight/2+t._padding.top,{titleX:a,titleY:r,maxWidth:l,rotation:c}=t._drawArgs(s);ee(e,i.text,0,0,o,{color:i.color,maxWidth:l,rotation:c,textAlign:n(i.align),textBaseline:"middle",translation:[a,r]})}}var xs={id:"title",_element:ms,start(t,e,i){!function(t,e){const i=new ms({ctx:t.ctx,options:e,chart:t});Ge.configure(t,i,e),Ge.addBox(t,i),t.titleBlock=i}(t,i)},stop(t){const e=t.titleBlock;Ge.removeBox(t,e),delete t.titleBlock},beforeUpdate(t,e,i){const n=t.titleBlock;Ge.configure(t,n,i),n.options=i},defaults:{align:"center",display:!1,font:{weight:"bold"},fullSize:!0,padding:10,position:"top",text:"",weight:2e3},defaultRoutes:{color:"color"},descriptors:{_scriptable:!0,_indexable:!1}};const bs={average(t){if(!t.length)return!1;let e,i,n=0,o=0,s=0;for(e=0,i=t.length;e<i;++e){const i=t[e].element;if(i&&i.hasValue()){const t=i.tooltipPosition();n+=t.x,o+=t.y,++s}}return{x:n/s,y:o/s}},nearest(t,e){if(!t.length)return!1;let i,n,o,s=e.x,a=e.y,r=Number.POSITIVE_INFINITY;for(i=0,n=t.length;i<n;++i){const n=t[i].element;if(n&&n.hasValue()){const t=Vt(e,n.getCenterPoint());t<r&&(r=t,o=n)}}if(o){const t=o.tooltipPosition();s=t.x,a=t.y}return{x:s,y:a}}};function _s(t,e){return e&&(Y(e)?Array.prototype.push.apply(t,e):t.push(e)),t}function ys(t){return("string"==typeof t||t instanceof String)&&t.indexOf("\n")>-1?t.split("\n"):t}function vs(t,e){const{element:i,datasetIndex:n,index:o}=e,s=t.getDatasetMeta(n).controller,{label:a,value:r}=s.getLabelAndValue(o);return{chart:t,label:a,parsed:s.getParsed(o),raw:t.data.datasets[n].data[o],formattedValue:r,dataset:s.getDataset(),dataIndex:o,datasetIndex:n,element:i}}function ws(t,e){const 
i=t._chart.ctx,{body:n,footer:o,title:s}=t,{boxWidth:a,boxHeight:r}=e,l=Fe(e.bodyFont),c=Fe(e.titleFont),h=Fe(e.footerFont),d=s.length,u=o.length,f=n.length,g=ze(e.padding);let p=g.height,m=0,x=n.reduce(((t,e)=>t+e.before.length+e.lines.length+e.after.length),0);if(x+=t.beforeBody.length+t.afterBody.length,d&&(p+=d*c.lineHeight+(d-1)*e.titleSpacing+e.titleMarginBottom),x){p+=f*(e.displayColors?Math.max(r,l.lineHeight):l.lineHeight)+(x-f)*l.lineHeight+(x-1)*e.bodySpacing}u&&(p+=e.footerMarginTop+u*h.lineHeight+(u-1)*e.footerSpacing);let b=0;const _=function(t){m=Math.max(m,i.measureText(t).width+b)};return i.save(),i.font=c.string,J(t.title,_),i.font=l.string,J(t.beforeBody.concat(t.afterBody),_),b=e.displayColors?a+2:0,J(n,(t=>{J(t.before,_),J(t.lines,_),J(t.after,_)})),b=0,i.font=h.string,J(t.footer,_),i.restore(),m+=g.width,{width:m,height:p}}function Ms(t,e,i,n){const{x:o,width:s}=i,{width:a,chartArea:{left:r,right:l}}=t;let c="center";return"center"===n?c=o<=(r+l)/2?"left":"right":o<=s/2?c="left":o>=a-s/2&&(c="right"),function(t,e,i,n){const{x:o,width:s}=n,a=i.caretSize+i.caretPadding;return"left"===t&&o+s+a>e.width||"right"===t&&o-s-a<0||void 0}(c,t,e,i)&&(c="center"),c}function ks(t,e,i){const n=e.yAlign||function(t,e){const{y:i,height:n}=e;return i<n/2?"top":i>t.height-n/2?"bottom":"center"}(t,i);return{xAlign:e.xAlign||Ms(t,e,i,n),yAlign:n}}function Ss(t,e,i,n){const{caretSize:o,caretPadding:s,cornerRadius:a}=t,{xAlign:r,yAlign:l}=i,c=o+s,h=a+s;let d=function(t,e){let{x:i,width:n}=t;return"right"===e?i-=n:"center"===e&&(i-=n/2),i}(e,r);const u=function(t,e,i){let{y:n,height:o}=t;return"top"===e?n+=i:n-="bottom"===e?o+i:o/2,n}(e,l,c);return"center"===l?"left"===r?d+=c:"right"===r&&(d-=c):"left"===r?d-=h:"right"===r&&(d+=h),{x:Nt(d,0,n.width-e.width),y:Nt(u,0,n.height-e.height)}}function Ps(t,e,i){const n=ze(i.padding);return"center"===e?t.x+t.width/2:"right"===e?t.x+t.width-n.right:t.x+n.left}function Ds(t){return _s([],ys(t))}function Cs(t,e){const 
i=e&&e.dataset&&e.dataset.tooltip&&e.dataset.tooltip.callbacks;return i?t.override(i):t}class Os extends Ei{constructor(t){super(),this.opacity=0,this._active=[],this._chart=t._chart,this._eventPosition=void 0,this._size=void 0,this._cachedAnimations=void 0,this._tooltipItems=[],this.$animations=void 0,this.$context=void 0,this.options=t.options,this.dataPoints=void 0,this.title=void 0,this.beforeBody=void 0,this.body=void 0,this.afterBody=void 0,this.footer=void 0,this.xAlign=void 0,this.yAlign=void 0,this.x=void 0,this.y=void 0,this.height=void 0,this.width=void 0,this.caretX=void 0,this.caretY=void 0,this.labelColors=void 0,this.labelPointStyles=void 0,this.labelTextColors=void 0}initialize(t){this.options=t,this._cachedAnimations=void 0,this.$context=void 0}_resolveAnimations(){const t=this,e=t._cachedAnimations;if(e)return e;const i=t._chart,n=t.options.setContext(t.getContext()),o=n.enabled&&i.options.animation&&n.animations,s=new vi(t._chart,o);return o._cacheable&&(t._cachedAnimations=Object.freeze(s)),s}getContext(){const t=this;return t.$context||(t.$context=(e=t._chart.getContext(),i=t,n=t._tooltipItems,Object.assign(Object.create(e),{tooltip:i,tooltipItems:n,type:"tooltip"})));var e,i,n}getTitle(t,e){const i=this,{callbacks:n}=e,o=n.beforeTitle.apply(i,[t]),s=n.title.apply(i,[t]),a=n.afterTitle.apply(i,[t]);let r=[];return r=_s(r,ys(o)),r=_s(r,ys(s)),r=_s(r,ys(a)),r}getBeforeBody(t,e){return Ds(e.callbacks.beforeBody.apply(this,[t]))}getBody(t,e){const i=this,{callbacks:n}=e,o=[];return J(t,(t=>{const e={before:[],lines:[],after:[]},s=Cs(n,t);_s(e.before,ys(s.beforeLabel.call(i,t))),_s(e.lines,s.label.call(i,t)),_s(e.after,ys(s.afterLabel.call(i,t))),o.push(e)})),o}getAfterBody(t,e){return Ds(e.callbacks.afterBody.apply(this,[t]))}getFooter(t,e){const i=this,{callbacks:n}=e,o=n.beforeFooter.apply(i,[t]),s=n.footer.apply(i,[t]),a=n.afterFooter.apply(i,[t]);let r=[];return r=_s(r,ys(o)),r=_s(r,ys(s)),r=_s(r,ys(a)),r}_createItems(t){const 
e=this,i=e._active,n=e._chart.data,o=[],s=[],a=[];let r,l,c=[];for(r=0,l=i.length;r<l;++r)c.push(vs(e._chart,i[r]));return t.filter&&(c=c.filter(((e,i,o)=>t.filter(e,i,o,n)))),t.itemSort&&(c=c.sort(((e,i)=>t.itemSort(e,i,n)))),J(c,(i=>{const n=Cs(t.callbacks,i);o.push(n.labelColor.call(e,i)),s.push(n.labelPointStyle.call(e,i)),a.push(n.labelTextColor.call(e,i))})),e.labelColors=o,e.labelPointStyles=s,e.labelTextColors=a,e.dataPoints=c,c}update(t,e){const i=this,n=i.options.setContext(i.getContext()),o=i._active;let s,a=[];if(o.length){const t=bs[n.position].call(i,o,i._eventPosition);a=i._createItems(n),i.title=i.getTitle(a,n),i.beforeBody=i.getBeforeBody(a,n),i.body=i.getBody(a,n),i.afterBody=i.getAfterBody(a,n),i.footer=i.getFooter(a,n);const e=i._size=ws(i,n),r=Object.assign({},t,e),l=ks(i._chart,n,r),c=Ss(n,r,l,i._chart);i.xAlign=l.xAlign,i.yAlign=l.yAlign,s={opacity:1,x:c.x,y:c.y,width:e.width,height:e.height,caretX:t.x,caretY:t.y}}else 0!==i.opacity&&(s={opacity:0});i._tooltipItems=a,i.$context=void 0,s&&i._resolveAnimations().update(i,s),t&&n.external&&n.external.call(i,{chart:i._chart,tooltip:i,replay:e})}drawCaret(t,e,i,n){const o=this.getCaretPosition(t,i,n);e.lineTo(o.x1,o.y1),e.lineTo(o.x2,o.y2),e.lineTo(o.x3,o.y3)}getCaretPosition(t,e,i){const{xAlign:n,yAlign:o}=this,{cornerRadius:s,caretSize:a}=i,{x:r,y:l}=t,{width:c,height:h}=e;let d,u,f,g,p,m;return"center"===o?(p=l+h/2,"left"===n?(d=r,u=d-a,g=p+a,m=p-a):(d=r+c,u=d+a,g=p-a,m=p+a),f=d):(u="left"===n?r+s+a:"right"===n?r+c-s-a:this.caretX,"top"===o?(g=l,p=g-a,d=u-a,f=u+a):(g=l+h,p=g+a,d=u+a,f=u-a),m=g),{x1:d,x2:u,x3:f,y1:g,y2:p,y3:m}}drawTitle(t,e,i){const n=this,o=n.title,s=o.length;let a,r,l;if(s){const 
c=bn(i.rtl,n.x,n.width);for(t.x=Ps(n,i.titleAlign,i),e.textAlign=c.textAlign(i.titleAlign),e.textBaseline="middle",a=Fe(i.titleFont),r=i.titleSpacing,e.fillStyle=i.titleColor,e.font=a.string,l=0;l<s;++l)e.fillText(o[l],c.x(t.x),t.y+a.lineHeight/2),t.y+=a.lineHeight+r,l+1===s&&(t.y+=i.titleMarginBottom-r)}}_drawColorBox(t,e,i,n,o){const s=this,a=s.labelColors[i],r=s.labelPointStyles[i],{boxHeight:l,boxWidth:c}=o,h=Fe(o.bodyFont),d=Ps(s,"left",o),u=n.x(d),f=l<h.lineHeight?(h.lineHeight-l)/2:0,g=e.y+f;if(o.usePointStyle){const e={radius:Math.min(c,l)/2,pointStyle:r.pointStyle,rotation:r.rotation,borderWidth:1},i=n.leftForLtr(u,c)+c/2,s=g+l/2;t.strokeStyle=o.multiKeyBackground,t.fillStyle=o.multiKeyBackground,Kt(t,e,i,s),t.strokeStyle=a.borderColor,t.fillStyle=a.backgroundColor,Kt(t,e,i,s)}else{t.lineWidth=a.borderWidth||1,t.strokeStyle=a.borderColor,t.setLineDash(a.borderDash||[]),t.lineDashOffset=a.borderDashOffset||0;const e=n.leftForLtr(u,c),i=n.leftForLtr(n.xPlus(u,1),c-2),s=Ie(a.borderRadius);Object.values(s).some((t=>0!==t))?(t.beginPath(),t.fillStyle=o.multiKeyBackground,ie(t,{x:e,y:g,w:c,h:l,radius:s}),t.fill(),t.stroke(),t.fillStyle=a.backgroundColor,t.beginPath(),ie(t,{x:i,y:g+1,w:c-2,h:l-2,radius:s}),t.fill()):(t.fillStyle=o.multiKeyBackground,t.fillRect(e,g,c,l),t.strokeRect(e,g,c,l),t.fillStyle=a.backgroundColor,t.fillRect(i,g+1,c-2,l-2))}t.fillStyle=s.labelTextColors[i]}drawBody(t,e,i){const n=this,{body:o}=n,{bodySpacing:s,bodyAlign:a,displayColors:r,boxHeight:l,boxWidth:c}=i,h=Fe(i.bodyFont);let d=h.lineHeight,u=0;const f=bn(i.rtl,n.x,n.width),g=function(i){e.fillText(i,f.x(t.x+u),t.y+d/2),t.y+=d+s},p=f.textAlign(a);let 
m,x,b,_,y,v,w;for(e.textAlign=a,e.textBaseline="middle",e.font=h.string,t.x=Ps(n,p,i),e.fillStyle=i.bodyColor,J(n.beforeBody,g),u=r&&"right"!==p?"center"===a?c/2+1:c+2:0,_=0,v=o.length;_<v;++_){for(m=o[_],x=n.labelTextColors[_],e.fillStyle=x,J(m.before,g),b=m.lines,r&&b.length&&(n._drawColorBox(e,t,_,f,i),d=Math.max(h.lineHeight,l)),y=0,w=b.length;y<w;++y)g(b[y]),d=h.lineHeight;J(m.after,g)}u=0,d=h.lineHeight,J(n.afterBody,g),t.y-=s}drawFooter(t,e,i){const n=this,o=n.footer,s=o.length;let a,r;if(s){const l=bn(i.rtl,n.x,n.width);for(t.x=Ps(n,i.footerAlign,i),t.y+=i.footerMarginTop,e.textAlign=l.textAlign(i.footerAlign),e.textBaseline="middle",a=Fe(i.footerFont),e.fillStyle=i.footerColor,e.font=a.string,r=0;r<s;++r)e.fillText(o[r],l.x(t.x),t.y+a.lineHeight/2),t.y+=a.lineHeight+i.footerSpacing}}drawBackground(t,e,i,n){const{xAlign:o,yAlign:s}=this,{x:a,y:r}=t,{width:l,height:c}=i,h=n.cornerRadius;e.fillStyle=n.backgroundColor,e.strokeStyle=n.borderColor,e.lineWidth=n.borderWidth,e.beginPath(),e.moveTo(a+h,r),"top"===s&&this.drawCaret(t,e,i,n),e.lineTo(a+l-h,r),e.quadraticCurveTo(a+l,r,a+l,r+h),"center"===s&&"right"===o&&this.drawCaret(t,e,i,n),e.lineTo(a+l,r+c-h),e.quadraticCurveTo(a+l,r+c,a+l-h,r+c),"bottom"===s&&this.drawCaret(t,e,i,n),e.lineTo(a+h,r+c),e.quadraticCurveTo(a,r+c,a,r+c-h),"center"===s&&"left"===o&&this.drawCaret(t,e,i,n),e.lineTo(a,r+h),e.quadraticCurveTo(a,r,a+h,r),e.closePath(),e.fill(),n.borderWidth>0&&e.stroke()}_updateAnimationTarget(t){const e=this,i=e._chart,n=e.$animations,o=n&&n.x,s=n&&n.y;if(o||s){const n=bs[t.position].call(e,e._active,e._eventPosition);if(!n)return;const a=e._size=ws(e,t),r=Object.assign({},n,e._size),l=ks(i,t,r),c=Ss(t,r,l,i);o._to===c.x&&s._to===c.y||(e.xAlign=l.xAlign,e.yAlign=l.yAlign,e.width=a.width,e.height=a.height,e.caretX=n.x,e.caretY=n.y,e._resolveAnimations().update(e,c))}}draw(t){const e=this,i=e.options.setContext(e.getContext());let n=e.opacity;if(!n)return;e._updateAnimationTarget(i);const 
o={width:e.width,height:e.height},s={x:e.x,y:e.y};n=Math.abs(n)<.001?0:n;const a=ze(i.padding),r=e.title.length||e.beforeBody.length||e.body.length||e.afterBody.length||e.footer.length;i.enabled&&r&&(t.save(),t.globalAlpha=n,e.drawBackground(s,t,o,i),_n(t,i.textDirection),s.y+=a.top,e.drawTitle(s,t,i),e.drawBody(s,t,i),e.drawFooter(s,t,i),yn(t,i.textDirection),t.restore())}getActiveElements(){return this._active||[]}setActiveElements(t,e){const i=this,n=i._active,o=t.map((({datasetIndex:t,index:e})=>{const n=i._chart.getDatasetMeta(t);if(!n)throw new Error("Cannot find a dataset at index "+t);return{datasetIndex:t,element:n.data[e],index:e}})),s=!tt(n,o),a=i._positionChanged(o,e);(s||a)&&(i._active=o,i._eventPosition=e,i.update(!0))}handleEvent(t,e){const i=this,n=i.options,o=i._active||[];let s=!1,a=[];"mouseout"!==t.type&&(a=i._chart.getElementsAtEventForMode(t,n.mode,n,e),n.reverse&&a.reverse());const r=i._positionChanged(a,t);return s=e||!tt(a,o)||r,s&&(i._active=a,(n.enabled||n.external)&&(i._eventPosition={x:t.x,y:t.y},i.update(!0,e))),s}_positionChanged(t,e){const{caretX:i,caretY:n,options:o}=this,s=bs[o.position].call(this,t,e);return!1!==s&&(i!==s.x||n!==s.y)}}Os.positioners=bs;var Ts={id:"tooltip",_element:Os,positioners:bs,afterInit(t,e,i){i&&(t.tooltip=new Os({_chart:t,options:i}))},beforeUpdate(t,e,i){t.tooltip&&t.tooltip.initialize(i)},reset(t,e,i){t.tooltip&&t.tooltip.initialize(i)},afterDraw(t){const e=t.tooltip,i={tooltip:e};!1!==t.notifyPlugins("beforeTooltipDraw",i)&&(e&&e.draw(t.ctx),t.notifyPlugins("afterTooltipDraw",i))},afterEvent(t,e){if(t.tooltip){const 
i=e.replay;t.tooltip.handleEvent(e.event,i)&&(e.changed=!0)}},defaults:{enabled:!0,external:null,position:"average",backgroundColor:"rgba(0,0,0,0.8)",titleColor:"#fff",titleFont:{weight:"bold"},titleSpacing:2,titleMarginBottom:6,titleAlign:"left",bodyColor:"#fff",bodySpacing:2,bodyFont:{},bodyAlign:"left",footerColor:"#fff",footerSpacing:2,footerMarginTop:6,footerFont:{weight:"bold"},footerAlign:"left",padding:6,caretPadding:2,caretSize:5,cornerRadius:6,boxHeight:(t,e)=>e.bodyFont.size,boxWidth:(t,e)=>e.bodyFont.size,multiKeyBackground:"#fff",displayColors:!0,borderColor:"rgba(0,0,0,0)",borderWidth:0,animation:{duration:400,easing:"easeOutQuart"},animations:{numbers:{type:"number",properties:["x","y","width","height","caretX","caretY"]},opacity:{easing:"linear",duration:200}},callbacks:{beforeTitle:N,title(t){if(t.length>0){const e=t[0],i=e.chart.data.labels,n=i?i.length:0;if(this&&this.options&&"dataset"===this.options.mode)return e.dataset.label||"";if(e.label)return e.label;if(n>0&&e.dataIndex<n)return i[e.dataIndex]}return""},afterTitle:N,beforeBody:N,beforeLabel:N,label(t){if(this&&this.options&&"dataset"===this.options.mode)return t.label+": "+t.formattedValue||t.formattedValue;let e=t.dataset.label||"";e&&(e+=": ");const i=t.formattedValue;return $(i)||(e+=i),e},labelColor(t){const e=t.chart.getDatasetMeta(t.datasetIndex).controller.getStyle(t.dataIndex);return{borderColor:e.borderColor,backgroundColor:e.backgroundColor,borderWidth:e.borderWidth,borderDash:e.borderDash,borderDashOffset:e.borderDashOffset,borderRadius:0}},labelTextColor(){return this.options.bodyColor},labelPointStyle(t){const 
e=t.chart.getDatasetMeta(t.datasetIndex).controller.getStyle(t.dataIndex);return{pointStyle:e.pointStyle,rotation:e.rotation}},afterLabel:N,afterBody:N,beforeFooter:N,footer:N,afterFooter:N}},defaultRoutes:{bodyFont:"font",footerFont:"font",titleFont:"font"},descriptors:{_scriptable:t=>"filter"!==t&&"itemSort"!==t&&"external"!==t,_indexable:!1,callbacks:{_scriptable:!1,_indexable:!1},animation:{_fallback:!1},animations:{_fallback:"animation"}},additionalOptionScopes:["interaction"]},As=Object.freeze({__proto__:null,Decimation:Ko,Filler:us,Legend:ps,Title:xs,Tooltip:Ts});function Ls(t,e,i){const n=t.indexOf(e);if(-1===n)return((t,e,i)=>"string"==typeof e?t.push(e)-1:isNaN(e)?null:i)(t,e,i);return n!==t.lastIndexOf(e)?i:n}class Rs extends Xi{constructor(t){super(t),this._startValue=void 0,this._valueRange=0}parse(t,e){if($(t))return null;const i=this.getLabels();return((t,e)=>null===t?null:Nt(Math.round(t),0,e))(e=isFinite(e)&&i[e]===t?e:Ls(i,t,K(e,t)),i.length-1)}determineDataLimits(){const t=this,{minDefined:e,maxDefined:i}=t.getUserBounds();let{min:n,max:o}=t.getMinMax(!0);"ticks"===t.options.bounds&&(e||(n=0),i||(o=t.getLabels().length-1)),t.min=n,t.max=o}buildTicks(){const t=this,e=t.min,i=t.max,n=t.options.offset,o=[];let s=t.getLabels();s=0===e&&i===s.length-1?s:s.slice(e,i+1),t._valueRange=Math.max(s.length-(n?0:1),1),t._startValue=t.min-(n?.5:0);for(let t=e;t<=i;t++)o.push({value:t});return o}getLabelForValue(t){const e=this.getLabels();return t>=0&&t<e.length?e[t]:t}configure(){const t=this;super.configure(),t.isHorizontal()||(t._reversePixels=!t._reversePixels)}getPixelForValue(t){const e=this;return"number"!=typeof t&&(t=e.parse(t)),null===t?NaN:e.getPixelForDecimal((t-e._startValue)/e._valueRange)}getPixelForTick(t){const e=this.ticks;return t<0||t>e.length-1?null:this.getPixelForValue(e[t].value)}getValueForPixel(t){const e=this;return Math.round(e._startValue+e.getDecimalForPixel(t)*e._valueRange)}getBasePixel(){return this.bottom}}function 
Es(t,e,{horizontal:i,minRotation:n}){const o=Et(n),s=(i?Math.sin(o):Math.cos(o))||.001,a=.75*e*(""+t).length;return Math.min(e/s,a)}Rs.id="category",Rs.defaults={ticks:{callback:Rs.prototype.getLabelForValue}};class Is extends Xi{constructor(t){super(t),this.start=void 0,this.end=void 0,this._startValue=void 0,this._endValue=void 0,this._valueRange=0}parse(t,e){return $(t)||("number"==typeof t||t instanceof Number)&&!isFinite(+t)?null:+t}handleTickRangeOptions(){const t=this,{beginAtZero:e}=t.options,{minDefined:i,maxDefined:n}=t.getUserBounds();let{min:o,max:s}=t;const a=t=>o=i?o:t,r=t=>s=n?s:t;if(e){const t=Dt(o),e=Dt(s);t<0&&e<0?r(0):t>0&&e>0&&a(0)}o===s&&(r(s+1),e||a(o-1)),t.min=o,t.max=s}getTickLimit(){const t=this,e=t.options.ticks;let i,{maxTicksLimit:n,stepSize:o}=e;return o?i=Math.ceil(t.max/o)-Math.floor(t.min/o)+1:(i=t.computeTickLimit(),n=n||11),n&&(i=Math.min(n,i)),i}computeTickLimit(){return Number.POSITIVE_INFINITY}buildTicks(){const t=this,e=t.options,i=e.ticks;let n=t.getTickLimit();n=Math.max(2,n);const o=function(t,e){const i=[],{bounds:n,step:o,min:s,max:a,precision:r,count:l,maxTicks:c,maxDigits:h,includeBounds:d}=t,u=o||1,f=c-1,{min:g,max:p}=e,m=!$(s),x=!$(a),b=!$(l),_=(p-g)/(h+1);let y,v,w,M,k=Ct((p-g)/f/u)*u;if(k<1e-14&&!m&&!x)return[{value:g},{value:p}];M=Math.ceil(p/k)-Math.floor(g/k),M>f&&(k=Ct(M*k/f/u)*u),$(r)||(y=Math.pow(10,r),k=Math.ceil(k*y)/y),"ticks"===n?(v=Math.floor(g/k)*k,w=Math.ceil(p/k)*k):(v=g,w=p),m&&x&&o&&Lt((a-s)/o,k/1e3)?(M=Math.min((a-s)/k,c),k=(a-s)/M,v=s,w=a):b?(v=m?s:v,w=x?a:w,M=l-1,k=(w-v)/M):(M=(w-v)/k,M=At(M,Math.round(M),k/1e3)?Math.round(M):Math.ceil(M));const S=Math.max(zt(k),zt(v));y=Math.pow(10,$(r)?S:r),v=Math.round(v*y)/y,w=Math.round(w*y)/y;let P=0;for(m&&(d&&v!==s?(i.push({value:s}),v<s&&P++,At(Math.round((v+P*k)*y)/y,s,Es(s,_,t))&&P++):v<s&&P++);P<M;++P)i.push({value:Math.round((v+P*k)*y)/y});return 
x&&d&&w!==a?At(i[i.length-1].value,a,Es(a,_,t))?i[i.length-1].value=a:i.push({value:a}):x&&w!==a||i.push({value:w}),i}({maxTicks:n,bounds:e.bounds,min:e.min,max:e.max,precision:i.precision,step:i.stepSize,count:i.count,maxDigits:t._maxDigits(),horizontal:t.isHorizontal(),minRotation:i.minRotation||0,includeBounds:!1!==i.includeBounds},t._range||t);return"ticks"===e.bounds&&Rt(o,t,"value"),e.reverse?(o.reverse(),t.start=t.max,t.end=t.min):(t.start=t.min,t.end=t.max),o}configure(){const t=this,e=t.ticks;let i=t.min,n=t.max;if(super.configure(),t.options.offset&&e.length){const t=(n-i)/Math.max(e.length-1,1)/2;i-=t,n+=t}t._startValue=i,t._endValue=n,t._valueRange=n-i}getLabelForValue(t){return zi(t,this.chart.options.locale)}}class zs extends Is{determineDataLimits(){const t=this,{min:e,max:i}=t.getMinMax(!0);t.min=X(e)?e:0,t.max=X(i)?i:1,t.handleTickRangeOptions()}computeTickLimit(){const t=this,e=t.isHorizontal(),i=e?t.width:t.height,n=Et(t.options.ticks.minRotation),o=(e?Math.sin(n):Math.cos(n))||.001,s=t._resolveTickFontOptions(0);return Math.ceil(i/Math.min(40,s.lineHeight/o))}getPixelForValue(t){return null===t?NaN:this.getPixelForDecimal((t-this._startValue)/this._valueRange)}getValueForPixel(t){return this._startValue+this.getDecimalForPixel(t)*this._valueRange}}function Fs(t){return 1===t/Math.pow(10,Math.floor(Pt(t)))}zs.id="linear",zs.defaults={ticks:{callback:Vi.formatters.numeric}};class Vs extends Xi{constructor(t){super(t),this.start=void 0,this.end=void 0,this._startValue=void 0,this._valueRange=0}parse(t,e){const i=Is.prototype.parse.apply(this,[t,e]);if(0!==i)return X(i)&&i>0?i:null;this._zero=!0}determineDataLimits(){const t=this,{min:e,max:i}=t.getMinMax(!0);t.min=X(e)?Math.max(0,e):null,t.max=X(i)?Math.max(0,i):null,t.options.beginAtZero&&(t._zero=!0),t.handleTickRangeOptions()}handleTickRangeOptions(){const t=this,{minDefined:e,maxDefined:i}=t.getUserBounds();let n=t.min,o=t.max;const 
s=t=>n=e?n:t,a=t=>o=i?o:t,r=(t,e)=>Math.pow(10,Math.floor(Pt(t))+e);n===o&&(n<=0?(s(1),a(10)):(s(r(n,-1)),a(r(o,1)))),n<=0&&s(r(o,-1)),o<=0&&a(r(n,1)),t._zero&&t.min!==t._suggestedMin&&n===r(t.min,0)&&s(r(n,-1)),t.min=n,t.max=o}buildTicks(){const t=this,e=t.options,i=function(t,e){const i=Math.floor(Pt(e.max)),n=Math.ceil(e.max/Math.pow(10,i)),o=[];let s=q(t.min,Math.pow(10,Math.floor(Pt(e.min)))),a=Math.floor(Pt(s)),r=Math.floor(s/Math.pow(10,a)),l=a<0?Math.pow(10,Math.abs(a)):1;do{o.push({value:s,major:Fs(s)}),++r,10===r&&(r=1,++a,l=a>=0?1:l),s=Math.round(r*Math.pow(10,a)*l)/l}while(a<i||a===i&&r<n);const c=q(t.max,s);return o.push({value:c,major:Fs(s)}),o}({min:t._userMin,max:t._userMax},t);return"ticks"===e.bounds&&Rt(i,t,"value"),e.reverse?(i.reverse(),t.start=t.max,t.end=t.min):(t.start=t.min,t.end=t.max),i}getLabelForValue(t){return void 0===t?"0":zi(t,this.chart.options.locale)}configure(){const t=this,e=t.min;super.configure(),t._startValue=Pt(e),t._valueRange=Pt(t.max)-Pt(e)}getPixelForValue(t){const e=this;return void 0!==t&&0!==t||(t=e.min),null===t||isNaN(t)?NaN:e.getPixelForDecimal(t===e.min?0:(Pt(t)-e._startValue)/e._valueRange)}getValueForPixel(t){const e=this,i=e.getDecimalForPixel(t);return Math.pow(10,e._startValue+i*e._valueRange)}}function Bs(t){const e=t.ticks;if(e.display&&t.display){const t=ze(e.backdropPadding);return K(e.font&&e.font.size,xt.font.size)+t.height}return 0}function Ws(t,e,i,n,o){return t===n||t===o?{start:e-i/2,end:e+i/2}:t<n||t>o?{start:e-i,end:e}:{start:e,end:e+i}}function Hs(t){return 0===t||180===t?"center":t<180?"left":"right"}function Ns(t,e,i){90===t||270===t?i.y-=e.h/2:(t>270||t<90)&&(i.y-=e.h)}function js(t,e,i,n){const{ctx:o}=t;if(i)o.arc(t.xCenter,t.yCenter,e,0,_t);else{let i=t.getPointPosition(0,e);o.moveTo(i.x,i.y);for(let s=1;s<n;s++)i=t.getPointPosition(s,e),o.lineTo(i.x,i.y)}}function $s(t){return Tt(t)?t:0}Vs.id="logarithmic",Vs.defaults={ticks:{callback:Vi.formatters.logarithmic,major:{enabled:!0}}};class Ys 
extends Is{constructor(t){super(t),this.xCenter=void 0,this.yCenter=void 0,this.drawingArea=void 0,this._pointLabels=[],this._pointLabelItems=[]}setDimensions(){const t=this;t.width=t.maxWidth,t.height=t.maxHeight,t.paddingTop=Bs(t.options)/2,t.xCenter=Math.floor(t.width/2),t.yCenter=Math.floor((t.height-t.paddingTop)/2),t.drawingArea=Math.min(t.height-t.paddingTop,t.width)/2}determineDataLimits(){const t=this,{min:e,max:i}=t.getMinMax(!1);t.min=X(e)&&!isNaN(e)?e:0,t.max=X(i)&&!isNaN(i)?i:0,t.handleTickRangeOptions()}computeTickLimit(){return Math.ceil(this.drawingArea/Bs(this.options))}generateTickLabels(t){const e=this;Is.prototype.generateTickLabels.call(e,t),e._pointLabels=e.getLabels().map(((t,i)=>{const n=Q(e.options.pointLabels.callback,[t,i],e);return n||0===n?n:""}))}fit(){const t=this,e=t.options;e.display&&e.pointLabels.display?function(t){const e={l:0,r:t.width,t:0,b:t.height-t.paddingTop},i={};let n,o,s;const a=[],r=[],l=t.getLabels().length;for(n=0;n<l;n++){const l=t.options.pointLabels.setContext(t.getContext(n));r[n]=l.padding,s=t.getPointPosition(n,t.drawingArea+r[n]);const u=Fe(l.font);t.ctx.font=u.string,c=t.ctx,h=u.lineHeight,o=Y(d=t._pointLabels[n])?{w:Ut(c,c.font,d),h:d.length*h}:{w:c.measureText(d).width,h:h},a[n]=o;const f=t.getIndexAngle(n),g=It(f),p=Ws(g,s.x,o.w,0,180),m=Ws(g,s.y,o.h,90,270);p.start<e.l&&(e.l=p.start,i.l=f),p.end>e.r&&(e.r=p.end,i.r=f),m.start<e.t&&(e.t=m.start,i.t=f),m.end>e.b&&(e.b=m.end,i.b=f)}var c,h,d;t._setReductions(t.drawingArea,e,i),t._pointLabelItems=[];const u=t.options,f=Bs(u),g=t.getDistanceFromCenterForValue(u.ticks.reverse?t.min:t.max);for(n=0;n<l;n++){const e=0===n?f/2:0,i=t.getPointPosition(n,g+e+r[n]),o=It(t.getIndexAngle(n)),s=a[n];Ns(o,s,i);const l=Hs(o);let c;c="left"===l?i.x:"center"===l?i.x-s.w/2:i.x-s.w;const h=c+s.w;t._pointLabelItems[n]={x:i.x,y:i.y,textAlign:l,left:c,top:i.y,right:h,bottom:i.y+s.h}}}(t):t.setCenterPoint(0,0,0,0)}_setReductions(t,e,i){const n=this;let 
o=e.l/Math.sin(i.l),s=Math.max(e.r-n.width,0)/Math.sin(i.r),a=-e.t/Math.cos(i.t),r=-Math.max(e.b-(n.height-n.paddingTop),0)/Math.cos(i.b);o=$s(o),s=$s(s),a=$s(a),r=$s(r),n.drawingArea=Math.max(t/2,Math.min(Math.floor(t-(o+s)/2),Math.floor(t-(a+r)/2))),n.setCenterPoint(o,s,a,r)}setCenterPoint(t,e,i,n){const o=this,s=o.width-e-o.drawingArea,a=t+o.drawingArea,r=i+o.drawingArea,l=o.height-o.paddingTop-n-o.drawingArea;o.xCenter=Math.floor((a+s)/2+o.left),o.yCenter=Math.floor((r+l)/2+o.top+o.paddingTop)}getIndexAngle(t){return Wt(t*(_t/this.getLabels().length)+Et(this.options.startAngle||0))}getDistanceFromCenterForValue(t){const e=this;if($(t))return NaN;const i=e.drawingArea/(e.max-e.min);return e.options.reverse?(e.max-t)*i:(t-e.min)*i}getValueForDistanceFromCenter(t){if($(t))return NaN;const e=this,i=t/(e.drawingArea/(e.max-e.min));return e.options.reverse?e.max-i:e.min+i}getPointPosition(t,e){const i=this,n=i.getIndexAngle(t)-Mt;return{x:Math.cos(n)*e+i.xCenter,y:Math.sin(n)*e+i.yCenter,angle:n}}getPointPositionForValue(t,e){return this.getPointPosition(t,this.getDistanceFromCenterForValue(e))}getBasePosition(t){return this.getPointPositionForValue(t||0,this.getBaseValue())}getPointLabelPosition(t){const{left:e,top:i,right:n,bottom:o}=this._pointLabelItems[t];return{left:e,top:i,right:n,bottom:o}}drawBackground(){const t=this,{backgroundColor:e,grid:{circular:i}}=t.options;if(e){const n=t.ctx;n.save(),n.beginPath(),js(t,t.getDistanceFromCenterForValue(t._endValue),i,t.getLabels().length),n.closePath(),n.fillStyle=e,n.fill(),n.restore()}}drawGrid(){const t=this,e=t.ctx,i=t.options,{angleLines:n,grid:o}=i,s=t.getLabels().length;let a,r,l;if(i.pointLabels.display&&function(t,e){const{ctx:i,options:{pointLabels:n}}=t;for(let o=e-1;o>=0;o--){const e=n.setContext(t.getContext(o)),s=Fe(e.font),{x:a,y:r,textAlign:l,left:c,top:h,right:d,bottom:u}=t._pointLabelItems[o],{backdropColor:f}=e;if(!$(f)){const 
t=ze(e.backdropPadding);i.fillStyle=f,i.fillRect(c-t.left,h-t.top,d-c+t.width,u-h+t.height)}ee(i,t._pointLabels[o],a,r+s.lineHeight/2,s,{color:e.color,textAlign:l,textBaseline:"middle"})}}(t,s),o.display&&t.ticks.forEach(((e,i)=>{if(0!==i){r=t.getDistanceFromCenterForValue(e.value);const n=o.setContext(t.getContext(i-1));!function(t,e,i,n){const o=t.ctx,s=e.circular,{color:a,lineWidth:r}=e;!s&&!n||!a||!r||i<0||(o.save(),o.strokeStyle=a,o.lineWidth=r,o.setLineDash(e.borderDash),o.lineDashOffset=e.borderDashOffset,o.beginPath(),js(t,i,s,n),o.closePath(),o.stroke(),o.restore())}(t,n,r,s)}})),n.display){for(e.save(),a=t.getLabels().length-1;a>=0;a--){const o=n.setContext(t.getContext(a)),{color:s,lineWidth:c}=o;c&&s&&(e.lineWidth=c,e.strokeStyle=s,e.setLineDash(o.borderDash),e.lineDashOffset=o.borderDashOffset,r=t.getDistanceFromCenterForValue(i.ticks.reverse?t.min:t.max),l=t.getPointPosition(a,r),e.beginPath(),e.moveTo(t.xCenter,t.yCenter),e.lineTo(l.x,l.y),e.stroke())}e.restore()}}drawBorder(){}drawLabels(){const t=this,e=t.ctx,i=t.options,n=i.ticks;if(!n.display)return;const o=t.getIndexAngle(0);let s,a;e.save(),e.translate(t.xCenter,t.yCenter),e.rotate(o),e.textAlign="center",e.textBaseline="middle",t.ticks.forEach(((o,r)=>{if(0===r&&!i.reverse)return;const l=n.setContext(t.getContext(r)),c=Fe(l.font);if(s=t.getDistanceFromCenterForValue(t.ticks[r].value),l.showLabelBackdrop){a=e.measureText(o.label).width,e.fillStyle=l.backdropColor;const t=ze(l.backdropPadding);e.fillRect(-a/2-t.left,-s-c.size/2-t.top,a+t.width,c.size+t.height)}ee(e,o.label,0,-s,c,{color:l.color})})),e.restore()}drawTitle(){}}Ys.id="radialLinear",Ys.defaults={display:!0,animate:!0,position:"chartArea",angleLines:{display:!0,lineWidth:1,borderDash:[],borderDashOffset:0},grid:{circular:!1},startAngle:0,ticks:{showLabelBackdrop:!0,callback:Vi.formatters.numeric},pointLabels:{backdropColor:void 
0,backdropPadding:2,display:!0,font:{size:10},callback:t=>t,padding:5}},Ys.defaultRoutes={"angleLines.color":"borderColor","pointLabels.color":"color","ticks.color":"color"},Ys.descriptors={angleLines:{_fallback:"grid"}};const Us={millisecond:{common:!0,size:1,steps:1e3},second:{common:!0,size:1e3,steps:60},minute:{common:!0,size:6e4,steps:60},hour:{common:!0,size:36e5,steps:24},day:{common:!0,size:864e5,steps:30},week:{common:!1,size:6048e5,steps:4},month:{common:!0,size:2628e6,steps:12},quarter:{common:!1,size:7884e6,steps:4},year:{common:!0,size:3154e7}},Xs=Object.keys(Us);function qs(t,e){return t-e}function Ks(t,e){if($(e))return null;const i=t._adapter,{parser:n,round:o,isoWeekday:s}=t._parseOpts;let a=e;return"function"==typeof n&&(a=n(a)),X(a)||(a="string"==typeof n?i.parse(a,n):i.parse(a)),null===a?null:(o&&(a="week"!==o||!Tt(s)&&!0!==s?i.startOf(a,o):i.startOf(a,"isoWeek",s)),+a)}function Gs(t,e,i,n){const o=Xs.length;for(let s=Xs.indexOf(t);s<o-1;++s){const t=Us[Xs[s]],o=t.steps?t.steps:Number.MAX_SAFE_INTEGER;if(t.common&&Math.ceil((i-e)/(o*t.size))<=n)return Xs[s]}return Xs[o-1]}function Zs(t,e,i){if(i){if(i.length){const{lo:n,hi:o}=ne(i,e);t[i[n]>=e?i[n]:i[o]]=!0}}else t[e]=!0}function Qs(t,e,i){const n=[],o={},s=e.length;let a,r;for(a=0;a<s;++a)r=e[a],o[r]=a,n.push({value:r,major:!1});return 0!==s&&i?function(t,e,i,n){const o=t._adapter,s=+o.startOf(e[0].value,n),a=e[e.length-1].value;let r,l;for(r=s;r<=a;r=+o.add(r,1,n))l=i[r],l>=0&&(e[l].major=!0);return e}(t,n,o,i):n}class Js extends Xi{constructor(t){super(t),this._cache={data:[],labels:[],all:[]},this._unit="day",this._majorUnit=void 0,this._offsets={},this._normalized=!1,this._parseOpts=void 0}init(t,e){const i=t.time||(t.time={}),n=this._adapter=new so._date(t.adapters.date);st(i.displayFormats,n.formats()),this._parseOpts={parser:i.parser,round:i.round,isoWeekday:i.isoWeekday},super.init(t),this._normalized=e.normalized}parse(t,e){return void 
0===t?null:Ks(this,t)}beforeLayout(){super.beforeLayout(),this._cache={data:[],labels:[],all:[]}}determineDataLimits(){const t=this,e=t.options,i=t._adapter,n=e.time.unit||"day";let{min:o,max:s,minDefined:a,maxDefined:r}=t.getUserBounds();function l(t){a||isNaN(t.min)||(o=Math.min(o,t.min)),r||isNaN(t.max)||(s=Math.max(s,t.max))}a&&r||(l(t._getLabelBounds()),"ticks"===e.bounds&&"labels"===e.ticks.source||l(t.getMinMax(!1))),o=X(o)&&!isNaN(o)?o:+i.startOf(Date.now(),n),s=X(s)&&!isNaN(s)?s:+i.endOf(Date.now(),n)+1,t.min=Math.min(o,s-1),t.max=Math.max(o+1,s)}_getLabelBounds(){const t=this.getLabelTimestamps();let e=Number.POSITIVE_INFINITY,i=Number.NEGATIVE_INFINITY;return t.length&&(e=t[0],i=t[t.length-1]),{min:e,max:i}}buildTicks(){const t=this,e=t.options,i=e.time,n=e.ticks,o="labels"===n.source?t.getLabelTimestamps():t._generate();"ticks"===e.bounds&&o.length&&(t.min=t._userMin||o[0],t.max=t._userMax||o[o.length-1]);const s=t.min,a=ae(o,s,t.max);return t._unit=i.unit||(n.autoSkip?Gs(i.minUnit,t.min,t.max,t._getLabelCapacity(s)):function(t,e,i,n,o){for(let s=Xs.length-1;s>=Xs.indexOf(i);s--){const i=Xs[s];if(Us[i].common&&t._adapter.diff(o,n,i)>=e-1)return i}return Xs[i?Xs.indexOf(i):0]}(t,a.length,i.minUnit,t.min,t.max)),t._majorUnit=n.major.enabled&&"year"!==t._unit?function(t){for(let e=Xs.indexOf(t)+1,i=Xs.length;e<i;++e)if(Us[Xs[e]].common)return Xs[e]}(t._unit):void 0,t.initOffsets(o),e.reverse&&a.reverse(),Qs(t,a,t._majorUnit)}initOffsets(t){const e=this;let i,n,o=0,s=0;e.options.offset&&t.length&&(i=e.getDecimalForValue(t[0]),o=1===t.length?1-i:(e.getDecimalForValue(t[1])-i)/2,n=e.getDecimalForValue(t[t.length-1]),s=1===t.length?n:(n-e.getDecimalForValue(t[t.length-2]))/2);const a=t.length<3?.5:.25;o=Nt(o,0,a),s=Nt(s,0,a),e._offsets={start:o,end:s,factor:1/(o+1+s)}}_generate(){const t=this,e=t._adapter,i=t.min,n=t.max,o=t.options,s=o.time,a=s.unit||Gs(s.minUnit,i,n,t._getLabelCapacity(i)),r=K(s.stepSize,1),l="week"===a&&s.isoWeekday,c=Tt(l)||!0===l,h={};let 
d,u,f=i;if(c&&(f=+e.startOf(f,"isoWeek",l)),f=+e.startOf(f,c?"day":a),e.diff(n,i,a)>1e5*r)throw new Error(i+" and "+n+" are too far apart with stepSize of "+r+" "+a);const g="data"===o.ticks.source&&t.getDataTimestamps();for(d=f,u=0;d<n;d=+e.add(d,r,a),u++)Zs(h,d,g);return d!==n&&"ticks"!==o.bounds&&1!==u||Zs(h,d,g),Object.keys(h).sort(((t,e)=>t-e)).map((t=>+t))}getLabelForValue(t){const e=this._adapter,i=this.options.time;return i.tooltipFormat?e.format(t,i.tooltipFormat):e.format(t,i.displayFormats.datetime)}_tickFormatFunction(t,e,i,n){const o=this,s=o.options,a=s.time.displayFormats,r=o._unit,l=o._majorUnit,c=r&&a[r],h=l&&a[l],d=i[e],u=l&&h&&d&&d.major,f=o._adapter.format(t,n||(u?h:c)),g=s.ticks.callback;return g?Q(g,[f,e,i],o):f}generateTickLabels(t){let e,i,n;for(e=0,i=t.length;e<i;++e)n=t[e],n.label=this._tickFormatFunction(n.value,e,t)}getDecimalForValue(t){const e=this;return null===t?NaN:(t-e.min)/(e.max-e.min)}getPixelForValue(t){const e=this,i=e._offsets,n=e.getDecimalForValue(t);return e.getPixelForDecimal((i.start+n)*i.factor)}getValueForPixel(t){const e=this,i=e._offsets,n=e.getDecimalForPixel(t)/i.factor-i.end;return e.min+n*(e.max-e.min)}_getLabelSize(t){const e=this,i=e.options.ticks,n=e.ctx.measureText(t).width,o=Et(e.isHorizontal()?i.maxRotation:i.minRotation),s=Math.cos(o),a=Math.sin(o),r=e._resolveTickFontOptions(0).size;return{w:n*s+r*a,h:n*a+r*s}}_getLabelCapacity(t){const e=this,i=e.options.time,n=i.displayFormats,o=n[i.unit]||n.millisecond,s=e._tickFormatFunction(t,0,Qs(e,[t],e._majorUnit),o),a=e._getLabelSize(s),r=Math.floor(e.isHorizontal()?e.width/a.w:e.height/a.h)-1;return r>0?r:1}getDataTimestamps(){const t=this;let e,i,n=t._cache.data||[];if(n.length)return n;const o=t.getMatchingVisibleMetas();if(t._normalized&&o.length)return t._cache.data=o[0].controller.getAllParsedValues(t);for(e=0,i=o.length;e<i;++e)n=n.concat(o[e].controller.getAllParsedValues(t));return t._cache.data=t.normalize(n)}getLabelTimestamps(){const 
t=this,e=t._cache.labels||[];let i,n;if(e.length)return e;const o=t.getLabels();for(i=0,n=o.length;i<n;++i)e.push(Ks(t,o[i]));return t._cache.labels=t._normalized?e:t.normalize(e)}normalize(t){return he(t.sort(qs))}}function ta(t,e,i){let n,o,s,a;if(i)n=Math.floor(e),o=Math.ceil(e),s=t[n],a=t[o];else{const i=ne(t,e);s=i.lo,a=i.hi,n=t[s],o=t[a]}const r=o-n;return r?s+(a-s)*(e-n)/r:s}Js.id="time",Js.defaults={bounds:"data",adapters:{},time:{parser:!1,unit:!1,round:!1,isoWeekday:!1,minUnit:"millisecond",displayFormats:{}},ticks:{source:"auto",major:{enabled:!1}}};class ea extends Js{constructor(t){super(t),this._table=[],this._maxIndex=void 0}initOffsets(){const t=this,e=t._getTimestampsForTable();t._table=t.buildLookupTable(e),t._maxIndex=t._table.length-1,super.initOffsets(e)}buildLookupTable(t){const{min:e,max:i}=this;if(!t.length)return[{time:e,pos:0},{time:i,pos:1}];const n=[e];let o,s,a;for(o=0,s=t.length;o<s;++o)a=t[o],a>e&&a<i&&n.push(a);return n.push(i),n}_getTimestampsForTable(){const t=this;let e=t._cache.all||[];if(e.length)return e;const i=t.getDataTimestamps(),n=t.getLabelTimestamps();return e=i.length&&n.length?t.normalize(i.concat(n)):i.length?i:n,e=t._cache.all=e,e}getPixelForValue(t,e){const i=this,n=i._offsets,o=i._normalized&&i._maxIndex>0&&!$(e)?e/i._maxIndex:i.getDecimalForValue(t);return i.getPixelForDecimal((n.start+o)*n.factor)}getDecimalForValue(t){return ta(this._table,t)/this._maxIndex}getValueForPixel(t){const e=this,i=e._offsets,n=e.getDecimalForPixel(t)/i.factor-i.end;return ta(e._table,n*this._maxIndex,!0)}}ea.id="timeseries",ea.defaults=Js.defaults;var ia=Object.freeze({__proto__:null,CategoryScale:Rs,LinearScale:zs,LogarithmicScale:Vs,RadialLinearScale:Ys,TimeScale:Js,TimeSeriesScale:ea});return 
to.register(_o,ia,Uo,As),to.helpers={...On},to._adapters=so,to.Animation=_i,to.Animations=vi,to.animator=a,to.controllers=An.controllers.items,to.DatasetController=Ri,to.Element=Ei,to.elements=Uo,to.Interaction=Oe,to.layouts=Ge,to.platforms=ui,to.Scale=Xi,to.Ticks=Vi,Object.assign(to,_o,ia,Uo,As,ui),to.Chart=to,"undefined"!=typeof window&&(window.Chart=to),to})); | PypiClean |
/DeepCellTL-0.12.5.tar.gz/DeepCellTL-0.12.5/deepcell/model_zoo/fpn.py | """Feature pyramid network utility functions"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import re
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, Conv3D, DepthwiseConv2D
from tensorflow.keras.layers import Softmax
from tensorflow.keras.layers import Add
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import UpSampling2D, UpSampling3D
from tensorflow.keras.layers import BatchNormalization
from deepcell.layers import UpsampleLike
from deepcell.utils.misc_utils import get_sorted_keys
def create_pyramid_level(backbone_input,
                         upsamplelike_input=None,
                         addition_input=None,
                         upsample_type='upsamplelike',
                         level=5,
                         ndim=2,
                         lite=False,
                         interpolation='bilinear',
                         feature_size=256,
                         z_axis_convolutions=False):
    """Build one FPN pyramid level from a single backbone feature map.

    The level is assembled in three steps: a 1x1 "lateral" convolution that
    projects the backbone features to ``feature_size`` channels, an optional
    element-wise addition with the upsampled coarser pyramid level, and a
    final 3x3 convolution that smooths aliasing from the merge. The merged
    (pre-smoothing) tensor is also upsampled so it can feed the next finer
    pyramid level.

    Args:
        backbone_input (tensorflow.keras.Layer): Backbone feature map that
            seeds this pyramid level.
        upsamplelike_input (tensor): Optional tensor whose spatial shape is
            the upsampling target (only used with ``'upsamplelike'``).
        addition_input (tensorflow.keras.Layer): Optional coarser pyramid
            level to merge in after the lateral convolution.
        upsample_type (str): One of
            ``['upsamplelike', 'upsampling2d', 'upsampling3d']``.
        level (int): Pyramid level number, used only for layer names.
        ndim (int): Spatial dimensionality of the data; must be 2 or 3.
        lite (bool): Use a depthwise convolution for the final 3x3 conv
            (2D only).
        interpolation (str): Interpolation mode for ``UpSampling2D``;
            one of ``['bilinear', 'nearest']``.
        feature_size (int): Number of filters for the convolutions.
        z_axis_convolutions (bool): For 3D data, use a kernel of depth 3
            (instead of 1) along the z axis in the final convolution.

    Returns:
        tuple: ``(pyramid_final, pyramid_upsample)`` — the smoothed pyramid
        level and its upsampled counterpart (``None`` when
        ``upsample_type == 'upsamplelike'`` and no target was given).

    Raises:
        ValueError: ``ndim`` is not 2 or 3.
        ValueError: ``lite`` is requested for a 3D network.
        ValueError: ``interpolation`` or ``upsample_type`` is unsupported.
    """
    # --- argument validation (same order and messages as elsewhere) ---
    acceptable_ndims = {2, 3}
    if ndim not in acceptable_ndims:
        raise ValueError('Only 2 and 3 dimensional networks are supported')

    if lite and ndim == 3:
        raise ValueError('lite models are not compatible with 3 dimensional '
                         'networks')

    acceptable_interpolation = {'bilinear', 'nearest'}
    if interpolation not in acceptable_interpolation:
        raise ValueError('Interpolation mode "{}" not supported. '
                         'Choose from {}.'.format(
                             interpolation, list(acceptable_interpolation)))

    acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}
    if upsample_type not in acceptable_upsample:
        raise ValueError('Upsample method "{}" not supported. '
                         'Choose from {}.'.format(
                             upsample_type, list(acceptable_upsample)))

    # Lateral 1x1 convolution: project backbone channels to feature_size.
    reduced_name = 'C{}_reduced'.format(level)
    if ndim == 2:
        x = Conv2D(feature_size, (1, 1), strides=(1, 1),
                   padding='same', name=reduced_name)(backbone_input)
    else:
        x = Conv3D(feature_size, (1, 1, 1), strides=(1, 1, 1),
                   padding='same', name=reduced_name)(backbone_input)

    # Merge in the upsampled coarser pyramid level, when one is provided.
    if addition_input is not None:
        x = Add(name='P{}_merged'.format(level))([x, addition_input])

    # Upsample the merged tensor for consumption by the next finer level.
    upsample_name = 'P{}_upsampled'.format(level)
    if upsample_type == 'upsamplelike':
        # UpsampleLike needs a shape target; without one there is nothing
        # to upsample to (top of the pyramid).
        if upsamplelike_input is None:
            x_upsampled = None
        else:
            x_upsampled = UpsampleLike(name=upsample_name)(
                [x, upsamplelike_input])
    else:
        if ndim == 2:
            x_upsampled = UpSampling2D(size=(2, 2), name=upsample_name,
                                       interpolation=interpolation)(x)
        else:
            # UpSampling3D has no interpolation argument; leave z untouched.
            x_upsampled = UpSampling3D(size=(1, 2, 2),
                                       name=upsample_name)(x)

    # Final 3x3 convolution smooths aliasing introduced by the addition.
    final_name = 'P{}'.format(level)
    if ndim == 3:
        z = 3 if z_axis_convolutions else 1
        x_final = Conv3D(feature_size, (z, 3, 3), strides=(1, 1, 1),
                         padding='same', name=final_name)(x)
    elif lite:
        x_final = DepthwiseConv2D((3, 3), strides=(1, 1),
                                  padding='same',
                                  name=final_name)(x)
    else:
        x_final = Conv2D(feature_size, (3, 3), strides=(1, 1),
                         padding='same', name=final_name)(x)

    return x_final, x_upsampled
def __create_pyramid_features(backbone_dict,
                              ndim=2,
                              feature_size=256,
                              include_final_layers=True,
                              lite=False,
                              upsample_type='upsamplelike',
                              interpolation='bilinear',
                              z_axis_convolutions=False):
    """Creates the FPN layers on top of the backbone features.

    Args:
        backbone_dict (dictionary): A dictionary of the backbone layers, with
            the names as keys, e.g. ``{'C0': C0, 'C1': C1, 'C2': C2, ...}``
        ndim (int): The spatial dimensions of the input data.
            Must be either 2 or 3.
        feature_size (int): The feature size to use for
            the resulting feature levels.
        include_final_layers (bool): Add two coarser pyramid levels
        lite (bool): Whether to use depthwise conv instead of regular conv for
            feature pyramid construction
        upsample_type (str): Choice of upsampling methods
            from ``['upsamplelike', 'upsampling2d', 'upsampling3d']``.
        interpolation (str): Choice of interpolation mode for upsampling
            layers from ``['bilinear', 'nearest']``.
        z_axis_convolutions (bool): Whether to use a depth-3 kernel along the
            z axis for 3D data (forwarded to ``create_pyramid_level``).

    Returns:
        dict: The feature pyramid names and levels,
        e.g. ``{'P3': P3, 'P4': P4, ...}``
        Each backbone layer gets a pyramid level, and two additional levels
        are added, e.g. ``[C3, C4, C5]`` --> ``[P3, P4, P5, P6, P7]``

    Raises:
        ValueError: ``ndim`` is not 2 or 3
        ValueError: ``upsample_type`` not in
            ``['upsamplelike', 'upsampling2d', 'upsampling3d']``
    """
    # Check input to ndims. Use a set for membership testing, consistent
    # with create_pyramid_level.
    acceptable_ndims = {2, 3}
    if ndim not in acceptable_ndims:
        raise ValueError('Only 2 and 3 dimensional networks are supported')

    # Check if inputs to ndim and lite are compatible
    if ndim == 3 and lite:
        raise ValueError('lite models are not compatible with 3 dimensional '
                         'networks')

    # Check input to interpolation
    acceptable_interpolation = {'bilinear', 'nearest'}
    if interpolation not in acceptable_interpolation:
        raise ValueError('Interpolation mode "{}" not supported. '
                         'Choose from {}.'.format(
                             interpolation, list(acceptable_interpolation)))

    # Check input to upsample_type
    acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}
    if upsample_type not in acceptable_upsample:
        raise ValueError('Upsample method "{}" not supported. '
                         'Choose from {}.'.format(
                             upsample_type, list(acceptable_upsample)))

    # Get names of the backbone levels and place in ascending order
    backbone_names = get_sorted_keys(backbone_dict)
    backbone_features = [backbone_dict[name] for name in backbone_names]

    pyramid_names = []
    pyramid_finals = []
    pyramid_upsamples = []

    # Reverse lists so iteration runs from the coarsest level downward
    backbone_names.reverse()
    backbone_features.reverse()

    for i, N in enumerate(backbone_names):
        # Backbone names are expected to embed the level, e.g. 'C5' -> 5
        level = int(re.findall(r'\d+', N)[0])
        pyramid_names.append('P{}'.format(level))

        backbone_input = backbone_features[i]

        # Don't add for the bottom of the pyramid
        if i == 0:
            if len(backbone_features) > 1:
                upsamplelike_input = backbone_features[i + 1]
            else:
                upsamplelike_input = None
            addition_input = None

        # Don't upsample for the top of the pyramid
        elif i == len(backbone_names) - 1:
            upsamplelike_input = None
            addition_input = pyramid_upsamples[-1]

        # Otherwise, add and upsample
        else:
            upsamplelike_input = backbone_features[i + 1]
            addition_input = pyramid_upsamples[-1]

        pf, pu = create_pyramid_level(backbone_input,
                                      upsamplelike_input=upsamplelike_input,
                                      addition_input=addition_input,
                                      upsample_type=upsample_type,
                                      level=level,
                                      ndim=ndim,
                                      lite=lite,
                                      interpolation=interpolation,
                                      z_axis_convolutions=z_axis_convolutions)
        pyramid_finals.append(pf)
        pyramid_upsamples.append(pu)

    # Add the final two pyramid layers
    if include_final_layers:
        # "Second to last pyramid layer is obtained via a
        # 3x3 stride-2 conv on the coarsest backbone"
        N = backbone_names[0]
        F = backbone_features[0]
        # Parse the coarsest backbone level once; the two extra layers sit
        # one and two levels above it.
        base_level = int(re.findall(r'\d+', N)[0])

        level = base_level + 1
        P_minus_2_name = 'P{}'.format(level)

        if ndim == 2:
            P_minus_2 = Conv2D(feature_size, kernel_size=(3, 3),
                               strides=(2, 2), padding='same',
                               name=P_minus_2_name)(F)
        else:
            P_minus_2 = Conv3D(feature_size, kernel_size=(1, 3, 3),
                               strides=(1, 2, 2), padding='same',
                               name=P_minus_2_name)(F)

        pyramid_names.insert(0, P_minus_2_name)
        pyramid_finals.insert(0, P_minus_2)

        # "Last pyramid layer is computed by applying ReLU
        # followed by a 3x3 stride-2 conv on second to last layer"
        level = base_level + 2
        P_minus_1_name = 'P{}'.format(level)

        P_minus_1 = Activation('relu', name='{}_relu'.format(N))(P_minus_2)

        if ndim == 2:
            P_minus_1 = Conv2D(feature_size, kernel_size=(3, 3),
                               strides=(2, 2), padding='same',
                               name=P_minus_1_name)(P_minus_1)
        else:
            P_minus_1 = Conv3D(feature_size, kernel_size=(1, 3, 3),
                               strides=(1, 2, 2), padding='same',
                               name=P_minus_1_name)(P_minus_1)

        pyramid_names.insert(0, P_minus_1_name)
        pyramid_finals.insert(0, P_minus_1)

    pyramid_dict = dict(zip(pyramid_names, pyramid_finals))

    return pyramid_dict
def semantic_upsample(x,
                      n_upsample,
                      target=None,
                      n_filters=64,
                      ndim=2,
                      semantic_id=0,
                      upsample_type='upsamplelike',
                      interpolation='bilinear'):
    """Upsample ``x`` through iterative rounds of 2x upsampling, running a
    3x3 convolution ahead of each round to suppress aliasing artifacts.

    Args:
        x (tensor): The input tensor to be upsampled.
        n_upsample (int): The number of 2x upsamplings.
        target (tensor): An optional tensor with the target shape.
        n_filters (int): The number of filters for
            the 3x3 convolution.
        ndim (int): The spatial dimensions of the input data.
            Must be either 2 or 3.
        semantic_id (int): ID of the semantic head.
        upsample_type (str): Choice of upsampling layer to use from
            ``['upsamplelike', 'upsampling2d', 'upsampling3d']``.
        interpolation (str): Choice of interpolation mode for upsampling
            layers from ``['bilinear', 'nearest']``.

    Raises:
        ValueError: ``ndim`` is not 2 or 3.
        ValueError: ``interpolation`` not in ``['bilinear', 'nearest']``.
        ValueError: ``upsample_type`` not in
            ``['upsamplelike','upsampling2d', 'upsampling3d']``.
        ValueError: ``target`` is ``None`` and
            ``upsample_type`` is ``'upsamplelike'``

    Returns:
        tensor: The upsampled tensor.
    """
    # --- Argument validation, done before any graph construction ---
    acceptable_ndims = [2, 3]
    if ndim not in acceptable_ndims:
        raise ValueError('Only 2 and 3 dimensional networks are supported')

    acceptable_interpolation = {'bilinear', 'nearest'}
    if interpolation not in acceptable_interpolation:
        raise ValueError('Interpolation mode "{}" not supported. '
                         'Choose from {}.'.format(
                             interpolation, list(acceptable_interpolation)))

    acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}
    if upsample_type not in acceptable_upsample:
        raise ValueError('Upsample method "{}" not supported. '
                         'Choose from {}.'.format(
                             upsample_type, list(acceptable_upsample)))

    # 'upsamplelike' matches the output shape to a reference tensor,
    # so a reference must be supplied.
    if upsample_type == 'upsamplelike' and target is None:
        raise ValueError('upsamplelike requires a target.')

    # Pick the 2D or 3D layer classes and kernel/scale shapes. For 3D
    # data only the trailing (spatial) axes are convolved / upsampled.
    if ndim == 2:
        conv, conv_kernel = Conv2D, (3, 3)
        upsampling, size = UpSampling2D, (2, 2)
    else:
        conv, conv_kernel = Conv3D, (1, 3, 3)
        upsampling, size = UpSampling3D, (1, 2, 2)

    if n_upsample > 0:
        for step in range(n_upsample):
            # Anti-aliasing convolution before each upsampling round.
            x = conv(n_filters, conv_kernel, strides=1, padding='same',
                     name='conv_{}_semantic_upsample_{}'.format(
                         step, semantic_id))(x)
            upsample_name = 'upsampling_{}_semantic_upsample_{}'.format(
                step, semantic_id)
            if upsample_type == 'upsamplelike':
                # Only the last round snaps directly to the target shape;
                # earlier rounds apply the convolution alone.
                if step == n_upsample - 1 and target is not None:
                    x = UpsampleLike(name=upsample_name)([x, target])
            elif ndim > 2:
                # UpSampling3D takes no interpolation argument.
                x = upsampling(size=size, name=upsample_name)(x)
            else:
                x = upsampling(size=size, name=upsample_name,
                               interpolation=interpolation)(x)
    else:
        # No upsampling requested: still apply a final smoothing conv,
        # and optionally match the target shape.
        x = conv(n_filters, conv_kernel, strides=1, padding='same',
                 name='conv_final_semantic_upsample_{}'.format(semantic_id))(x)
        if upsample_type == 'upsamplelike' and target is not None:
            upsample_name = 'upsampling_{}_semanticupsample_{}'.format(
                0, semantic_id)
            x = UpsampleLike(name=upsample_name)([x, target])
    return x
def __create_semantic_head(pyramid_dict,
                           input_target=None,
                           n_classes=3,
                           n_filters=128,
                           n_dense=128,
                           semantic_id=0,
                           ndim=2,
                           include_top=True,
                           target_level=2,
                           upsample_type='upsamplelike',
                           interpolation='bilinear',
                           **kwargs):
    """Creates a semantic head from a feature pyramid network.

    Args:
        pyramid_dict (dict): Dictionary of pyramid names and features.
        input_target (tensor): Optional tensor with the input image.
        n_classes (int): The number of classes to be predicted.
        n_filters (int): The number of convolutional filters.
        n_dense (int): Number of dense filters.
        semantic_id (int): ID of the semantic head.
        ndim (int): The spatial dimensions of the input data.
            Must be either 2 or 3.
        include_top (bool): Whether to include the final layer of the model
        target_level (int): The level we need to reach. Performs
            2x upsampling until we're at the target level.
        upsample_type (str): Choice of upsampling layer to use from
            ``['upsamplelike', 'upsampling2d', 'upsampling3d']``.
        interpolation (str): Choice of interpolation mode for upsampling
            layers from ``['bilinear', 'nearest']``.

    Raises:
        ValueError: ``ndim`` must be 2 or 3
        ValueError: ``interpolation`` not in ``['bilinear', 'nearest']``
        ValueError: ``upsample_type`` not in
            ``['upsamplelike','upsampling2d', 'upsampling3d']``

    Returns:
        tensorflow.keras.Layer: The semantic segmentation head
    """
    # Check input to ndims
    if ndim not in {2, 3}:
        raise ValueError('ndim must be either 2 or 3. '
                         'Received ndim = {}'.format(ndim))

    # Check input to interpolation
    acceptable_interpolation = {'bilinear', 'nearest'}
    if interpolation not in acceptable_interpolation:
        raise ValueError('Interpolation mode "{}" not supported. '
                         'Choose from {}.'.format(
                             interpolation, list(acceptable_interpolation)))

    # Check input to upsample_type
    acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}
    if upsample_type not in acceptable_upsample:
        raise ValueError('Upsample method "{}" not supported. '
                         'Choose from {}.'.format(
                             upsample_type, list(acceptable_upsample)))

    # Check that there is an input_target if upsamplelike is used
    if upsample_type == 'upsamplelike' and input_target is None:
        raise ValueError('upsamplelike requires an input_target.')

    # 1x1 (or 1x1x1) convolutions act as per-pixel dense layers.
    conv = Conv2D if ndim == 2 else Conv3D
    conv_kernel = (1,) * ndim

    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    # A single-class head is a plain regression output; softmax over one
    # channel would be constant, so drop the top.
    if n_classes == 1:
        include_top = False

    # Get pyramid names and features into list form
    pyramid_names = get_sorted_keys(pyramid_dict)
    pyramid_features = [pyramid_dict[name] for name in pyramid_names]

    # Reverse pyramid names and features
    pyramid_names.reverse()
    pyramid_features.reverse()

    # Previous method of building feature pyramids
    # semantic_features, semantic_names = [], []
    # for N, P in zip(pyramid_names, pyramid_features):
    #     # Get level and determine how much to upsample
    #     level = int(re.findall(r'\d+', N)[0])
    #
    #     n_upsample = level - target_level
    #     target = semantic_features[-1] if len(semantic_features) > 0 else None
    #
    #     # Use semantic upsample to get semantic map
    #     semantic_features.append(semantic_upsample(
    #         P, n_upsample, n_filters=n_filters, target=target, ndim=ndim,
    #         upsample_type=upsample_type, interpolation=interpolation,
    #         semantic_id=semantic_id))
    #     semantic_names.append('Q{}'.format(level))

    # Add all the semantic features
    # semantic_sum = semantic_features[0]
    # for semantic_feature in semantic_features[1:]:
    #     semantic_sum = Add()([semantic_sum, semantic_feature])

    # TODO: bad name but using the same name more clearly indicates
    # how to integrate the previous version
    semantic_sum = pyramid_features[-1]

    # Final upsampling
    # min_level = int(re.findall(r'\d+', pyramid_names[-1])[0])
    # n_upsample = min_level - target_level
    n_upsample = target_level
    x = semantic_upsample(semantic_sum, n_upsample,
                          # n_filters=n_filters,  # TODO: uncomment and retrain
                          target=input_target, ndim=ndim,
                          upsample_type=upsample_type, semantic_id=semantic_id,
                          interpolation=interpolation)

    # Apply conv in place of previous tensor product
    x = conv(n_dense, conv_kernel, strides=1, padding='same',
             name='conv_0_semantic_{}'.format(semantic_id))(x)
    x = BatchNormalization(axis=channel_axis,
                           name='batch_normalization_0_semantic_{}'.format(semantic_id))(x)
    x = Activation('relu', name='relu_0_semantic_{}'.format(semantic_id))(x)

    # Apply conv and softmax layer
    x = conv(n_classes, conv_kernel, strides=1,
             padding='same', name='conv_1_semantic_{}'.format(semantic_id))(x)

    if include_top:
        x = Softmax(axis=channel_axis,
                    dtype=K.floatx(),
                    name='semantic_{}'.format(semantic_id))(x)
    else:
        x = Activation('relu',
                       dtype=K.floatx(),
                       name='semantic_{}'.format(semantic_id))(x)

    # Fixed: the original line was corrupted to ``return x | PypiClean``
    # (an extraction artifact referencing an undefined name).
    return x
# When running under IPython, switch matplotlib to the interactive Qt GUI.
# Outside IPython (import fails, or get_ipython() returns None and raises
# AttributeError) this is silently skipped.
# Fixed: narrowed the bare ``except:`` (which also swallowed SystemExit and
# KeyboardInterrupt) to ``except Exception:``; removed a dataset-extraction
# artifact that had corrupted the ``try:`` line.
try:
    import IPython
    shell = IPython.get_ipython()
    shell.enable_matplotlib(gui='qt')
except Exception:
    pass
# choose backend for matplotlib to be Qt5Agg. If not, no plots can be made :'-(
import matplotlib
matplotlib.use('Qt5Agg')
from DMCpy import _tools # Useful tools useful across DMC
try:
import _tools as _guitools
except ImportError:
import DMCGui.src.main.python._tools as _guitools
import numpy as np
import matplotlib.pyplot as plt
import sys
import datetime
from time import sleep
from os import path
import os
plt.ion()
from PyQt5 import QtWidgets, QtCore, QtGui, Qt
try:
from Views.main import Ui_MainWindow
from Views.DataSetManager import DataSetManager
from Views.dataOverviewManager import dataOverviewManager
from Views.collapsibleBox import CollapsibleBox
from DMC_Data import GuiDataFile,GuiDataSet
from DataModels import DataSetModel,DataFileModel
# from StateMachine import StateMachine
from GuiStates import States
from AboutDialog import AboutDialog
from HelpDialog import HelpDialog
# from generateScripts import initGenerateScript,setupGenerateScript
from _tools import loadSetting,updateSetting,ProgressBarDecoratorArguments
except ModuleNotFoundError:
sys.path.append('.')
from DMCGui.src.main.python.Views.main import Ui_MainWindow
from DMCGui.src.main.python.Views.DataSetManager import DataSetManager
from DMCGui.src.main.python.Views.collapsibleBox import CollapsibleBox
from DMCGui.src.main.python.Views.dataOverviewManager import dataOverviewManager
from DMCGui.src.main.python.DMC_Data import GuiDataFile,GuiDataSet
from DMCGui.src.main.python.DataModels import DataSetModel,DataFileModel
from DMCGui.src.main.python.GuiStates import States
# from DMCGui.src.main.python.StateMachine import StateMachine
from DMCGui.src.main.python.AboutDialog import AboutDialog
from DMCGui.src.main.python.HelpDialog import HelpDialog
# from DMCGui.src.main.python.generateScripts import initGenerateScript,setupGenerateScript
from DMCGui.src.main.python._tools import loadSetting,updateSetting,ProgressBarDecoratorArguments
import sys
from enum import Enum
from pathlib import Path
home = str(Path.home())
class DMCMainWindow(QtWidgets.QMainWindow):
mask_changed = QtCore.pyqtSignal()
state_changed = QtCore.pyqtSignal(Enum)
def __init__(self,AppContext):
super(DMCMainWindow, self).__init__()
self.currentState = States.STARTUP
self.ui = Ui_MainWindow()
self.AppContext = AppContext
self.version = self.AppContext.build_settings['version']
### Settings saved in .DMCGuiSettings
self.settingsFile = path.join(home,'.DMCGuiSettings')
self.views = []
guiSettings = loadSetting(self.settingsFile,'guiSettings')
#if guiSettings is None:
# self.theme = 'light'
#else:
# if not 'theme' in guiSettings:
# self.theme = 'light'
# else:
# self.theme = guiSettings['theme']
self.ui.setupUi(self)
self.update()
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(self.AppContext.get_resource('Icons/Own/icon.png')))
self.setWindowIcon(icon)
# List to hold all views that need to be setup
self.views = []
## Set up DataSetManager
self.ui.dataSetManager = DataSetManager(self.ui.fixedOpen,self)
self.update()
self.views.append(self.ui.dataSetManager)
# Lists of views in shown order
self.nameList = ["Data Overview"]# Currently empty'View3D','QE line','Q plane','1D cuts','1D raw data','Masking'] # 'Normalization'
self.viewClasses = [dataOverviewManager]#View3DManager,QELineManager,QPlaneManager,Cut1DManager,Raw1DManager]#[View3D,View3D,View3D,Cut1D,Raw1D] # NormalizationManager
self.startState = [True]#True,False,False,False,True,False] # If not collapsed #False
# Find correct layout to insert views
vlay = QtWidgets.QVBoxLayout(self.ui.collapsibleContainer)
# Insert all views
self.boxContainers = []
for name,Type,state in zip(self.nameList,self.viewClasses,self.startState):
self.update()
box = CollapsibleBox(name,startState=state)
self.boxContainers.append(box)
vlay.addWidget(box)
lay = QtWidgets.QVBoxLayout()
widget = Type(guiWindow=self)
#if Type == NormalizationManager: # Get a reference to the sample manager directly in self
# self.normalizationManager = widget
self.views.append(widget)
lay.addWidget(widget)
box.setContentLayout(lay)
vlay.addStretch()
#self.maskingManager = MaskManager(self)
self.windows = [] # Holder for generated plotting windows
self.dataSets = []
self.current_timer = None
self.blockItems = [getattr(self.ui,item) for item in self.ui.__dict__ if '_button' in item[-7:]] # Collect all items to block on calls
self.lineEdits = [getattr(self.ui,item) for item in self.ui.__dict__ if '_lineEdit' in item[-9:]] # Collect all lineedits
self.radioButtons = [getattr(self.ui,item) for item in self.ui.__dict__ if '_radioButton' in item] # Collect all radiobuttons
self.spinBoxes = [getattr(self.ui,item) for item in self.ui.__dict__ if '_spinBox' in item[-8:]] # Collect all spinboxes
self.checkBoxes = [getattr(self.ui,item) for item in self.ui.__dict__ if '_checkBox' in item[-9:]] # Collect all checkboxes
self.update()
#initGenerateScript(self)
for view in self.views: # Run through all views to set them up
view.setup()
#setupGenerateScript(self)
#self.update()
self.setupMenu()
self.update()
self.updateGuiState()
self.update()
#self.stateMachine.run()
self.update()
#self.loadFolder() # Load last folder as default
self.loadedGuiSettings = None
self.ui.menubar.setNativeMenuBar(False)
if sys.platform.lower() == 'darwin':
## Update image of arrows to correct style on mac
correctedArrows = """QToolButton::down-arrow {
image: url("""+self.AppContext.get_resource('down.png')+""");
}
QToolButton::right-arrow {
image: url("""+self.AppContext.get_resource('right.png')+""");
}"""
self.setStyleSheet(self.styleSheet()+correctedArrows)
def setupMenu(self): # Set up all QActions and menus
self.ui.actionExit.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/cross-button.png')))
self.ui.actionExit.setToolTip('Exit the application')
self.ui.actionExit.setStatusTip(self.ui.actionExit.toolTip())
self.ui.actionExit.triggered.connect(self.close)
self.ui.actionAbout.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/information-button.png')))
self.ui.actionAbout.setToolTip('Show About')
self.ui.actionAbout.setStatusTip(self.ui.actionAbout.toolTip())
self.ui.actionAbout.triggered.connect(self.about)
self.ui.actionHelp.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/question-button.png')))
self.ui.actionHelp.setToolTip('Show Help')
self.ui.actionHelp.setStatusTip(self.ui.actionHelp.toolTip())
self.ui.actionHelp.triggered.connect(self.help)
self.ui.actionSave_GUI_state.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/folder-save.png')))
self.ui.actionSave_GUI_state.setToolTip('Save current Gui setup')
self.ui.actionSave_GUI_state.setStatusTip(self.ui.actionSave_GUI_state.toolTip())
self.ui.actionSave_GUI_state.triggered.connect(self.saveCurrentGui)
#self.actionSave_GUI_state_shortcut = QtWidgets.QShortcut(QtGui.QKeySequence("Ctrl+S"), self)
#self.actionSave_GUI_state_shortcut.activated.connect(self.saveCurrentGui)
self.ui.actionLoad_GUI_state.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/folder--arrow.png')))
self.ui.actionLoad_GUI_state.setToolTip('Load Gui setup')
self.ui.actionLoad_GUI_state.setStatusTip(self.ui.actionLoad_GUI_state.toolTip())
self.ui.actionLoad_GUI_state.triggered.connect(self.loadGui)
#self.actionLoad_GUI_state_shortcut = QtWidgets.QShortcut(QtGui.QKeySequence("Ctrl+O"), self)
#self.actionLoad_GUI_state_shortcut.activated.connect(self.loadGui)
#self.ui.actionGenerate_View3d_script.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/script-3D.png')))
#self.ui.actionGenerate_View3d_script.setToolTip('Generate 3D Script')
#self.ui.actionGenerate_View3d_script.setStatusTip(self.ui.actionGenerate_View3d_script.toolTip())
#self.ui.actionGenerate_View3d_script.triggered.connect(self.generate3DScript)
#self.ui.actionGenerate_QELine_script.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/script-QE.png')))
#self.ui.actionGenerate_QELine_script.setToolTip('Generate QELine Script')
#self.ui.actionGenerate_QELine_script.setStatusTip(self.ui.actionGenerate_QELine_script.toolTip())
#self.ui.actionGenerate_QELine_script.triggered.connect(self.generateQELineScript)
#self.ui.actionGenerate_QPlane_script.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/script-QP.png')))
#self.ui.actionGenerate_QPlane_script.setToolTip('Generate QPlane Script')
#self.ui.actionGenerate_QPlane_script.setStatusTip(self.ui.actionGenerate_QPlane_script.toolTip())
#self.ui.actionGenerate_QPlane_script.triggered.connect(self.generateQPlaneScript)
#self.ui.actionGenerate_1d_script.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/script-1D.png')))
#self.ui.actionGenerate_1d_script.setToolTip('Generate Cut1D Script')
#self.ui.actionGenerate_1d_script.setStatusTip(self.ui.actionGenerate_1d_script.toolTip())
#self.ui.actionGenerate_1d_script.triggered.connect(self.generateCut1DScript)
#self.ui.actionOpen_mask_gui.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/mask-open.png')))
#self.ui.actionOpen_mask_gui.setDisabled(True)
#self.ui.actionOpen_mask_gui.setToolTip('Open Mask Gui')
#self.ui.actionOpen_mask_gui.setStatusTip(self.ui.actionOpen_mask_gui.toolTip())
#self.ui.actionOpen_mask_gui.triggered.connect(self.maskingManager.setWindowVisible)
#self.ui.actionLoad_mask.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/mask-load.png')))
#self.ui.actionLoad_mask.setDisabled(True)
#self.ui.actionLoad_mask.setToolTip('Load Mask - Not Implemented')
#self.ui.actionLoad_mask.setStatusTip(self.ui.actionLoad_mask.toolTip())
##self.ui.actionLoad_mask.triggered.connect(self.maskingManager.getMasks)
self.ui.actionSettings.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/settings.png')))
self.ui.actionSettings.setDisabled(False)
self.ui.actionSettings.setToolTip('Change View Settings')
self.ui.actionSettings.setStatusTip(self.ui.actionSettings.toolTip())
self.ui.actionSettings.triggered.connect(self.settingsDialog)
self.ui.actionClose_Windows.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/CloseWindows.png')))
self.ui.actionClose_Windows.setDisabled(False)
self.ui.actionClose_Windows.setToolTip('Close All Plotting Windows')
self.ui.actionClose_Windows.setStatusTip(self.ui.actionClose_Windows.toolTip())
self.ui.actionClose_Windows.triggered.connect(self.closeWindows)
#self.ui.actionNormalizationWidget.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/ruler.png')))
#self.ui.actionNormalizationWidget.setDisabled(False)
#self.ui.actionNormalizationWidget.setToolTip('Generate a script to normalize data absolutely')
#self.ui.actionNormalizationWidget.setStatusTip(self.ui.actionNormalizationWidget.toolTip())
#self.ui.actionNormalizationWidget.triggered.connect(self.absolutNormalizationTool)
#self.ui.actionPredictionWidget.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/predict.png')))
#self.ui.actionPredictionWidget.setDisabled(False)
#self.ui.actionPredictionWidget.setToolTip('Predict scan coverage')
#self.ui.actionPredictionWidget.setStatusTip(self.ui.actionPredictionWidget.toolTip())
#self.ui.actionPredictionWidget.triggered.connect(self.predictionTool)
#self.ui.actionMolecularWeight.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/balance.png')))
##self.ui.actionMolecularWeight.setDisabled(False)
#self.ui.actionMolecularWeight.setToolTip('Calculate Molecular Mass from Chemical Formula')
#self.ui.actionMolecularWeight.setStatusTip(self.ui.actionMolecularWeight.toolTip())
#self.ui.actionMolecularWeight.triggered.connect(self.molarMassTool)
#self.ui.actionNeutronCalculations.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/calculator.png')))
#self.ui.actionNeutronCalculations.setDisabled(False)
#self.ui.actionNeutronCalculations.setToolTip('Calculate standard neutron quantities')
#self.ui.actionNeutronCalculations.setStatusTip(self.ui.actionNeutronCalculations.toolTip())
#self.ui.actionNeutronCalculations.triggered.connect(self.neutronCalculationTool)
#self.ui.actionElectronicLogbook.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/book--pencil.png')))
#self.ui.actionElectronicLogbook.setDisabled(True)
#self.ui.actionElectronicLogbook.setToolTip('Generate Electronic Logbook from files')
#self.ui.actionElectronicLogbook.setStatusTip(self.ui.actionElectronicLogbook.toolTip())
#self.ui.actionElectronicLogbook.triggered.connect(self.electronicLogbookTool)
def getProgressBarValue(self):
return self.ui.progressBar.value
def setProgressBarValue(self,value):
if not hasattr(self,'ui.progressBar.value'):
self.ui.progressBar.value = 0
self.ui.progressBar.setValue(value)
self.ui.progressBar.value = value
def setProgressBarLabelText(self,text):
if self.current_timer:
self.current_timer.stop()
self.ui.progressBar_label.setText(text)
def setProgressBarMaximum(self,value):
self.ui.progressBar.setMaximum(value)
def resetProgressBar(self):
self.setProgressBarValue(0)
self.setProgressBarLabelText('Ready')
def saveSettingsDialog(self,event):
res = QtWidgets.QMessageBox.question(self,
"Exit - Save Gui Settings",
"Do you want to save Gui Settings?",
QtWidgets.QMessageBox.Save | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if res == QtWidgets.QMessageBox.Save:
if self.saveCurrentGui() is True: # Successful saving
self.closeWindows()
event.accept()
else:
event.ignore()
elif res == QtWidgets.QMessageBox.No:
self.closeWindows()
event.accept()
return 1
else:
event.ignore()
return 0
def quitDialog(self,event):
res = QtWidgets.QMessageBox.question(self,
"Exit",
"Do you want to exit the Gui?",
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
if res == QtWidgets.QMessageBox.Yes:
self.closeWindows()
event.accept()
return 1
else:
event.ignore()
return 0
def closeEvent(self, event):
if self.loadedGuiSettings is None:
if not self.saveSettingsDialog(event): # The dialog is cancelled
return
elif np.all([s1==s2 for s1,s2 in zip(self.loadedGuiSettings.values(),self.generateCurrentGuiSettings().values())]):
if not self.quitDialog(event):
return
else:
if not self.saveSettingsDialog(event): # The dialog is cancelled
return
self.closeWindows()
@ProgressBarDecoratorArguments(runningText='Closing Windows',completedText='Windows Closed')
def closeWindows(self):
if hasattr(self,'windows'):
for window in self.windows:
try:
plt.close(window)
except:
try:
window.close()
except:
pass
return True
def about(self):
dialog = AboutDialog(self.AppContext.get_resource('About.txt'),version=self.version)
dialog.exec_()
def help(self):
dialog = HelpDialog(self.AppContext.get_resource('Help.txt'))
dialog.exec_()
#def setupStateMachine(self):
# self.stateMachine = StateMachine([empty,partial,raw,converted],self)
def update(self):
QtWidgets.QApplication.processEvents()
QtWidgets.QApplication.processEvents()
@ProgressBarDecoratorArguments(runningText='Saving Gui Settings',completedText='Gui Settings Saved',failedText='Cancelled')
def saveCurrentGui(self): # save data set and files in format DataSetNAME DataFileLocation DataFileLocation:DataSetNAME
#DataSet = [self.dataSets[I].name for I in range(self.DataSetModel.rowCount(None))]
settingsDict = self.generateCurrentGuiSettings(updateProgressBar=True)
if not hasattr(self,'loadedSettingsFile'):
self.loadedSettingsFile = home
saveSettings,_ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File',self.loadedSettingsFile)
if saveSettings is None or saveSettings == '':
return False
if not saveSettings.split('.')[-1] == 'DMCGuiSettings':
saveSettings+='.DMCGuiSettings'
for key,value in settingsDict.items():
updateSetting(saveSettings,key,value)
self.loadedGuiSettings = self.generateCurrentGuiSettings()
return True
def generateCurrentGuiSettings(self,updateProgressBar=False):
saveString = []
if updateProgressBar: self.setProgressBarMaximum(len(self.DataSetModel.dataSets))
for i,ds in enumerate(self.DataSetModel.dataSets):
dsDict = {'name':ds.name}
localstring = [os.path.join(df.folder,df.fileName) for df in ds]
dsDict['files']=localstring
saveString.append(dsDict)
if updateProgressBar: self.setProgressBarValue((i+1))
lineEditString = self.generateCurrentLineEditSettings()
radioButtonString = self.generateCurrentRadioButtonSettings()
spinBoxString = self.generateCurrentSpinBoxSettings()
checkBoxString = self.generateCurrentcheckBoxSettings()
fileDir = self.getCurrentDirectory()
infos = self.DataFileInfoModel.currentInfos()
guiSettings = self.guiSettings()
returnDict = {'dataSet':saveString, 'lineEdits':lineEditString, 'radioButtons': radioButtonString,'spinBoxes':spinBoxString,
'checkBoxes':checkBoxString,'fileDir':fileDir, 'infos':infos, 'guiSettings':guiSettings}
return returnDict
def generateCurrentLineEditSettings(self):
lineEditValueString = {}
for item in self.lineEdits:
lineEditValueString[item.objectName()] = item.text()
return lineEditValueString
def generateCurrentSpinBoxSettings(self):
spinBoxValueString = {}
for item in self.spinBoxes:
spinBoxValueString[item.objectName()] = item.value()
return spinBoxValueString
def generateCurrentcheckBoxSettings(self):
chechValueString = {}
for item in self.checkBoxes:
chechValueString[item.objectName()] = item.isChecked()
return chechValueString
def generateCurrentRadioButtonSettings(self):
radioButtonString = {}
for item in self.radioButtons:
radioButtonString[item.objectName()] = item.isChecked()
return radioButtonString
def loadFolder(self):
fileDir = loadSetting(self.settingsFile,'fileDir')
if not fileDir is None:
self.setCurrentDirectory(fileDir)
@ProgressBarDecoratorArguments(runningText='Loading gui settings',completedText='Loading Done',failedText='Cancelled')
def loadGui(self,presetFileLocation=None):
# Load saveFile
if not hasattr(self,'loadedSettingsFolder'):
folder = home
else:
folder = self.loadedSettingsFolder
if presetFileLocation is None: # When no file is provided, open file dialogue
settingsFile,_ = QtWidgets.QFileDialog.getOpenFileName(self,"Open GUI settings file", folder,"Setting (*.DMCGuiSettings);;All Files (*)")
else:
settingsFile = presetFileLocation
self.update()
self.loadedSettingsFolder = os.path.dirname(settingsFile)
self.loadedSettingsFile = settingsFile
if settingsFile is None or settingsFile == '':
return False
self.setProgressBarLabelText('Deleating Old Data Sets and Files')
while self.DataSetModel.rowCount(None)>0:
self.DataSetModel.delete(self.DataSetModel.getCurrentDatasetIndex())
else:
self.DataSetModel.layoutChanged.emit()
self.DataFileModel.updateCurrentDataSetIndex()
self.update()
dataSetString = loadSetting(settingsFile,'dataSet')
totalFiles = np.sum([len(dsDict['files'])+1 for dsDict in dataSetString])+1
# Get estimate of total number of data files
self.setProgressBarMaximum(totalFiles)
counter = 0
for dsDict in dataSetString:
self.setProgressBarLabelText('Loading Data Set')
DSName = dsDict['name']
files = dsDict['files']
dfs = None
if len(files)!=0: # If files in dataset, continue
dfs = []
for dfLocation in files:
df = GuiDataFile(dfLocation)
self.update()
dfs.append(df)
counter+=1
self.setProgressBarValue(counter)
if DSName == '':
continue
ds = GuiDataSet(name=DSName,dataFiles=dfs)
if 'binning' in dsDict:
if not np.any([b is None for b in dsDict['binning']]):
binnings = dsDict['binning']
for df,binning in zip(ds,binnings):
df.binning = binning
self.setProgressBarLabelText('Converting Data Set')
ds.convertDataFile(guiWindow=self,setProgressBarMaximum=False)
self.update()
self.DataSetModel.append(ds)
self.DataSetModel.layoutChanged.emit()
self.update()
counter+=1
self.setProgressBarValue(counter)
DataFileListInfos = loadSetting(settingsFile,'infos')
if not DataFileListInfos is None:
self.DataFileInfoModel.infos = DataFileListInfos
self.loadGuiSettings(file=settingsFile)
self.loadLineEdits(file=settingsFile)
self.loadRadioButtons(file=settingsFile)
self.loadSpinBoxes(file=settingsFile)
self.loadCheckBoxes(file=settingsFile)
self.DataSetModel.layoutChanged.emit()
self.DataFileInfoModel.layoutChanged.emit()
self.DataFileModel.updateCurrentDataSetIndex()
self.update()
self.loadedGuiSettings = self.generateCurrentGuiSettings()
return True
def guiSettings(self):
boxStates = [b.state for b in self.boxContainers]
settingsDict = {'boxStates':boxStates}
return settingsDict
def loadGuiSettings(self,file=None):
if file is None:
file = self.settingsFile
guiSettings = loadSetting(file,'guiSettings')
boxStates = guiSettings['boxStates']
if not boxStates is None:
for box,value in zip(self.boxContainers,boxStates):
try:
if box.state != value:
box.on_pressed()
except AttributeError:
pass
def loadLineEdits(self,file=None):
if file is None:
file = self.settingsFile
lineEditValueString = loadSetting(file,'lineEdits')
if not lineEditValueString is None:
if isinstance(lineEditValueString,str):
print('Please save a new gui state to comply with the new version')
return
for item,value in lineEditValueString.items():
try:
getattr(self.ui,item).setText(value)
except AttributeError:
pass
def loadRadioButtons(self,file=None):
if file is None:
file = self.settingsFile
radioButtonString = loadSetting(file,'radioButtons')
if not radioButtonString is None:
if isinstance(radioButtonString,str):
print('Please save a new gui state to comply with the new version')
return
for item,value in radioButtonString.items():
try:
getattr(self.ui,item).setChecked(value)
except AttributeError:
pass
def loadSpinBoxes(self,file=None):
if file is None:
file = self.settingsFile
spinBoxValueString = loadSetting(file,'spinBoxes')
if not spinBoxValueString is None:
if isinstance(spinBoxValueString,str):
print('Please save a new gui state to comply with the new version')
return
for item,value in spinBoxValueString.items():
try:
getattr(self.ui,item).setValue(value)
except AttributeError:
pass
def loadCheckBoxes(self,file=None):
if file is None:
file = self.settingsFile
checkBoxString = loadSetting(file,'checkBoxes')
if not checkBoxString is None:
if isinstance(checkBoxString,str):
print('Please save a new gui state to comply with the new version')
return
for item,value in checkBoxString.items():
try:
getattr(self.ui,item).setChecked(value)
except AttributeError:
pass
def getCurrentDirectory(self):
return self.ui.DataSet_path_lineEdit.text()
def setCurrentDirectory(self,folder):
self.currentFolder = folder
self.ui.DataSet_path_lineEdit.setText(folder)
def resetProgressBarTimed(self):
if self.current_timer:
self.current_timer.stop()
self.current_timer = QtCore.QTimer()
self.current_timer.timeout.connect(self.resetProgressBar)
self.current_timer.setSingleShot(True)
self.current_timer.start(3000)
def changeTheme(self,name):
if not name in themes.keys():
raise AttributeError('Theme name not recognized. Got {}, but allowed are: '.format(name),', '.join(themes.keys()))
app = QtWidgets.QApplication.instance()
self.theme = name
themes[name](app)
#palette = app.palette()
#print('Palette:',palette)
#for view in self.views:
# view.setPalette(palette)
def settingsDialog(self):
    """Open the modal settings dialog and apply the user's choices.

    Builds a check-box section from the DataFileInfoModel's possible
    settings plus a (currently empty) gui-settings section, shows them in a
    settingsBoxDialog and, on OK, writes the selection back to the model.
    """
    # Get infos from DataFileInfoModel
    dataFileInfoModelPossibleSettings,dataFileInfoModelInitial = self.DataFileInfoModel.settingsDialog()
    # Create a widget holding check boxes for all possible settings
    dFIMLayout = QtWidgets.QVBoxLayout()
    dFIMTitleLabel = QtWidgets.QLabel(text='Select infos to be shown for selected file(s)')
    dFIMTitleLabel.setAlignment(QtCore.Qt.AlignCenter)
    # Add title to layout
    dFIMLayout.addWidget(dFIMTitleLabel)
    # make check boxes for all settings, pre-checked when currently active
    dFIMcheckBoxes = []
    for setting in dataFileInfoModelPossibleSettings.values():
        checkBox = QtWidgets.QCheckBox()
        dFIMcheckBoxes.append(checkBox)
        name = setting.location
        checkBox.setText(name)
        checkBox.setChecked(setting in dataFileInfoModelInitial)
        dFIMLayout.addWidget(checkBox)
    # accept function arguments: self (dialog), layout which was passed in.
    # possibleSettings is bound as a default argument to freeze its current value.
    def dFIMAcceptFunction(self,layout,possibleSettings=dataFileInfoModelPossibleSettings):
        self.dMFIASettings = []
        for idx,setting in enumerate(possibleSettings.values()): # Loop through all the possible settings
            box = layout.itemAt(idx+1).widget() # Skip 0 as it is a QLabel
            if box.isChecked():# If checked add the corresponding setting to list of loaded settings
                self.dMFIASettings.append(setting.location)
    # Create layout for gui settings
    guiSettingsLayout = QtWidgets.QVBoxLayout()
    # Create radiobuttons
    # NOTE(review): this accept function computes `length` but never uses it;
    # the gui-settings section looks unfinished -- confirm intended behaviour.
    def guiSettingsAcceptFunction(self,layout):
        length = layout.count()-1 # first entry is QLabel
    # settings holds a list of possible settings for all setting fields
    layouts = [guiSettingsLayout,dFIMLayout]
    acceptFunctions = [guiSettingsAcceptFunction,dFIMAcceptFunction]
    dialog = settingsBoxDialog(layouts=layouts,acceptFunctions=acceptFunctions)
    dialog.resize(dialog.sizeHint())
    if dialog.exec_(): # Execute the dialog
        self.DataFileInfoModel.infos = dialog.dMFIASettings # update settings
        self.DataFileInfoModel.layoutChanged.emit()
    else:
        return
def updateGuiState(self):
    """Method to be called to emit gui state changed signal"""
    previous = self.currentState
    # Derive the new state from the two data models.
    if self.DataSetModel.rowCount(None) == 0:
        new_state = States.EMPTY
    elif self.DataFileModel.rowCount(None) == 0:
        new_state = States.RAW
    else:
        new_state = States.FULL
    self.currentState = new_state
    # Only notify listeners on an actual transition.
    if new_state != previous:
        self.state_changed.emit(new_state)
def molarMassTool(self):
    """Open the molecular-mass calculator in its own window."""
    window = MolecularCalculationManager()
    # Keep a reference so the window is not garbage collected while open.
    self.windows.append(window)
    window.show()
def neutronCalculationTool(self):
    """Open the neutron calculation tool in its own window."""
    window = CalculatorManager()
    # Keep a reference so the window is not garbage collected while open.
    self.windows.append(window)
    window.show()
def absolutNormalizationTool(self):
    """Open the absolute-normalization tool in its own window."""
    window = NormalizationManager(parent=None)
    # Keep a reference so the window is not garbage collected while open.
    self.windows.append(window)
    window.show()
def predictionTool(self):
    """Open the prediction tool, wired back to this main window."""
    window = PredictionToolManager(parent=None, guiWindow=self)
    # Keep a reference so the window is not garbage collected while open.
    self.windows.append(window)
    window.show()
def electronicLogbookTool(self):
    """Placeholder for the electronic logbook tool (not implemented yet)."""
    # Fixed typo in the user-facing message ("Implemeted" -> "implemented").
    print('Not implemented yet: electronicLogbookTool')
class settingsBoxDialog(QtWidgets.QDialog):
    """Modal settings dialog composed of caller-provided sub-layouts.

    Each entry in ``layouts`` is stacked vertically; on OK the matching
    entry in ``acceptFunctions`` is called as ``func(dialog, layout)`` so it
    can read the widget states back out of its layout.
    """

    def __init__(self, layouts, acceptFunctions, *args, **kwargs):
        super(settingsBoxDialog, self).__init__(*args, **kwargs)
        self.setWindowTitle("Settings")
        self.acceptFunctions = acceptFunctions
        self.layouts = layouts
        outer = QtWidgets.QVBoxLayout()
        for sub in layouts:
            outer.addLayout(sub)
        buttons = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
        buttons.accepted.connect(self.accept)
        buttons.rejected.connect(self.reject)
        outer.addWidget(buttons)
        self.buttonBox = buttons
        self.layout = outer
        self.setLayout(outer)

    def accept(self):
        # Let every section harvest its widgets before closing as "accepted".
        for func, sub in zip(self.acceptFunctions, self.layouts):
            func(self, sub)
        return super(settingsBoxDialog, self).accept()

    def reject(self):
        return super(settingsBoxDialog, self).reject()
def updateSplash(splash, originalTime, updateInterval, padding='\n'*7+20*' '):
    """Redraw the splash-screen 'Loading' message with a growing dot trail."""
    elapsed = (datetime.datetime.now() - originalTime).total_seconds()
    # One extra dot per elapsed update interval.
    dots = int(1000.0 * elapsed / updateInterval) + 1
    alignment = QtCore.Qt.AlignTop  # | QtCore.Qt.AlignHCenter
    splash.showMessage(padding + 'Loading DMCGui' + '.' * dots,
                       color=QtGui.QColor(255, 255, 255), alignment=alignment)
    QtWidgets.QApplication.processEvents()
def main():
    """Start the DMCGui application: splash screen, main window, Qt event loop."""
    # AppContextEmulator lives next to this file when run from source, but
    # under the package path when installed.
    try:
        import AppContextEmulator
    except ImportError:
        from DMCGui.src.main.python import AppContextEmulator
    app = QtWidgets.QApplication(sys.argv) # Passing command line arguments to app
    appEmu = AppContextEmulator.AppContextEmulator(__file__)
    splash = QtWidgets.QSplashScreen(QtGui.QPixmap(appEmu.get_resource('splash.png')))
    splash.show()
    timer = QtCore.QTimer()
    # adding action to timer
    updateInterval = 400 # ms
    originalTime = datetime.datetime.now()
    # Animate the splash "Loading..." dots while the main window is built.
    updater = lambda:updateSplash(splash,originalTime=originalTime,updateInterval=updateInterval)
    updater()
    timer.timeout.connect(updater)
    # update the timer every updateInterval
    timer.start(updateInterval)
    window = DMCMainWindow(appEmu) # This window has to be closed for app to end
    splash.finish(window)
    window.show()
    timer.stop()
    # A single command line argument is treated as a preset file to load.
    if len(sys.argv)==2:
        window.loadGui(presetFileLocation=sys.argv[1])
    app.exec_()
# Run the GUI when executed as a script.
if __name__ == '__main__':
    main()
/CommandTool-0.5.3.tar.gz/CommandTool-0.5.3/doc/source/index.rst | .. CommandTool documentation master file, created by
sphinx-quickstart on Mon Jun 29 23:26:13 2009.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
0.5.3
+++++
.. include:: ../index.txt
Documentation
=============
The code is based on the blog post at
http://jimmyg.org/blog/2009/python-command-line-interface-(cli)-with-sub-commands.html
but see the documentation below for full, up-to-date information.
.. toctree::
:maxdepth: 3
manual
api
.. include:: ../../CHANGELOG.txt
License
=======
.. include:: ../../LICENSE.txt
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| PypiClean |
/JoUtil-1.3.3-py3-none-any.whl/JoTools/txkj/eagleUtil.py |
import time
import os
import copy
import random
import shutil
from PIL import Image
from ..utils.HashlibUtil import HashLibUtil
from ..utils.JsonUtil import JsonUtil
from ..utils.FileOperationUtil import FileOperationUtil
from ..txkjRes.deteRes import DeteRes
# FIXME: allow re-running the colour analysis manually (select the images,
# right-click "more", re-analyse colours).
# TODO: the analysis is slow -- persist the md5 dict to a file so an
# interrupted run does not have to start from scratch.

# Module-level feature flags.
# NOTE(review): neither flag is read anywhere in this module -- confirm
# before removing.
use_xml_tag = True
use_folder_tag = True
class EagleMetaData(object):
    """Eagle per-image metadata (the content of an item's ``metadata.json``)."""

    # Eagle's json keys are camelCase; these are the ones whose python
    # attribute name differs (snake_case).  Previously load/save used the
    # camelCase names directly, so ``modification_time``, ``is_deleted`` and
    # ``last_modified`` were never written to json and loads created stray
    # camelCase attributes instead of filling the declared ones.
    _KEY_TO_ATTR = {
        "isDeleted": "is_deleted",
        "modificationTime": "modification_time",
        "lastModified": "last_modified",
    }

    def __init__(self):
        self.id = None
        self.name = None
        self.size = None
        self.btime = None
        self.mtime = None
        self.ext = None  # file extension, e.g. "jpg"
        self.tags = []  # tag strings
        self.folders = []
        self.is_deleted = None
        self.url = None
        self.annotation = None
        self.modification_time = None  # last modification timestamp (ms)
        self.height = None
        self.width = None
        self.orientation = None
        self.last_modified = None
        self.palettes = None  # dominant colours and their proportions
        self.comments = None  # bounding-box annotations
        # json keys recognised by load/save (kept unchanged for compatibility)
        self.attrs = ["id", "name", "size", "btime", "mtime", "ext", "tags", "folders", "isDeleted",
                      "url", "annotation", "modificationTime", "height", "width", "orientation", "lastModified", "palettes", "comments"]

    def load_atts_from_json(self, json_path):
        """Populate attributes from a ``metadata.json`` file."""
        json_info = JsonUtil.load_data_from_json_file(json_path)
        for each_key in self.attrs:
            if each_key in json_info:
                # Map the camelCase json key onto the snake_case attribute.
                setattr(self, self._KEY_TO_ATTR.get(each_key, each_key), json_info[each_key])

    def add_tag(self, tag):
        """Append *tag* if it is not already present."""
        if tag not in self.tags:
            self.tags.append(tag)

    def add_comment(self, x1, y1, x2, y2, annotation, assign_id, last_modified):
        """Append one bounding-box comment ((x1,y1)-(x2,y2), label *annotation*)."""
        comment_info = {"id": assign_id, "x": x1, "y": y1, "width": x2 - x1,
                        "height": y2 - y1, "annotation": annotation,
                        "lastModified": last_modified}
        if self.comments is None:
            self.comments = [comment_info]
        else:
            self.comments.append(comment_info)

    def save_to_json_file(self, file_path):
        """Write every non-None attribute to *file_path* under Eagle's json keys."""
        json_info = {}
        for each_key in self.attrs:
            attr_name = self._KEY_TO_ATTR.get(each_key, each_key)
            if hasattr(self, attr_name):
                value = getattr(self, attr_name)
                # Skip attributes still at their None default.
                if value is not None:
                    json_info[each_key] = value
        JsonUtil.save_data_to_json_file(json_info, file_path)
class EagleMTimes(object):
    """Per-item modification times (Eagle's ``mtime.json``)."""

    def __init__(self):
        self.time_dict = {}  # item id -> modification time (ms)

    def load_from_json(self, json_path):
        """Load the time table from json, dropping the synthetic "all" counter."""
        self.time_dict = JsonUtil.load_data_from_json_file(json_path)
        del self.time_dict["all"]

    def update_assign_id(self, assign_id, new_time):
        """Insert or overwrite the modification time of *assign_id*."""
        # Add-or-update: collapses the previous if/else whose two branches
        # were identical (a leftover from a disabled "unknown id" check).
        self.time_dict[assign_id] = new_time

    def save_to_json_file(self, save_path):
        """Write the time table plus the item count under the "all" key."""
        json_info = copy.deepcopy(self.time_dict)
        json_info["all"] = len(self.time_dict)
        JsonUtil.save_data_to_json_file(json_info, save_path)
class EagleTags(object):
    """Tag history / starred-tag sets (Eagle's ``tags.json``)."""

    def __init__(self):
        self.historyTags = set()
        self.starredTags = set()

    def load_from_json(self, json_path):
        """Read both tag sets from *json_path*."""
        data = JsonUtil.load_data_from_json_file(json_path)
        self.historyTags = set(data["historyTags"])
        self.starredTags = set(data["starredTags"])

    def add_tags(self, tag):
        """Record *tag* in the tag history."""
        self.historyTags.add(tag)

    def save_to_json_file(self, json_path):
        """Serialise both tag sets (as lists) to *json_path*."""
        payload = {"historyTags": list(self.historyTags),
                   "starredTags": list(self.starredTags)}
        JsonUtil.save_data_to_json_file(payload, json_path)
class EagleFolderMetaData(object):
    """Top-level library metadata (the library's ``metadata.json``)."""

    def __init__(self):
        self.applicationVersion = "2.0.0"
        self.folders = []
        self.smartFolders = []
        self.quickAccess = []
        self.tagsGroups = []
        # Creation time in milliseconds since the epoch.
        self.modificationTime = int(time.time() * 1000)

    def save_to_json_file(self, json_path):
        """Dump every field to *json_path*."""
        payload = {
            "applicationVersion": self.applicationVersion,
            "folders": self.folders,
            "smartFolders": self.smartFolders,
            "quickAccess": self.quickAccess,
            "tagsGroups": self.tagsGroups,
            "modificationTime": self.modificationTime,
        }
        JsonUtil.save_data_to_json_file(payload, json_path)
class EagleOperate(object):
    """Build an Eagle image library (*.library) from a folder tree of images.

    Folder names under ``img_dir`` become Eagle tags, duplicate images
    (detected by md5) are merged, and Pascal-VOC xml annotations are turned
    into Eagle bounding-box "comments".  :meth:`save_to_xml_img` exports the
    library back to an xml + jpg data set.
    """

    def __init__(self, proj_dir, img_dir):
        os.makedirs(proj_dir, exist_ok=True)
        self.proj_dir = proj_dir
        self.id_set = set()   # ids already issued, to guarantee uniqueness
        self.tag_dict = {}    # image path -> set of tags
        self.md5_dict = {}    # md5 -> first image path seen with that hash
        self.xml_dict = {}    # image path -> list of candidate xml paths
        self.img_dir = img_dir
        # Fixed locations inside the Eagle project folder.
        self.tag_json_path = os.path.join(self.proj_dir, "tags.json")
        self.mtime_json_path = os.path.join(self.proj_dir, "mtime.json")
        self.faster_metadata_json_path = os.path.join(self.proj_dir, "metadata.json")
        self.back_up_dir = os.path.join(self.proj_dir, "backup")
        self.images_dir = os.path.join(self.proj_dir, "images")
        # Bookkeeping objects, created in init_edgal_project().
        self.tag = None
        self.mtime = None
        self.faster_metadata = None
        # Prefix for generated item ids; use a different prefix per project so
        # several projects can be merged later.
        self.assign_id_pre = "KDH5"

    @staticmethod
    def get_modification_time():
        """Return the current time in milliseconds since the epoch."""
        return int(time.time()*1000)

    @staticmethod
    def get_thumbnail_img(img_path, save_path, min_length=300):
        """Create and save a thumbnail whose longest side is *min_length* px."""
        img = Image.open(img_path)
        if img.width > min_length and img.height > min_length:
            scale = max(img.width/min_length, img.height/min_length)
            thumbnail = img.resize((int(img.width/scale), int(img.height/scale)))
            thumbnail.save(save_path)
        else:
            # Image is already small enough; store it unchanged.
            img.save(save_path)

    @staticmethod
    def json_to_xml(json_path, xml_dir):
        """Convert one item's metadata.json comments into a Pascal-VOC xml file."""
        a = EagleMetaData()
        a.load_atts_from_json(json_path)
        # Translate each Eagle comment (x, y, width, height) into a detection box.
        b = DeteRes()
        if a.comments:
            for each_comment in a.comments:
                print(each_comment)  # NOTE(review): debug print left in
                x1 = int(each_comment["x"])
                y1 = int(each_comment["y"])
                x2 = int(each_comment["x"] + each_comment["width"])
                y2 = int(each_comment["y"] + each_comment["height"])
                tag = str(each_comment["annotation"])
                b.add_obj(x1, y1, x2, y2, tag, conf=-1)
        #
        b.width = a.width
        b.height = a.height
        save_xml_path = os.path.join(xml_dir, a.name + '.xml')
        b.save_to_xml(save_xml_path)

    @staticmethod
    def json_to_dete_res(json_path):
        """Convert one item's metadata.json comments into a DeteRes object."""
        a = EagleMetaData()
        a.load_atts_from_json(json_path)
        # Translate each Eagle comment into a detection box.
        b = DeteRes()
        if a.comments:
            for each_comment in a.comments:
                print(each_comment)  # NOTE(review): debug print left in
                x1 = int(each_comment["x"])
                y1 = int(each_comment["y"])
                x2 = int(each_comment["x"] + each_comment["width"])
                y2 = int(each_comment["y"] + each_comment["height"])
                tag = str(each_comment["annotation"])
                b.add_obj(x1, y1, x2, y2, tag, conf=-1)
        #
        b.width = a.width
        b.height = a.height
        return b

    def get_random_id(self):
        """Return a fresh random item id carrying the project prefix."""
        while True:
            random_id = self.assign_id_pre + str(random.randint(100000000, 1000000000))
            if random_id not in self.id_set:
                self.id_set.add(random_id)
                return random_id
        # Alternative time-based id scheme, kept for reference:
        # while True:
        #     random_id = "Jo" + str(int(time.time() * 1000))[2:]  # 2 prefix chars + 11 digits
        #     if random_id not in self.id_set:
        #         self.id_set.add(random_id)
        #         return random_id

    def get_tag_dict(self, img_dir, xml_format='wh'):
        """Scan *img_dir*, dedupe images by md5 and collect tags and xml paths.

        Tags are the directory components between ``self.img_dir`` and each
        image file.
        """
        for img_index, each_img_path in enumerate(FileOperationUtil.re_all_file(img_dir, lambda x:str(x).endswith((".jpg", ".JPG")))):
            print(img_index, "get md5 info", each_img_path)
            dir_name = os.path.dirname(each_img_path)
            # get md5, tag
            each_tags = set(dir_name[len(self.img_dir)+1:].split(os.sep))
            each_md5 = HashLibUtil.get_file_md5(each_img_path)
            if xml_format == 'wh':
                # "wh" (WuHan) layout keeps the xml files in an "xml" subfolder.
                each_xml = os.path.join(dir_name, "xml", os.path.split(each_img_path)[1][:-3] + 'xml')
            else:
                # Other data sets keep the xml next to the image by default.
                each_xml = os.path.join(dir_name, os.path.split(each_img_path)[1][:-3] + 'xml')
            #
            if each_md5 in self.md5_dict:
                # Duplicate image: merge its tags and xml onto the first copy.
                old_img_path = self.md5_dict[each_md5]
                self.tag_dict[old_img_path].update(each_tags)
                self.xml_dict[old_img_path].append(each_xml)
            else:
                self.md5_dict[each_md5] = each_img_path
                self.tag_dict[each_img_path] = each_tags
                self.xml_dict[each_img_path] = [each_xml]

    def save_one_img_info(self, each_img_path):
        """Write one image (file, thumbnail and metadata.json) into the library."""
        a = EagleMetaData()
        each_mo_time = EagleOperate.get_modification_time()
        each_id = self.get_random_id()
        # ----------------------------------------------------------------------------------------------------------
        # add the folder-derived tags
        for each_tag in self.tag_dict[each_img_path]:
            # skip empty tags
            if each_tag == "":
                continue
            a.add_tag(each_tag)
            self.tag.add_tags(each_tag)
        # ----------------------------------------------------------------------------------------------------------
        # add the box annotations from every associated xml file
        each_dir, each_img_name, _ = FileOperationUtil.bang_path(each_img_path)
        for each_xml_path in self.xml_dict[each_img_path]:
            if os.path.exists(each_xml_path):
                each_dete_res = DeteRes(xml_path=each_xml_path)
                for each_dete_obj in each_dete_res.alarms:
                    a.add_comment(each_dete_obj.x1, each_dete_obj.y1, each_dete_obj.x2, each_dete_obj.y2,
                                  each_dete_obj.tag, self.get_random_id(), EagleOperate.get_modification_time())
                    # FIXME: also register the object label as an image tag
                    a.add_tag(each_dete_obj.tag)
                    self.tag.add_tags(each_dete_obj.tag)
        # ----------------------------------------------------------------------------------------------------------
        # fill in the remaining metadata fields
        self.mtime.update_assign_id(each_id, each_mo_time)
        img = Image.open(each_img_path)
        a.modification_time = each_mo_time
        a.id = each_id
        a.name = FileOperationUtil.bang_path(each_img_path)[1]
        a.width = img.width
        a.height = img.height
        a.mtime = each_mo_time
        a.btime = each_mo_time
        a.folders = []
        a.ext = each_img_path[-3:]
        a.size = os.path.getsize(each_img_path)
        # ----------------------------------------------------------------------------------------------------------
        # create the item folder <id>.info and store image, thumbnail and json
        each_img_dir = os.path.join(self.proj_dir, "images", each_id + '.info')
        os.makedirs(each_img_dir, exist_ok=True)
        #
        each_save_name = FileOperationUtil.bang_path(each_img_path)[1]
        save_img_path = os.path.join(each_img_dir, each_save_name + '.jpg')
        save_img_thumbnail_path = os.path.join(each_img_dir, each_save_name + "_thumbnail.png")
        # copy the image and generate its thumbnail
        shutil.copy(each_img_path, save_img_path)
        EagleOperate.get_thumbnail_img(each_img_path, save_img_thumbnail_path)
        each_meta_json_path = os.path.join(each_img_dir, "metadata.json")
        a.save_to_json_file(each_meta_json_path)

    def get_id_name_dict(self):
        """Return a dict mapping item name -> item id for the whole library."""
        name_dict = {}
        image_dir = os.path.join(self.proj_dir, "images")
        for each_json_path in FileOperationUtil.re_all_file(image_dir, lambda x:str(x).endswith("metadata.json")):
            a = EagleMetaData()
            a.load_atts_from_json(each_json_path)
            name_dict[a.name] = a.id
        return name_dict

    def init_edgal_project(self, img_dir):
        """Initialise (build) the Eagle project from *img_dir*."""
        self.tag = EagleTags()
        self.mtime = EagleMTimes()
        self.faster_metadata = EagleFolderMetaData()
        # FIXME: locate the matching xml path according to the layout rules
        self.get_tag_dict(img_dir, 'wh')
        # create the project sub-folders
        os.makedirs(self.back_up_dir, exist_ok=True)
        os.makedirs(self.images_dir, exist_ok=True)
        # populate the images folder, one item per unique image
        index = 0
        for each_img_path in self.tag_dict:
            try:
                self.save_one_img_info(each_img_path)
            except Exception as e:
                # NOTE(review): errors are only printed so that one bad image
                # does not abort the whole import
                print(e)
            print("move :", index, each_img_path)
            index += 1
        self.tag.save_to_json_file(self.tag_json_path)
        self.mtime.save_to_json_file(self.mtime_json_path)
        self.faster_metadata.save_to_json_file(self.faster_metadata_json_path)

    def save_to_xml_img(self, save_dir):
        """Export the library to a VOC-style data set (Annotations + JPEGImages)."""
        # Annotations, JPEGImages
        xml_dir = os.path.join(save_dir, "Annotations")
        img_dir = os.path.join(save_dir, "JPEGImages")
        os.makedirs(xml_dir, exist_ok=True)
        os.makedirs(img_dir, exist_ok=True)
        #
        for each_metadata_path in FileOperationUtil.re_all_file(self.images_dir, lambda x:str(x).endswith(".json")):
            # convert the json annotations to an xml file
            EagleOperate.json_to_xml(each_metadata_path, xml_dir)
            # copy the jpg next to it
            each_img_path = FileOperationUtil.re_all_file(os.path.dirname(each_metadata_path), lambda x:str(x).endswith((".jpg", ".JPG")))[0]
            each_save_img_path = os.path.join(img_dir, os.path.split(each_img_path)[1])
            shutil.copy(each_img_path, each_save_img_path)
if __name__ == "__main__":
    # Example: build an Eagle library from a tree of annotated images.
    # imgDir = r"D:\算法培育-7月样本"
    # eagle_library = r"C:\Users\14271\Desktop\del\peiyu07.library"
    # imgDir = r"D:\算法培育-6月样本"
    # eagle_library = r"D:\peiyu06.library"
    eagle_library = r"D:\WuHan_05.library"
    imgDir = r"D:\国网四川省电力公司地市公司2016年9-12月巡检影像02\2021年5月算法培育1"
    a = EagleOperate(eagle_library, imgDir)
    # Use a distinct id prefix per project so several projects can be merged later.
    a.assign_id_pre = "WH05"
    a.init_edgal_project(imgDir)
    # a.save_to_xml_img(r"C:\Users\14271\Desktop\del\new_res")
// AMD build artifact: minified dojox/charting/action2d/TouchZoomAndPan.
// NOTE(review): generated, minified code -- identifiers _1.._8 are the module
// imports in order (lang, declare, event, sniff, ChartAction, Element, tap,
// plot2d/common).  Prefer editing the un-minified upstream source.
// Behaviour visible in the code: one-finger touch scrolls, two-finger pinch
// zooms, and a double tap toggles between a zoomed window and the full axis.
define("dojox/charting/action2d/TouchZoomAndPan",["dojo/_base/lang","dojo/_base/declare","dojo/_base/event","dojo/_base/sniff","./ChartAction","../Element","dojox/gesture/tap","../plot2d/common"],function(_1,_2,_3,_4,_5,_6,_7,_8){
// Invisible full-chart rectangle plot used to catch touch events; only
// installed on Safari with an SVG surface (see connect() below).
var _9=_2("dojox.charting.action2d._GlassView",[_6],{constructor:function(_a){
},render:function(){
if(!this.isDirty()){
return;
}
this.cleanGroup();
this.group.createRect({width:this.chart.dim.width,height:this.chart.dim.height}).setFill("rgba(0,0,0,0)");
},cleanGroup:function(_b){
this.inherited(arguments);
return this;
},clear:function(){
this.dirty=true;
if(this.chart.stack[0]!=this){
this.chart.movePlotToFront(this.name);
}
return this;
},getSeriesStats:function(){
return _1.delegate(_8.defaultStats);
},initializeScalers:function(){
return this;
},isDirty:function(){
return this.dirty;
}});
// The chart action itself: wires touch start/move/end and double-tap.
return _2("dojox.charting.action2d.TouchZoomAndPan",_5,{defaultParams:{axis:"x",scaleFactor:1.2,maxScale:100,enableScroll:true,enableZoom:true},optionalParams:{},constructor:function(_c,_d,_e){
this._listeners=[{eventName:"ontouchstart",methodName:"onTouchStart"},{eventName:"ontouchmove",methodName:"onTouchMove"},{eventName:"ontouchend",methodName:"onTouchEnd"},{eventName:_7.doubletap,methodName:"onDoubleTap"}];
if(!_e){
_e={};
}
this.axis=_e.axis?_e.axis:"x";
this.scaleFactor=_e.scaleFactor?_e.scaleFactor:1.2;
this.maxScale=_e.maxScale?_e.maxScale:100;
this.enableScroll=_e.enableScroll!=undefined?_e.enableScroll:true;
// NOTE(review): the next line tests _e.enableScroll but assigns enableZoom --
// looks like an upstream bug; confirm against the un-minified source.
this.enableZoom=_e.enableScroll!=undefined?_e.enableZoom:true;
this._uName="touchZoomPan"+this.axis;
this.connect();
},connect:function(){
this.inherited(arguments);
if(_4("safari")&&this.chart.surface.declaredClass.indexOf("svg")!=-1){
this.chart.addPlot(this._uName,{type:_9});
}
},disconnect:function(){
if(_4("safari")&&this.chart.surface.declaredClass.indexOf("svg")!=-1){
this.chart.removePlot(this._uName);
}
this.inherited(arguments);
},onTouchStart:function(_f){
var _10=this.chart,_11=_10.getAxis(this.axis);
var _12=_f.touches.length;
this._startPageCoord={x:_f.touches[0].pageX,y:_f.touches[0].pageY};
if((this.enableZoom||this.enableScroll)&&_10._delayedRenderHandle){
clearTimeout(_10._delayedRenderHandle);
_10._delayedRenderHandle=null;
_10.render();
}
// Two fingers: record the pinch baseline; one finger: start a scroll.
if(this.enableZoom&&_12>=2){
this._endPageCoord={x:_f.touches[1].pageX,y:_f.touches[1].pageY};
var _13={x:(this._startPageCoord.x+this._endPageCoord.x)/2,y:(this._startPageCoord.y+this._endPageCoord.y)/2};
var _14=_11.getScaler();
this._initScale=_11.getWindowScale();
var t=this._initData=this.plot.toData();
this._middleCoord=t(_13)[this.axis];
this._startCoord=_14.bounds.from;
this._endCoord=_14.bounds.to;
}else{
if(this.enableScroll){
this._startScroll(_11);
_3.stop(_f);
}
}
},onTouchMove:function(_15){
var _16=this.chart,_17=_16.getAxis(this.axis);
var _18=_15.touches.length;
var _19=_17.vertical?"pageY":"pageX",_1a=_17.vertical?"y":"x";
if(this.enableZoom&&_18>=2){
var _1b={x:(_15.touches[1].pageX+_15.touches[0].pageX)/2,y:(_15.touches[1].pageY+_15.touches[0].pageY)/2};
var _1c=(this._endPageCoord[_1a]-this._startPageCoord[_1a])/(_15.touches[1][_19]-_15.touches[0][_19]);
if(this._initScale/_1c>this.maxScale){
return;
}
var _1d=this._initData(_1b)[this.axis];
var _1e=_1c*(this._startCoord-_1d)+this._middleCoord,_1f=_1c*(this._endCoord-_1d)+this._middleCoord;
_16.zoomIn(this.axis,[_1e,_1f]);
_3.stop(_15);
}else{
if(this.enableScroll){
var _20=_17.vertical?(this._startPageCoord[_1a]-_15.touches[0][_19]):(_15.touches[0][_19]-this._startPageCoord[_1a]);
_16.setAxisWindow(this.axis,this._lastScale,this._initOffset-_20/this._lastFactor/this._lastScale);
_16.delayedRender();
_3.stop(_15);
}
}
},onTouchEnd:function(_21){
var _22=this.chart,_23=_22.getAxis(this.axis);
// When one finger remains after a pinch, fall back to scrolling with it.
if(_21.touches.length==1&&this.enableScroll){
this._startPageCoord={x:_21.touches[0].pageX,y:_21.touches[0].pageY};
this._startScroll(_23);
}
},_startScroll:function(_24){
var _25=_24.getScaler().bounds;
this._initOffset=_24.getWindowOffset();
this._lastScale=_24.getWindowScale();
this._lastFactor=_25.span/(_25.upper-_25.lower);
},onDoubleTap:function(_26){
var _27=this.chart,_28=_27.getAxis(this.axis);
var _29=1/this.scaleFactor;
// Unzoomed: zoom in around the tapped data point; otherwise reset the axis.
if(_28.getWindowScale()==1){
var _2a=_28.getScaler(),_2b=_2a.bounds.from,end=_2a.bounds.to,_2c=(_2b+end)/2,_2d=this.plot.toData(this._startPageCoord)[this.axis],_2e=_29*(_2b-_2c)+_2d,_2f=_29*(end-_2c)+_2d;
_27.zoomIn(this.axis,[_2e,_2f]);
}else{
_27.setAxisWindow(this.axis,1,0);
_27.render();
}
_3.stop(_26);
}});
});
/Flask-DebugToolbar-0.13.1.tar.gz/Flask-DebugToolbar-0.13.1/src/flask_debugtoolbar/static/codemirror/util/javascript-hint.js | (function () {
function forEach(arr, f) {
for (var i = 0, e = arr.length; i < e; ++i) f(arr[i]);
}
function arrayContains(arr, item) {
if (!Array.prototype.indexOf) {
var i = arr.length;
while (i--) {
if (arr[i] === item) {
return true;
}
}
return false;
}
return arr.indexOf(item) != -1;
}
// Core completion routine shared by the JavaScript and CoffeeScript hinters.
// Finds the token at the cursor, walks backwards through property accesses
// (a.b.c, foo().bar) to build a context chain, and returns the completion
// list plus the character range it should replace.
function scriptHint(editor, keywords, getToken) {
  // Find the token at the cursor
  var cur = editor.getCursor(), token = getToken(editor, cur), tprop = token;
  // If it's not a 'word-style' token, ignore the token.
  if (!/^[\w$_]*$/.test(token.string)) {
    token = tprop = {start: cur.ch, end: cur.ch, string: "", state: token.state,
                     className: token.string == "." ? "property" : null};
  }
  // If it is a property, find out what it is a property of.
  while (tprop.className == "property") {
    tprop = getToken(editor, {line: cur.line, ch: tprop.start});
    if (tprop.string != ".") return;
    tprop = getToken(editor, {line: cur.line, ch: tprop.start});
    if (tprop.string == ')') {
      // Skip backwards over a balanced (...) group so `foo(a, b).` resolves to foo.
      var level = 1;
      do {
        tprop = getToken(editor, {line: cur.line, ch: tprop.start});
        switch (tprop.string) {
        case ')': level++; break;
        case '(': level--; break;
        default: break;
        }
      } while (level > 0)
      tprop = getToken(editor, {line: cur.line, ch: tprop.start});
      if (tprop.className == 'variable')
        tprop.className = 'function';
      else return; // no clue
    }
    if (!context) var context = [];
    context.push(tprop);
  }
  return {list: getCompletions(token, context, keywords),
          from: {line: cur.line, ch: token.start},
          to: {line: cur.line, ch: token.end}};
}
// Entry point registered on CodeMirror for JavaScript completion.
CodeMirror.javascriptHint = function(editor) {
  return scriptHint(editor, javascriptKeywords,
                    function (e, cur) {return e.getTokenAt(cur);});
}
// This getToken, it is for coffeescript, imitates the behavior of
// getTokenAt method in javascript.js, that is, returning "property"
// type and treating "." as an independent token.
function getCoffeeScriptToken(editor, cur) {
  var token = editor.getTokenAt(cur);
  if (cur.ch == token.start + 1 && token.string.charAt(0) == '.') {
    // Cursor sits right after the dot: expose the dot as its own token.
    token.end = token.start;
    token.string = '.';
    token.className = "property";
  }
  else if (/^\.[\w$_]*$/.test(token.string)) {
    // Token like ".foo": strip the leading dot and mark it as a property.
    token.className = "property";
    token.start++;
    token.string = token.string.replace(/\./, '');
  }
  return token;
}
// Entry point registered on CodeMirror for CoffeeScript completion.
CodeMirror.coffeescriptHint = function(editor) {
  return scriptHint(editor, coffeescriptKeywords, getCoffeeScriptToken);
}
// Completion vocabularies: common members of String/Array/Function values
// and the reserved words of each supported language.
var stringProps = ("charAt charCodeAt indexOf lastIndexOf substring substr slice trim trimLeft trimRight " +
                   "toUpperCase toLowerCase split concat match replace search").split(" ");
var arrayProps = ("length concat join splice push pop shift unshift slice reverse sort indexOf " +
                  "lastIndexOf every some filter forEach map reduce reduceRight ").split(" ");
var funcProps = "prototype apply call bind".split(" ");
var javascriptKeywords = ("break case catch continue debugger default delete do else false finally for function " +
                          "if in instanceof new null return switch throw true try typeof var void while with").split(" ");
var coffeescriptKeywords = ("and break catch class continue delete do else extends false finally for " +
                            "if in instanceof isnt new no not null of off on or return switch then throw true try typeof until void while with yes").split(" ");
// Build the completion list for `token`.  When `context` is given it is the
// property-access chain (innermost last); completions are then read off the
// runtime value reached through the global `window` object.  Without context,
// globals, parser-state local variables and `keywords` are offered.
function getCompletions(token, context, keywords) {
  var found = [], start = token.string;
  // Collect `str` if it matches the typed prefix and is not yet present.
  function maybeAdd(str) {
    if (str.indexOf(start) == 0 && !arrayContains(found, str)) found.push(str);
  }
  // Offer type-appropriate members plus the object's own enumerable names.
  function gatherCompletions(obj) {
    if (typeof obj == "string") forEach(stringProps, maybeAdd);
    else if (obj instanceof Array) forEach(arrayProps, maybeAdd);
    else if (obj instanceof Function) forEach(funcProps, maybeAdd);
    for (var name in obj) maybeAdd(name);
  }
  if (context) {
    // If this is a property, see if it belongs to some object we can
    // find in the current environment.
    var obj = context.pop(), base;
    if (obj.className == "variable")
      base = window[obj.string];
    else if (obj.className == "string")
      base = "";
    else if (obj.className == "atom")
      base = 1;
    else if (obj.className == "function") {
      // Special-case jQuery's $ and underscore's _ so their results complete.
      if (window.jQuery != null && (obj.string == '$' || obj.string == 'jQuery') &&
          (typeof window.jQuery == 'function'))
        base = window.jQuery();
      else if (window._ != null && (obj.string == '_') && (typeof window._ == 'function'))
        base = window._();
    }
    while (base != null && context.length)
      base = base[context.pop().string];
    if (base != null) gatherCompletions(base);
  }
  else {
    // If not, just look in the window object and any local scope
    // (reading into JS mode internals to get at the local variables)
    for (var v = token.state.localVars; v; v = v.next) maybeAdd(v.name);
    gatherCompletions(window);
    forEach(keywords, maybeAdd);
  }
  return found;
}
})(); | PypiClean |
/GQCMS-0.0.4-py3-none-any.whl/gqcms/Information.py | import numpy as np
import pandas as pd
import gqcms
def CoefficientMatrix(
    hubbard: gqcms.Hubbard, state: np.ndarray, system_sites: list
) -> np.ndarray:
    """
    Create the coefficient matrix that expresses the wave function in the two subsystems.

    :param hubbard: a hubbard class object
    :param state: wave function coefficient vector
    :param system_sites: sites where the entropy is computed for, can be list or int
    :return: matrix C of shape (4**len(system_sites), 4**(sites - len(system_sites)))
    """
    # Change system_sites to list if int is given
    if isinstance(system_sites, int):
        system_sites = [system_sites]
    # Create a zero coefficient matrix of dimensions 4**A x 4**B
    # where A are the system_sites and B all other sites. A power of four is
    # taken, because one site has four possible configurations i.e. empty,
    # alpha, beta, alpha and beta.
    C = np.zeros((4 ** len(system_sites), 4 ** (hubbard.sites - len(system_sites))))
    for i, onv_i in enumerate(hubbard.basis):
        # Occupation bitstrings padded to one bit per site, reversed so that
        # string index 0 corresponds to site 0.
        bitstring_alpha = bin(onv_i.alpha_onv)[2:].rjust(hubbard.sites, "0")[::-1]
        bitstring_beta = bin(onv_i.beta_onv)[2:].rjust(hubbard.sites, "0")[::-1]
        # Two bits per site: "<alpha bit><beta bit>".
        indices = tuple(
            "".join(index) for index in zip(bitstring_alpha, bitstring_beta)
        )
        # Row index: the system sites' bits read as one binary number.
        sys_index = int("".join([indices[sys_site] for sys_site in system_sites]), 2)
        # Column index: the remaining (environment) sites' bits.
        env_index = int(
            "".join(
                indices[site]
                for site in range(hubbard.sites)
                if site not in system_sites
            ),
            2,
        )
        # NOTE(review): get_sign presumably supplies the fermionic reordering
        # sign for splitting the ONV into system/environment -- confirm in gqcms.
        C[sys_index, env_index] = onv_i.get_sign(system_sites, system_sites) * state[i]
    return C
def Entropy(state: np.ndarray, hubbard: gqcms.Hubbard, system_sites: list) -> float:
    """
    Computes the Von Neumann entropy of the given state at the given site(s).

    :param state: wave function coefficient vector
    :param hubbard: a hubbard class object
    :param system_sites: site index or list of site indices forming the subsystem
    :return: S = -sum_i w_i ln(w_i) over the positive eigenvalues w_i of the
        subsystem's reduced density matrix
    """
    C = CoefficientMatrix(hubbard, state, system_sites)
    # Reduced density matrix of the subsystem: rho_sys = C C^T.
    rdm_sys = np.einsum("ij,kj->ik", C, C, optimize=True)
    eigvals = np.linalg.eigvalsh(rdm_sys)
    # Keep only strictly positive eigenvalues: zero eigenvalues contribute
    # 0*ln(0) -> 0 by convention, and tiny negative values are numerical
    # noise.  The previous implementation took the log of all eigenvalues and
    # filtered the resulting nan/-inf afterwards, which emitted runtime
    # warnings; masking first yields the same sum without them.
    positive = eigvals[eigvals > 0]
    return float(-np.sum(positive * np.log(positive)))
def EntropyFromDataFrame(df: pd.DataFrame, hubbard: gqcms.Hubbard, system_sites: list) -> None:
    """
    Compute the Von Neumann entropy at the given site(s) for every row of *df*.

    The dataframe must contain a column named 'C' holding state vectors; the
    result is stored in-place in a new column named 'S<sites>'.

    :param df: pandas dataframe with a column named 'C'
    :param hubbard: a hubbard class object
    :param system_sites: site index or list of site indices
    """
    sites = [system_sites] if isinstance(system_sites, int) else system_sites
    column = "S" + "".join(str(site) for site in sites)
    df[column] = df['C'].apply(Entropy, hubbard=hubbard, system_sites=sites)
def MutualInformation(
    state: np.ndarray, hubbard: gqcms.Hubbard, site_p: int, site_q: int
) -> float:
    """
    Computes the mutual information of sites *p* and *q*:
    I_pq = (S_p + S_q - S_pq) / 2, and zero when p == q.

    :param state: the state used for the entropy computation
    :param hubbard: a hubbard class object
    :param site_p: site index
    :param site_q: site index
    """
    # A site shares no mutual information with itself by convention.
    if site_p == site_q:
        return 0
    s_p = Entropy(state, hubbard, site_p)
    s_q = Entropy(state, hubbard, site_q)
    s_pq = Entropy(state, hubbard, [site_p, site_q])
    return 0.5 * (s_p + s_q - s_pq)
def MutualInformationFromDataFrame(df, hubbard, site_p, site_q):
    """
    Compute the mutual information of sites *p* and *q* for every row of *df*
    and store it in-place in a new column named 'I<p><q>'.

    :param df: pandas dataframe with a column named 'C'
    :param hubbard: a hubbard class object
    :param site_p: site index
    :param site_q: site index
    """
    column = "I{}{}".format(site_p, site_q)
    df[column] = df['C'].apply(
        MutualInformation, hubbard=hubbard, site_p=site_p, site_q=site_q
    )
/FightMan01dc.pymod-2.0.4.tar.gz/FightMan01dc.pymod-2.0.4/discord/reaction.py | from .iterators import ReactionIterator
class Reaction:
    """Represents a reaction to a message.

    Depending on the way this object was created, some of the attributes can
    have a value of ``None``.

    .. container:: operations

        .. describe:: x == y

            Checks if two reactions are equal (same emoji).

        .. describe:: x != y

            Checks if two reactions are not equal.

        .. describe:: hash(x)

            Returns the reaction's hash.

        .. describe:: str(x)

            Returns the string form of the reaction's emoji.

    Attributes
    -----------
    emoji: Union[:class:`Emoji`, :class:`str`]
        The reaction emoji. May be a custom emoji, or a unicode emoji.
    count: :class:`int`
        Number of times this reaction was made
    me: :class:`bool`
        If the user sent this reaction.
    message: :class:`Message`
        Message this reaction is for.
    """

    __slots__ = ('message', 'count', 'emoji', 'me')

    def __init__(self, *, message, data, emoji=None):
        self.message = message
        # Resolve the emoji from the gateway payload unless one was supplied.
        if not emoji:
            emoji = message._state.get_reaction_emoji(data['emoji'])
        self.emoji = emoji
        self.count = data.get('count', 1)
        self.me = data.get('me')

    @property
    def custom_emoji(self):
        """:class:`bool`: If this is a custom emoji."""
        return not isinstance(self.emoji, str)

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return other.emoji == self.emoji
        return False

    def __ne__(self, other):
        if not isinstance(other, self.__class__):
            return True
        return other.emoji != self.emoji

    def __hash__(self):
        return hash(self.emoji)

    def __str__(self):
        return str(self.emoji)

    def __repr__(self):
        return '<Reaction emoji={0.emoji!r} me={0.me} count={0.count}>'.format(self)

    async def remove(self, user):
        """|coro|

        Remove the reaction by the provided :class:`User` from the message.

        If the reaction is not your own (i.e. ``user`` parameter is not you) then
        the :attr:`~Permissions.manage_messages` permission is needed.

        The ``user`` parameter must represent a user or member and meet
        the :class:`abc.Snowflake` abc.

        Parameters
        -----------
        user: :class:`abc.Snowflake`
            The user or member from which to remove the reaction.

        Raises
        -------
        HTTPException
            Removing the reaction failed.
        Forbidden
            You do not have the proper permissions to remove the reaction.
        NotFound
            The user you specified, or the reaction's message was not found.
        """
        await self.message.remove_reaction(self.emoji, user)

    def users(self, limit=None, after=None):
        """Returns an :class:`AsyncIterator` representing the users that have
        reacted to the message.

        The ``after`` parameter must represent a member and meet the
        :class:`abc.Snowflake` abc.

        Examples
        ---------

        Usage ::

            async for user in reaction.users():
                await channel.send('{0} has reacted with {1.emoji}!'.format(user, reaction))

        Flattening into a list: ::

            users = await reaction.users().flatten()
            winner = random.choice(users)
            await channel.send('{} has won the raffle.'.format(winner))

        Parameters
        ------------
        limit: :class:`int`
            The maximum number of results to return. If not provided, returns
            all the users who reacted to the message.
        after: :class:`abc.Snowflake`
            For pagination, reactions are sorted by member.

        Raises
        --------
        HTTPException
            Getting the users for the reaction failed.

        Yields
        --------
        Union[:class:`User`, :class:`Member`]
            The member (if retrievable) or the user that has reacted to this
            message.  Sometimes it is a :class:`User` if the member has left
            the guild.
        """
        # Custom emoji must be sent to the API as "name:id".
        emoji = '{0.name}:{0.id}'.format(self.emoji) if self.custom_emoji else self.emoji
        if limit is None:
            limit = self.count
        return ReactionIterator(self.message, emoji, limit, after)
/ComPora-0.1.0-py3-none-any.whl/compora/tokenize.py | import re
from yoolkit.xmlscape import encode, decode
from yoolkit.text import unicode_category
from compora.nb_prefix import nb_prefix
def tokenize(sentence, language, split_aggressive_hyphen=True):
    """Tokenize *sentence* using Moses-style rules.

    Parameters
    ----------
    sentence : str
        The raw sentence to tokenize.
    language : str
        Language code; controls apostrophe splitting and the non-breaking
        prefix table.  NOTE(review): a language missing from ``nb_prefix``
        raises ``KeyError`` in the suffix-dot loop below -- confirm that all
        callers pass supported codes.
    split_aggressive_hyphen : bool
        When True, intra-word hyphens are split into the ``@-@`` token.

    Returns
    -------
    str
        The tokenized sentence, tokens separated by single spaces.

    Fix: every ``re.sub`` replacement template previously spelled ``\\g<1>``
    inside a plain (non-raw) string literal -- an invalid escape sequence that
    emits a DeprecationWarning (a SyntaxWarning/error on newer Pythons).  All
    replacement templates are now raw strings; templates containing an
    apostrophe use double-quoted raw strings so the apostrophe is not escaped
    (``r'\\''`` would insert a literal backslash and break ``re.sub``).
    """
    # XML-escape first; the inverse decode() happens at the very end.
    sentence = encode(sentence)
    # Separate out all special characters that are not in 'L' and 'Nd' categories.
    sentence = re.sub(r'([^\w\d\s\'\-,.])', r' \g<1> ', sentence)
    if split_aggressive_hyphen:
        sentence = re.sub(r'([\w\d])-(?=[\w\d])', r'\g<1> @-@ ', sentence)
    # Do not split multi-dots: hide them behind a MULTIDOT placeholder.
    sentence = re.sub(r'\.(\.+)', r' MULTIDOT\g<1>', sentence)
    while re.search(r'MULTIDOT\.', sentence) is not None:
        sentence = re.sub(r'MULTIDOT\.([^\.])', r'MULTIMULTIDOT \g<1>', sentence)
        sentence = re.sub(r'MULTIDOT\.', 'MULTIMULTIDOT', sentence)
    # If the pattern is not of the form digit,digit, separate the ','.
    sentence = re.sub(r'([^\d]),', r'\g<1> , ', sentence)
    sentence = re.sub(r',([^\d])', r' , \g<1>', sentence)
    sentence = re.sub(r'([\d]),$', r'\g<1> ,', sentence)
    # Split contractions; which side the apostrophe attaches to is
    # language-dependent.
    if language in {'en', }:
        # right side split
        sentence = re.sub(r'([^\w])\'([^\w])', r"\g<1> ' \g<2>", sentence)
        sentence = re.sub(r'([\w])\'([^\w])', r"\g<1> ' \g<2>", sentence)
        sentence = re.sub(r'([^\w\d])\'([\w])', r"\g<1> ' \g<2>", sentence)
        sentence = re.sub(r'([\w])\'([\w])', r"\g<1> '\g<2>", sentence)
        sentence = re.sub(r'([\d])\'([s])', r"\g<1> '\g<2>", sentence)
    elif language in {'fr', 'it', 'ga'}:
        # left side split
        sentence = re.sub(r'([^\w])\'([^\w])', r"\g<1> ' \g<2>", sentence)
        sentence = re.sub(r'([\w])\'([^\w])', r"\g<1> ' \g<2>", sentence)
        sentence = re.sub(r'([^\w])\'([\w])', r"\g<1> ' \g<2>", sentence)
        sentence = re.sub(r'([\w])\'([\w])', r"\g<1>' \g<2>", sentence)
    else:
        sentence = re.sub(r'\'', "' ", sentence)
    # Detach a trailing period unless the word is a known abbreviation or
    # non-breaking prefix.
    words = sentence.split()
    for index in range(len(words)):
        word = words[index]
        match = re.search(r'^(\S+)\.$', word)
        if match is not None:
            prefix = match.group(1)
            if re.search(r'\.', prefix) and re.search(r'\w', prefix):
                pass  # inner-dot abbreviation, e.g. "U.S." -- keep attached
            elif prefix in nb_prefix[language]['1']:
                pass  # known non-breaking prefix -- keep the dot attached
            elif index < len(words) - 1 and unicode_category(words[index+1][0]) == 'Ll':
                pass  # next word starts lowercase: not sentence-final
            elif prefix in nb_prefix[language]['2'] and index < len(words) - 1 and re.search(r'^[0-9]+', words[index+1]):
                pass  # numeric non-breaking prefix followed by a number
            else:
                word = prefix + ' .'
            words[index] = word
    sentence = ' '.join(words)
    sentence = ' '.join(sentence.split())
    sentence = re.sub(r'\.\' ?$', " . ' ", sentence)
    # Restore the protected multi-dots.
    while re.search(r'MULTIMULTIDOT', sentence):
        sentence = re.sub(r'MULTIMULTIDOT', 'MULTIDOT.', sentence)
    sentence = re.sub(r'MULTIDOT', '.', sentence)
    sentence = decode(sentence)
    return sentence
/CWR-API-0.0.40.tar.gz/CWR-API-0.0.40/docs/source/other/ipi_number.rst | ==========
IPI Number
==========
There are two fields which appear commonly in records referring to interested
parties: IPI Name Number and IPI Base Number. These are references to CISAC's
Interested Parties Information system, used by CISAC to identify Interested
Parties in collective rights management data, and are stored on their
databases.
Sometimes this is referred to as the IPI/CAE number, but the CAE (Composer, Author and
Publisher, the E standing for 'Editeur') system has been obsolete since 2001.
These codes are used along ISWC (International Standard Musical Work Code)
codes.
As previously indicated, IPI numbers are divided into two types:
- IPI Name Number
- IPI Base Number
---------------
IPI Name Number
---------------
The IPI Name Number, is just composed of eleven numbers.
---------------
IPI Base Number
---------------
The IPI Base Number, follows the pattern H-NNNNNNNNN-C, where each digit means:
- H: header, which is a single letter
- N: identification number. Nine numeric digits.
- C: check digit. A single number. | PypiClean |
/Glances-3.4.0.3.tar.gz/Glances-3.4.0.3/glances/outputs/glances_bars.py | from __future__ import division
from math import modf
class Bar(object):
    """Manage a bar (progression or status).

    Example::

        import sys
        import time

        b = Bar(10)
        for p in range(0, 100):
            b.percent = p
            print("\r%s" % b),
            time.sleep(0.1)
            sys.stdout.flush()
    """

    def __init__(self, size, percentage_char='|', empty_char=' ', pre_char='[', post_char=']', with_text=True):
        # Drawing characters: indexes 0-4 are "empty", 5-9 are "filled"
        # (get() indexes into this list with 8 and int(frac * 8)).
        self.__curses_bars = [empty_char] * 5 + [percentage_char] * 5
        # Total bar width in characters (including the text when enabled)
        self.__size = size
        # Current percentage (clamped by the ``percent`` setter)
        self.__percent = 0
        # Clamping bounds for ``percent``
        self.min_value = 0
        self.max_value = 100
        # Decoration characters (exposed via properties; not used by get())
        self.__pre_char = pre_char
        self.__post_char = post_char
        self.__empty_char = empty_char
        self.__with_text = with_text

    @property
    def size(self):
        """Width available for the bar glyphs.

        When the textual percentage is displayed, 6 characters
        (e.g. ``' 50.0%'``) are reserved for it.

        Fix: the original signature took a ``with_decoration`` argument that a
        property getter can never receive, and it implicitly returned ``None``
        when ``with_text`` was False (missing return), which broke ``get()``.
        """
        if self.__with_text:
            return self.__size - 6
        return self.__size

    @property
    def percent(self):
        """Current percentage, always within [min_value, max_value]."""
        return self.__percent

    @percent.setter
    def percent(self, value):
        # Clamp the requested value into [min_value, max_value].
        if value <= self.min_value:
            value = self.min_value
        if value >= self.max_value:
            value = self.max_value
        self.__percent = value

    @property
    def pre_char(self):
        """Character displayed before the bar."""
        return self.__pre_char

    @property
    def post_char(self):
        """Character displayed after the bar."""
        return self.__post_char

    def get(self):
        """Return the bar rendered as a string."""
        frac, whole = modf(self.size * self.percent / 100.0)
        ret = self.__curses_bars[8] * int(whole)
        if frac > 0:
            # Partial cell: pick a glyph proportional to the fraction.
            ret += self.__curses_bars[int(frac * 8)]
            whole += 1
        ret += self.__empty_char * int(self.size - whole)
        if self.__with_text:
            ret = '{}{:5.1f}%'.format(ret, self.percent)
        return ret

    def __str__(self):
        """Return the bar (same as get())."""
        return self.get()
/JO_AutoMl_Sathishmahi-0.0.5-py3-none-any.whl/JO_AutoMl/combine_all.py | import pandas as pd
import numpy as np
from JO_AutoMl.classifierTrainer import non_hyper_parameter_classifier_model
from JO_AutoMl.handle_missing_value_in_catData import replace_nan_categorical_data
from JO_AutoMl.hyper_parameter import hyper_parameter_classifier
from JO_AutoMl.detect_outlierANDremove import detect_remove_outliers
from JO_AutoMl.handle_imbalanced_dataset import handle_imbalanced_data
# from source_code.diamensionalityReduction import diamensionality_reduction
from JO_AutoMl.diamensionalityReduction import diamensionality_reduction
from JO_AutoMl.remove_unwntedColumns import remove_col
from JO_AutoMl.find_Corr_remove import find_correlation
from JO_AutoMl.transformation import transformation
from JO_AutoMl.replace_NaN import replace_nan
from JO_AutoMl.handle_categorical_features import cat_value
from JO_AutoMl.remove_unwntedColumns import remove_col
from JO_AutoMl.train_test_split import train_test_split_fn
from JO_AutoMl.exception import CustomException
from sklearn.metrics import (
accuracy_score,
f1_score,
confusion_matrix,
precision_score,
recall_score,
)
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import sys,os
import json
# from source_code.model_score import return_model_score
class combine_all_functions:
    """Facade that wires together every JO_AutoMl preprocessing / training
    helper into a single pipeline (categorical handling, NaN imputation,
    imbalance handling, correlation pruning, scaling, outlier removal,
    training and scoring).

    NOTE(review): every method wraps its body in ``try: ... except:
    CustomException(sys)`` -- the exception object is *created but never
    raised*, so any failure is silently swallowed and the method returns
    None.  Confirm whether ``raise CustomException(sys)`` was intended.
    """

    def __init__(self):
        # Working directory for intermediate datasets.
        os.makedirs("all_datasets",exist_ok=True)
        # Columns selected during training; reused to align prediction data.
        self.kmeans_col_li = []
        # One helper object per preprocessing / modelling concern.
        self.non_hyper_parameter_classifier_model_obj = (
            non_hyper_parameter_classifier_model()
        )
        self.replace_nan_categorical_data_obj = replace_nan_categorical_data()
        self.hyper_parameter_classifier_obj = hyper_parameter_classifier()
        self.detect_remove_outliers_obj = detect_remove_outliers()
        self.handle_imbalanced_data_obj = handle_imbalanced_data()
        self.diamensionality_reduction_obj = diamensionality_reduction()
        self.remove_col_obj = remove_col()
        self.find_correlation_obj = find_correlation()
        self.transformation_obj = transformation()
        self.replace_nan_obj = replace_nan()
        self.cat_value_obj = cat_value()
        # self.return_model_score_obj=return_model_score()

    def is_imbalanced(self, label: pd.DataFrame, cl_name: str) -> bool:
        """Return True when any two class counts differ by more than 20% of
        the total number of rows.

        NOTE(review): the nested loops compare every pair in both orders;
        only the positive difference can exceed the threshold, so this is
        O(k^2) where k is the number of classes -- harmless but redundant.
        """
        try:
            label_li = label[cl_name].values.tolist()
            all_count_li = []
            is_im = False
            # Count occurrences of each distinct class value.
            for i in label[cl_name].unique():
                all_count_li.append(label_li.count(i))
            for i in all_count_li:
                for j in all_count_li:
                    if ((i - j) / len(label)) > 0.20:
                        is_im = True
                        return is_im
            return is_im
        except:
            # NOTE(review): swallowed -- see class docstring.
            CustomException(sys)

    def _demo(
        self,
        feature: pd.DataFrame,
        label: pd.DataFrame,
        isClassification=True,
        predict=False,
    ) -> pd.DataFrame:
        """Run the full preprocessing chain on *feature*/*label*.

        Steps: categorical encoding (when categorical columns exist), NaN
        imputation (KNN), optional SMOTETomek resampling for imbalanced
        classification targets, correlated-column removal, standard scaling,
        outlier removal and finally unwanted-column removal (training) or
        column alignment via ``self.kmeans_col_li`` (``predict=True``).

        Returns a ``(final_data, label)`` tuple despite the declared
        ``pd.DataFrame`` return annotation.
        """
        try:
            print('enter')
            counter = 0
            print(counter)
            # Count non-numeric (categorical) columns.
            for col in feature.columns:
                if (
                    "int" in str(feature[col].dtypes).lower()
                    or "float" in str(feature[col].dtypes).lower()
                ):
                    pass
                else:
                    print(f" cat columns =====> {col}")
                    counter = counter + 1
                    print(counter)
            # Encode categorical data only when at least one such column exists.
            if counter >= 1:
                handle_cat_data = self.cat_value_obj.combine_all(feature)
                print(
                    f"===================done handle_CAT data========================="
                )
                feature = self.replace_nan_categorical_data_obj.combine_all(
                    handle_cat_data
                )
                print(
                    f"===================done replace_nan_CAT========================="
                )
            # replace_nan_data=self.replace_nan_obj.mean_median_mode(feature)
            replace_nan_data = self.replace_nan_obj.replace_nan_knnimpute(feature)
            print(f"===================done replace_nan=========================")
            if isClassification:
                # Resample only when the class distribution is imbalanced.
                TorF = self.is_imbalanced(
                    pd.DataFrame(label, columns=["label"]), cl_name="label"
                )
                if TorF:
                    # NOTE(review): chained assignment also rebinds the
                    # module-level name ``handle_imbalanced_data`` (the
                    # imported class!) to the resampled tuple -- almost
                    # certainly unintended; confirm.
                    (
                        replace_nan_data,
                        label,
                    ) = (
                        handle_imbalanced_data
                    ) = self.handle_imbalanced_data_obj.using_smotetomek(
                        replace_nan_data, label
                    )
            find_corr_data = self.find_correlation_obj.remove_corr_col(replace_nan_data)
            # find_corr_data=self.find_correlation_obj.remove_corr_col(feature)
            print(f"===================done find corr data=========================")
            # find_corr_data['label']=raw_data[label_column]
            # transformation_data=self.transformation_obj.std_scaler_dist(detect_remove_outliers_data)
            transformation_data = self.transformation_obj.std_scaler_dist(
                find_corr_data
            )
            print(
                f"===================done transformation data========================="
            )
            final_data, label = self.detect_remove_outliers_obj.remove_outlier(
                transformation_data, label
            )
            # transformation_data=self.transformation_obj.std_scaler_dist(feature)
            print(
                f"===================done for outlier removed data========================="
            )
            if predict == False:
                # Training path: drop unwanted columns and remember the
                # surviving column names for later prediction alignment.
                final_data = self.remove_col_obj.all_columns_remove(final_data)
                # NOTE(review): list comprehension used purely for its side
                # effect of appending -- a plain loop would be clearer.
                [self.kmeans_col_li.append(col) for col in final_data.columns]
                print(
                    f"===================done for remove unwanted columns========================="
                )
            else:
                # Prediction path: restrict to the columns seen in training.
                print("True")
                print(self.kmeans_col_li)
                print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
                final_data = final_data[self.kmeans_col_li]
                return final_data, label
            return final_data, label
        except:
            # NOTE(review): swallowed -- see class docstring.
            CustomException(sys)

    def _model_trainer(
        self, feature: pd.DataFrame, label: pd.DataFrame, isClassification=True
    ):
        """Train the (hyper-parameter enabled) classifier on the already
        preprocessed *feature*/*label* pair."""
        try:
            # final_pre_process_data,label=self._combine_all_data_preprocessing(path,label_column,isClassification)
            # print(final_pre_process_data.isna().sum())
            self.non_hyper_parameter_classifier_model_obj.split_data_training(
                feature, label, hyper_parameter=True
            )
            print(
                "++++++++++++++++++++++++++++++++ training compleate ++++++++++++++++++++++++++++++++++++++++++"
            )
        except:
            # NOTE(review): swallowed -- see class docstring.
            CustomException(sys)

    def _model_predict(self, data: pd.DataFrame) -> list:
        """Predict with every trained model on *data*.

        Returns ``(df_li, out_li)`` -- note the swap relative to the order
        returned by ``model_predicted``.
        """
        try:
            (
                out_li,
                df_li,
            ) = self.non_hyper_parameter_classifier_model_obj.model_predicted(data)
            return df_li, out_li
        except:
            # NOTE(review): swallowed -- see class docstring.
            CustomException(sys)

    def diamension_reduction(
        self, feature: pd.DataFrame, label: pd.DataFrame, isClassification=True
    ) -> pd.DataFrame:
        """Preprocess *feature* then project it through the PCA pipeline."""
        try:
            print("enter")
            _feature, _label = self._demo(feature, label, isClassification)
            print(_feature)
            reduction_data = self.diamensionality_reduction_obj.pca_pipe(_feature)
            print(reduction_data)
            return reduction_data
        except:
            # NOTE(review): swallowed -- see class docstring.
            CustomException(sys)

    def combine_all_data_preprocessing(
        self, path: str, label_column: str, isClassification=True
    ) -> dict:
        """End-to-end pipeline: load the CSV at *path* (first 1000 rows),
        split train/test, preprocess, train, predict on the test split and
        return a dict of model scores.

        NOTE(review): ``nrows=1000`` silently truncates larger datasets --
        confirm this sampling is intended for production use.
        """
        try:
            raw_data = pd.read_csv(path, nrows=1000)
            feature = raw_data.drop(columns=label_column)
            label = raw_data[label_column]
            x_train, x_test, y_train, y_test = train_test_split_fn(
                feature=feature, label=label
            )
            print("finish train_test_split")
            print(x_train.shape)
            # NOTE(review): data_list is built but never used.
            data_list = [x_train, x_test, y_train, y_test]
            train_feature, train_label = self._demo(x_train, y_train, isClassification)
            self._model_trainer(train_feature, train_label)
            # predict=True aligns the test columns with the training columns.
            test_faeture, test_label = self._demo(
                x_test, y_test, isClassification, True
            )
            print("======================TEST LABEL============================")
            print(len(test_label))
            print(len(test_label))
            print("==================================================")
            df_li, out_li = self._model_predict(test_faeture)
            print("======================OUT LABEL ============================")
            print(df_li)
            # print(len(out_li[0]), len(out_li[1]))
            print(len(out_li))
            print("==================================================")
            if isClassification:
                dic = self.non_hyper_parameter_classifier_model_obj.classification_model_score(
                    out_li, test_label
                )
            else:
                dic = self.non_hyper_parameter_classifier_model_obj.regression_model_score(
                    out_li, test_label
                )
            return dic
        except:
            # NOTE(review): swallowed -- see class docstring.
            CustomException(sys)
/NDFinance-0.0.8.tar.gz/NDFinance-0.0.8/ndfinance/brokers/backtest/data_provider.py | from ndfinance.brokers.base.data_provider import DataProvider, OHLCVT
from ndfinance.analysis.technical import TechnicalIndicator
from ndfinance.brokers.base import TimeIndexer
from ndfinance.utils import array_utils
from ndfinance.brokers.base import TimeFrames, asset
from ndfinance.data.crawlers import get_yf_ticker_async, get_fdr_data_async
import yfinance as yf
import pandas as pd
import numpy as np
import warnings
import ray
class BacktestDataProvider(DataProvider):
def __init__(self, primary_timeframe=TimeFrames.day):
super(BacktestDataProvider, self).__init__()
self.root = array_utils.StructureDataset()
self.group_ohlcv = self.root.create_group("ohlcv")
self.group_fundamental = self.root.create_group("fundamental")
self.primary_timeframe = primary_timeframe
def add_ohlc_dataframe(self,
df:pd.DataFrame,
ticker:str,
datetime_format="%Y-%m-%d %H:%M:%S",
timeframe=TimeFrames.day,
timestamp=OHLCVT.timestamp,
open=OHLCVT.open, high=OHLCVT.high,
low=OHLCVT.low, close=OHLCVT.close,
volume=OHLCVT.volume):
if df.empty:
warnings.warn("empty df occured")
return
if not df[timestamp].values.dtype == np.float64:
df[timestamp] = array_utils.to_timestamp(df[timestamp], pattern=datetime_format)
ticker_grp = self.group_ohlcv.create_group(ticker) \
if not ticker in self.group_ohlcv.keys() else self.group_ohlcv[ticker]
timeframe_grp = ticker_grp.create_group(timeframe)
timeframe_grp.create_dataset(name=OHLCVT.timestamp, data=df[timestamp].values)
timeframe_grp.create_dataset(name=OHLCVT.open, data=np.array(df[open].values, dtype=np.float64))
timeframe_grp.create_dataset(name=OHLCVT.high, data=np.array(df[high].values, dtype=np.float64))
timeframe_grp.create_dataset(name=OHLCVT.low, data=np.array(df[low].values, dtype=np.float64))
timeframe_grp.create_dataset(name=OHLCVT.close, data=np.array(df[close].values, dtype=np.float64))
timeframe_grp.create_dataset(name=OHLCVT.volume, data=np.array(df[volume].values, dtype=np.float64))
def add_ohlc_dataframes(self,
dataframes_or_paths,
tickers,
datetime_format="%Y-%m-%d %H:%M:%S",
timeframe=TimeFrames.day,
timestamp=OHLCVT.timestamp,
open=OHLCVT.open, high=OHLCVT.high,
low=OHLCVT.low, close=OHLCVT.close,
volume=OHLCVT.volume):
for df, ticker in zip(dataframes_or_paths, tickers):
if isinstance(df, str):
df = pd.read_csv(df)
self.add_ohlc_dataframe(
df, ticker, datetime_format, timeframe, timestamp, open, high, low, close, volume)
def set_indexer(self, indexer:TimeIndexer):
self.indexer = indexer
def add_fundamental_dataframe(self, df, ticker, datetime_format="%Y-%m-%d %H:%M:%S", timestamp=OHLCVT.timestamp):
if not df[timestamp].values.dtype == np.float64:
df[timestamp] = array_utils.to_timestamp(df[timestamp], pattern=datetime_format)
ticker_grp = self.group_fundamental.create_group(ticker) \
if not ticker in self.group_fundamental[ticker] else self.group_fundamental[ticker]
ticker_grp.create_dataset(name=OHLCVT.timestamp, data=df[timestamp].values)
for l in list(df.columns):
ticker_grp.create_dataset(name=l, data=df[l].values)
def add_fundamental_dataframes(self,
dataframes_or_paths,
tickers,
datetime_format="%Y-%m-%d %H:%M:%S",
timestamp=OHLCVT.timestamp):
for df, ticker in zip(dataframes_or_paths, tickers):
if isinstance(df, str):
df = pd.read_csv(df)
self.add_fundamental_dataframe(
df, ticker, datetime_format, timestamp)
def current_price(self, ticker) -> np.ndarray:
return self.get_ohlcvt(ticker, timeframe=self.primary_timeframe, label=OHLCVT.close)[-1]
def get_ohlcvt(self, ticker, label, timeframe=None, n=1) -> np.ndarray:
if isinstance(ticker, asset.Asset):
ticker = ticker.ticker
if timeframe is None:
timeframe = self.primary_timeframe
try:
idx = np.where(
self.group_ohlcv[ticker][timeframe][OHLCVT.timestamp] <= self.indexer.timestamp)[-1][-1]
except IndexError: return None
return self.group_ohlcv[ticker][timeframe][label][:idx][-n:]
def get_ohlcvt_current(self, *args, **kwargs):
ret = self.get_ohlcvt(*args, **kwargs)
if (ret is None): return None
try: return ret[-1]
except: return None
def _add_technical_indicator(self, ticker, timeframe, indicator:TechnicalIndicator):
self.group_ohlcv[ticker][timeframe].create_dataset(
indicator.name, indicator(self.group_ohlcv[ticker][timeframe]))
def add_technical_indicators(self, tickers, timeframes, indicators):
if not isinstance(tickers, list):
tickers = [tickers]
if not isinstance(timeframes, list):
timeframes = [timeframes]
if not isinstance(indicators, list):
indicators = [indicators]
for ticker in tickers:
for timeframe in timeframes:
for indicator in indicators:
self._add_technical_indicator(ticker, timeframe, indicator)
def get_shortest_timestamp_seq(self):
timeframe_len = np.inf
timeframe = None
for ticker in self.group_ohlcv.keys():
if timeframe_len > len(self.group_ohlcv[ticker][self.primary_timeframe][OHLCVT.timestamp]):
timeframe = self.group_ohlcv[ticker][self.primary_timeframe][OHLCVT.timestamp]
timeframe_len = len(timeframe)
return timeframe
def get_longest_timestamp_seq(self):
timeframe_len = -np.inf
timeframe = None
for ticker in self.group_ohlcv.keys():
if timeframe_len < len(self.group_ohlcv[ticker][self.primary_timeframe][OHLCVT.timestamp]):
timeframe = self.group_ohlcv[ticker][self.primary_timeframe][OHLCVT.timestamp]
timeframe_len = len(timeframe)
return timeframe
def add_yf_tickers(self, *tickers):
dataframes = get_yf_ticker_async(*tickers)
self.add_ohlc_dataframes(dataframes, tickers)
def add_fdr_tickers(self, *tickers):
dataframes = get_fdr_data_async(*tickers)
self.add_ohlc_dataframes(dataframes, tickers)
def cut_data(self, slip=2):
for ticker, timeframe_grp in self.group_ohlcv.items():
for timeframe, label_grp in timeframe_grp.items():
timestamp = self.group_ohlcv[ticker][timeframe][OHLCVT.timestamp]
index = np.where((timestamp >= self.indexer.first_timestamp) & (timestamp <= self.indexer.last_timestamp))[-1]
for label, array in label_grp.items():
self.group_ohlcv[ticker][timeframe][label] = self.group_ohlcv[ticker][timeframe][label][int(np.clip(index[0]-slip, 0, np.inf)):int(index[-1])] | PypiClean |
/Langton-project-0.3.tar.gz/Langton-project-0.3/README.md | # Projet-Langton
I had to model a Langton's ant [1] and exploit some data such as its distance from its initial position over time. In short, I initialize a 2D space full of white boxes. I drop an ant on the space, starting from (0,0), and give it the following behavior:
- If the box it's standing on is white, it colors the box black, rotates 90 degrees clockwise, and moves forward by 1 box.
- If the box it's standing on is black, it colors the box white, rotates 90 degrees counter-clockwise, and moves forward by 1 box.
I also defined multiple behaviors for when multiple ants get too close. First, what I mean by "close" is when there's already an ant on another ant's next box. If this happens:
- 1 -> colors the box in white/black (depending on its current color), move forward.
- 2 -> proceed as if there were no other ant.
- 3 -> remain still
You can launch the run.py file and give some argument in the terminal, for instance:
"python3 run.py 2 30 12000 0.05 12 3" will launch the script with 2 ants, both following the behavior associated with 3, each ant will make 12000 steps and the data will be averaged over 30 iterations, each iteration will give each ant a new random set of initial parameters (position and orientation). Futhermore 5% of the 2D space will be filled with black boxes that will alter their path.
The run_interface.py file can be launched to see the ants move and draw beautiful figures. How to initialize ants with the interface: you will have to specify the number of ants, their initial position and orientation (0, 90, 180, 270), if you want noise and how much, if you want the position or the orientation to be random (be careful: if you want to initialize multiple ants with the interface you will have to specify an initial position even though you have checked the "random positions" box), the size of the ants (only pick odd integers), if you want a random color (suggested with more than one ant), the number of steps, the size of the screen, the speed of the animation and finally if you want to disable the animation.
[1] C. Langton, (1986) « Studying artificial life with cellular automata », Physica 22D 120-149
| PypiClean |
/GetPixelColor-0.2.19.tar.gz/GetPixelColor-0.2.19/README.md | # GetPixelColor
## A cross-platform python library for getting the color of a given pixel on screen.

- Compatible with MacOS, Windows, and Linux.
- Transparency data only available on some platforms.
__Examples:__
Get color of a specific pixel: `getpixelcolor.pixel(x, y)`
> (R, G, B, (A))
Get average color of an area: `getpixelcolor.average(x, y, width, height)`
> (R, G, B, (A))
Get all color values of an area: `getpixelcolor.area(x, y, width, height)`
> [[[R, G, B, (A)]]]
https://pypi.org/project/GetPixelColor/ | PypiClean |
/EtlWorkers-0.0.6.tar.gz/EtlWorkers-0.0.6/README.md | ## ETL Workers
It is my way to express my love for python and my life passion for data science in general and explicitly for data engineering.
### About the package name:
I named it after the famous data-engineering pipeline approach: Extract, Transform, and Load.
<hr>
### About the idea and package structure
It will contain multiple worker classes, and each worker will fulfill certain data operations, segmented by type.
For example, it currently contains one worker called FileWorker, which performs file-related operations.
In the future, I will add more workers and more functions per worker, so it can perform complex operations in any application built with Python.
<hr>
### How to install & use
```
# In your terminal
pip install EtlWorkers
# In your python application
import EtlWorkers as ew
help(ew.end_lines_with_comma)
```
| PypiClean |
/HiCExplorer-2.2.1.1-py3-none-any.whl/hicexplorer/hicPlotViewpoint.py | import warnings
warnings.simplefilter(action="ignore", category=RuntimeWarning)
warnings.simplefilter(action="ignore", category=PendingDeprecationWarning)
import argparse
import sys
import numpy as np
from hicmatrix import HiCMatrix as hm
from hicexplorer.utilities import toString
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
from hicexplorer._version import __version__
import logging
log = logging.getLogger(__name__)
def parse_arguments(args=None):
    """Build the command line parser for the viewpoint plotting tool."""
    parser = argparse.ArgumentParser(
        add_help=False,
        description='Plots the number of interactions around a given reference point in a region.')

    group_required = parser.add_argument_group('Required arguments')
    group_required.add_argument('--matrix', '-m',
                                required=True,
                                nargs='+',
                                help='path of the Hi-C matrices to plot')
    group_required.add_argument('--region',
                                required=True,
                                help='The format is chr:start-end ')
    group_required.add_argument('--outFileName', '-o',
                                required=True,
                                help='File name to save the image.')
    group_required.add_argument('--referencePoint', '-rp',
                                required=True,
                                help='Reference point. Needs to be in the format: \'chr:100\' for a '
                                     'single reference point or \'chr:100-200\' for a reference region.')

    group_optional = parser.add_argument_group('Optional arguments')
    group_optional.add_argument('--chromosome', '-C',
                                help='Optional parameter: Only show results for this chromosome.')
    group_optional.add_argument('--interactionOutFileName', '-i',
                                required=False,
                                help='Optional parameter: If set a bedgraph file with all interaction'
                                     ' will be created.')
    group_optional.add_argument('--dpi',
                                type=int,
                                default=300,
                                help='Optional parameter: Resolution for the image in case the'
                                     'ouput is a raster graphics image (e.g png, jpg)')
    group_optional.add_argument('--version', action='version',
                                version='%(prog)s {}'.format(__version__))
    group_optional.add_argument("--help", "-h", action="help",
                                help="show this help message and exit")
    return parser
def relabelTicks(pTick):
    """Format the genomic coordinate *pTick* as a human-readable axis label.

    Values below one million are shown in kilobases ("Kb"), larger values in
    megabases ("Mb"), always with two decimal places.
    """
    if pTick < 1e6:
        unit, divisor = "Kb", 1e3
    else:
        unit, divisor = "Mb", 1e6
    return "{:.2f} {}".format(int(pTick) / divisor, unit)
def getViewpointValues(pMatrix, pReferencePoint, pChromViewpoint, pRegion_start, pRegion_end, pInteractionList=None, pChromosome=None):
    """Accumulate, per bin of the plotting region, the interaction counts of
    every bin covered by the reference point.

    Returns ``[view_point_start, view_point_end, view_point_range, data_list,
    interactions_list]`` where ``interactions_list`` is None unless
    *pInteractionList* is set.
    """
    hic = hm.hiCMatrix(pMatrix)
    if pChromosome is not None:
        hic.keepOnlyTheseChr(pChromosome)

    # A 2-element reference point is a single coordinate, a 3-element one a
    # region; anything else is rejected.
    if len(pReferencePoint) == 2:
        ref_start = ref_end = int(pReferencePoint[1])
    elif len(pReferencePoint) == 3:
        ref_start = int(pReferencePoint[1])
        ref_end = int(pReferencePoint[2])
    else:
        log.error("No valid reference point given. {}".format(pReferencePoint))
        exit(1)
    view_point_start, view_point_end = hic.getRegionBinRange(pReferencePoint[0], ref_start, ref_end)

    view_point_range = hic.getRegionBinRange(pChromViewpoint, pRegion_start, pRegion_end)
    elements_of_viewpoint = view_point_range[1] - view_point_range[0]
    data_list = np.zeros(elements_of_viewpoint)
    interactions_list = [] if pInteractionList is not None else None

    # Sum the contribution of every reference-point bin to every bin of the
    # plotting region.
    for reference_bin in range(view_point_start, view_point_end + 1):
        chrom, start, end, _ = hic.getBinPos(reference_bin)
        for j, idx in enumerate(range(view_point_range[0], view_point_range[1])):
            data_list[j] += hic.matrix[reference_bin, idx]
            if interactions_list is not None:
                chrom_second, start_second, end_second, _ = hic.getBinPos(idx)
                interactions_list.append((chrom, start, end, chrom_second, start_second, end_second, hic.matrix[reference_bin, idx]))

    return [view_point_start, view_point_end, view_point_range, data_list, interactions_list]
def main(args=None):
    """Entry point: parse arguments, collect the viewpoint profile of every
    input matrix, plot them into one figure and optionally write one bedgraph
    file per matrix."""
    args = parse_arguments().parse_args(args)

    if args.region:
        # Normalise the region string to "chrom:start:end".
        # NOTE(review): the Python 2 branch strips ",.;|!{}()" while the
        # Python 3 branch removes only "," ";" "!" -- confirm whether the
        # asymmetry is intentional.
        if sys.version_info[0] == 2:
            args.region = args.region.translate(None, ",.;|!{}()").replace("-", ":")
        if sys.version_info[0] == 3:
            args.region = args.region.replace(",", "")
            args.region = args.region.replace(";", "")
            args.region = args.region.replace("!", "")
            args.region = args.region.replace("-", ":")
        region = args.region.split(":")
        if len(region) != 3:
            log.error("Region format is invalid {}".format(args.region))
            exit(0)
        # --region is required, so these names are always bound below.
        chrom, region_start, region_end = region[0], int(region[1]), int(region[2])

    # Same normalisation for the reference point ("chr:100" or "chr:100:200").
    if sys.version_info[0] == 2:
        args.referencePoint = args.referencePoint.translate(None, ",.;|!{}()").replace("-", ":")
    if sys.version_info[0] == 3:
        args.referencePoint = args.referencePoint.replace(",", "")
        args.referencePoint = args.referencePoint.replace(";", "")
        args.referencePoint = args.referencePoint.replace("!", "")
        args.referencePoint = args.referencePoint.replace("-", ":")
    referencePoint = args.referencePoint.split(":")

    data_list = []
    interactions_list = None
    if args.interactionOutFileName is not None:
        interactions_list = []
    matrix_name_legend = []
    # One viewpoint profile (and optionally one interaction list) per matrix.
    for matrix in args.matrix:
        view_point_start, view_point_end, view_point_range, data_list_, interactions_list_ \
            = getViewpointValues(matrix, referencePoint, chrom, region_start, region_end, args.interactionOutFileName, args.chromosome)

        data_list.append(data_list_)
        if args.interactionOutFileName is not None:
            interactions_list.append(interactions_list_)
        matrix_name_legend.append(os.path.basename(matrix))

    fig = plt.figure(figsize=(6.4, 4.8))
    ax = plt.subplot(111)
    matrices_plot_legend = []
    # Plot each matrix's profile against the bin index; keep the line handles
    # for the legend.
    for i, data in enumerate(data_list):
        matrices_plot_legend.append(ax.plot(range(len(data)), data, alpha=0.7, label=matrix_name_legend[i])[0])

    # X-axis ticks: region start, reference point (and end, in range mode),
    # region end -- all relative to the first plotted bin.
    if len(referencePoint) == 2:
        log.debug("Single reference point mode: {}".format(referencePoint))
        log.debug("label 0: {}".format((int(referencePoint[1]) - region_start) * (-1)))
        log.debug("referencePoint[1]: {}".format(referencePoint[1]))
        log.debug("region_start: {}".format(region_start))
        log.debug("label 1: {}".format(referencePoint[0] + ":" + relabelTicks(int(referencePoint[1]))))
        log.debug("label 2: {}".format(region_end - int(referencePoint[1])))

        ax.set_xticks([0, view_point_start - view_point_range[0], view_point_range[1] - view_point_range[0]])
        xticklabels = [None] * 3
        xticklabels[0] = relabelTicks((int(referencePoint[1]) - region_start) * (-1))
        xticklabels[1] = referencePoint[0] + ":" + relabelTicks(int(referencePoint[1]))
        xticklabels[2] = relabelTicks(region_end - int(referencePoint[1]))
    elif len(referencePoint) == 3:
        log.debug("Range mode: {}".format(referencePoint))
        # fit scale: start coordinate is 0 --> view_point_range[0]
        ax.set_xticks([0, view_point_start - view_point_range[0], view_point_end - view_point_range[0], view_point_range[1] - view_point_range[0]])
        xticklabels = [None] * 4
        xticklabels[0] = relabelTicks((int(referencePoint[1]) - region_start) * (-1))
        xticklabels[1] = referencePoint[0] + ":" + relabelTicks(int(referencePoint[1]))
        xticklabels[2] = referencePoint[0] + ":" + relabelTicks(int(referencePoint[2]))
        xticklabels[3] = relabelTicks(region_end - int(referencePoint[1]))
    # NOTE(review): an invalid reference point would leave xticklabels
    # unbound here, but getViewpointValues() already exits in that case.

    ax.set_xticklabels(xticklabels)
    ax.set_ylabel('Number of interactions')

    # left, width = .45, .5
    # bottom, height = .25, .7
    # right = left + width
    # top = bottom + height

    plt.legend(handles=matrices_plot_legend)
    plt.savefig(args.outFileName, dpi=args.dpi)
    plt.close(fig)

    # Optionally dump every (reference bin, region bin, count) triple as a
    # bedgraph-like file, one file per input matrix.
    if interactions_list is not None:
        for i, interactions_list_ in enumerate(interactions_list):
            with open(args.interactionOutFileName + '_' + matrix_name_legend[i] + '.bedgraph', 'w') as fh:
                for interaction in interactions_list_:
                    fh.write("{}\t{}\t{}\t{}\t{}\t{}\t{:.12f}\n".format(toString(interaction[0]), toString(interaction[1]), toString(interaction[2]), toString(interaction[3]), toString(interaction[4]), toString(interaction[5]), float(interaction[6])))
/Demomgr-1.10.1-py3-none-any.whl/demomgr/threads/filter.py | import os
import time
import queue
from demomgr.demo_info import DemoInfo
from demomgr.filterlogic import process_filterstring, FILTERFLAGS
from demomgr.helpers import readdemoheader
from demomgr.threads.read_folder import ThreadReadFolder
from demomgr.threads._threadsig import THREADSIG
from demomgr.threads._base import _StoppableBaseThread
class ThreadFilter(_StoppableBaseThread):
	"""
	Thread to filter a directory of demos.

	Parses the user-supplied filter string, fetches demo information for
	the target directory via a nested ThreadReadFolder, applies the
	resulting filter predicates to every demo and finally emits the
	filtered demo data (same column layout as the fetcher's result) on
	the output queue.
	"""

	def __init__(self, queue_out, filterstring, curdir, cfg, silent = False):
		"""
		Thread requires output queue and the following args:
			filterstring <Str>: Raw user input from the entry field
			curdir <Str>: Absolute path to current directory
			cfg <Dict>: Program configuration
			silent <Bool>: If True, thread will not drop progress messages
		"""
		self.filterstring = filterstring
		self.curdir = curdir
		self.cfg = cfg
		self.silent = silent

		super().__init__(None, queue_out)

	def run(self):
		starttime = time.time()
		self.queue_out_put(THREADSIG.INFO_STATUSBAR, ("Filtering demos; Parsing filter...", ))
		# Turn the raw filter string into predicate callables plus flags
		# describing what extra data (e.g. the demo header) the predicates need.
		try:
			filters, flags = process_filterstring(self.filterstring)
		except Exception as error:
			self.queue_out_put(
				THREADSIG.INFO_STATUSBAR, (f"Error parsing filter request: {error}", 4000)
			)
			self.queue_out_put(THREADSIG.FAILURE)
			return

		if self.stoprequest.is_set():
			self.queue_out_put(THREADSIG.ABORTED)
			return

		if not self.silent:
			self.queue_out_put(
				THREADSIG.INFO_STATUSBAR, ("Filtering demos; Reading information...", )
			)

		# Delegate the directory read to the regular folder-reading thread,
		# collecting its output on a private queue.
		self.datafetcherqueue = queue.Queue()
		self.datafetcherthread = ThreadReadFolder(
			self.datafetcherqueue, targetdir = self.curdir, cfg = self.cfg
		)
		self.datafetcherthread.start()
		# NOTE: Can't really wait for join to this thread here.
		self.datafetcherthread.join(None, nostop = True)
		if self.stoprequest.is_set():
			self.queue_out_put(THREADSIG.ABORTED)
			return

		# Drain the fetcher's queue: keep only the demo data payload and bail
		# out with FAILURE if the fetcher finished with anything but SUCCESS.
		demo_data = None
		while True:
			try:
				queueobj = self.datafetcherqueue.get_nowait()
				if queueobj[0] is THREADSIG.RESULT_DEMODATA:
					demo_data = queueobj[1]
				elif queueobj[0].is_finish_signal():
					if queueobj[0] is THREADSIG.SUCCESS:
						break
					self.queue_out_put(
						THREADSIG.INFO_STATUSBAR,
						("Demo fetching thread failed during filtering.", 4000)
					)
					self.queue_out_put(THREADSIG.FAILURE)
					return
			except queue.Empty:
				break
		# NOTE(review): if the fetcher never delivered RESULT_DEMODATA,
		# demo_data is still None here and the len() below would raise -
		# presumably SUCCESS always implies a preceding data message; confirm.

		if self.stoprequest.is_set():
			self.queue_out_put(THREADSIG.ABORTED)
			return

		errors = 0
		filtered_demo_data = {
			"col_filename": [], "col_demo_info": [], "col_ctime": [], "col_filesize": []
		}
		file_amnt = len(demo_data["col_filename"])
		for i, demo_name in enumerate(demo_data["col_filename"]):
			if not self.silent:
				self.queue_out_put(
					THREADSIG.INFO_STATUSBAR, (f"Filtering demos; {i+1} / {file_amnt}", )
				)
			# Per-demo view handed to each filter predicate; demos without
			# stored info get an empty DemoInfo stand-in.
			curdataset = {
				"name": demo_name,
				"demo_info": (
					DemoInfo(demo_name, [], []) if demo_data["col_demo_info"][i] is None
					else demo_data["col_demo_info"][i]
				),
				"header": None,
				"filedata": {
					"filesize": demo_data["col_filesize"][i],
					"modtime": demo_data["col_ctime"][i],
				},
			}
			# Only read the demo header from disk when a filter requires it;
			# unreadable/corrupt demos are dropped and counted as errors.
			if flags & FILTERFLAGS.HEADER:
				try:
					curdataset["header"] = readdemoheader(os.path.join(self.curdir, demo_name))
				except (OSError, ValueError):
					errors += 1
					continue
			# A demo is kept only when every predicate accepts it.
			if all(lambda_(curdataset) for lambda_ in filters):
				filtered_demo_data["col_filename"].append(demo_name)
				filtered_demo_data["col_demo_info"].append(demo_data["col_demo_info"][i])
				filtered_demo_data["col_ctime"].append(demo_data["col_ctime"][i])
				filtered_demo_data["col_filesize"].append(demo_data["col_filesize"][i])
			if self.stoprequest.is_set():
				self.queue_out_put(THREADSIG.ABORTED)
				return

		res_msg = f"Filtered {file_amnt} demos in {round(time.time() - starttime, 3)} seconds."
		if errors > 0:
			res_msg += f" {errors} of those excluded due to errors."
		self.queue_out_put(THREADSIG.INFO_STATUSBAR, (res_msg, 3000))
		self.queue_out_put(THREADSIG.RESULT_DEMODATA, filtered_demo_data)
		self.queue_out_put(THREADSIG.SUCCESS)
/Eskapade_ROOT-0.9.0-py3-none-any.whl/esroofit/tutorials/esk405_simulation_based_on_binned_data.py | from eskapade import ConfigObject, Chain, process_manager
from eskapade import core_ops, analysis
from eskapade.logger import Logger, LogLevel
from esroofit import resources
from esroofit.links import RooDataHistFiller, PrintWs, WsUtils
logger = Logger()

logger.debug('Now parsing configuration file esk405_simulation_based_on_binned_data')

#########################################################################################
# --- minimal analysis information

settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk405_simulation_based_on_binned_data'
settings['version'] = 0

#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.

# When True, six observables are histogrammed instead of three, which greatly
# increases the total bin count of the roodatahist built below.
settings['high_num_dims'] = False

input_files = [resources.fixture('mock_accounts.csv.gz')]

#########################################################################################
# --- now set up the chains and links based on configuration flags

ch = Chain('Data')

# --- 0. read input data
read_data = analysis.ReadToDf(name='dflooper', key='accounts', reader='csv')
read_data.path = input_files
ch.add(read_data)

# --- 1. add the record factorizer
#     Here the columns dummy and loc of the input dataset are factorized
#     e.g. x = ['apple', 'tree', 'pear', 'apple', 'pear'] becomes the column:
#     x = [0, 1, 2, 0, 2]
#     By default, the mapping is stored in a dict under key: 'map_'+store_key+'_to_original'
fact = analysis.RecordFactorizer(name='rf1')
fact.columns = ['isActive', 'eyeColor', 'favoriteFruit', 'gender']
fact.read_key = 'accounts'
fact.inplace = True
# datastore keys under which the category <-> integer mappings are stored
fact.sk_map_to_original = 'to_original'
fact.sk_map_to_factorized = 'to_factorized'
fact.logger.log_level = LogLevel.DEBUG
ch.add(fact)

# --- 2. Fill a roodatahist with the contents of the dataframe
df2rdh = RooDataHistFiller()
df2rdh.read_key = read_data.key
df2rdh.store_key = 'rdh_' + read_data.key
df2rdh.store_key_vars = 'rdh_vars'
df2rdh.store_key_cats = 'rdh_cats'
# reuse the factorizer's mapping so categorical columns are filled as integers
df2rdh.map_to_factorized = 'to_factorized'
if settings['high_num_dims']:
    df2rdh.columns = ['transaction', 'latitude', 'longitude', 'age', 'eyeColor', 'favoriteFruit']
else:
    df2rdh.columns = ['longitude', 'age', 'eyeColor']
# be careful not to blow up the total number of bins.
# do this by setting the maximum total number of bins allowed.
df2rdh.n_max_total_bins = 1e6
# a histogram-based pdf is created out of the roodatahist object
# we use this pdf below to simulate a new dataset with the same properties as the original
df2rdh.create_hist_pdf = 'hpdf_Ndim'
# all output is stored in the workspace, not datastore
df2rdh.into_ws = True
ch.add(df2rdh)

# --- Print overview of the workspace and the datastore contents
pws = PrintWs()
ch.add(pws)

pds = core_ops.PrintDs()
ch.add(pds)

# --- 3. resimulate the data with the created hist-pdf, and plot these data and the pdf
ch = Chain('WsOps')
wsu = WsUtils()
wsu.add_simulate(pdf='hpdf_Ndim', obs='rdh_vars', num=10000, key='simdata')
wsu.add_plot(obs='age', data='simdata', pdf='hpdf_Ndim', output_file='test.pdf',
             pdf_kwargs={'ProjWData': ('rdh_cats', 'simdata')})
ch.add(wsu)

#########################################################################################

logger.debug('Done parsing configuration file esk405_simulation_based_on_binned_data')
/DB12-1.0.4.tar.gz/DB12-1.0.4/src/db12/benchmark.py |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import logging
import json
import sys
import random
import re
import multiprocessing
if sys.version_info[0] < 3:
# pylint: disable = E, W, R, C
# python3 range corresponds to xrange in python2
range = xrange
def single_dirac_benchmark(iterations_num=1, measured_copies=None, correction=True):
    """Get Normalized Power of one CPU in DIRAC Benchmark 2012 units (DB12)

    :param int iterations_num: number of iterations to run
    :param multiprocessing.Value measured_copies: extra iterations to run
    :param bool correction: apply the python3 score-correction factor
    :return: dict with CPU/WALL times and the DB12 NORM, or None when no
        CPU time was consumed
    """
    # pylint: disable = too-many-locals

    # This number of iterations corresponds to 1kHS2k.seconds, i.e. 250 HS06 seconds
    iters = int(1000 * 1000 * 12.5)
    calib = 250.0

    # Accumulators are kept even though their values are never reported:
    # they force the arithmetic in the loop to actually happen.
    m_1 = int(0)
    m_2 = int(0)
    if sys.version_info[0] < 3:
        # pylint: disable = E, W, R, C
        # long type does not exist anymore in python3 but was used in this context with python2
        m_1 = long(0)
        m_2 = long(0)
    p_1 = 0
    p_2 = 0

    # Do one iteration extra to allow CPUs with variable speed (we ignore zeroth iteration)
    # Do one or more extra iterations to avoid tail effects when copies run in parallel
    it_1 = 0
    while (it_1 <= iterations_num) or (
        measured_copies is not None and measured_copies.value > 0
    ):
        if it_1 == 1:
            # Timing starts after the warm-up (zeroth) iteration
            start = os.times()

        # Now the iterations
        for _j in range(iters):
            t_1 = random.normalvariate(10, 1)
            m_1 += t_1
            m_2 += t_1 * t_1
            p_1 += t_1
            p_2 += t_1 * t_1

        if it_1 == iterations_num:
            # Stop the clock after the measured iterations; any further loop
            # passes only keep the CPU busy while sibling copies finish.
            end = os.times()
            if measured_copies is not None:
                # Reduce the total of running copies by one
                measured_copies.value -= 1

        it_1 += 1

    # os.times(): indices 0-3 are user/system CPU times, index 4 is wall clock
    cput = sum(end[:4]) - sum(start[:4])
    wall = end[4] - start[4]

    if not cput:
        return None

    # Normalize: each measured iteration represents 250 HS06-seconds of work
    norm = calib * iterations_num / cput
    if correction:
        norm = get_norm_correction(norm)

    # Return DIRAC-compatible values
    output = {
        "CPU": cput,
        "WALL": wall,
        "NORM": norm,
        "UNIT": "DB12",
    }

    return output
def single_dirac_benchmark_process(
    result_object, iterations_num=1, measured_copies=None, correction=True
):
    """Run single_dirac_benchmark() in a multiprocessing friendly way

    :param multiprocessing.Value result_object: receives the computed NORM
    :param int iterations_num: number of iterations to run
    :param multiprocessing.Value measured_copies: extra iterations to run
    :param bool correction: apply the python3 score-correction factor
    """
    outcome = single_dirac_benchmark(
        iterations_num=iterations_num,
        measured_copies=measured_copies,
        correction=correction,
    )
    # Leave the shared value untouched unless a norm was actually computed;
    # writing to result_object makes the score visible to the parent process.
    if outcome and "NORM" in outcome:
        result_object.value = outcome["NORM"]
def multiple_dirac_benchmark(
    copies=1, iterations_num=1, extra_iter=False, correction=True
):
    """Run multiple copies of the DIRAC Benchmark in parallel

    :param int copies: number of single benchmarks to run in parallel
    :param int iterations_num: number of iterations to run per copy
    :param bool extra_iter: whether copies should keep looping until all
        copies have finished their measured iterations
    :param bool correction: apply the python3 score-correction factor
    :return: dict with the sorted raw scores and several averages
    """
    # Shared countdown of copies still being measured; None disables the
    # extra-iteration tail behaviour entirely.
    measured_copies = multiprocessing.Value("i", copies) if extra_iter else None

    # One shared double per copy to receive that copy's NORM.
    results = [multiprocessing.Value("d", 0.0) for _ in range(copies)]
    processes = [
        multiprocessing.Process(
            target=single_dirac_benchmark_process,
            args=(result, iterations_num, measured_copies, correction),
        )
        for result in results
    ]

    # Start them all off at the same time, then wait for every one to finish.
    for process in processes:
        process.start()
    for process in processes:
        process.join()

    # Collect scores in submission order (the product is accumulated before
    # sorting, matching the original rounding behaviour), then sort.
    raw = [result.value for result in results]
    product = 1.0
    for score in raw:
        product *= score
    raw.sort()

    # Return the list of raw results and various averages
    return {
        "raw": raw,
        "copies": copies,
        "sum": sum(raw),
        "arithmetic_mean": sum(raw) / copies,
        "geometric_mean": product ** (1.0 / copies),
        "median": raw[(copies - 1) // 2],
    }
def wholenode_dirac_benchmark(iterations_num=1, extra_iter=False, correction=True):
    """Run as many copies as needed to occupy the whole machine

    :param int iterations_num: number of iterations to run per copy
    :param bool extra_iter: whether copies should keep looping until all
        copies have finished their measured iterations
    :param bool correction: apply the python3 score-correction factor
    :return: dict with the sorted raw scores and several averages
    """
    try:
        copies = multiprocessing.cpu_count()
    except NotImplementedError:
        # cpu_count() raises NotImplementedError when the platform cannot
        # report a CPU count; fall back to a single benchmark copy. The
        # original bare "except:" also swallowed unrelated errors such as
        # KeyboardInterrupt.
        copies = 1
    return multiple_dirac_benchmark(
        copies=copies,
        iterations_num=iterations_num,
        extra_iter=extra_iter,
        correction=correction,
    )
def get_norm_correction(norm_computed):
    """Apply a factor on the norm depending on the python version used and
    the architecture targeted in order to reproduce the original norm from python2.

    :param float norm_computed: raw norm
    :return: corrected norm, or the raw norm when no correction factor applies
    """
    # If python2 is used, then no action is needed
    if sys.version_info[0] < 3:
        return norm_computed

    logging.warning(
        "You are executing DB12 using python3, DB12 score is generally higher than it was with python2"
    )
    logging.warning("Trying to apply a correction...")

    # Get the dictionary of factors
    # (factors.json is expected to ship alongside this module as package data)
    with open(
        os.path.join(os.path.dirname(__file__), "factors.json"), "r"
    ) as file_object:
        factor_dict = json.load(file_object)

    # Get Python version: if not in the dictionary, no action can be performed
    major, minor = sys.version_info[0:2]
    # %-formatting kept deliberately: this module still supports python2
    python_version = "%s.%s" % (major, minor)
    python_versions_dict = factor_dict["python_version"]
    if python_version not in python_versions_dict.keys():
        logging.warning(
            "Cannot correct the score, return the raw norm: the python version %s has not been analyzed.",
            python_version,
        )
        logging.warning(
            "Versions available are: %s", " ".join(python_versions_dict.keys())
        )
        logging.warning(
            "Please consult https://zenodo.org/record/5647834 for further details"
        )
        return norm_computed

    # Get CPU brand name
    # Only the leading alphabetic token of "model name" is kept (e.g. "Intel"),
    # which is the granularity the factor table is keyed on.
    try:
        with open("/proc/cpuinfo", "r") as file_object:
            content = file_object.read()
            cpu_brand_name = re.findall("model name\t: ([a-zA-Z]*)", content)[0]
    except (IOError, IndexError):
        # IOError: no /proc/cpuinfo (non-Linux); IndexError: no match found
        logging.warning(
            "Cannot correct the score, return the raw norm: cannot access CPU information"
        )
        return norm_computed

    cpus_dict = python_versions_dict[python_version]["cpu_brand"]
    factor = cpus_dict.get(cpu_brand_name)
    if not factor:
        logging.warning(
            "Cannot correct the score, return the raw norm: the CPU brand %s has not been analyzed.",
            cpu_brand_name,
        )
        logging.warning("Brands available are: %s", " ".join(cpus_dict.keys()))
        logging.warning(
            "Please consult https://zenodo.org/record/5647834 for further details."
        )
        return norm_computed

    logging.info("Applying a factor of %s to the raw norm %s", factor, norm_computed)
    return norm_computed * factor
/Client_API_VN-2.11.1.tar.gz/Client_API_VN-2.11.1/docs/apps/examples.rst | =======================
Database usage examples
=======================
Nature-isere export
===================
The following SQL code creates views on the database for Nature-isere.
It can be used as a starting point for defining views on the database for export.
.. code:: sql
--
-- Initialisation de la base pour export vers nature_isere
-- - Création des roles (mots de passes à définir). xfer38 est créé lors de l'installation du serveur debian
-- - Création de la base, des extensions et des schémas
-- - Création du FOREIGN DATA WRAPPER et des tables FOREIGN
-- - Création des vues matérialisées
-- - Création des vues publiques
--
-- A utiliser depuis le compte SUPERUSER:
-- $ sudo -iu xfer38
-- $ psql postgres
--
-- Role: lpo_isere
CREATE ROLE lpo_isere WITH
NOLOGIN
NOSUPERUSER
INHERIT
NOCREATEDB
NOCREATEROLE
NOREPLICATION;
GRANT lpo_isere TO xfer38;
-- Role: nature_isere
-- DROP ROLE nature_isere;
CREATE ROLE nature_isere WITH
LOGIN
NOSUPERUSER
INHERIT
NOCREATEDB
NOCREATEROLE
NOREPLICATION
PASSWORD '???';
-- Database: faune_isere
ALTER DATABASE faune_isere
SET search_path TO "$user", public, topology;
\c faune_isere
ALTER DEFAULT PRIVILEGES
GRANT ALL ON TABLES TO lpo_isere;
ALTER DEFAULT PRIVILEGES
GRANT ALL ON TABLES TO postgres;
ALTER DEFAULT PRIVILEGES
GRANT ALL ON TABLES TO xfer38;
-- DROP EXTENSION IF EXISTS adminpack CASCADE;
-- CREATE EXTENSION adminpack;
-- DROP EXTENSION IF EXISTS postgis CASCADE;
-- CREATE EXTENSION postgis;
-- DROP EXTENSION IF EXISTS postgis_topology CASCADE;
-- CREATE EXTENSION postgis_topology;
DROP EXTENSION IF EXISTS postgres_fdw CASCADE;
DROP SERVER IF EXISTS aura_server CASCADE;
DROP USER MAPPING IF EXISTS FOR xfer38 SERVER aura_server;
CREATE EXTENSION IF NOT EXISTS postgres_fdw;
CREATE SERVER aura_server
FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (host 'geonature.fauneauvergnerhonealpes.org', port '5432', dbname 'gnlpoaura');
-- ALTER SERVER aura_server
-- OWNER TO postgres;
CREATE USER MAPPING FOR xfer38 SERVER aura_server
OPTIONS ("user" 'xxx', password '???');
-- SCHEMA and FOREIGN SCHEMA
CREATE SCHEMA IF NOT EXISTS taxonomie
AUTHORIZATION lpo_isere;
COMMENT ON SCHEMA taxonomie
IS 'Schéma contenant les réferentiels officiels (TAXREF, Mailles, etc.)';
IMPORT FOREIGN SCHEMA taxonomie
FROM SERVER aura_server INTO taxonomie;
-- TABLES for public access
DROP TABLE IF EXISTS public.nature_isere_entity;
CREATE TABLE public.nature_isere_entity (
entity_short_name character varying(1000) COLLATE pg_catalog."default" NOT NULL,
CONSTRAINT nature_isere_entity_pkey PRIMARY KEY (entity_short_name)
);
ALTER TABLE public.nature_isere_entity OWNER to lpo_isere;
-- MATERIALIZED VIEWS and TABLES for referentiel
DROP TABLE IF EXISTS taxonomie.tabx_code_atlas;
CREATE TABLE taxonomie.tabx_code_atlas (
code50 integer NOT NULL,
code19 real,
CONSTRAINT tabx_code_atlas_pkey PRIMARY KEY (code50)
);
ALTER TABLE taxonomie.tabx_code_atlas OWNER TO xfer38;
INSERT INTO taxonomie.tabx_code_atlas VALUES
(0, 0),
(1, 1),
(2, 2),
(3, 3),
(4, 4),
(5, 5),
(6, 6),
(7, 7),
(8, 8),
(9, 9),
(10, 10),
(11, 11),
(12, 12),
(13, 13),
(14, 14),
(15, 15),
(16, 16),
(17, 17),
(18, 18),
(19, 19),
(30, 3.5),
(40, 4.5),
(50, 11.5),
(99, NULL);
-- VIEWS
DROP VIEW IF EXISTS public.obs_nature_isere;
CREATE OR REPLACE VIEW public.obs_nature_isere AS
SELECT observations.id_sighting,
species.french_name AS name_species,
species.latin_name AS latin_species,
cor_c_vn_taxref.taxref_id AS "CD_REF",
observations.date,
observations.date_year,
CASE
WHEN observations.hidden IS NULL THEN observations.place::text
ELSE format('E0%sN%s'::text, (observations.coord_x_local / 10000::double precision)::integer, (observations.coord_y_local / 10000::double precision)::integer)
END AS place,
local_admin_units.name AS municipality,
local_admin_units.insee,
format('E0%sN%s'::text, (observations.coord_x_local / 10000::double precision)::integer, (observations.coord_y_local / 10000::double precision)::integer) AS grid_name,
observations.estimation_code,
observations.count AS total_count,
observations.details AS detail,
observations.atlas_code,
observations.altitude,
observations.hidden,
observations.insert_date,
observations.update_date,
tabx_code_atlas.code19
FROM src_vn.observations
LEFT JOIN src_vn.species ON observations.id_species = species.id
LEFT JOIN src_vn.places ON places.id = observations.id_place
LEFT JOIN src_vn.local_admin_units ON places.id_commune = local_admin_units.id
LEFT JOIN taxonomie.tabx_code_atlas ON observations.atlas_code = tabx_code_atlas.code50
LEFT JOIN taxonomie.cor_c_vn_taxref ON observations.id_species = cor_c_vn_taxref.vn_id
WHERE (observations.admin_hidden IS NULL) AND observations.count > 0 AND (local_admin_units.name IS NOT NULL);
ALTER TABLE public.obs_nature_isere OWNER TO lpo_isere;
GRANT SELECT ON TABLE public.obs_nature_isere TO nature_isere;
GRANT ALL ON TABLE public.obs_nature_isere TO postgres;
GRANT ALL ON TABLE public.obs_nature_isere TO lpo_isere;
DROP VIEW IF EXISTS public.lieu_nature_isere;
CREATE OR REPLACE VIEW public.lieu_nature_isere AS
SELECT places.id AS id,
places.name AS nom,
places.coord_lat AS latitude__d_d_,
places.coord_lon AS longitude__d_d_,
places.coord_x_local AS lambert_93_e__m_,
places.coord_y_local AS lambert_93_n__m_,
places.altitude,
places.visible,
local_admin_units.insee,
local_admin_units.name AS commune
FROM src_vn.places, src_vn.local_admin_units
WHERE local_admin_units.id = places.id_commune;
ALTER TABLE public.lieu_nature_isere OWNER TO lpo_isere;
GRANT SELECT ON TABLE public.lieu_nature_isere TO nature_isere;
GRANT ALL ON TABLE public.lieu_nature_isere TO lpo_isere;
DROP VIEW IF EXISTS public.espece_nature_isere;
CREATE OR REPLACE VIEW public.espece_nature_isere AS
SELECT species.id AS id_species,
species.french_name AS name,
species.latin_name AS name_latin,
species.category_1 AS category,
species.rarity
FROM src_vn.species
WHERE species.is_used;
ALTER TABLE public.espece_nature_isere OWNER TO lpo_isere;
GRANT SELECT ON TABLE public.espece_nature_isere TO nature_isere;
GRANT ALL ON TABLE public.espece_nature_isere TO postgres;
GRANT ALL ON TABLE public.espece_nature_isere TO lpo_isere;
| PypiClean |
/OASYS1_HALF_SRW-0.0.3-py3-none-any.whl/orangecontrib/srw/menu/ow_srw_tools_menu.py | __author__ = 'labx'
from orangecanvas.scheme.link import SchemeLink
from oasys.menus.menu import OMenu
from wofry.propagator.propagator import PropagationManager
from wofrysrw.propagator.propagators2D.srw_fresnel_native import SRW_APPLICATION
from wofrysrw.propagator.propagators2D.srw_propagation_mode import SRWPropagationMode
from orangecontrib.srw.util.srw_util import showWarningMessage, showCriticalMessage
from orangecontrib.srw.widgets.optical_elements.ow_srw_screen import OWSRWScreen
from orangecontrib.srw.widgets.native.ow_srw_intensity_plotter import OWSRWIntensityPlotter
from orangecontrib.srw.widgets.native.ow_srw_me_degcoh_plotter import OWSRWDegCohPlotter
from orangecontrib.srw.widgets.loop_management.ow_srw_accumulation_point import OWSRWAccumulationPoint
from orangecontrib.srw.widgets.gui.ow_srw_widget import SRWWidget
class SRWToolsMenu(OMenu):
    """OASYS "SRW" top-level menu.

    Applies global actions to every SRW widget in the currently open
    workflow: switching the wavefront propagation mode, toggling plot
    generation, and reloading accumulated loop data.

    The executeAction_<n> methods are dispatched positionally, in order,
    against the sub-menu entries declared in __init__ (OMenu convention).
    """

    def __init__(self):
        super().__init__(name="SRW")

        # Container 1: propagation-mode actions (executeAction_1..4)
        self.openContainer()
        self.addContainer("Propagation Mode")
        self.addSubMenu("Element by Element (Wofry)")
        self.addSubMenu("Element by Element (SRW Native)")
        self.addSubMenu("Whole beamline at Final Screen (SRW Native)")
        self.addSeparator()
        self.addSubMenu("Disable Wavefront Propagation on all the Final Screens")
        self.closeContainer()

        # Container 2: plotting actions (executeAction_5..6)
        self.openContainer()
        self.addContainer("Plotting")
        self.addSubMenu("Select Plots \'No\' on all Source and O.E. widgets")
        self.addSubMenu("Select Plots \'Yes\' on all Source and O.E. widgets")
        self.closeContainer()

        # Container 3: cumulative-loop actions (executeAction_7)
        self.openContainer()
        self.addContainer("Cumulative Loops")
        self.addSubMenu("Reload Saved Data on the all the Accumulation Points")
        self.closeContainer()

    # NOTE(review): executeAction_1/2/3 are copy-paste variants differing only
    # in the target propagation mode and the confirmation message; they could
    # share one private helper.

    def executeAction_1(self, action):
        # Switch to "Element by Element (Wofry)"; on failure, try to restore
        # the previously active propagation mode.
        try:
            propagation_mode = PropagationManager.Instance().get_propagation_mode(SRW_APPLICATION)
            try:
                PropagationManager.Instance().set_propagation_mode(SRW_APPLICATION, SRWPropagationMode.STEP_BY_STEP_WOFRY)
                self.set_srw_live_propagation_mode()

                showWarningMessage("Propagation Mode: Element by Element (Wofry)")
            except Exception as exception:
                showCriticalMessage(exception.args[0])
                # Roll back to the mode that was active before the attempt.
                try:
                    PropagationManager.Instance().set_propagation_mode(SRW_APPLICATION, propagation_mode)
                    self.set_srw_live_propagation_mode()
                except:
                    pass
        except:
            pass

    def executeAction_2(self, action):
        # Switch to "Element by Element (SRW Native)" with the same
        # try/rollback pattern as executeAction_1.
        try:
            propagation_mode = PropagationManager.Instance().get_propagation_mode(SRW_APPLICATION)
            try:
                PropagationManager.Instance().set_propagation_mode(SRW_APPLICATION, SRWPropagationMode.STEP_BY_STEP)
                self.set_srw_live_propagation_mode()

                showWarningMessage("Propagation Mode: Element by Element (SRW Native)")
            except Exception as exception:
                showCriticalMessage(exception.args[0])
                try:
                    PropagationManager.Instance().set_propagation_mode(SRW_APPLICATION, propagation_mode)
                    self.set_srw_live_propagation_mode()
                except:
                    pass
        except:
            pass

    def executeAction_3(self, action):
        # Switch to "Whole beamline at Final Screen (SRW Native)" with the
        # same try/rollback pattern as executeAction_1.
        try:
            propagation_mode = PropagationManager.Instance().get_propagation_mode(SRW_APPLICATION)
            try:
                PropagationManager.Instance().set_propagation_mode(SRW_APPLICATION, SRWPropagationMode.WHOLE_BEAMLINE)
                self.set_srw_live_propagation_mode()

                showWarningMessage("Propagation Mode: Whole beamline at Final Screen (SRW Native)")
            except Exception as exception:
                showCriticalMessage(exception.args[0])
                try:
                    PropagationManager.Instance().set_propagation_mode(SRW_APPLICATION, propagation_mode)
                    self.set_srw_live_propagation_mode()
                except:
                    pass
        except:
            pass

    def executeAction_4(self, action):
        # Clear the "final screen" flag on every SRW screen widget.
        # Only valid in whole-beamline propagation mode.
        # NOTE(review): "possibile" in the user-facing messages below is a typo
        # for "possible" (left unchanged here to preserve runtime strings).
        try:
            for node in self.canvas_main_window.current_document().scheme().nodes:
                widget = self.canvas_main_window.current_document().scheme().widget_for_node(node)

                if isinstance(widget, OWSRWScreen):
                    if hasattr(widget, "is_final_screen") and hasattr(widget, "set_is_final_screen"):
                        if (PropagationManager.Instance().get_propagation_mode(SRW_APPLICATION) != SRWPropagationMode.WHOLE_BEAMLINE):
                            raise Exception("Action possibile only while Propagation Mode: Whole beamline at Final Screen (SRW Native)")

                        if hasattr(widget, "show_view_box") and getattr(widget, "show_view_box"):
                            widget.is_final_screen = 0
                            widget.set_is_final_screen()
        except Exception as exception:
            showCriticalMessage(exception.args[0])

    def executeAction_5(self, action):
        # Set "view_type = 0" (plots off) on every plottable SRW widget.
        # Not allowed in whole-beamline propagation mode.
        try:
            for node in self.canvas_main_window.current_document().scheme().nodes:
                widget = self.canvas_main_window.current_document().scheme().widget_for_node(node)

                if isinstance(widget, SRWWidget) and not (isinstance(widget, OWSRWIntensityPlotter) or
                                                          isinstance(widget, OWSRWDegCohPlotter) or
                                                          isinstance(widget, OWSRWAccumulationPoint)):
                    if hasattr(widget, "view_type") and hasattr(widget, "set_PlotQuality"):
                        if (PropagationManager.Instance().get_propagation_mode(SRW_APPLICATION) == SRWPropagationMode.WHOLE_BEAMLINE):
                            raise Exception("Action not possibile while Propagation Mode: Whole beamline at Final Screen (SRW Native)")

                        if hasattr(widget, "show_view_box") and getattr(widget, "show_view_box"):
                            widget.view_type = 0
                            widget.set_PlotQuality()
        except Exception as exception:
            showCriticalMessage(exception.args[0])

    def executeAction_6(self, action):
        # Set "view_type = 1" (plots on) on every plottable SRW widget.
        # Not allowed in whole-beamline propagation mode.
        try:
            for node in self.canvas_main_window.current_document().scheme().nodes:
                widget = self.canvas_main_window.current_document().scheme().widget_for_node(node)

                if isinstance(widget, SRWWidget) and not (isinstance(widget, OWSRWIntensityPlotter) or
                                                          isinstance(widget, OWSRWDegCohPlotter) or
                                                          isinstance(widget, OWSRWAccumulationPoint)):
                    if hasattr(widget, "view_type") and hasattr(widget, "set_PlotQuality"):
                        if (PropagationManager.Instance().get_propagation_mode(SRW_APPLICATION) == SRWPropagationMode.WHOLE_BEAMLINE):
                            raise Exception("Action not possibile while Propagation Mode: Whole beamline at Final Screen (SRW Native)")

                        if hasattr(widget, "show_view_box") and getattr(widget, "show_view_box"):
                            widget.view_type = 1
                            widget.set_PlotQuality()
        except Exception as exception:
            showCriticalMessage(exception.args[0])

    def executeAction_7(self, action):
        # Reload the autosave file on every accumulation point that has
        # autosave enabled.
        try:
            for node in self.canvas_main_window.current_document().scheme().nodes:
                widget = self.canvas_main_window.current_document().scheme().widget_for_node(node)

                if isinstance(widget, OWSRWAccumulationPoint) and widget.autosave == 1: widget.reload_autosave_file()
        except Exception as exception:
            showCriticalMessage(exception.args[0])

    #################################################################

    def set_srw_live_propagation_mode(self):
        # Propagate the newly selected mode to every widget; in whole-beamline
        # mode also force plots off on everything except the final screens.
        for node in self.canvas_main_window.current_document().scheme().nodes:
            widget = self.canvas_main_window.current_document().scheme().widget_for_node(node)

            if hasattr(widget, "set_srw_live_propagation_mode"):
                widget.set_srw_live_propagation_mode()

                if (PropagationManager.Instance().get_propagation_mode(SRW_APPLICATION) == SRWPropagationMode.WHOLE_BEAMLINE):
                    if not (isinstance(widget, OWSRWScreen) or
                            isinstance(widget, OWSRWIntensityPlotter) or
                            isinstance(widget, OWSRWDegCohPlotter) or
                            isinstance(widget, OWSRWAccumulationPoint)) \
                            or getattr(widget, "is_final_screen") == False:
                        if hasattr(widget, "view_type") and hasattr(widget, "set_PlotQuality"):
                            if hasattr(widget, "show_view_box") and getattr(widget, "show_view_box"):
                                widget.view_type = 0
                                widget.set_PlotQuality()

                        if isinstance(widget, OWSRWScreen): widget.set_is_final_screen()

    #################################################################
    #
    # SCHEME MANAGEMENT
    #
    #################################################################

    def getWidgetFromNode(self, node):
        # Resolve a scheme node to its widget instance.
        return self.canvas_main_window.current_document().scheme().widget_for_node(node)

    def createLinks(self, nodes):
        # Chain the given nodes with Beam -> Input Beam links, skipping
        # placeholder entries (plain strings) on either side of a link.
        previous_node = None
        for node in nodes:
            if not (isinstance(node, str)) and not previous_node is None and not (isinstance(previous_node, str)):
                link = SchemeLink(source_node=previous_node, source_channel="Beam", sink_node=node, sink_channel="Input Beam")
                self.canvas_main_window.current_document().addLink(link=link)
            previous_node = node

    def getWidgetDesc(self, widget_name):
        # Look up a widget description by name in the canvas registry.
        return self.canvas_main_window.widget_registry.widget(widget_name)

    def createNewNode(self, widget_desc):
        # Add a new node for the given widget description to the document.
        return self.canvas_main_window.current_document().createNewNode(widget_desc)

    def createNewNodeAndWidget(self, widget_desc):
        # NOTE(review): if createNewNode/getWidgetFromNode raises, "widget"
        # and "node" are unbound and the return statement itself raises
        # UnboundLocalError instead of reporting "messages" - confirm intent.
        messages = []

        try:
            node = self.createNewNode(widget_desc)
            widget = self.getWidgetFromNode(node)

            # here you can put values on the attributes

        except Exception as exception:
            messages.append(exception.args[0])

        return widget, node, messages
/Faker-19.3.1.tar.gz/Faker-19.3.1/faker/providers/company/it_IT/__init__.py | from faker.utils.checksums import calculate_luhn
from .. import Provider as CompanyProvider
class Provider(CompanyProvider):
formats = (
"{{last_name}} {{company_suffix}}",
"{{last_name}}-{{last_name}} {{company_suffix}}",
"{{last_name}}, {{last_name}} e {{last_name}} {{company_suffix}}",
)
catch_phrase_words = (
(
"Abilità",
"Access",
"Adattatore",
"Algoritmo",
"Alleanza",
"Analizzatore",
"Applicazione",
"Approccio",
"Architettura",
"Archivio",
"Intelligenza artificiale",
"Array",
"Attitudine",
"Benchmark",
"Capacità",
"Sfida",
"Circuito",
"Collaborazione",
"Complessità",
"Concetto",
"Conglomerato",
"Contingenza",
"Core",
"Database",
"Data-warehouse",
"Definizione",
"Emulazione",
"Codifica",
"Criptazione",
"Firmware",
"Flessibilità",
"Previsione",
"Frame",
"framework",
"Funzione",
"Funzionalità",
"Interfaccia grafica",
"Hardware",
"Help-desk",
"Gerarchia",
"Hub",
"Implementazione",
"Infrastruttura",
"Iniziativa",
"Installazione",
"Set di istruzioni",
"Interfaccia",
"Soluzione internet",
"Intranet",
"Conoscenza base",
"Matrici",
"Matrice",
"Metodologia",
"Middleware",
"Migrazione",
"Modello",
"Moderazione",
"Monitoraggio",
"Moratoria",
"Rete",
"Architettura aperta",
"Sistema aperto",
"Orchestrazione",
"Paradigma",
"Parallelismo",
"Policy",
"Portale",
"Struttura di prezzo",
"Prodotto",
"Produttività",
"Progetto",
"Proiezione",
"Protocollo",
"Servizio clienti",
"Software",
"Soluzione",
"Standardizzazione",
"Strategia",
"Struttura",
"Successo",
"Sovrastruttura",
"Supporto",
"Sinergia",
"Task-force",
"Finestra temporale",
"Strumenti",
"Utilizzazione",
"Sito web",
"Forza lavoro",
),
(
"adattiva",
"avanzata",
"migliorata",
"assimilata",
"automatizzata",
"bilanciata",
"centralizzata",
"compatibile",
"configurabile",
"cross-platform",
"decentralizzata",
"digitalizzata",
"distribuita",
"piccola",
"ergonomica",
"esclusiva",
"espansa",
"estesa",
"configurabile",
"fondamentale",
"orizzontale",
"implementata",
"innovativa",
"integrata",
"intuitiva",
"inversa",
"gestita",
"obbligatoria",
"monitorata",
"multi-canale",
"multi-laterale",
"open-source",
"operativa",
"ottimizzata",
"organica",
"persistente",
"polarizzata",
"proattiva",
"programmabile",
"progressiva",
"reattiva",
"riallineata",
"ricontestualizzata",
"ridotta",
"robusta",
"sicura",
"condivisibile",
"stand-alone",
"switchabile",
"sincronizzata",
"sinergica",
"totale",
"universale",
"user-friendly",
"versatile",
"virtuale",
"visionaria",
),
(
"24 ore",
"24/7",
"terza generazione",
"quarta generazione",
"quinta generazione",
"sesta generazione",
"asimmetrica",
"asincrona",
"background",
"bi-direzionale",
"biforcata",
"bottom-line",
"coerente",
"coesiva",
"composita",
"sensibile al contesto",
"basta sul contesto",
"basata sul contenuto",
"dedicata",
"didattica",
"direzionale",
"discreta",
"dinamica",
"eco-centrica",
"esecutiva",
"esplicita",
"full-range",
"globale",
"euristica",
"alto livello",
"olistica",
"omogenea",
"ibrida",
"impattante",
"incrementale",
"intangibile",
"interattiva",
"intermediaria",
"locale",
"logistica",
"massimizzata",
"metodica",
"mission-critical",
"mobile",
"modulare",
"motivazionale",
"multimedia",
"multi-tasking",
"nazionale",
"neutrale",
"nextgeneration",
"non-volatile",
"object-oriented",
"ottima",
"ottimizzante",
"radicale",
"real-time",
"reciproca",
"regionale",
"responsiva",
"scalabile",
"secondaria",
"stabile",
"statica",
"sistematica",
"sistemica",
"tangibile",
"terziaria",
"uniforme",
"valore aggiunto",
),
)
bsWords = (
(
"partnerships",
"comunità",
"ROI",
"soluzioni",
"e-services",
"nicchie",
"tecnologie",
"contenuti",
"supply-chains",
"convergenze",
"relazioni",
"architetture",
"interfacce",
"mercati",
"e-commerce",
"sistemi",
"modelli",
"schemi",
"reti",
"applicazioni",
"metriche",
"e-business",
"funzionalità",
"esperienze",
"webservices",
"metodologie",
),
(
"implementate",
"utilizzo",
"integrate",
"ottimali",
"evolutive",
"abilitate",
"reinventate",
"aggregate",
"migliorate",
"incentivate",
"monetizzate",
"sinergizzate",
"strategiche",
"deploy",
"marchi",
"accrescitive",
"target",
"sintetizzate",
"spedizioni",
"massimizzate",
"innovazione",
"guida",
"estensioni",
"generate",
"exploit",
"transizionali",
"matrici",
"ricontestualizzate",
),
(
"valore aggiunto",
"verticalizzate",
"proattive",
"forti",
"rivoluzionari",
"scalabili",
"innovativi",
"intuitivi",
"strategici",
"e-business",
"mission-critical",
"24/7",
"globali",
"B2B",
"B2C",
"granulari",
"virtuali",
"virali",
"dinamiche",
"magnetiche",
"web",
"interattive",
"sexy",
"back-end",
"real-time",
"efficienti",
"front-end",
"distributivi",
"estensibili",
"mondiali",
"open-source",
"cross-platform",
"sinergiche",
"out-of-the-box",
"enterprise",
"integrate",
"di impatto",
"wireless",
"trasparenti",
"next-generation",
"cutting-edge",
"visionari",
"plug-and-play",
"collaborative",
"olistiche",
"ricche",
),
)
company_suffixes = ("SPA", "e figli", "Group", "s.r.l.")
def _random_vat_office(self) -> int:
"""
Returns a random code identifying the VAT office needed to build a valid VAT with company_vat.
See https://it.wikipedia.org/wiki/Partita_IVA#Tabella_degli_Uffici_IVA
"""
val = self.random_int(1, 104)
# handle special cases
if val == 101:
return 120
elif val == 102:
return 121
elif val == 103:
return 888
elif val == 104:
return 999
# else: between 1 and 100 are all valid
return val
def company_vat(self) -> str:
    """
    Return an Italian VAT identification number (Partita IVA).

    Layout: "IT" + 7 random digits + 3-digit VAT office code + Luhn
    check digit.
    """
    office = str(self._random_vat_office()).zfill(3)
    code = self.bothify("#######") + office
    luhn_checksum = str(calculate_luhn(int(code)))
    return f"IT{code}{luhn_checksum}"
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/localization/mk/MathML.js | MathJax.Localization.addTranslation("mk","MathML",{version:"2.7.9",isLoaded:true,strings:{BadMglyph:"\u0413\u0440\u0435\u0448\u0435\u043D mglyph: %1",BadMglyphFont:"\u0413\u0440\u0435\u0448\u0435\u043D \u0444\u043E\u043D\u0442: %1",MathPlayer:"MathJax \u043D\u0435 \u043C\u043E\u0436\u0435 \u0434\u0430 \u0433\u043E \u043F\u043E\u0441\u0442\u0430\u0432\u0438 MathPlayer.\n\n\u0414\u043E\u043A\u043E\u043B\u043A\u0443 \u043D\u0435 \u0432\u0438 \u0435 \u0432\u043E\u0441\u043F\u043E\u0441\u0442\u0430\u0432\u0435\u043D MathPlayer, \u045C\u0435 \u0442\u0440\u0435\u0431\u0430 \u043F\u0440\u0432\u043E \u0434\u0430 \u0433\u043E \u0432\u043E\u0441\u043F\u043E\u0441\u0442\u0430\u0432\u0438\u0442\u0435.\n\u0412\u043E \u0441\u043F\u0440\u043E\u0442\u0438\u0432\u043D\u043E, \u043E\u0432\u0430 \u0437\u043D\u0430\u0447\u0438 \u0434\u0435\u043A\u0430 \u0432\u0430\u0448\u0438\u0442\u0435 \u0431\u0435\u0437\u0431\u0435\u0434\u043D\u043E\u0441\u043D\u0438 \u043F\u043E\u0441\u0442\u0430\u0432\u043A\u0438 \u0458\u0430 \u0441\u043F\u0440\u0435\u0447\u0443\u0432\u0430\u0430\u0442 \u0440\u0430\u0431\u043E\u0442\u0430\u0442\u0430 \u043D\u0430 \u043A\u043E\u043D\u0442\u0440\u043E\u043B\u0438\u0442\u0435 \u0432\u043E ActiveX. \u041F\u043E\u0458\u0434\u0435\u0442\u0435 \u0432\u043E \u0431\u0435\u0437\u0431\u0435\u0434\u043D\u043E\u0441\u043D\u0438\u0442\u0435 \u043D\u0430\u0433\u043E\u0434\u0443\u0432\u0430\u045A\u0430 \u043D\u0430 \u043F\u0440\u0435\u043B\u0438\u0441\u0442\u0443\u0432\u0430\u0447\u043E\u0442 \u0438 \u0441\u0442\u0438\u0441\u043D\u0435\u0442\u0435 \u043D\u0430 \u043F\u0440\u0438\u043B\u0430\u0433\u043E\u0434\u043B\u0438\u0432\u043E (Custom Level). 
\u0422\u0430\u043C\u0443 \u043F\u0440\u043E\u0432\u0435\u0440\u0435\u0442\u0435 \u0434\u0430\u043B\u0438 \u0432\u0438 \u0441\u0435 \u0432\u043A\u043B\u0443\u0447\u0435\u043D\u0438 \u043C\u043E\u0436\u043D\u043E\u0441\u0442\u0438\u0442\u0435 \u0437\u0430 \u0440\u0430\u0431\u043E\u0442\u0430 \u0441\u043E ActiveX-\u043A\u043E\u043D\u0442\u0440\u043E\u043B\u0438 \u0438 \u0411\u0438\u043D\u0430\u0440\u043D\u0438 \u0438 \u0441\u043A\u0440\u0438\u043F\u0442\u043D\u0438 \u043F\u043E\u0432\u0435\u0434\u0435\u043D\u0438\u0458\u0430.\n\n\u0417\u0430\u0441\u0435\u0433\u0430 \u045C\u0435 \u0433\u043B\u0435\u0434\u0430\u0442\u0435 \u0438\u0437\u0432\u0435\u0441\u0442\u0443\u0432\u0430\u045A\u0430 \u0437\u0430 \u0433\u0440\u0435\u0448\u043A\u0438 \u043D\u0430\u043C\u0435\u0441\u0442\u043E \u0438\u0441\u043F\u0438\u0448\u0430\u043D\u0438 \u043C\u0430\u0442\u0435\u043C\u0430\u0442\u0438\u0447\u043A\u0438 \u0441\u043E\u0434\u0440\u0436\u0438\u043D\u0438.",CantCreateXMLParser:"MathJax \u043D\u0435 \u043C\u043E\u0436\u0435 \u0434\u0430 \u0441\u043E\u0437\u0434\u0430\u0434\u0435 XML-\u043F\u0430\u0440\u0441\u0435\u0440 \u0437\u0430 MathML. 
\u041F\u0440\u043E\u0432\u0435\u0440\u0435\u0442\u0435 \u0434\u0430\u043B\u0438 \u0435 \u0432\u043A\u043B\u0443\u0447\u0435\u043D\u0430 \u0441\u0438\u0433\u0443\u0440\u043D\u043E\u0441\u043D\u0430\u0442\u0430 \u043F\u043E\u0441\u0442\u0430\u0432\u043A\u0430 ActiveX-\u043A\u043E\u043D\u0442\u0440\u043E\u043B\u0438 \u0431\u0435\u0437\u0431\u0435\u0434\u043D\u0438 \u0437\u0430 \u0441\u043A\u0440\u0438\u043F\u0442\u0438\u0440\u0430\u045A\u0435 (\u0432\u043E \u043F\u0440\u0438\u043B\u0430\u0433\u043E\u0434\u043B\u0438\u0432\u0438\u043E\u0442 \u0434\u0435\u043B (Custom Level) \u043D\u0430 \u0431\u0435\u0437\u0431\u0435\u0434\u043D\u043E\u0441\u0442\u0438\u0442\u0435 \u043F\u043E\u0441\u0442\u0430\u0432\u043A\u0438 \u0432\u043E \u043F\u0440\u0435\u043B\u0438\u0441\u0442\u0443\u0432\u0430\u0447\u043E\u0442).\n\nMathJax \u043D\u0435\u043C\u0430 \u0434\u0430 \u043C\u043E\u0436\u0435 \u0434\u0430 \u0433\u0438 \u043E\u0431\u0440\u0430\u0431\u043E\u0442\u0438 \u0440\u0430\u0432\u0435\u043D\u043A\u0438\u0442\u0435 \u043D\u0430 MathML.",UnknownNodeType:"\u041D\u0435\u043F\u043E\u0437\u043D\u0430\u0442 \u0442\u0438\u043F \u043D\u0430 \u0458\u0430\u0437\u043E\u043B: %1",UnexpectedTextNode:"\u041D\u0435\u043E\u0447\u0435\u043A\u0443\u0432\u0430\u043D \u0458\u0430\u0437\u043E\u043B \u0432\u043E \u0442\u0435\u043A\u0441\u0442\u043E\u0442: %1",ErrorParsingMathML:"\u0413\u0440\u0435\u0448\u043A\u0430 \u043F\u0440\u0438 \u0440\u0430\u0441\u0447\u043B\u0435\u043D\u0443\u0432\u0430\u045A\u0435\u0442\u043E \u043D\u0430 MathML",ParsingError:"\u0413\u0440\u0435\u0448\u043A\u0430 \u043F\u0440\u0438 \u0440\u0430\u0441\u0447\u043B\u0435\u043D\u0443\u0432\u0430\u045A\u0435\u0442\u043E \u043D\u0430 MathML: %1",MathMLSingleElement:"MathML \u043C\u043E\u0440\u0430 \u0434\u0430 \u0441\u0435 \u043E\u0431\u0440\u0430\u0437\u0443\u0432\u0430 \u043E\u0434 \u0435\u0434\u0435\u043D \u0435\u043B\u0435\u043C\u0435\u043D\u0442",MathMLRootElement:"MathML \u043C\u043E\u0440\u0430 \u0434\u0430 \u0441\u0435 
\u043E\u0431\u0440\u0430\u0437\u0443\u0432\u0430 \u0441\u043E \u0435\u043B\u0435\u043C\u0435\u043D\u0442 \u003Cmath\u003E, \u0430 \u043D\u0435 %1"}});MathJax.Ajax.loadComplete("[MathJax]/localization/mk/MathML.js"); | PypiClean |
/88rest-0.1.9.tar.gz/88rest-0.1.9/rest88/routers.py | from collections import OrderedDict
from django.conf.urls import url
from rest_framework.routers import (
SimpleRouter as _SimpleRouter,
APIRootView,
SchemaView,
SchemaGenerator
)
from rest_framework.settings import api_settings
from rest_framework.urlpatterns import format_suffix_patterns
from orm88.connector import ORM88
class SimpleRouter(_SimpleRouter):
    """SimpleRouter that derives the default basename from an ORM88 queryset."""

    def get_default_basename(self, viewset):
        """
        Derive the basename from the viewset when it was not given explicitly.

        The viewset must expose a `.queryset` attribute (an ORM88 instance);
        its model name, lowercased, becomes the basename.
        """
        viewset_queryset = getattr(viewset, 'queryset', None)  # type: ORM88
        assert viewset_queryset is not None, (
            '`basename` argument not specified, '
            'and could not automatically determine '
            'the name from the viewset, as it does '
            'not have a `.queryset` attribute.'
        )
        return viewset_queryset._model_name.lower()
class DefaultRouter(SimpleRouter):
    """
    The default router extends the SimpleRouter, but also adds in a default
    API root view, and adds format suffix patterns to the URLs.
    """
    include_root_view = True
    include_format_suffixes = True
    root_view_name = 'api-root'
    default_schema_renderers = None
    APIRootView = APIRootView
    APISchemaView = SchemaView
    SchemaGenerator = SchemaGenerator

    def __init__(self, *args, **kwargs):
        # Callers may override the renderers used by the API root view;
        # otherwise fall back to the configured defaults.
        self.root_renderers = (
            kwargs.pop('root_renderers')
            if 'root_renderers' in kwargs
            else list(api_settings.DEFAULT_RENDERER_CLASSES)
        )
        super().__init__(*args, **kwargs)

    def get_api_root_view(self, api_urls=None):
        """
        Return a basic root view mapping each registered prefix to its
        list-route name.
        """
        list_name = self.routes[0].name
        api_root_dict = OrderedDict(
            (prefix, list_name.format(basename=basename))
            for prefix, viewset, basename in self.registry
        )
        return self.APIRootView.as_view(api_root_dict=api_root_dict)

    def get_urls(self):
        """
        Generate the list of URL patterns, including a default root view
        for the API, and appending `.json` style format suffixes.
        """
        urls = super().get_urls()
        if self.include_root_view:
            root_view = self.get_api_root_view(api_urls=urls)
            urls.append(url(r'^$', root_view, name=self.root_view_name))
        if self.include_format_suffixes:
            urls = format_suffix_patterns(urls)
        return urls
/HCNetSDK_python-1.0.0-py3-none-win32.whl/HCNetSDK/SDK_Struct.py |
import os
import platform
from ctypes import *
sys_platform = platform.system().lower().strip()

# Per-platform directories of the native libraries bundled with the package.
lib_path_dict = {
    'windows': os.path.dirname(__file__) + '\\Libs\\windows\\',
    'linux': os.path.dirname(__file__) + '/Libs/linux/',
}

# Main HCNetSDK network library.
netsdkdllpath_dict = {
    'windows': lib_path_dict['windows'] + 'HCNetSDK.dll',
    'linux': lib_path_dict['linux'] + 'libhcnetsdk.so',
}

# PlayCtrl playback library.
playsdkdllpath_dict = {
    'windows': lib_path_dict['windows'] + 'PlayCtrl.dll',
    'linux': lib_path_dict['linux'] + 'libPlayCtrl.so',
}

# OpenSSL crypto library shipped alongside the SDK.
netsdkcomdllpath_dict = {
    'windows': lib_path_dict['windows'] + 'libcrypto-1_1-x64.dll',
    'linux': lib_path_dict['linux'] + 'libcrypto.so.1.1',
}

# OpenSSL SSL/TLS library shipped alongside the SDK.
sslmdllpath_dict = {
    'windows': lib_path_dict['windows'] + 'libssl-1_1-x64.dll',
    'linux': lib_path_dict['linux'] + 'libssl.so.1.1',
}

# SDK log output directory.
logdir_dict = {
    'windows': os.path.dirname(__file__) + '\\Libs\\log',
    'linux': os.path.dirname(__file__) + '/Libs/log',
}

# Pick the loader and callback factory matching the platform calling
# convention: stdcall on Windows (windll/WINFUNCTYPE), cdecl on Linux.
if sys_platform == 'linux':
    load_library = cdll.LoadLibrary
    CB_FUNCTYPE = CFUNCTYPE
elif sys_platform == 'windows':
    load_library = windll.LoadLibrary
    CB_FUNCTYPE = WINFUNCTYPE
else:
    print("************不支持的平台**************")
    # Bug fix: this is an error path, so exit with a non-zero status.
    # The original called exit(0), reporting success to the parent process.
    exit(1)

netsdkdllpath = netsdkdllpath_dict[sys_platform]
playsdkdllpath = playsdkdllpath_dict[sys_platform]
netsdkcomdllpath = netsdkcomdllpath_dict[sys_platform]
sslmdllpath = sslmdllpath_dict[sys_platform]
logdir = logdir_dict[sys_platform]
class NET_DVR_PTZPOS(Structure):
    """PTZ absolute position parameters (get/set)."""
    _fields_ = [
        ("wAction", c_uint16),  # operation type, only meaningful when setting: 1 - position PTZ, 2 - P only, 3 - T only, 4 - Z only, 5 - PT only
        ("wPanPos", c_uint16),  # P parameter (pan / horizontal position)
        ("wTiltPos", c_uint16), # T parameter (tilt / vertical position)
        ("wZoomPos", c_uint16)  # Z parameter (zoom)
    ]
class NET_DVR_DEVICEINFO_V30(Structure):
_fields_ = [
("sSerialNumber", c_byte * 48), # 序列号
("byAlarmInPortNum", c_byte), # 模拟报警输入个数
("byAlarmOutPortNum", c_byte), # 模拟报警输出个数
("byDiskNum", c_byte), # 硬盘个数
("byDVRType", c_byte), # 设备类型
("byChanNum", c_byte), # 设备模拟通道个数,数字(IP)通道最大个数为byIPChanNum + byHighDChanNum*256
("byStartChan", c_byte), # 模拟通道的起始通道号,从1开始。数字通道的起始通道号见下面参数byStartDChan
("byAudioChanNum", c_byte), # 设备语音对讲通道数
("byIPChanNum", c_byte), # 设备最大数字通道个数,低8位,高8位见byHighDChanNum
("byZeroChanNum", c_byte), # 零通道编码个数
(
"byMainProto",
c_byte,
), # 主码流传输协议类型:0- private,1- rtsp,2- 同时支持私有协议和rtsp协议取流(默认采用私有协议取流)
(
"bySubProto",
c_byte,
), # 子码流传输协议类型:0- private,1- rtsp,2- 同时支持私有协议和rtsp协议取流(默认采用私有协议取流)
("bySupport", c_byte), # 能力,位与结果为0表示不支持,1表示支持
# bySupport & 0x1,表示是否支持智能搜索
# bySupport & 0x2,表示是否支持备份
# bySupport & 0x4,表示是否支持压缩参数能力获取
# bySupport & 0x8, 表示是否支持双网卡
# bySupport & 0x10, 表示支持远程SADP
# bySupport & 0x20, 表示支持Raid卡功能
# bySupport & 0x40, 表示支持IPSAN目录查找
# bySupport & 0x80, 表示支持rtp over rtsp
("bySupport1", c_byte), # 能力集扩充,位与结果为0表示不支持,1表示支持
# bySupport1 & 0x1, 表示是否支持snmp v30
# bySupport1 & 0x2, 表示是否支持区分回放和下载
# bySupport1 & 0x4, 表示是否支持布防优先级
# bySupport1 & 0x8, 表示智能设备是否支持布防时间段扩展
# bySupport1 & 0x10,表示是否支持多磁盘数(超过33个)
# bySupport1 & 0x20,表示是否支持rtsp over http
# bySupport1 & 0x80,表示是否支持车牌新报警信息,且还表示是否支持NET_DVR_IPPARACFG_V40配置
("bySupport2", c_byte), # 能力集扩充,位与结果为0表示不支持,1表示支持
# bySupport2 & 0x1, 表示解码器是否支持通过URL取流解码
# bySupport2 & 0x2, 表示是否支持FTPV40
# bySupport2 & 0x4, 表示是否支持ANR(断网录像)
# bySupport2 & 0x20, 表示是否支持单独获取设备状态子项
# bySupport2 & 0x40, 表示是否是码流加密设备
("wDevType", c_uint16), # 设备型号,详见下文列表
("bySupport3", c_byte), # 能力集扩展,位与结果:0- 不支持,1- 支持
# bySupport3 & 0x1, 表示是否支持多码流
# bySupport3 & 0x4, 表示是否支持按组配置,具体包含通道图像参数、报警输入参数、IP报警输入/输出接入参数、用户参数、设备工作状态、JPEG抓图、定时和时间抓图、硬盘盘组管理等
# bySupport3 & 0x20, 表示是否支持通过DDNS域名解析取流
("byMultiStreamProto", c_byte), # 是否支持多码流,按位表示,位与结果:0-不支持,1-支持
# byMultiStreamProto & 0x1, 表示是否支持码流3
# byMultiStreamProto & 0x2, 表示是否支持码流4
# byMultiStreamProto & 0x40,表示是否支持主码流
# byMultiStreamProto & 0x80,表示是否支持子码流
("byStartDChan", c_byte), # 起始数字通道号,0表示无数字通道,比如DVR或IPC
("byStartDTalkChan", c_byte), # 起始数字对讲通道号,区别于模拟对讲通道号,0表示无数字对讲通道
("byHighDChanNum", c_byte), # 数字通道个数,高8位
("bySupport4", c_byte), # 能力集扩展,按位表示,位与结果:0- 不支持,1- 支持
# bySupport4 & 0x01, 表示是否所有码流类型同时支持RTSP和私有协议
# bySupport4 & 0x10, 表示是否支持域名方式挂载网络硬盘
("byLanguageType", c_byte), # 支持语种能力,按位表示,位与结果:0- 不支持,1- 支持
# byLanguageType ==0,表示老设备,不支持该字段
# byLanguageType & 0x1,表示是否支持中文
# byLanguageType & 0x2,表示是否支持英文
("byVoiceInChanNum", c_byte), # 音频输入通道数
("byStartVoiceInChanNo", c_byte), # 音频输入起始通道号,0表示无效
("bySupport5", c_byte), # 按位表示,0-不支持,1-支持,bit0-支持多码流
("bySupport6", c_byte), # 按位表示,0-不支持,1-支持
# bySupport6 & 0x1 表示设备是否支持压缩
# bySupport6 & 0x2 表示是否支持流ID方式配置流来源扩展命令,DVR_SET_STREAM_SRC_INFO_V40
# bySupport6 & 0x4 表示是否支持事件搜索V40接口
# bySupport6 & 0x8 表示是否支持扩展智能侦测配置命令
# bySupport6 & 0x40 表示图片查询结果V40扩展
("byMirrorChanNum", c_byte), # 镜像通道个数,录播主机中用于表示导播通道
("wStartMirrorChanNo", c_uint16), # 起始镜像通道号
("bySupport7", c_byte), # 能力,按位表示,0-不支持,1-支持
# bySupport7 & 0x1 表示设备是否支持NET_VCA_RULECFG_V42扩展
# bySupport7 & 0x2 表示设备是否支持IPC HVT 模式扩展
# bySupport7 & 0x04 表示设备是否支持返回锁定时间
# bySupport7 & 0x08 表示设置云台PTZ位置时,是否支持带通道号
# bySupport7 & 0x10 表示设备是否支持双系统升级备份
# bySupport7 & 0x20 表示设备是否支持OSD字符叠加V50
# bySupport7 & 0x40 表示设备是否支持主从跟踪(从摄像机)
# bySupport7 & 0x80 表示设备是否支持报文加密
("byRes2", c_byte),
] # 保留,置为0
class NET_DVR_DEVICEINFO_V40(Structure):
_fields_ = [
("struDeviceV30", NET_DVR_DEVICEINFO_V30), # 设备信息
(
"bySupportLock",
c_byte,
), # 设备支持锁定功能,该字段由SDK根据设备返回值来赋值的。bySupportLock为1时,dwSurplusLockTime和byRetryLoginTime有效
("byRetryLoginTime", c_byte), # 剩余可尝试登陆的次数,用户名,密码错误时,此参数有效
("byPasswordLevel", c_byte), # admin密码安全等级
("byProxyType", c_byte), # 代理类型,0-不使用代理, 1-使用socks5代理, 2-使用EHome代理
("dwSurplusLockTime", c_uint32), # 剩余时间,单位秒,用户锁定时,此参数有效
("byCharEncodeType", c_byte), # 字符编码类型
("bySupportDev5", c_byte), # 支持v50版本的设备参数获取,设备名称和设备类型名称长度扩展为64字节
("bySupport", c_byte), # 能力集扩展,位与结果:0- 不支持,1- 支持
("byLoginMode", c_byte), # 登录模式:0- Private登录,1- ISAPI登录
("dwOEMCode", c_uint32), # OEM Code
(
"iResidualValidity",
c_uint32,
), # 该用户密码剩余有效天数,单位:天,返回负值,表示密码已经超期使用,例如“-3表示密码已经超期使用3天”
("byResidualValidity", c_byte), # iResidualValidity字段是否有效,0-无效,1-有效
(
"bySingleStartDTalkChan",
c_byte,
), # 独立音轨接入的设备,起始接入通道号,0-为保留字节,无实际含义,音轨通道号不能从0开始
("bySingleDTalkChanNums", c_byte), # 独立音轨接入的设备的通道总数,0-表示不支持
("byPassWordResetLevel", c_byte), # 0-无效,
# 1- 管理员创建一个非管理员用户为其设置密码,该非管理员用户正确登录设备后要提示“请修改初始登录密码”,未修改的情况下,用户每次登入都会进行提醒;
# 2- 当非管理员用户的密码被管理员修改,该非管理员用户再次正确登录设备后,需要提示“请重新设置登录密码”,未修改的情况下,用户每次登入都会进行提醒。
("bySupportStreamEncrypt", c_byte), # 能力集扩展,位与结果:0- 不支持,1- 支持
# bySupportStreamEncrypt & 0x1 表示是否支持RTP/TLS取流
# bySupportStreamEncrypt & 0x2 表示是否支持SRTP/UDP取流
# bySupportStreamEncrypt & 0x4 表示是否支持SRTP/MULTICAST取流
("byMarketType", c_byte), # 0-无效(未知类型),1-经销型,2-行业型
("byRes2", c_byte * 238), # 保留,置为0
]
# 异步登录回调函数
fLoginResultCallBack = CB_FUNCTYPE(None, c_uint32, c_uint32, POINTER(NET_DVR_DEVICEINFO_V40), c_void_p)
class NET_DVR_USER_LOGIN_INFO(Structure):
_fields_ = [
("sDeviceAddress", c_char * 129), # 设备地址,IP 或者普通域名
("byUseTransport", c_byte), # 是否启用能力集透传:0- 不启用透传,默认;1- 启用透传
("wPort", c_uint16), # 设备端口号,例如:8000
("sUserName", c_char * 64), # 登录用户名,例如:admin
("sPassword", c_char * 64), # 登录密码,例如:12345
("cbLoginResult", fLoginResultCallBack), # 登录状态回调函数,bUseAsynLogin 为1时有效
("pUser", c_void_p), # 用户数据
("bUseAsynLogin", c_uint32), # 是否异步登录:0- 否,1- 是
("byProxyType", c_byte), # 0:不使用代理,1:使用标准代理,2:使用EHome代理
("byUseUTCTime", c_byte),
# 0-不进行转换,默认,1-接口上输入输出全部使用UTC时间,SDK完成UTC时间与设备时区的转换,2-接口上输入输出全部使用平台本地时间,SDK完成平台本地时间与设备时区的转换
("byLoginMode", c_byte), # 0-Private 1-ISAPI 2-自适应
("byHttps", c_byte), # 0-不适用tls,1-使用tls 2-自适应
("iProxyID", c_uint32), # 代理服务器序号,添加代理服务器信息时,相对应的服务器数组下表值
("byVerifyMode", c_byte), # 认证方式,0-不认证,1-双向认证,2-单向认证;认证仅在使用TLS的时候生效;
("byRes2", c_byte * 119),
]
class NET_DVR_LOCAL_SDK_PATH(Structure):
    """Directory holding the SDK component libraries."""
    _fields_ = [
        ("sPath", c_char * 256),  # component library directory path
        ("byRes", c_byte * 128),  # reserved
    ]
class NET_DVR_PREVIEWINFO(Structure):
_fields_ = [
("lChannel", c_uint32), # 通道号
(
"dwStreamType",
c_uint32,
), # 码流类型,0-主码流,1-子码流,2-码流3,3-码流4, 4-码流5,5-码流6,7-码流7,8-码流8,9-码流9,10-码流10
(
"dwLinkMode",
c_uint32,
), # 0:TCP方式,1:UDP方式,2:多播方式,3 - RTP方式,4-RTP/RTSP,5-RSTP/HTTP ,6- HRUDP(可靠传输) ,7-RTSP/HTTPS
("hPlayWnd", c_uint32), # 播放窗口的句柄,为NULL表示不播放图象
(
"bBlocked",
c_uint32,
), # 0-非阻塞取流, 1-阻塞取流, 如果阻塞SDK内部connect失败将会有5s的超时才能够返回,不适合于轮询取流操作
("bPassbackRecord", c_uint32), # 0-不启用录像回传,1启用录像回传
("byPreviewMode", c_ubyte), # 预览模式,0-正常预览,1-延迟预览
("byStreamID", c_ubyte * 32), # 流ID,lChannel为0xffffffff时启用此参数
("byProtoType", c_ubyte), # 应用层取流协议,0-私有协议,1-RTSP协议,
# 2-SRTP码流加密(对应此结构体中dwLinkMode 字段,支持如下方式, 为1,表示udp传输方式,信令走TLS加密,码流走SRTP加密,为2,表示多播传输方式,信令走TLS加密,码流走SRTP加密)
("byRes1", c_ubyte),
("byVideoCodingType", c_ubyte), # 码流数据编解码类型 0-通用编码数据 1-热成像探测器产生的原始数据
("dwDisplayBufNum", c_uint32), # 播放库播放缓冲区最大缓冲帧数,范围1-50,置0时默认为1
("byNPQMode", c_ubyte), # NPQ是直连模式,还是过流媒体:0-直连 1-过流媒体
("byRecvMetaData", c_ubyte), # 是否接收metadata数据
# 设备是否支持该功能通过GET /ISAPI/System/capabilities 中DeviceCap.SysCap.isSupportMetadata是否存在且为true
("byDataType", c_ubyte), # 数据类型,0-码流数据,1-音频数据
("byRes", c_ubyte * 213),
]
class NET_DVR_JPEGPARA(Structure):
    """JPEG capture (snapshot) parameters."""
    _fields_ = [
        ("wPicSize", c_ushort),     # picture resolution code — TODO confirm value mapping against the SDK manual
        ("wPicQuality", c_ushort),  # picture quality code — TODO confirm value mapping against the SDK manual
    ]
class NET_DVR_SHOWSTRINGINFO(Structure):
    """A single OSD text-overlay item (visibility, position and content)."""
    _fields_ = [
        ("wShowString", c_ushort),          # whether this string is displayed — presumably 0/1, confirm
        ("wStringSize", c_ushort),          # length of sString in bytes — confirm against SDK manual
        ("wShowStringTopLeftX", c_ushort),  # overlay top-left X coordinate
        ("wShowStringTopLeftY", c_ushort),  # overlay top-left Y coordinate
        ("sString", c_ubyte * 44),          # overlay text content
    ]
class NET_DVR_SHOWSTRING_V30(Structure):
    """OSD text-overlay configuration (V30): holds up to 8 overlay strings."""
    _fields_ = [
        ("dwSize", c_uint32),                            # structure size
        ("struStringInfo", NET_DVR_SHOWSTRINGINFO * 8),  # the 8 overlay slots
    ]
class NET_DVR_XML_CONFIG_OUTPUT(Structure):
    """Output parameters for ISAPI/XML passthrough requests."""
    _fields_ = [
        ("dwSize", c_uint32),            # structure size
        ("lpOutBuffer", c_void_p),       # response buffer (caller-allocated)
        ("dwOutBufferSize", c_uint32),   # size of lpOutBuffer
        ("dwReturnedXMLSize", c_uint32), # actual size of the returned XML
        ("lpStatusBuffer", c_void_p),    # status / error-detail buffer
        ("dwStatusSize", c_uint32),      # size of lpStatusBuffer
        ("byRes", c_ubyte * 32),         # reserved
    ]
class NET_DVR_XML_CONFIG_INPUT(Structure):
    """Input parameters for ISAPI/XML passthrough requests."""
    _fields_ = [
        ("dwSize", c_uint32),          # structure size
        ("lpRequestUrl", c_void_p),    # ISAPI request URL string
        ("dwRequestUrlLen", c_uint32), # length of lpRequestUrl
        ("lpInBuffer", c_void_p),      # request body buffer
        ("dwInBufferSize", c_uint32),  # size of lpInBuffer
        ("dwRecvTimeOut", c_uint32),   # receive timeout — presumably milliseconds, confirm
        ("byForceEncrpt", c_ubyte),    # force-encryption flag (field name sic, per SDK headers)
        ("byNumOfMultiPart", c_ubyte), # number of multipart sections
        ("byRes", c_ubyte * 30),       # reserved
    ]
class NET_DVR_ALARMER(Structure):
_fields_ = [
("byUserIDValid", c_byte), # UserID是否有效 0-无效,1-有效
("bySerialValid", c_byte), # 序列号是否有效 0-无效,1-有效
("byVersionValid", c_byte), # 版本号是否有效 0-无效,1-有效
("byDeviceNameValid", c_byte), # 设备名字是否有效 0-无效,1-有效
("byMacAddrValid", c_byte), # MAC地址是否有效 0-无效,1-有效
("byLinkPortValid", c_byte), # login端口是否有效 0-无效,1-有效
("byDeviceIPValid", c_byte), # 设备IP是否有效 0-无效,1-有效
("bySocketIPValid", c_byte), # socket ip是否有效 0-无效,1-有效
("lUserID", c_uint32), # NET_DVR_Login()返回值, 布防时有效
("sSerialNumber", c_byte * 48), # 序列号
("dwDeviceVersion", c_uint32), # 版本信息 高16位表示主版本,低16位表示次版本
("sDeviceName", c_byte * 32), # 设备名字
("byMacAddr", c_byte * 6), # MAC地址
("wLinkPort", c_uint16), # link port
("sDeviceIP", c_byte * 128), # IP地址
("sSocketIP", c_byte * 128), # 报警主动上传时的socket IP地址
("byIpProtocol", c_byte), # Ip协议 0-IPV4, 1-IPV6
("byRes2", c_byte * 11),
]
class NET_DVR_SETUPALARM_PARAM(Structure):
_fields_ = [
("dwSize", c_uint32), # 结构体大小
("byLevel", c_byte), # 布防优先级:0- 一等级(高),1- 二等级(中),2- 三等级(低)
("byAlarmInfoType", c_byte),
# 上传报警信息类型(抓拍机支持),0-老报警信息(NET_DVR_PLATE_RESULT),1-新报警信息(NET_ITS_PLATE_RESULT)2012-9-28
("byRetAlarmTypeV40", c_byte),
# 0- 返回NET_DVR_ALARMINFO_V30或NET_DVR_ALARMINFO,
# 1- 设备支持NET_DVR_ALARMINFO_V40则返回NET_DVR_ALARMINFO_V40,不支持则返回NET_DVR_ALARMINFO_V30或NET_DVR_ALARMINFO
(
"byRetDevInfoVersion",
c_byte,
), # CVR上传报警信息回调结构体版本号 0-COMM_ALARM_DEVICE, 1-COMM_ALARM_DEVICE_V40
(
"byRetVQDAlarmType",
c_byte,
), # VQD报警上传类型,0-上传报报警NET_DVR_VQD_DIAGNOSE_INFO,1-上传报警NET_DVR_VQD_ALARM
("byFaceAlarmDetection", c_byte),
("bySupport", c_byte),
("byBrokenNetHttp", c_byte),
("wTaskNo", c_uint16),
# 任务处理号 和 (上传数据NET_DVR_VEHICLE_RECOG_RESULT中的字段dwTaskNo对应 同时 下发任务结构 NET_DVR_VEHICLE_RECOG_COND中的字段dwTaskNo对应)
("byDeployType", c_byte), # 布防类型:0-客户端布防,1-实时布防
("byRes1", c_byte * 3),
("byAlarmTypeURL", c_byte),
# bit0-表示人脸抓拍报警上传
# 0-表示二进制传输,1-表示URL传输(设备支持的情况下,设备支持能力根据具体报警能力集判断,同时设备需要支持URL的相关服务,当前是”云存储“)
("byCustomCtrl", c_byte),
] # Bit0- 表示支持副驾驶人脸子图上传: 0-不上传,1-上传
class NET_DVR_ALARMINFO_V30(Structure):
_fields_ = [
("dwAlarmType", c_uint32), # 报警类型
("dwAlarmInputNumber", c_uint32), # 报警输入端口,当报警类型为0、23时有效
("byAlarmOutputNumber", c_byte * 96),
# 触发的报警输出端口,值为1表示该报警端口输出,如byAlarmOutputNumber[0]=1表示触发第1个报警输出口输出,byAlarmOutputNumber[1]=1表示触发第2个报警输出口,依次类推
(
"byAlarmRelateChannel",
c_byte * 64,
), # 触发的录像通道,值为1表示该通道录像,如byAlarmRelateChannel[0]=1表示触发第1个通道录像
(
"byChannel",
c_byte * 64,
), # 发生报警的通道。当报警类型为2、3、6、9、10、11、13、15、16时有效,如byChannel[0]=1表示第1个通道报警
("byDiskNumber", c_byte * 33),
] # 发生报警的硬盘。当报警类型为1,4,5时有效,byDiskNumber[0]=1表示1号硬盘异常
class NET_DVR_SETUPALARM_PARAM(Structure):
    """Alarm arming (deployment) parameters.

    NOTE(review): this is a duplicate of the NET_DVR_SETUPALARM_PARAM class
    defined earlier in this module; this later definition is the one that
    stays bound at import time. Consider removing one of the two.
    """
    _fields_ = [
        ("dwSize", c_uint32),             # structure size
        ("byLevel", c_byte),              # arming priority: 0 - high, 1 - medium, 2 - low
        ("byAlarmInfoType", c_byte),      # uploaded alarm info type (capture cameras): 0 - legacy NET_DVR_PLATE_RESULT, 1 - NET_ITS_PLATE_RESULT
        ("byRetAlarmTypeV40", c_byte),    # 0 - return NET_DVR_ALARMINFO_V30 or NET_DVR_ALARMINFO;
                                          # 1 - return NET_DVR_ALARMINFO_V40 when the device supports it, else fall back
        ("byRetDevInfoVersion", c_byte),  # CVR alarm callback struct version: 0 - COMM_ALARM_DEVICE, 1 - COMM_ALARM_DEVICE_V40
        ("byRetVQDAlarmType", c_byte),    # VQD alarm upload type: 0 - NET_DVR_VQD_DIAGNOSE_INFO, 1 - NET_DVR_VQD_ALARM
        ("byFaceAlarmDetection", c_byte),
        ("bySupport", c_byte),
        ("byBrokenNetHttp", c_byte),
        ("wTaskNo", c_uint16),            # task number; matches dwTaskNo in NET_DVR_VEHICLE_RECOG_RESULT and NET_DVR_VEHICLE_RECOG_COND
        ("byDeployType", c_byte),         # arming type: 0 - client arming, 1 - real-time arming
        ("byRes1", c_byte * 3),
        ("byAlarmTypeURL", c_byte),       # bit0 - face capture alarm upload: 0 - binary transfer, 1 - URL transfer
                                          # (device must support URL services, e.g. cloud storage)
        ("byCustomCtrl", c_byte),         # bit0 - upload passenger-seat face sub-image: 0 - no, 1 - yes
    ]
class NET_DVR_TIME(Structure):
_fields_ = [
("dwYear", c_uint32), # 年
("dwMonth", c_uint32), # 月
("dwDay", c_uint32), # 日
("dwHour", c_uint32), # 时
("dwMinute", c_uint32), # 分
("dwSecond", c_uint32), # 秒
]
class NET_DVR_IPADDR(Structure):
_fields_ = [
("sIpV4", c_byte * 16), # 设备IPv4地址
("sIpV6", c_byte * 128), # 设备IPv6地址
]
class NET_DVR_ACS_EVENT_INFO(Structure):
_fields_ = [
("dwSize", c_uint32), # 结构体大小
("byCardNo", c_byte * 32), # 卡号
(
"byCardType",
c_byte,
), # 卡类型:1- 普通卡,2- 残障人士卡,3- 黑名单卡,4- 巡更卡,5- 胁迫卡,6- 超级卡,7- 来宾卡,8- 解除卡,为0表示无效
("byAllowListNo", c_byte), # 白名单单号,取值范围:1~8,0表示无效
("byReportChannel", c_byte), # 报告上传通道:1- 布防上传,2- 中心组1上传,3- 中心组2上传,0表示无效
("byCardReaderKind", c_byte), # 读卡器类型:0- 无效,1- IC读卡器,2- 身份证读卡器,3- 二维码读卡器,4- 指纹头
("dwCardReaderNo", c_uint32), # 读卡器编号,为0表示无效
("dwDoorNo", c_uint32), # 门编号(或者梯控的楼层编号),为0表示无效(当接的设备为人员通道设备时,门1为进方向,门2为出方向)
("dwVerifyNo", c_uint32), # 多重卡认证序号,为0表示无效
("dwAlarmInNo", c_uint32), # 报警输入号,为0表示无效
("dwAlarmOutNo", c_uint32), # 报警输出号,为0表示无效
("dwCaseSensorNo", c_uint32), # 事件触发器编号
("dwRs485No", c_uint32), # RS485通道号,为0表示无效
("dwMultiCardGroupNo", c_uint32), # 群组编号
("wAccessChannel", c_uint16), # 人员通道号
("byDeviceNo", c_byte), # 设备编号,为0表示无效
("byDistractControlNo", c_byte), # 分控器编号,为0表示无效
("dwEmployeeNo", c_uint32), # 工号,为0无效
("wLocalControllerID", c_uint16), # 就地控制器编号,0-门禁主机,1-255代表就地控制器
("byInternetAccess", c_byte), # 网口ID:(1-上行网口1,2-上行网口2,3-下行网口1)
("byType", c_byte),
# 防区类型,0:即时防区,1-24小时防区,2-延时防区,3-内部防区,4-钥匙防区,5-火警防区,6-周界防区,7-24小时无声防区,
# 8-24小时辅助防区,9-24小时震动防区,10-门禁紧急开门防区,11-门禁紧急关门防区,0xff-无
("byMACAddr", c_byte * 6), # 物理地址,为0无效
("bySwipeCardType", c_byte), # 刷卡类型,0-无效,1-二维码
("byMask", c_byte), # 是否带口罩:0-保留,1-未知,2-不戴口罩,3-戴口罩
("dwSerialNo", c_uint32), # 事件流水号,为0无效
("byChannelControllerID", c_byte), # 通道控制器ID,为0无效,1-主通道控制器,2-从通道控制器
("byChannelControllerLampID", c_byte), # 通道控制器灯板ID,为0无效(有效范围1-255)
("byChannelControllerIRAdaptorID", c_byte), # 通道控制器红外转接板ID,为0无效(有效范围1-255)
("byChannelControllerIREmitterID", c_byte), # 通道控制器红外对射ID,为0无效(有效范围1-255)
("byHelmet", c_byte), # 可选,是否戴安全帽:0-保留,1-未知,2-不戴安全, 3-戴安全帽
("byRes", c_byte * 3),
] # 保留,置为0
class NET_DVR_ACS_ALARM_INFO(Structure):
_fields_ = [
("dwSize", c_uint32), # 结构体大小
("dwMajor", c_uint32), # 报警主类型,具体定义见“Remarks”说明
("dwMinor", c_uint32), # 报警次类型,次类型含义根据主类型不同而不同,具体定义见“Remarks”说明
("struTime", NET_DVR_TIME), # 报警时间
("sNetUser", c_byte * 16), # 网络操作的用户名
("struRemoteHostAddr", NET_DVR_IPADDR), # 远程主机地址
("struAcsEventInfo", NET_DVR_ACS_EVENT_INFO), # 报警信息详细参数
("dwPicDataLen", c_uint32), # 图片数据大小,不为0是表示后面带数据
("pPicData", c_void_p), # 图片数据缓冲区
(
"wInductiveEventType",
c_uint16,
), # 归纳事件类型,0-无效,客户端判断该值为非0值后,报警类型通过归纳事件类型区分,否则通过原有报警主次类型(dwMajor、dwMinor)区分
("byPicTransType", c_byte), # 图片数据传输方式: 0-二进制;1-url
("byRes1", c_byte), # 保留,置为0
("dwIOTChannelNo", c_uint32), # IOT通道号
(
"pAcsEventInfoExtend",
c_void_p,
), # byAcsEventInfoExtend为1时,表示指向一个NET_DVR_ACS_EVENT_INFO_EXTEND结构体
("byAcsEventInfoExtend", c_byte), # pAcsEventInfoExtend是否有效:0-无效,1-有效
("byTimeType", c_byte), # 时间类型:0-设备本地时间,1-UTC时间(struTime的时间)
("byRes2", c_byte), # 保留,置为0
("byAcsEventInfoExtendV20", c_byte), # pAcsEventInfoExtendV20是否有效:0-无效,1-有效
(
"pAcsEventInfoExtendV20",
c_void_p,
), # byAcsEventInfoExtendV20为1时,表示指向一个NET_DVR_ACS_EVENT_INFO_EXTEND_V20结构体
("byRes", c_byte * 4),
] # 保留,置为0
class NET_VCA_POINT(Structure):
    """2-D point (fX, fY) used by VCA (video content analysis) structures."""
    _fields_ = [("fX", c_float), ("fY", c_float)]
class NET_DVR_ID_CARD_INFO_EXTEND(Structure):
_fields_ = [
("byRemoteCheck", c_ubyte),
("byThermometryUnit", c_ubyte),
("byIsAbnomalTemperature", c_ubyte),
("byRes2", c_ubyte),
("fCurrTemperature", c_float),
("struRegionCoordinates", NET_VCA_POINT),
("dwQRCodeInfoLen", c_uint32),
("dwVisibleLightDataLen", c_uint32),
("dwThermalDataLen", c_uint32),
("pQRCodeInfo", POINTER(c_byte)),
("pVisibleLightData", POINTER(c_byte)),
("pThermalData", POINTER(c_byte)),
("byRes", c_ubyte * 1024),
]
class NET_DVR_DATE(Structure):
    """Calendar date (year / month / day)."""
    _fields_ = [("wYear", c_ushort), ("byMonth", c_ubyte), ("byDay", c_ubyte)]
class NET_DVR_ID_CARD_INFO(Structure):
_fields_ = [
("dwSize", c_uint),
("byName", c_ubyte * 128),
("struBirth", NET_DVR_DATE),
("byAddr", c_ubyte * 280),
("byIDNum", c_ubyte * 32),
("byIssuingAuthority", c_ubyte * 128),
("struStartDate", NET_DVR_DATE),
("struEndDate", NET_DVR_DATE),
("byTermOfValidity", c_ubyte),
("bySex", c_ubyte),
("byNation", c_ubyte),
("byRes", c_ubyte * 101),
]
class NET_DVR_TIME(Structure):
    """Date-time value with second resolution.

    NOTE(review): duplicate of the NET_DVR_TIME class defined earlier in this
    module; this later definition is the one that stays bound at import time.
    """
    _fields_ = [
        ("dwYear", c_uint32),    # year
        ("dwMonth", c_uint32),   # month
        ("dwDay", c_uint32),     # day
        ("dwHour", c_uint32),    # hour
        ("dwMinute", c_uint32),  # minute
        ("dwSecond", c_uint32),  # second
    ]
class NET_DVR_TIME_V30(Structure):
    """Extended timestamp (V30) with milliseconds and a time-zone offset."""
    _fields_ = [
        ("wYear", c_ushort),
        ("byMonth", c_ubyte),
        ("byDay", c_ubyte),
        ("byHour", c_ubyte),
        ("byMinute", c_ubyte),
        ("bySecond", c_ubyte),
        ("byISO8601", c_ubyte),         # ISO 8601 mode flag — TODO confirm semantics against SDK manual
        ("wMilliSec", c_ushort),        # milliseconds
        ("cTimeDifferenceH", c_ubyte),  # time-zone offset, hours part — NOTE(review): declared unsigned although the
                                        # c-prefix suggests a signed char in the SDK headers; verify
        ("cTimeDifferenceM", c_ubyte),  # time-zone offset, minutes part — same signedness caveat as above
    ]
class NET_DVR_IPADDR(Structure):
    """Device IP address (IPv4 and IPv6 forms).

    NOTE(review): redefines the NET_DVR_IPADDR declared earlier in this
    module; this later definition wins and differs from the earlier one
    (c_ubyte fields instead of c_byte, and the IPv6 field is named byIPv6
    instead of sIpV6). Verify which layout callers expect.
    """
    _fields_ = [("sIpV4", c_ubyte * 16), ("byIPv6", c_ubyte * 128)]
class NET_DVR_ID_CARD_INFO_ALARM(Structure):
_fields_ = [
("dwSize", c_uint32), # 结构长度
("struIDCardCfg", NET_DVR_ID_CARD_INFO), # 身份证信息
("dwMajor", c_uint32), # 报警主类型,参考宏定义
("dwMinor", c_uint32), # 报警次类型,参考宏定义
("struSwipeTime", NET_DVR_TIME_V30), # 刷卡时间
("byNetUser", c_ubyte * 16), # 网络操作的用户名
("struRemoteHostAddr", NET_DVR_IPADDR), # 远程主机地址
("dwCardReaderNo", c_uint32), # 读卡器编号,为0无效
("dwDoorNo", c_uint32), # 门编号,为0无效
("dwPicDataLen", c_uint32), # 图片数据大小,不为0是表示后面带数据
("pPicData", c_void_p), # 身份证图片数据缓冲区,dwPicDataLen不为0时缓冲区里面存放身份证头像的图片数据
(
"byCardType",
c_ubyte,
), # 卡类型,1-普通卡,2-残疾人卡,3-黑名单卡,4-巡更卡,5-胁迫卡,6-超级卡,7-来宾卡,8-解除卡,为0无效
("byDeviceNo", c_ubyte), # 设备编号,为0时无效(有效范围1-255)
("byMask", c_ubyte), # 是否带口罩:0-保留,1-未知,2-不戴口罩,3-戴口罩
("byRes2", c_ubyte), # 保留,置为0
("dwFingerPrintDataLen", c_uint32), # 指纹数据大小,不为0是表示后面带数据
("pFingerPrintData", c_void_p), # 指纹数据缓冲区,dwFingerPrintDataLen不为0时缓冲区里面存放指纹数据
("dwCapturePicDataLen", c_uint32), # 抓拍图片数据大小,不为0是表示后面带数据
(
"pCapturePicData",
c_void_p,
), # 抓拍图片数据缓冲区,dwCapturePicDataLen不为0时缓冲区里面存放设备上摄像机抓拍上传的图片数据
("dwCertificatePicDataLen", c_uint32), # 证件抓拍图片数据大小,不为0是表示后面带数据
(
"pCertificatePicData",
c_void_p,
), # 证件抓拍图片数据缓冲区,dwCertificatePicDataLen不为0时缓冲区里面存放设备上摄像机抓拍上传的证件抓拍图片数据
("byCardReaderKind", c_ubyte), # 读卡器属于哪一类:0-无效,1-IC读卡器,2-身份证读卡器,3-二维码读卡器,4-指纹头
("byRes3", c_ubyte * 2), # 保留,置为0
("byIDCardInfoExtend", c_ubyte), # pIDCardInfoExtend是否有效:0-无效,1-有效
("pIDCardInfoExtend", POINTER(NET_DVR_ID_CARD_INFO_EXTEND)), # 身份证刷卡扩展事件信息
("byRes", c_ubyte * 172), # 身份证刷卡扩展事件信息
]
class NET_DVR_ALARM_ISAPI_PICDATA(Structure):
    """Picture attachment carried with an ISAPI passthrough alarm."""
    _fields_ = [
        ("dwPicLen", c_uint32),        # picture data length
        ("byPicType", c_ubyte),        # picture format: 1 - jpg
        ("byRes", c_ubyte * 3),        # reserved
        ("szFilename", c_ubyte * 256), # picture file name
        ("pPicData", c_void_p),        # picture data buffer
    ]
class NET_DVR_ALARM_ISAPI_INFO(Structure):
    """ISAPI passthrough alarm payload.

    Bug fix: the reserved fields were declared with names "byRes[2]" and
    "byRes1[32]" (brackets copied from the C header into the Python name),
    which made the attributes unreachable through normal attribute access.
    They are renamed to byRes / byRes1; the memory layout is unchanged.
    """
    _fields_ = [
        ("pAlarmData", c_void_p),       # alarm data buffer
        ("dwAlarmDataLen", c_uint32),   # alarm data length
        ("byDataType", c_ubyte),        # 0 - invalid, 1 - xml, 2 - json
        ("byPicturesNumber", c_ubyte),  # number of attached pictures
        ("byRes", c_ubyte * 2),         # reserved
        ("pPicPackData", c_void_p),     # variable-length picture part
        ("byRes1", c_ubyte * 32),       # reserved
    ]
class NET_DVR_LOCAL_GENERAL_CFG(Structure):
_fields_ = [
("byExceptionCbDirectly", c_ubyte), # 0-通过线程池异常回调,1-直接异常回调给上层
("byNotSplitRecordFile", c_ubyte), # 回放和预览中保存到本地录像文件不切片 0-默认切片,1-不切片
("byResumeUpgradeEnable", c_ubyte), # 断网续传升级使能,0-关闭(默认),1-开启
(
"byAlarmJsonPictureSeparate",
c_ubyte,
), # 控制JSON透传报警数据和图片是否分离,0-不分离,1-分离(分离后走COMM_ISAPI_ALARM回调返回)
("byRes", c_ubyte * 4), # 保留
("i64FileSize", c_uint64), # 单位:Byte
("dwResumeUpgradeTimeout", c_uint32), # 断网续传重连超时时间,单位毫秒
("byAlarmReconnectMode", c_ubyte), # 0-独立线程重连(默认) 1-线程池重连
("byStdXmlBufferSize", c_ubyte), # 设置ISAPI透传接收缓冲区大小,1-1M 其他-默认
("byMultiplexing", c_ubyte), # 0-普通链接(非TLS链接)关闭多路复用,1-普通链接(非TLS链接)开启多路复用
("byFastUpgrade", c_ubyte), # 0-正常升级,1-快速升级
("byRes1", c_ubyte * 232), # 预留
]
class FRAME_INFO(Structure):
    """Decoded frame information delivered by the PlayCtrl (play SDK) library."""
    _fields_ = [
        ('nWidth', c_uint32),     # frame width in pixels
        ('nHeight', c_uint32),    # frame height in pixels
        ('nStamp', c_uint32),     # timestamp — units not documented here; presumably milliseconds, confirm against PlayCtrl docs
        ('nType', c_uint32),      # data type code — confirm values against PlayCtrl docs
        ('nFrameRate', c_uint32), # frame rate
        ('dwFrameNum', c_uint32)  # frame sequence number
    ]
class PLAYM4_SYSTEM_TIME(Structure):
    """Wall-clock timestamp used by the PlayM4/PlayCtrl library, down to milliseconds."""
    _fields_ = [
        ("dwYear", c_uint32),  # year
        ("dwMon", c_uint32),   # month
        ("dwDay", c_uint32),   # day
        ("dwHour", c_uint32),  # hour
        ("dwMin", c_uint32),   # minute
        ("dwSec", c_uint32),   # second
        ("dwMs", c_uint32),    # milliseconds
    ]
/B9gemyaeix-4.14.1.tar.gz/B9gemyaeix-4.14.1/docs/devel/starting.rst | .. _starting:
Starting with internationalization
==================================
Have a project and want to translate it into several languages? This
guide will help you do so. Several typical situations are showcased, but
most of the examples are generic and can be applied to other scenarios as
well.
Before translating any software, you should realize that languages around the
world are really different, and you should not make assumptions based on
your own experience. For most languages it will look weird if you try to
concatenate a sentence out of translated segments. You should also properly
handle plural forms, because many languages have complex rules for them, and
the internationalization framework you end up using should support this.
Last but not least, sometimes it might be necessary to add some context to the
translated string. Imagine a translator getting the string ``Sun`` to translate.
Without context, most people would translate it as our closest star, but it
might actually be used as an abbreviation for Sunday.
Choosing internationalization framework
---------------------------------------
Choose whatever is standard on your platform, try to avoid reinventing the
wheel by creating your own framework to handle localizations. Weblate supports
most of the widely used frameworks, see :ref:`formats` for more information
(especially :ref:`fmt_capabs`).
Our personal recommendation for some platforms is in the following table. This
is based on our experience, but that cannot cover all use cases, so always
consider your environment when making the choice.
+--------------------------+--------------------------+
| Platform | Recommended format |
+==========================+==========================+
| Android | :ref:`aresource` |
+--------------------------+--------------------------+
| iOS | :ref:`apple` |
+--------------------------+--------------------------+
| Qt | :ref:`qtling` |
+--------------------------+--------------------------+
| Python | :ref:`gettext` |
+--------------------------+--------------------------+
| PHP | :ref:`gettext` [#php]_ |
+--------------------------+--------------------------+
| C/C++ | :ref:`gettext` |
+--------------------------+--------------------------+
| C# | :ref:`dotnet` |
+--------------------------+--------------------------+
| Perl | :ref:`gettext` |
+--------------------------+--------------------------+
| Ruby | :ref:`ryaml` |
+--------------------------+--------------------------+
| Web extensions | :ref:`webex` |
+--------------------------+--------------------------+
| Java | :ref:`xliff` [#java]_ |
+--------------------------+--------------------------+
| JavaScript | :ref:`js-i18next` [#js]_ |
+--------------------------+--------------------------+
.. [#php]
The native Gettext support in PHP is buggy and often missing on Windows
builds, it is recommended to use third party library `motranslator
<https://github.com/phpmyadmin/motranslator>`_ instead.
.. [#java]
You can also use :ref:`javaprop` if plurals are not needed.
.. [#js]
You can also use plain :ref:`json` if plurals are not needed.
The more detailed workflow for some formats is described in following chapters:
* :doc:`gettext`
* :doc:`sphinx`
* :doc:`html`
.. seealso::
:doc:`integration`,
:ref:`continuous-translation`
| PypiClean |
/GailBot_Testing_Suite-0.1a8-py3-none-any.whl/gailbot/services/converter/payload/audioPayload.py | from typing import List, Union
from .payloadObject import PayLoadObject, PayLoadStatus
from ...organizer.source import SourceObject
from gailbot.core.utils.general import (
get_extension,
copy)
from gailbot.core.utils.logger import makelogger
from gailbot.core.utils.media import AudioHandler
import os
from gailbot.workspace.manager import WorkspaceManager
from gailbot.configs import service_config_loader
SUPPORTED_AUDIO = service_config_loader().engines.audio_supported_format
MERGED_FILE_NAME = "merged"
logger = makelogger("audioPayload")
def load_audio_payload(
    source: SourceObject, ws_manager: WorkspaceManager) -> Union[bool, List[PayLoadObject]]:
    """ Convert a source object into an audio payload.

    Args:
        source (SourceObject): an instance of SourceObject that stores the
        datafile and setting of the transcription
        ws_manager (WorkspaceManager): workspace used for intermediate files

    Returns:
        Union[bool, List[PayLoadObject]]: a one-element list holding the
        AudioPayload on success; False when the source has no setting, the
        file format is unsupported, or construction raises
    """
    # Short-circuit keeps the original check order: setting first, then format.
    if not (source.setting and AudioPayload.is_supported(source.source_path())):
        return False
    try:
        return [AudioPayload(source, ws_manager)]
    except Exception as exc:
        logger.error(source.__class__)
        logger.error(exc, exc_info=exc)
        return False
class AudioPayload(PayLoadObject):
    """
    Payload wrapper around a single audio source file.
    """

    def __init__(self, source: SourceObject, workspace: WorkspaceManager) -> None:
        super().__init__(source, workspace)

    @staticmethod
    def is_supported(file_path: str) -> bool:
        """
        Return True when file_path carries an audio extension GailBot supports.
        """
        logger.info(file_path)
        return get_extension(file_path) in SUPPORTED_AUDIO

    def _set_initial_status(self) -> None:
        """
        Mark the payload as freshly initialized.
        """
        self.status = PayLoadStatus.INITIALIZED

    def _copy_file(self) -> None:
        """
        Copy the original source file into the workspace data directory.
        """
        extension = get_extension(self.original_source)
        destination = os.path.join(
            self.workspace.data_copy, f"{self.name}.{extension}")
        copy(self.original_source, destination)
        self.data_files = [destination]

    def _merge_audio(self):
        # Overlay every data file into one merged track inside the output
        # media directory; failures are logged and swallowed (best-effort).
        try:
            merged = AudioHandler().overlay_audios(
                self.data_files, self.out_dir.media_file, MERGED_FILE_NAME)
            self.merged_audio = merged
            # assert after the assignment, mirroring the original order: the
            # attribute is set even when merging yielded a falsy path.
            assert merged
        except Exception as exc:
            logger.error(exc, exc_info=exc)

    @staticmethod
    def supported_format() -> List[str]:
        """
        Return the list of supported audio formats.
        """
        return SUPPORTED_AUDIO

    def __repr__(self) -> str:
        return "Audio payload"
/NDETCStemmer_kaenova-1.4.3-py3-none-any.whl/NDETCStemmer/morphology/prefix10rule.py | import re
"""
=============================================================
PREFIX RULE 10
=============================================================
Rule 10: me{l|r|w|y}V -> me-{l|r|w|y}V
Example: melipat meringkas mewarnai meyakinkan
"""
def prefix10_rule(word, word_candidate, keys):
matches = re.match(r'^me([l|r|w|y])([aiueo])(.*)$', word)
if matches:
if len(matches.group(1)+matches.group(2)+matches.group(3)) > 2 and \
(matches.group(1)+matches.group(2)+matches.group(3)) not in word_candidate[keys]:
word_candidate[keys].append(matches.group(1)+matches.group(2)+matches.group(3))
return word_candidate
"""
=============================================================
PREFIX RULE 11
=============================================================
Rule 11: mem{b|f|v} -> mem-{b|f|v} sastrawi
Rule 11: mem{b|f|v|p} -> mem-{b|f|v|p} modified by bun
example: membangun memfitnah memvonis memperoleh
"""
def prefix11_rule(word, word_candidate, keys):
matches = re.match(r'^mem([bfvp])(.*)$', word)
if matches:
if len(matches.group(1) + matches.group(2)) > 2 and \
(matches.group(1) + matches.group(2)) not in word_candidate[keys]:
word_candidate[keys].append(matches.group(1) + matches.group(2))
return word_candidate
"""
=============================================================
PREFIX RULE 12
=============================================================
Rule 12: mempe -> mem-pe- sastrawi
Rule 12: mempe{r|l} -> mem-pe- modified by ?
example: memperbarui -> barui mempelajari -> ajari
memperbodoh memperbanyak memperbudak
"""
def prefix12_rule(word, word_candidate, keys):
matches = re.match(r'^mempe[r|l](.*)$', word)
if matches:
if len(matches.group(1)) > 2 and matches.group(1) not in word_candidate[keys]:
word_candidate[keys].append(matches.group(1))
return word_candidate
"""
=============================================================
PREFIX RULE 13A
=============================================================
Rule 13a: memV -> me-mV
example: meminum -> minum memakan -> makan
"""
def prefix13a_rule(word, word_candidate, keys):
matches = re.match(r'mem([aiueo].*)$', word)
if matches:
if len('m'+ matches.group(1)) > 2 and 'm'+ matches.group(1) \
not in word_candidate[keys]:
word_candidate[keys].append('m'+ matches.group(1))
return word_candidate
"""
=============================================================
PREFIX RULE 13B
=============================================================
Rule 13b: mem{rV|V} -> me-p{rV|V}
example: memukul -> pukul memroses -> proses
"""
def prefix13b_rule(word, word_candidate, keys):
matches = re.match(r'^mem(r?[aiueo])(.*)$', word)
if matches:
if len('p'+ matches.group(1) + matches.group(2)) > 2 and \
('p'+ matches.group(1) + matches.group(2)) not in word_candidate[keys]:
word_candidate[keys].append('p'+ matches.group(1) + matches.group(2))
return word_candidate
"""
=============================================================
PREFIX RULE 14
=============================================================
Rule 14: men{c|d|j|s|t|z} -> men-{c|d|j|s|t|z}
example: mencinta -> men-cinta mendua -> men-dua
menjauh -> men-jauh menziarahi -> men-ziarahi mensyarat -> men-syarat *
"""
def prefix14_rule(word, word_candidate, keys):
matches = re.match(r'^men([cdjstz])(.*)$', word)
if matches:
if len(matches.group(1)+matches.group(2)) > 2 and \
(matches.group(1)+matches.group(2)) not in word_candidate[keys]:
word_candidate[keys].append(matches.group(1)+matches.group(2))
return word_candidate
"""
=============================================================
PREFIX RULE 15A
=============================================================
Rule 15a: men{V} -> me-n{V}
example: menuklir -> me-nuklir
"""
def prefix15a_rule(word, word_candidate, keys):
matches = re.match(r'^men([aiueo])(.*)$', word)
if matches:
if len('n'+ matches.group(1)+matches.group(2)) > 2 and \
('n'+ matches.group(1)+matches.group(2)) not in word_candidate[keys]:
word_candidate[keys].append('n'+ matches.group(1)+matches.group(2))
return word_candidate
"""
=============================================================
PREFIX RULE 15B
=============================================================
Rule 15b: men{V} -> me-t{V}
example: menangkap -> me-tangkap
"""
def prefix15b_rule(word, word_candidate, keys):
matches = re.match(r'^men([aiueo])(.*)$', word)
if matches:
if len('t'+ matches.group(1)+matches.group(2)) > 2 and \
('t'+ matches.group(1)+matches.group(2)) not in word_candidate[keys]:
word_candidate[keys].append('t'+ matches.group(1)+matches.group(2))
return word_candidate
"""
=============================================================
PREFIX RULE 16
=============================================================
Rule 19: meng{g|h|q|kh} -> meng-{g|h|q|kh}
example: menggila -> gila menghajar -> hajar mengqasar -> qasar
"""
def prefix16_rule(word, word_candidate, keys):
matches = re.match(r'^meng([g|h|q|k])(.*)$', word)
if matches:
if len(matches.group(1)+matches.group(2)) > 2 and \
(matches.group(1)+matches.group(2)) not in word_candidate[keys]:
word_candidate[keys].append(matches.group(1)+matches.group(2))
return word_candidate
"""
=============================================================
PREFIX RULE 16 V2
=============================================================
Rule 16 V2: meng{g|h|q|kh} -> meng-{g|h|q|kh}
example: menggila -> gila menghajar -> hajar mengqasar -> qasar
def prefix16_rule_v2(word, word_candidate, keys):
matches = re.match(r'^meng([g|h|q|k])(.*)$', word)
if matches:
if len(matches.group(1)+matches.group(2)) > 2 and \
(matches.group(1)+matches.group(2)) not in word_candidate[keys]:
word_candidate[keys].append(matches.group(1)+matches.group(2))
return word_candidate
"""
"""
=============================================================
PREFIX RULE 17A
=============================================================
Rule 17a: mengV -> meng-V
example: mengudara -> meng-udara
"""
def prefix17a_rule(word, word_candidate, keys):
matches = re.match(r'^meng([aiueo])(.*)$', word)
if matches:
if len(matches.group(1)+matches.group(2)) > 2 and \
(matches.group(1)+matches.group(2)) not in word_candidate[keys]:
word_candidate[keys].append(matches.group(1)+matches.group(2))
return word_candidate
"""
=============================================================
PREFIX RULE 17B
=============================================================
Rule 17b: mengV -> meng-kV
example: mengupas -> meng-kupas
"""
def prefix17b_rule(word, word_candidate, keys):
matches = re.match(r'^meng([aiueo])(.*)$', word)
if matches:
if len('k'+ matches.group(1)+matches.group(2)) > 2 and \
('k'+ matches.group(1)+matches.group(2)) not in word_candidate[keys]:
word_candidate[keys].append('k'+ matches.group(1)+matches.group(2))
return word_candidate
"""
=============================================================
PREFIX RULE 17C
=============================================================
Rule 17c: mengV -> meng-V- where V = 'e'
example: mengesa -> meng-esa
mengebom -> meng-ebom
"""
def prefix17c_rule(word, word_candidate, keys):
matches = re.match(r'^menge(.*)$', word)
if matches:
if len(matches.group(1)) > 2 and matches.group(1) not in word_candidate[keys]:
word_candidate[keys].append(matches.group(1))
return word_candidate
"""
=============================================================
PREFIX RULE 17D
=============================================================
Rule 17d: mengV -> me-ngV
example: mengeri -> ngeri note: periksa ulang
"""
def prefix17d_rule(word, word_candidate, keys):
matches = re.match(r'^meng(.*)$', word)
if matches:
if len('ng'+ matches.group(1)) > 2 and ('ng'+ matches.group(1)) not in word_candidate[keys]:\
word_candidate[keys].append('ng' + matches.group(1))
return word_candidate
"""
=============================================================
PREFIX RULE 18A
=============================================================
Rule 18a: menyV -> me-nyV
example: menyanyi -> me-nyanyi
"""
def prefix18a_rule(word, word_candidate, keys):
matches = re.match(r'^meny([aiueo])(.*)$', word)
if matches:
if len('ny'+ matches.group(1)+matches.group(2)) > 2 and \
('ny'+ matches.group(1)+matches.group(2)) not in word_candidate[keys]:
word_candidate[keys].append('ny'+ matches.group(1)+matches.group(2))
return word_candidate
"""
=============================================================
PREFIX RULE 18B
=============================================================
Rule 18b: menyV -> meny-sV
example: menyuara -> meny-suara
"""
def prefix18b_rule(word, word_candidate, keys):
matches = re.match(r'^meny([aiueo])(.*)$', word)
if matches:
if len('s'+ matches.group(1)+matches.group(2)) > 2 and \
('s'+ matches.group(1)+matches.group(2)) not in word_candidate[keys]:
word_candidate[keys].append('s'+ matches.group(1)+matches.group(2))
# if len('c'+ matches.group(1)+matches.group(2)) > 2 and \
# ('c'+ matches.group(1)+matches.group(2)) not in word_candidate[keys]:
# word_candidate[keys].append('c'+ matches.group(1)+matches.group(2))
return word_candidate
"""
=============================================================
PREFIX RULE 19
=============================================================
Rule 19: mempV -> mem-pV where V != 'e'
example: mempopulerkan -> populerkan
"""
def prefix19_rule(word, word_candidate, keys):
matches = re.match(r'^memp([aiuo])(.*)$', word)
if matches:
if len('p'+ matches.group(1)+matches.group(2)) > 2 and \
('p'+ matches.group(1)+matches.group(2)) not in word_candidate[keys]:
word_candidate[keys].append('p'+ matches.group(1)+matches.group(2))
return word_candidate
"""
=============================================================
PREFIX RULE 19 B
=============================================================
Rule 19b: mersatu -> satu
example:
"""
def prefix19b_rule(word, word_candidate, keys):
matches = re.match(r'^mer(.*)$', word)
if matches:
if len(matches.group(1)) > 2 and \
matches.group(1)not in word_candidate[keys]:
word_candidate[keys].append(matches.group(1))
return word_candidate | PypiClean |
/Montreal-Forced-Aligner-3.0.0a3.tar.gz/Montreal-Forced-Aligner-3.0.0a3/README.md | # Montreal Forced Aligner

[](https://codecov.io/gh/MontrealCorpusTools/Montreal-Forced-Aligner)
[](http://montreal-forced-aligner.readthedocs.io/en/latest/?badge=latest)
[](https://github.com/MontrealCorpusTools/montreal-forced-aligner/)
[](https://zenodo.org/badge/latestdoi/44983969)
The Montreal Forced Aligner is a command line utility for performing forced alignment of speech datasets using Kaldi (http://kaldi-asr.org/).
Please see the documentation http://montreal-forced-aligner.readthedocs.io for installation and usage.
If you run into any issues, please check the [mailing list](https://groups.google.com/forum/#!forum/mfa-users) for fixes/workarounds or to post a [new issue](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner/issues).
## Installation
You can install MFA either entirely through [conda](https://docs.conda.io/en/latest/) or a mix of conda for Kaldi and Pynini dependencies and Python packaging for MFA itself
### Conda installation
MFA is hosted on [conda-forge](https://conda-forge.org/) and can be installed via:
```
conda install -c conda-forge montreal-forced-aligner
```
in your environment of choice.
### Source installation
If you'd like to install a local version of MFA or want to use the development setup, the easiest way is to first create the dev environment from the yaml in the repo root directory:
```
conda env create -n mfa-dev -f environment.yml
```
Alternatively, the dependencies can be installed via:
```
conda install -c conda-forge python=3.8 kaldi sox librosa biopython praatio tqdm requests colorama pyyaml pynini openfst baumwelch ngram
```
MFA can be installed in develop mode via:
```
pip install -e .[dev]
```
You should be able to see appropriate output from `mfa version`
#### Development
The test suite is run via `tox -e py38-win` or `tox -e py38-unix` depending on the OS, and the docs are generated via `tox -e docs`
## Quick links
* [Getting started docs](https://montreal-forced-aligner.readthedocs.io/en/latest/getting_started.html)
* [User Guide](https://montreal-forced-aligner.readthedocs.io/en/latest/user_guide/index.html)
* [API Reference](https://montreal-forced-aligner.readthedocs.io/en/latest/reference/index.html)
* [Release notes](https://montreal-forced-aligner.readthedocs.io/en/latest/changelog/index.html)
* [MFA Models](https://github.com/MontrealCorpusTools/mfa-models)
* [Eleanor Chodroff's MFA tutorial](https://lingmethodshub.github.io/content/tools/mfa/mfa-tutorial/)
* [@mmcauliffe's forced alignment blog posts](https://memcauliffe.com/tag/forced-alignment.html)
| PypiClean |
/Mathics-1.0.tar.gz/Mathics-1.0/mathics/web/media/js/mathjax/jax/output/HTML-CSS/fonts/TeX/SansSerif/Bold/BasicLatin.js | MathJax.Hub.Insert(MathJax.OutputJax["HTML-CSS"].FONTDATA.FONTS["MathJax_SansSerif-bold"],{32:[0,0,250,0,0],33:[694,0,367,110,256],34:[694,-442,558,37,420],35:[694,193,917,61,855],36:[750,56,550,49,488],37:[750,56,1029,61,966],38:[716,22,831,47,769],39:[694,-442,306,80,226],40:[750,249,428,79,366],41:[750,250,428,61,348],42:[750,-293,550,67,482],43:[617,116,856,61,794],44:[146,106,306,80,226],45:[273,-186,367,12,305],46:[146,0,306,80,226],47:[750,249,550,61,488],48:[715,22,550,43,506],49:[716,-1,550,76,473],50:[716,0,550,46,495],51:[716,22,550,46,503],52:[694,0,550,31,518],53:[694,22,550,37,494],54:[716,22,550,46,503],55:[695,11,550,46,503],56:[715,22,550,46,503],57:[716,22,550,46,503],58:[458,0,306,80,226],59:[458,106,306,80,226],61:[407,-94,856,61,794],63:[705,0,519,61,457],64:[704,11,733,61,671],65:[694,0,733,42,690],66:[694,-1,733,92,671],67:[704,11,703,61,647],68:[694,-1,794,92,732],69:[691,0,642,92,595],70:[691,0,611,92,564],71:[705,11,733,61,659],72:[694,0,794,92,702],73:[694,0,331,85,246],74:[694,22,519,46,427],75:[694,0,764,92,701],76:[694,0,581,92,534],77:[694,0,978,92,886],78:[694,0,794,92,702],79:[716,22,794,62,731],80:[694,0,703,92,641],81:[716,106,794,62,732],82:[694,0,703,92,654],83:[716,22,611,49,549],84:[688,0,733,40,692],85:[694,22,764,92,672],86:[694,-1,733,27,705],87:[694,0,1039,24,1014],88:[694,0,733,37,694],89:[694,0,733,24,708],90:[694,0,672,61,616],91:[750,250,343,79,318],93:[750,250,343,24,263],94:[694,-537,550,108,441],95:[-23,110,550,0,549],97:[475,11,525,31,472],98:[694,10,561,54,523],99:[475,11,489,37,457],100:[694,11,561,37,507],101:[474,10,511,30,480],102:[705,0,336,29,381],103:[469,206,550,17,534],104:[694,0,561,53,508],105:[695,0,256,46,208],106:[695,205,286,-71,232],107:[694,0,531,63,496],108:[694,0,256,54,201],109:[469,0,867,53,815],110:[468,0,561,53,508],111:[474,11,550,32,518],112:[469,194,561,54,523],113:[469
,194,561,37,507],114:[469,0,372,54,356],115:[474,10,422,30,396],116:[589,10,404,20,373],117:[458,11,561,52,508],118:[458,0,500,26,473],119:[458,0,744,24,719],120:[458,0,500,24,475],121:[458,205,500,29,473],122:[458,0,476,31,442],126:[344,-198,550,92,457]});MathJax.Ajax.loadComplete(MathJax.OutputJax["HTML-CSS"].fontDir+"/SansSerif/Bold/BasicLatin.js"); | PypiClean |
/IceVolt-0.1.0.tar.gz/IceVolt-0.1.0/icevolt/models.py | import getpass
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, Session
from sqlalchemy import (
create_engine, Column, ForeignKey, Integer,
String, Table, Text
)
from werkzeug.security import (
generate_password_hash,
check_password_hash
)
# SQLAlchemy objects
engine = create_engine('sqlite:///blog.db')
Base = declarative_base()
session = Session(bind=engine)
class Post(Base):
    """A blog post.

    Stores the post text together with an associated banner image and
    category tags.  The date is assigned by the application when posting.
    """
    __tablename__ = 'posts'

    id = Column(Integer, primary_key=True)
    title = Column(String(250))
    content = Column(String(250))
    image = Column(String(250))
    date = Column(String(100))
    tags = relationship('Tag',
                        secondary='post_tag_association')

    def __init__(self, title, content, image, date, tags):
        self.title = title
        self.content = content
        self.image = image
        self.date = date
        self.tags = tags

    @classmethod
    def random(cls):
        """Return a Post filled with random lowercase text.

        The title is 100 random letters and the content 200; the image is
        empty and no tags are attached.
        """
        import random
        import string
        from datetime import datetime

        def random_text(length):
            """Return `length` random lowercase ASCII letters."""
            return ''.join(
                random.choice(string.ascii_lowercase) for _ in range(length))

        TITLE_LEN = 100
        CONTENT_LEN = 200
        return cls(
            title=random_text(TITLE_LEN),
            content=random_text(CONTENT_LEN),
            # NOTE(review): `date` column is String(100) but a datetime is
            # passed here, as in the original — confirm intended coercion.
            date=datetime.today(),
            image='',
            tags=[],
        )
class Tag(Base):
    """Tags for blog post categories."""
    __tablename__ = 'tags'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Tag label; uniqueness is enforced at the database level.
    name = Column(String(50), unique=True)
    def __init__(self, name):
        self.name = name
class PostTagAssociation(Base):
    """Many-to-many association table between posts and tags.

    Each row links one Post to one Tag; the pair of foreign keys forms a
    composite primary key, so a given (post, tag) pair can only appear once.
    """
    __tablename__ = 'post_tag_association'
    post_id = Column('post_id', Integer, ForeignKey('posts.id'), primary_key=True)
    tag_id = Column('tag_id', Integer, ForeignKey('tags.id'), primary_key=True)
    post = relationship('Post')
    tag = relationship('Tag')
class User(Base):
    """Admin user credentials.

    Only a werkzeug hash of the password is kept, never the plain text.
    """
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    username = Column(String(50))
    # Bug fix: the original class had no `password` Column, so the hash set
    # in __init__ lived only on the transient instance and was never
    # persisted — check_pass() failed on any User reloaded from the
    # database.  werkzeug hashes are well under 256 characters.
    password = Column(String(256))

    def __init__(self, username):
        """Store a hashed password with a username.

        NOTE(review): prompts interactively via getpass inside the
        constructor, so this assumes a TTY is attached — confirm callers.
        """
        self.username = username
        self.password = self._prompt_password()  # stores the hash

    @staticmethod
    def _prompt_password():
        """Prompt for a new password and return its hash."""
        password = getpass.getpass('Enter new password: ')
        return generate_password_hash(password)

    def check_pass(self, password):
        """Return True if `password` matches the stored hash."""
        return check_password_hash(self.password, password)
/FreeClimb-4.5.0-py3-none-any.whl/freeclimb/model/account_result_all_of.py | import re # noqa: F401
import sys # noqa: F401
from freeclimb.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from freeclimb.exceptions import ApiAttributeError
def lazy_import():
    # Deferred imports: breaks the circular dependency between this
    # generated model module and the model modules it references, then
    # publishes the names at module scope for the type maps below.
    from freeclimb.model.account_status import AccountStatus
    from freeclimb.model.account_type import AccountType
    globals()['AccountStatus'] = AccountStatus
    globals()['AccountType'] = AccountType
class AccountResultAllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'account_id': (str, none_type,), # noqa: E501
'api_key': (str, none_type,), # noqa: E501
'alias': (str, none_type,), # noqa: E501
'label': (str, none_type,), # noqa: E501
'type': (AccountType,), # noqa: E501
'status': (AccountStatus,), # noqa: E501
'subresource_uris': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'account_id': 'accountId', # noqa: E501
'api_key': 'apiKey', # noqa: E501
'alias': 'alias', # noqa: E501
'label': 'label', # noqa: E501
'type': 'type', # noqa: E501
'status': 'status', # noqa: E501
'subresource_uris': 'subresourceUris', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""AccountResultAllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
account_id (str, none_type): String that uniquely identifies this account resource.. [optional] # noqa: E501
api_key (str, none_type): The API key assigned to this account. This key must be kept a secret by the customer.. [optional] # noqa: E501
alias (str, none_type): A description for this account.. [optional] # noqa: E501
label (str, none_type): A string that identifies a category or group to which the account belongs.. [optional] # noqa: E501
type (AccountType): [optional] # noqa: E501
status (AccountStatus): [optional] # noqa: E501
subresource_uris ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): The list of subresources for this account.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""AccountResultAllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
account_id (str, none_type): String that uniquely identifies this account resource.. [optional] # noqa: E501
api_key (str, none_type): The API key assigned to this account. This key must be kept a secret by the customer.. [optional] # noqa: E501
alias (str, none_type): A description for this account.. [optional] # noqa: E501
label (str, none_type): A string that identifies a category or group to which the account belongs.. [optional] # noqa: E501
type (AccountType): [optional] # noqa: E501
status (AccountStatus): [optional] # noqa: E501
subresource_uris ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): The list of subresources for this account.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | PypiClean |
/parts/Top/Glasses/Kurt.py | def Kurt():
return (
'<defs>'
' <filter id="filter0_d_0_1711" x="5.76709" y="2.63634" width="130.465" height="51.3032"'
' filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB">'
' <feFlood flood-opacity="0" result="BackgroundImageFix" />'
' <feColorMatrix in="SourceAlpha" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0"'
' result="hardAlpha" />'
' <feOffset dy="2" />'
' <feColorMatrix type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.16 0" />'
' <feBlend mode="normal" in2="BackgroundImageFix" result="effect1_dropShadow_0_1711" />'
' <feBlend mode="normal" in="SourceGraphic" in2="effect1_dropShadow_0_1711" result="shape" />'
' </filter>'
'</defs>'
'<g filter="url(#filter0_d_0_1711)">'
' <path'
' d="M70.9998 13.1111C59.9624 13.1111 58.3704 4.0266 35.6704 2.74074C12.9854 1.7168 5.81505 8.44949 5.7763 13.1111C5.81307 17.4043 4.64754 28.5613 19.3645 41.6296C34.1366 57.1421 49.2702 51.8818 54.6939 46.8148C60.135 44.4731 66.3402 23.4596 70.9998 23.4815C75.6595 23.5033 81.8646 44.4731 87.3057 46.8148C92.7294 51.8818 107.863 57.1421 122.635 41.6296C137.352 28.5613 136.187 17.4043 136.223 13.1111C136.185 8.44949 129.014 1.7168 106.329 2.74074C83.6293 4.0266 82.0373 13.1111 70.9998 13.1111Z"'
' fill="#F4F4F4" />'
' <path'
' d="M60.1293 23.4815C60.5103 15.8233 47.2156 7.64243 32.9529 7.92593C18.6973 8.22451 16.8417 17.3787 16.647 20.8889C16.2981 29.0221 25.0144 47.3038 41.1058 46.8148C57.1972 46.3058 59.9092 28.5344 60.1293 23.4815Z"'
' fill="#2F383B" />'
' <path'
' d="M81.8707 23.4815C81.4897 15.8233 94.7844 7.64243 109.047 7.92593C123.303 8.22451 125.158 17.3787 125.353 20.8889C125.702 29.0221 116.986 47.3038 100.894 46.8148C84.8028 46.3058 82.0908 28.5344 81.8707 23.4815Z"'
' fill="#2F383B" />'
'</g>'
) | PypiClean |
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/@mapbox/node-pre-gyp/lib/main.js | 'use strict';
/**
* Set the title.
*/
// Process title shown in ps/top output and reused in the log banner below.
process.title = 'node-pre-gyp';
const node_pre_gyp = require('../');
const log = require('npmlog');
/**
 * Process and execute the selected commands.
 */
const prog = new node_pre_gyp.Run({ argv: process.argv });
let completed = false;
// No commands parsed: handle -v/--version and -h/--help explicitly,
// otherwise print usage and exit non-zero.
if (prog.todo.length === 0) {
if (~process.argv.indexOf('-v') || ~process.argv.indexOf('--version')) {
console.log('v%s', prog.version);
process.exit(0);
} else if (~process.argv.indexOf('-h') || ~process.argv.indexOf('--help')) {
console.log('%s', prog.usage());
process.exit(0);
}
console.log('%s', prog.usage());
process.exit(1);
}
// if --no-color is passed
// NOTE(review): hasOwnProperty is checked on `prog` itself, not `prog.opts`,
// before reading prog.opts.color — confirm this is intentional.
if (prog.opts && Object.hasOwnProperty.call(prog, 'color') && !prog.opts.color) {
log.disableColor();
}
// Log preamble; the matching "ok" is emitted when all commands complete.
log.info('it worked if it ends with', 'ok');
log.verbose('cli', process.argv);
log.info('using', process.title + '@%s', prog.version);
log.info('using', 'node@%s | %s | %s', process.versions.node, process.platform, process.arch);
/**
* Change dir if -C/--directory was passed.
*/
const dir = prog.opts.directory;
if (dir) {
const fs = require('fs');
try {
const stat = fs.statSync(dir);
if (stat.isDirectory()) {
log.info('chdir', dir);
process.chdir(dir);
} else {
log.warn('chdir', dir + ' is not a directory');
}
} catch (e) {
if (e.code === 'ENOENT') {
log.warn('chdir', dir + ' is not a directory');
} else {
log.warn('chdir', 'error during chdir() "%s"', e.message);
}
}
}
/**
 * Execute the next queued command, then reschedule itself until the
 * queue is empty.  On command failure, logs diagnostics and exits 1.
 */
function run() {
  const cmd = prog.todo.shift();
  if (!cmd) {
    // Queue drained: mark success so the 'exit' handler stays quiet.
    completed = true;
    log.info('ok');
    return;
  }

  // set binary.host when appropriate. host determines the s3 target bucket.
  const host = prog.setBinaryHostProperty(cmd.name);
  const hostAwareCommands = ['install', 'publish', 'unpublish', 'info'];
  if (host && hostAwareCommands.indexOf(cmd.name) >= 0) {
    log.info('using binary.host: ' + prog.package_json.binary.host);
  }

  prog.commands[cmd.name](cmd.args, function(err) {
    if (err) {
      log.error(cmd.name + ' error');
      log.error('stack', err.stack);
      errorMessage();
      log.error('not ok');
      console.log(err.message);
      return process.exit(1);
    }
    // Any extra callback arguments are command output: forward to stdout.
    const extraArgs = [].slice.call(arguments, 1);
    if (extraArgs.length) {
      console.log.apply(console, extraArgs);
    }
    // now run the next command in the queue
    process.nextTick(run);
  });
}
// Exit code 6: the process is exiting "successfully" (code 0) but run()
// never marked completion — a command returned without calling its callback.
process.on('exit', (code) => {
  if (!completed && !code) {
    log.error('Completion callback never invoked!');
    errorMessage();
    process.exit(6);
  }
});

// Exit code 7: an exception escaped the command machinery entirely.
process.on('uncaughtException', (err) => {
  log.error('UNCAUGHT EXCEPTION');
  log.error('stack', err.stack);
  errorMessage();
  process.exit(7);
});
/**
 * Log an environment report (OS, argv, cwd, node and tool versions) to
 * help diagnose failures.  Copied from npm's lib/util/error-handler.js.
 */
function errorMessage() {
  const os = require('os');
  const report = [
    ['System', os.type() + ' ' + os.release()],
    ['command', process.argv.map(JSON.stringify).join(' ')],
    ['cwd', process.cwd()],
    ['node -v', process.version],
    [process.title + ' -v', 'v' + prog.package.version]
  ];
  for (const [topic, detail] of report) {
    log.error(topic, detail);
  }
}
// Kick off the command queue; run() reschedules itself via process.nextTick.
run();
class LazyDict(dict):
    """A dict whose keys are also readable and writable as attributes.

    After construction, setting an attribute stores a dict item instead,
    unless the attribute already exists on the instance or is a property
    on the class.  Pass ``_ld_initialize=False`` to keep attribute writes
    going to the instance ``__dict__`` (useful for subclass ``__init__``).
    """

    def __init__(self, *args, **kwargs):
        # While this flag is falsy, __setattr__ writes straight to
        # self.__dict__ (which is also how this very assignment succeeds:
        # '_ld_initialized' is not in __dict__ yet at this point).
        # NOTE(review): the kwarg is spelled '_ld_initialize' but the
        # attribute is '_ld_initialized' -- presumably intentional public
        # API; confirm before renaming either.
        self._ld_initialized = kwargs.pop('_ld_initialize', True)
        dict.__init__(self, *args, **kwargs)

    def __getattr__(self, attr):
        # Only called when normal attribute lookup fails; fall back to keys.
        if attr in self:
            return self[attr]
        raise AttributeError("'%s' object has no attribute '%s'"
                             % (self.__class__.__name__, attr))

    def __setattr__(self, item, value):
        # this test allows attributes to be set in the __init__ method
        if '_ld_initialized' not in self.__dict__ or not self.__dict__['_ld_initialized']:
            self.__dict__[item] = value
        # any normal attributes are handled normally when they already exist
        # this would happen if they are given different values after initialization
        elif item in self.__dict__:
            self.__dict__[item] = value
        # if there is a property on the class, delegate to its setter
        elif item in self.__class__.__dict__ and isinstance(self.__class__.__dict__[item],
                                                            property):
            self.__class__.__dict__[item].__set__(self, value)
        # attributes added after initialization are stored as dict items
        else:
            self[item] = value

    def __delattr__(self, name):
        # NOTE(review): deletes the dict item, so a missing name raises
        # KeyError rather than the AttributeError callers might expect.
        del self[name]
class _Attribute(str):
def __add__(self, other):
return _Attribute('{0} {1}'.format(self, other).lstrip(' '))
class HTMLAttributes(LazyDict):
    """LazyDict specialized for accumulating HTML attribute strings.

    Missing keys/attributes auto-create as empty ``_Attribute`` values, so
    ``attrs.class_ += 'btn'`` works without prior setup.  Trailing
    underscores (used to dodge Python keywords such as ``class_``) are
    stripped from every key.
    """

    def __init__(self, *args, **kwargs):
        LazyDict.__init__(self, *args, **kwargs)
        self._clean_keys()

    def __getattr__(self, attr):
        # Auto-vivify: unknown attributes become empty _Attribute values.
        cleaned = self._clean_key(attr)
        if cleaned not in self:
            self[cleaned] = _Attribute()
        return self[cleaned]

    def __getitem__(self, key):
        cleaned = self._clean_key(key)
        if cleaned not in self:
            self[cleaned] = _Attribute()
        return LazyDict.__getitem__(self, cleaned)

    def __setattr__(self, item, value):
        LazyDict.__setattr__(self, self._clean_key(item), _Attribute(value))

    def __setitem__(self, key, value):
        LazyDict.__setitem__(self, self._clean_key(key), _Attribute(value))

    def _clean_key(self, key):
        # 'class_' -> 'class', 'for_' -> 'for'
        return key[:-1] if key.endswith('_') else key

    def _clean_keys(self):
        for key in list(self.keys()):
            cleaned = self._clean_key(key)
            if cleaned == key:
                continue
            # Bypass our own __getitem__: it cleans the key and would
            # auto-create an empty value, because the cleaned key does
            # not exist yet.
            value = LazyDict.__getitem__(self, key)
            self[cleaned] = value
            del self[key]
/EnigmaOPTestop-0.0.6.tar.gz/EnigmaOPTestop-0.0.6/src/enigmaop2/cipher buffer.py | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 20 13:02:36 2021
@author: ninad
"""
import Rotors

# Rotor wiring tables supplied by the project-local Rotors module.
# Indexed as wiring[rotor_number][contact]; presumably 128-entry
# permutations -- confirm against Rotors.py.
wiring=Rotors.Rotor()
# NOTE(review): module-level list, but encrypt()/decrypt() assign a local
# of the same name, so this global is never actually updated.
RotorSettingopz=[]
def runThrough(Rotor_num,inputy,Rotor_settingy):
    """Pass symbol `inputy` through rotor `Rotor_num` offset by its setting.

    NOTE(review): the alphabet appears to be 128 symbols (the reflector has
    128 entries and settings wrap at >127), yet the offset is reduced mod
    127 -- this looks like an off-by-one; confirm against the Rotors tables
    before changing, since a fix alters every ciphertext.
    """
    inputy = (inputy+Rotor_settingy) % 127;
    return wiring[Rotor_num][inputy];
def plug(plugboard, key):
    """Apply the plugboard substitution to `key`.

    A mapping of -1 marks an unplugged position: the symbol passes through
    unchanged.  Otherwise the mapped symbol is returned.
    """
    mapped = plugboard[key]
    return key if mapped == -1 else mapped
reflector=[127, 126, 125, 124, 123, 122, 121, 120, 119, 118, 117, 116, 115, 114, 113, 112, 111, 110, 109, 108, 107, 106, 105, 104, 103, 102, 101, 100, 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
def encrypt(Rotor_combinationz,RotorSettingz,plugboardz,x):
    """Encrypt one symbol `x` through the 300-rotor machine.

    Path: plugboard -> 300 rotors forward -> reflector -> 300 rotors
    backward -> plugboard.  Afterwards the rotor settings are advanced
    odometer-style (setting 0 increments; on wrap past 127 the carry
    propagates to the next rotor).

    Returns (rotor combination, advanced settings, ciphered symbol).
    `RotorSettingz` is mutated in place; the same list object is returned.
    """
    x=plug(plugboardz,x)
    connectTo=x
    s=x
    #ciphering block: forward pass through all 300 rotors
    for i in range(0,300):
        s=runThrough(Rotor_combinationz[i],s,RotorSettingz[i])
        # NOTE(review): connectTo mirrors s on every step but only its final
        # value is used -- the intermediate writes are dead code.
        connectTo=s
    s=reflector[s]
    # backward pass through the rotors in reverse order
    for i in range(299,-1,-1):
        s=runThrough(Rotor_combinationz[i],s,RotorSettingz[i])
        connectTo=s
    triger=1
    counter=0
    connectTo=plug(plugboardz,connectTo)
    #incrementing the 1st rotor setting by 1, with carry into later rotors
    while triger==1:
        RotorSettingz[counter]+=1
        if RotorSettingz[counter]>127:
            RotorSettingz[counter]=0
        else:
            triger=0
        counter+=1
    # NOTE(review): this assignment creates a *local* RotorSettingopz that
    # shadows the module-level list; rebinding RotorSettingz to [] has no
    # effect on the caller (the mutated list was already aliased above).
    RotorSettingopz=RotorSettingz
    RotorSettingz=[]
    return Rotor_combinationz,RotorSettingopz,connectTo
def decrypt(Rotor_combinationz,RotorSettingz,plugboardz,x):
    """Decrypt one symbol `x` through the 300-rotor machine.

    Path: plugboard -> reflector -> 300 rotors backward -> reflector ->
    300 rotors forward -> plugboard, then the settings advance exactly as
    in encrypt().

    NOTE(review): unlike encrypt(), the reflector is applied *twice* here
    (before the backward pass and again between the passes).  That breaks
    the usual encrypt/decrypt symmetry and looks like a bug, but cannot be
    verified without the Rotors tables -- confirm against round-trip tests.

    Returns (rotor combination, advanced settings, deciphered symbol).
    `RotorSettingz` is mutated in place; the same list object is returned.
    """
    x=plug(plugboardz,x)
    connectTo=x
    s=x
    #ciphering block
    s=reflector[s]
    # backward pass through the rotors in reverse order
    for i in range(299,-1,-1):
        s=runThrough(Rotor_combinationz[i],s,RotorSettingz[i])
        connectTo=s
    s=reflector[s]
    # forward pass through all 300 rotors
    for i in range(0,300):
        s=runThrough(Rotor_combinationz[i],s,RotorSettingz[i])
        connectTo=s
    triger=1
    counter=0
    connectTo=plug(plugboardz,connectTo)
    #incrementing the 1st rotor setting by 1, with carry into later rotors
    while triger==1:
        RotorSettingz[counter]+=1
        if RotorSettingz[counter]>127:
            RotorSettingz[counter]=0
        else:
            triger=0
        counter+=1
    # NOTE(review): same local-shadowing pattern as encrypt(); see there.
    RotorSettingopz=RotorSettingz
    RotorSettingz=[]
    return Rotor_combinationz,RotorSettingopz,connectTo
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.