id
stringlengths
1
265
text
stringlengths
6
5.19M
dataset_id
stringclasses
7 values
/Flask_Multipass-0.4.9-py3-none-any.whl/flask_multipass/providers/static.py
import itertools
import operator

from flask_wtf import FlaskForm
from wtforms.fields import PasswordField, StringField
from wtforms.validators import DataRequired

from flask_multipass.auth import AuthProvider
from flask_multipass.data import AuthInfo, IdentityInfo
from flask_multipass.exceptions import InvalidCredentials, NoSuchUser
from flask_multipass.group import Group
from flask_multipass.identity import IdentityProvider


class StaticLoginForm(FlaskForm):
    """Username/password form used for the static auth provider's local login."""

    username = StringField('Username', [DataRequired()])
    password = PasswordField('Password', [DataRequired()])


class StaticAuthProvider(AuthProvider):
    """Provides authentication against a static list.

    This provider should NEVER be used in any production system.
    It serves mainly as a simple dummy/example for development.

    The type name to instantiate this provider is *static*.
    """

    #: Form class rendered for the local login page
    login_form = StaticLoginForm

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Map of username -> plaintext password; empty unless configured.
        self.settings.setdefault('identities', {})

    def process_local_login(self, data):
        """Check submitted credentials against the static identity list.

        :param data: form data containing ``username`` and ``password``
        :raises NoSuchUser: if the username is not in the static list
        :raises InvalidCredentials: if the password does not match
        :return: the result of ``multipass.handle_auth_success``
        """
        username = data['username']
        password = self.settings['identities'].get(username)
        if password is None:
            raise NoSuchUser(provider=self)
        if password != data['password']:
            raise InvalidCredentials(provider=self)
        auth_info = AuthInfo(self, username=data['username'])
        return self.multipass.handle_auth_success(auth_info)


class StaticGroup(Group):
    """A group from the static identity provider."""

    supports_member_list = True

    def get_members(self):
        """Yield an ``IdentityInfo`` for every username listed in this group."""
        members = self.provider.settings['groups'][self.name]
        for username in members:
            yield self.provider._get_identity(username)

    def has_member(self, identifier):
        """Return whether *identifier* is listed as a member of this group."""
        return identifier in self.provider.settings['groups'][self.name]


class StaticIdentityProvider(IdentityProvider):
    """Provides identity information from a static list.

    This provider should NEVER be used in any production system.
    It serves mainly as a simple dummy/example for development.

    The type name to instantiate this provider is *static*.
    """

    #: If the provider supports refreshing user information
    supports_refresh = True
    #: If the provider supports searching identities
    supports_search = True
    #: If the provider also provides groups and membership information
    supports_groups = True
    #: If the provider supports getting the list of groups an identity belongs to
    supports_get_identity_groups = True
    #: The class that represents groups from this provider
    group_class = StaticGroup

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Both settings default to empty so the provider works unconfigured.
        self.settings.setdefault('identities', {})
        self.settings.setdefault('groups', {})

    def _get_identity(self, identifier):
        # Returns None (rather than raising) for unknown identifiers; the
        # remaining identity dict entries become IdentityInfo attributes.
        user = self.settings['identities'].get(identifier)
        if user is None:
            return None
        return IdentityInfo(self, identifier, **user)

    def get_identity_from_auth(self, auth_info):
        """Resolve the identity matching the username from a successful auth."""
        identifier = auth_info.data['username']
        return self._get_identity(identifier)

    def refresh_identity(self, identifier, multipass_data):
        """Re-read the identity from the static list (no external state to refresh)."""
        return self._get_identity(identifier)

    def get_identity(self, identifier):
        """Return the identity for *identifier*, or None if unknown."""
        return self._get_identity(identifier)

    def search_identities(self, criteria, exact=False):
        """Yield identities whose data matches every key in *criteria*.

        For ``exact`` matching the candidate's values must intersect the
        searched values; otherwise a substring match is enough.
        """
        for identifier, user in self.settings['identities'].items():
            for key, values in criteria.items():
                # same logic as multidict
                user_value = user.get(key)
                user_values = set(user_value) if isinstance(user_value, (tuple, list)) else {user_value}
                if not any(user_values):
                    break
                elif exact and not user_values & set(values):
                    break
                elif not exact and not any(sv in uv for sv, uv in itertools.product(values, user_values)):
                    break
            else:
                # All criteria matched (no break) -> include this identity.
                yield IdentityInfo(self, identifier, **user)

    def get_identity_groups(self, identifier):
        """Return the set of groups that contain *identifier* as a member."""
        groups = set()
        for group_name in self.settings['groups']:
            group = self.get_group(group_name)
            if identifier in group:
                groups.add(group)
        return groups

    def get_group(self, name):
        """Return the group named *name*, or None if it is not configured."""
        if name not in self.settings['groups']:
            return None
        return self.group_class(self, name)

    def search_groups(self, name, exact=False):
        """Yield groups whose name equals (exact) or contains *name*."""
        compare = operator.eq if exact else operator.contains
        for group_name in self.settings['groups']:
            if compare(group_name, name):
                yield self.group_class(self, group_name)
PypiClean
/ExecFlowSDK-0.3.0.tar.gz/ExecFlowSDK-0.3.0/execflow/wrapper/workflows/pipeline.py
from typing import TYPE_CHECKING

from aiida import orm
from aiida.common.exceptions import InputValidationError, ValidationError
from aiida.engine import run, run_get_node, while_
from aiida.engine.processes.workchains.workchain import WorkChain
from aiida.plugins import CalculationFactory, WorkflowFactory

from execflow.wrapper.data.declarative_pipeline import OTEPipelineData

if TYPE_CHECKING:  # pragma: no cover
    from aiida.engine import ExitCode
    from aiida.engine.processes.workchains.workchain import WorkChainSpec


class OTEPipeline(WorkChain):
    """Run an OTE Pipeline.

    Inputs:

    - **pipeline** (:py:class:`~execflow.wrapper.data.declarative_pipeline.OTEPipelineData`,
      :py:class:`aiida.orm.Dict`, :py:class:`aiida.orm.SinglefileData`,
      :py:class:`aiida.orm.Str`) -- The declarative pipeline as an AiiDA-valid type.
      Either as a path to a YAML file or the explicit content of the YAML file.
    - **run_pipeline** (:py:class:`aiida.orm.Str`) -- The pipeline to run.
      The pipeline name should match a pipeline given in the declarative pipeline
      given in the `pipeline` input.

    Outputs:

    - **session** (:py:class:`aiida.orm.Dict`) -- The OTE session object after running
      the pipeline.

    Outline:

    - :py:meth:`~execflow.wrapper.workflows.pipeline.OTEPipeline.parse_pipeline`
    - :py:meth:`~execflow.wrapper.workflows.pipeline.OTEPipeline.setup`
    - while :py:meth:`~execflow.wrapper.workflows.pipeline.OTEPipeline.not_finished`:

      - :py:meth:`~execflow.wrapper.workflows.pipeline.OTEPipeline.submit_next`
      - :py:meth:`~execflow.wrapper.workflows.pipeline.OTEPipeline.process_current`

    - :py:meth:`~execflow.wrapper.workflows.pipeline.OTEPipeline.finalize`

    Exit Codes:

    - **2** (*ERROR_SUBPROCESS*) -- A subprocess has failed.
    """

    @classmethod
    def define(cls, spec: "WorkChainSpec") -> None:
        """Define inputs, outputs, the outline, and exit codes of the WorkChain."""
        super().define(spec)

        # Inputs
        spec.input(
            "pipeline",
            valid_type=(OTEPipelineData, orm.Dict, orm.SinglefileData, orm.Str),
            required=True,
        )
        spec.input("run_pipeline", valid_type=orm.Str, required=False)

        # Outputs
        spec.output("session", valid_type=orm.Dict)

        # Outline
        spec.outline(
            cls.parse_pipeline,
            cls.setup,
            while_(cls.not_finished)(cls.submit_next, cls.process_current),
            cls.finalize,
        )

        # Exit Codes
        spec.exit_code(2, "ERROR_SUBPROCESS", message="A subprocess has failed.")

    def parse_pipeline(self) -> None:
        """Parse the pipeline input.

        Stores the parsed pipeline and the per-strategy configurations in the context.
        """
        result = run(
            CalculationFactory("execflow.parse_pipeline"),
            pipeline_input=self.inputs.pipeline,
        )
        self.ctx.pipeline = result["result"]
        self.ctx.strategy_configs = result["strategy_configs"]

    def setup(self) -> None:  # pylint: disable=too-many-branches
        """Setup WorkChain

        Steps:

        - Initialize context.
        - Parse declarative pipeline.
        - Create a list of strategies to run, explicitly adding `init` and `get`
          CalcFunctions.

        :raises InputValidationError: if the requested pipeline name is unknown
        :raises ValidationError: if no pipeline name is given but several exist
        """
        self.ctx.current_id = 0
        pipeline: OTEPipelineData = self.ctx.pipeline

        # Determine which pipeline to run: an explicit request must exist in the
        # declarative pipeline; otherwise there must be exactly one to choose from.
        if "run_pipeline" in self.inputs and self.inputs.run_pipeline:
            if self.inputs.run_pipeline.value in pipeline.pipelines:
                run_pipeline_name = self.inputs.run_pipeline.value
            else:
                raise InputValidationError(
                    f"{self.inputs.run_pipeline.value} not found in declarative "
                    "pipeline. Pipelines: "
                    f"{', '.join(repr(_) for _ in pipeline.pipelines)}"
                )
        elif len(pipeline.pipelines) != 1:
            raise ValidationError(
                f"{len(pipeline.pipelines)} pipelines given in the declarative "
                "pipeline. Please specify which pipeline to run through the "
                "'run_pipeline' input."
            )
        else:
            run_pipeline_name = list(pipeline.pipelines)[0]

        strategies = []

        # Initialization: 'init' steps run over the strategies in reverse order.
        for strategy in pipeline.get_strategies(run_pipeline_name, reverse=True):
            strategies.append(
                (
                    "init",
                    strategy.get_type(),
                    self.ctx.strategy_configs[strategy.get_name()],
                )
            )

        # Getting: 'get' steps run over the strategies in forward order.
        for strategy in pipeline.get_strategies(run_pipeline_name):
            strategies.append(
                (
                    "get",
                    strategy.get_type(),
                    self.ctx.strategy_configs[strategy.get_name()],
                )
            )

        self.ctx.strategies = strategies
        self.ctx.ote_session = orm.Dict()

    def not_finished(self) -> bool:
        """Determine whether or not the WorkChain is finished.

        Returns:
            Whether or not the WorkChain is finished based on comparing the current
            strategy index in the list of strategies against the total number of
            strategies.
        """
        return self.ctx.current_id < len(self.ctx.strategies)

    def submit_next(self) -> None:
        """Prepare the current step for submission.

        Run the next strategy's CalcFunction and return its ProcessNode to the
        context.
        """
        strategy_method, strategy_type, strategy_config = self.ctx.strategies[
            self.ctx.current_id
        ]

        # 'function' and 'transformation' strategies are WorkChains; the rest are
        # CalcFunctions.
        strategy_process_cls = (
            WorkflowFactory(f"execflow.{strategy_type}_{strategy_method}")
            if strategy_type in ("function", "transformation")
            else CalculationFactory(f"execflow.{strategy_type}_{strategy_method}")
        )

        self.to_context(
            current=run_get_node(
                strategy_process_cls,
                **{
                    "config": strategy_config,
                    "session": self.ctx.ote_session,
                },
            )[1]
        )

    def process_current(self) -> "ExitCode | None":
        """Process the current step's Node.

        Abort with ``ERROR_SUBPROCESS`` if the process did not finish OK.
        Otherwise, retrieve the returned session update object, store it back to
        the context for the next strategy to use, and advance the strategy index.
        """
        if not self.ctx.current.is_finished_ok:
            self.report(
                f"A subprocess failed with exit status {self.ctx.current.exit_status}:"
                f" {self.ctx.current.exit_message}"
            )
            # Fix: the ERROR_SUBPROCESS exit code was declared in define() but never
            # returned; previously execution continued and the 'result' output lookup
            # below could fail on a node without outputs.
            return self.exit_codes.ERROR_SUBPROCESS

        self.ctx.ote_session = (
            self.ctx.current.base.links.get_outgoing().get_node_by_label("result")
        )
        self.ctx.current_id += 1
        return None

    def finalize(self) -> None:
        """Finalize the WorkChain.

        Set the 'session' output.
        """
        self.out("session", self.ctx.ote_session)
PypiClean
/CartiMorph_nnUNet-1.7.14.tar.gz/CartiMorph_nnUNet-1.7.14/CartiMorph_nnUNet/experiment_planning/alternative_experiment_planning/patch_size/experiment_planner_3DUNet_isotropic_in_mm.py
from copy import deepcopy

import numpy as np

from CartiMorph_nnUNet.experiment_planning.common_utils import get_pool_and_conv_props_poolLateV2
from CartiMorph_nnUNet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
from CartiMorph_nnUNet.network_architecture.generic_UNet import Generic_UNet
from CartiMorph_nnUNet.paths import *


class ExperimentPlannerIso(ExperimentPlanner):
    """
    attempts to create patches that have an isotropic size (in mm, not voxels)

    CAREFUL! this one does not support transpose_forward and transpose_backward
    """

    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super().__init__(folder_with_cropped_data, preprocessed_output_folder)
        # Distinct plans file / data identifier so this planner can coexist with
        # the baseline planner's outputs in the same preprocessed folder.
        self.plans_fname = join(self.preprocessed_output_folder, "nnUNetPlans" + "fixedisoPatchesInmm_plans_3D.pkl")
        self.data_identifier = "nnUNet_isoPatchesInmm"

    def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,
                                 num_modalities, num_classes):
        """Compute the network/training plan for one resolution stage.

        Starts from a patch that is isotropic in millimeters (instead of matching the
        aspect ratio of the median patient shape, as the baseline planner does), then
        shrinks it axis by axis until the estimated VRAM consumption fits the
        reference budget.

        NOTE(review): spacings are assumed to be mm-per-voxel and ``original_shape``
        in voxels, matching the baseline ``ExperimentPlanner`` -- confirm there.
        """
        new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)
        dataset_num_voxels = np.prod(new_median_shape) * num_cases

        # The previous default started from the median patient shape
        # (input_patch_size = new_median_shape); this planner replaces that with an
        # isotropic-in-mm starting patch.
        # compute how many voxels are one mm
        input_patch_size = 1 / np.array(current_spacing)
        # normalize voxels per mm
        input_patch_size /= input_patch_size.mean()
        # create an isotropic patch of size 512x512x512mm
        input_patch_size *= 1 / min(input_patch_size) * 512  # to get a starting value
        input_patch_size = np.round(input_patch_size).astype(int)
        # clip it to the median shape of the dataset because patches larger then that make not much sense
        input_patch_size = [min(i, j) for i, j in zip(input_patch_size, new_median_shape)]

        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
        shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(input_patch_size,
                                                                        self.unet_featuremap_min_edge_length,
                                                                        self.unet_max_numpool,
                                                                        current_spacing)

        # ref = the VRAM budget; here = estimate for the current candidate patch.
        ref = Generic_UNet.use_this_for_batch_size_computation_3D
        here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                            self.unet_base_num_features,
                                                            self.unet_max_num_filters, num_modalities,
                                                            num_classes, pool_op_kernel_sizes,
                                                            conv_per_stage=self.conv_per_stage)
        while here > ref:
            # Here is the difference to ExperimentPlanner: instead of matching the
            # aspect ratio of new_median_shape regardless of spacing, reduce the axis
            # that is currently largest *in mm*, which keeps the patch (approximately)
            # isotropic in mm.
            current_patch_in_mm = new_shp * current_spacing
            axis_to_be_reduced = np.argsort(current_patch_in_mm)[-1]

            # from here on it's the same as before
            tmp = deepcopy(new_shp)
            tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]
            _, _, _, _, shape_must_be_divisible_by_new = \
                get_pool_and_conv_props_poolLateV2(tmp,
                                                   self.unet_featuremap_min_edge_length,
                                                   self.unet_max_numpool,
                                                   current_spacing)
            new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]

            # we have to recompute numpool now:
            network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
            shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(new_shp,
                                                                            self.unet_featuremap_min_edge_length,
                                                                            self.unet_max_numpool,
                                                                            current_spacing)

            here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                                self.unet_base_num_features,
                                                                self.unet_max_num_filters, num_modalities,
                                                                num_classes, pool_op_kernel_sizes,
                                                                conv_per_stage=self.conv_per_stage)
        print(new_shp)

        input_patch_size = new_shp

        batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D  # This is what works with 128**3
        # Scale the batch size up if the patch ended up cheaper than the budget.
        batch_size = int(np.floor(max(ref / here, 1) * batch_size))

        # check if batch size is too large (a single batch must not cover more than
        # a fixed percentage of the whole dataset)
        max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
                                  np.prod(input_patch_size, dtype=np.int64)).astype(int)
        max_batch_size = max(max_batch_size, self.unet_min_batch_size)
        batch_size = max(1, min(batch_size, max_batch_size))

        # NOTE(review): this compares against axis 0 specifically, not the smallest
        # axis -- presumably axis 0 is the (potentially) anisotropic one; confirm
        # against the baseline planner before changing.
        do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[
            0]) > self.anisotropy_threshold

        plan = {
            'batch_size': batch_size,
            'num_pool_per_axis': network_num_pool_per_axis,
            'patch_size': input_patch_size,
            'median_patient_size_in_voxels': new_median_shape,
            'current_spacing': current_spacing,
            'original_spacing': original_spacing,
            'do_dummy_2D_data_aug': do_dummy_2D_data_aug,
            'pool_op_kernel_sizes': pool_op_kernel_sizes,
            'conv_kernel_sizes': conv_kernel_sizes,
        }
        return plan
PypiClean
/CellPhoneDBu-1.1.1.7-py3-none-any.whl/cellphonedb/src/core/models/interaction/interaction_filter.py
from cellphonedb.src.core.core_logger import core_logger
import pandas as pd


def filter_by_any_multidatas(multidatas: pd.DataFrame, interactions: pd.DataFrame) -> pd.DataFrame:
    """
    Filters interactions if any interactions multidatas are in multidatas list
    """
    interactions_filtered = pd.merge(multidatas, interactions, left_on='id_multidata', right_on='multidata_1_id')
    # Fix: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent.
    interactions_filtered = pd.concat([
        interactions_filtered,
        pd.merge(multidatas, interactions, left_on='id_multidata', right_on='multidata_2_id'),
    ])

    interactions_filtered.drop_duplicates('id_interaction', inplace=True)
    interactions_filtered.reset_index(drop=True, inplace=True)

    return interactions_filtered[interactions.columns.values]


def filter_by_multidatas(multidatas: pd.DataFrame, interactions: pd.DataFrame) -> pd.DataFrame:
    """
    Filters interactions if both interactions multidatas are in multidatas list
    """
    interactions_filtered = pd.merge(multidatas, interactions, left_on='id_multidata', right_on='multidata_1_id')
    interactions_filtered = pd.merge(multidatas, interactions_filtered, left_on='id_multidata',
                                     right_on='multidata_2_id')

    interactions_filtered.drop_duplicates('id_interaction', inplace=True)
    interactions_filtered.reset_index(drop=True, inplace=True)

    return interactions_filtered[interactions.columns.values]


# TODO: add test
def filter_by(parameter: str, multidatas: pd.DataFrame, interactions: pd.DataFrame, suffix: ()) -> pd.DataFrame:
    """
    Filters interactions if both interactions multidatas are in multidatas list

    NOTE(review): the first merge uses the literal, never-formatted placeholder
    ``right_on='{}_{}'`` (so it will raise a KeyError on any real DataFrame) and the
    ``suffix`` parameter is unused -- this function looks unfinished (see the TODO
    above). Left behaviorally untouched; confirm the intended column name before fixing.
    """
    interactions_filtered = pd.merge(multidatas, interactions, left_on=parameter, right_on='{}_{}')
    interactions_filtered = pd.merge(multidatas, interactions_filtered, left_on=parameter,
                                     right_on='multidata_2_id')

    interactions_filtered.drop_duplicates('id_interaction', inplace=True)
    interactions_filtered.reset_index(drop=True, inplace=True)

    return interactions_filtered[interactions.columns.values]


def filter_by_min_score2(interactions: pd.DataFrame, min_score2: float):
    """Return only interactions whose ``score_2`` is strictly greater than *min_score2*."""
    filtered_interactions = interactions[interactions['score_2'] > min_score2]
    return filtered_interactions


def filter_by_receptor_ligand_integrin(proteins: pd.DataFrame, interactions: pd.DataFrame) -> pd.DataFrame:
    """
    Returns a DataFrame of enabled integrin interactions
    """
    core_logger.debug('Filtering by integrin')
    multidata_receptors = proteins[proteins['integrin_interaction']]

    # Receptor on side 1, ligand on side 2 ...
    receptor_interactions = pd.merge(multidata_receptors, interactions, left_on='id_multidata',
                                     right_on='multidata_1_id')
    enabled_interactions = pd.merge(proteins, receptor_interactions, left_on='id_multidata',
                                    right_on='multidata_2_id', suffixes=['_ligands', '_receptors'])

    # ... and the inverted orientation (receptor on side 2, ligand on side 1).
    receptor_interactions_inverted = pd.merge(multidata_receptors, interactions, left_on='id_multidata',
                                              right_on='multidata_2_id')
    enabled_interactions_inverted = pd.merge(proteins, receptor_interactions_inverted, left_on='id_multidata',
                                             right_on='multidata_1_id', suffixes=['_ligands', '_receptors'])

    # Fix: DataFrame.append was removed in pandas 2.0; pd.concat is the supported
    # replacement.
    enabled_interactions = pd.concat([enabled_interactions, enabled_interactions_inverted]).reset_index(drop=True)

    enabled_interactions.drop_duplicates(inplace=True)

    return enabled_interactions


def filter_by_is_interactor(interactions: pd.DataFrame) -> pd.DataFrame:
    """Return only interactions flagged as cellphonedb interactors."""
    interactions_filtered = interactions[interactions['is_cellphonedb_interactor']]
    return interactions_filtered
PypiClean
/NiMARE-0.2.0rc2.tar.gz/NiMARE-0.2.0rc2/README.md
# NiMARE: Neuroimaging Meta-Analysis Research Environment A Python library for coordinate- and image-based meta-analysis. [![Latest Version](https://img.shields.io/pypi/v/nimare.svg)](https://pypi.python.org/pypi/nimare/) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/nimare.svg)](https://pypi.python.org/pypi/nimare/) [![GitHub Repository](https://img.shields.io/badge/Source%20Code-neurostuff%2Fnimare-purple)](https://github.com/neurostuff/NiMARE) [![DOI](https://zenodo.org/badge/117724523.svg)](https://zenodo.org/badge/latestdoi/117724523) [![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![Test Status](https://github.com/neurostuff/NiMARE/actions/workflows/testing.yml/badge.svg)](https://github.com/neurostuff/NiMARE/actions/workflows/testing.yml) [![Documentation Status](https://readthedocs.org/projects/nimare/badge/?version=stable)](http://nimare.readthedocs.io/en/stable/?badge=stable) [![Codecov](https://codecov.io/gh/neurostuff/NiMARE/branch/main/graph/badge.svg)](https://codecov.io/gh/neurostuff/nimare) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) [![Join the chat at https://mattermost.brainhack.org/brainhack/channels/nimare](https://img.shields.io/badge/mattermost-join_chat%20%E2%86%92-brightgreen.svg)](https://mattermost.brainhack.org/brainhack/channels/nimare) [![RRID:SCR_017398](https://img.shields.io/badge/RRID-SCR__017398-blue.svg)](https://scicrunch.org/scicrunch/Resources/record/nlx_144509-1/SCR_017398/resolver?q=nimare&l=nimare) [![DOI](http://neurolibre.herokuapp.com/papers/10.55458/neurolibre.00007/status.svg)](https://doi.org/10.55458/neurolibre.00007) Currently, NiMARE implements a range of image- and coordinate-based meta-analytic algorithms, as well as several advanced meta-analytic methods, such as automated annotation and functional decoding.
## Installation Please see our [installation instructions](https://nimare.readthedocs.io/en/stable/installation.html) for information on how to install NiMARE. ### Installation with pip ``` pip install nimare ``` ### Local installation (development version) ``` pip install git+https://github.com/neurostuff/NiMARE.git ``` ## Citing NiMARE If you use NiMARE in your research, we recommend citing the Zenodo DOI associated with the NiMARE version you used, as well as the NeuroLibre preprint for the NiMARE Jupyter book. You can find the Zenodo DOI associated with each NiMARE release at https://zenodo.org/record/6642243#.YqiXNy-B1KM. ```BibTeX # This is the NeuroLibre preprint. @article{Salo2022, doi = {10.55458/neurolibre.00007}, url = {https://doi.org/10.55458/neurolibre.00007}, year = {2022}, publisher = {The Open Journal}, volume = {1}, number = {1}, pages = {7}, author = {Taylor Salo and Tal Yarkoni and Thomas E. Nichols and Jean-Baptiste Poline and Murat Bilgel and Katherine L. Bottenhorn and Dorota Jarecka and James D. Kent and Adam Kimbler and Dylan M. Nielson and Kendra M. Oudyk and Julio A. Peraza and Alexandre Pérez and Puck C. Reeders and Julio A. Yanes and Angela R. Laird}, title = {NiMARE: Neuroimaging Meta-Analysis Research Environment}, journal = {NeuroLibre} } # This is the Zenodo citation for version 0.0.11. @software{salo_taylor_2022_5826281, author = {Salo, Taylor and Yarkoni, Tal and Nichols, Thomas E. and Poline, Jean-Baptiste and Kent, James D. and Gorgolewski, Krzysztof J. and Glerean, Enrico and Bottenhorn, Katherine L. and Bilgel, Murat and Wright, Jessey and Reeders, Puck and Kimbler, Adam and Nielson, Dylan N. and Yanes, Julio A. and Pérez, Alexandre and Oudyk, Kendra M. and Jarecka, Dorota and Enge, Alexander and Peraza, Julio A. 
and Laird, Angela R.}, title = {neurostuff/NiMARE: 0.0.11}, month = jan, year = 2022, publisher = {Zenodo}, version = {0.0.11}, doi = {10.5281/zenodo.5826281}, url = {https://doi.org/10.5281/zenodo.5826281} } ``` To cite NiMARE in your manuscript, we recommend something like the following: > We used NiMARE v0.0.11 (RRID:SCR_017398; Salo et al., 2022a; Salo et al., 2022b). ## Contributing Please see our [contributing guidelines](https://github.com/neurostuff/NiMARE/blob/main/CONTRIBUTING.md) for more information on contributing to NiMARE. We ask that all contributions to `NiMARE` respect our [code of conduct](https://github.com/neurostuff/NiMARE/blob/main/CODE_OF_CONDUCT.md).
PypiClean
/ChadBot2-0.1.tar.gz/ChadBot2-0.1/ChadBot/chadBot.py
import os
from typing import List, Tuple, Dict
from numpy.testing._private.utils import assert_raises
from .core.FAQ import Answer, FAQ, Question, FAQUnit, FAQOutput
import numpy as np
from sentence_transformers.readers import InputExample
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from .core.exceptions import *
from .modelRelated.utils import cosineSim, convertForBatchHardTripletLoss
from torch.utils.data import DataLoader
import warnings


class FAQWrapper:
    """Wraps a single FAQ with an id and the matrix of its question vectors."""

    def __init__(self, id: int, FAQ: FAQ):
        self.FAQ = FAQ
        self.id = id
        self.vectors = self.getVectors()

    def getVectors(self) -> np.ndarray:
        """Stack the vector representation of every FAQ unit into one array.

        :raises VectorNotAssignedException: if any unit has no vector assigned yet
        """
        faq = self.FAQ.FAQ
        vectors = []
        for unit in faq:
            vector = unit.vectorRep
            if vector is None:
                raise VectorNotAssignedException()
            vectors.append(vector)
        return np.array(vectors)

    def _getClosestQuestions(self, rankedIndices: List[int], K: int, topAnswer: str):
        """Collect up to K distinct original questions, skipping *topAnswer*'s question."""
        includedSet = set()
        includedSet.add(topAnswer)
        closestQuestions = []
        for ind in rankedIndices:
            currentUnit = self.FAQ.FAQ[ind]
            # 'orignal' (sic) is the attribute name defined by FAQUnit.
            currentOrignal = currentUnit.orignal.text
            if currentOrignal not in includedSet:
                includedSet.add(currentOrignal)
                closestQuestions.append(currentOrignal)
                if len(closestQuestions) == K:
                    break
        return closestQuestions

    def solveForQuery(self, queryVector: np.ndarray, K: int, topSimilar: int = 5) -> FAQOutput:
        """Answer a query against this FAQ by cosine similarity over its vectors.

        Scores of the top-K most similar units are summed per label; the label with
        the highest total wins.
        """
        # queryVector has shape (1, embeddingDim)
        if len(queryVector.shape) == 1:
            queryVector = queryVector.reshape(1, -1)
        assert queryVector.shape[0] == 1
        cosineScores = cosineSim(queryVector, self.vectors)[0]
        cosineScores = cosineScores.tolist()
        rankedIndices = [x for x in range(len(cosineScores))]
        rankedIndices.sort(reverse=True, key=lambda x: cosineScores[x])
        maxScore = cosineScores[rankedIndices[0]]
        # Now rankedIndices holds the indices ordered from highest to lowest similarity.
        competeDict = dict()
        for ind in rankedIndices[:K]:  # using top K results
            currentlabel = self.FAQ.FAQ[ind].label
            if currentlabel not in competeDict:
                competeDict[currentlabel] = 0
            competeDict[currentlabel] += cosineScores[ind]
        competeList = [(label, score) for label, score in competeDict.items()]
        competeList.sort(key=lambda x: x[1], reverse=True)
        bestScore = competeList[0][1]
        bestLabel = competeList[0][0]
        bestAnswer = self.FAQ.getAnswerWithLabel(bestLabel)
        bestMatchQuestion = self.FAQ.getQuestionWithLabel(bestLabel)
        return FAQOutput(faqId=self.id, faqName=self.FAQ.name, answer=bestAnswer,
                         question=bestMatchQuestion, score=bestScore,
                         similarQuestions=self._getClosestQuestions(rankedIndices, topSimilar,
                                                                    bestMatchQuestion.text),
                         maxScore=maxScore)


class ChadBot:
    """Multi-FAQ question answering on top of a SentenceTransformer encoder."""

    def __init__(self, FAQs: List[FAQ], modelPath: str = None):
        # Fix: compare to None with 'is', not '=='.
        if modelPath is None:
            modelPath = 'roberta-base-nli-stsb-mean-tokens'
        self.model: SentenceTransformer = SentenceTransformer(modelPath)
        self.FAQs: List[FAQWrapper] = []
        self.idToFAQ: Dict[int, FAQWrapper] = dict()
        self._registerFAQs(FAQs=FAQs)

    def _registerFAQs(self, FAQs: List[FAQ]):
        """
        Registers all the given FAQs: assigns vectors where missing, then builds the
        global index (id -> FAQWrapper) used for combined question answering.
        """
        assert len(FAQs) > 0
        # All FAQs must be usable before registration.
        for faq in FAQs:
            if not faq.isUsable():
                raise ValueError("All faqs passed to chadBot must be Usable !!!! please build FAQ again or load from preexisting one")
        for faq in FAQs:
            if faq.hasVectorsAssigned():
                warnings.warn("Vectors already assigned to {} FAQ , if you want to reassign using the current model please clear the vectors using resetAssigned vectors".format(faq.name))
            else:
                print("Assigning vectors to {} faq".format(faq.name))
                faq._assignVectors(model=self.model)
        # Fix: use enumerate instead of a manually incremented 'id' (which also
        # shadowed the builtin).
        for faqId, faq in enumerate(FAQs):
            newFAQ = FAQWrapper(faqId, faq)
            self.FAQs.append(newFAQ)
            self.idToFAQ[faqId] = newFAQ

    def findClosest(self, query: str, K: int = 3, topSimilar: int = 5) -> List[FAQOutput]:
        """
        Find the closest match from each FAQ so that the top contenders from the
        different FAQs can be compared against each other.

        Fix: the return annotation was FAQOutput although a list is returned.
        """
        competeList: List[FAQOutput] = []
        queryVector = self.model.encode([query])[0].reshape(1, -1)
        for faq in self.FAQs:
            competeList.append(faq.solveForQuery(queryVector=queryVector, K=K, topSimilar=topSimilar))
        competeList.sort(key=lambda x: x.score, reverse=True)
        # competeList now has the answer from each FAQ in descending score order.
        return competeList

    def findClosestFromFAQ(self, faqId: int, query: str, K: int = 3, topSimilar: int = 5) -> FAQOutput:
        """Answer *query* using only the FAQ registered under *faqId*."""
        assert faqId in self.idToFAQ
        faq = self.idToFAQ[faqId]
        queryVector = self.model.encode([query])[0].reshape(1, -1)
        return faq.solveForQuery(queryVector=queryVector, K=K, topSimilar=topSimilar)

    def train(self, outputPath: str, batchSize=16, epochs: int = 1, **kwargs):
        """
        Trains the model using batch-hard triplet loss. For the other kwargs take a
        look at the sentence-transformers documentation.
        """
        assert batchSize > 4 and epochs > 0 and os.path.exists(outputPath)
        trainingObjectives = []
        # training each faq on a different objective
        for faq in self.FAQs:
            trainExamples = convertForBatchHardTripletLoss(faq.FAQ)
            trainDataset = SentencesDataset(trainExamples, self.model)
            trainDataloader = DataLoader(trainDataset, shuffle=True, batch_size=batchSize)
            trainLoss = losses.BatchHardTripletLoss(model=self.model)
            trainingObjectives.append((trainDataloader, trainLoss))
        self.model.fit(train_objectives=trainingObjectives, warmup_steps=100, epochs=epochs,
                       save_best_model=False, output_path=outputPath, **kwargs)

    def saveFAQs(self, rootDirPath: str):
        """Persist every registered FAQ under *rootDirPath*."""
        for faq in self.FAQs:
            coreFaQ = faq.FAQ
            coreFaQ.save(rootDirPath)

    def saveFAQ(self, id: int, rootDirPath: str):
        """Persist the single FAQ registered under *id* (kept as a parameter name for callers)."""
        assert id in self.idToFAQ
        self.idToFAQ[id].FAQ.save(rootDirPath)
PypiClean
/AoikRegistryEditor-0.1.0-py3-none-any.whl/aoikregistryeditor/aoikargutil.py
"""Argument-parsing helpers for ``argparse``.

Provides value-validating ``type=`` callables (e.g. ``int_gt0``) plus
constraint checkers that enforce relationships between arguments:

- ``ensure_exc``: arguments that must not appear together.
- ``ensure_one``: at least one argument of a group is required.
- ``ensure_two``: one argument requires the presence of another.
- ``ensure_spec``: dispatches a dict holding the above constraint specs.
"""
from __future__ import absolute_import

from argparse import ArgumentTypeError
import itertools
import re
import sys

#/
__version__ = '0.2'


#/
def str_nonempty(txt):
    """Argparse type: accept any non-empty string."""
    if txt != '':
        return txt
    else:
        raise ArgumentTypeError('Empty value is not allowed.')


#/
def str_strip_nonempty(txt):
    """Argparse type: strip whitespace, then require a non-empty string."""
    txt = txt.strip()
    if txt != '':
        return txt
    else:
        raise ArgumentTypeError('Empty value is not allowed.')


#/
def bool_0or1(txt):
    """Argparse type: map '0' -> False and '1' -> True."""
    if txt == '0':
        return False
    elif txt == '1':
        return True
    else:
        raise ArgumentTypeError('|%s| is not 0 or 1.' % txt)


#/
def float_lt0(txt):
    """Argparse type: float strictly less than zero."""
    try:
        val = float(txt)
        assert val < 0
    except Exception:
        raise ArgumentTypeError('|%s| is not a negative number.' % txt)
    return val


#/
def float_le0(txt):
    """Argparse type: float less than or equal to zero."""
    try:
        val = float(txt)
        assert val <= 0
    except Exception:
        raise ArgumentTypeError('|%s| is not zero or a negative number.' % txt)
    return val


#/
def float_gt0(txt):
    """Argparse type: float strictly greater than zero."""
    try:
        val = float(txt)
        assert val > 0
    except Exception:
        raise ArgumentTypeError('|%s| is not a positive number.' % txt)
    return val


#/
def float_ge0(txt):
    """Argparse type: float greater than or equal to zero."""
    try:
        val = float(txt)
        assert val >= 0
    except Exception:
        raise ArgumentTypeError('|%s| is not zero or a positive number.' % txt)
    return val


#/
def int_lt0(txt):
    """Argparse type: integer strictly less than zero."""
    try:
        val = int(txt)
        assert val < 0
    except Exception:
        raise ArgumentTypeError('|%s| is not a negative integer.' % txt)
    return val


#/
def int_le0(txt):
    """Argparse type: integer less than or equal to zero."""
    try:
        val = int(txt)
        assert val <= 0
    except Exception:
        raise ArgumentTypeError('|%s| is not zero or a negative integer.' % txt)
    return val


#/
def int_gt0(txt):
    """Argparse type: integer strictly greater than zero."""
    try:
        val = int(txt)
        assert val > 0
    except Exception:
        raise ArgumentTypeError('|%s| is not a positive integer.' % txt)
    return val


#/
def int_ge0(txt):
    """Argparse type: integer greater than or equal to zero."""
    try:
        val = int(txt)
        assert val >= 0
    except Exception:
        raise ArgumentTypeError('|%s| is not zero or a positive integer.'
            % txt)
    return val


#/
def _arg_rec(arg_name):
    """Compile a regex matching ``arg_name`` at the start of a command-line
    token, optionally followed by ``=`` or a digit (e.g. ``-v``, ``-v=1``,
    ``-v2``).

    FIX: the name is passed through ``re.escape`` so argument names that
    contain regex metacharacters (e.g. ``+v``) no longer raise ``re.error``.
    Plain ``-x``/``--xx`` names match exactly as before.
    """
    return re.compile('^%s($|=|[0-9])' % re.escape(arg_name))


#/
def _arg_exists(arg_name, args):
    """Return True if ``arg_name`` appears as one of the tokens in ``args``."""
    rec = _arg_rec(arg_name)
    return any(bool(rec.search(arg)) for arg in args)


#/
def _to_spec_list(spec):
    """Normalize ``spec`` to a list of specs.

    A list is taken as-is (several specs); a tuple is a single spec and is
    wrapped in a list. Anything else is a programming error.
    """
    if isinstance(spec, list):
        return spec
    elif isinstance(spec, tuple):
        return [spec]
    else:
        assert False, spec


#/
def ensure_exc(parser, spec, args=None):
    """Ensure mutually exclusive arguments do not appear together in ``args``.

    ``spec`` is one tuple (or a list of tuples) of argument names; every
    two-element combination must not co-occur. The two-element special form
    ``('-a', ['-b', '-c'])`` means ``-a`` excludes each of ``-b`` and ``-c``.
    Calls ``parser.error`` (which raises) on the first violation.
    """
    #/
    if args is None:
        args = sys.argv[1:]

    #/
    for spec in _to_spec_list(spec):
        #/ a spec with fewer than two names can not conflict
        spec_len = len(spec)

        if spec_len < 2:
            continue

        if spec_len == 2:
            s0, s1 = spec

            #/ special syntax e.g. ('-a', ['-b', '-c'])
            if isinstance(s1, (list, tuple)):
                #/ transform to pairs [('-a', '-b'), ('-a', '-c')]
                pair_s = [(s0, x) for x in s1]
            #/ regular syntax e.g. ('-a', '-b')
            else:
                pair_s = [spec]
        #/ regular syntax e.g. ('-a', '-b', '-c')
        else:
            #/ transform to pairs [('-a', '-b'), ('-a', '-c'), ('-b', '-c')]
            pair_s = list(itertools.combinations(spec, 2))

        #/
        for arg_a, arg_b in pair_s:
            if _arg_exists(arg_a, args) and _arg_exists(arg_b, args):
                msg = 'argument %s: not allowed with argument %s' % (arg_a, arg_b)

                parser.error(msg)
                ## raise error


#/
def ensure_one_arg_specs_to_arg_names(specs):
    """Extract plain argument names from ``ensure_one`` arg specs.

    A spec entry is either a bare name or a ``(name, sub_spec)`` pair.
    """
    #/
    arg_name_s = []

    for arg_spec_x in specs:
        if isinstance(arg_spec_x, str):
            #/
            arg_name_s.append(arg_spec_x)
        #/
        elif isinstance(arg_spec_x, (list, tuple)):
            #/
            arg_name_s.append(arg_spec_x[0])
        #/
        else:
            assert False, arg_spec_x

    #/
    return arg_name_s


#/
def ensure_one(parser, spec, args=None):
    """Ensure at least one argument of each spec group is present.

    Each group is a list or tuple of entries; an entry is a bare name or a
    ``(name, sub_spec)`` pair whose sub_spec is checked recursively via
    ``ensure_spec`` when the name is present. Tuple groups additionally make
    their members mutually exclusive. Calls ``parser.error`` when a group
    has no member present.
    """
    #/
    if args is None:
        args = sys.argv[1:]

    #/
    for arg_spec_s in _to_spec_list(spec):
        #/
        spec_pass = False

        #/
        for arg_spec in arg_spec_s:
            #/
            if isinstance(arg_spec, str):
                arg_name = arg_spec
                sub_spec = None
            #/
            elif isinstance(arg_spec, (list, tuple)):
                arg_name = arg_spec[0]
                sub_spec = arg_spec[1]
            #/
            else:
                assert False, arg_spec

            #/
            if _arg_exists(arg_name, args):
                #/ a tuple group means its members are also mutually exclusive
                if isinstance(arg_spec_s, tuple):
                    exc_arg_name_s = ensure_one_arg_specs_to_arg_names(arg_spec_s)

                    ensure_exc(parser=parser, spec=tuple(exc_arg_name_s), args=args)

                #/
                if sub_spec is not None:
                    ensure_spec(parser=parser, spec=sub_spec, args=args)

                #/
                spec_pass = True
                break

        #/
        if not spec_pass:
            arg_name_s = ensure_one_arg_specs_to_arg_names(arg_spec_s)

            msg = """one of the arguments %s is required""" % (', '.join(arg_name_s))

            parser.error(msg)
            ## raise error


#/
def ensure_two(parser, spec, args=None):
    """Ensure dependent arguments: each spec is ``(arg_a_spec, arg_b_spec)``.

    When an ``arg_a`` is present, ``arg_b_spec`` must be satisfied:
    a bare name requires that argument, a list requires any one of its
    names, and a tuple requires all of them. Calls ``parser.error`` on the
    first unmet requirement.
    """
    #/
    if args is None:
        args = sys.argv[1:]

    #/
    for arg_a_spec, arg_b_spec in _to_spec_list(spec):
        #/
        if isinstance(arg_a_spec, (list, tuple)):
            arg_a_s = arg_a_spec
        else:
            arg_a_s = [arg_a_spec]

        #/
        for arg_a in arg_a_s:
            #/ requirement only applies when arg_a is actually given
            if not _arg_exists(arg_a, args):
                continue

            #/
            if isinstance(arg_b_spec, (list, tuple)):
                arg_b_s = arg_b_spec
            else:
                arg_b_s = [arg_b_spec]

            #/ tuple -> all of arg_b_s required; list/str -> any one suffices
            req_all_arg_bs = isinstance(arg_b_spec, tuple)

            #/
            arg_b_exists = False

            for arg_b in arg_b_s:
                #/
                arg_b_exists = _arg_exists(arg_b, args)

                #/ short-circuit: "any" stops at first hit,
                #/ "all" stops at first miss
                if arg_b_exists:
                    if not req_all_arg_bs:
                        break
                else:
                    if req_all_arg_bs:
                        break

            #/
            if not arg_b_exists:
                #/
                if isinstance(arg_b_spec, tuple):
                    msg = 'argument %s: requires all of the arguments %s' % (
                        arg_a, ', '.join(arg_b_spec))
                #/
                elif isinstance(arg_b_spec, list):
                    msg = 'argument %s: requires one of the arguments %s' % (
                        arg_a, ', '.join(arg_b_spec))
                #/
                else:
                    msg = 'argument %s: requires argument %s' % (arg_a, arg_b_spec)

                parser.error(msg)
                ## raise error


#/ keys of the constraint dict accepted by ensure_spec
SPEC_DI_K_EXC = 'exc'

SPEC_DI_K_ONE = 'one'

SPEC_DI_K_TWO = 'two'


def ensure_spec(parser, spec, args=None):
    """Check a dict of constraint specs: 'one', 'two' and 'exc' keys are
    dispatched to ``ensure_one``, ``ensure_two`` and ``ensure_exc``."""
    #/
    if args is None:
        args = sys.argv[1:]

    #/
    one_spec = spec.get(SPEC_DI_K_ONE, None)

    if one_spec is not None:
        ensure_one(parser=parser, spec=one_spec, args=args)

    #/
    two_spec = spec.get(SPEC_DI_K_TWO, None)

    if two_spec is not None:
        ensure_two(parser=parser, spec=two_spec, args=args)

    #/
    exc_spec = spec.get(SPEC_DI_K_EXC, None)

    if exc_spec is not None:
        ensure_exc(parser=parser, spec=exc_spec, args=args)
PypiClean
/KratosContactStructuralMechanicsApplication-9.4-cp311-cp311-win_amd64.whl/KratosMultiphysics/ContactStructuralMechanicsApplication/mpc_contact_structural_mechanics_implicit_dynamic_solver.py
import KratosMultiphysics

# Import applications
import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication
import KratosMultiphysics.ContactStructuralMechanicsApplication as ContactStructuralMechanicsApplication

# Import the implicit solver (the explicit one is derived from it)
from KratosMultiphysics.StructuralMechanicsApplication import structural_mechanics_implicit_dynamic_solver

# Import auxiliary methods
from KratosMultiphysics.ContactStructuralMechanicsApplication import auxiliary_methods_solvers

# Import convergence_criteria_factory
from KratosMultiphysics.StructuralMechanicsApplication import convergence_criteria_factory


def GetDefaults():
    """Return the default MPC contact settings provided by the auxiliary solver helpers."""
    return auxiliary_methods_solvers.AuxiliaryMPCContactSettings()


def CreateSolver(model, custom_settings):
    """Factory entry point: build an MPCContactImplicitMechanicalSolver for the given model/settings."""
    return MPCContactImplicitMechanicalSolver(model, custom_settings)


class MPCContactImplicitMechanicalSolver(structural_mechanics_implicit_dynamic_solver.ImplicitMechanicalSolver):
    """The MPC contact implicit dynamic solver.

    This class creates the mechanical solvers for contact implicit dynamic analysis.
    It currently supports linear and Newton-Raphson strategies.

    Public member variables:
    See structural_mechanics_solver.py for more information.
    """

    def __init__(self, model, custom_settings):
        # Settings validation is handled by ValidateSettings() below rather
        # than by the base-class default mechanism.
        self._validate_settings_in_baseclass=True # To be removed eventually

        # Construct the base solver.
        super().__init__(model, custom_settings)

        # Keep a handle on the contact-specific sub-settings and fill in any
        # entries missing from the user input with the defaults.
        self.mpc_contact_settings = self.settings["mpc_contact_settings"]
        self.mpc_contact_settings.RecursivelyAddMissingParameters(GetDefaults()["mpc_contact_settings"])

        # Setting the parameters
        auxiliary_methods_solvers.AuxiliaryMPCSetSettings(self.settings, self.mpc_contact_settings)

        # Logger
        KratosMultiphysics.Logger.PrintInfo("::[MPCContactImplicitMechanicalSolver]:: ", "Construction finished")

    def ValidateSettings(self):
        """This function validates the settings of the solver
        """
        auxiliary_methods_solvers.AuxiliaryValidateSettings(self)

    def AddVariables(self):
        """Add the base structural variables plus the contact-related nodal variables."""
        super().AddVariables()

        # We add the contact related variables
        contact_type = self.mpc_contact_settings["contact_type"].GetString()
        auxiliary_methods_solvers.AuxiliaryMPCAddVariables(self.main_model_part, contact_type)

    def Initialize(self):
        """Initialize the base solver, then set contact-specific process flags."""
        KratosMultiphysics.Logger.PrintInfo("::[MPCContactImplicitMechanicalSolver]:: ", "Initializing ...")

        super().Initialize() # The mechanical solver is created here.

        # We set the flag INTERACTION
        if self.mpc_contact_settings["simplified_semi_smooth_newton"].GetBool():
            computing_model_part = self.GetComputingModelPart()
            computing_model_part.ProcessInfo.Set(KratosMultiphysics.INTERACTION, True)

        KratosMultiphysics.Logger.PrintInfo("::[MPCContactImplicitMechanicalSolver]:: ", "Finished initialization.")

    def ComputeDeltaTime(self):
        """Return the time step as computed by the contact auxiliary helper."""
        return auxiliary_methods_solvers.AuxiliaryComputeDeltaTime(self.main_model_part, self.GetComputingModelPart(), self.settings, self.mpc_contact_settings)

    #### Private functions ####

    def _CreateConvergenceCriterion(self):
        # Combine the standard mechanical criterion with the MPC contact
        # criterion; both must be satisfied (AndCriteria).
        convergence_criterion = convergence_criteria_factory.convergence_criterion(self._get_convergence_criterion_settings())
        conv_criteria = convergence_criterion.mechanical_convergence_criterion
        contact_criteria = ContactStructuralMechanicsApplication.MPCContactCriteria()
        return KratosMultiphysics.AndCriteria(conv_criteria, contact_criteria)

    def _CreateSolutionStrategy(self):
        # Contact problems always use the dedicated Newton-Raphson strategy.
        mechanical_solution_strategy = self._create_contact_newton_raphson_strategy()
        return mechanical_solution_strategy

    def _create_contact_newton_raphson_strategy(self):
        # Assemble scheme, convergence criterion and builder-and-solver, then
        # delegate the strategy construction to the auxiliary helper.
        computing_model_part = self.GetComputingModelPart()
        self.mechanical_scheme = self._GetScheme()
        self.mechanical_convergence_criterion = self._GetConvergenceCriterion()
        self.builder_and_solver = self._GetBuilderAndSolver()
        return auxiliary_methods_solvers.AuxiliaryMPCNewton(computing_model_part,
                                                            self.mechanical_scheme,
                                                            self.mechanical_convergence_criterion,
                                                            self.builder_and_solver,
                                                            self.settings,
                                                            self.mpc_contact_settings)

    @classmethod
    def GetDefaultParameters(cls):
        """Return this solver's defaults merged over the base-class defaults."""
        this_defaults = GetDefaults()
        this_defaults.RecursivelyAddMissingParameters(super(MPCContactImplicitMechanicalSolver, cls).GetDefaultParameters())
        return this_defaults
PypiClean
/MufSim-1.2.2.tar.gz/MufSim-1.2.2/mufsim/insts/connections.py
import mufsim.gamedb as db
import mufsim.stackitems as si
from mufsim.interface import network_interface as netifc
from mufsim.logger import log
# from mufsim.errors import MufRuntimeError
from mufsim.insts.base import Instruction, instr

# Connection-related MUF primitives for the simulator. Each class implements
# one instruction; @instr registers it under the given MUF word. `fr` is the
# executing frame: arguments are popped from and results pushed onto its
# data stack.


@instr("awake?")
class InstAwakeP(Instruction):
    """Pop a player object; push 1 if that player is online, else 0."""

    def execute(self, fr):
        who = fr.data_pop_object()
        fr.data_push(1 if netifc.is_user_online(who.dbref) else 0)


@instr("online")
class InstOnline(Instruction):
    """Push every online user onto the stack, followed by the user count."""

    def execute(self, fr):
        users = netifc.get_users_online()
        for who in users:
            fr.data_push(who)
        fr.data_push(len(users))


@instr("online_array")
class InstOnlineArray(Instruction):
    """Push the list of online users as a single array."""

    def execute(self, fr):
        fr.data_push_list(netifc.get_users_online())


@instr("concount")
class InstConCount(Instruction):
    """Push the number of open connection descriptors."""

    def execute(self, fr):
        fr.data_push(len(netifc.get_descriptors()))


@instr("condbref")
class InstConDBRef(Instruction):
    """Pop a connection number; push the dbref of the user on it."""

    def execute(self, fr):
        con = fr.data_pop(int)
        descr = netifc.descr_from_con(con)
        # NOTE(review): unlike the siblings below, no `descr >= 0` guard;
        # presumably descr_dbref tolerates invalid descriptors -- confirm.
        fr.data_push(si.DBRef(netifc.descr_dbref(descr)))


@instr("contime")
class InstConTime(Instruction):
    """Pop a connection number; push its connect time (0 if invalid)."""

    def execute(self, fr):
        con = fr.data_pop(int)
        descr = netifc.descr_from_con(con)
        if descr >= 0:
            fr.data_push(netifc.descr_time(descr))
        else:
            fr.data_push(0)


@instr("conidle")
class InstConIdle(Instruction):
    """Pop a connection number; push its idle time (0 if invalid)."""

    def execute(self, fr):
        con = fr.data_pop(int)
        descr = netifc.descr_from_con(con)
        if descr >= 0:
            fr.data_push(netifc.descr_idle(descr))
        else:
            fr.data_push(0)


@instr("conhost")
class InstConHost(Instruction):
    """Pop a connection number; push its host string ("" if invalid)."""

    def execute(self, fr):
        con = fr.data_pop(int)
        descr = netifc.descr_from_con(con)
        if descr >= 0:
            fr.data_push(netifc.descr_host(descr))
        else:
            fr.data_push("")


@instr("conuser")
class InstConUser(Instruction):
    """Pop a connection number; push the connected user's name ("" if invalid)."""

    def execute(self, fr):
        con = fr.data_pop(int)
        descr = netifc.descr_from_con(con)
        if descr >= 0:
            who = netifc.descr_user(descr)
            fr.data_push(db.getobj(who).name)
        else:
            fr.data_push("")


@instr("conboot")
class InstConBoot(Instruction):
    """Pop a connection number and disconnect it; invalid numbers are ignored."""

    def execute(self, fr):
        con = fr.data_pop(int)
        descr = netifc.descr_from_con(con)
        if descr >= 0:
            who = netifc.descr_dbref(descr)
            netifc.descr_disconnect(descr)
            log("BOOTED DESCRIPTOR %d: %s" % (descr, db.getobj(who)))


@instr("connotify")
class InstConNotify(Instruction):
    """Pop a message and a connection number; log the would-be notification.

    In the simulator the message is only logged, not actually delivered.
    """

    def execute(self, fr):
        fr.check_underflow(2)
        # Stack order: connection number is pushed first, message on top.
        msg = fr.data_pop(str)
        con = fr.data_pop(int)
        descr = netifc.descr_from_con(con)
        if descr >= 0:
            who = netifc.descr_dbref(descr)
            log("NOTIFY TO DESCR %d, USER %s: %s" % (descr, db.getobj(who), msg))


@instr("condescr")
class InstConDescr(Instruction):
    """Pop a connection number; push its descriptor (negative if invalid)."""

    def execute(self, fr):
        con = fr.data_pop(int)
        descr = netifc.descr_from_con(con)
        fr.data_push(descr)


# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
PypiClean
/fastmbar-1.4.1.tar.gz/fastmbar-1.4.1/docs/source/dialanine_PMF.rst
Example 2. Compute a 2D-PMF of dihedrals for dialanine. ================================================================================= This example includes a step-by-step description on computing the two dimensional PMF of dialanine dihedrals with umbrella sampling and FastMBAR. Umbrella sampling is used to exhaustively sample relevant dialanine configurations that are centered around multiple dihedral values. FastMBAR is used here to compute the PMF by reweighing the configurations sampled from umbrella sampling. To run this example in your local computer, you need to clone/download the git repository `FastMBAR <https://github.com/BrooksResearchGroup-UM/FastMBAR>`_ onto your computer. After downloading the `FastMBAR`_ repository, change current working directory to ``FastMBAR/examples/dialanine`` before starting to run the following script inside the ``Python`` interpreter. .. code-block:: python ## import required packages import os, math, sys import numpy as np import matplotlib.pyplot as plt import openmm.app as omm_app import openmm as omm import openmm.unit as unit from tqdm import tqdm import mdtraj from FastMBAR import * 1. Construct an OpenMM system of dialanine ------------------------------------------- Because we are using OpenMM as our MD engine, we need to setup the MD molecular system in the format required by OpenMM. The format/object used by OpenMM for a molecular system happens to be a class called `System <http://docs.openmm.org/latest/api-python/generated/openmm.openmm.System.html#openmm.openmm.System>`_. Therefore, we will prepare our MD molecular system as an OpenMM System. When we prepare the OpenMM system, we add a `CustomTorsionForce <http://docs.openmm.org/latest/api-python/generated/openmm.openmm.CustomTorsionForce.html#openmm.openmm.CustomTorsionForce>`_ so that we can add biasing potentials to the system in the following umbrella sampling. Read psf and pdb files of dialanine: dialanine.psf and dialanine.pdb. 
The psf file, dialanine.psf, contains topology of dialanine and it is the topology file format used by CHARMM. The psf file, dialanine.psf, used here is generated using CHARMM. In your study, you usually already have a pdb file of your system. You can generate the topology file of your system using various MD softwares such as CHARMM, Gromacs and Amber among others. Just note that different softwares use different format for topology files and OpenMM has several parser for topology files with different format. .. code-block:: python psf = omm_app.CharmmPsfFile('./data/dialanine.psf') pdb = omm_app.PDBFile('./data/dialanine.pdb') Read CHARMM force field for dialanine. The CHARMM force field is downloaded from `here <http://mackerell.umaryland.edu/charmm_ff.shtml>`_. .. code-block:: python charmm_toppar = omm_app.CharmmParameterSet('./data/top_all36_prot.rtf', './data/par_all36_prot.prm') Create a OpenMM system based on the psf file of dialanine and the CHARMM force field. Then two harmonic biasing potentials are added to the system for dihedral :math:`\psi` (4-6-8-14) and dihedral :math:`\phi` (6-8-14,16) so that we can use biasing potentials in the following umbrella sampling. Adding biasing potentials to torsions of a system is very easy in OpenMM. We don't have to change any source code of OpenMM. All we need to do is to tell OpenMM the formula of the biasing potential and degree of freedom we want to add biasing potentials to. .. 
code-block:: python ## create a OpenMM system based on psf of dialanine and CHARMM force field system = psf.createSystem(charmm_toppar, nonbondedMethod = omm_app.NoCutoff) ## add harmonic biasing potentials on two dihedrals of dialanine (psi, phi) in the OpenMM system ## for dihedral psi bias_torsion_psi = omm.CustomTorsionForce("0.5*k_psi*dtheta^2; dtheta = min(tmp, 2*pi-tmp); tmp = abs(theta - psi)") bias_torsion_psi.addGlobalParameter("pi", math.pi) bias_torsion_psi.addGlobalParameter("k_psi", 1.0) bias_torsion_psi.addGlobalParameter("psi", 0.0) ## 4, 6, 8, 14 are indices of the atoms of the torsion psi bias_torsion_psi.addTorsion(4, 6, 8, 14) ## for dihedral phi bias_torsion_phi = omm.CustomTorsionForce("0.5*k_phi*dtheta^2; dtheta = min(tmp, 2*pi-tmp); tmp = abs(theta - phi)") bias_torsion_phi.addGlobalParameter("pi", math.pi) bias_torsion_phi.addGlobalParameter("k_phi", 1.0) bias_torsion_phi.addGlobalParameter("phi", 0.0) ## 6, 8, 14, 16 are indices of the atoms of the torsion phi bias_torsion_phi.addTorsion(6, 8, 14, 16) system.addForce(bias_torsion_psi) system.addForce(bias_torsion_phi) After constructing the OpenMM system of dialanine, we can save it in an XML formatted text file, which can be used later for simulations. Therefore, if we want to use the same system in another script, we can just read the text file in an OpenMM system instead of constructing it again. You can even open the XML formatted text file using a text editor and see what information about the system is included in the XML file. .. code-block:: python ## save the OpenMM system of dialanine with open("./output/system.xml", 'w') as file_handle: file_handle.write(omm.XmlSerializer.serialize(system)) 2. Run umbrella sampling ------------------------ We run umbrella sampling for two dialanine dihedrals: dihedral :math:`\psi` with atom indices of 4-6-8-14 and dihedral :math:`\phi` with atom indices of 6-8-14-16. 
Both dihedrals are split into multiple windows and in each window, the two dihedrals are restrained around a center using a harmonic biasing potential. In this script, we run simulations in each window sequentially, but they can be run in parallel if you have a computer cluster with multiple nodes. .. code-block:: python ## read the OpenMM system of dialanine with open("./output/system.xml", 'r') as file_handle: xml = file_handle.read() system = omm.XmlSerializer.deserialize(xml) ## read psf and pdb file of dialanine psf = omm_app.CharmmPsfFile("./data/dialanine.psf") pdb = omm_app.PDBFile('./data/dialanine.pdb') In order to run simulations in OpenMM, we need to construct an OpenMM context, which consists of a system, an integrator and a platform. The system is just the dialanine system we have constructed above. The integrator specifies what kind integration method we should use. Here, we will use Langevin dynamics for NVP ensemble simulation, which corresponds to the OpenMM.LangevinMiddleIntegrator. The platform specifies what kind of hardware we will run simulation on. Here, we choose to use CPUs. .. code-block:: python #### setup an OpenMM context ## platform platform = omm.Platform.getPlatformByName('CPU') ## integrator T = 298.15 * unit.kelvin ## temperature fricCoef = 10/unit.picoseconds ## friction coefficient stepsize = 1 * unit.femtoseconds ## integration step size integrator = omm.LangevinMiddleIntegrator(T, fricCoef, stepsize) ## construct an OpenMM context context = omm.Context(system, integrator, platform) Here we set the force constant and centers for the harmonic biasing potentials on dialanine dihedral. .. code-block:: python ## set force constant K for the biasing potential. 
## the unit here is kJ*mol^{-1}*nm^{-2}, which is the default unit used in OpenMM k_psi = 100 k_phi = 100 context.setParameter("k_psi", k_psi) context.setParameter("k_phi", k_phi) ## equilibrium value for both psi and phi in biasing potentials m = 25 M = m*m psi = np.linspace(-math.pi, math.pi, m, endpoint = False) phi = np.linspace(-math.pi, math.pi, m, endpoint = False) The following loop is used to run umbrella sampling window by window. In each iteration, we first set the centers of the two harmonic biasing potentials. Then the configuration of dialanine is minimized and equilibrated with the biasing potentials. After initial equilibration, configurations are sampled and saved. .. code-block:: python ## the main loop to run umbrella sampling window by window for idx in range(M): psi_index = idx // m phi_index = idx % m print(f"sampling at psi index: {psi_index} out of {m}, phi index: {phi_index} out of {m}") ## set the center of the biasing potential context.setParameter("psi", psi[psi_index]) context.setParameter("phi", phi[phi_index]) ## minimize context.setPositions(pdb.positions) state = context.getState(getEnergy = True) energy = state.getPotentialEnergy() for i in range(50): omm.LocalEnergyMinimizer.minimize(context, 1, 20) state = context.getState(getEnergy = True) energy = state.getPotentialEnergy() ## initial equilibrium integrator.step(5000) ## sampling production. trajectories are saved in dcd files file_handle = open(f"./output/traj/traj_psi_{psi_index}_phi_{phi_index}.dcd", 'bw') dcd_file = omm_app.dcdfile.DCDFile(file_handle, psf.topology, dt = stepsize) for i in tqdm(range(100)): integrator.step(100) state = context.getState(getPositions = True) positions = state.getPositions() dcd_file.writeModel(positions) file_handle.close() 3. 
Compute and collect values of both dialanine dihedral --------------------------------------------------------- For configurations in trajectories sampled using umbrella sampling, we compute the two alanine dihedral :math:`\psi` and :math:`\phi` and save them in csv files. The dihedral :math:`\psi` is between four atoms with indices of 4, 6, 8, and 14. The dihedral :math:`\phi` is between four atoms with indices of 6, 8, 14, 16. Here we use the Python package mdtraj to compute dihedrals. .. code-block:: python topology = mdtraj.load_psf("./output/dialanine.psf") K = 100 m = 25 M = m*m psi = np.linspace(-math.pi, math.pi, m, endpoint = False) phi = np.linspace(-math.pi, math.pi, m, endpoint = False) psis = [] phis = [] for psi_index in range(m): for phi_index in range(m): traj = mdtraj.load_dcd(f"./output/traj/traj_psi_{psi_index}_phi_{phi_index}.dcd", topology) psis.append(mdtraj.compute_dihedrals(traj, [[4, 6, 8, 14]])) phis.append(mdtraj.compute_dihedrals(traj, [[6, 8, 14, 16]])) psi_array = np.squeeze(np.stack(psis)) phi_array = np.squeeze(np.stack(phis)) 4. Use FastMBAR to solve MBAR/UWHAM equations and compute the PMF ----------------------------------------------------------------- Two steps are required to compute PMF using FastMBAR based on umbrella sampling. Firstly, we need to compute the relative free energies of the biased ensembles used in umbrella sampling, i.e., the NVT ensembles with biased potential energies. Secondly, samples from umbrella sampling are reweighed to compute the PMF. Simulations in umbrella sampling have different biasing potential energies. They are viewed as different thermodynamic states. Therefore, we have :math:`M` states and samples from these states. As shown in Usage, we can use FastMBAR to compute the relative free energies of these :math:`M` states. In order to do it, we need to compute the reduced energy matrix :math:`A_{M,N}` as shown in Fig. 
1, where :math:`U(x)` is the potential energy function; :math:`B_k(x)` is the biasing potential added in the :math:`i` th state. In this case, biasing potentials are added to dihedral :math:`\psi` (4-6-8-14) and dihedral :math:`\phi` (6-8,14,16). :math:`B_k(x) = 0.5*k_{\psi}*\Delta\psi^2 + 0.5*k_{\phi}*\Delta\phi^2`, where :math:`\Delta\psi = min(|\psi(x) - \psi^0_i|, 2\pi - |\psi(x) - \psi^0_i|)`, :math:`\Delta\phi = min(|\phi(x) - \phi^0_j|, 2\pi - |\phi(x) - \phi^0_j|)` where :math:`\psi(x)` and :math:`\phi(x)` are the dihedrals (4-6-8-14 and 6-8-14-16) calculated based on Cartesian coordinates :math:`x`; :math:`\psi^0_i` is :math:`i` th equilibrium torsion for :math:`\psi` used in umbrella sampling; :math:`\phi^0_j` is :math:`j` th equilibrium torsion for :math:`\phi` used inf umbrella sampling. We cam compute :math:`i` and :math:`j` based on :math:`k = i*m + j, M = m*m`. .. image:: ../../examples/dialanine/data/Fig_1.png Compared to general cases, the reduced potential energy matrix :math:`A_{M,N}` in umbrella sampling has a special property. The energy functions of the :math:`M` states are :math:`U(x) + B_k(x)`. They all have the common component :math:`U(x)`. Removing the common component :math:`U(x)` from the energy matrix :math:`A_{M,N}` does not affect the relative free energies of the :math:`M` states. Therefore, we can omitting computing :math:`U(x)` when compute the energy matrix :math:`A_{M,N}`, as shown in Fig. 2 .. image:: ../../examples/dialanine/data/Fig_2.png As shown in Fig. 2, we can compute the reduced energy matrix :math:`A_{M,N}` just based on dihedral values from umbrella sampling. In the following script, we read the dihedral values and compute the reduced energy matrix :math:`A_{M,N}`. Based on the reduced energy matrix and the number of conformations sampled from each state, we can compute the relative free energies of the :math:`M` states using FastMBAR. .. 
code-block:: python ## compute energy matrix A T = 298.15 * unit.kelvin kbT = unit.BOLTZMANN_CONSTANT_kB * 298.15 * unit.kelvin * unit.AVOGADRO_CONSTANT_NA kbT = kbT.value_in_unit(unit.kilojoule_per_mole) n = psi_array.shape[1] A = np.zeros((M, n*M)) psi_array = np.reshape(psi_array, (-1,)) phi_array = np.reshape(phi_array, (-1,)) for index in range(M): psi_index = index // m phi_index = index % m psi_c = psi[psi_index] phi_c = phi[phi_index] psi_diff = np.abs(psi_array - psi_c) psi_diff = np.minimum(psi_diff, 2*math.pi-psi_diff) phi_diff = np.abs(phi_array - phi_c) phi_diff = np.minimum(phi_diff, 2*math.pi-phi_diff) A[index, :] = 0.5*K*(psi_diff**2 + phi_diff**2)/kbT ## solve MBAR equations num_conf_all = np.array([n for i in range(M)]) fastmbar = FastMBAR(energy = A, num_conf = num_conf_all, cuda = False, verbose = True) Now we are ready to compute the PMF. Solving MBAR equations yields the relative free energies of the :math:`M` states, all of which have biasing potential energies. Knowing the relative free energies of the :math:`M` states enables us to compute the PMF using an easy reweighing procedure. In order to do that, we need to compute the energy matrix :math:`B_{L,N}` as shown in Fig. 1 and Fig. 2. To represent the PMF of the dihedral, we split the dihedral range, :math:`[-\pi, \pi]` into :math:`l` windows for both :math:`\psi` and :math:`\phi`: :math:`[\psi_{i-1}, \psi_i]` for :math:`i = 0, ..., l-1` and :math:`[\psi_{j-1}, \psi_j]` for :math:`j = 0, ..., l-1` Then we can represent the PMF by computing the relative free energies of these :math:`L` states each of which has a potential energy of :math:`U(x)`. Because the :math:`k` th state is constrained in the dihedral range :math:`[\psi_{i-1}, \psi_i]` and :math:`[\phi_{j-1}, \phi_j]`, where :math:`k = i*l + j`. we need to add a biasing potential :math:`R_k(\theta)` to enforce the constraint. 
The value of the biasing potential :math:`R_k(\theta = (\psi, \phi))` is 0 when :math:`\psi \in [\psi_{i-1}, \psi_i]` and :math:`\phi \in [\phi_{j-1}, \psi_j]`, where :math:`k = i*l + j`. The value of the biasing potential :math:`R_k(\theta = (\psi, \phi))` is infinity otherwise. .. code-block:: python ## compute the reduced energy matrix B l_PMF = 25 L_PMF = l_PMF * l_PMF psi_PMF = np.linspace(-math.pi, math.pi, l_PMF, endpoint = False) phi_PMF = np.linspace(-math.pi, math.pi, l_PMF, endpoint = False) width = 2*math.pi / l_PMF B = np.zeros((L_PMF, A.shape[1])) for index in range(L_PMF): psi_index = index // l_PMF phi_index = index % l_PMF psi_c_PMF = psi_PMF[psi_index] phi_c_PMF = phi_PMF[phi_index] psi_low = psi_c_PMF - 0.5*width psi_high = psi_c_PMF + 0.5*width phi_low = phi_c_PMF - 0.5*width phi_high = phi_c_PMF + 0.5*width psi_indicator = ((psi_array > psi_low) & (psi_array <= psi_high)) | \ ((psi_array + 2*math.pi > psi_low) & (psi_array + 2*math.pi <= psi_high)) | \ ((psi_array - 2*math.pi > psi_low) & (psi_array - 2*math.pi <= psi_high)) phi_indicator = ((phi_array > phi_low) & (phi_array <= phi_high)) | \ ((phi_array + 2*math.pi > phi_low) & (phi_array + 2*math.pi <= phi_high)) | \ ((phi_array - 2*math.pi > phi_low) & (phi_array - 2*math.pi <= phi_high)) indicator = psi_indicator & phi_indicator B[index, ~indicator] = np.inf ## compute PMF using the energy matrix B results = fastmbar.calculate_free_energies_of_perturbed_states(B) PMF = results['F'] ## plot the PMF fig = plt.figure(0) fig.clf() plt.imshow(np.flipud(PMF.reshape((l_PMF, l_PMF)).T), extent = (-180, 180, -180, 180)) plt.xlabel(r"$\psi$") plt.ylabel(r"$\phi$") plt.colorbar() plt.savefig("./output/PMF_fastmbar.pdf") The PMF saved in the file ``./output/PMF_fastmbar.pdf`` should be like the following PMF: .. image:: ../../examples/dialanine/data/PMF.png
PypiClean
/Copreus-0.4.0.tar.gz/Copreus-0.4.0/copreus/schema/dht.py
import copreus.schema.adriver as adriver
import copreus.schema.apolling as apolling
import copreus.schema.calibratedvalue as calibratedvalue
import copreus.schema.valuerange as valuerange


def _add_schema_part(schema, schema_part):
    """Merge a reusable schema fragment into the driver schema in place.

    Extends the driver's ``required`` list and updates its ``properties``
    mapping with the fragment's entries.
    """
    schema["driver"]["required"].extend(schema_part["required"])
    schema["driver"]["properties"].update(schema_part["properties"])


def get_schema():
    """Build and return the JSON schema for the DHT driver configuration.

    Combines the DHT-specific properties (gpio pin, sensor type, optional
    event pin) with the generic adriver/apolling schema parts plus
    calibration and value-range fragments for temperature and humidity.
    """
    driver_specific_properties = {
        # BCM/board pin the DHT sensor's data line is attached to.
        "pin": {
            "description": "gpio @ raspberry.",
            "type": "integer"
        },
        # Which concrete DHT-family sensor is connected.
        "sensor-type": {
            "description": "DHT11, DHT22, AM2302",
            "type": "string",
            "enum": ["DHT11", "DHT22", "AM2302"]
        },
        # Optional input pin that triggers an immediate poll (poll_now).
        "event-pin": {
            "description": "input pin trigger for poll_now (optional)",
            "type": "object",
            "properties": {
                "pin": {
                    "description": "gpio pin",
                    "type": "integer"
                },
                "flank": {
                    "description": "trigger poll_now on which flank of the pin signal",
                    "type": "string",
                    "enum": ["rising", "falling", "both"]
                },
                # NOTE(review): the sub-schemas below describe button topics and
                # translations; this looks copied from a button driver — confirm
                # these keys are really what the DHT event-pin handler expects.
                "topics-pub": {
                    "description": "list of topics that the driver publishes to.",
                    "type": "object",
                    "properties": {
                        "button_pressed": {
                            "description": "mqtt-translations.button_pressed",
                            "type": "string"
                        },
                        "button_state": {
                            "description": "mqtt-translations.button_state-open and mqtt-translations.button_state-closed",
                            "type": "string"
                        }
                    },
                    "required": [
                        "button_pressed",
                        "button_state"
                    ],
                    "additionalItems": False
                },
                "mqtt-translations": {
                    "description": "what are the commands/states-values that are transmitted via mqtt",
                    "type": "object",
                    "properties": {
                        "button_pressed": {
                            "description": "value for button pressed",
                            "type": "string"
                        },
                        "button_state-open": {
                            "description": "value for button state",
                            "type": "string"
                        },
                        "button_state-closed": {
                            "description": "value for button state",
                            "type": "string"
                        }
                    },
                    "required": [
                        "button_pressed",
                        "button_state-open",
                        "button_state-closed"
                    ],
                    "additionalItems": False
                },
            },
            "required": ["pin", "flank"],
            "additionalProperties": False
        }
    }

    # Raw (uncalibrated) readings the driver publishes.
    topics_pub = {
        "temperature": "raw temperature",
        "humidity": "raw humidity"
    }

    apolling_schema_parts, topics_sub, mqtt_translations = apolling.get_schema_parts()

    schema = adriver.get_schema("dht", driver_specific_properties, topics_pub, topics_sub,
                                mqtt_translations)
    # event-pin is optional — drop it from the required list that
    # adriver.get_schema presumably builds from the driver properties.
    schema["driver"]["required"].remove("event-pin")
    _add_schema_part(schema, apolling_schema_parts)
    _add_schema_part(schema, calibratedvalue.get_schema_parts("calibration-humidity"))
    _add_schema_part(schema, calibratedvalue.get_schema_parts("calibration-temperature"))
    _add_schema_part(schema, valuerange.get_schema_parts("valuerange-humidity"))
    _add_schema_part(schema, valuerange.get_schema_parts("valuerange-temperature"))

    return schema
PypiClean
/EventDriven-0.1.3.tar.gz/EventDriven-0.1.3/eventdriven/adapter/timer.py
from .base import AbstractAdapter
from threading import Thread, Event

__all__ = ['Timer', 'EVT_DRI_TIMING']

# Event name dispatched on every timer tick.
EVT_DRI_TIMING = '|EVT|TIMING|'


class Timer(AbstractAdapter):
    """ Periodically dispatches a timing event to the parent controller.

    Public methods:
        set_timing: set (or change) the timer interval.
        suspend:    suspend event generation.
        resume:     resume a suspended timer.
    """
    def __init__(self, interval=None, toggle=EVT_DRI_TIMING):
        """
        :param interval: interval, in seconds, between timing events
                         (None means the timer is off).
        :param toggle:   event name dispatched on every tick,
                         defaults to EVT_DRI_TIMING.
        """
        self.__timer_thread = None          # worker Thread, created lazily in __run__
        self._interval = interval           # tick interval in seconds, or None
        self._close_evt = Event()           # set to ask the worker thread to stop
        self._no_suspend = Event()          # cleared while the timer is suspended
        self._no_suspend.set()
        self.__toggle = toggle
        # NOTE(review): self._parent is used below but never assigned here —
        # presumably it is set by AbstractAdapter; confirm.

    def set_timing(self, interval=None):
        """ Set the interval, in seconds, between timing events.

        Setting it to None switches the timer off.
        If the controller is already running, a changed interval takes
        effect before the next event fires.
        """
        self._interval = interval
        if not interval:
            # No interval: signal the worker thread to shut down.
            self._close_evt.set()
        elif self._parent.is_alive() and (not self.__timer_thread or not self.__timer_thread.is_alive()):
            # Parent is running but no worker thread exists yet — start one.
            self.__run__()

    def join(self):
        """ Wait for the timer worker thread to finish. """
        if self.__timer_thread and self.__timer_thread.is_alive():
            self.__timer_thread.join()

    def __run__(self):
        # Only start a worker when an interval is configured.
        if self._interval:
            thread = Thread(target=self.__timing_thread,
                            name=str(self._parent.name) + '-timer')
            self.__timer_thread = thread
            thread.start()

    def __timing_thread(self):
        """ Worker loop that emits the timing event every interval seconds. """
        while True:
            # Block here while suspended.
            self._no_suspend.wait()
            interval = self._interval
            # Avoid blocking forever: a None interval means shut the timer
            # down instead of waiting indefinitely.
            if interval is None:
                break
            try:
                if self._close_evt.wait(interval):
                    # The close event being set means the timer should stop.
                    self._close_evt.clear()
                    break
                # Dispatch the timing event to the parent controller.
                self._parent.dispatch(self.__toggle, interval)
            except TypeError:
                break

    def __suspend__(self):
        # Pause event generation; the worker blocks on _no_suspend.wait().
        self._no_suspend.clear()
    suspend = __suspend__

    def __resume__(self):
        # Resume a previously suspended timer.
        self._no_suspend.set()
    resume = __resume__

    def __closing__(self):
        # Wake a suspended worker so it can observe the close event and exit.
        self._no_suspend.set()
        self._close_evt.set()
PypiClean
/Flask-Helper-2.0.8.tar.gz/Flask-Helper-2.0.8/flask_helper/sessions.py
from itsdangerous import BadSignature
try:
    from flask.helpers import total_seconds
except ImportError:
    # Removed in newer Flask releases; the fallback path in open_session
    # only uses it when timedelta.total_seconds is unavailable.
    total_seconds = None
from flask.sessions import SecureCookieSessionInterface
from flask import g

__author__ = 'meisa'


class SecureCookieSessionInterface2(SecureCookieSessionInterface):
    """Session interface that loads the session from the regular cookie
    and/or from an ``X-COOKIE-<cookie name>`` request header.

    Header-supplied data is merged over the cookie data, so the header
    wins on key conflicts.
    """

    def open_session(self, app, request):
        """Deserialize the session from cookie and/or header.

        :return: a session object, or None when no signing serializer is
                 configured for the app.
        """
        s = self.get_signing_serializer(app)
        if s is None:
            return None
        val = request.cookies.get(self.get_cookie_name(app))
        header_cookie = request.headers.get("X-COOKIE-%s" % self.get_cookie_name(app).upper())
        if not val and not header_cookie:
            return self.session_class()
        if hasattr(app.permanent_session_lifetime, 'total_seconds'):
            max_age = int(app.permanent_session_lifetime.total_seconds())
        else:
            max_age = total_seconds(app.permanent_session_lifetime)
        session_data = dict()
        if val is not None:
            try:
                data = s.loads(val, max_age=max_age)
                session_data.update(data)
            except BadSignature:
                # A tampered or expired cookie invalidates the whole session.
                return self.session_class()
        if header_cookie is not None:
            try:
                data2 = s.loads(header_cookie, max_age=max_age)
                session_data.update(data2)
            except BadSignature:
                # A bad header is ignored; the cookie data remains usable.
                pass
        sc = self.session_class(session_data)
        return sc

    def get_expiration_time(self, app, session):
        """Return the cookie expiry, or None for a browser-session cookie.

        ``permanent_session`` set to True on the session or on ``flask.g``
        forces a non-persistent (browser-session) cookie.
        """
        if "permanent_session" in session and session["permanent_session"] is True:
            return None
        if "permanent_session" in g and g.permanent_session is True:
            return None
        # BUG FIX: the base-class result was previously computed but not
        # returned, so every session cookie expired with the browser session
        # regardless of app.permanent_session_lifetime.
        return SecureCookieSessionInterface.get_expiration_time(self, app, session)

    def save_session(self, app, session, response):
        """Persist the session via the standard cookie mechanism."""
        SecureCookieSessionInterface.save_session(self, app, session, response)
PypiClean
/Dovetail-1.0beta2.tar.gz/Dovetail-1.0beta2/dovetail/util/logging.py
"""Logging and stdout formatting""" import sys from dovetail.util.utilities import enum from datetime import datetime # pylint: disable-msg=W0212 # Classes in this module are specifically 'friends' and may access internal # members for efficiency LEVEL = enum(DEBUG=0, INFO=1, MAJOR=2, WARN=3, ERROR=4) """Enumeration of different logging levels: 0. DEBUG 1. INFO 2. MAJOR 3. WARN 4. ERROR """ LEVEL_PREFIX = [' ', '.', '>', '!', '#'] class Message(object): """A log message capturing a piece of information about the execution of Dovetail. Attributes: +--------------+----------------------+------------------------------------------+ | Attribute | Type | Description | +==============+======================+==========================================+ | message | string | The line captured from the logging | | | | system | +--------------+----------------------+------------------------------------------+ | level | Enum from | The log level | | | :data:`LEVEL` | | +--------------+----------------------+------------------------------------------+ | when | :class:`datetime.\ | When the message was received | | | datetime` | | +--------------+----------------------+------------------------------------------+ .. 
note:: The overall reporting level is changed by calling Logging.setLevel().""" def __init__(self, message, level=LEVEL.INFO): assert LEVEL.DEBUG <= level <= LEVEL.ERROR assert message is not None assert isinstance(message, basestring) self.message = message self.level = level self.when = datetime.now() def shown(self): """Returns True if this message should be shown (its level >= logging level)""" return Logger.show(self.level) def __str__(self): return "{0} {1}".format(self.when.isoformat(), self.message) class StdErr(object): """A log message which was captured from stderr""" def __init__(self, message): self.message = str(message).rstrip() def shown(self): """Returns True if the error should be shown, which is likely always True""" return Logger.show(LEVEL.ERROR) def __str__(self): return self.message class Logger(object): """Co-ordinates output from the execution of Dovetail. The global variable LEVEL contains an enumeration of the following log levels: * LEVEL.DEBUG - Debugging information. Very verbose * LEVEL.INTO - Default level * LEVEL.MAJOR - important messages and sys.stdout * LEVEL.WARN * LEVEL.ERROR - errors and sys.stderr The overall reporting level is changed by calling Message.setLevel()""" _level = LEVEL.MAJOR _out_raw = sys.stdout _out_nl = True # Keep track of terminating newlines _err_raw = sys.stderr _err_nl = True # Keep track of terminating newlines _captured = False _frame = None _indent = "" _nested = False @staticmethod def set_nested(nested): """Switch nesting of the log file on or off. Can only be called when Tasks are not executing""" if nested == Logger._nested: return assert Logger._captured assert Logger._frame is None Logger._nested = nested @staticmethod def set_level(level): """Sets the overall level of log output. 
This setting does not adjust what is *captured*, only what is reported""" if isinstance(level, basestring): level = LEVEL.lookup(level.upper()) Logger._level = level Logger.log("Setting log level to {0}".format(LEVEL.as_str(level)), level=LEVEL.INFO) @staticmethod def show(level): """Returns True if level >= logging level""" return level >= Logger._level @staticmethod def set_frame(frame): """Sets the Logger frame which is used to calculate the indent when nesting the logging output""" if Logger._nested: if frame is None: Logger.indent = "" else: Logger._indent = " " * frame.depth() Logger._frame = frame @staticmethod def _write_raw(out, indent, level, message): """Internal method to write output to stdout or stderr""" for line in message.splitlines(True): if indent and Logger._nested: assert (out is Logger._out_raw and level < LEVEL.ERROR) or \ (out is Logger._err_raw and level == LEVEL.ERROR) out.write(LEVEL_PREFIX[level]) out.write('|') out.write(Logger._indent) out.write(line) @staticmethod def ends_with_newline(message): """Return True if the message ends with a new line""" if not len(message): return False return message[-1] == "\n" @staticmethod def _write_stdout(level, message): """Internal method to write to stdout handling newlines""" assert level < LEVEL.ERROR Logger._write_raw(Logger._out_raw, Logger._out_nl, level, message) Logger._out_nl = Logger.ends_with_newline(message) @staticmethod def _write_stderr(message): """Internal method to write to stderr handling newlines""" Logger._write_raw(Logger._err_raw, Logger._err_nl, LEVEL.ERROR, message) Logger._err_nl = Logger.ends_with_newline(message) @staticmethod def flush(): """Flushes all output to the logging channels""" if Logger._captured: sys.stderr.flush() @staticmethod def log(message, level=LEVEL.INFO): """Logs a message to the currently executing frame""" if Logger._frame: Logger._frame.log(Message(message, level)) if LEVEL.ERROR > level >= Logger._level: Logger._write_stdout(level, message + 
"\n") elif level >= LEVEL.ERROR: Logger._write_stderr(message + "\n") @staticmethod def debug(message): """Logs a method at DEBUG level""" Logger.log(message, level=LEVEL.DEBUG) @staticmethod def major(message): """Logs a method at MAJOR level""" Logger.log(message, level=LEVEL.MAJOR) @staticmethod def warn(message): """Logs a method at ERROR level""" Logger.log(message, level=LEVEL.WARN) @staticmethod def error(message): """Logs a method at ERROR level""" Logger.log(message, level=LEVEL.ERROR) # pylint: disable-msg=R0201 # TeeOut is a duck-type which requires a write(data) method @staticmethod def _capture_std_out_and_err(): """This method replaces the sys.stdout and sys.stderr with variants that pipe the data to the Logger""" class TeeOut(object): """An object that assists the capture of stdout""" def __del__(self): if Logger: # When called, Logger can have already been destroyed sys.stdout = Logger._out_raw def write(self, data): """Writes the stdout data to the Logger at MAJOR level""" Logger._write_stdout(LEVEL.MAJOR, data) def flush(self): """Flushes output to the screen, and ensures all logging has been flushed from the internal Logger buffers""" Logger._out_raw.flush() class TeeErr(object): """An object that assists the capture of stdout""" def __init__(self): self.buffer = "" def __del__(self): if Logger: # When called, Logger can have already been destroyed sys.stderr = Logger._err_raw def write(self, data): """Writes the stderr data to the Logger at ERROR level""" Logger._write_stderr(data) if len(data) > 0: self.buffer += data if self.buffer[-1] == '\n': self.flush() def flush(self): """Flushes output to the screen, and ensures all logging has been flushed from the internal Logger buffers""" if len(self.buffer) > 0: if Logger._frame is not None: Logger._frame.log(StdErr(buffer)) self.buffer = "" Logger._err_raw.flush() # Start intercepting stdout and stderr Logger._captured = True sys.stdout = TeeOut() sys.stderr = TeeErr() 
Logger._capture_std_out_and_err()
PypiClean
/BPMN_RPA-7.1.2.tar.gz/BPMN_RPA-7.1.2/BPMN_RPA/Scripts/Set_Value.py
import json

# The BPMN-RPA Set_Value module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The BPMN-RPA Set_Value module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
import uuid


def value_to_variable(value: any, convert_to_list=False) -> any:
    """
    Function for returning a value to the WorkflowEngine.

    :param value: Any value.
    :param convert_to_list: Optional. Indicator whether to try to convert the value to a List.
    :return: The (possibly converted) input value.
    """
    # A comma-separated string is exploded into a list of stripped items,
    # but only when list conversion was explicitly requested.
    if ',' in str(value) and isinstance(value, str) and convert_to_list:
        value = [part.strip() for part in value.split(',')]
    # A string that looks like a JSON object is parsed into a dict,
    # but never when list conversion was requested.
    if isinstance(value, str) and value.startswith("{") and value.endswith("}") and not convert_to_list:
        value = json.loads(value)
    return value


def json_to_object(json_string: str) -> any:
    """
    Convert a JSON string to an Object.

    :param json_string: The JSON string to convert.
    :return: The Object from the JSON string.
    """
    return json.loads(json_string)


def object_to_json(object: any) -> str:
    """
    Convert an Object to a JSON string.

    :param object: The Object to convert.  (Parameter name kept for
                   backward compatibility with keyword callers, although
                   it shadows the builtin.)
    :return: The JSON string.
    """
    return json.dumps(object)


def split_string_to_list(string: str, separator: str = " ", maxsplit: int = -1) -> list:
    """
    Convert a string with a separator to a list.

    :param string: The string to convert.
    :param separator: Optional. Specifies the separator to use when splitting
                      the string. By default any whitespace is a separator.
    :param maxsplit: Optional. Specifies how many splits to do. Default value
                     is -1, which is "all occurrences".
    :return: A list created from the string.
    """
    # Allow escape sequences that arrive as literal backslash sequences
    # (e.g. from flow configuration) to act as real control characters.
    separator = separator.replace('\\n', '\n').replace('\\r', '\r').replace('\\t', '\t')
    return string.split(separator, maxsplit=int(maxsplit))


def increment_counter(counter: any, step: int = 1) -> any:
    """
    Increment a counter variable by step (default = 1).

    :param counter: The variable to add step to. May be an int, a float, or
                    a string holding an int or float representation.
    :param step: The increase value.
    :return: The value after the increase (int or float), or 0 when the
             counter cannot be interpreted as a number.
    """
    # handle int and float
    if isinstance(counter, (int, float)):
        return counter + step
    # handle string: try int first, then float
    if isinstance(counter, str):
        try:
            return int(counter) + step
        except ValueError:
            # BUG FIX: float strings such as "1.5" previously reset the
            # counter to 0 because only int() conversion was attempted.
            try:
                return float(counter) + step
            except ValueError:
                return 0
    # all data types other than int, float and str reset to 0
    return 0


def create_unique_id():
    """
    Generate a unique ID.

    :return: Unique ID as a 32-character hex string.
    """
    return uuid.uuid4().hex


def dummy_function():
    """ Dummy function for testing purposes """
    pass
PypiClean
/C-Rank-3.tar.gz/C-Rank-3/CRank/porterStemmer.py
import sys  # NOTE(review): imported but unused in this module.


class PorterStemmer:
    """Implementation of the Porter stemming algorithm (M.F. Porter, 1980).

    Call ``stem(word, 0, len(word) - 1)`` with an already lower-cased word;
    the stemmed form is returned as a string.
    """

    def __init__(self):
        """The main part of the stemming algorithm starts here.
        b is a buffer holding a word to be stemmed. The letters are in b[k0],
        b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is
        readjusted downwards as the stemming progresses. Zero termination is
        not in fact used in the algorithm.

        Note that only lower case sequences are stemmed. Forcing to lower case
        should be done before stem(...) is called.
        """
        self.b = ""  # buffer for word to be stemmed
        self.k = 0
        self.k0 = 0
        self.j = 0   # j is a general offset into the string

    def cons(self, i):
        """cons(i) is TRUE <=> b[i] is a consonant."""
        if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' or self.b[i] == 'o' or self.b[i] == 'u':
            return 0
        if self.b[i] == 'y':
            # 'y' is a consonant at the start of the word, otherwise it is a
            # consonant exactly when the previous letter is a vowel.
            if i == self.k0:
                return 1
            else:
                return (not self.cons(i - 1))
        return 1

    def m(self):
        """m() measures the number of consonant sequences between k0 and j.
        if c is a consonant sequence and v a vowel sequence, and <..>
        indicates arbitrary presence,

           <c><v>       gives 0
           <c>vc<v>     gives 1
           <c>vcvc<v>   gives 2
           <c>vcvcvc<v> gives 3
           ....
        """
        n = 0
        i = self.k0
        while 1:
            if i > self.j:
                return n
            if not self.cons(i):
                break
            i = i + 1
        i = i + 1
        while 1:
            while 1:
                if i > self.j:
                    return n
                if self.cons(i):
                    break
                i = i + 1
            i = i + 1
            n = n + 1
            while 1:
                if i > self.j:
                    return n
                if not self.cons(i):
                    break
                i = i + 1
            i = i + 1

    def vowelinstem(self):
        """vowelinstem() is TRUE <=> k0,...j contains a vowel"""
        for i in range(self.k0, self.j + 1):
            if not self.cons(i):
                return 1
        return 0

    def doublec(self, j):
        """doublec(j) is TRUE <=> j,(j-1) contain a double consonant."""
        if j < (self.k0 + 1):
            return 0
        if (self.b[j] != self.b[j-1]):
            return 0
        return self.cons(j)

    def cvc(self, i):
        """cvc(i) is TRUE <=> i-2,i-1,i has the form
        consonant - vowel - consonant
        and also if the second c is not w,x or y. this is used when trying to
        restore an e at the end of a short word, e.g.

           cav(e), lov(e), hop(e), crim(e), but
           snow, box, tray.
        """
        if i < (self.k0 + 2) or not self.cons(i) or self.cons(i-1) or not self.cons(i-2):
            return 0
        ch = self.b[i]
        if ch == 'w' or ch == 'x' or ch == 'y':
            return 0
        return 1

    def ends(self, s):
        """ends(s) is TRUE <=> k0,...k ends with the string s."""
        length = len(s)
        if s[length - 1] != self.b[self.k]: # tiny speed-up
            return 0
        if length > (self.k - self.k0 + 1):
            return 0
        if self.b[self.k-length+1:self.k+1] != s:
            return 0
        self.j = self.k - length
        return 1

    def setto(self, s):
        """setto(s) sets (j+1),...k to the characters in the string s,
        readjusting k."""
        length = len(s)
        self.b = self.b[:self.j+1] + s + self.b[self.j+length+1:]
        self.k = self.j + length

    def r(self, s):
        """r(s) is used further down."""
        if self.m() > 0:
            self.setto(s)

    def step1ab(self):
        """step1ab() gets rid of plurals and -ed or -ing. e.g.

           caresses  ->  caress
           ponies    ->  poni
           ties      ->  ti
           caress    ->  caress
           cats      ->  cat

           feed      ->  feed
           agreed    ->  agree
           disabled  ->  disable

           matting   ->  mat
           mating    ->  mate
           meeting   ->  meet
           milling   ->  mill
           messing   ->  mess

           meetings  ->  meet
        """
        if self.b[self.k] == 's':
            if self.ends("sses"):
                self.k = self.k - 2
            elif self.ends("ies"):
                self.setto("i")
            elif self.b[self.k - 1] != 's':
                self.k = self.k - 1
        if self.ends("eed"):
            if self.m() > 0:
                self.k = self.k - 1
        elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
            self.k = self.j
            if self.ends("at"):   self.setto("ate")
            elif self.ends("bl"): self.setto("ble")
            elif self.ends("iz"): self.setto("ize")
            elif self.doublec(self.k):
                self.k = self.k - 1
                ch = self.b[self.k]
                if ch == 'l' or ch == 's' or ch == 'z':
                    self.k = self.k + 1
            elif (self.m() == 1 and self.cvc(self.k)):
                self.setto("e")

    def step1c(self):
        """step1c() turns terminal y to i when there is another vowel in
        the stem."""
        if (self.ends("y") and self.vowelinstem()):
            self.b = self.b[:self.k] + 'i' + self.b[self.k+1:]

    def step2(self):
        """step2() maps double suffices to single ones.
        so -ization ( = -ize plus -ation) maps to -ize etc. note that the
        string before the suffix must give m() > 0.
        """
        if self.b[self.k - 1] == 'a':
            if self.ends("ational"):   self.r("ate")
            elif self.ends("tional"):  self.r("tion")
        elif self.b[self.k - 1] == 'c':
            if self.ends("enci"):      self.r("ence")
            elif self.ends("anci"):    self.r("ance")
        elif self.b[self.k - 1] == 'e':
            if self.ends("izer"):      self.r("ize")
        elif self.b[self.k - 1] == 'l':
            if self.ends("bli"):       self.r("ble") # --DEPARTURE--
            # To match the published algorithm, replace this phrase with
            # if self.ends("abli"):      self.r("able")
            elif self.ends("alli"):    self.r("al")
            elif self.ends("entli"):   self.r("ent")
            elif self.ends("eli"):     self.r("e")
            elif self.ends("ousli"):   self.r("ous")
        elif self.b[self.k - 1] == 'o':
            if self.ends("ization"):   self.r("ize")
            elif self.ends("ation"):   self.r("ate")
            elif self.ends("ator"):    self.r("ate")
        elif self.b[self.k - 1] == 's':
            if self.ends("alism"):     self.r("al")
            elif self.ends("iveness"): self.r("ive")
            elif self.ends("fulness"): self.r("ful")
            elif self.ends("ousness"): self.r("ous")
        elif self.b[self.k - 1] == 't':
            if self.ends("aliti"):     self.r("al")
            elif self.ends("iviti"):   self.r("ive")
            elif self.ends("biliti"):  self.r("ble")
        elif self.b[self.k - 1] == 'g': # --DEPARTURE--
            if self.ends("logi"):      self.r("log")
        # To match the published algorithm, delete this phrase

    def step3(self):
        """step3() deals with -ic-, -full, -ness etc. similar strategy
        to step2."""
        if self.b[self.k] == 'e':
            if self.ends("icate"):     self.r("ic")
            elif self.ends("ative"):   self.r("")
            elif self.ends("alize"):   self.r("al")
        elif self.b[self.k] == 'i':
            if self.ends("iciti"):     self.r("ic")
        elif self.b[self.k] == 'l':
            if self.ends("ical"):      self.r("ic")
            elif self.ends("ful"):     self.r("")
        elif self.b[self.k] == 's':
            if self.ends("ness"):      self.r("")

    def step4(self):
        """step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
        if self.b[self.k - 1] == 'a':
            if self.ends("al"): pass
            else: return
        elif self.b[self.k - 1] == 'c':
            if self.ends("ance"): pass
            elif self.ends("ence"): pass
            else: return
        elif self.b[self.k - 1] == 'e':
            if self.ends("er"): pass
            else: return
        elif self.b[self.k - 1] == 'i':
            if self.ends("ic"): pass
            else: return
        elif self.b[self.k - 1] == 'l':
            if self.ends("able"): pass
            elif self.ends("ible"): pass
            else: return
        elif self.b[self.k - 1] == 'n':
            if self.ends("ant"): pass
            elif self.ends("ement"): pass
            elif self.ends("ment"): pass
            elif self.ends("ent"): pass
            else: return
        elif self.b[self.k - 1] == 'o':
            if self.ends("ion") and (self.b[self.j] == 's' or self.b[self.j] == 't'): pass
            elif self.ends("ou"): pass
            # takes care of -ous
            else: return
        elif self.b[self.k - 1] == 's':
            if self.ends("ism"): pass
            else: return
        elif self.b[self.k - 1] == 't':
            if self.ends("ate"): pass
            elif self.ends("iti"): pass
            else: return
        elif self.b[self.k - 1] == 'u':
            if self.ends("ous"): pass
            else: return
        elif self.b[self.k - 1] == 'v':
            if self.ends("ive"): pass
            else: return
        elif self.b[self.k - 1] == 'z':
            if self.ends("ize"): pass
            else: return
        else:
            return
        if self.m() > 1:
            self.k = self.j

    def step5(self):
        """step5() removes a final -e if m() > 1, and changes -ll to -l if
        m() > 1.
        """
        self.j = self.k
        if self.b[self.k] == 'e':
            a = self.m()
            if a > 1 or (a == 1 and not self.cvc(self.k-1)):
                self.k = self.k - 1
        if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
            self.k = self.k -1

    def stem(self, p, i, j):
        """In stem(p,i,j), p is a char pointer, and the string to be stemmed
        is from p[i] to p[j] inclusive. Typically i is zero and j is the
        offset to the last character of a string, (p[j+1] == '\0'). The
        stemmer adjusts the characters p[i] ... p[j] and returns the new
        end-point of the string, k. Stemming never increases word length, so
        i <= k <= j. To turn the stemmer into a module, declare 'stem' as
        extern, and delete the remainder of this file.
        """
        # copy the parameters into statics
        self.b = p
        self.k = j
        self.k0 = i
        if self.k <= self.k0 + 1:
            return self.b # --DEPARTURE--

        # With this line, strings of length 1 or 2 don't go through the
        # stemming process, although no mention is made of this in the
        # published algorithm. Remove the line to match the published
        # algorithm.

        self.step1ab()
        self.step1c()
        self.step2()
        self.step3()
        self.step4()
        self.step5()
        return self.b[self.k0:self.k+1]
PypiClean
/DendroPy-4.6.1.tar.gz/DendroPy-4.6.1/src/dendropy/utility/vcsinfo.py
##############################################################################
##  DendroPy Phylogenetic Computing Library.
##
##  Copyright 2010-2015 Jeet Sukumaran and Mark T. Holder.
##  All rights reserved.
##
##  See "LICENSE.rst" for terms and conditions of usage.
##
##  If you use this work or any portion thereof in published work,
##  please cite it as:
##
##     Sukumaran, J. and M. T. Holder. 2010. DendroPy: a Python library
##     for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################

"""
Wraps up source version control system information.
"""

import os
import sys  # NOTE(review): unused in this module.
import subprocess
import datetime

from dendropy.utility import textprocessing
from dendropy.utility import processio

class Revision(object):
    """
    Provides (Git) version control information
    about a project.
    """

    # Raised when the VCS executable cannot be used.
    class VcsUnavailable(Exception):
        def __init__(self, *args, **kwargs):
            Exception.__init__(self, *args, **kwargs)

    # Raised when the path is not inside a repository.
    class NonRepositoryError(Exception):
        def __init__(self, *args, **kwargs):
            Exception.__init__(self, *args, **kwargs)

    # Raised when HEAD is not on a branch.
    class NonBranchException(Exception):
        def __init__(self, *args, **kwargs):
            Exception.__init__(self, *args, **kwargs)

    # Raised when HEAD carries no tag.
    class UntaggedException(Exception):
        def __init__(self, *args, **kwargs):
            Exception.__init__(self, *args, **kwargs)

    def __init__(self, repo_path, release=None, vcs_app_path='git'):
        """
        :param repo_path: path to the working copy / repository.
        :param release: optional release identifier (stored, not used here).
        :param vcs_app_path: name or path of the VCS executable.
        """
        self.vcs_app_path = vcs_app_path
        self.repo_path = repo_path
        self.release = release
        # All VCS facts are computed lazily and cached (see properties below).
        self._commit_id = None
        self._commit_date = None
        self._commit_tag = None
        self._branch_name = None
        self._description = None
        self._long_description = None
        self._is_available = None

    def __str__(self):
        # Format: "<branch>-<short commit id>, <date>"; empty when unknown.
        parts = []
        if self.branch_name:
            parts.append("%s-" % self.branch_name)
        if self.commit_id:
            parts.append(self.commit_id[:10])
        if self.commit_date:
            parts.append(", %s" % str(self.commit_date))
        if parts:
            return "".join(parts)
        else:
            return ""

    def __repr__(self):
        return "<%s: '%s'>" % (self.__class__.__name__, self.__str__())

    @property
    def commit_id(self):
        # Lazily populated via update().
        if self._commit_id is None:
            self.update()
        return self._commit_id

    @property
    def commit_date(self):
        if self._commit_date is None:
            self.update()
        return self._commit_date

    @property
    def commit_tag(self):
        if self._commit_tag is None:
            self.update()
        return self._commit_tag

    @property
    def branch_name(self):
        if self._branch_name is None:
            self.update()
        return self._branch_name

    @property
    def description(self):
        if self._description is None:
            self.update()
        return self._description

    @property
    def long_description(self):
        if self._long_description is None:
            self.update()
        return self._long_description

    @property
    def is_available(self):
        if self._is_available is None:
            self.update()
        return self._is_available

    def update(self, repo_path=None):
        """Refresh all cached VCS facts from the repository (or clear them
        when no repository / VCS executable is available)."""
        if repo_path is not None:
            self.repo_path = repo_path
        if not self.repo_path or not self._vcs_available():
            self._commit_id = None
            self._commit_date = None
            self._commit_tag = None
            self._branch_name = None
            self._description = None
            self._long_description = None
            self._is_available = False
            return
        self._commit_id = self.get_commit_id()
        self._commit_date = self.get_datetime()
        self._commit_tag = self.get_commit_tag()
        self._branch_name = self.get_branch_name()
        self._description = self.get_description()
        self._long_description = self._build_long_description()
        self._is_available = True

    def _run_vcs(self, cmd):
        """Run the VCS executable with ``cmd`` in the repo directory and
        return ``(returncode, stdout, stderr)``; returncode -999 on OSError.

        NOTE(review): when ``cmd`` is a list, it is passed as a list with
        ``shell=True`` — on POSIX that ignores everything after the first
        element. All current call sites pass strings, so the list branch is
        latent; confirm before relying on it.
        """
        if textprocessing.is_str_type(cmd):
            cmd = self.vcs_app_path + " " + cmd
        else:
            cmd.insert(0, self.vcs_app_path)
        try:
            p = subprocess.Popen(cmd,
                shell=True,
                cwd=os.path.abspath(self.repo_path),
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            stdout, stderr = processio.communicate(p)
            retcode = p.returncode
        except OSError as e:
            return -999, "", str(e)
        return retcode, stdout, stderr

    def _vcs_available(self):
        """Return True when the VCS executable runs and the path is a
        (git) repository."""
        retcode, stdout, stderr = self._run_vcs("--version")
        if retcode != 0:
            return False
        retcode, stdout, stderr = self._run_vcs("status")
        if "fatal: Not a git repository" in stderr:
            return False
        return True

    def get_commit_id(self):
        """Return the abbreviated commit hash of HEAD."""
        # cmd = "show --quiet --pretty=format:'%H' HEAD"
        cmd = "rev-parse --short HEAD"
        retcode, stdout, stderr = self._run_vcs(cmd)
        return stdout.replace('\n', '')

    def get_datetime(self):
        """Return the commit timestamp of HEAD as a datetime, or None."""
        cmd = "show --quiet --pretty=format:'%at' HEAD"
        retcode, stdout, stderr = self._run_vcs(cmd)
        if stdout:
            try:
                return datetime.datetime.fromtimestamp(float(stdout.replace('\n', '').replace("'", "").replace('"','')))
            except ValueError:
                return None
        else:
            return None

    def get_commit_tag(self):
        """Return the tag pointing at HEAD, or None when untagged."""
        cmd = "name-rev --name-only --tags --no-undefined HEAD"
        retcode, stdout, stderr = self._run_vcs(cmd)
        if "fatal: cannot describe" in stderr:
            return None
        else:
            return stdout.strip('\n')

    def get_branch_name(self):
        """Return the current branch name, "(no branch)" for a detached
        HEAD, or None on other failures."""
        # git name-rev --name-only HEAD
        cmd = "symbolic-ref HEAD"
        retcode, stdout, stderr = self._run_vcs(cmd)
        if retcode != 0:
            if "fatal: ref HEAD is not a symbolic ref" in stderr:
                return "(no branch)"
            else:
                return None
        else:
            return stdout.replace('\n', '').split('/')[-1]

    def get_description(self):
        """Return ``git describe`` output for HEAD, "(unnamed)" when no
        names exist, or None on other failures."""
        cmd = "describe --tags --long --always --abbrev=12"
        retcode, stdout, stderr = self._run_vcs(cmd)
        if retcode != 0:
            if "fatal: No names found, cannot describe anything." in stderr:
                return "(unnamed)"
            else:
                return None
        else:
            return stdout.replace('\n', '')

    def get_long_description(self):
        """Build a human-readable summary from the (property) facts.

        NOTE(review): near-duplicate of _build_long_description(), which
        reads the cached fields directly; consider consolidating.
        """
        parts = []
        if self.commit_id:
            parts.append(self.commit_id)
        if self.branch_name:
            parts.append("on branch '%s'" % self.branch_name)
        if self.commit_date:
            parts.append("committed on %s" % str(self.commit_date))
        if parts:
            return ", ".join(parts)
        else:
            return None

    def _build_long_description(self):
        # Same as get_long_description(), but uses the already-cached private
        # fields so it can run safely from inside update().
        parts = []
        if self._commit_id:
            parts.append(self._commit_id)
        if self._branch_name:
            parts.append("on branch '%s'" % self._branch_name)
        if self._commit_date:
            parts.append("committed on %s" % str(self._commit_date))
        if parts:
            return ", ".join(parts)
        else:
            return None
PypiClean
/IPFX-1.0.8.tar.gz/IPFX-1.0.8/ipfx/bin/run_feature_extraction.py
import logging

import argschema as ags

import ipfx.error as er
import ipfx.data_set_features as dsft
from ipfx.stimulus import StimulusOntology
from ipfx._schemas import FeatureExtractionParameters
from ipfx.dataset.create import create_ephys_data_set
import ipfx.sweep_props as sp
import allensdk.core.json_utilities as ju
from ipfx.nwb_append import append_spike_times
import ipfx.plot_qc_figures as plotqc
import ipfx.logging_utils as lu


def collect_spike_times(sweep_features):
    """Map each sweep number to the list of its spike threshold times.

    Parameters
    ----------
    sweep_features : dict
        Maps sweep number -> feature dict whose ``'spikes'`` entry is a list
        of per-spike dicts containing ``'threshold_t'``.

    Returns
    -------
    dict
        Maps sweep number -> list of threshold times.
    """
    return {
        sweep_num: [s['threshold_t'] for s in features['spikes']]
        for sweep_num, features in sweep_features.items()
    }


def run_feature_extraction(input_nwb_file,
                           stimulus_ontology_file,
                           output_nwb_file,
                           qc_fig_dir,
                           sweep_info,
                           cell_info,
                           write_spikes=True):
    """Extract cell and sweep ephys features from an NWB file.

    Parameters
    ----------
    input_nwb_file : str
        Path to the NWB file to analyze.
    stimulus_ontology_file : str or None
        Path to a stimulus ontology JSON; falls back to the package default
        when falsy.
    output_nwb_file : str
        Path of the NWB file into which spike times are appended.
    qc_fig_dir : str or None
        Directory for QC figures; no figures are saved when None.
    sweep_info : list of dict
        Per-sweep QC/state records; failed sweeps are dropped in place.
    cell_info : dict or None
        Extra key/values merged into the cell record.
    write_spikes : bool, optional
        Whether to append detected spike times to ``output_nwb_file``.

    Returns
    -------
    dict
        Feature data; contains only ``'cell_state'`` when extraction failed.

    Raises
    ------
    ipfx.error.FeatureError
        When no QC-passed sweeps remain to analyze.
    """
    lu.log_pretty_header("Extract ephys features", level=1)

    sp.drop_failed_sweeps(sweep_info)
    if len(sweep_info) == 0:
        raise er.FeatureError(
            "There are no QC-passed sweeps available to analyze")

    if not stimulus_ontology_file:
        stimulus_ontology_file = \
            StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE
        logging.info(
            f"Ontology is not provided, using default "
            f"{StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE}")
    ont = StimulusOntology(ju.read(stimulus_ontology_file))

    data_set = create_ephys_data_set(sweep_info=sweep_info,
                                     nwb_file=input_nwb_file,
                                     ontology=ont)

    (cell_features, sweep_features, cell_record, sweep_records,
     cell_state, feature_states) = dsft.extract_data_set_features(data_set)

    if cell_state['failed_fx']:
        # Extraction failed: report only the failure state.
        feature_data = {'cell_state': cell_state}
    else:
        if cell_info:
            cell_record.update(cell_info)

        feature_data = {'cell_features': cell_features,
                        'sweep_features': sweep_features,
                        'cell_record': cell_record,
                        'sweep_records': sweep_records,
                        'cell_state': cell_state,
                        'feature_states': feature_states}

        if write_spikes:
            if not feature_states['sweep_features_state']['failed_fx']:
                sweep_spike_times = collect_spike_times(sweep_features)
                append_spike_times(input_nwb_file,
                                   sweep_spike_times,
                                   output_nwb_path=output_nwb_file)
            else:
                # BUG FIX: logging.warn is a deprecated alias of
                # logging.warning.
                logging.warning("extract_sweep_features failed, "
                                "unable to write spikes")

    if qc_fig_dir is None:
        logging.info("qc_fig_dir is not provided, will not save figures")
    else:
        plotqc.display_features(qc_fig_dir, data_set, feature_data)

    # On Windows int64 keys of sweep numbers cannot be converted to str by
    # json.dump when serializing.  Thus, we are converting them here.
    # BUG FIX: guard the conversion -- when feature extraction failed,
    # feature_data contains only 'cell_state' and the unconditional lookup
    # of 'sweep_features' raised KeyError.
    if 'sweep_features' in feature_data:
        feature_data["sweep_features"] = {
            str(k): v for k, v in feature_data["sweep_features"].items()}

    return feature_data


def main():
    """
    Usage:
    python run_feature_extraction.py --input_json INPUT_JSON --output_json OUTPUT_JSON
    """
    module = ags.ArgSchemaParser(schema_type=FeatureExtractionParameters)
    feature_data = run_feature_extraction(
        module.args["input_nwb_file"],
        module.args.get("stimulus_ontology_file", None),
        module.args["output_nwb_file"],
        module.args.get("qc_fig_dir", None),
        module.args["sweep_features"],
        module.args["cell_features"])

    ju.write(module.args["output_json"], feature_data)


if __name__ == "__main__":
    main()
PypiClean
/OASYS1-WOFRY-1.0.41.tar.gz/OASYS1-WOFRY-1.0.41/orangecontrib/wofry/widgets/beamline_elements/ow_double_slit.py
from orangecontrib.wofry.widgets.gui.ow_optical_element import OWWOOpticalElementWithDoubleBoundaryShape

from syned.beamline.optical_elements.absorbers.slit import Slit
from wofryimpl.beamline.optical_elements.absorbers.slit import WOSlit


class OWWODoubleSlit(OWWOOpticalElementWithDoubleBoundaryShape):
    """OASYS/Orange widget wrapping a WOFRY double-slit absorber.

    The double-boundary shape (the two slit patches) is managed entirely by
    the OWWOOpticalElementWithDoubleBoundaryShape base class; this subclass
    only supplies the widget metadata and the concrete optical element.
    """

    # Orange widget metadata: display name, tooltip, icon and palette order.
    name = "DoubleSlit"
    description = "Wofry: DoubleSlit"
    icon = "icons/double_slit.png"
    priority = 42

    def __init__(self):
        super().__init__()

    def get_optical_element(self):
        # Build the WOFRY slit from the boundary shape assembled by the
        # base class from the widget's shape settings.
        return WOSlit(name=self.oe_name,boundary_shape=self.get_boundary_shape())

    def check_syned_instance(self, optical_element):
        # Validate incoming syned data: this widget only accepts slits.
        if not isinstance(optical_element, Slit):
            raise Exception("Syned Data not correct: Optical Element is not a Slit")


if __name__ == "__main__":
    # Manual smoke test: run the widget standalone with a generated wavefront.
    import sys
    from PyQt5.QtWidgets import QApplication

    def get_example_wofry_data():
        # Build a small 2D Gaussian-source WofryData object to feed the widget.
        from wofryimpl.propagator.light_source import WOLightSource
        from wofryimpl.beamline.beamline import WOBeamline
        from orangecontrib.wofry.util.wofry_objects import WofryData

        light_source = WOLightSource(dimension=2,
                                     initialize_from=0,
                                     range_from_h=-0.0003,
                                     range_to_h=0.0003,
                                     range_from_v=-0.0001,
                                     range_to_v=0.0001,
                                     number_of_points_h=1000,
                                     number_of_points_v=500,
                                     energy=10000.0,
                                     )

        return WofryData(wavefront=light_source.get_wavefront(),
                         beamline=WOBeamline(light_source=light_source))

    a = QApplication(sys.argv)
    ow = OWWODoubleSlit()
    ow.set_input(get_example_wofry_data())

    # Geometry of patch 1 (offsets and candidate sizes for every shape type).
    ow.horizontal_shift = -50e-6
    ow.vertical_shift = -25e-6
    ow.width = 5e-6
    ow.height = 5e-6
    ow.radius = 5e-6
    ow.min_ax = 5e-6
    ow.maj_ax = 5e-6

    # the same for patch 2
    ow.horizontal_shift2 = 50e-6
    ow.vertical_shift2 = 25e-6
    ow.width2 = 5e-6
    ow.height2 = 5e-6
    ow.radius2 = 5e-6
    ow.min_ax2 = 5e-6
    ow.maj_ax2 = 5e-6

    ow.show()
    a.exec_()
    ow.saveSettings()
PypiClean
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/bs-stepper/js/bs-stepper.js
// bs-stepper: vanilla-JS stepper widget (vendored third-party library).
// UMD wrapper: CommonJS -> module.exports, AMD -> define(), otherwise a
// global `Stepper` on window/self.
(function (global, factory) {
  typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
  typeof define === 'function' && define.amd ? define(factory) :
  (global = global || self, global.Stepper = factory());
}(this, function () {
  'use strict';

  // Minimal Object.assign-style merge used for options (Babel helper).
  function _extends() {
    _extends = Object.assign || function (target) {
      for (var i = 1; i < arguments.length; i++) {
        var source = arguments[i];

        for (var key in source) {
          if (Object.prototype.hasOwnProperty.call(source, key)) {
            target[key] = source[key];
          }
        }
      }

      return target;
    };

    return _extends.apply(this, arguments);
  }

  // DOM capability shims; reassigned by polyfill() on legacy browsers (IE).
  var matches = window.Element.prototype.matches;

  var closest = function closest(element, selector) {
    return element.closest(selector);
  };

  var WinEvent = function WinEvent(inType, params) {
    return new window.Event(inType, params);
  };

  var createCustomEvent = function createCustomEvent(eventName, params) {
    var cEvent = new window.CustomEvent(eventName, params);
    return cEvent;
  };
  /* istanbul ignore next */


  function polyfill() {
    if (!window.Element.prototype.matches) {
      matches = window.Element.prototype.msMatchesSelector || window.Element.prototype.webkitMatchesSelector;
    }

    if (!window.Element.prototype.closest) {
      // Walk up the tree until a node matches `selector` (Element.closest).
      closest = function closest(element, selector) {
        if (!document.documentElement.contains(element)) {
          return null;
        }

        do {
          if (matches.call(element, selector)) {
            return element;
          }

          element = element.parentElement || element.parentNode;
        } while (element !== null && element.nodeType === 1);

        return null;
      };
    }

    if (!window.Event || typeof window.Event !== 'function') {
      WinEvent = function WinEvent(inType, params) {
        params = params || {};
        var e = document.createEvent('Event');
        e.initEvent(inType, Boolean(params.bubbles), Boolean(params.cancelable));
        return e;
      };
    }

    if (typeof window.CustomEvent !== 'function') {
      var originPreventDefault = window.Event.prototype.preventDefault;

      // CustomEvent polyfill that also makes preventDefault() set
      // `defaultPrevented` correctly on the synthetic event.
      createCustomEvent = function createCustomEvent(eventName, params) {
        var evt = document.createEvent('CustomEvent');
        params = params || {
          bubbles: false,
          cancelable: false,
          detail: null
        };
        evt.initCustomEvent(eventName, params.bubbles, params.cancelable, params.detail);

        evt.preventDefault = function () {
          if (!this.cancelable) {
            return;
          }

          originPreventDefault.call(this);
          Object.defineProperty(this, 'defaultPrevented', {
            get: function get() {
              return true;
            }
          });
        };

        return evt;
      };
    }
  }

  polyfill();

  var MILLISECONDS_MULTIPLIER = 1000;
  // CSS class names used to drive visibility and animation.
  var ClassName = {
    ACTIVE: 'active',
    LINEAR: 'linear',
    BLOCK: 'dstepper-block',
    NONE: 'dstepper-none',
    FADE: 'fade',
    VERTICAL: 'vertical'
  };
  var transitionEndEvent = 'transitionend';
  // Expando property on the root node holding the Stepper instance.
  var customProperty = 'bsStepper';

  // Switch the stepper to step `indexStep`; fires cancelable 'show.bs-stepper'
  // first, then deactivates the current step/content and activates the target.
  var show = function show(stepperNode, indexStep, options, done) {
    var stepper = stepperNode[customProperty];

    if (stepper._steps[indexStep].classList.contains(ClassName.ACTIVE) || stepper._stepsContents[indexStep].classList.contains(ClassName.ACTIVE)) {
      return;
    }

    var showEvent = createCustomEvent('show.bs-stepper', {
      cancelable: true,
      detail: {
        from: stepper._currentIndex,
        to: indexStep,
        indexStep: indexStep
      }
    });
    stepperNode.dispatchEvent(showEvent);

    var activeStep = stepper._steps.filter(function (step) {
      return step.classList.contains(ClassName.ACTIVE);
    });

    var activeContent = stepper._stepsContents.filter(function (content) {
      return content.classList.contains(ClassName.ACTIVE);
    });

    if (showEvent.defaultPrevented) {
      return;
    }

    if (activeStep.length) {
      activeStep[0].classList.remove(ClassName.ACTIVE);
    }

    if (activeContent.length) {
      activeContent[0].classList.remove(ClassName.ACTIVE);

      if (!stepperNode.classList.contains(ClassName.VERTICAL) && !stepper.options.animation) {
        activeContent[0].classList.remove(ClassName.BLOCK);
      }
    }

    showStep(stepperNode, stepper._steps[indexStep], stepper._steps, options);
    showContent(stepperNode, stepper._stepsContents[indexStep], stepper._stepsContents, activeContent, done);
  };

  // Update step header state: aria-selected flags and (in linear mode)
  // the disabled attribute on every trigger except the new current one.
  var showStep = function showStep(stepperNode, step, stepList, options) {
    stepList.forEach(function (step) {
      var trigger = step.querySelector(options.selectors.trigger);
      trigger.setAttribute('aria-selected', 'false'); // if stepper is in linear mode, set disabled attribute on the trigger

      if (stepperNode.classList.contains(ClassName.LINEAR)) {
        trigger.setAttribute('disabled', 'disabled');
      }
    });
    step.classList.add(ClassName.ACTIVE);
    var currentTrigger = step.querySelector(options.selectors.trigger);
    currentTrigger.setAttribute('aria-selected', 'true'); // if stepper is in linear mode, remove disabled attribute on current

    if (stepperNode.classList.contains(ClassName.LINEAR)) {
      currentTrigger.removeAttribute('disabled');
    }
  };

  // Reveal the target content pane, optionally waiting for the CSS fade
  // transition, then fire 'shown.bs-stepper' and invoke `done`.
  var showContent = function showContent(stepperNode, content, contentList, activeContent, done) {
    var stepper = stepperNode[customProperty];
    var toIndex = contentList.indexOf(content);
    var shownEvent = createCustomEvent('shown.bs-stepper', {
      cancelable: true,
      detail: {
        from: stepper._currentIndex,
        to: toIndex,
        indexStep: toIndex
      }
    });

    function complete() {
      content.classList.add(ClassName.BLOCK);
      content.removeEventListener(transitionEndEvent, complete);
      stepperNode.dispatchEvent(shownEvent);
      done();
    }

    if (content.classList.contains(ClassName.FADE)) {
      content.classList.remove(ClassName.NONE);
      var duration = getTransitionDurationFromElement(content);
      content.addEventListener(transitionEndEvent, complete);

      if (activeContent.length) {
        activeContent[0].classList.add(ClassName.NONE);
      }

      content.classList.add(ClassName.ACTIVE);
      emulateTransitionEnd(content, duration);
    } else {
      content.classList.add(ClassName.ACTIVE);
      content.classList.add(ClassName.BLOCK);
      stepperNode.dispatchEvent(shownEvent);
      done();
    }
  };

  // Read the element's computed transition-duration and convert it to ms.
  var getTransitionDurationFromElement = function getTransitionDurationFromElement(element) {
    if (!element) {
      return 0;
    } // Get transition-duration of the element


    var transitionDuration = window.getComputedStyle(element).transitionDuration;
    var floatTransitionDuration = parseFloat(transitionDuration); // Return 0 if element or transition duration is not found

    if (!floatTransitionDuration) {
      return 0;
    } // If multiple durations are defined, take the first


    transitionDuration = transitionDuration.split(',')[0];
    return parseFloat(transitionDuration) * MILLISECONDS_MULTIPLIER;
  };

  // Safety net: if the browser never fires transitionend (e.g. the element
  // was hidden mid-transition), synthesize one after duration + padding.
  var emulateTransitionEnd = function emulateTransitionEnd(element, duration) {
    var called = false;
    var durationPadding = 5;
    var emulatedDuration = duration + durationPadding;

    function listener() {
      called = true;
      element.removeEventListener(transitionEndEvent, listener);
    }

    element.addEventListener(transitionEndEvent, listener);
    window.setTimeout(function () {
      if (!called) {
        element.dispatchEvent(WinEvent(transitionEndEvent));
      }

      element.removeEventListener(transitionEndEvent, listener);
    }, emulatedDuration);
  };

  // When animation is enabled, prime every pane as faded-out.
  var detectAnimation = function detectAnimation(contentList, options) {
    if (options.animation) {
      contentList.forEach(function (content) {
        content.classList.add(ClassName.FADE);
        content.classList.add(ClassName.NONE);
      });
    }
  };

  // In linear mode clicking a header only suppresses the default action;
  // navigation happens exclusively through next()/previous()/to().
  // NOTE(review): callers pass an options argument that this factory
  // ignores (it declares no parameters) -- harmless but confusing.
  var buildClickStepLinearListener = function buildClickStepLinearListener() {
    return function clickStepLinearListener(event) {
      event.preventDefault();
    };
  };

  // In non-linear mode a header click jumps straight to the clicked step.
  var buildClickStepNonLinearListener = function buildClickStepNonLinearListener(options) {
    return function clickStepNonLinearListener(event) {
      event.preventDefault();
      var step = closest(event.target, options.selectors.steps);
      var stepperNode = closest(step, options.selectors.stepper);
      var stepper = stepperNode[customProperty];

      var stepIndex = stepper._steps.indexOf(step);

      show(stepperNode, stepIndex, options, function () {
        stepper._currentIndex = stepIndex;
      });
    };
  };

  var DEFAULT_OPTIONS = {
    linear: true,
    animation: false,
    selectors: {
      steps: '.step',
      trigger: '.step-trigger',
      stepper: '.bs-stepper'
    }
  };

  var Stepper =
  /*#__PURE__*/
  function () {
    // Bind a Stepper to `element`: collect step headers and their
    // data-target content panes, wire click listeners, show the first step.
    function Stepper(element, _options) {
      var _this = this;

      if (_options === void 0) {
        _options = {};
      }

      this._element = element;
      this._currentIndex = 0;
      this._stepsContents = [];
      this.options = _extends({}, DEFAULT_OPTIONS, {}, _options);
      this.options.selectors = _extends({}, DEFAULT_OPTIONS.selectors, {}, this.options.selectors);

      if (this.options.linear) {
        this._element.classList.add(ClassName.LINEAR);
      }

      this._steps = [].slice.call(this._element.querySelectorAll(this.options.selectors.steps));
      this._steps.filter(function (step) {
        return step.hasAttribute('data-target');
      }).forEach(function (step) {
        _this._stepsContents.push(_this._element.querySelector(step.getAttribute('data-target')));
      });

      detectAnimation(this._stepsContents, this.options);

      this._setLinkListeners();

      // Stash the instance on the root node so event handlers can find it.
      Object.defineProperty(this._element, customProperty, {
        value: this,
        writable: true
      });

      if (this._steps.length) {
        show(this._element, this._currentIndex, this.options, function () {});
      }
    } // Private


    var _proto = Stepper.prototype;

    _proto._setLinkListeners = function _setLinkListeners() {
      var _this2 = this;

      this._steps.forEach(function (step) {
        var trigger = step.querySelector(_this2.options.selectors.trigger);

        if (_this2.options.linear) {
          _this2._clickStepLinearListener = buildClickStepLinearListener(_this2.options);
          trigger.addEventListener('click', _this2._clickStepLinearListener);
        } else {
          _this2._clickStepNonLinearListener = buildClickStepNonLinearListener(_this2.options);
          trigger.addEventListener('click', _this2._clickStepNonLinearListener);
        }
      });
    } // Public
    ;

    // Advance one step, clamped to the last step.
    _proto.next = function next() {
      var _this3 = this;

      var nextStep = this._currentIndex + 1 <= this._steps.length - 1 ? this._currentIndex + 1 : this._steps.length - 1;
      show(this._element, nextStep, this.options, function () {
        _this3._currentIndex = nextStep;
      });
    };

    // Go back one step, clamped to the first step.
    _proto.previous = function previous() {
      var _this4 = this;

      var previousStep = this._currentIndex - 1 >= 0 ? this._currentIndex - 1 : 0;
      show(this._element, previousStep, this.options, function () {
        _this4._currentIndex = previousStep;
      });
    };

    // Jump to a 1-based step number; out-of-range values fall back to step 1.
    _proto.to = function to(stepNumber) {
      var _this5 = this;

      var tempIndex = stepNumber - 1;
      var nextStep = tempIndex >= 0 && tempIndex < this._steps.length ? tempIndex : 0;
      show(this._element, nextStep, this.options, function () {
        _this5._currentIndex = nextStep;
      });
    };

    // Return to the first step.
    _proto.reset = function reset() {
      var _this6 = this;

      show(this._element, 0, this.options, function () {
        _this6._currentIndex = 0;
      });
    };

    // Detach all listeners and drop every reference held by the instance.
    _proto.destroy = function destroy() {
      var _this7 = this;

      this._steps.forEach(function (step) {
        var trigger = step.querySelector(_this7.options.selectors.trigger);

        if (_this7.options.linear) {
          trigger.removeEventListener('click', _this7._clickStepLinearListener);
        } else {
          trigger.removeEventListener('click', _this7._clickStepNonLinearListener);
        }
      });

      this._element[customProperty] = undefined;
      this._element = undefined;
      this._currentIndex = undefined;
      this._steps = undefined;
      this._stepsContents = undefined;
      this._clickStepLinearListener = undefined;
      this._clickStepNonLinearListener = undefined;
    };

    return Stepper;
  }();

  return Stepper;

}));
//# sourceMappingURL=bs-stepper.js.map
PypiClean
/Hikka_TL-1.24.14-py3-none-any.whl/telethon/tl/custom/inlinebuilder.py
import hashlib

from .. import functions, types
from ... import utils

# Maps inline-result type name -> MIME types that imply it; used by
# document() to infer `type` from `mime_type` when not given explicitly.
_TYPE_TO_MIMES = {
    "gif": ["image/gif"],  # 'video/mp4' too, but that's used for video
    "article": ["text/html"],
    "audio": ["audio/mpeg"],
    "contact": [],
    "file": ["application/pdf", "application/zip"],  # actually any
    "geo": [],
    "photo": ["image/jpeg"],
    "sticker": ["image/webp", "application/x-tgsticker"],
    "venue": [],
    "video": ["video/mp4"],  # tdlib includes text/html for some reason
    "voice": ["audio/ogg"],
}


class InlineBuilder:
    """
    Helper class to allow defining `InlineQuery
    <telethon.events.inlinequery.InlineQuery>` ``results``.

    Common arguments to all methods are
    explained here to avoid repetition:

        text (`str`, optional):
            If present, the user will send a text message
            with this text upon being clicked.

        link_preview (`bool`, optional):
            Whether to show a link preview in the sent
            text message or not.

        geo (:tl:`InputGeoPoint`, :tl:`GeoPoint`,
        :tl:`InputMediaVenue`, :tl:`MessageMediaVenue`, optional):
            If present, it may either be a geo point or a venue.

        period (int, optional):
            The period in seconds to be used for geo points.

        contact (:tl:`InputMediaContact`, :tl:`MessageMediaContact`, optional):
            If present, it must be the contact information to send.

        game (`bool`, optional):
            May be `True` to indicate that the game will be sent.

        buttons (`list`, `custom.Button <telethon.tl.custom.button.Button>`,
        :tl:`KeyboardButton`, optional):
            Same as ``buttons`` for `client.send_message()
            <telethon.client.messages.MessageMethods.send_message>`.

        parse_mode (`str`, optional):
            Same as ``parse_mode`` for `client.send_message()
            <telethon.client.messageparse.MessageParseMethods.parse_mode>`.

        id (`str`, optional):
            The string ID to use for this result. If not present, it
            will be the SHA256 hexadecimal digest of converting the
            created :tl:`InputBotInlineResult` with empty ID to ``bytes()``,
            so that the ID will be deterministic for the same input.

            .. note::

                If two inputs are exactly the same, their IDs will be the
                same too. If you send two articles with the same ID, it
                will raise ``ResultIdDuplicateError``. Consider giving
                them an explicit ID if you need to send two results that
                are the same.
    """

    def __init__(self, client):
        # The TelegramClient used for uploads and parse-mode handling.
        self._client = client

    # noinspection PyIncorrectDocstring
    async def article(
            self, title, description=None,
            *, url=None, thumb=None, content=None,
            id=None, text=None, parse_mode=(), link_preview=True,
            geo=None, period=60, contact=None, game=False, buttons=None
    ):
        """
        Creates new inline result of article type.

        Args:
            title (`str`):
                The title to be shown for this result.

            description (`str`, optional):
                Further explanation of what this result means.

            url (`str`, optional):
                The URL to be shown for this result.

            thumb (:tl:`InputWebDocument`, optional):
                The thumbnail to be shown for this result.
                For now it has to be a :tl:`InputWebDocument` if present.

            content (:tl:`InputWebDocument`, optional):
                The content to be shown for this result.
                For now it has to be a :tl:`InputWebDocument` if present.

        Example:
            .. code-block:: python

                results = [
                    # Option with title and description sending a message.
                    builder.article(
                        title='First option',
                        description='This is the first option',
                        text='Text sent after clicking this option',
                    ),
                    # Option with title URL to be opened when clicked.
                    builder.article(
                        title='Second option',
                        url='https://example.com',
                        text='Text sent if the user clicks the option and not the URL',
                    ),
                    # Sending a message with buttons.
                    # You can use a list or a list of lists to include more buttons.
                    builder.article(
                        title='Third option',
                        text='Text sent with buttons below',
                        buttons=Button.url('https://example.com'),
                    ),
                ]
        """
        # TODO Does 'article' work always?
        # article, photo, gif, mpeg4_gif, video, audio,
        # voice, document, location, venue, contact, game
        result = types.InputBotInlineResult(
            id=id or "",
            type="article",
            send_message=await self._message(
                text=text,
                parse_mode=parse_mode,
                link_preview=link_preview,
                geo=geo,
                period=period,
                contact=contact,
                game=game,
                buttons=buttons,
            ),
            title=title,
            description=description,
            url=url,
            thumb=thumb,
            content=content,
        )
        # Deterministic ID: hash of the serialized result built with id="".
        if id is None:
            result.id = hashlib.sha256(bytes(result)).hexdigest()

        return result

    # noinspection PyIncorrectDocstring
    async def photo(
            self, file, *, id=None, include_media=True,
            text=None, parse_mode=(), link_preview=True,
            geo=None, period=60, contact=None, game=False, buttons=None
    ):
        """
        Creates a new inline result of photo type.

        Args:
            include_media (`bool`, optional):
                Whether the photo file used to display the result should be
                included in the message itself or not. By default, the photo
                is included, and the text parameter alters the caption.

            file (`obj`, optional):
                Same as ``file`` for `client.send_file()
                <telethon.client.uploads.UploadMethods.send_file>`.

        Example:
            .. code-block:: python

                results = [
                    # Sending just the photo when the user selects it.
                    builder.photo('/path/to/photo.jpg'),

                    # Including a caption with some in-memory photo.
                    photo_bytesio = ...
                    builder.photo(
                        photo_bytesio,
                        text='This will be the caption of the sent photo',
                    ),

                    # Sending just the message without including the photo.
                    builder.photo(
                        photo,
                        text='This will be a normal text message',
                        include_media=False,
                    ),
                ]
        """
        # Fast path: `file` is already an input photo; otherwise upload it.
        try:
            fh = utils.get_input_photo(file)
        except TypeError:
            _, media, _ = await self._client._file_to_media(
                file, allow_cache=True, as_image=True
            )
            if isinstance(media, types.InputPhoto):
                fh = media
            else:
                # Upload once through Telegram so we get a reusable InputPhoto.
                r = await self._client(
                    functions.messages.UploadMediaRequest(
                        types.InputPeerSelf(), media=media
                    )
                )
                fh = utils.get_input_photo(r.photo)

        result = types.InputBotInlineResultPhoto(
            id=id or "",
            type="photo",
            photo=fh,
            send_message=await self._message(
                text=text or "",
                parse_mode=parse_mode,
                link_preview=link_preview,
                media=include_media,
                geo=geo,
                period=period,
                contact=contact,
                game=game,
                buttons=buttons,
            ),
        )
        if id is None:
            result.id = hashlib.sha256(bytes(result)).hexdigest()

        return result

    # noinspection PyIncorrectDocstring
    async def document(
            self, file, title=None, *, description=None, type=None,
            mime_type=None, attributes=None, force_document=False,
            voice_note=False, video_note=False, use_cache=True, id=None,
            text=None, parse_mode=(), link_preview=True,
            geo=None, period=60, contact=None, game=False, buttons=None,
            include_media=True
    ):
        """
        Creates a new inline result of document type.

        `use_cache`, `mime_type`, `attributes`, `force_document`,
        `voice_note` and `video_note` are described in `client.send_file
        <telethon.client.uploads.UploadMethods.send_file>`.

        Args:
            file (`obj`):
                Same as ``file`` for `client.send_file()
                <telethon.client.uploads.UploadMethods.send_file>`.

            title (`str`, optional):
                The title to be shown for this result.

            description (`str`, optional):
                Further explanation of what this result means.

            type (`str`, optional):
                The type of the document. May be one of: article, audio,
                contact, file, geo, gif, photo, sticker, venue, video, voice.

                It will be automatically set if ``mime_type`` is specified,
                and default to ``'file'`` if no matching mime type is found.
                you may need to pass ``attributes`` in order to use ``type``
                effectively.

            attributes (`list`, optional):
                Optional attributes that override the inferred ones, like
                :tl:`DocumentAttributeFilename` and so on.

            include_media (`bool`, optional):
                Whether the document file used to display the result should be
                included in the message itself or not. By default, the
                document is included, and the text parameter alters the
                caption.

        Example:
            .. code-block:: python

                results = [
                    # Sending just the file when the user selects it.
                    builder.document('/path/to/file.pdf'),

                    # Including a caption with some in-memory file.
                    file_bytesio = ...
                    builder.document(
                        file_bytesio,
                        text='This will be the caption of the sent file',
                    ),

                    # Sending just the message without including the file.
                    builder.document(
                        photo,
                        text='This will be a normal text message',
                        include_media=False,
                    ),
                ]
        """
        if type is None:
            if voice_note:
                type = "voice"
            elif mime_type:
                # Infer the result type from the MIME table above.
                # NOTE: the `break` only exits the inner loop; harmless
                # since no MIME string appears under two types.
                for ty, mimes in _TYPE_TO_MIMES.items():
                    for mime in mimes:
                        if mime_type == mime:
                            type = ty
                            break

            if type is None:
                type = "file"

        # Fast path: already an input document; otherwise upload it first.
        try:
            fh = utils.get_input_document(file)
        except TypeError:
            _, media, _ = await self._client._file_to_media(
                file,
                mime_type=mime_type,
                attributes=attributes,
                force_document=force_document,
                voice_note=voice_note,
                video_note=video_note,
                allow_cache=use_cache,
            )
            if isinstance(media, types.InputDocument):
                fh = media
            else:
                r = await self._client(
                    functions.messages.UploadMediaRequest(
                        types.InputPeerSelf(), media=media
                    )
                )
                fh = utils.get_input_document(r.document)

        result = types.InputBotInlineResultDocument(
            id=id or "",
            type=type,
            document=fh,
            send_message=await self._message(
                # Empty string for text if there's media but text is None.
                # We may want to display a document but send text; however
                # default to sending the media (without text, i.e. stickers).
                text=text or "",
                parse_mode=parse_mode,
                link_preview=link_preview,
                media=include_media,
                geo=geo,
                period=period,
                contact=contact,
                game=game,
                buttons=buttons,
            ),
            title=title,
            description=description,
        )
        if id is None:
            result.id = hashlib.sha256(bytes(result)).hexdigest()

        return result

    # noinspection PyIncorrectDocstring
    async def game(
            self, short_name, *,
            id=None, text=None, parse_mode=(), link_preview=True,
            geo=None, period=60, contact=None, game=False, buttons=None
    ):
        """
        Creates a new inline result of game type.

        Args:
            short_name (`str`):
                The short name of the game to use.
        """
        result = types.InputBotInlineResultGame(
            id=id or "",
            short_name=short_name,
            send_message=await self._message(
                text=text,
                parse_mode=parse_mode,
                link_preview=link_preview,
                geo=geo,
                period=period,
                contact=contact,
                game=game,
                buttons=buttons,
            ),
        )
        if id is None:
            result.id = hashlib.sha256(bytes(result)).hexdigest()

        return result

    async def _message(
            self, *,
            text=None, parse_mode=(), link_preview=True, media=False,
            geo=None, period=60, contact=None, game=False, buttons=None
    ):
        # Build the InputBotInlineMessage* for a result.  Exactly one of
        # text/geo/contact/game must be set; `media=True` means the message
        # should carry the result's own media (photo/document) as well.
        #
        # Empty strings are valid but false-y; if they're empty use dummy '\0'
        args = ("\0" if text == "" else text, geo, contact, game)
        if sum(1 for x in args if x is not None and x is not False) != 1:
            raise ValueError(
                "Must set exactly one of text, geo, contact or game (set {})".format(
                    ", ".join(
                        x[0] for x in zip("text geo contact game".split(), args) if x[1]
                    )
                    or "none"
                )
            )

        markup = self._client.build_reply_markup(buttons, inline_only=True)
        if text is not None:
            # Resolve formatting entities according to the parse mode.
            text, msg_entities = await self._client._parse_message_text(
                text, parse_mode
            )
            if media:
                # "MediaAuto" means it will use whatever media the inline
                # result itself has (stickers, photos, or documents), while
                # respecting the user's text (caption) and formatting.
                return types.InputBotInlineMessageMediaAuto(
                    message=text, entities=msg_entities, reply_markup=markup
                )
            else:
                return types.InputBotInlineMessageText(
                    message=text,
                    no_webpage=not link_preview,
                    entities=msg_entities,
                    reply_markup=markup,
                )
        elif isinstance(geo, (types.InputGeoPoint, types.GeoPoint)):
            return types.InputBotInlineMessageMediaGeo(
                geo_point=utils.get_input_geo(geo), period=period, reply_markup=markup
            )
        elif isinstance(geo, (types.InputMediaVenue, types.MessageMediaVenue)):
            # Venues carry their point under different attribute names
            # depending on which TL type was passed in.
            if isinstance(geo, types.InputMediaVenue):
                geo_point = geo.geo_point
            else:
                geo_point = geo.geo

            return types.InputBotInlineMessageMediaVenue(
                geo_point=geo_point,
                title=geo.title,
                address=geo.address,
                provider=geo.provider,
                venue_id=geo.venue_id,
                venue_type=geo.venue_type,
                reply_markup=markup,
            )
        elif isinstance(contact, (types.InputMediaContact, types.MessageMediaContact)):
            return types.InputBotInlineMessageMediaContact(
                phone_number=contact.phone_number,
                first_name=contact.first_name,
                last_name=contact.last_name,
                vcard=contact.vcard,
                reply_markup=markup,
            )
        elif game:
            return types.InputBotInlineMessageGame(reply_markup=markup)
        else:
            raise ValueError("No text, game or valid geo or contact given")
PypiClean
/LineDream-0.3.0.tar.gz/LineDream-0.3.0/README.md
![](./example.svg)

LineDream is a generative art library for Python. It is heavily influenced by P5 and Processing; however, it takes a more object-oriented approach, with less global state in regards to styling and transformations.

The current output target is SVG, as this provides a robust output format for vector graphics. There is not yet support for a draw loop - it is single-frame output, but you could use a loop to simulate this. There are future plans to implement an OpenGL render window.

The LineDream library was originally created to make art for a pen plotter; however, the inner object structure could be applied to many different rendering platforms.

Installation
------------
`pip install LineDream`

Documentation
-------------
https://linedream.marcrleonard.com/documentation/

Example
-------
```python
import random
from LineDream import Line, Canvas, Rectangle, Square, Ellipse, Point, Circle, CircleMath, TextLine

Canvas.width=900
Canvas.height=500
Canvas.background_color='black'

for pp in range(100):
    x = random.randint(0, Canvas.width)
    y = random.randint(0, 400)

    coords = (x,y)

    p = Point(*coords)
    p.stroke_color= 'white'

c_size = 180
circle_center = Canvas.width/2, Canvas.height+c_size/2

c = Circle(*circle_center, 180)
c.stroke_color='white'

c = Circle(*circle_center, 200)
c.stroke_color='white'

c = Circle(*circle_center, 220)
c.stroke_color='white'

long=True
for degrees in range(360,180,-10):

    dist_from_circle = 250
    line_len = 40

    if long:
        line_len = 100
        long=False
    else:
        long=True

    d_x_s, d_y_s = CircleMath.distance_to_coords(degrees, dist_from_circle)
    x1 = circle_center[0] + d_x_s
    y1 = circle_center[1] + d_y_s

    d_x, d_y = CircleMath.distance_to_coords(degrees, dist_from_circle + line_len)
    x2 = circle_center[0] + d_x
    y2 = circle_center[1] + d_y

    Line([(x1,y1), (x2,y2)], stroke_color='white')

# EXPERIMENTAL
tt = TextLine('LineDream', kerning=10, stroke_color='white', stroke_width=2)
tt.transform(100, 100)
tt.scale(1.4)

Canvas.save(f'example.svg')
```

Todos:
-----
- Better document colors/opacity/styles for the SVG
- Integrate TextPath with Hershey (initial implementation complete)
- Add .scale() (partially implemented in some classes)
- Add 'tag' notion for lines
- Add `Group` to the example

Internal
--------
To push to PyPI run:
```
python setup.py update
```
PypiClean
/INGInious-0.8.7.tar.gz/INGInious-0.8.7/inginious/frontend/static/js/codemirror/mode/rst/rst.js
// CodeMirror mode for reStructuredText.
// Two modes are defined:
//   * 'rst'      -- an overlay adding inline-markup styling (strong, emphasis,
//                   literals, numbers, URIs) on top of 'rst-base';
//   * 'rst-base' -- a staged state machine for block constructs (directives,
//                   roles, links, footnotes, citations) that delegates to the
//                   python / stex inner modes inside code and math bodies.
(function(mod) { if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"), require("../python/python"), require("../stex/stex"), require("../../addon/mode/overlay")); else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror", "../python/python", "../stex/stex", "../../addon/mode/overlay"], mod); else // Plain browser env
mod(CodeMirror); })(function(CodeMirror) { "use strict";

CodeMirror.defineMode('rst', function (config, options) {
// Inline markup patterns, all anchored ('^') at the current stream position.
var rx_strong = /^\*\*[^\*\s](?:[^\*]*[^\*\s])?\*\*/;
var rx_emphasis = /^\*[^\*\s](?:[^\*]*[^\*\s])?\*/;
var rx_literal = /^``[^`\s](?:[^`]*[^`\s])``/;
var rx_number = /^(?:[\d]+(?:[\.,]\d+)*)/;
var rx_positive = /^(?:\s\+[\d]+(?:[\.,]\d+)*)/;
var rx_negative = /^(?:\s\-[\d]+(?:[\.,]\d+)*)/;
// URI recognition, assembled from protocol + domain + optional path pieces.
var rx_uri_protocol = "[Hh][Tt][Tt][Pp][Ss]?://";
var rx_uri_domain = "(?:[\\d\\w.-]+)\\.(?:\\w{2,6})";
var rx_uri_path = "(?:/[\\d\\w\\#\\%\\&\\-\\.\\,\\/\\:\\=\\?\\~]+)*";
var rx_uri = new RegExp("^" + rx_uri_protocol + rx_uri_domain + rx_uri_path);
// The overlay consumes one inline construct per call; when nothing matches it
// advances character by character until the next possible match, so runs of
// plain text come back as a single null token.
var overlay = { token: function (stream) {
if (stream.match(rx_strong) && stream.match (/\W+|$/, false)) return 'strong';
if (stream.match(rx_emphasis) && stream.match (/\W+|$/, false)) return 'em';
if (stream.match(rx_literal) && stream.match (/\W+|$/, false)) return 'string-2';
if (stream.match(rx_number)) return 'number';
if (stream.match(rx_positive)) return 'positive';
if (stream.match(rx_negative)) return 'negative';
if (stream.match(rx_uri)) return 'link';
while (stream.next() != null) { if (stream.match(rx_strong, false)) break; if (stream.match(rx_emphasis, false)) break; if (stream.match(rx_literal, false)) break; if (stream.match(rx_number, false)) break; if (stream.match(rx_positive, false)) break; if (stream.match(rx_negative, false)) break; if (stream.match(rx_uri, false)) break; }
return null; } };
var mode = CodeMirror.getMode( config, options.backdrop || 'rst-base' );
return CodeMirror.overlayMode(mode, overlay, true); // combine
}, 'python', 'stex');

///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////

CodeMirror.defineMode('rst-base', function (config) {
// format('{0}-{1}', a, b) -> "a-b": tiny positional-template helper used to
// build the regular expressions below out of shared grammar fragments.
function format(string) { var args = Array.prototype.slice.call(arguments, 1); return string.replace(/{(\d+)}/g, function (match, n) { return typeof args[n] != 'undefined' ? args[n] : match; }); }

// Inner modes used inside python / math / latex directive and role bodies.
var mode_python = CodeMirror.getMode(config, 'python');
var mode_stex = CodeMirror.getMode(config, 'stex');

// Shared grammar fragments (strings) and their anchored compiled forms.
var SEPA = "\\s+";
var TAIL = "(?:\\s*|\\W|$)", rx_TAIL = new RegExp(format('^{0}', TAIL));
var NAME = "(?:[^\\W\\d_](?:[\\w!\"#$%&'()\\*\\+,\\-\\.\/:;<=>\\?]*[^\\W_])?)", rx_NAME = new RegExp(format('^{0}', NAME));
var NAME_WWS = "(?:[^\\W\\d_](?:[\\w\\s!\"#$%&'()\\*\\+,\\-\\.\/:;<=>\\?]*[^\\W_])?)";
var REF_NAME = format('(?:{0}|`{1}`)', NAME, NAME_WWS);
var TEXT1 = "(?:[^\\s\\|](?:[^\\|]*[^\\s\\|])?)";
var TEXT2 = "(?:[^\\`]+)", rx_TEXT2 = new RegExp(format('^{0}', TEXT2));

// Block-level constructs.
var rx_section = new RegExp( "^([!'#$%&\"()*+,-./:;<=>?@\\[\\\\\\]^_`{|}~])\\1{3,}\\s*$");
var rx_explicit = new RegExp( format('^\\.\\.{0}', SEPA));
var rx_link = new RegExp( format('^_{0}:{1}|^__:{1}', REF_NAME, TAIL));
var rx_directive = new RegExp( format('^{0}::{1}', REF_NAME, TAIL));
var rx_substitution = new RegExp( format('^\\|{0}\\|{1}{2}::{3}', TEXT1, SEPA, REF_NAME, TAIL));
var rx_footnote = new RegExp( format('^\\[(?:\\d+|#{0}?|\\*)]{1}', REF_NAME, TAIL));
var rx_citation = new RegExp( format('^\\[{0}\\]{1}', REF_NAME, TAIL));
// Inline references.
var rx_substitution_ref = new RegExp( format('^\\|{0}\\|', TEXT1));
var rx_footnote_ref = new RegExp( format('^\\[(?:\\d+|#{0}?|\\*)]_', REF_NAME));
var rx_citation_ref = new RegExp( format('^\\[{0}\\]_', REF_NAME));
var rx_link_ref1 = new RegExp( format('^{0}__?', REF_NAME));
var rx_link_ref2 = new RegExp( format('^`{0}`_', TEXT2));
// Roles: prefix form ":name:`text`", suffix form "`text`:name:", bare ":name:".
var rx_role_pre = new RegExp( format('^:{0}:`{1}`{2}', NAME, TEXT2, TAIL));
var rx_role_suf = new RegExp( format('^`{1}`:{0}:{2}', NAME, TEXT2, TAIL));
var rx_role = new RegExp( format('^:{0}:{1}', NAME, TAIL));
// Sub-parts consumed stage by stage inside directives/substitutions/links.
var rx_directive_name = new RegExp(format('^{0}', REF_NAME));
var rx_directive_tail = new RegExp(format('^::{0}', TAIL));
var rx_substitution_text = new RegExp(format('^\\|{0}\\|', TEXT1));
var rx_substitution_sepa = new RegExp(format('^{0}', SEPA));
var rx_substitution_name = new RegExp(format('^{0}', REF_NAME));
var rx_substitution_tail = new RegExp(format('^::{0}', TAIL));
var rx_link_head = new RegExp("^_");
var rx_link_name = new RegExp(format('^{0}|_', REF_NAME));
var rx_link_tail = new RegExp(format('^:{0}', TAIL));
var rx_verbatim = new RegExp('^::\\s*$');
var rx_examples = new RegExp('^\\s+(?:>>>|In \\[\\d+\\]:)\\s');

// to_normal: tokenizer for ordinary body text. Multi-token constructs are
// consumed in numbered stages: change()/context() records which regex is being
// matched (the "phase") and how far through it we are (the "stage"), and the
// next call resumes from there. Math/latex role bodies are handed to the stex
// inner mode via state.tmp.
function to_normal(stream, state) { var token = null;
if (stream.sol() && stream.match(rx_examples, false)) { change(state, to_mode, { mode: mode_python, local: CodeMirror.startState(mode_python) }); }
else if (stream.sol() && stream.match(rx_explicit)) { change(state, to_explicit); token = 'meta'; }
else if (stream.sol() && stream.match(rx_section)) { change(state, to_normal); token = 'header'; }
else if (phase(state) == rx_role_pre || stream.match(rx_role_pre, false)) { switch (stage(state)) {
case 0: change(state, to_normal, context(rx_role_pre, 1)); stream.match(/^:/); token = 'meta'; break;
case 1: change(state, to_normal, context(rx_role_pre, 2)); stream.match(rx_NAME); token = 'keyword'; if (stream.current().match(/^(?:math|latex)/)) { state.tmp_stex = true; } break;
case 2: change(state, to_normal, context(rx_role_pre, 3)); stream.match(/^:`/); token = 'meta'; break;
case 3: if (state.tmp_stex) { state.tmp_stex = undefined; state.tmp = { mode: mode_stex, local: CodeMirror.startState(mode_stex) }; } if (state.tmp) { if (stream.peek() == '`') { change(state, to_normal, context(rx_role_pre, 4)); state.tmp = undefined; break; } token = state.tmp.mode.token(stream, state.tmp.local); break; } change(state, to_normal, context(rx_role_pre, 4)); stream.match(rx_TEXT2); token = 'string'; break;
case 4: change(state, to_normal, context(rx_role_pre, 5)); stream.match(/^`/); token = 'meta'; break;
case 5: change(state, to_normal, context(rx_role_pre, 6)); stream.match(rx_TAIL); break;
default: change(state, to_normal); } }
else if (phase(state) == rx_role_suf || stream.match(rx_role_suf, false)) { switch (stage(state)) {
case 0: change(state, to_normal, context(rx_role_suf, 1)); stream.match(/^`/); token = 'meta'; break;
case 1: change(state, to_normal, context(rx_role_suf, 2)); stream.match(rx_TEXT2); token = 'string'; break;
case 2: change(state, to_normal, context(rx_role_suf, 3)); stream.match(/^`:/); token = 'meta'; break;
case 3: change(state, to_normal, context(rx_role_suf, 4)); stream.match(rx_NAME); token = 'keyword'; break;
case 4: change(state, to_normal, context(rx_role_suf, 5)); stream.match(/^:/); token = 'meta'; break;
case 5: change(state, to_normal, context(rx_role_suf, 6)); stream.match(rx_TAIL); break;
default: change(state, to_normal); } }
else if (phase(state) == rx_role || stream.match(rx_role, false)) { switch (stage(state)) {
case 0: change(state, to_normal, context(rx_role, 1)); stream.match(/^:/); token = 'meta'; break;
case 1: change(state, to_normal, context(rx_role, 2)); stream.match(rx_NAME); token = 'keyword'; break;
case 2: change(state, to_normal, context(rx_role, 3)); stream.match(/^:/); token = 'meta'; break;
case 3: change(state, to_normal, context(rx_role, 4)); stream.match(rx_TAIL); break;
default: change(state, to_normal); } }
else if (phase(state) == rx_substitution_ref || stream.match(rx_substitution_ref, false)) { switch (stage(state)) {
case 0: change(state, to_normal, context(rx_substitution_ref, 1)); stream.match(rx_substitution_text); token = 'variable-2'; break;
case 1: change(state, to_normal, context(rx_substitution_ref, 2)); if (stream.match(/^_?_?/)) token = 'link'; break;
default: change(state, to_normal); } }
else if (stream.match(rx_footnote_ref)) { change(state, to_normal); token = 'quote'; }
else if (stream.match(rx_citation_ref)) { change(state, to_normal); token = 'quote'; }
else if (stream.match(rx_link_ref1)) { change(state, to_normal); if (!stream.peek() || stream.peek().match(/^\W$/)) { token = 'link'; } }
else if (phase(state) == rx_link_ref2 || stream.match(rx_link_ref2, false)) { switch (stage(state)) {
case 0: if (!stream.peek() || stream.peek().match(/^\W$/)) { change(state, to_normal, context(rx_link_ref2, 1)); } else { stream.match(rx_link_ref2); } break;
case 1: change(state, to_normal, context(rx_link_ref2, 2)); stream.match(/^`/); token = 'link'; break;
case 2: change(state, to_normal, context(rx_link_ref2, 3)); stream.match(rx_TEXT2); break;
case 3: change(state, to_normal, context(rx_link_ref2, 4)); stream.match(/^`_/); token = 'link'; break;
default: change(state, to_normal); } }
else if (stream.match(rx_verbatim)) { change(state, to_verbatim); }
else { if (stream.next()) change(state, to_normal); }
return token; }

// to_explicit: tokenizer for ".."-introduced explicit markup (substitution
// definitions, directives, hyperlink targets, footnotes, citations). Anything
// unrecognised after ".." is treated as a comment block. "python" and
// "math"/"latex" directives switch into the matching inner mode.
function to_explicit(stream, state) { var token = null;
if (phase(state) == rx_substitution || stream.match(rx_substitution, false)) { switch (stage(state)) {
case 0: change(state, to_explicit, context(rx_substitution, 1)); stream.match(rx_substitution_text); token = 'variable-2'; break;
case 1: change(state, to_explicit, context(rx_substitution, 2)); stream.match(rx_substitution_sepa); break;
case 2: change(state, to_explicit, context(rx_substitution, 3)); stream.match(rx_substitution_name); token = 'keyword'; break;
case 3: change(state, to_explicit, context(rx_substitution, 4)); stream.match(rx_substitution_tail); token = 'meta'; break;
default: change(state, to_normal); } }
else if (phase(state) == rx_directive || stream.match(rx_directive, false)) { switch (stage(state)) {
case 0: change(state, to_explicit, context(rx_directive, 1)); stream.match(rx_directive_name); token = 'keyword'; if (stream.current().match(/^(?:math|latex)/)) state.tmp_stex = true; else if (stream.current().match(/^python/)) state.tmp_py = true; break;
case 1: change(state, to_explicit, context(rx_directive, 2)); stream.match(rx_directive_tail); token = 'meta'; if (stream.match(/^latex\s*$/) || state.tmp_stex) { state.tmp_stex = undefined; change(state, to_mode, { mode: mode_stex, local: CodeMirror.startState(mode_stex) }); } break;
case 2: change(state, to_explicit, context(rx_directive, 3)); if (stream.match(/^python\s*$/) || state.tmp_py) { state.tmp_py = undefined; change(state, to_mode, { mode: mode_python, local: CodeMirror.startState(mode_python) }); } break;
default: change(state, to_normal); } }
else if (phase(state) == rx_link || stream.match(rx_link, false)) { switch (stage(state)) {
case 0: change(state, to_explicit, context(rx_link, 1)); stream.match(rx_link_head); stream.match(rx_link_name); token = 'link'; break;
case 1: change(state, to_explicit, context(rx_link, 2)); stream.match(rx_link_tail); token = 'meta'; break;
default: change(state, to_normal); } }
else if (stream.match(rx_footnote)) { change(state, to_normal); token = 'quote'; }
else if (stream.match(rx_citation)) { change(state, to_normal); token = 'quote'; }
else { stream.eatSpace(); if (stream.eol()) { change(state, to_normal); } else { stream.skipToEnd(); change(state, to_comment); token = 'comment'; } }
return token; }

// Block continuations: indented lines after a comment or "::" stay in the
// block style until a non-indented line ends it.
function to_comment(stream, state) { return as_block(stream, state, 'comment'); }
function to_verbatim(stream, state) { return as_block(stream, state, 'meta'); }
function as_block(stream, state, token) { if (stream.eol() || stream.eatSpace()) { stream.skipToEnd(); return token; } else { change(state, to_normal); return null; } }

// to_mode: delegate to an inner mode (python / stex) while lines stay
// indented; a line starting without leading whitespace returns to normal.
function to_mode(stream, state) { if (state.ctx.mode && state.ctx.local) { if (stream.sol()) { if (!stream.eatSpace()) change(state, to_normal); return null; } return state.ctx.mode.token(stream, state.ctx.local); } change(state, to_normal); return null; }

// State-machine plumbing: the context records which construct (phase) and
// which step within it (stage) the tokenizer is in.
function context(phase, stage, mode, local) { return {phase: phase, stage: stage, mode: mode, local: local}; }
function change(state, tok, ctx) { state.tok = tok; state.ctx = ctx || {}; }
function stage(state) { return state.ctx.stage || 0; }
function phase(state) { return state.ctx.phase; }

// Public mode object.
return { startState: function () { return {tok: to_normal, ctx: context(undefined, 0)}; },
copyState: function (state) { var ctx = state.ctx, tmp = state.tmp; if (ctx.local) ctx = {mode: ctx.mode, local: CodeMirror.copyState(ctx.mode, ctx.local)}; if (tmp) tmp = {mode: tmp.mode, local: CodeMirror.copyState(tmp.mode, tmp.local)}; return {tok: state.tok, ctx: ctx, tmp: tmp}; },
innerMode: function (state) { return state.tmp ? {state: state.tmp.local, mode: state.tmp.mode} : state.ctx.mode ? {state: state.ctx.local, mode: state.ctx.mode} : null; },
token: function (stream, state) { return state.tok(stream, state); } };
}, 'python', 'stex');

///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////

CodeMirror.defineMIME('text/x-rst', 'rst');

///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////

});
PypiClean
/AutoDiffpyyy-1.0.0.tar.gz/AutoDiffpyyy-1.0.0/AutoDiffpy/ElementaryFunc.py
import numpy as np
from dual import Dual
from Node import Node

# Every elementary function below dispatches on the type of its argument:
#   * Node  -> reverse mode: create a child node and record the local
#              partial derivative on x.children
#   * Dual  -> forward mode: propagate (value, derivative) through the op
#   * other -> assumed numeric; evaluate with plain numpy
# The shared dispatch lives in _apply(). Functions with extra domain checks
# (arcsin, arccos and the log family) are written out longhand so their
# original per-branch error behaviour is preserved exactly.


def _apply(x, fn, dfn):
    """Apply elementary function ``fn`` (with first derivative ``dfn``) to x.

    Parameters
    ----------
    x : int, float, Dual, Node
        Input value.
    fn : callable
        The function itself, e.g. ``np.sin``.
    dfn : callable
        Its first derivative, e.g. ``np.cos``.

    Returns
    -------
    int, float, Dual, Node
        Result of the same kind as the input.
    """
    if isinstance(x, Node):
        child = Node(fn(x.value))
        x.children.append({'partial_der': dfn(x.value), 'node': child})
        return child
    if isinstance(x, Dual):
        # dfn(x.real) is a numpy scalar, so this also broadcasts over
        # list-valued x.dual (matching the original np.asarray behaviour).
        return Dual(fn(x.real), dfn(x.real) * x.dual)
    return fn(x)


# Trig functions

def sin(x):
    """Return the sine of x (derivative: cos)."""
    return _apply(x, np.sin, np.cos)


def cos(x):
    """Return the cosine of x (derivative: -sin)."""
    return _apply(x, np.cos, lambda v: -np.sin(v))


def tan(x):
    """Return the tangent of x (derivative: sec**2 = 1/cos**2)."""
    return _apply(x, np.tan, lambda v: 1 / np.cos(v) ** 2)


# Inverse trig functions

def arcsin(x):
    """Return the arcsine of x (derivative: 1/sqrt(1 - x**2)).

    Raises
    ------
    ValueError
        For Node inputs with ``|x| >= 1`` (derivative undefined there).
        NOTE(review): the Dual branch performs no such check and yields nan
        instead -- confirm whether that asymmetry is intended.
    """
    if isinstance(x, Node):
        child = Node(np.arcsin(x.value))
        temp = 1 - x.value ** 2
        if temp <= 0:
            raise ValueError('The domain of sqrt should be larger than 0')
        x.children.append({'partial_der': 1 / np.sqrt(temp), 'node': child})
        return child
    if isinstance(x, Dual):
        return Dual(np.arcsin(x.real), x.dual / np.sqrt(1 - x.real ** 2))
    return np.arcsin(x)


def arccos(x):
    """Return the arccosine of x (derivative: -1/sqrt(1 - x**2)).

    Raises
    ------
    ValueError
        For Node inputs with ``|x| >= 1`` (see note on :func:`arcsin`).
    """
    if isinstance(x, Node):
        child = Node(np.arccos(x.value))
        temp = 1 - x.value ** 2
        if temp <= 0:
            raise ValueError('The domain of sqrt should be larger than 0')
        x.children.append({'partial_der': -1 / np.sqrt(temp), 'node': child})
        return child
    if isinstance(x, Dual):
        return Dual(np.arccos(x.real), -x.dual / np.sqrt(1 - x.real ** 2))
    return np.arccos(x)


def arctan(x):
    """Return the arctangent of x (derivative: 1/(1 + x**2))."""
    return _apply(x, np.arctan, lambda v: 1 / (1 + v ** 2))


# Exponentials

def exp(x):
    """Return e**x (its own derivative)."""
    return _apply(x, np.exp, np.exp)


def exponential(base, x):
    """Return ``base**x``.

    Parameters
    ----------
    base : int, float
        Base of the exponential.
    x : int, float, Dual, Node
        Exponent.

    Notes
    -----
    Fixes two defects in the original Node branch: the node value was
    computed as ``e**x`` instead of ``base**x``, and the partial derivative
    was missing the ``ln(base)`` factor (d/dx base**x = ln(base)*base**x),
    which the Dual branch already applied correctly.
    """
    return _apply(x, lambda v: base ** v, lambda v: np.log(base) * base ** v)


# Hyperbolic functions

def sinh(x):
    """Return the hyperbolic sine of x (derivative: cosh)."""
    return _apply(x, np.sinh, np.cosh)


def cosh(x):
    """Return the hyperbolic cosine of x (derivative: sinh)."""
    return _apply(x, np.cosh, np.sinh)


def tanh(x):
    """Return the hyperbolic tangent of x (derivative: 1/cosh**2)."""
    return _apply(x, np.tanh, lambda v: 1 / np.cosh(v) ** 2)


# Logistic function

def logistic(x):
    """Return the logistic sigmoid 1/(1 + e**-x).

    Derivative: e**-x / (1 + e**-x)**2  (equivalently s(x)*(1 - s(x))).
    """
    return _apply(x,
                  lambda v: 1 / (1 + np.exp(-v)),
                  lambda v: np.exp(-v) / (1 + np.exp(-v)) ** 2)


# Logarithms

def log(x):
    """Return the natural logarithm of x (derivative: 1/x).

    Raises
    ------
    ValueError
        For Dual inputs with ``x <= 0``.
        NOTE(review): the Node branch performs no such check -- confirm
        whether that asymmetry is intended.
    """
    if isinstance(x, Node):
        child = Node(np.log(x.value))
        x.children.append({'partial_der': 1 / x.value, 'node': child})
        return child
    if isinstance(x, Dual):
        if x.real <= 0:
            raise ValueError('Domain of logarithm should be greater than 0')
        return Dual(np.log(x.real), x.dual / x.real)
    return np.log(x)


def log2(x):
    """Return the base-2 logarithm of x (derivative: 1/(x*ln 2)).

    Raises
    ------
    ValueError
        For Dual inputs with ``x <= 0`` (see note on :func:`log`).
    """
    if isinstance(x, Node):
        child = Node(np.log2(x.value))
        x.children.append({'partial_der': 1 / (x.value * np.log(2)), 'node': child})
        return child
    if isinstance(x, Dual):
        if x.real <= 0:
            raise ValueError('Domain of logarithm should be greater than 0')
        return Dual(np.log2(x.real), x.dual / (x.real * np.log(2)))
    return np.log2(x)


def log10(x):
    """Return the base-10 logarithm of x (derivative: 1/(x*ln 10)).

    Raises
    ------
    ValueError
        For Dual inputs with ``x <= 0`` (see note on :func:`log`).
    """
    if isinstance(x, Node):
        child = Node(np.log10(x.value))
        x.children.append({'partial_der': 1 / (x.value * np.log(10)), 'node': child})
        return child
    if isinstance(x, Dual):
        if x.real <= 0:
            raise ValueError('Domain of logarithm should be greater than 0')
        return Dual(np.log10(x.real), x.dual / (x.real * np.log(10)))
    return np.log10(x)


def logarithm(x, base):
    """Return the logarithm of x in an arbitrary ``base``.

    Computed as ln(x)/ln(base); derivative: 1/(x*ln(base)).

    Raises
    ------
    ValueError
        For Dual inputs with ``x <= 0`` (see note on :func:`log`).
    """
    if isinstance(x, Node):
        child = Node(np.log(x.value) / np.log(base))
        x.children.append({'partial_der': 1 / (x.value * np.log(base)), 'node': child})
        return child
    if isinstance(x, Dual):
        if x.real <= 0:
            raise ValueError('Domain of logarithm should be greater than 0')
        return Dual(np.log(x.real) / np.log(base), x.dual / (x.real * np.log(base)))
    return np.log(x) / np.log(base)


# Square root

def sqrt(x):
    """Return the square root of x.

    Notes
    -----
    Fixes the original Dual branch, whose tangent was ``2*x*x'`` (the
    derivative of x**2); the correct rule is d/dx sqrt(x) = 1/(2*sqrt(x)),
    matching what the Node branch already computed.
    """
    return _apply(x, np.sqrt, lambda v: 0.5 / np.sqrt(v))
PypiClean
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/iron-form/.github/ISSUE_TEMPLATE.md
<!-- Instructions: https://github.com/PolymerElements/iron-form/CONTRIBUTING.md#filing-issues --> ### Description <!-- Example: The `paper-foo` element causes the page to turn pink when clicked. --> ### Expected outcome <!-- Example: The page stays the same color. --> ### Actual outcome <!-- Example: The page turns pink. --> ### Live Demo <!-- Example: https://jsbin.com/cagaye/edit?html,output --> ### Steps to reproduce <!-- Example 1. Put a `paper-foo` element in the page. 2. Open the page in a web browser. 3. Click the `paper-foo` element. --> ### Browsers Affected <!-- Check all that apply --> - [ ] Chrome - [ ] Firefox - [ ] Safari 9 - [ ] Safari 8 - [ ] Safari 7 - [ ] Edge - [ ] IE 11 - [ ] IE 10
PypiClean
/MSOIPPU-1.0.tar.gz/MSOIPPU-1.0/README.md
# Flood Risk Prediction tool ## Deadlines - *code 12pm GMT Friday 25th November 2022* - *presentation/ one page report 4pm GMT Friday 25th November 2022* ### Key Requirements Your project must provide the following: - at least one analysis method to estimate a number of properties for unlabelled postcodes extrapolated from sample data which is provided to you: - Flood risk (on a 10 point scale). - Median house price. - at least one analysis method to estimate the Local Authority & flood risk of arbitrary locations. - a method to find the rainfall and water level near a given postcode from provided rainfall, river and tide level data, or by looking online. You should also provide visualization and analysis tools for the postcode, rainfall, river & tide data provided to you, ideally in a way which will identify potential areas at immediate risk of flooding. Your code should have installation instructions and basic documentation, either as docstrings for functions & class methods, a full manual or both. ![London postcode density](images/LondonPostcodeDensity.png) ![England Flood Risk](images/EnglandFloodRisk.png) ![UK soil types](images/UKSoilTypes.png) This README file *should be updated* over the course of your group's work to represent the scope and abilities of your project. ### Assessment - your code will be assessed for its speed (both at training and prediction) & accuracy. - Your code should include tests of its functionality. - Additional marks will be awarded for high code quality and a clean, well organised repository. ### Installation Guide *To be written by you during the week* ### User instructions *To be written by you during the week* ### Documentation _This section should be updated during the week._ The code includes [Sphinx](https://www.sphinx-doc.org) documentation. On systems with Sphinx installed, this can be build by running ``` python -m sphinx docs html ``` then viewing the generated `index.html` file in the `html` directory in your browser. 
For systems with [LaTeX](https://www.latex-project.org/get/) installed, a manual pdf can be generated by running

```bash
python -m sphinx -b latex docs latex
```

then following the instructions to process the `FloodTool.tex` file in the `latex` directory.

### Testing

The tool includes several tests, which you can use to check its operation on your system. With [pytest](https://doc.pytest.org/en/latest) installed, these can be run with

```bash
python -m pytest --doctest-modules flood_tool
```

### Reading list

- [A guide to coordinate systems in Great Britain](https://webarchive.nationalarchives.gov.uk/20081023180830/http://www.ordnancesurvey.co.uk/oswebsite/gps/information/coordinatesystemsinfo/guidecontents/index.html)
- [Information on postcode validity](https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/283357/ILRSpecification2013_14Appendix_C_Dec2012_v1.pdf)
PypiClean
/Cohorte-Herald-0.0.3.tar.gz/Cohorte-Herald-0.0.3/herald/exceptions.py
# Module version __version_info__ = (0, 0, 3) __version__ = ".".join(str(x) for x in __version_info__) # Documentation strings format __docformat__ = "restructuredtext en" class HeraldException(Exception): """ Base class for all exceptions in Herald """ def __init__(self, target, text, cause=None): """ Sets up the exception :param target: A Target bean :param text: A description of the error :param cause: The cause of the error """ super(HeraldException, self).__init__(text) self.target = target self.cause = cause class NoTransport(HeraldException): """ No transport has been found to contact the targeted peer """ pass class InvalidPeerAccess(HeraldException): """ The description of an access to peer can't be read by the access handler """ pass class HeraldTimeout(HeraldException): """ A timeout has been reached """ def __init__(self, target, text, message): """ Sets up the exception :param text: Description of the exception :param message: The request which got no reply """ super(HeraldTimeout, self).__init__(target, text) self.message = message class NoListener(HeraldException): """ The message has been received by the remote peer, but no listener has been found to register it. """ def __init__(self, target, uid, subject): """ Sets up the exception :param target: Target peer with no listener :param uid: Original message UID :param subject: Subject of the original message """ super(NoListener, self).__init__(target, "No listener for {0}" .format(uid)) self.uid = uid self.subject = subject class ForgotMessage(HeraldException): """ Exception given to callback methods waiting for a message that has been declared to be forgotten by forget(). """ def __init__(self, uid): """ Sets up the exception :param uid: UID of the forgotten message """ super(ForgotMessage, self).__init__(None, "Forgot message {0}" .format(uid)) self.uid = uid
PypiClean
/emily_editor-0.9-py3-none-any.whl/src/emily0_9/text_printing.py
from enum import Enum
from . import string_parsing
from guibits1_0 import coloring
from guibits1_0 import dialoging
from guibits1_0 import font_styling
from . import html_parsing
from . import looking_ahead
from . import page_laying_out
from . import persisting
from guibits1_0 import printing
from . import rendering
from . import texting
from . import text_tokenizing
from guibits1_0 import type_checking2_0
from guibits1_0 import unicoding3_0
from guibits1_0 import windowing
from guibits1_0 import writing

# author R.N.Bosworth
# version 16 Mar 23 15:16

"""
Contractor to print texts.

Copyright (C) 2019,2021,2022,2023 R.N.Bosworth

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License (gpl.txt) for more details.
"""

"""
Token Grammar  version 11 Feb 2019 14:43

This grammar is in EBNF (Extended Backus-Naur Form) using the following
meta-symbols: ::== ( ) | *.
The equivalent terminal characters are called LPARENTHESIS, RPARENTHESIS,
BAR and STAR.  Non-terminals are in CamelCase.  Terminal names are in
UPPER_CASE.  Other symbols represent themselves.  The rules are
case-sensitive.

  Text  ::==  Token* EndTag

  Token  ::==  Word | Separator | NewLine | InternalTag

  EndTag  ::==  < e n d >

  Word  ::==  NonSpaceChar NonSpaceChar*

  NonSpaceChar is any HtmlCharacter except LF, SPACE, TAB, FF or CR

  Separator  ::==  SPACE (U+0020)

  Newline  ::==  LF (U+000A)

  InternalTag  ::==  (this will be extended later)

Note: Rule for HtmlCharacter is given in HTMLGrammar.
"""

# exposed procedures
# ------------------

def print_text(win,t,fn,fsize,fss):
    """
    Print the given text on the current default printer.

    pre:
      win = windowing.Window associated with text to be printed
      t = texting.Text to be printed
      fn = name of font for printing t, as str
      fsize = size of font for printing t, as float
      fss = styles of font for printing t, as font_styling.FontStyles

    post:
      either: t has been printed on a page of the current default printer,
      or: an exception dialog has been displayed to the user

    tests:
      empty text
      text of half a line, END alignment
      text of two lines, MIDDLE alignment
      text of two and a half lines, BEGIN alignment
    """
    type_checking2_0.check_derivative(win,windowing.Window)
    type_checking2_0.check_derivative(t,texting.Text)
    type_checking2_0.check_identical(fn,str)
    type_checking2_0.check_identical(fsize,float)
    type_checking2_0.check_derivative(fss,font_styling.FontStyles)
    my_globals = persisting.get_persistents()
    # set external page dimensions
    try:
        pj = printing.set_page_dimensions(persisting.get_paper_width(my_globals),persisting.get_paper_height(my_globals),persisting.get_horizontal_indent(my_globals))
        # note: this must be changed to allow different horizontal and vertical indents
        if pj == None:
            raise Exception("Problem trying to print page. Page was not printed.")
        # set up internal page
        pg = rendering.new_page(win,persisting.get_paper_width(my_globals),persisting.get_paper_height(my_globals),persisting.get_horizontal_indent(my_globals),persisting.get_horizontal_indent(my_globals),fn,fss,fsize,coloring.BLACK)
        with t._my_lock:
            # find text cursor position (so it can be restored after printing)
            loff = texting.cursor_line_offset(t)
            cpoff = texting.cursor_code_point_offset(t)
            # set text cursor to start of text
            texting.set_cursor_start(t)
            pg._current_line = unicoding3_0.string_of("")
            # NOTE(review): the rest of this module uses pg._line_width (with a
            # leading underscore); "pg.line_width" here looks like a typo and this
            # reset may never reach the field _print_line/_print_token read — verify.
            pg.line_width = 0.0
            pg._current_alignment = texting.get_alignment(t)
            # scan the text token by token, printing each line
            token = text_tokenizing.get_next_token(t)
            while not unicoding3_0.equals(token,unicoding3_0.string_of("<end>")):
                _print_token(pj,token,texting.get_alignment(t),pg)
                token = text_tokenizing.get_next_token(t)
            _print_token(pj,token,texting.get_alignment(t),pg)  # deal with <end> token
            # end the print job
            printing.end_printing(pj)
            # restore text cursor
            texting.set_cursor(t,loff,cpoff)
    except Exception as ex:
        # display exception message as dialog rather than crashing the GUI
        dialoging.show_message_dialog(persisting.get_menu_font_size(my_globals),"Printing problem",str(ex))


# private procedures
# ------------------

def _de_escape(s1,s2):
    """
    De-escape an HTML-escaped string and append the result to another string.

    pre:
      s1 = string to which the result is to be appended
      s2 = string of escaped unicode code points which is to be de-escaped
           and appended to s1

    post:
      s2 has been de-escaped and appended to s1

    tests:
      s1 = "" s2 = ""
      s1 = "ab" s2 = "1&lt;2"
    """
    i = 0
    while i < unicoding3_0.length_of(s2):
        # parse_html_character returns the decoded code point and the next offset
        (code_point,i) = string_parsing.parse_html_character(s2,i)
        unicoding3_0.append(s1,code_point)


def _print_line(pj,pg):
    """
    Print the current line and start a fresh, empty one.

    pre:
      pj = current print job
      pg = page for this print, with
        pg._current_line = line to be printed
        pg._line_width = width of _current_line in points
        pg._text_width = width of text in points
        pg._current_alignment = alignment of line to be printed

    post:
      pg._current_line has been printed
      pg._page_position.x_offset = 0.0
      pg._page_position.y_offset = position for next line
      pg._current_line = empty string
      pg._line_width = 0.0

    tests:
      line "fred" with _current_alignment = BEGIN, END, MIDDLE
    """
    # horizontal start of the line depends on the alignment
    pg._page_position.x_offset = page_laying_out.x_offset_of_line(pg._line_width,pg._text_width,pg._current_alignment)
    _try_line(pj,pg)
    # advance to the start of the next line
    pg._page_position.x_offset = 0.0
    pg._page_position.y_offset += pg._font_size
    pg._current_line = unicoding3_0.new_string()
    pg._line_width = 0.0


def _print_token(pj,tk,a,pg):
    """
    Render one token onto the page, printing completed lines as needed.

    pre:
      pj = current PrintJob
      tk = token to be printed
      a = alignment of text following tk
      pg = Page on which the token is to be printed, with _current_line,
           _line_width and _current_alignment set up

    post:
      if tk is an <end> token,
        the current line has been printed
      if tk is a Separator token,
        the width in points of the current line, plus the separator,
        has been measured, and
        if overflow has occurred,
          the current line has been printed and a new empty line started
      if tk is a Newline token,
        the current line has been printed and the alignment set to a
      if tk is a Word token,
        the width in points of the current line, plus the width in points
        of the word, has been measured, and
        if overflow has occurred,
          any trailing space has been removed from the current line,
          the current line has been printed,
          a new line has been started with the word
      y-offset in page has been updated

    tests:
      pg._current_alignment = BEGIN "the cat sat on the mat."
      "\n" a = MIDDLE
      "\n" a = END
      "the cat sat on the \nmat.<end>"
      "the<end>"
      "the<exit>"
    """
    try:
        # first code point of token gives sort
        # Tag
        if unicoding3_0.code_point_at(tk,0) == ord('<'):
            la = looking_ahead.lookahead_of_string(tk)
            label = html_parsing.parse_tag(la)
            if unicoding3_0.equals(label,unicoding3_0.string_of("end")):
                # <end>: flush the final (possibly partial) line
                pg._page_position.x_offset = page_laying_out.x_offset_of_line(pg._line_width,pg._text_width,pg._current_alignment)
                _try_line(pj,pg)
            else:
                raise Exception("unexpected tag label when printing text:" + unicoding3_0.python_string_of(label))
        # Separator
        elif unicoding3_0.code_point_at(tk,0) == ord(' '):
            space_width = writing.width_in_points_of(pg._my_window," ",pg._font_name,pg._font_styles,pg._font_size)
            #space_width = font_measuring.glyph_width_of(pg._font_metrics,ord(' '))
            if pg._line_width + space_width > pg._text_width:
                # a separator that would overflow ends the line instead
                _print_line(pj,pg)
            else:
                unicoding3_0.append(pg._current_line,ord(' '))
                pg._line_width += space_width
        # Newline
        elif unicoding3_0.code_point_at(tk,0) == ord('\n'):
            _print_line(pj,pg)
            # alignment of the following text takes effect after the break
            pg._current_alignment = a
        # Word
        else:
            pg.word_width = page_laying_out.width_in_points_of_escaped(pg._my_window,tk,pg._font_name,pg._font_styles,pg._font_size)
            if pg._line_width + pg.word_width > pg._text_width:
                # word would overflow: strip a trailing space, flush the line,
                # then start the new line with this word
                last_pos = unicoding3_0.length_of(pg._current_line) - 1
                if last_pos >= 0:
                    if unicoding3_0.code_point_at(pg._current_line,last_pos) == ord(' '):
                        space_width = writing.width_in_points_of(pg._my_window," ",pg._font_name,pg._font_styles,pg._font_size)
                        unicoding3_0.remove(pg._current_line,last_pos)
                        pg._line_width -= space_width
                _print_line(pj,pg)
                _de_escape(pg._current_line,tk)
                pg._line_width = pg.word_width
            else:
                _de_escape(pg._current_line,tk)
                pg._line_width += pg.word_width
    except Exception as ex:
        raise Exception("Unexpected exception when printing token:"+str(ex))


def _try_line(pj,pg):
    """
    Print the current line, if it still fits vertically in the text area.

    pre:
      pj = current print job
      pg = page to be printed

    post:
      the current line has been printed on the page, if above the bottom
      of the text area

    tests:
      line just fits on text area
      line just overlaps end of text area
    """
    # lines that would fall below the text area are silently dropped
    if pg._page_position.y_offset + pg._font_size <= pg._text_height:
        printing.print_string(pj,unicoding3_0.python_string_of(pg._current_line),pg._font_name,pg._font_styles,pg._font_size,pg._page_position.x_offset,pg._page_position.y_offset)
PypiClean
/MSOIooitfs-1.0.tar.gz/MSOIooitfs-1.0/flood_tool/geo.py
from numpy import array, asarray, mod, sin, cos, tan, sqrt, arctan2, floor, rad2deg, deg2rad, stack, pi from scipy.linalg import inv import numpy as np __all__ = ['get_easting_northing_from_gps_lat_long', 'get_gps_lat_long_from_easting_northing'] class Ellipsoid(object): """ Data structure for a global ellipsoid. """ def __init__(self, a, b, F_0): self.a = a self.b = b self.n = (a - b) / (a + b) self.e2 = (a ** 2 - b ** 2) / a ** 2 self.F_0 = F_0 class Datum(Ellipsoid): """ Data structure for a global datum. """ def __init__(self, a, b, F_0, phi_0, lam_0, E_0, N_0, H): super().__init__(a, b, F_0) self.phi_0 = phi_0 self.lam_0 = lam_0 self.E_0 = E_0 self.N_0 = N_0 self.H = H self.af0 = self.a * self.F_0 self.bf0 = self.b * self.F_0 def dms2rad(deg, min=0, sec=0): """Convert degrees, minutes, seconds to radians. Parameters ---------- deg: array_like Angle in degrees. min: array_like (optional) Angle component in minutes. sec: array_like (optional) Angle component in seconds. Returns ------- numpy.ndarray Angle in radians. """ deg = asarray(deg) return deg2rad(deg + min / 60. + sec / 3600.) def rad2dms(rad, dms=False): """Convert radians to degrees, minutes, seconds. Parameters ---------- rad: array_like Angle in radians. dms: bool Use degrees, minutes, seconds format. If False, use decimal degrees. Returns ------- numpy.ndarray Angle in degrees, minutes, seconds or decimal degrees. """ rad = asarray(rad) deg = rad2deg(rad) if dms: min = 60.0 * mod(deg, 1.0) sec = 60.0 * mod(min, 1.0) return stack((floor(deg), floor(min), sec.round(4))) else: return deg osgb36 = Datum(a=6377563.396, b=6356256.910, F_0=0.9996012717, phi_0=deg2rad(49.0), lam_0=deg2rad(-2.), E_0=400000, N_0=-100000, H=24.7) wgs84 = Ellipsoid(a=6378137, b=6356752.3142, F_0=0.9996) def lat_long_to_xyz(phi, lam, rads=False, datum=osgb36): """Convert input latitude/longitude in a given datum into Cartesian (x, y, z) coordinates. 
Parameters ---------- phi: array_like Latitude in degrees (if radians=False) or radians (if radians=True). lam: array_like Longitude in degrees (if radians=False) or radians (if radians=True). rads: bool (optional) If True, input latitudes and longitudes are in radians. datum: Datum (optional) Datum to use for conversion. """ if not rads: phi = deg2rad(phi) lam = deg2rad(lam) nu = datum.a * datum.F_0 / sqrt(1 - datum.e2 * sin(phi) ** 2) return array(((nu + datum.H) * cos(phi) * cos(lam), (nu + datum.H) * cos(phi) * sin(lam), ((1 - datum.e2) * nu + datum.H) * sin(phi))) def xyz_to_lat_long(x, y, z, rads=False, datum=osgb36): p = sqrt(x ** 2 + y ** 2) lam = arctan2(y, x) phi = arctan2(z, p * (1 - datum.e2)) for _ in range(10): nu = datum.a * datum.F_0 / sqrt(1 - datum.e2 * sin(phi) ** 2) dnu = -datum.a * datum.F_0 * cos(phi) * sin(phi) / (1 - datum.e2 * sin(phi) ** 2) ** 1.5 f0 = (z + datum.e2 * nu * sin(phi)) / p - tan(phi) f1 = datum.e2 * (nu ** cos(phi) + dnu * sin(phi)) / p - 1.0 / cos(phi) ** 2 phi -= f0 / f1 if not rads: phi = rad2dms(phi) lam = rad2dms(lam) return phi, lam def get_easting_northing_from_gps_lat_long(phis, lams, rads=False): """ Get OSGB36 easting/northing from GPS latitude and longitude pairs. Parameters ---------- phis: float/arraylike GPS (i.e. WGS84 datum) latitude value(s) lams: float/arrayling GPS (i.e. WGS84 datum) longitude value(s). rads: bool (optional) If true, specifies input is is radians. Returns ------- numpy.ndarray Easting values (in m) numpy.ndarray Northing values (in m) Examples -------- >>> get_easting_northing_from_gps_lat_long([55.5], [-1.54]) (array([429157.0]), array([623009])) References ---------- Based on the formulas in "A guide to coordinate systems in Great Britain". 
See also https://webapps.bgs.ac.uk/data/webservices/convertForm.cfm """ if not isinstance(phis, list) and not isinstance(phis,np.ndarray): phis = [phis] lams = [lams] assert len(phis) == len(lams) res_east = [] res_north = [] for i, phi in enumerate(phis): lam = lams[i] if not rads: phi = deg2rad(phi) lam = deg2rad(lams[i]) M = bigM(osgb36.bf0, osgb36.n, osgb36.phi_0, phi) nu = osgb36.af0 / (sqrt(1 - (osgb36.e2 * ((sin(phi)) ** 2)))) rho = (nu * (1 - osgb36.e2)) / (1 - (osgb36.e2 * (sin(phi)) ** 2)) eta2 = (nu / rho) - 1 I = M + osgb36.N_0 II = (nu / 2) * sin(phi) * cos(phi) III = (nu / 24) * sin(phi) * cos(phi) ** 3 * (5 - tan(phi) ** 2 + 9 * eta2) IIIA = (nu / 720) * sin(phi) * cos(phi) ** 5 * (61 - 58 * tan(phi) ** 2 + tan(phi) ** 4) IV = nu * cos(phi) V = (nu / 6) * cos(phi) ** 3 * (nu / rho - tan(phi) ** 2) VI = (nu / 120) * cos(phi) ** 5 * ( 5 - 18 * tan(phi) ** 2 + tan(phi) ** 4 + 14 * eta2 - 58 * tan(phi) ** 2 * eta2) diff = lam - osgb36.lam_0 N = I + II * diff ** 2 + III * diff ** 4 + IIIA * diff ** 6 E = osgb36.E_0 + IV * diff + V * diff ** 3 + VI * diff ** 5 res_east.append(E) res_north.append(N) return res_east, res_north def bigM(bf0, n, PHI0, PHI): """ Compute meridional arc. Input: - ellipsoid semi major axis multiplied by central meridian scale factor (bf0) in meters; - n (computed from a, b and f0); - lat of false origin (PHI0) and initial or final latitude of point (PHI) IN RADIANS. 
""" M = bf0 * (((1 + n + ((5 / 4) * (n ** 2)) + ((5 / 4) * (n ** 3))) * (PHI - PHI0)) - (((3 * n) + (3 * (n ** 2)) + ((21 / 8) * (n ** 3))) * (sin(PHI - PHI0)) * ( cos(PHI + PHI0))) + ((((15 / 8) * (n ** 2)) + ((15 / 8) * (n ** 3))) * (sin(2 * (PHI - PHI0))) * ( cos(2 * (PHI + PHI0)))) - (((35 / 24) * (n ** 3)) * (sin(3 * (PHI - PHI0))) * (cos(3 * (PHI + PHI0))))) return M def getNewPhi(North, datum): PHI1 = ((North - datum.N_0) / datum.af0) + datum.phi_0 M = bigM(datum.bf0, datum.n, datum.phi_0, PHI1) # Calculate new PHI value (PHI2) PHI2 = ((North - datum.N_0 - M) / datum.af0) + PHI1 # Iterate to get final value for InitialLat while abs(North - datum.N_0 - M) > 0.00001: PHI2 = ((North - datum.N_0 - M) / datum.af0) + PHI1 M = bigM(datum.bf0, datum.n, datum.phi_0, PHI2) PHI1 = PHI2 return PHI2 def get_gps_lat_long_from_easting_northing(easts: float, norths: float, rads=False, dms=False): """ Get OSGB36 easting/northing from GPS latitude and longitude pairs. Parameters ---------- east: float/arraylike OSGB36 easting value(s) (in m). north: float/arrayling OSGB36 easting value(s) (in m). rads: bool (optional) If true, specifies ouput is radians. dms: bool (optional) If true, output is in degrees/minutes/seconds. Incompatible with rads option. Returns ------- numpy.ndarray GPS (i.e. WGS84 datum) latitude value(s). numpy.ndarray GPS (i.e. WGS84 datum) longitude value(s). Examples -------- >>> get_gps_lat_long_from_easting_northing([429157], [623009]) (array([55.5]), array([-1.540008])) References ---------- Based on the formulas in "A guide to coordinate systems in Great Britain". 
See also https://webapps.bgs.ac.uk/data/webservices/convertForm.cfm """ if not isinstance(easts, list) and not isinstance(easts,np.ndarray): easts = [easts] norths = [norths] assert len(easts) == len(norths) res_lat = [] res_lon = [] for i, east in enumerate(easts): north = norths[i] Et = east - osgb36.E_0 PHId = getNewPhi(north, datum=osgb36) nu = osgb36.af0 / (sqrt(1 - (osgb36.e2 * ((sin(PHId)) ** 2)))) rho = (nu * (1 - osgb36.e2)) / (1 - (osgb36.e2 * (sin(PHId)) ** 2)) eta2 = (nu / rho) - 1 # Compute Latitude VII = (tan(PHId)) / (2 * rho * nu) VIII = ((tan(PHId)) / (24 * rho * (nu ** 3))) * ( 5 + (3 * ((tan(PHId)) ** 2)) + eta2 - (9 * eta2 * ((tan(PHId)) ** 2))) IX = ((tan(PHId)) / (720 * rho * (nu ** 5))) * ( 61 + (90 * ((tan(PHId)) ** 2)) + (45 * ((tan(PHId)) ** 4))) X = ((cos(PHId)) ** -1) / nu XI = (((cos(PHId)) ** -1) / (6 * (nu ** 3))) * ((nu / rho) + (2 * ((tan(PHId)) ** 2))) XII = (((cos(PHId)) ** -1) / (120 * (nu ** 5))) * ( 5 + (28 * ((tan(PHId)) ** 2)) + (24 * ((tan(PHId)) ** 4))) XIIA = (((cos(PHId)) ** -1) / (5040 * (nu ** 7))) * ( 61 + (662 * ((tan(PHId)) ** 2)) + (1320 * ((tan(PHId)) ** 4)) + ( 720 * ((tan(PHId)) ** 6))) E_N_Lat = PHId - ((Et ** 2) * VII) + ((Et ** 4) * VIII) - ((Et ** 6) * IX) E_N_Long = osgb36.lam_0 + (Et * X) - ((Et ** 3) * XI) + ((Et ** 5) * XII) - ((Et ** 7) * XIIA) if not rads: E_N_Lat = rad2dms(E_N_Lat, dms=dms) E_N_Long = rad2dms(E_N_Long, dms=dms) res_lat.append(E_N_Lat) res_lon.append(E_N_Long) return res_lat, res_lon class HelmertTransform(object): """Callable class to perform a Helmert transform.""" def __init__(self, s, rx, ry, rz, T): self.T = T.reshape((3, 1)) self.M = array([[1 + s, -rz, ry], [rz, 1 + s, -rx], [-ry, rx, 1 + s]]) def __call__(self, X): X = X.reshape((3, -1)) return self.T + self.M @ X class HelmertInverseTransform(object): """Callable class to perform the inverse of a Helmert transform.""" def __init__(self, s, rx, ry, rz, T): self.T = T.reshape((3, 1)) self.M = inv(array([[1 + s, -rz, ry], [rz, 1 + 
s, -rx], [-ry, rx, 1 + s]])) def __call__(self, X): X = X.reshape((3, -1)) return self.M @ (X - self.T) OSGB36transform = HelmertTransform(20.4894e-6, -dms2rad(0, 0, 0.1502), -dms2rad(0, 0, 0.2470), -dms2rad(0, 0, 0.8421), array([-446.448, 125.157, -542.060])) WGS84transform = HelmertInverseTransform(20.4894e-6, -dms2rad(0, 0, 0.1502), -dms2rad(0, 0, 0.2470), -dms2rad(0, 0, 0.8421), array([-446.448, 125.157, -542.060])) def WGS84toOSGB36(phi, lam, rads=False): """Convert WGS84 latitude/longitude to OSGB36 latitude/longitude. Parameters ---------- phi : array_like or float Latitude in degrees or radians on WGS84 datum. lam : array_like or float Longitude in degrees or radians on WGS84 datum. rads : bool, optional If True, phi and lam are in radians. If False, phi and lam are in degrees. Returns ------- tuple of numpy.ndarrays Latitude and longitude on OSGB36 datum in degrees or radians. """ xyz = OSGB36transform(lat_long_to_xyz(asarray(phi), asarray(lam), rads=rads, datum=wgs84)) return xyz_to_lat_long(*xyz, rads=rads, datum=osgb36) def OSGB36toWGS84(phi, lam, rads=False): """Convert OSGB36 latitude/longitude to WGS84 latitude/longitude. Parameters ---------- phi : array_like or float Latitude in degrees or radians on OSGB36 datum. lam : array_like or float Longitude in degrees or radians on OSGB36 datum. rads : bool, optional If True, phi and lam are in radians. If False, phi and lam are in degrees. Returns ------- tuple of numpy.ndarrays Latitude and longitude on WGS84 datum in degrees or radians. """ xyz = WGS84transform(lat_long_to_xyz(asarray(phi), asarray(lam), rads=rads, datum=osgb36)) return xyz_to_lat_long(*xyz, rads=rads, datum=wgs84) if __name__ == "__main__": print(get_easting_northing_from_gps_lat_long([55.5], [-1.54]))
PypiClean
/MPT5.0.1.2-0.1.2.tar.gz/MPT5.0.1.2-0.1.2/src/MPT5/Fount/Mainpro.py
from Allimp import *
from Config.Init import *
import GUI.window2 as window2
import GUI.Start as Strt


class mainApp(wx.App):
    """wx application for Temp5.

    Loads the saved language, optionally shows a splash screen, and
    opens the main window sized from the stored configuration.
    """

    def OnInit(self):
        """wx.App hook: build the UI; return True to start the event loop."""
        self.locale = None
        wx.Locale.AddCatalogLookupPathPrefix(LOCALE_PATH)
        self.config = self.GetConfig()
        lang = self.config.Read("Language")
        langu_dic = LANGUAGE_LIST
        # 'Language' stores an index into LANGUAGE_LIST
        self.UpdateLanguage(langu_dic[int(lang)])
        self.SetAppName('Temp5')
        if self.config.Read('Splash') != '':
            # non-empty 'Splash' setting -> show the splash screen, which is
            # responsible for creating the main window afterwards
            splash = Strt.MySplashScreen(window2)
            splash.Show(True)
        else:
            frame = window2.MainWin()
            if self.config.Read('WinSize') != '(-1, -1)':
                SIZE = wx.Size(eval(self.config.Read(u'WinSize')))
            else:
                # no stored size: fill the display, leaving room for the taskbar
                SIZE = (wx.GetDisplaySize()[0], wx.GetDisplaySize()[1] - 30)
            frame.SetSize(SIZE)
            frame.SetPosition((0, 0))
            #frame.EnableFullScreenView(True)
            frame.Show()
        return True

    def GetConfig(self):
        """Return the wx.FileConfig backed by option.ini/system1.ini."""
        config = wx.FileConfig(appName='Temp5',
                               localFilename=CONFIG_PATH + 'option.ini',
                               globalFilename=CONFIG_PATH + 'system1.ini')
        return config

    def UpdateLanguage(self, lang):
        """Install the wx.Locale and message catalogs for *lang*.

        :param lang: language name; must be a key of the supported set below
        """
        supportedLangs = {"English": wx.LANGUAGE_ENGLISH,
                          "Farsi": wx.LANGUAGE_FARSI,
                          "French": wx.LANGUAGE_FRENCH,
                          "German": wx.LANGUAGE_GERMAN,
                          "Spanish": wx.LANGUAGE_SPANISH,
                          "Turkish": wx.LANGUAGE_TURKISH,
                          }
        if self.locale:
            # drop the previous locale before installing a new one
            assert sys.getrefcount(self.locale) <= 2
            del self.locale
        if supportedLangs[lang]:
            self.locale = wx.Locale(supportedLangs[lang])
            if self.locale.IsOk():
                self.locale.AddCatalog("Temp5fa")
                # self.locale.AddCatalog("Temp5fr")
                self.locale.AddCatalog("Temp5de")
                # self.locale.AddCatalog("Temp5sp")
                # self.locale.AddCatalog("Temp5tr")
            else:
                self.locale = None
        else:
            wx.MessageBox("Language support not found please sending an email to us for update new language!")


def main(argv):
    """Entry point.

    :param argv: command-line arguments (without the program name).
                 '-c' as the first argument runs in console mode (no
                 stdout/stderr redirection); any other argument keeps
                 the original redirect=True behaviour.
    """
    # BUG FIX: previously 'app' was only assigned when argv was non-empty,
    # so launching with no arguments raised NameError at app.MainLoop().
    # No arguments now behaves like console mode (wx's default, no redirect).
    if argv and argv[0] != '-c':
        app = mainApp(redirect=True)
    else:
        app = mainApp()
    app.MainLoop()


if __name__ == '__main__':
    main(sys.argv[1:])
PypiClean
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/tree/Building.py
import marshal import os from nuitka import ( ModuleRegistry, Options, OutputDirectories, SourceCodeReferences, ) from nuitka.__past__ import long, unicode from nuitka.BytecodeCaching import ( getCachedImportedModuleUsageAttempts, hasCachedImportedModuleUsageAttempts, ) from nuitka.Bytecodes import loadCodeObjectData from nuitka.containers.OrderedSets import OrderedSet from nuitka.Errors import CodeTooComplexCode from nuitka.freezer.ImportDetection import ( detectEarlyImports, detectStdlibAutoInclusionModules, ) from nuitka.importing import Importing from nuitka.importing.ImportCache import addImportedModule from nuitka.importing.PreloadedPackages import getPthImportedPackages from nuitka.importing.StandardLibrary import isStandardLibraryPath from nuitka.nodes.AttributeNodes import ( StatementAssignmentAttribute, makeExpressionAttributeLookup, ) from nuitka.nodes.BuiltinFormatNodes import ( ExpressionBuiltinAscii, ExpressionBuiltinFormat, ) from nuitka.nodes.BuiltinRefNodes import quick_names from nuitka.nodes.BuiltinTypeNodes import ExpressionBuiltinStrP3 from nuitka.nodes.ConditionalNodes import ( ExpressionConditional, makeStatementConditional, ) from nuitka.nodes.ConstantRefNodes import ( ExpressionConstantEllipsisRef, ExpressionConstantNoneRef, makeConstantRefNode, ) from nuitka.nodes.ExceptionNodes import ( StatementRaiseException, StatementReraiseException, ) from nuitka.nodes.FutureSpecs import FutureSpec from nuitka.nodes.GeneratorNodes import StatementGeneratorReturn from nuitka.nodes.ImportNodes import ( isHardModuleWithoutSideEffect, makeExpressionImportModuleFixed, ) from nuitka.nodes.LoopNodes import StatementLoopBreak, StatementLoopContinue from nuitka.nodes.ModuleAttributeNodes import ( ExpressionModuleAttributeFileRef, ExpressionModuleAttributeSpecRef, ) from nuitka.nodes.ModuleNodes import ( CompiledPythonModule, CompiledPythonPackage, PythonExtensionModule, PythonMainModule, makeUncompiledPythonModule, ) from nuitka.nodes.NodeMakingHelpers import 
( makeRaiseExceptionStatementFromInstance, ) from nuitka.nodes.OperatorNodes import makeBinaryOperationNode from nuitka.nodes.OperatorNodesUnary import makeExpressionOperationUnary from nuitka.nodes.ReturnNodes import makeStatementReturn from nuitka.nodes.SliceNodes import makeExpressionBuiltinSlice from nuitka.nodes.StatementNodes import StatementExpressionOnly from nuitka.nodes.StringConcatenationNodes import ExpressionStringConcatenation from nuitka.nodes.VariableNameNodes import ( ExpressionVariableNameRef, StatementAssignmentVariableName, ) from nuitka.optimizations.BytecodeDemotion import demoteSourceCodeToBytecode from nuitka.Options import shallWarnUnusualCode from nuitka.pgo.PGO import decideCompilationFromPGO from nuitka.plugins.Plugins import Plugins from nuitka.PythonVersions import python_version from nuitka.Tracing import ( general, optimization_logger, plugins_logger, recursion_logger, unusual_logger, ) from nuitka.utils import MemoryUsage from nuitka.utils.ModuleNames import ModuleName from . 
import SyntaxErrors from .ReformulationAssertStatements import buildAssertNode from .ReformulationAssignmentStatements import ( buildAnnAssignNode, buildAssignNode, buildDeleteNode, buildInplaceAssignNode, buildNamedExprNode, ) from .ReformulationBooleanExpressions import buildBoolOpNode from .ReformulationCallExpressions import buildCallNode from .ReformulationClasses import buildClassNode from .ReformulationComparisonExpressions import buildComparisonNode from .ReformulationContractionExpressions import ( buildDictContractionNode, buildGeneratorExpressionNode, buildListContractionNode, buildSetContractionNode, ) from .ReformulationDictionaryCreation import buildDictionaryNode from .ReformulationExecStatements import buildExecNode from .ReformulationForLoopStatements import ( buildAsyncForLoopNode, buildForLoopNode, ) from .ReformulationFunctionStatements import ( buildAsyncFunctionNode, buildFunctionNode, ) from .ReformulationImportStatements import ( buildImportFromNode, buildImportModulesNode, checkFutureImportsOnlyAtStart, getFutureSpec, popFutureSpec, pushFutureSpec, ) from .ReformulationLambdaExpressions import buildLambdaNode from .ReformulationMatchStatements import buildMatchNode from .ReformulationNamespacePackages import ( createNamespacePackage, createPathAssignment, ) from .ReformulationPrintStatements import buildPrintNode from .ReformulationSequenceCreation import ( buildListCreationNode, buildSetCreationNode, buildTupleCreationNode, ) from .ReformulationSubscriptExpressions import buildSubscriptNode from .ReformulationTryExceptStatements import ( buildTryExceptionNode, buildTryStarExceptionNode, ) from .ReformulationTryFinallyStatements import buildTryFinallyNode from .ReformulationWhileLoopStatements import buildWhileLoopNode from .ReformulationWithStatements import buildAsyncWithNode, buildWithNode from .ReformulationYieldExpressions import ( buildAwaitNode, buildYieldFromNode, buildYieldNode, ) from .SourceHandling import ( 
checkPythonVersionFromCode, getSourceCodeDiff, readSourceCodeFromFilenameWithInformation, ) from .TreeHelpers import ( buildNode, buildNodeTuple, buildStatementsNode, extractDocFromBody, getBuildContext, getKind, makeModuleFrame, makeStatementsSequence, makeStatementsSequenceFromStatement, mangleName, mergeStatements, parseSourceCodeToAst, setBuildingDispatchers, ) from .VariableClosure import completeVariableClosures if str is not bytes: def buildVariableReferenceNode(provider, node, source_ref): # Shortcut for Python3, which gives syntax errors for assigning these. if node.id in quick_names: return makeConstantRefNode( constant=quick_names[node.id], source_ref=source_ref ) return ExpressionVariableNameRef( provider=provider, variable_name=mangleName(node.id, provider), source_ref=source_ref, ) else: def buildVariableReferenceNode(provider, node, source_ref): return ExpressionVariableNameRef( provider=provider, variable_name=mangleName(node.id, provider), source_ref=source_ref, ) # Python3.4 or higher, True and False, are not given as variables anymore. # Python3.8, all kinds of constants are like this. def buildNamedConstantNode(node, source_ref): return makeConstantRefNode( constant=node.value, source_ref=source_ref, user_provided=True ) def buildConditionNode(provider, node, source_ref): # Conditional statements may have one or two branches. We will never see an # "elif", because that's already dealt with by module "ast", which turns it # into nested conditional statements. spell-checker: ignore orelse return makeStatementConditional( condition=buildNode(provider, node.test, source_ref), yes_branch=buildStatementsNode( provider=provider, nodes=node.body, source_ref=source_ref ), no_branch=buildStatementsNode( provider=provider, nodes=node.orelse if node.orelse else None, source_ref=source_ref, ), source_ref=source_ref, ) def buildTryFinallyNode2(provider, node, source_ref): # Try/finally node statements of old style. 
return buildTryFinallyNode( provider=provider, build_tried=lambda: buildStatementsNode( provider=provider, nodes=node.body, source_ref=source_ref ), node=node, source_ref=source_ref, ) def buildTryNode(provider, node, source_ref): # Note: This variant is used for Python3.3 or higher only, older stuff uses # the above ones, this one merges try/except with try/finally in the # "ast". We split it up again, as it's logically separated of course. # Shortcut missing try/finally. if not node.handlers: return buildTryFinallyNode2(provider, node, source_ref) if not node.finalbody: # spell-checker: ignore finalbody return buildTryExceptionNode( provider=provider, node=node, source_ref=source_ref ) return buildTryFinallyNode( provider=provider, build_tried=lambda: makeStatementsSequence( statements=mergeStatements( ( buildTryExceptionNode( provider=provider, node=node, source_ref=source_ref ), ), allow_none=True, ), allow_none=True, source_ref=source_ref, ), node=node, source_ref=source_ref, ) def buildTryStarNode(provider, node, source_ref): # Note: This variant is used for Python3.11 or higher only, where an exception # group is caught. Mixing groups and non-group catches is not allowed. # Without handlers, this would not be used, but instead "Try" would be used, # but assert against it. assert node.handlers if not node.finalbody: # spell-checker: ignore finalbody return buildTryStarExceptionNode( provider=provider, node=node, source_ref=source_ref ) return buildTryFinallyNode( provider=provider, build_tried=lambda: makeStatementsSequence( statements=mergeStatements( ( buildTryStarExceptionNode( provider=provider, node=node, source_ref=source_ref ), ), allow_none=True, ), allow_none=True, source_ref=source_ref, ), node=node, source_ref=source_ref, ) def buildRaiseNode(provider, node, source_ref): # Raise statements. Under Python2 they may have type, value and traceback # attached, for Python3, you can only give type (actually value) and cause. 
# spell-checker: ignore tback if python_version < 0x300: exception_type = buildNode(provider, node.type, source_ref, allow_none=True) exception_value = buildNode(provider, node.inst, source_ref, allow_none=True) exception_trace = buildNode(provider, node.tback, source_ref, allow_none=True) exception_cause = None else: exception_type = buildNode(provider, node.exc, source_ref, allow_none=True) exception_value = None exception_trace = None exception_cause = buildNode(provider, node.cause, source_ref, allow_none=True) if exception_type is None: assert exception_value is None assert exception_trace is None assert exception_cause is None result = StatementReraiseException(source_ref=source_ref) else: result = StatementRaiseException( exception_type=exception_type, exception_value=exception_value, exception_trace=exception_trace, exception_cause=exception_cause, source_ref=source_ref, ) if exception_cause is not None: result.setCompatibleSourceReference( source_ref=exception_cause.getCompatibleSourceReference() ) elif exception_trace is not None: result.setCompatibleSourceReference( source_ref=exception_trace.getCompatibleSourceReference() ) elif exception_value is not None: result.setCompatibleSourceReference( source_ref=exception_value.getCompatibleSourceReference() ) elif exception_type is not None: result.setCompatibleSourceReference( source_ref=exception_type.getCompatibleSourceReference() ) return result def handleGlobalDeclarationNode(provider, node, source_ref): # On the module level, there is nothing to do. if provider.isCompiledPythonModule(): if shallWarnUnusualCode(): unusual_logger.warning( "%s: Using 'global' statement on module level has no effect." % source_ref.getAsString(), ) return None # Need to catch the error of declaring a parameter variable as global # ourselves here. The AST parsing doesn't catch it, so we check here. 
if provider.isExpressionFunctionBody(): parameters = provider.getParameters() for variable_name in node.names: if variable_name in parameters.getParameterNames(): SyntaxErrors.raiseSyntaxError( "name '%s' is %s and global" % ( variable_name, "local" if python_version < 0x300 else "parameter", ), source_ref.atColumnNumber(node.col_offset), ) # The module the "global" statement refers to. module = provider.getParentModule() # Can give multiple names. for variable_name in node.names: closure_variable = None # Re-use already taken global variables, in order to avoid creating yet # another instance, esp. as the indications could then potentially not # be shared. if provider.hasTakenVariable(variable_name): closure_variable = provider.getTakenVariable(variable_name) # Only global variables count. Could have a closure reference to # a location of a parent function here. if not closure_variable.isModuleVariable(): closure_variable = None if closure_variable is None: module_variable = module.getVariableForAssignment( variable_name=variable_name ) closure_variable = provider.addClosureVariable(variable=module_variable) assert closure_variable.isModuleVariable() # Special case, since Python 3.5 it is allowed to use global on the "__class__" # variable as well, but it's not changing visibility of implicit "__class__" of # functions, and as such it will just not be registered. if ( provider.isExpressionClassBodyBase() and closure_variable.getName() == "__class__" ): if python_version < 0x340: SyntaxErrors.raiseSyntaxError( "cannot make __class__ global", source_ref ) else: provider.getLocalsScope().registerClosureVariable( variable=closure_variable, ) # Drop this, not really part of our tree. return None def handleNonlocalDeclarationNode(provider, node, source_ref): # Need to catch the error of declaring a parameter variable as global # ourselves here. The AST parsing doesn't catch it, but we can do it here. 
parameter_provider = provider while ( parameter_provider.isExpressionGeneratorObjectBody() or parameter_provider.isExpressionCoroutineObjectBody() or parameter_provider.isExpressionAsyncgenObjectBody() ): parameter_provider = parameter_provider.getParentVariableProvider() if parameter_provider.isExpressionClassBodyBase(): parameter_names = () else: parameter_names = parameter_provider.getParameters().getParameterNames() for variable_name in node.names: if variable_name in parameter_names: SyntaxErrors.raiseSyntaxError( "name '%s' is parameter and nonlocal" % (variable_name), source_ref.atColumnNumber(node.col_offset), ) provider.addNonlocalsDeclaration( names=tuple(node.names), user_provided=True, source_ref=source_ref.atColumnNumber(node.col_offset), ) # Drop this, not really part of our tree. return None def buildStringNode(node, source_ref): assert type(node.s) in (str, unicode) return makeConstantRefNode( constant=node.s, source_ref=source_ref, user_provided=True ) def buildNumberNode(node, source_ref): assert type(node.n) in (int, long, float, complex), type(node.n) return makeConstantRefNode( constant=node.n, source_ref=source_ref, user_provided=True ) def buildBytesNode(node, source_ref): return makeConstantRefNode( constant=node.s, source_ref=source_ref, user_provided=True ) def buildEllipsisNode(source_ref): return ExpressionConstantEllipsisRef(source_ref=source_ref) def buildStatementLoopContinue(node, source_ref): source_ref = source_ref.atColumnNumber(node.col_offset) # Python forbids this, although technically it's probably not much of # an issue. 
if getBuildContext() == "finally" and python_version < 0x380: SyntaxErrors.raiseSyntaxError( "'continue' not supported inside 'finally' clause", source_ref ) return StatementLoopContinue(source_ref=source_ref) def buildStatementLoopBreak(provider, node, source_ref): # A bit unusual, we need the provider, but not the node, # pylint: disable=unused-argument return StatementLoopBreak(source_ref=source_ref.atColumnNumber(node.col_offset)) def buildAttributeNode(provider, node, source_ref): return makeExpressionAttributeLookup( expression=buildNode(provider, node.value, source_ref), attribute_name=mangleName(node.attr, provider), source_ref=source_ref, ) def buildReturnNode(provider, node, source_ref): if provider.isExpressionClassBodyBase() or provider.isCompiledPythonModule(): SyntaxErrors.raiseSyntaxError( "'return' outside function", source_ref.atColumnNumber(node.col_offset) ) expression = buildNode(provider, node.value, source_ref, allow_none=True) if provider.isExpressionGeneratorObjectBody(): if expression is not None and python_version < 0x300: SyntaxErrors.raiseSyntaxError( "'return' with argument inside generator", source_ref.atColumnNumber(node.col_offset), ) if provider.isExpressionAsyncgenObjectBody(): if expression is not None: SyntaxErrors.raiseSyntaxError( "'return' with value in async generator", source_ref.atColumnNumber(node.col_offset), ) if ( provider.isExpressionGeneratorObjectBody() or provider.isExpressionAsyncgenObjectBody() ): if expression is None: expression = ExpressionConstantNoneRef(source_ref=source_ref) return StatementGeneratorReturn(expression=expression, source_ref=source_ref) else: return makeStatementReturn(expression=expression, source_ref=source_ref) def buildExprOnlyNode(provider, node, source_ref): result = StatementExpressionOnly( expression=buildNode(provider, node.value, source_ref), source_ref=source_ref ) result.setCompatibleSourceReference( result.subnode_expression.getCompatibleSourceReference() ) return result def 
buildUnaryOpNode(provider, node, source_ref): operator = getKind(node.op) # Delegate this one to boolean operation code. if operator == "Not": return buildBoolOpNode(provider=provider, node=node, source_ref=source_ref) operand = buildNode(provider, node.operand, source_ref) return makeExpressionOperationUnary( operator=operator, operand=operand, source_ref=source_ref ) def buildBinaryOpNode(provider, node, source_ref): operator = getKind(node.op) if operator == "Div": operator = "TrueDiv" if getFutureSpec().isFutureDivision() else "OldDiv" left = buildNode(provider, node.left, source_ref) right = buildNode(provider, node.right, source_ref) result = makeBinaryOperationNode( operator=operator, left=left, right=right, source_ref=source_ref ) result.setCompatibleSourceReference(source_ref=right.getCompatibleSourceReference()) return result def buildReprNode(provider, node, source_ref): return makeExpressionOperationUnary( operator="Repr", operand=buildNode(provider, node.value, source_ref), source_ref=source_ref, ) def buildConditionalExpressionNode(provider, node, source_ref): return ExpressionConditional( condition=buildNode(provider, node.test, source_ref), expression_yes=buildNode(provider, node.body, source_ref), expression_no=buildNode(provider, node.orelse, source_ref), source_ref=source_ref, ) def buildFormattedValueNode(provider, node, source_ref): value = buildNode(provider, node.value, source_ref) conversion = node.conversion % 4 if node.conversion > 0 else 0 if conversion == 0: pass elif conversion == 3: # TODO: We might start using this for Python2 too. 
assert str is not bytes value = ExpressionBuiltinStrP3( value=value, encoding=None, errors=None, source_ref=source_ref ) elif conversion == 2: value = makeExpressionOperationUnary( operator="Repr", operand=value, source_ref=source_ref ) elif conversion == 1: value = ExpressionBuiltinAscii(value=value, source_ref=source_ref) else: assert False, conversion return ExpressionBuiltinFormat( value=value, format_spec=buildNode(provider, node.format_spec, source_ref, allow_none=True), source_ref=source_ref, ) def buildJoinedStrNode(provider, node, source_ref): if node.values: return ExpressionStringConcatenation( values=buildNodeTuple(provider, node.values, source_ref), source_ref=source_ref, ) else: return makeConstantRefNode(constant="", source_ref=source_ref) def buildSliceNode(provider, node, source_ref): """Python3.9 or higher, slice notations.""" return makeExpressionBuiltinSlice( start=buildNode(provider, node.lower, source_ref, allow_none=True), stop=buildNode(provider, node.upper, source_ref, allow_none=True), step=buildNode(provider, node.step, source_ref, allow_none=True), source_ref=source_ref, ) setBuildingDispatchers( path_args3={ "Name": buildVariableReferenceNode, "Assign": buildAssignNode, "AnnAssign": buildAnnAssignNode, "Delete": buildDeleteNode, "Lambda": buildLambdaNode, "GeneratorExp": buildGeneratorExpressionNode, "If": buildConditionNode, "While": buildWhileLoopNode, "For": buildForLoopNode, "AsyncFor": buildAsyncForLoopNode, "Compare": buildComparisonNode, "ListComp": buildListContractionNode, "DictComp": buildDictContractionNode, "SetComp": buildSetContractionNode, "Dict": buildDictionaryNode, "Set": buildSetCreationNode, "Tuple": buildTupleCreationNode, "List": buildListCreationNode, "Global": handleGlobalDeclarationNode, "Nonlocal": handleNonlocalDeclarationNode, "TryExcept": buildTryExceptionNode, "TryFinally": buildTryFinallyNode2, "Try": buildTryNode, "Raise": buildRaiseNode, # Python3.11 exception group catching "TryStar": buildTryStarNode, 
"Import": buildImportModulesNode, "ImportFrom": buildImportFromNode, "Assert": buildAssertNode, "Exec": buildExecNode, "With": buildWithNode, "AsyncWith": buildAsyncWithNode, "FunctionDef": buildFunctionNode, "AsyncFunctionDef": buildAsyncFunctionNode, "Await": buildAwaitNode, "ClassDef": buildClassNode, "Print": buildPrintNode, "Call": buildCallNode, "Subscript": buildSubscriptNode, "BoolOp": buildBoolOpNode, "Attribute": buildAttributeNode, "Return": buildReturnNode, "Yield": buildYieldNode, "YieldFrom": buildYieldFromNode, "Expr": buildExprOnlyNode, "UnaryOp": buildUnaryOpNode, "BinOp": buildBinaryOpNode, "Repr": buildReprNode, "AugAssign": buildInplaceAssignNode, "IfExp": buildConditionalExpressionNode, "Break": buildStatementLoopBreak, "JoinedStr": buildJoinedStrNode, "FormattedValue": buildFormattedValueNode, "NamedExpr": buildNamedExprNode, "Slice": buildSliceNode, "Match": buildMatchNode, }, path_args2={ "Constant": buildNamedConstantNode, # Python3.8 "NameConstant": buildNamedConstantNode, # Python3.8 or below "Str": buildStringNode, "Num": buildNumberNode, "Bytes": buildBytesNode, "Continue": buildStatementLoopContinue, }, path_args1={"Ellipsis": buildEllipsisNode}, ) def buildParseTree(provider, ast_tree, source_ref, is_module, is_main): # There are a bunch of branches here, mostly to deal with version # differences for module default variables. pylint: disable=too-many-branches # Maybe one day, we do exec inlining again, that is what this is for, # then is_module won't be True, for now it always is. 
pushFutureSpec() if is_module: provider.setFutureSpec(getFutureSpec()) body, doc = extractDocFromBody(ast_tree) if is_module and is_main and python_version >= 0x360: provider.markAsNeedsAnnotationsDictionary() try: result = buildStatementsNode( provider=provider, nodes=body, source_ref=source_ref ) except RuntimeError as e: if "maximum recursion depth" in e.args[0]: raise CodeTooComplexCode( provider.getFullName(), provider.getCompileTimeFilename() ) # After building, we can verify that all future statements were where they # belong, namely at the start of the module. checkFutureImportsOnlyAtStart(body) internal_source_ref = source_ref.atInternal() statements = [] if is_module: # Add import of "site" module of main programs visibly in the node tree, # so recursion and optimization can pick it up, checking its effects. if is_main and not Options.hasPythonFlagNoSite(): statements.append( StatementExpressionOnly( expression=makeExpressionImportModuleFixed( module_name="site", source_ref=source_ref ), source_ref=source_ref, ) ) for path_imported_name in getPthImportedPackages(): if isHardModuleWithoutSideEffect(path_imported_name): continue statements.append( StatementExpressionOnly( expression=makeExpressionImportModuleFixed( module_name=path_imported_name, source_ref=source_ref ), source_ref=source_ref, ) ) statements.append( StatementAssignmentVariableName( provider=provider, variable_name="__doc__", source=makeConstantRefNode( constant=doc, source_ref=internal_source_ref, user_provided=True ), source_ref=internal_source_ref, ) ) statements.append( StatementAssignmentVariableName( provider=provider, variable_name="__file__", source=ExpressionModuleAttributeFileRef( variable=provider.getVariableForReference("__file__"), source_ref=internal_source_ref, ), source_ref=internal_source_ref, ) ) if provider.isCompiledPythonPackage(): # This assigns "__path__" value. 
statements.append(createPathAssignment(provider, internal_source_ref)) if python_version >= 0x340 and not is_main: statements += ( StatementAssignmentAttribute( expression=ExpressionModuleAttributeSpecRef( variable=provider.getVariableForReference("__spec__"), source_ref=internal_source_ref, ), attribute_name="origin", source=ExpressionModuleAttributeFileRef( variable=provider.getVariableForReference("__file__"), source_ref=internal_source_ref, ), source_ref=internal_source_ref, ), StatementAssignmentAttribute( expression=ExpressionModuleAttributeSpecRef( variable=provider.getVariableForReference("__spec__"), source_ref=internal_source_ref, ), attribute_name="has_location", source=makeConstantRefNode(True, internal_source_ref), source_ref=internal_source_ref, ), ) if provider.isCompiledPythonPackage(): statements.append( StatementAssignmentAttribute( expression=ExpressionModuleAttributeSpecRef( variable=provider.getVariableForReference("__spec__"), source_ref=internal_source_ref, ), attribute_name="submodule_search_locations", source=ExpressionVariableNameRef( provider=provider, variable_name="__path__", source_ref=internal_source_ref, ), source_ref=internal_source_ref, ) ) if python_version >= 0x300: statements.append( StatementAssignmentVariableName( provider=provider, variable_name="__cached__", source=ExpressionConstantNoneRef(source_ref=internal_source_ref), source_ref=internal_source_ref, ) ) needs__initializing__ = ( not provider.isMainModule() and 0x300 <= python_version < 0x340 ) if needs__initializing__: # Set "__initializing__" at the beginning to True statements.append( StatementAssignmentVariableName( provider=provider, variable_name="__initializing__", source=makeConstantRefNode( constant=True, source_ref=internal_source_ref, user_provided=True ), source_ref=internal_source_ref, ) ) if provider.needsAnnotationsDictionary(): # Set "__annotations__" on module level to {} statements.append( StatementAssignmentVariableName( provider=provider, 
variable_name="__annotations__", source=makeConstantRefNode( constant={}, source_ref=internal_source_ref, user_provided=True ), source_ref=internal_source_ref, ) ) # Now the module body if there is any at all. if result is not None: statements.extend(result.subnode_statements) if needs__initializing__: # Set "__initializing__" at the end to False statements.append( StatementAssignmentVariableName( provider=provider, variable_name="__initializing__", source=makeConstantRefNode( constant=False, source_ref=internal_source_ref, user_provided=True ), source_ref=internal_source_ref, ) ) if is_module: result = makeModuleFrame( module=provider, statements=statements, source_ref=source_ref ) popFutureSpec() return result else: assert False def decideCompilationMode(is_top, module_name, for_pgo): """Decide the compilation mode for a module. module_name - The module to decide compilation mode for. for_pgo - consider PGO information or not """ # Technically required modules must be bytecode if module_name in detectEarlyImports(): return "bytecode" result = Plugins.decideCompilation(module_name) # Cannot change mode of __main__ to bytecode, that is not going # to work currently. if result == "bytecode" and is_top: plugins_logger.warning( """\ Ignoring plugin decision to compile top level package '%s' \ as bytecode, the extension module entry point is technically \ required to compiled.""" % module_name ) result = "compiled" # Include all of standard library as bytecode, for now. We need to identify # which ones really need that. 
if not is_top: module_filename = Importing.locateModule( module_name=module_name, parent_package=None, level=0 )[1] if module_filename is not None and isStandardLibraryPath(module_filename): result = "bytecode" # Plugins need to win over PGO, as they might know it better if result is None and not for_pgo: result = decideCompilationFromPGO(module_name=module_name) # Default if neither plugins nor PGO have expressed an opinion if result is None: if module_name in detectStdlibAutoInclusionModules(): result = "bytecode" else: result = "compiled" return result def _loadUncompiledModuleFromCache( module_name, reason, is_package, source_code, source_ref ): result = makeUncompiledPythonModule( module_name=module_name, reason=reason, filename=source_ref.getFilename(), bytecode=demoteSourceCodeToBytecode( module_name=module_name, source_code=source_code, filename=source_ref.getFilename(), ), technical=module_name in detectEarlyImports(), is_package=is_package, ) used_modules = OrderedSet() used_modules = getCachedImportedModuleUsageAttempts( module_name=module_name, source_code=source_code, source_ref=source_ref ) # assert not is_package, (module_name, used_modules, result, result.getCompileTimeFilename()) result.setUsedModules(used_modules) return result def _createModule( module_name, module_kind, reason, source_code, source_ref, is_namespace, is_package, is_top, is_main, main_added, ): if module_kind == "extension": result = PythonExtensionModule( module_name=module_name, reason=reason, technical=module_name in detectEarlyImports(), source_ref=source_ref, ) elif is_main: assert reason == "main", reason result = PythonMainModule( main_added=main_added, module_name=module_name, mode=decideCompilationMode( is_top=is_top, module_name=module_name, for_pgo=False ), future_spec=None, source_ref=source_ref, ) checkPythonVersionFromCode(source_code) elif is_namespace: result = createNamespacePackage( module_name=module_name, reason=reason, is_top=is_top, source_ref=source_ref, ) 
else: mode = decideCompilationMode( is_top=is_top, module_name=module_name, for_pgo=False ) if ( mode == "bytecode" and not is_top and not Options.shallDisableBytecodeCacheUsage() and hasCachedImportedModuleUsageAttempts( module_name=module_name, source_code=source_code, source_ref=source_ref ) ): result = _loadUncompiledModuleFromCache( module_name=module_name, reason=reason, is_package=is_package, source_code=source_code, source_ref=source_ref, ) # Not used anymore source_code = None else: if is_package: result = CompiledPythonPackage( module_name=module_name, reason=reason, is_top=is_top, mode=mode, future_spec=None, source_ref=source_ref, ) else: result = CompiledPythonModule( module_name=module_name, reason=reason, is_top=is_top, mode=mode, future_spec=None, source_ref=source_ref, ) return result def createModuleTree(module, source_ref, ast_tree, is_main): if Options.isShowMemory(): memory_watch = MemoryUsage.MemoryWatch() module_body = buildParseTree( provider=module, ast_tree=ast_tree, source_ref=source_ref, is_module=True, is_main=is_main, ) if module_body.isStatementsFrame(): module_body = makeStatementsSequenceFromStatement(statement=module_body) module.setChildBody(module_body) completeVariableClosures(module) if Options.isShowMemory(): memory_watch.finish( "Memory usage changed loading module '%s'" % module.getFullName() ) def buildMainModuleTree(filename, source_code): # Detect to be frozen modules if any, so we can consider to not follow # to them. if Options.shallMakeModule(): module_name = Importing.getModuleNameAndKindFromFilename(filename)[0] if module_name is None: general.sysexit( "Error, filename '%s' suffix does not appear to be Python module code." % filename ) else: # TODO: Doesn't work for deeply nested packages at all. 
if Options.hasPythonFlagPackageMode(): module_name = ModuleName(os.path.basename(filename) + ".__main__") else: module_name = ModuleName("__main__") module = buildModule( module_name=module_name, reason="main", module_filename=filename, source_code=source_code, is_top=True, is_main=not Options.shallMakeModule(), module_kind="py", is_fake=source_code is not None, hide_syntax_error=False, ) if Options.isStandaloneMode(): module.setStandardLibraryModules( early_module_names=detectEarlyImports(), stdlib_modules_names=detectStdlibAutoInclusionModules(), ) # Main modules do not get added to the import cache, but plugins get to see it. if module.isMainModule(): Plugins.onModuleDiscovered(module) else: addImportedModule(imported_module=module) return module def _makeModuleBodyFromSyntaxError(exc, module_name, reason, module_filename): if module_filename not in Importing.warned_about: Importing.warned_about.add(module_filename) recursion_logger.warning( """\ Cannot follow import to module '%s' because of '%s'.""" % (module_name, exc.__class__.__name__) ) source_ref = SourceCodeReferences.fromFilename(filename=module_filename) module = CompiledPythonModule( module_name=module_name, reason=reason, is_top=False, mode="compiled", future_spec=FutureSpec(), source_ref=source_ref, ) module_body = makeModuleFrame( module=module, statements=( makeRaiseExceptionStatementFromInstance( source_ref=source_ref, exception=exc ), ), source_ref=source_ref, ) module_body = makeStatementsSequenceFromStatement(statement=module_body) module.setChildBody(module_body) return module def _makeModuleBodyTooComplex( module_name, reason, module_filename, source_code, is_package ): if module_filename not in Importing.warned_about: Importing.warned_about.add(module_filename) recursion_logger.info( """\ Cannot compile module '%s' because its code is too complex, included as bytecode.""" % module_name ) return makeUncompiledPythonModule( module_name=module_name, reason=reason, filename=module_filename, 
        bytecode=marshal.dumps(
            compile(source_code, module_filename, "exec", dont_inherit=True)
        ),
        is_package=is_package,
        technical=module_name in detectEarlyImports(),
    )


def buildModule(
    module_name,
    module_kind,
    module_filename,
    reason,
    source_code,
    is_top,
    is_main,
    is_fake,
    hide_syntax_error,
):
    """Build the module node for one module to be compiled.

    Depending on the module kind this either returns an uncompiled bytecode
    module (for "pyc" inputs), a stub module body representing a SyntaxError
    or too-complex code, or a regular module created via _createModule with
    its tree populated by createModuleTree.

    hide_syntax_error controls whether SyntaxError/CodeTooComplexCode are
    re-raised (main module) or converted into error stub modules.
    """
    # Many details to deal with,
    # pylint: disable=too-many-branches,too-many-locals,too-many-statements

    (
        main_added,
        is_package,
        is_namespace,
        source_ref,
        source_filename,
    ) = Importing.decideModuleSourceRef(
        filename=module_filename,
        module_name=module_name,
        is_main=is_main,
        is_fake=is_fake,
        logger=general,
    )

    # Warn when "-m" package mode cannot have any effect.
    if Options.hasPythonFlagPackageMode():
        if is_top and Options.shallMakeModule():
            optimization_logger.warning(
                "Python flag -m (package_mode) has no effect in module mode, it's only for executables."
            )
        elif is_main and not main_added:
            optimization_logger.warning(
                "Python flag -m (package_mode) only works on packages with '__main__.py'."
            )

    # Handle bytecode module case immediately.
    if module_kind == "pyc":
        return makeUncompiledPythonModule(
            module_name=module_name,
            reason=reason,
            filename=module_filename,
            bytecode=loadCodeObjectData(module_filename),
            is_package=is_package,
            technical=module_name in detectEarlyImports(),
        )

    # Read source code if necessary. Might give a SyntaxError due to not being proper
    # encoded source.
    if source_filename is not None and not is_namespace and module_kind == "py":
        # For fake modules, source is provided directly.
        original_source_code = None
        contributing_plugins = ()

        if source_code is None:
            try:
                (
                    source_code,
                    original_source_code,
                    contributing_plugins,
                ) = readSourceCodeFromFilenameWithInformation(
                    module_name=module_name, source_filename=source_filename
                )
            except SyntaxError as e:
                # Avoid hiding our own syntax errors.
                if not hasattr(e, "generated_by_nuitka"):
                    raise

                # Do not hide SyntaxError in main module.
                if not hide_syntax_error:
                    raise

                # Represent the broken module as an error stub instead.
                return _makeModuleBodyFromSyntaxError(
                    exc=e,
                    module_name=module_name,
                    reason=reason,
                    module_filename=module_filename,
                )

        try:
            ast_tree = parseSourceCodeToAst(
                source_code=source_code,
                module_name=module_name,
                filename=source_filename,
                line_offset=0,
            )
        except (SyntaxError, IndentationError) as e:
            # Do not hide SyntaxError if asked not to.
            if not hide_syntax_error:
                raise

            # If a plugin modified the source, check whether the unmodified
            # source parses; if it does, the plugin caused the SyntaxError.
            if original_source_code is not None:
                try:
                    parseSourceCodeToAst(
                        source_code=original_source_code,
                        module_name=module_name,
                        filename=source_filename,
                        line_offset=0,
                    )
                except (SyntaxError, IndentationError):
                    # Also an exception without the plugins, that is OK
                    pass
                else:
                    source_diff = getSourceCodeDiff(original_source_code, source_code)

                    for line in source_diff:
                        plugins_logger.warning(line)

                    if len(contributing_plugins) == 1:
                        next(iter(contributing_plugins)).sysexit(
                            "Making changes to '%s' that cause SyntaxError '%s'"
                            % (module_name, e)
                        )
                    else:
                        # NOTE(review): this joins the plugin objects directly;
                        # assumes they stringify via join — confirm they are
                        # names rather than plugin instances.
                        plugins_logger.sysexit(
                            "One of the plugins '%s' is making changes to '%s' that cause SyntaxError '%s'"
                            % (",".join(contributing_plugins), module_name, e)
                        )

            return _makeModuleBodyFromSyntaxError(
                exc=e,
                module_name=module_name,
                reason=reason,
                module_filename=module_filename,
            )
        except CodeTooComplexCode:
            # Do not hide CodeTooComplexCode in main module.
            if is_main:
                raise

            return _makeModuleBodyTooComplex(
                module_name=module_name,
                reason=reason,
                module_filename=module_filename,
                source_code=source_code,
                is_package=is_package,
            )
    else:
        # Namespace packages and extension modules have no parsed source.
        ast_tree = None
        source_code = None

    module = _createModule(
        module_name=module_name,
        module_kind=module_kind,
        reason=reason,
        source_code=source_code,
        source_ref=source_ref,
        is_top=is_top,
        is_main=is_main,
        is_namespace=is_namespace,
        is_package=is_package,
        main_added=main_added,
    )

    if is_top:
        ModuleRegistry.addRootModule(module)

        OutputDirectories.setMainModule(module)

    if module.isCompiledPythonModule() and source_code is not None:
        try:
            createModuleTree(
                module=module,
                source_ref=source_ref,
                ast_tree=ast_tree,
                is_main=is_main,
            )
        except CodeTooComplexCode:
            # Do not hide CodeTooComplexCode in main module.
            if is_main or is_top:
                raise

            return _makeModuleBodyTooComplex(
                module_name=module_name,
                reason=reason,
                module_filename=module_filename,
                source_code=source_code,
                is_package=is_package,
            )

    return module
PypiClean
/Ibid-0.1.1.tar.gz/Ibid-0.1.1/docs/contributing.rst
.. _contributing:

Contributing
============

.. _bug_reporting:

Bug Reporting
-------------

Please report any bugs in `the Launchpad tracker <https://bugs.launchpad.net/ibid>`_.
(Oh, and check for existing ones that match your problem first.)

Good bug reports describe the problem, include the message to the bot that
caused the bug, and any logging information / exceptions from ``ibid.log``.

Submitting Patches
------------------

.. highlight:: text

Want to go one step further and fix your bug or add a new feature?
We welcome contributions from everyone.
The best way to get a patch merged quickly is to follow the same development
process as the Ibid developers:

#. If you don't have one, `create a Launchpad account
   <https://launchpad.net/+login>`_ and configure Bazaar to use it.

#. If there isn't a bug in the tracker for this change, file one.
   It motivates the change.

#. Mark the bug as *In Progress*, assigned to you.

#. Take a branch of Ibid trunk (See :ref:`bzr-guide` if you are new to
   Bazaar)::

      user@box $ bzr branch lp:ibid description-1234

   ``description`` is a two or three-word hyphen-separated description of the
   branch, ``1234`` is the Launchpad bug number.

#. Fix your bug in this branch, following the :ref:`style-guidelines`.
   See also :ref:`dev-instance`.

#. Link the commit that fixes the bug to the Launchpad bug::

      user@box $ bzr commit --fixes lp:1234

#. Test that the fix works as expected and doesn't introduce any new bugs.
   ``pyflakes`` can find syntax errors you missed.

#. Run the test-cases::

      user@box $ trial ibid

#. Push the branch to Launchpad::

      user@box $ bzr push lp:~yourname/ibid/description-1234

#. Find the branch `on Launchpad <https://code.launchpad.net/ibid>`_ and
   propose it for merging into the Ibid trunk.

#. Proposals require approvals by a member of `ibid-core
   <https://launchpad.net/~ibid-core>`_ and two members of `ibid-dev
   <https://launchpad.net/~ibid-dev>`_ (or simply two members of ibid-core).
   Please join ibid-dev and help out with review.

..
_style-guidelines:

Style Guidelines
----------------

Writing code that matches the Ibid style will lead to a consistent code base
and thus happy developers.

* Follow `PEP 8 <http://www.python.org/dev/peps/pep-0008>`_, where it makes
  sense.

* 4 space indentation.

* Single quotes are preferred to double, where sensible.

* Almost all of Ibid should be compatible with Python 2.4+ (but not 3).
  Compatibility functions, imports, and libraries can be found in
  :mod:`ibid.compat`.

* There is more on good style in `Code Like a Pythonista: Idiomatic Python
  <http://python.net/~goodger/projects/pycon/2007/idiomatic/handout.html>`_.

Naming Conventions
^^^^^^^^^^^^^^^^^^

* Features should either go into an existing plugin, or if large enough into
  a plugin of the same name as the feature (singular).

* Database table names are plural.

Sources
^^^^^^^

* Follow `Twisted style
  <http://twistedmatrix.com/trac/browser/trunk/doc/core/development/policy/coding-standard.xhtml?format=raw>`_.

Plugins
^^^^^^^

* All features should have help and usage strings.

* Try to code for the general case, rather than your specific problem.
  ``Option`` configurables are handy for this, but don't bother making things
  that will never be changed configurable (e.g. static API endpoints).

* Use ``event.addresponse``'s string formatting abilities where possible.
  This will aid in future translation.

* Any changes to database schema should have upgrade-rules included for
  painless upgrade by users.

.. _bzr-guide:

Bazaar for Ibid Developers
--------------------------

You'll want a non-ancient version (>=1.6) of Bazaar (check your
distribution's backport repository), and a Launchpad account.

If you've never used Bazaar before, read `Bazaar in five minutes
<http://doc.bazaar-vcs.org/latest/en/mini-tutorial/index.html>`_.
Configure Bazaar to know who you are:: ~ $ bzr whoami "Arthur Pewtey <apewtey@example.com>" ~ $ bzr launchpad-login apewtey Make a Bazaar shared repository to contain all your Ibid branches:: ~ $ mkdir ~/code/ibid ~ $ cd ~/code/ibid ~/code/ibid $ bzr init-repo --1.6 . Check out Ibid trunk:: ~/code/ibid $ bzr checkout lp:ibid trunk When you wish to create a new branch:: ~/code/ibid $ bzr update trunk ~/code/ibid $ bzr branch trunk feature-1234 If you want to easily push this to Launchpad, create a ``~/.bazaar/locations.conf`` with the following contents: .. code-block:: ini [/home/apewtey/code/ibid] pull_location = lp:~apewtey/ibid/ pull_location:policy = appendpath push_location = lp:~apewtey/ibid/ push_location:policy = appendpath public_branch = lp:~apewtey/ibid/ public_branch:policy = appendpath That will allow you to push your branch to ``lp:~apewtey/ibid/feature-1234`` by typing:: ~/code/ibid/feature-1234 $ bzr push To delete a branch, you can simply ``rm -r`` it. See also: * `Launchpad code hosting documentation <https://help.launchpad.net/Code>`_ * `Using Bazaar with Launchpad <http://doc.bazaar-vcs.org/latest/en/tutorials/using_bazaar_with_launchpad.html>`_ * `Bazaar User Guide <http://doc.bazaar-vcs.org/latest/en/user-guide/>`_ * `Bazaar Reference <http://doc.bazaar-vcs.org/latest/en/user-reference/index.html>`_ .. _dev-instance: Running a Development Ibid -------------------------- A full-blown Ibid install is overkill for development and debugging cycles. Ibid source contains a developer-oriented ``ibid.ini`` in the root directory. This uses SQLite and connects to a South African IRC server. If you wish to change it, either remember not to commit this file to your branch, or override settings in ``local.ini``, which is ignored by Bazaar. Ibid can be simply run out of a checkout directory:: ~/code/ibid/feature-1234 $ scripts/ibid-setup If you won't need an administrative account, you can hit ``^D`` and avoid setting one up. 
Test a specific plugin:: ~/code/ibid/feature-1234 $ scripts/ibid-plugin pluginname Test with all plugins loaded:: ~/code/ibid/feature-1234 $ scripts/ibid-plugin -c .. note:: Not all plugin features will work in the ``ibid-plugin`` environment. In particular, anything relying on source-interaction or timed callbacks (such as many of the games). Also, all permissions are granted. If ``ibid-plugin`` isn't sufficient for your debugging needs, you can launch a normal Ibid by running:: ~/code/ibid/feature-1234 $ twistd -n ibid .. vi: set et sta sw=3 ts=3:
PypiClean
/Hand_Tracking_Module-0.1.tar.gz/Hand_Tracking_Module-0.1/Hand_Tracking_Module/handTrackingModule.py
import cv2
import mediapipe as mp  # aliased 'mp' per convention; 'np' would shadow numpy's usual alias
import time
import math

'''-------------------------------Creation of Hand_Detection Class---------------------------------'''
'''
   Methods inside Hand Detection Class
       1. findHands()    :- Detect No of Hands Inside The Frame
       2. findPosition() :- Find location Of Hands Points
       3. fingerUp()     :- Count Number Of Finger Up
       4. Distance()     :- Find Distance Between Two Points Of Finger's
'''


class handDetector():
    """Detects hands in BGR frames via MediaPipe and exposes landmark helpers."""

    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        self.mode = mode                  # static_image_mode (True = detect every frame)
        self.maxHands = maxHands          # maximum number of hands to track
        self.detectionCon = detectionCon  # minimum detection confidence
        self.trackCon = trackCon          # minimum tracking confidence

        self.npHands = mp.solutions.hands
        # BUG FIX: pass options by keyword. Newer mediapipe versions insert a
        # 'model_complexity' positional parameter, so positional arguments
        # would silently bind detectionCon to the wrong option.
        self.hands = self.npHands.Hands(
            static_image_mode=self.mode,
            max_num_hands=self.maxHands,
            min_detection_confidence=self.detectionCon,
            min_tracking_confidence=self.trackCon,
        )
        self.npDraw = mp.solutions.drawing_utils
        self.tipIds = [4, 8, 12, 16, 20]  # landmark ids of the five fingertips

    def findHands(self, img, draw=True):
        """Detect hands in a BGR frame; optionally draw landmarks onto img."""
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB
        self.results = self.hands.process(imgRGB)
        if self.results.multi_hand_landmarks:
            for handLMS in self.results.multi_hand_landmarks:
                if draw:
                    self.npDraw.draw_landmarks(img, handLMS, self.npHands.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, handNo=0, draw=True):
        """Return ([id, x, y] list, bounding box) for hand number `handNo`.

        Must be called after findHands(). Returns ([], []) when no hand
        is visible in the current frame.
        """
        xList = []
        yList = []
        bbox = []
        self.lmlist = []
        if self.results.multi_hand_landmarks:
            myhand = self.results.multi_hand_landmarks[handNo]
            for id, lm in enumerate(myhand.landmark):
                h, w, c = img.shape
                # Landmarks are normalized [0, 1]; convert to pixel coordinates.
                cx, cy = int(lm.x * w), int(lm.y * h)
                xList.append(cx)
                yList.append(cy)
                self.lmlist.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 7, (255, 0, 255), cv2.FILLED)
            Xmin, Xmax = min(xList), max(xList)
            Ymin, Ymax = min(yList), max(yList)
            bbox = Xmin, Ymin, Xmax, Ymax
            if draw:
                cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2)
        return self.lmlist, bbox

    def fingerUp(self):
        """Return 5 flags (thumb..pinky), 1 = raised; [] if no hand detected."""
        finger = []
        # BUG FIX: guard against an empty landmark list (no hand in frame),
        # which previously raised IndexError.
        if not self.lmlist:
            return finger
        # Thumb: compare x of tip against the joint below it.
        # NOTE(review): this x-comparison assumes one hand orientation
        # (e.g. right hand, palm facing camera) — confirm for the other hand.
        if self.lmlist[self.tipIds[0]][1] < self.lmlist[self.tipIds[0] - 1][1]:
            finger.append(0)
        else:
            finger.append(1)
        # Other four fingers: tip above (smaller y than) the middle joint.
        for id in range(1, 5):
            if self.lmlist[self.tipIds[id]][2] < self.lmlist[self.tipIds[id] - 2][2]:
                finger.append(1)
            else:
                finger.append(0)
        return finger

    def Distance(self, img, Top_1, Top_2, draw=True):
        """Return the pixel distance between landmarks Top_1 and Top_2."""
        x1, y1 = self.lmlist[Top_1][1:]
        x2, y2 = self.lmlist[Top_2][1:]
        cx, cy = (x1 + x2) // 2, (y1 + y2) // 2  # midpoint, only used for drawing
        length = math.hypot(x1 - x2, y1 - y2)
        if draw:
            cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.circle(img, (cx, cy), 7, (0, 0, 255), cv2.FILLED)
        return length


'''--------------------------Main Function-----------------------------------'''


def main():
    """Webcam demo: overlay FPS and print the thumb-tip landmark."""
    pTime = 0
    cap = cv2.VideoCapture(0)
    detector = handDetector()
    while True:
        success, img = cap.read()
        if not success:
            break  # camera unavailable or stream ended
        img = detector.findHands(img)
        # BUG FIX: findPosition returns (landmarks, bbox); the original kept
        # the tuple and then referenced an undefined name 'lmList' (NameError).
        lmlist, bbox = detector.findPosition(img)
        if len(lmlist) != 0:
            print(lmlist[4])
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_COMPLEX, 3, (0, 255, 170), 2)
        cv2.imshow("output", img)
        if (cv2.waitKey(1) == 27):  # ESC quits
            break


if __name__ == "__main__":
    main()
PypiClean
/FMPy-0.3.11-py3-none-any.whl/fmpy/model_description.py
from typing import List, Union, IO

from attr import attrs, attrib, Factory


@attrs(auto_attribs=True)
class DefaultExperiment(object):
    """ Default experiment values from the model description """

    startTime: str = None
    stopTime: str = None
    tolerance: str = None
    stepSize: str = None


@attrs(eq=False)
class InterfaceType(object):
    """ Capability flags common to the FMI interface types """

    modelIdentifier = attrib(type=str, default=None)
    needsExecutionTool = attrib(type=bool, default=False, repr=False)
    canBeInstantiatedOnlyOncePerProcess = attrib(type=bool, default=False, repr=False)
    canGetAndSetFMUstate = attrib(type=bool, default=False, repr=False)
    canSerializeFMUstate = attrib(type=bool, default=False, repr=False)
    providesAdjointDerivatives = attrib(type=bool, default=False, repr=False)
    providesPerElementDependencies = attrib(type=bool, default=False, repr=False)

    # FMI 2.0
    canNotUseMemoryManagementFunctions = attrib(type=bool, default=False, repr=False)
    # Used by FMI 2.0 and 3.0. This attribute was previously declared twice
    # (once per section), which is redundant: attrs keeps a single attribute
    # per name and orders it by the later declaration. Declaring it once,
    # here, preserves the generated __init__ signature exactly.
    providesDirectionalDerivative = attrib(type=bool, default=False, repr=False)


@attrs(eq=False)
class ModelExchange(InterfaceType):
    """ Model Exchange specific capability flags """

    needsCompletedIntegratorStep = attrib(type=bool, default=False, repr=False)
    providesEvaluateDiscreteStates = attrib(type=bool, default=False, repr=False)


@attrs(eq=False)
class CoSimulation(InterfaceType):
    """ Co-Simulation specific capability flags """

    canHandleVariableCommunicationStepSize = attrib(type=bool, default=False, repr=False)
    fixedInternalStepSize = attrib(type=float, default=None, repr=False)
    maxOutputDerivativeOrder = attrib(type=int, default=0, repr=False)
    recommendedIntermediateInputSmoothness = attrib(type=int, default=0, repr=False)
    canInterpolateInputs = attrib(type=bool, default=False, repr=False)
    providesIntermediateUpdate = attrib(type=bool, default=False, repr=False)
    canReturnEarlyAfterIntermediateUpdate = attrib(type=bool, default=False, repr=False)
    hasEventMode = attrib(type=bool, default=False, repr=False)
    providesEvaluateDiscreteStates = attrib(type=bool, default=False, repr=False)
    canRunAsynchronuously = attrib(type=bool, default=False, repr=False)
@attrs(eq=False)
class ScheduledExecution(InterfaceType):
    """ Scheduled Execution interface (FMI 3.0); no extra capability flags """

    pass


@attrs(auto_attribs=True, eq=False)
class PreProcessorDefinition(object):
    """ A preprocessor definition from a SourceFileSet in buildDescription.xml """

    name: str = None
    value: str = None
    optional: bool = False
    description: str = None


@attrs(auto_attribs=True, eq=False)
class SourceFileSet(object):
    """ A set of source files with common language and compiler settings """

    name: str = None
    language: str = None
    compiler: str = None
    compilerOptions: str = None
    preprocessorDefinitions: List[str] = Factory(list)
    sourceFiles: List[str] = Factory(list)
    includeDirectories: List[str] = Factory(list)


@attrs(auto_attribs=True, eq=False)
class BuildConfiguration(object):
    """ A build configuration from buildDescription.xml """

    modelIdentifier: str = None
    sourceFileSets: List[SourceFileSet] = Factory(list)


@attrs(eq=False)
class Dimension(object):
    """ Dimension of an array variable (FMI 3.0); either a fixed start
    value or a value reference of the variable that holds the size """

    start = attrib(type=str)
    valueReference = attrib(type=int)


@attrs(eq=False)
class Item(object):
    """ Enumeration Item """

    name = attrib(type=str, default=None)
    value = attrib(type=str, default=None)
    description = attrib(type=str, default=None, repr=False)


@attrs(eq=False)
class SimpleType(object):
    """ Type Definition """

    name = attrib(type=str, default=None)
    type = attrib(type=str, default=None)
    quantity = attrib(type=str, default=None, repr=False)
    unit = attrib(type=str, default=None)
    displayUnit = attrib(type=str, default=None, repr=False)
    relativeQuantity = attrib(type=str, default=None, repr=False)
    min = attrib(type=str, default=None, repr=False)
    max = attrib(type=str, default=None, repr=False)
    nominal = attrib(type=str, default=None, repr=False)
    unbounded = attrib(type=str, default=None, repr=False)
    items = attrib(type=List[Item], default=Factory(list), repr=False)


@attrs(eq=False)
class DisplayUnit(object):
    """ Display unit with linear conversion (factor, offset) from its Unit """

    name = attrib(type=str, default=None)
    factor = attrib(type=float, default=1.0, repr=False)
    offset = attrib(type=float, default=0.0, repr=False)


@attrs(eq=False)
class Unit(object):
    """ Unit definition with optional base unit and display units """

    name = attrib(type=str, default=None)
    baseUnit = attrib(type=str, default=None, repr=False)
    displayUnits = attrib(type=List[DisplayUnit], default=Factory(list), repr=False)


@attrs(eq=False)
class BaseUnit(object):
    """ SI base unit exponents and linear conversion (factor, offset) """

    kg = attrib(type=int, default=0)
    m = attrib(type=int, default=0)
    s = attrib(type=int, default=0)
    A = attrib(type=int, default=0)
    K = attrib(type=int, default=0)
    mol = attrib(type=int, default=0)
    cd = attrib(type=int, default=0)
    rad = attrib(type=int, default=0)
    factor = attrib(type=float, default=1.0)
    offset = attrib(type=float, default=0.0)


@attrs(eq=False)
class ScalarVariable(object):
    """ A variable from the model description """

    name = attrib(type=str)
    valueReference = attrib(type=int, repr=False)
    type = attrib(type=str, default=None)
    "One of 'Real', 'Integer', 'Enumeration', 'Boolean', 'String'"

    description = attrib(type=str, default=None, repr=False)

    causality = attrib(type=str, default=None, repr=False)
    "One of 'parameter', 'calculatedParameter', 'input', 'output', 'local', 'independent'"

    variability = attrib(type=str, default=None, repr=False)
    "One of 'constant', 'fixed', 'tunable', 'discrete' or 'continuous'"

    initial = attrib(type=str, default=None, repr=False)
    "One of 'exact', 'approx', 'calculated' or None"

    canHandleMultipleSetPerTimeInstant = attrib(type=bool, default=True, repr=False)
    intermediateUpdate = attrib(type=bool, default=False, repr=False)
    previous = attrib(type=int, default=None, repr=False)

    # TODO: resolve variables
    clocks = attrib(type=List[int], default=Factory(list))

    declaredType = attrib(type=SimpleType, default=None, repr=False)

    dimensions = attrib(type=List[Dimension], default=Factory(list))
    "List of fixed dimensions"

    dimensionValueReferences = attrib(type=List[int], default=Factory(list))
    "List of value references to the variables that hold the dimensions"

    quantity = attrib(type=str, default=None, repr=False)
    "Physical quantity"

    unit = attrib(type=str, default=None, repr=False)
    "Unit"

    displayUnit = attrib(type=str, default=None, repr=False)
    "Default display unit"

    relativeQuantity = attrib(type=bool, default=False, repr=False)
    "Relative quantity"

    min = attrib(type=str, default=None, repr=False)
    "Minimum value"

    max = attrib(type=str, default=None, repr=False)
    "Maximum value"

    nominal = attrib(type=str, default=None, repr=False)
    "Nominal value"

    unbounded = attrib(type=bool, default=False, repr=False)
    "Value is unbounded"

    start = attrib(type=str, default=None, repr=False)
    "Initial or guess value"

    derivative = attrib(type='ScalarVariable', default=None, repr=False)
    "The derivative of this variable"

    reinit = attrib(type=bool, default=False, repr=False)
    "Can be reinitialized at an event by the FMU"

    sourceline = attrib(type=int, default=None, repr=False)
    "Line number in the modelDescription.xml or None if unknown"

    # Clock attributes
    canBeDeactivated = attrib(type=bool, default=False, repr=False)
    priority = attrib(type=int, default=None, repr=False)
    intervalVariability = attrib(type=str, default=None, repr=False)
    "One of 'constant', 'fixed', 'tunable', 'changing', 'countdown', 'triggered' or None"

    intervalDecimal = attrib(type=float, default=None, repr=False)
    shiftDecimal = attrib(type=float, default=None, repr=False)
    supportsFraction = attrib(type=bool, default=False, repr=False)
    resolution = attrib(type=int, default=None, repr=False)
    intervalCounter = attrib(type=int, default=None, repr=False)
    shiftCounter = attrib(type=int, default=0, repr=False)


@attrs(eq=False)
class Unknown(object):
    """ An unknown of the model structure """

    index = attrib(type=int, default=0, repr=False)
    variable = attrib(type=ScalarVariable, default=None)
    dependencies = attrib(type=List[ScalarVariable], default=Factory(list), repr=False)
    dependenciesKind = attrib(type=List[str], default=Factory(list), repr=False)

    sourceline = attrib(type=int, default=0, repr=False)
    "Line number in the modelDescription.xml"


@attrs(eq=False)
class ModelDescription(object):
    """ Root object for the information in modelDescription.xml """

    guid = attrib(type=str, default=None, repr=False)
    fmiVersion = attrib(type=str, default=None)
    modelName = attrib(type=str, default=None)
    description = attrib(type=str, default=None, repr=False)
    generationTool = attrib(type=str, default=None, repr=False)
    generationDateAndTime = attrib(type=str, default=None, repr=False)
    variableNamingConvention = attrib(type=str, default='flat', repr=False)
    numberOfContinuousStates = attrib(type=int, default=0, repr=False)
    numberOfEventIndicators = attrib(type=int, default=0, repr=False)

    defaultExperiment = attrib(type=DefaultExperiment, default=None, repr=False)

    # interface types present in the FMU (any combination)
    coSimulation = attrib(type=CoSimulation, default=None)
    modelExchange = attrib(type=ModelExchange, default=None)
    scheduledExecution = attrib(type=ScheduledExecution, default=None)

    buildConfigurations = attrib(type=List[BuildConfiguration], default=Factory(list), repr=False)
    unitDefinitions = attrib(type=List[Unit], default=Factory(list), repr=False)
    typeDefinitions = attrib(type=List[SimpleType], default=Factory(list), repr=False)
    modelVariables = attrib(type=List[ScalarVariable], default=Factory(list), repr=False)

    # model structure
    outputs = attrib(type=List[Unknown], default=Factory(list), repr=False)
    derivatives = attrib(type=List[Unknown], default=Factory(list), repr=False)
    clockedStates = attrib(type=List[Unknown], default=Factory(list), repr=False)
    eventIndicators = attrib(type=List[Unknown], default=Factory(list), repr=False)
    initialUnknowns = attrib(type=List[Unknown], default=Factory(list), repr=False)


class ValidationError(Exception):
    """ Exception raised for failed validation of the modelDescription.xml

    Attributes:
        problems    list of problems found
    """

    def __init__(self, problems):
        message = "Failed to validate modelDescription.xml:\n\n- " + '\n- '.join(problems)
        self.problems = problems
        super(ValidationError, self).__init__(message)


def _copy_attributes(element, object, attributes=None):
    """ Copy attributes from an XML element to a Python object """

    if attributes is None:
        # default: copy every attribute the target object defines
        attributes = object.__dict__.keys()

    for attribute in attributes:

        if attribute not in element.attrib:
            continue  # skip

        value = element.get(attribute)

        # the current value's type decides the conversion
        t = type(getattr(object, attribute))

        # convert the value to the correct type
        if t is bool:
            value = value == 'true'
        elif t is int:
            value = int(value)
        elif t is float:
            value = float(value)

        setattr(object, attribute, value)


def read_build_description(filename, validate=True):
    """ Read the build configurations from an FMU, a directory with an
    extracted FMU, or a buildDescription.xml file.

    Returns a list of BuildConfiguration objects; an empty list when
    no sources/buildDescription.xml is present.
    """

    import zipfile
    from lxml import etree
    import os

    if isinstance(filename, str) and os.path.isdir(filename):  # extracted FMU
        filename = os.path.join(filename, 'sources/buildDescription.xml')
        if not os.path.isfile(filename):
            return []
        tree = etree.parse(filename)
    elif isinstance(filename, str) and os.path.isfile(filename) and filename.lower().endswith('.xml'):  # XML file
        # NOTE(review): this isfile check is redundant — the elif condition
        # above already established that the file exists
        if not os.path.isfile(filename):
            return []
        tree = etree.parse(filename)
    else:
        # FMU as path or file like object
        with zipfile.ZipFile(filename, 'r') as zf:
            if 'sources/buildDescription.xml' not in zf.namelist():
                return []
            xml = zf.open('sources/buildDescription.xml')
            tree = etree.parse(xml)

    root = tree.getroot()

    fmi_version = root.get('fmiVersion')

    # buildDescription.xml only exists for FMI 3.0
    if fmi_version is None or not fmi_version.startswith('3.0'):
        raise Exception("Unsupported fmiBuildDescription version: %s" % fmi_version)

    if validate:
        module_dir, _ = os.path.split(__file__)
        schema = etree.XMLSchema(file=os.path.join(module_dir, 'schema', 'fmi3', 'fmi3BuildDescription.xsd'))

        if not schema.validate(root):
            message = "Failed to validate buildDescription.xml:"
            for entry in schema.error_log:
                message += "\n%s (line %d, column %d): %s" % (entry.level_name, entry.line, entry.column, entry.message)
            raise Exception(message)

    build_configurations = []

    for bc in root.findall('BuildConfiguration'):

        buildConfiguration = BuildConfiguration()
        buildConfiguration.modelIdentifier = bc.get('modelIdentifier')

        build_configurations.append(buildConfiguration)

        for sf in bc.findall('SourceFileSet'):

            sourceFileSet = SourceFileSet()
            sourceFileSet.language = sf.get('language')

            for pd in sf.findall('PreprocessorDefinition'):
                definition = PreProcessorDefinition()
                definition.name = pd.get('name')
                definition.value = pd.get('value')
                definition.optional = pd.get('optional') == 'true'
                definition.description = pd.get('description')
sourceFileSet.preprocessorDefinitions.append(definition) for f in sf.findall('SourceFile'): sourceFileSet.sourceFiles.append(f.get('name')) for d in sf.findall('IncludeDirectory'): sourceFileSet.includeDirectories.append(d.get('name')) buildConfiguration.sourceFileSets.append(sourceFileSet) return build_configurations def read_model_description(filename: Union[str, IO], validate: bool = True, validate_variable_names: bool = False, validate_model_structure: bool = False) -> ModelDescription: """ Read the model description from an FMU without extracting it Parameters: filename filename of the FMU or XML file, directory with extracted FMU or file like object validate whether the model description should be validated validate_variable_names validate the variable names against the EBNF validate_model_structure validate the model structure returns: model_description a ModelDescription object """ import zipfile from lxml import etree import os from . import validation import numpy as np # remember the original filename _filename = filename if isinstance(filename, str) and os.path.isdir(filename): # extracted FMU filename = os.path.join(filename, 'modelDescription.xml') tree = etree.parse(filename) elif isinstance(filename, str) and os.path.isfile(filename) and filename.lower().endswith('.xml'): # XML file tree = etree.parse(filename) else: # FMU as path or file like object with zipfile.ZipFile(filename, 'r') as zf: xml = zf.open('modelDescription.xml') tree = etree.parse(xml) root = tree.getroot() fmiVersion = root.get('fmiVersion') is_fmi1 = fmiVersion == '1.0' is_fmi2 = fmiVersion == '2.0' is_fmi3 = fmiVersion.startswith('3.') if not is_fmi1 and not is_fmi2 and not is_fmi3: raise Exception("Unsupported FMI version: %s" % fmiVersion) if validate: module_dir, _ = os.path.split(__file__) if is_fmi1: schema = etree.XMLSchema(file=os.path.join(module_dir, 'schema', 'fmi1', 'fmiModelDescription.xsd')) elif is_fmi2: schema = etree.XMLSchema(file=os.path.join(module_dir, 
'schema', 'fmi2', 'fmi2ModelDescription.xsd')) else: schema = etree.XMLSchema(file=os.path.join(module_dir, 'schema', 'fmi3', 'fmi3ModelDescription.xsd')) if not schema.validate(root): problems = ["%s (line %d, column %d): %s" % (e.level_name, e.line, e.column, e.message) for e in schema.error_log] raise ValidationError(problems) modelDescription = ModelDescription() _copy_attributes(root, modelDescription, ['fmiVersion', 'guid', 'modelName', 'description', 'generationTool', 'generationDateAndTime', 'variableNamingConvention']) if is_fmi3: modelDescription.guid = root.get('instantiationToken') if root.get('numberOfEventIndicators') is not None: modelDescription.numberOfEventIndicators = int(root.get('numberOfEventIndicators')) if is_fmi1: modelDescription.numberOfContinuousStates = int(root.get('numberOfContinuousStates')) elif is_fmi2: modelDescription.numberOfContinuousStates = len(root.findall('ModelStructure/Derivatives/Unknown')) # default experiment for d in root.findall('DefaultExperiment'): modelDescription.defaultExperiment = DefaultExperiment() for attribute in ['startTime', 'stopTime', 'tolerance', 'stepSize']: if attribute in d.attrib: setattr(modelDescription.defaultExperiment, attribute, float(d.get(attribute))) # model description if is_fmi1: modelIdentifier = root.get('modelIdentifier') if root.find('Implementation') is not None: modelDescription.coSimulation = CoSimulation() modelDescription.coSimulation.modelIdentifier = modelIdentifier else: modelDescription.modelExchange = ModelExchange() modelDescription.modelExchange.modelIdentifier = modelIdentifier elif is_fmi2: for me in root.findall('ModelExchange'): modelDescription.modelExchange = ModelExchange() _copy_attributes(me, modelDescription.modelExchange, ['modelIdentifier', 'needsExecutionTool', 'canBeInstantiatedOnlyOncePerProcess', 'canNotUseMemoryManagementFunctions', 'canGetAndSetFMUstate', 'canSerializeFMUstate', 'providesDirectionalDerivative']) for cs in root.findall('CoSimulation'): 
modelDescription.coSimulation = CoSimulation() _copy_attributes(cs, modelDescription.coSimulation, ['modelIdentifier', 'needsExecutionTool', 'canHandleVariableCommunicationStepSize', 'canInterpolateInputs', 'maxOutputDerivativeOrder', 'canRunAsynchronuously', 'canBeInstantiatedOnlyOncePerProcess', 'canNotUseMemoryManagementFunctions', 'canGetAndSetFMUstate', 'canSerializeFMUstate', 'providesDirectionalDerivative']) else: for me in root.findall('ModelExchange'): modelDescription.modelExchange = ModelExchange() _copy_attributes(me, modelDescription.modelExchange) for cs in root.findall('CoSimulation'): modelDescription.coSimulation = CoSimulation() _copy_attributes(cs, modelDescription.coSimulation) for se in root.findall('ScheduledExecution'): modelDescription.scheduledExecution = ScheduledExecution() _copy_attributes(se, modelDescription.scheduledExecution) # build configurations if is_fmi2: for interface_type in root.findall('ModelExchange') + root.findall('CoSimulation'): modelIdentifier = interface_type.get('modelIdentifier') if len(modelDescription.buildConfigurations) > 0 and modelDescription.buildConfigurations[0].modelIdentifier == modelIdentifier: continue # use existing build configuration for both FMI types source_files = [file.get('name') for file in interface_type.findall('SourceFiles/File')] if len(source_files) > 0: buildConfiguration = BuildConfiguration() modelDescription.buildConfigurations.append(buildConfiguration) buildConfiguration.modelIdentifier = modelIdentifier source_file_set = SourceFileSet() buildConfiguration.sourceFileSets.append(source_file_set) source_file_set.sourceFiles = source_files elif is_fmi3 and not (isinstance(filename, str) and _filename.endswith('.xml')): # read buildDescription.xml if _filename is a folder or ZIP file modelDescription.buildConfigurations = read_build_description(_filename, validate=validate) # unit definitions if is_fmi1: for u in root.findall('UnitDefinitions/BaseUnit'): unit = Unit(name=u.get('unit')) 
for d in u.findall('DisplayUnitDefinition'): displayUnit = DisplayUnit(name=d.get('displayUnit')) displayUnit.factor = float(d.get('gain', '1')) displayUnit.offset = float(d.get('offset', '0')) unit.displayUnits.append(displayUnit) modelDescription.unitDefinitions.append(unit) else: for u in root.findall('UnitDefinitions/Unit'): unit = Unit(name=u.get('name')) # base unit for b in u.findall('BaseUnit'): unit.baseUnit = BaseUnit() _copy_attributes(b, unit.baseUnit, ['kg', 'm', 's', 'A', 'K', 'mol', 'cd', 'rad', 'factor', 'offset']) # display units for d in u.findall('DisplayUnit'): displayUnit = DisplayUnit(name=d.get('name')) _copy_attributes(d, displayUnit, ['factor', 'offset']) unit.displayUnits.append(displayUnit) modelDescription.unitDefinitions.append(unit) # type definitions type_definitions = {None: None} if is_fmi1 or is_fmi2: # FMI 1 and 2 for t in root.findall('TypeDefinitions/' + ('Type' if is_fmi1 else 'SimpleType')): first = t[0] # first element simple_type = SimpleType( name=t.get('name'), type=first.tag[:-len('Type')] if is_fmi1 else first.tag, **first.attrib ) # add enumeration items for i, item in enumerate(first.findall('Item')): it = Item(**item.attrib) if is_fmi1: it.value = i + 1 simple_type.items.append(it) modelDescription.typeDefinitions.append(simple_type) type_definitions[simple_type.name] = simple_type else: # FMI 3 for t in root.findall('TypeDefinitions/*'): if t.tag not in {'Float32Type', 'Float64Type', 'Int8Type', 'UInt8Type', 'Int16Type', 'UInt16Type', 'Int32Type', 'UInt32Type', 'Int64Type', 'UInt64Type', 'BooleanType', 'StringType', 'BinaryType', 'EnumerationType'}: continue simple_type = SimpleType(type=t.tag[:-4], **t.attrib) # add enumeration items for item in t.findall('Item'): it = Item(**item.attrib) simple_type.items.append(it) modelDescription.typeDefinitions.append(simple_type) type_definitions[simple_type.name] = simple_type # default values for 'initial' derived from variability and causality initial_defaults = { 
'constant': {'output': 'exact', 'local': 'exact', 'parameter': 'exact'}, 'fixed': {'parameter': 'exact', 'calculatedParameter': 'calculated', 'structuralParameter': 'exact', 'local': 'calculated'}, 'tunable': {'parameter': 'exact', 'calculatedParameter': 'calculated', 'structuralParameter': 'exact', 'local': 'calculated'}, 'discrete': {'input': None, 'output': 'calculated', 'local': 'calculated'}, 'continuous': {'input': None, 'output': 'calculated', 'local': 'calculated', 'independent': None}, 'clock': {'input': 'exact', 'output': 'calculated', 'local': 'calculated'}, } # model variables for variable in root.find('ModelVariables'): if variable.get("name") is None: continue sv = ScalarVariable(name=variable.get('name'), valueReference=int(variable.get('valueReference'))) sv.description = variable.get('description') sv.causality = variable.get('causality', default='local') sv.variability = variable.get('variability') sv.initial = variable.get('initial') sv.sourceline = variable.sourceline if fmiVersion in ['1.0', '2.0']: # get the nested "value" element for child in variable.iterchildren(): if child.tag in {'Real', 'Integer', 'Boolean', 'String', 'Enumeration'}: value = child break else: value = variable sv.intervalVariability = variable.get('intervalVariability') sv.clocks = variable.get('clocks') sv.type = value.tag if variable.tag in {'Binary', 'String'}: # handle <Start> element of Binary and String variables in FMI 3 start = variable.find('Start') if start is not None: sv.start = start.get('value') else: sv.start = value.get('start') type_map = { 'Real': float, 'Integer': int, 'Enumeration': int, 'Boolean': bool, 'String': str, 'Float32': float, 'Float64': float, 'Int8': int, 'UInt8': int, 'Int16': int, 'UInt16': int, 'Int32': int, 'UInt32': int, 'Int64': int, 'UInt64': int, 'Binary': bytes, 'Clock': float, } sv._python_type = type_map[sv.type] if sv.type in ['Real', 'Float32', 'Float64']: sv.unit = value.get('unit') sv.displayUnit = value.get('displayUnit') 
sv.relativeQuantity = value.get('relativeQuantity') == 'true' sv.derivative = value.get('derivative') sv.nominal = value.get('nominal') sv.unbounded = value.get('unbounded') == 'true' if sv.type in ['Real', 'Enumeration'] or sv.type.startswith(('Float', 'Int')): sv.quantity = value.get('quantity') sv.min = value.get('min') sv.max = value.get('max') # resolve the declared type declared_type = value.get('declaredType') if declared_type in type_definitions: sv.declaredType = type_definitions[value.get('declaredType')] else: raise Exception('Variable "%s" (line %s) has declaredType="%s" which has not been defined.' % (sv.name, sv.sourceline, declared_type)) if is_fmi1: if sv.causality == 'internal': sv.causality = 'local' if sv.variability == 'parameter': sv.causality = 'parameter' sv.variability = None else: if sv.variability is None: sv.variability = 'continuous' if sv.type in {'Float32', 'Float64', 'Real'} else 'discrete' if sv.initial is None: try: sv.initial = initial_defaults[sv.variability][sv.causality] except KeyError: raise Exception('Variable "%s" (line %s) has an illegal combination of causality="%s"' ' and variability="%s".' 
% (sv.name, sv.sourceline, sv.causality, sv.variability)) dimensions = variable.findall('Dimension') if dimensions: for dimension in dimensions: start = dimension.get('start') vr = dimension.get('valueReference') d = Dimension( start=int(start) if start is not None else None, valueReference=int(vr) if vr is not None else None ) sv.dimensions.append(d) modelDescription.modelVariables.append(sv) variables = dict((v.valueReference, v) for v in modelDescription.modelVariables) # calculate initial shape for variable in modelDescription.modelVariables: shape = [] for d in variable.dimensions: if d.start is not None: shape.append(int(d.start)) else: v = variables[d.valueReference] shape.append(int(v.start)) variable.shape = tuple(shape) if is_fmi2: # model structure for attr, element in [(modelDescription.outputs, 'Outputs'), (modelDescription.derivatives, 'Derivatives'), (modelDescription.initialUnknowns, 'InitialUnknowns')]: for u in root.findall('ModelStructure/' + element + '/Unknown'): unknown = Unknown() unknown.sourceline = u.sourceline unknown.variable = modelDescription.modelVariables[int(u.get('index')) - 1] dependencies = u.get('dependencies') if dependencies: for vr in dependencies.strip().split(' '): unknown.dependencies.append(modelDescription.modelVariables[int(vr) - 1]) dependenciesKind = u.get('dependenciesKind') if dependenciesKind: unknown.dependenciesKind = dependenciesKind.strip().split(' ') attr.append(unknown) # resolve derivatives for variable in modelDescription.modelVariables: if variable.derivative is not None: index = int(variable.derivative) - 1 variable.derivative = modelDescription.modelVariables[index] if is_fmi3: for attr, element in [(modelDescription.outputs, 'Output'), (modelDescription.derivatives, 'ContinuousStateDerivative'), (modelDescription.clockedStates, 'ClockedState'), (modelDescription.initialUnknowns, 'InitialUnknown'), (modelDescription.eventIndicators, 'EventIndicator')]: for u in root.findall('ModelStructure/' + element): 
unknown = Unknown() unknown.sourceline = u.sourceline unknown.variable = variables[int(u.get('valueReference'))] dependencies = u.get('dependencies') if dependencies: for vr in dependencies.strip().split(' '): unknown.dependencies.append(variables[int(vr)]) dependenciesKind = u.get('dependenciesKind') if dependenciesKind: unknown.dependenciesKind = dependenciesKind.strip().split(' ') attr.append(unknown) for variable in modelDescription.modelVariables: # resolve derivative if variable.derivative is not None: variable.derivative = variables[int(variable.derivative)] # resolve clocks if variable.clocks is not None: variable.clocks = [variables[int(vr)] for vr in variable.clocks.strip().split(' ')] # calculate numberOfContinuousStates for unknown in modelDescription.derivatives: modelDescription.numberOfContinuousStates += int(np.prod(unknown.variable.shape)) # calculate numberOfEventIndicators for unknown in modelDescription.eventIndicators: modelDescription.numberOfEventIndicators += int(np.prod(unknown.variable.shape)) if validate: problems = validation.validate_model_description(modelDescription, validate_variable_names=validate_variable_names, validate_model_structure=validate_model_structure) if problems: raise ValidationError(problems) return modelDescription
PypiClean
/Mask_anonymization_framework-1.3.2-py3-none-any.whl/mask_framework_lib/ner_plugins/NER_BiLSTM_Glove_i2b2.py
from keras import Sequential
from keras.models import model_from_json
from keras.layers import Embedding, Bidirectional, LSTM, Dense, TimeDistributed
from sklearn.preprocessing import LabelBinarizer
from tqdm import tqdm
from keras_preprocessing import sequence
from ..ner_plugins.utils.spec_tokenizers import tokenize_fa
import numpy as np
from keras_preprocessing.text import Tokenizer
import pickle
import os
import urllib.request
from zipfile import ZipFile


class NER_BiLSTM_Glove_i2b2(object):
    """Class that implements and performs named entity recognition using BiLSTM
    neural network architecture. The architecture uses GloVe embeddings trained
    on common crawl dataset. Then the algorithm is trained on i2b2 2014 dataset.
    """

    def __init__(self):
        """Implementation of initialization.

        Loads the serialized Keras model (JSON architecture + HDF5 weights) and the
        pickled word index when the model files exist on disk, and downloads and
        extracts the GloVe embedding archive into ``Resources/`` when it is missing.
        NOTE(review): when the JSON file is absent, ``self.model`` stays ``None`` and
        ``self.word_index`` is never set — confirm callers handle that case.
        """
        # load json and create model
        self.model = None
        if os.path.exists('src/mask_framework_lib/Models/BiLSTM_Glove_de_identification_model.json'):
            json_file = open('src/mask_framework_lib/Models/BiLSTM_Glove_de_identification_model.json', 'r')
            loaded_model_json = json_file.read()
            json_file.close()
            self.model = model_from_json(loaded_model_json)
            self.GLOVE_DIR = "Resources/"
            # Fetch the GloVe 840B.300d archive on first use and unzip it,
            # removing the zip afterwards to save disk space.
            if os.path.isdir(self.GLOVE_DIR) == False or os.path.isfile(self.GLOVE_DIR+"glove.840B.300d.txt")==False:
                if os.path.exists(self.GLOVE_DIR)==False:
                    os.mkdir(self.GLOVE_DIR)
                print('Beginning file download with urllib2...')
                url = 'http://nlp.stanford.edu/data/glove.840B.300d.zip'
                urllib.request.urlretrieve(url, self.GLOVE_DIR+'glove.840B.300d.zip')
                with ZipFile(self.GLOVE_DIR+'glove.840B.300d.zip', 'r') as zipObj:
                    # Extract all the contents of zip file in current directory
                    zipObj.extractall(self.GLOVE_DIR)
                os.remove(self.GLOVE_DIR+"glove.840B.300d.zip")
            # load weights into new model
            self.model.load_weights("src/mask_framework_lib/Models/BiLSTM_Glove_de_identification_model.h5")
            print("Loaded model from disk")
            self.model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
            self.word_index = pickle.load(open("src/mask_framework_lib/Models/word_index.pkl","rb"))
        self.MAX_SEQUENCE_LENGTH = 200  # declared maximum sequence length (padding below actually uses 70)
        self.EMBEDDING_DIM = 300        # GloVe 840B vectors are 300-dimensional
        self.MAX_NB_WORDS = 2200000     # vocabulary cap passed to the Tokenizer

    def build_tensor2(self,sequences,numrecs,word2index,maxlen,makecategorical=False,num_classes=0,is_label=False):
        """
        Function to create tensors out of sequences
        :param sequences: Sequences of (word, label) pairs
        :param numrecs: size of the tensor
        :param word2index: mapping between words and its numerical representation (index). Loaded from file
        :param maxlen: Maximal length of the sequence
        :param makecategorical: Not used
        :param num_classes: Not used
        :param is_label: When True build the one-hot label tensor, otherwise the word-id tensor
        :return: padded word-id tensor, or a one-hot label tensor when is_label is True
        """
        data = np.empty((numrecs,), dtype=list)
        # Fixed label inventory of the i2b2 2014 de-identification task; 'O' = outside any entity.
        label_index = {'O': 0}
        label_set = ["DATE", "LOCATION", "NAME", "ID", "AGE", "CONTACT", "PROFESSION", "PHI"]
        for lbl in label_set:
            label_index[lbl] = len(label_index)
        lb = LabelBinarizer()
        lb.fit(list(label_index.values()))
        i = 0
        plabels = []
        for sent in tqdm(sequences, desc='Building tensor'):
            wids = []
            pl = []
            for word, label in sent:
                if is_label == False:
                    if word in word2index:
                        wids.append(word2index[word])
                    else:
                        # Out-of-vocabulary words fall back to the index of 'the'.
                        wids.append(word2index['the'])
                else:
                    pl.append(label_index[label])
            plabels.append(pl)
            if not is_label:
                data[i] = wids
                i += 1
        if is_label:
            plabels = sequence.pad_sequences(plabels, maxlen=maxlen)
            print(plabels.shape)
            pdata = np.array([lb.transform(l) for l in plabels])
        else:
            pdata = sequence.pad_sequences(data, maxlen=maxlen)
        return pdata

    def build_tensor(self,sequences,numrecs,word2index,maxlen,makecategorical=False,num_classes=0,is_label=False):
        """
        Function to create tensors out of sequences
        :param sequences: Sequences of words
        :param numrecs: size of the tensor
        :param word2index: mapping between words and its numerical representation (index). Loaded from file
        :param maxlen: Maximal length of the sequence
        :param makecategorical: Not used
        :param num_classes: Not used
        :param is_label: Not used, leave default for action performing
        :return: padded word-id tensor (or a padded empty-label tensor when is_label is True)
        """
        data = np.empty((numrecs,),dtype=list)
        label_index = {'O': 0}
        label_set = ["DATE", "LOCATION", "NAME", "ID", "AGE", "CONTACT", "PROFESSION", "PHI"]
        for lbl in label_set:
            label_index[lbl] = len(label_index)
        lb = LabelBinarizer()
        lb.fit(list(label_index.values()))
        i = 0
        plabels = []
        # NOTE(review): unlike build_tensor2, this loop never appends to ``pl``,
        # so with is_label=True the label rows are always empty — confirm intended.
        for sent in tqdm(sequences, desc='Building tensor'):
            wids = []
            pl = []
            for word in sent:
                if is_label == False:
                    if word[0] in word2index:
                        wids.append(word2index[word[0]])
                    else:
                        # Out-of-vocabulary words fall back to the index of 'the'.
                        wids.append(word2index['the'])
            plabels.append(pl)
            if not is_label:
                data[i] = wids
                i +=1
        if is_label:
            plabels = sequence.pad_sequences(plabels, maxlen=maxlen)
            print(plabels.shape)
            pdata = np.array([lb.transform(l) for l in plabels])
        else:
            pdata = sequence.pad_sequences(data, maxlen=maxlen)
        return pdata

    def transform(self,sequence):
        """Build word-id and label tensors for the given token sequences.

        NOTE(review): X and Y are computed but never returned or stored — this
        method appears to be dead code; confirm whether it should return (X, Y).
        """
        X = self.build_tensor(sequence, len(sequence), self.word_index, 70)
        Y = self.build_tensor(sequence, len(sequence), self.word_index, 70, True, 9, True)

    def perform_NER(self,text):
        """
        Function that perform BiLSTM-based NER
        :param text: Text that should be analyzed and tagged
        :return: returns sequence of sequences with (token, label) tuples
        """
        sequences = tokenize_fa([text])
        word_sequences = []
        X_test = []
        tokens = []
        for seq in sequences:
            features_seq = []
            sentence = []
            for i in range(0, len(seq)):
                features_seq.append(seq[i][0])
                tokens.append(seq[i][0])
                sentence.append(seq[i][0])
            X_test.append(sentence)
            word_sequences.append(sentence)
        # Sequences are padded/truncated to length 70 to match the trained model input.
        tensor = self.build_tensor(sequences, len(sequences), self.word_index, 70)
        predictions = self.model.predict(tensor)
        Y_pred_F = []
        for i in range(0,len(predictions)):
            seq= []
            for j in range(0,len(predictions[i])):
                # Manual argmax over the 9 class probabilities for this token position.
                max_k = 0
                max_k_val =0
                max_str = ""
                for k in range(0,len(predictions[i][j])):
                    if predictions[i][j][k]>max_k_val:
                        max_k_val = predictions[i][j][k]
                        max_k = k
                # Map the winning class index back to its label string.
                if max_k == 0:
                    max_str = "O"
                elif max_k == 1:
                    max_str = "DATE"
                elif max_k == 2:
                    max_str = "LOCATION"
                elif max_k == 3:
                    max_str = "NAME"
                elif max_k == 4:
                    max_str = "ID"
                elif max_k == 5:
                    max_str = "AGE"
                elif max_k == 6:
                    max_str = "CONTACT"
                elif max_k == 7:
                    max_str = "PROFESSION"
                elif max_k == 8:
                    max_str = "PHI"
                seq.append(max_str)
            Y_pred_F.append(seq)
        final_sequences = []
        # Padding is applied on the left, so the real tokens occupy the tail of each
        # predicted row; align predictions back onto the original tokens.
        for j in range(0,len(Y_pred_F)):
            sentence = []
            for i in range(len(Y_pred_F[j])-len(sequences[j]),len(Y_pred_F[j])):
                sentence.append((sequences[j][i-(len(Y_pred_F[j])-len(sequences[j]))][0],Y_pred_F[j][i]))
            final_sequences.append(sentence)
        return final_sequences

    def createModel(self, text,GLOVE_DIR):
        """Build the BiLSTM model and its GloVe-initialised embedding layer.

        :param text: iterable of tokens used to fit the Tokenizer vocabulary
        :param GLOVE_DIR: directory holding (or to receive) glove.840B.300d.txt
        """
        self.embeddings_index = {}
        # Download the embeddings if they are not available locally.
        if os.path.isdir(GLOVE_DIR) == False or os.path.isfile(GLOVE_DIR+"glove.840B.300d.txt")==False:
            print('Beginning GloVe file download with urllib2...')
            url = 'http://nlp.stanford.edu/data/glove.840B.300d.zip'
            if os.path.exists(self.GLOVE_DIR)==False:
                os.mkdir(self.GLOVE_DIR)
            urllib.request.urlretrieve(url, self.GLOVE_DIR+'glove.840B.300d.zip')
            with ZipFile(self.GLOVE_DIR+'glove.840B.300d.zip', 'r') as zipObj:
                # Extract all the contents of zip file in current directory
                zipObj.extractall(GLOVE_DIR)
            os.remove(self.GLOVE_DIR+"glove.840B.300d.zip")
        # Parse the text file: last 300 fields are the vector, everything before is the token
        # (tokens in this GloVe release may themselves contain spaces).
        f = open(os.path.join(GLOVE_DIR, 'glove.840B.300d.txt'),encoding='utf')
        for line in f:
            values = line.split()
            word = ''.join(values[:-300])
            coefs = np.asarray(values[-300:], dtype='float32')
            self.embeddings_index[word] = coefs
        f.close()
        print('Found %s word vectors.' % len(self.embeddings_index))
        tokenizer = Tokenizer(num_words=self.MAX_NB_WORDS, lower=False)
        tokenizer.fit_on_texts(text)
        self.word_index = tokenizer.word_index
        pickle.dump(self.word_index,open("word_index.pkl",'wb'))
        # words not found in embedding index will be all-zeros.
        self.embedding_matrix = np.zeros((len(self.word_index) + 1, self.EMBEDDING_DIM))
        print(self.embedding_matrix.shape)
        for word, i in self.word_index.items():
            embedding_vector = self.embeddings_index.get(word)
            if embedding_vector is not None:
                self.embedding_matrix[i] = embedding_vector
        self.embedding_layer = Embedding(len(self.word_index) + 1,
                                         self.EMBEDDING_DIM,
                                         weights=[self.embedding_matrix],
                                         input_length=70,
                                         trainable=True)
        self.model = Sequential()
        self.model.add(self.embedding_layer)
        self.model.add(Bidirectional(LSTM(150, dropout=0.3, recurrent_dropout=0.6, return_sequences=True)))#{'sum', 'mul', 'concat', 'ave', None}
        self.model.add(Bidirectional(LSTM(60, dropout=0.2, recurrent_dropout=0.5, return_sequences=True)))
        self.model.add(TimeDistributed(Dense(9, activation='softmax')))  # a dense layer as suggested by neuralNer
        self.model.compile(loss="categorical_crossentropy", optimizer='rmsprop' , metrics=['accuracy'])
        self.model.summary()
        pass

    def transform_sequences(self,token_sequences):
        """Fit the model/vocabulary on the tokens and return (X, Y) training tensors."""
        text = []
        for ts in token_sequences:
            for t in ts:
                text.append(t[0])
        self.createModel(text, self.GLOVE_DIR)
        X = self.build_tensor2(token_sequences, len(token_sequences), self.word_index, 70)
        Y = self.build_tensor2(token_sequences, len(token_sequences), self.word_index, 70, True, 9, True)
        return X,Y

    def learn(self,X,Y,epochs=1):
        """Train the model on the given tensors (10% of samples held out for validation)."""
        self.model.fit(X, Y, epochs=epochs, validation_split=0.1, batch_size=64)

    def evaluate(self,X,Y):
        """Predict on X and print a per-class classification report against Y."""
        Y_pred = self.model.predict(X)
        from sklearn import metrics
        labels = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        # Flatten the (samples, timesteps, classes) predictions into per-token argmax ids.
        Y_pred_F = []
        for i in range(0, len(Y_pred)):
            for j in range(0, len(Y_pred[i])):
                max_k = 0
                max_k_val = 0
                for k in range(0, len(Y_pred[i][j])):
                    if Y_pred[i][j][k] > max_k_val:
                        max_k_val = Y_pred[i][j][k]
                        max_k = k
                Y_pred_F.append(max_k)
        Y_test_F = []
        for i in range(0, len(Y)):
            for j in range(0, len(Y[i])):
                max_k = 0
                max_k_val = 0
                for k in range(0, len(Y[i][j])):
                    if Y[i][j][k] > max_k_val:
                        max_k_val = Y[i][j][k]
                        max_k = k
                Y_test_F.append(max_k)
        # NOTE(review): ``labels`` is passed positionally; recent scikit-learn
        # requires it as a keyword argument (labels=labels) — confirm the pinned version.
        print(metrics.classification_report(Y_test_F, Y_pred_F, labels))

    def save(self,model_path):
        """Persist the model architecture (JSON) and weights (HDF5) under Models/.

        NOTE(review): the path mixes '/' and '\\' separators — works on Windows only;
        confirm the intended platform.
        """
        # serialize model to JSON
        model_json = self.model.to_json()
        with open("src/mask_framework_lib/Models\\"+model_path + ".json", "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        self.model.save_weights("src/mask_framework_lib/Models\\"+model_path + ".h5")
        print("Saved model to disk")
PypiClean
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/node_modules/lru-cache/index.js
'use strict'

// A linked list to keep track of recently-used-ness
const Yallist = require('yallist')

// Symbols hide all internal state from consumers of the cache instance.
const MAX = Symbol('max')
const LENGTH = Symbol('length')
const LENGTH_CALCULATOR = Symbol('lengthCalculator')
const ALLOW_STALE = Symbol('allowStale')
const MAX_AGE = Symbol('maxAge')
const DISPOSE = Symbol('dispose')
const NO_DISPOSE_ON_SET = Symbol('noDisposeOnSet')
const LRU_LIST = Symbol('lruList')
const CACHE = Symbol('cache')
const UPDATE_AGE_ON_GET = Symbol('updateAgeOnGet')

// Default length calculator: every entry counts as 1 (i.e. `max` = item count).
const naiveLength = () => 1

// lruList is a yallist where the head is the youngest
// item, and the tail is the oldest.  the list contains the Hit
// objects as the entries.
// Each Hit object has a reference to its Yallist.Node.  This
// never changes.
//
// cache is a Map (or PseudoMap) that matches the keys to
// the Yallist.Node object.
class LRUCache {
  constructor (options) {
    // Allow `new LRUCache(500)` as shorthand for `{ max: 500 }`.
    if (typeof options === 'number')
      options = { max: options }

    if (!options)
      options = {}

    if (options.max && (typeof options.max !== 'number' || options.max < 0))
      throw new TypeError('max must be a non-negative number')
    // Kind of weird to have a default max of Infinity, but oh well.
    const max = this[MAX] = options.max || Infinity

    const lc = options.length || naiveLength
    this[LENGTH_CALCULATOR] = (typeof lc !== 'function') ? naiveLength : lc
    this[ALLOW_STALE] = options.stale || false
    if (options.maxAge && typeof options.maxAge !== 'number')
      throw new TypeError('maxAge must be a number')
    this[MAX_AGE] = options.maxAge || 0
    this[DISPOSE] = options.dispose
    this[NO_DISPOSE_ON_SET] = options.noDisposeOnSet || false
    this[UPDATE_AGE_ON_GET] = options.updateAgeOnGet || false
    this.reset()
  }

  // resize the cache when the max changes.
  set max (mL) {
    if (typeof mL !== 'number' || mL < 0)
      throw new TypeError('max must be a non-negative number')

    this[MAX] = mL || Infinity
    trim(this)
  }
  get max () {
    return this[MAX]
  }

  set allowStale (allowStale) {
    this[ALLOW_STALE] = !!allowStale
  }
  get allowStale () {
    return this[ALLOW_STALE]
  }

  set maxAge (mA) {
    if (typeof mA !== 'number')
      throw new TypeError('maxAge must be a non-negative number')

    this[MAX_AGE] = mA
    trim(this)
  }
  get maxAge () {
    return this[MAX_AGE]
  }

  // resize the cache when the lengthCalculator changes.
  set lengthCalculator (lC) {
    if (typeof lC !== 'function')
      lC = naiveLength

    if (lC !== this[LENGTH_CALCULATOR]) {
      this[LENGTH_CALCULATOR] = lC
      this[LENGTH] = 0
      // Recompute every entry's length under the new calculator.
      this[LRU_LIST].forEach(hit => {
        hit.length = this[LENGTH_CALCULATOR](hit.value, hit.key)
        this[LENGTH] += hit.length
      })
    }
    trim(this)
  }
  get lengthCalculator () { return this[LENGTH_CALCULATOR] }

  get length () { return this[LENGTH] }
  get itemCount () { return this[LRU_LIST].length }

  // Iterate from oldest (tail) to youngest (head).
  rforEach (fn, thisp) {
    thisp = thisp || this
    for (let walker = this[LRU_LIST].tail; walker !== null;) {
      // Capture prev before the callback, which may delete the node.
      const prev = walker.prev
      forEachStep(this, fn, walker, thisp)
      walker = prev
    }
  }

  // Iterate from youngest (head) to oldest (tail).
  forEach (fn, thisp) {
    thisp = thisp || this
    for (let walker = this[LRU_LIST].head; walker !== null;) {
      const next = walker.next
      forEachStep(this, fn, walker, thisp)
      walker = next
    }
  }

  keys () {
    return this[LRU_LIST].toArray().map(k => k.key)
  }

  values () {
    return this[LRU_LIST].toArray().map(k => k.value)
  }

  reset () {
    // Notify the dispose handler for every entry being dropped.
    if (this[DISPOSE] &&
        this[LRU_LIST] &&
        this[LRU_LIST].length) {
      this[LRU_LIST].forEach(hit => this[DISPOSE](hit.key, hit.value))
    }

    this[CACHE] = new Map() // hash of items by key
    this[LRU_LIST] = new Yallist() // list of items in order of use recency
    this[LENGTH] = 0 // length of items in the list
  }

  // Serialize the non-stale entries as {k, v, e} records (e = expiration time).
  dump () {
    return this[LRU_LIST].map(hit =>
      isStale(this, hit) ? false : {
        k: hit.key,
        v: hit.value,
        e: hit.now + (hit.maxAge || 0)
      }).toArray().filter(h => h)
  }

  dumpLru () {
    return this[LRU_LIST]
  }

  set (key, value, maxAge) {
    maxAge = maxAge || this[MAX_AGE]

    if (maxAge && typeof maxAge !== 'number')
      throw new TypeError('maxAge must be a number')

    const now = maxAge ? Date.now() : 0
    const len = this[LENGTH_CALCULATOR](value, key)

    if (this[CACHE].has(key)) {
      // Replacing an existing key: if the new value alone exceeds max,
      // drop the entry entirely instead of storing it.
      if (len > this[MAX]) {
        del(this, this[CACHE].get(key))
        return false
      }

      const node = this[CACHE].get(key)
      const item = node.value

      // dispose of the old one before overwriting
      // split out into 2 ifs for better coverage tracking
      if (this[DISPOSE]) {
        if (!this[NO_DISPOSE_ON_SET])
          this[DISPOSE](key, item.value)
      }

      item.now = now
      item.maxAge = maxAge
      item.value = value
      this[LENGTH] += len - item.length
      item.length = len
      // get() promotes the entry to most-recently-used.
      this.get(key)
      trim(this)
      return true
    }

    const hit = new Entry(key, value, len, now, maxAge)

    // oversized objects fall out of cache automatically.
    if (hit.length > this[MAX]) {
      if (this[DISPOSE])
        this[DISPOSE](key, value)

      return false
    }

    this[LENGTH] += hit.length
    this[LRU_LIST].unshift(hit)
    this[CACHE].set(key, this[LRU_LIST].head)
    trim(this)
    return true
  }

  has (key) {
    if (!this[CACHE].has(key)) return false
    const hit = this[CACHE].get(key).value
    return !isStale(this, hit)
  }

  get (key) {
    return get(this, key, true)
  }

  // Like get(), but does not update recency or age.
  peek (key) {
    return get(this, key, false)
  }

  // Remove and return the least-recently-used value.
  pop () {
    const node = this[LRU_LIST].tail
    if (!node)
      return null

    del(this, node)
    return node.value
  }

  del (key) {
    del(this, this[CACHE].get(key))
  }

  // Restore entries from a dump() array, skipping already-expired records.
  load (arr) {
    // reset the cache
    this.reset()

    const now = Date.now()
    // A previous serialized cache has the most recent items first
    for (let l = arr.length - 1; l >= 0; l--) {
      const hit = arr[l]
      const expiresAt = hit.e || 0
      if (expiresAt === 0)
        // the item was created without expiration in a non aged cache
        this.set(hit.k, hit.v)
      else {
        const maxAge = expiresAt - now
        // dont add already expired items
        if (maxAge > 0) {
          this.set(hit.k, hit.v, maxAge)
        }
      }
    }
  }

  // Evict all stale entries (get() with doUse=false deletes stale hits).
  prune () {
    this[CACHE].forEach((value, key) => get(this, key, false))
  }
}

// Shared lookup helper for get()/peek()/prune().
const get = (self, key, doUse) => {
  const node = self[CACHE].get(key)
  if (node) {
    const hit = node.value
    if (isStale(self, hit)) {
      del(self, node)
      if (!self[ALLOW_STALE])
        return undefined
    } else {
      if (doUse) {
        if (self[UPDATE_AGE_ON_GET])
          node.value.now = Date.now()
        // Promote to most-recently-used.
        self[LRU_LIST].unshiftNode(node)
      }
    }
    return hit.value
  }
}

// An entry is stale when its age exceeds its own maxAge, or the cache-wide one.
const isStale = (self, hit) => {
  if (!hit || (!hit.maxAge && !self[MAX_AGE]))
    return false

  const diff = Date.now() - hit.now
  return hit.maxAge ? diff > hit.maxAge
    : self[MAX_AGE] && (diff > self[MAX_AGE])
}

// Evict from the tail (least recently used) until total length fits max.
const trim = self => {
  if (self[LENGTH] > self[MAX]) {
    for (let walker = self[LRU_LIST].tail;
      self[LENGTH] > self[MAX] && walker !== null;) {
      // We know that we're about to delete this one, and also
      // what the next least recently used key will be, so just
      // go ahead and set it now.
      const prev = walker.prev
      del(self, walker)
      walker = prev
    }
  }
}

// Remove a node from both the map and the list, firing the dispose handler.
const del = (self, node) => {
  if (node) {
    const hit = node.value
    if (self[DISPOSE])
      self[DISPOSE](hit.key, hit.value)

    self[LENGTH] -= hit.length
    self[CACHE].delete(hit.key)
    self[LRU_LIST].removeNode(node)
  }
}

// Value record stored in the yallist nodes.
class Entry {
  constructor (key, value, length, now, maxAge) {
    this.key = key
    this.value = value
    this.length = length
    this.now = now
    this.maxAge = maxAge || 0
  }
}

// forEach/rforEach step: drop stale entries, optionally yielding them when
// allowStale is on.
const forEachStep = (self, fn, node, thisp) => {
  let hit = node.value
  if (isStale(self, hit)) {
    del(self, node)
    if (!self[ALLOW_STALE])
      hit = undefined
  }
  if (hit)
    fn.call(thisp, hit.value, hit.key, self)
}

module.exports = LRUCache
PypiClean
/Markdown-Toolbox-0.0.9.tar.gz/Markdown-Toolbox-0.0.9/Markdown_Toolbox/moduels/thread/Thread_ClearAttatchment.py
from PySide2.QtWidgets import *
from PySide2.QtGui import *
from PySide2.QtCore import *
from moduels.component.NormalValue import 常量, 清理化线程常量
from moduels.function.getAllUrlFromString import 从字符串搜索到所有附件路径
from moduels.function.localizeLinksInDocument import 将文档索引的链接本地化
from moduels.function.checkDirectoryPath import 检查路径
from moduels.function.restoreLinkFromJump import 跳转链接还原
# NOTE(review): several of the imports below (MozillaCookieJar, request/error,
# urlparse, copy, and the two link helpers above) appear unused in this module —
# confirm before removing.
import os, re, time
from http.cookiejar import MozillaCookieJar
from urllib import request, error
import urllib.error
from urllib.parse import urlparse
from shutil import copy, move, rmtree


class Thread_ClearAttatchment(QThread):
    """Worker thread that scans folders of Markdown documents for attachment files
    no longer referenced by any .md file, moves them into an "未引用附件"
    (unreferenced attachments) folder, and — after user confirmation via a Qt
    signal — deletes those folders.
    """
    输入文件夹列表 = None          # list of folders to scan; set by the caller before start()
    执行期间需要禁用的组件 = []    # UI widgets to disable while the thread is running
    提醒是否确认要删除的信号 = Signal()  # emitted to ask the UI for deletion confirmation

    def __init__(self, parent=None):
        super(Thread_ClearAttatchment, self).__init__(parent)

    def run(self):
        """Entry point of the worker thread.

        For every input folder: collect all .md files, extract every attachment
        path they reference, diff that against the files actually present in the
        attachment folders, move the unreferenced ones aside, then (after UI
        confirmation) delete the "未引用附件" folders.
        """
        输入文件列表 = self.输入文件夹列表
        # Disable the registered widgets for the duration of the run.
        for 组件 in self.执行期间需要禁用的组件:
            组件.setDisabled(True)
        常量.状态栏.showMessage('正在清理中')
        # 常量.mainWindow.setWindowTitle(常量.mainWindow.窗口标题 + '(执行中……)')
        # 常量.有重名时的处理方式 = 0 # 0 是询问,1 是全部覆盖,2 是全部跳过 (0 = ask, 1 = overwrite all, 2 = skip all)
        无效附件移动到的文件夹列表 = []
        for 输入文件夹 in 输入文件列表:
            print(f'正在处理的输入文件夹:{输入文件夹}')
            当前文件夹下的附件路径列表 = []
            当前文件夹下的存储相对路径附件的文件夹的列表 = []
            if not os.path.exists(输入文件夹):
                continue
            输入文件夹下的文件列表 = os.listdir(输入文件夹)
            # Collect all Markdown documents directly inside the folder.
            md文档目录 = []
            print(f'当前搜索文件夹的md文档有:')
            for 文件名 in 输入文件夹下的文件列表:
                if re.match('.+\.md$', 文件名):
                    md文档目录.append(输入文件夹 + '/' + 文件名)
                    print(f' {文件名}')
            if len(md文档目录) < 1:
                continue
            for md文档 in md文档目录:
                print(f'正在文档中搜索相对路径。{md文档}')
                # Fall back to GBK for documents that are not UTF-8 encoded.
                # NOTE(review): bare except hides real I/O errors — consider
                # narrowing to UnicodeDecodeError.
                try:
                    md文档内容 = open(md文档, 'r', encoding='utf-8').read()
                except:
                    md文档内容 = open(md文档, 'r', encoding='gbk').read()
                搜索到的附件路径列表 = 从字符串搜索到所有附件路径(md文档内容)
                for 附件路径 in 搜索到的附件路径列表:
                    print('当前文档中的有效相对路径附件有:')
                    # Resolve the (usually relative) reference against the document's folder.
                    附件路径加上文档路径转换的绝对路径 = os.path.dirname(md文档) + '/' + 附件路径
                    if os.path.exists(附件路径加上文档路径转换的绝对路径):
                        if 附件路径加上文档路径转换的绝对路径 not in 当前文件夹下的附件路径列表:
                            当前文件夹下的附件路径列表.append(附件路径加上文档路径转换的绝对路径)
                            print(f' {附件路径}')
                            print(f' 而其所处的文件夹是{os.path.dirname(附件路径加上文档路径转换的绝对路径)}')
                            if os.path.dirname(附件路径加上文档路径转换的绝对路径) not in 当前文件夹下的存储相对路径附件的文件夹的列表:
                                当前文件夹下的存储相对路径附件的文件夹的列表.append(os.path.dirname(附件路径加上文档路径转换的绝对路径))
                    elif os.path.exists(附件路径):
                        # The reference was already an absolute (or CWD-relative) path.
                        if 附件路径 not in 当前文件夹下的附件路径列表:
                            当前文件夹下的附件路径列表.append(附件路径)
                            print(f' {附件路径}')
                            print(f' 而其所处的文件夹是{os.path.dirname(附件路径加上文档路径转换的绝对路径)}')
            # Enumerate every file that lives inside the attachment folders found above.
            已找到的附件文件夹中的文件列表 = []
            for 附件文件夹 in 当前文件夹下的存储相对路径附件的文件夹的列表:
                print(f'现在开始统计附件文件夹中的所有附件:{附件文件夹}')
                for 路径 in os.listdir(附件文件夹):
                    路径 = 附件文件夹 + '/' + 路径
                    if os.path.isfile(路径):
                        已找到的附件文件夹中的文件列表.append(路径)
                        print(f' 找到一个附件文件:{路径}')
            # Anything present on disk but never referenced is considered unused.
            print('现在开始统计无用的附件')
            无用的附件列表 = []
            for 附件文件 in 已找到的附件文件夹中的文件列表:
                if 附件文件 not in 当前文件夹下的附件路径列表:
                    无用的附件列表.append(附件文件)
            print('现在开始移动无用的附件')
            for 无用附件 in 无用的附件列表:
                目标文件夹 = os.path.dirname(os.path.dirname(无用附件)) + '/' + '未引用附件'
                if not 检查路径(目标文件夹):
                    print(f'要移动到的目录未能成功创建:{目标文件夹}')
                    continue
                原完整路径名 = 目标文件夹 + '/' + os.path.basename(无用附件)
                目标完整路径名 = 原完整路径名
                # On name collision, append an increasing numeric suffix until free.
                重复后文件名加的后缀数字 = 1
                while os.path.exists(目标完整路径名):
                    print(f'无用附件要移动到目标的目标位置已有同名文件:{目标完整路径名}')
                    print('保留两者')
                    原完整路径名分割后 = os.path.splitext(原完整路径名)
                    目标完整路径名 = 原完整路径名分割后[0] + str(重复后文件名加的后缀数字) + 原完整路径名分割后[1]
                    print(f'尝试新目标文件名:{目标完整路径名}')
                    重复后文件名加的后缀数字 += 1
                move(无用附件, 目标完整路径名)
                if 目标文件夹 not in 无效附件移动到的文件夹列表:
                    无效附件移动到的文件夹列表.append(目标文件夹)
                print(f'成功将 {无用附件} 移动到 {目标完整路径名}')
        # Hand over to the UI: emit the confirmation signal, then poll until the
        # UI clears the wait flag on 清理化线程常量.
        清理化线程常量.进程需要等待 = True
        if len(无效附件移动到的文件夹列表) > 0:
            self.提醒是否确认要删除的信号.emit()
            while 清理化线程常量.进程需要等待:
                self.sleep(1)
            if 清理化线程常量.是否确认要删除找到的无用文件:
                print('确认要删除无用附件')
                for 无效附件目录 in 无效附件移动到的文件夹列表:
                    try:
                        rmtree(无效附件目录)
                    except:
                        print(f'一个无效附件目录移除失败:{无效附件目录}')
        常量.状态栏.showMessage('任务完成')
        # 常量.mainWindow.setWindowTitle(常量.mainWindow.窗口标题 + '(完成)')
        # Re-enable the widgets disabled at the start of the run.
        for 组件 in self.执行期间需要禁用的组件:
            组件.setEnabled(True)
        print(f'\n\n清理完成 {time.time()}\n\n')
PypiClean
/FACe_lib-0.3.0.tar.gz/FACe_lib-0.3.0/face/models/invoice.py
from marshmallow import fields, Schema, post_load

from .response import Response, ResponseSchema

# Load the default status codes
import face.codes as codes

"""
FACe Invoice models and schemas

It defines the "factura" response content and defines an extended Response with the "factura" integrated:

- InvoiceResponse and InvoiceSchema defines the resultado.factura
- Invoice and InvoiceSchema extends the base Response to integrate the "factura" component
"""

"""
Generic InvoiceState to integrate Tramitacion and Anulacion Invoice states
"""
class InvoiceState(object):
    """Plain holder for an invoice processing/cancellation state payload."""

    def __init__(self, **kwargs):
        # Mirror each expected field from kwargs; missing ones default to None.
        for name in ('codigo', 'descripcion', 'motivo'):
            setattr(self, name, kwargs.get(name))

    def __getitem__(self, item):
        # Dict-style read access to the instance attributes.
        return vars(self)[item]


class InvoiceStateSchema(Schema):
    codigo = fields.String(validate=codes.status.validator)
    descripcion = fields.String(allow_none=True)
    motivo = fields.String(allow_none=True)

    @post_load
    def create_tramitacion(self, data):
        """Deserialize the schema payload into an InvoiceState instance."""
        return InvoiceState(**data)


class InvoiceResponse(object):
    """Plain holder for the "factura" section of a FACe response."""

    def __init__(self, **kwargs):
        # Mirror each expected field from kwargs; missing ones default to None.
        # 'tramitacion' and 'anulacion' are needed for consultarFactura.
        for name in ('numeroRegistro', 'organoGestor', 'unidadTramitadora',
                     'oficinaContable', 'identificadorEmisor', 'numeroFactura',
                     'serieFactura', 'fechaRecepcion', 'tramitacion', 'anulacion'):
            setattr(self, name, kwargs.get(name))

    def __getitem__(self, item):
        # Dict-style read access to the instance attributes.
        return vars(self)[item]


class InvoiceResponseSchema(Schema):
    numeroRegistro = fields.String(allow_none=True)
    organoGestor = fields.String(allow_none=True)
    unidadTramitadora = fields.String(allow_none=True)
    oficinaContable = fields.String(allow_none=True)
    identificadorEmisor = fields.String(allow_none=True)
    numeroFactura = fields.String(allow_none=True)
    serieFactura = fields.String(allow_none=True)
    fechaRecepcion = fields.String(allow_none=True)
    # needed for consultarFactura
    tramitacion = fields.Nested(InvoiceStateSchema, many=False, allow_none=True)
    anulacion = fields.Nested(InvoiceStateSchema, many=False, allow_none=True)

    @post_load
    def create_resultado(self, data):
        """Deserialize the schema payload into an InvoiceResponse instance."""
        return InvoiceResponse(**data)


class Invoice(Response):
    """Extended Response that carries the "factura" component alongside "resultado"."""

    def __init__(self, resultado, factura):
        super().__init__(resultado=resultado)
        self.factura = factura


class InvoiceSchema(ResponseSchema):
    factura = fields.Nested(InvoiceResponseSchema, many=False)

    @post_load
    def create_response(self, data):
        """Deserialize the schema payload into an Invoice instance."""
        return Invoice(**data)
PypiClean
/Flask-FluidDB-0.1.tar.gz/Flask-FluidDB-0.1/docs/_themes/flask_theme_support.py
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
     Number, Operator, Generic, Whitespace, Punctuation, Other, Literal


class FlaskyStyle(Style):
    """Pygments syntax-highlighting style for the docs theme (light background).

    Each entry maps a pygments token type to a CSS-like style string; the
    trailing comment gives the short CSS class pygments emits for that token.
    """
    background_color = "#f8f8f8"
    default_style = ""

    styles = {
        # No corresponding class for the following:
        #Text:                     "", # class: ''
        Whitespace:                "underline #f8f8f8",      # class: 'w'
        Error:                     "#a40000 border:#ef2929", # class: 'err'
        Other:                     "#000000",                # class 'x'

        Comment:                   "italic #8f5902",         # class: 'c'
        Comment.Preproc:           "noitalic",               # class: 'cp'

        Keyword:                   "bold #004461",           # class: 'k'
        Keyword.Constant:          "bold #004461",           # class: 'kc'
        Keyword.Declaration:       "bold #004461",           # class: 'kd'
        Keyword.Namespace:         "bold #004461",           # class: 'kn'
        Keyword.Pseudo:            "bold #004461",           # class: 'kp'
        Keyword.Reserved:          "bold #004461",           # class: 'kr'
        Keyword.Type:              "bold #004461",           # class: 'kt'

        Operator:                  "#582800",                # class: 'o'
        Operator.Word:             "bold #004461",           # class: 'ow' - like keywords

        Punctuation:               "bold #000000",           # class: 'p'

        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name:                      "#000000",                # class: 'n'
        Name.Attribute:            "#c4a000",                # class: 'na' - to be revised
        Name.Builtin:              "#004461",                # class: 'nb'
        Name.Builtin.Pseudo:       "#3465a4",                # class: 'bp'
        Name.Class:                "#000000",                # class: 'nc' - to be revised
        Name.Constant:             "#000000",                # class: 'no' - to be revised
        Name.Decorator:            "#888",                   # class: 'nd' - to be revised
        Name.Entity:               "#ce5c00",                # class: 'ni'
        Name.Exception:            "bold #cc0000",           # class: 'ne'
        Name.Function:             "#000000",                # class: 'nf'
        Name.Property:             "#000000",                # class: 'py'
        Name.Label:                "#f57900",                # class: 'nl'
        Name.Namespace:            "#000000",                # class: 'nn' - to be revised
        Name.Other:                "#000000",                # class: 'nx'
        Name.Tag:                  "bold #004461",           # class: 'nt' - like a keyword
        Name.Variable:             "#000000",                # class: 'nv' - to be revised
        Name.Variable.Class:       "#000000",                # class: 'vc' - to be revised
        Name.Variable.Global:      "#000000",                # class: 'vg' - to be revised
        Name.Variable.Instance:    "#000000",                # class: 'vi' - to be revised

        Number:                    "#990000",                # class: 'm'

        Literal:                   "#000000",                # class: 'l'
        Literal.Date:              "#000000",                # class: 'ld'

        String:                    "#4e9a06",                # class: 's'
        String.Backtick:           "#4e9a06",                # class: 'sb'
        String.Char:               "#4e9a06",                # class: 'sc'
        String.Doc:                "italic #8f5902",         # class: 'sd' - like a comment
        String.Double:             "#4e9a06",                # class: 's2'
        String.Escape:             "#4e9a06",                # class: 'se'
        String.Heredoc:            "#4e9a06",                # class: 'sh'
        String.Interpol:           "#4e9a06",                # class: 'si'
        String.Other:              "#4e9a06",                # class: 'sx'
        String.Regex:              "#4e9a06",                # class: 'sr'
        String.Single:             "#4e9a06",                # class: 's1'
        String.Symbol:             "#4e9a06",                # class: 'ss'

        Generic:                   "#000000",                # class: 'g'
        Generic.Deleted:           "#a40000",                # class: 'gd'
        Generic.Emph:              "italic #000000",         # class: 'ge'
        Generic.Error:             "#ef2929",                # class: 'gr'
        Generic.Heading:           "bold #000080",           # class: 'gh'
        Generic.Inserted:          "#00A000",                # class: 'gi'
        Generic.Output:            "#888",                   # class: 'go'
        Generic.Prompt:            "#745334",                # class: 'gp'
        Generic.Strong:            "bold #000000",           # class: 'gs'
        Generic.Subheading:        "bold #800080",           # class: 'gu'
        Generic.Traceback:         "bold #a40000",           # class: 'gt'
    }
PypiClean
/DatNTLib-0.0.3-py3-none-any.whl/DatNTLibs/Datasets/COCO/main.py
import os
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
from pycocotools.coco import COCO  # pip3 install pycocotools


class COCODataset:
    """Thin wrapper around a local COCO annotation file.

    Expects the annotation JSON at
    ``<cwd>/annotations/annotations/<mode>_<version><year>.json``.
    """

    def __init__(self, i_version='train', i_mode='instances', i_year=2017):
        """Load the COCO annotation file for the requested split.

        Args:
            i_version: dataset split, one of 'train', 'val', 'test', 'unlabeled'.
            i_mode: annotation type, one of 'captions', 'instances',
                'person_keypoints'.
            i_year: dataset release year, one of 2014, 2015, 2017.

        Raises:
            AssertionError: on invalid arguments or a missing annotation file.
                (NOTE(review): asserts are stripped under ``python -O``; kept
                as-is to preserve the exception type callers may rely on.)
        """
        assert isinstance(i_version, str)
        assert i_version in ('train', 'val', 'test', 'unlabeled')
        assert isinstance(i_year, int)
        assert i_year in (2014, 2015, 2017)
        assert isinstance(i_mode, str)
        assert i_mode in ('captions', 'instances', 'person_keypoints')
        # e.g. <cwd>/annotations/annotations/instances_train2017.json
        self.annotations = os.path.join(os.getcwd(), 'annotations', 'annotations')
        self.datatype = '{}_{}{}'.format(i_mode, i_version, i_year)
        self.annotation_json = os.path.join(self.annotations, self.datatype + '.json')
        assert os.path.exists(self.annotation_json)
        self.coco_object = COCO(annotation_file=self.annotation_json)

    def display_sample_image(self):
        # Placeholder kept for interface compatibility; not implemented yet.
        pass

    def get_coco_instance_categories(self):
        """Print and return category / supercategory names.

        Returns:
            (cat_names, scat_names) when this object wraps 'instances'
            annotations; otherwise False (original sentinel kept for
            backward compatibility).
        """
        if self.datatype.find('instances') >= 0:
            categories = self.coco_object.loadCats(self.coco_object.getCatIds())
            cat_names = [cat['name'] for cat in categories]
            print('COCO categories: \n')
            for index, cat_name in enumerate(cat_names):
                print('{}: {}'.format(index, cat_name))
            # Set comprehension instead of set([...]) — same contents.
            scat_names = {cat['supercategory'] for cat in categories}
            print('COCO supercategories: \n')
            for index, scat_name in enumerate(scat_names):
                print('{}: {}'.format(index, scat_name))
            return cat_names, scat_names
        else:
            return False

    def get_category_images(self, i_category='person'):
        """Display every image containing ``i_category`` with its annotations.

        Downloads each image from its ``coco_url`` (requires network access)
        and blocks on ``plt.show()`` once per image.

        Returns:
            The list/tuple of image ids belonging to ``i_category``.
        """
        assert isinstance(i_category, str)
        category_ids = self.coco_object.getCatIds(catNms=[i_category])
        image_ids = self.coco_object.getImgIds(catIds=category_ids)
        assert isinstance(image_ids, (list, tuple))  # Ids of all images belong to i_category
        # FIX: loop variable renamed from `id`, which shadowed the builtin.
        for image_id in image_ids:
            image = self.coco_object.loadImgs(image_id)[0]
            annIds = self.coco_object.getAnnIds(imgIds=image['id'],
                                                catIds=category_ids, iscrowd=None)
            anns = self.coco_object.loadAnns(annIds)
            print(image)
            image = io.imread(image['coco_url'])  # rebinds dict -> pixel array
            print(image)
            print(annIds)
            print(anns)
            plt.imshow(image)
            self.coco_object.showAnns(anns)
            plt.show()
        return image_ids


if __name__ == '__main__':
    print('This module is to demonstrate the characteristics of COCO dataset')
    dataset = COCODataset(i_version='train', i_year=2017)
    dataset.get_coco_instance_categories()
    dataset.get_category_images(i_category='person')
"""=================================================================================================================="""
PypiClean
/HubbardMeanField-1.0.tar.gz/HubbardMeanField-1.0/hubbardmeanfield/hubbard/kagome.py
import numpy as np
import json
from hubbard.base import Hubbard


class KagomeHubbard(Hubbard):
    """Mean-field Hubbard model on a kagome lattice with periodic boundaries."""
    #
    ## IO
    #
    def __init__(self, nrows, ncols, u=0, t=1, nup=0, ndown=0,
                 allow_fractions=False, **kwargs):
        """
        Creates a kagome lattice and initialises things.

        nrows and ncols cannot be changed later without building
        a new instance.

        Inputs: nrows - positive integer, number of rows in the supercell.
            ncols - positive integer, number of columns in the supercell.
            t - number, optional (default 1). Hopping constant.
            u - number, optional (default 0). Hubbard U constant.
            nup,ndown,kwargs - arguments for set_electrons
                Defaults to no electrons. (Do not set backup)
        Last Modified: 2020-08-10
        """
        # Check that nrows and ncols are positive
        if nrows <= 0:
            raise ValueError("Number of rows must be positive.")
        if ncols <= 0:
            raise ValueError("Number of columns must be positive.")
        # Initialise the important constants.
        self.nrows = nrows
        self.ncols = ncols
        # Call the parent initialiser. The kagome lattice has 3 sites per cell.
        super().__init__(3*nrows*ncols, u=u, nup=nup, ndown=ndown,
                         allow_fractions=allow_fractions, **kwargs)
        # Initialise the kinetic energy.
        self.set_kinetic(t)
    #
    def copy(self):
        """
        Returns a copy of this kagome lattice object.

        Outputs: KagomeHubbard object.
        Last Modified: 2020-08-04
        """
        # Copy charge density and U.
        kagome = KagomeHubbard(self.nrows, self.ncols, u=self.u,
                               nup=self.nup.copy(), ndown=self.ndown.copy(),
                               allow_fractions=self.allow_fractions)
        # Copy the kinetic energy matrix.
        kagome.set_kinetic_from_matrix(self.kin.copy())
        # Copy magnetic field
        kagome.set_mag(self.mag)
        return kagome
    #
    @classmethod
    def load(cls, f):
        """
        Loads a KagomeHubbard object from a JSON file f.

        Inputs: f - string, filename.
        Outputs: KagomeHubbard object.
        Last Modified: 2020-08-04
        """
        # Load the file
        with open(f) as file:
            di = json.load(file)
        # Create a KagomeHubbard object
        kagome = cls(nrows=di['nrows'], ncols=di['ncols'],
                     u=di['u'], nup=np.asarray(di['nup']),
                     ndown=np.asarray(di['ndown']),
                     allow_fractions=di['allow_fractions'])
        kagome.set_kinetic_from_matrix(np.asarray(di['kin']))
        kagome.set_mag(di['mag'])
        return kagome
    #
    def save(self, f):
        """
        Save a JSON representation of the object's data to file f.

        Inputs: f - string, filename.
        Writes a text file f.
        Last Modified: 2021-05-18
        """
        with open(f, mode='w') as file:
            json.dump({'nrows': int(self.nrows),
                       'ncols': int(self.ncols),
                       'u': float(self.u),
                       'mag': float(self.mag),
                       'nup': self.nup.tolist(),
                       'ndown': self.ndown.tolist(),
                       'allow_fractions': bool(self.allow_fractions),
                       'kin': self.kin.tolist()},
                      file)
            # Have to use tolist because numpy arrays aren't JSON-able.
    #
    ## ELECTRON DENSITY MODIFIERS
    #
    def _electron_density_single_methods(self, nelect, method, up, **kwargs):
        """
        Handler for setting the electron density of the Kagome lattice.

        Accepts the 'star' method and 'points' keyword.
        See also the parent method.

        Inputs: nelect - number of electrons.
            method - string, {'star'}, specifies the method.
            up - Boolean. Whether this is spin up electrons.
        Keyword Arguments:
            points - Boolean. Default True. For star method, whether to
                place the spin up electrons at the points of the star.
        Output: electron density - (self.nsites,) ndarray.
        Last Modified: 2020-08-11
        """
        # Process kwargs
        points = kwargs.get('points', True)
        # Check the method.
        if method == 'star':
            # Star method. We alternate based on spin up or down.
            if up:
                density = self._electron_density_star(nelect, points)
            else:
                density = self._electron_density_star(nelect, (not points))
        else:
            # The method is not here. Pass execution up the MRO.
            density = super()._electron_density_single_methods(nelect,
                                                               method, up, **kwargs)
        return density
    #
    def _electron_density_star(self, n, points):
        """
        Produce a star-like configuration of electron density.

        I found this to be the ground state of the Kagome Hubbard lattice
        for a good part of the parameter space.

        Inputs: n - integer, number of electrons.
            points - Boolean, whether to put most of the density in the
                points of the stars (True) or the inner bits (False).
        Output: 1D ndarray representing electron density.
        Last Modified: 2020-07-15
        """
        # Get the coordinates of the points of the star.
        rowcolmgrid = np.mgrid[0:self.nrows, 0:self.ncols]
        # In the rows and columns, which sublattice site has the star point?
        ncoord = np.mod(rowcolmgrid[0] + 2*rowcolmgrid[1], 3)
        # Convert to numerical coordinates.
        pcoords = (ncoord + 3*rowcolmgrid[0] +
                   3*self.nrows*rowcolmgrid[1]).flatten()
        # A useful constant: number of unit cells
        ncells = self.nrows*self.ncols
        # Four cases, based on whether to fill points first, and the filling.
        if points:
            if n <= self.nrows*self.ncols:
                # One third filling or less
                density = np.zeros(3*ncells)
                density[pcoords] = n/ncells  # Just fill the points of the stars.
            else:
                # Put overflow from star points into other points.
                density = np.ones(3*ncells) * (n/(2*ncells)-1/2)
                density[pcoords] = 1
        else:
            if n <= 2*self.nrows*self.ncols:
                # Two thirds filling or less
                # Leave the star points empty
                density = np.ones(3*ncells) * (n/(2*ncells))
                density[pcoords] = 0
            else:
                # Put overflow into the star points
                density = np.ones(3*ncells)
                density[pcoords] = n/ncells - 2
        assert abs(density.sum() - n) < 1e-14
        return density
    #
    ## GETTERS
    #
    def get_coordinates(self):
        """
        Return the coordinates for plotting electron density.

        Output: a (self.nsites,2) ndarray of floats.
        Last Modified: 2020-08-10
        """
        return kagome_coordinates(self.nrows, self.ncols)
    #
    ## PLOTTERS
    #
    #
    ## SETTERS
    #
    def set_kinetic(self, t):
        """
        Create the kinetic energy part of the kagome Hamiltonian matrix.

        Coordinates are sublattice+row*3+col*nrows*3.

        Inputs: t - real number. Hopping constant.
        Effect: sets self.kin to a 3*nrows*ncols dimensional square ndarray.
        Last Modified: 2020-08-10
        """
        # Have the kinetic energy term in tensor form,
        # where sublattice site, row and column are separate indices.
        tensor = -t*kagome_adjacency_tensor(self.nrows, self.ncols)
        # Reshape to a square matrix.
        length = self.nsites
        self.kin = np.reshape(tensor, (length, length), order='F')
    #
    def set_kinetic_random(self, t, wt=0, we=0):
        """
        Create a kinetic energy matrix with some random noise in it.

        Random noise is uniform. Noise of hopping constants centred on t
        and noise of on-site energy centred on 0.

        Inputs: t - number. Hopping constant.
            wt - number. Width of the random noise of hopping.
            we - number. Width of the random noise of on-site energy.
        Effects: sets self.kin
        Last Modified: 2020-11-13
        """
        rng = np.random.default_rng()
        # Get the regular kinetic energy set.
        self.set_kinetic(t)
        # Get some random noise for the whole matrix.
        noise = rng.random(self.kin.shape)*wt - wt/2
        # Zero out elements which should be zero.
        noise[self.kin == 0] = 0
        # Make the noise matrix Hermitian.
        # Zero out the lower triangle, otherwise next step will be sum of
        # two random variates which is not a uniform distribution.
        noise = np.triu(noise)
        noise = noise + noise.transpose()
        # Apply noise to the kinetic energy.
        self.kin += noise
        # Now get the noisy diagonal for on-site energy.
        self.kin += np.diag(rng.random(len(self.kin))*we - we/2)
        # Done.
    #


def simulate(nrows, ncols, nelectup, nelectdown, u, **kwargs):
    """
    Sets up and does a Hubbard calculation on a kagome lattice.

    I recommend you look to hubbard.utils instead. This is deprecated.

    Inputs: nrows - positive integer. Number of rows in the supercell.
        ncols - positive integer. Number of columns in the supercell.
        nelectup - non-negative integer. Number of spin up electrons.
            Must be no greater than 3*nrows*ncols.
        nelectdown - non-negative integer. Number of spin down electrons.
            Must be no greater than 3*nrows*ncols.
        u - real number. Hubbard U parameter.
    Keyword Arguments:
        t - real number, default 1. Hopping parameter.
        scheme - string, default 'linear'. Mixing scheme. Currently,
            only linear mixing is implemented.
        mix - number between 0 and 1, default 0.5. Linear mixing parameter.
        ediff - positive number, default 1e-6. Energy difference below
            which we assume the self-consistency cycle has converged.
        max_iter - positive integer, default 500. Maximum number of
            iterations of self-consistency to perform.
        initial - string, default 'random'. Determines how the electron
            density is initialised. Options are 'random' and 'uniform'.
        alpha - optional positive number, default None. For 'random'
            initial. Dirichlet parameter for random generation. If not
            set, is automatically chosen for optimal spread.
    Outputs: KagomeHubbard object.
    Last Modified: 2020-07-10
    """
    # Process default values with kwargs.get(key, default).
    # t, hopping parameter.
    t = kwargs.get('t', 1)
    # Mixing scheme.
    scheme = kwargs.get('scheme', 'linear')
    # Parameters for mixing scheme.
    if scheme == 'linear':
        mix = kwargs.get('mix', 0.5)
        ediff = kwargs.get('ediff', 1e-6)
        max_iter = kwargs.get('max_iter', 500)
    else:
        raise ValueError("Mixing scheme "+str(scheme)+" is not implemented.")
    # Density initialisation scheme
    initial = kwargs.get('initial', 'random')
    # BUG FIX: `alpha` was previously bound only when initial == 'random',
    # so any other `initial` raised NameError at the constructor call below.
    # Bind it unconditionally; None is the documented "auto" default.
    alpha = kwargs.get('alpha', None)
    # Input processing complete.
    # Create the object
    kagome = KagomeHubbard(nrows, ncols, t=t, u=u, nup=nelectup, ndown=nelectdown,
                           method=initial, alpha=alpha)
    # Run the simulation
    if scheme == 'linear':
        kagome.linear_mixing(max_iter, ediff, mix)
    else:
        raise ValueError("Mixing scheme "+str(scheme)+" is not implemented.")
    return kagome


def kagome_coordinates(nrows, ncols):
    """
    For each point in the kagome lattice, gives its Cartesian coordinates.

    Inputs: nrows and ncols, integers, number of rows and columns in supercell.
    Output: a (3*nrows*ncols, 2) array
    Last Modified: 2020-08-10
    """
    # Lattice vectors.
    a1 = np.array([1/2, np.sqrt(3)/2])
    a2 = np.array([-1/2, np.sqrt(3)/2])
    # Sub-lattice vectors
    sub = np.array([[0, 0], a1/2, a2/2])
    # There's probably a vectorised way to do this, but I don't know it.
    # I generally don't need to run this too frequently, and its
    # efficiency isn't atrocious.
    coords = np.empty((3, nrows, ncols, 2))
    for i in range(3):
        for j in range(nrows):
            for k in range(ncols):
                coords[i, j, k] = sub[i]+j*a1+k*a2
    return np.reshape(coords, (3*nrows*ncols, 2), order='F')


def kagome_adjacency_tensor(nrows, ncols):
    """
    Creates the adjacency tensor for the kagome lattice.

    First three coords is input. Second 3 is output.
    Returns a 3*nrows*ncols*3*nrows*ncols ndarray.
    Elements are 1 or 0. Can also have 2's if nrows or ncols is 1.
    Last Modified: 2020-06-03
    """
    # Initialise the adjacency tensor.
    adj = np.zeros((3, nrows, ncols, 3, nrows, ncols))
    # We have 12 adjacencies (counting each direction separately)
    # Use multiplication and broadcasting.
    # (0,i,j)->(1,i,j)
    # Generate matrices mapping each of coordinates.
    m0to1 = np.zeros((3, 1, 1, 3, 1, 1))
    m0to1[0, 0, 0, 1, 0, 0] = 1
    idrow = np.identity(nrows).reshape(1, nrows, 1, 1, nrows, 1)
    idcol = np.identity(ncols).reshape(1, 1, ncols, 1, 1, ncols)
    # Multiply together to get the full operation.
    op = m0to1*idrow*idcol
    # Add to the adjacency matrix.
    adj += op
    # (0,i,j)->(2,i,j)
    m0to2 = np.zeros((3, 1, 1, 3, 1, 1))
    m0to2[0, 0, 0, 2, 0, 0] = 1
    op = m0to2*idrow*idcol
    adj += op
    # (0,i,j)->(1,i-1,j)
    rowm1 = np.zeros((nrows, nrows))  # Row minus 1
    rowm1[(np.arange(nrows), np.mod(np.arange(nrows)-1, nrows))] = 1
    # mod implements periodic boundary conditions.
    rowm1 = rowm1.reshape((1, nrows, 1, 1, nrows, 1))
    op = m0to1*rowm1*idcol
    adj += op
    # (0,i,j)->(2,i,j-1)
    colm1 = np.zeros((ncols, ncols))
    colm1[(np.arange(ncols), np.mod(np.arange(ncols)-1, ncols))] = 1
    colm1 = colm1.reshape((1, 1, ncols, 1, 1, ncols))
    op = m0to2*idrow*colm1
    adj += op
    # (1,i,j)->(0,i,j)
    m1to0 = np.zeros((3, 1, 1, 3, 1, 1))
    m1to0[1, 0, 0, 0, 0, 0] = 1
    op = m1to0*idrow*idcol
    adj += op
    # (1,i,j)->(2,i,j)
    m1to2 = np.zeros((3, 1, 1, 3, 1, 1))
    m1to2[1, 0, 0, 2, 0, 0] = 1
    op = m1to2*idrow*idcol
    adj += op
    # (1,i,j)->(0,i+1,j)
    rowp1 = np.zeros((nrows, nrows))  # Row plus 1
    rowp1[(np.arange(nrows), np.mod(np.arange(nrows)+1, nrows))] = 1
    rowp1 = rowp1.reshape((1, nrows, 1, 1, nrows, 1))
    op = m1to0*rowp1*idcol
    adj += op
    # (1,i,j)->(2,i+1,j-1)
    op = m1to2*rowp1*colm1
    adj += op
    # (2,i,j)->(0,i,j)
    m2to0 = np.zeros((3, 1, 1, 3, 1, 1))
    m2to0[(2, 0, 0, 0, 0, 0)] = 1
    op = m2to0*idrow*idcol
    adj += op
    # (2,i,j)->(1,i,j)
    m2to1 = np.zeros((3, 1, 1, 3, 1, 1))
    m2to1[(2, 0, 0, 1, 0, 0)] = 1
    op = m2to1*idrow*idcol
    adj += op
    # (2,i,j)->(0,i,j+1)
    colp1 = np.zeros((ncols, ncols))
    colp1[(np.arange(ncols), np.mod(np.arange(ncols)+1, ncols))] = 1
    colp1 = colp1.reshape((1, 1, ncols, 1, 1, ncols))
    op = m2to0*idrow*colp1
    adj += op
    # (2,i,j)->(1,i-1,j+1)
    op = m2to1*rowm1*colp1
    adj += op
    # Sanity checking
    # We have the right number of elements
    assert adj.sum() == 12*nrows*ncols
    # The tensor is symmetric
    assert (adj == adj.transpose((3, 4, 5, 0, 1, 2))).all()
    # Return
    return adj
PypiClean
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/@types/node/v8.d.ts
declare module 'v8' {
    import { Readable } from 'node:stream';
    // Per-heap-space statistics entry, as returned by getHeapSpaceStatistics().
    interface HeapSpaceInfo {
        space_name: string;
        space_size: number;
        space_used_size: number;
        space_available_size: number;
        physical_space_size: number;
    }
    /** Signifies if the --zap_code_space option is enabled or not. 1 == enabled, 0 == disabled. */
    type DoesZapCodeSpaceFlag = 0 | 1;
    // Heap statistics as returned by getHeapStatistics(). All sizes in bytes.
    interface HeapInfo {
        total_heap_size: number;
        total_heap_size_executable: number;
        total_physical_size: number;
        total_available_size: number;
        used_heap_size: number;
        heap_size_limit: number;
        malloced_memory: number;
        peak_malloced_memory: number;
        does_zap_garbage: DoesZapCodeSpaceFlag;
        number_of_native_contexts: number;
        number_of_detached_contexts: number;
    }
    // Code/metadata statistics as returned by getHeapCodeStatistics().
    interface HeapCodeStatistics {
        code_and_metadata_size: number;
        bytecode_and_metadata_size: number;
        external_script_source_size: number;
    }
    /**
     * Returns an integer representing a version tag derived from the V8 version,
     * command-line flags, and detected CPU features. This is useful for determining
     * whether a `vm.Script` `cachedData` buffer is compatible with this instance
     * of V8. The value changes when flags are toggled (e.g. after
     * `v8.setFlagsFromString('--allow_natives_syntax')`).
     * @since v8.0.0
     */
    function cachedDataVersionTag(): number;
    /**
     * Returns an object describing the V8 heap (see `HeapInfo`).
     *
     * * `does_zap_garbage` is a 0/1 boolean, which signifies whether the
     * `--zap_code_space` option is enabled or not. This makes V8 overwrite heap
     * garbage with a bit pattern, enlarging the RSS footprint.
     * * `number_of_native_contexts` is the number of top-level contexts currently
     * active. Increase of this number over time indicates a memory leak.
     * * `number_of_detached_contexts` is the number of contexts that were detached
     * and not yet garbage collected. Non-zero indicates a potential memory leak.
     * @since v1.0.0
     */
    function getHeapStatistics(): HeapInfo;
    /**
     * Returns statistics about the V8 heap spaces, i.e. the segments which make up
     * the V8 heap (e.g. `new_space`, `old_space`, `code_space`, `map_space`,
     * `large_object_space`). Neither the ordering of heap spaces, nor the
     * availability of a heap space can be guaranteed as the statistics are provided
     * via the V8 `GetHeapSpaceStatistics` function and may change from one V8
     * version to the next.
     * @since v6.0.0
     */
    function getHeapSpaceStatistics(): HeapSpaceInfo[];
    /**
     * The `v8.setFlagsFromString()` method can be used to programmatically set
     * V8 command-line flags. This method should be used with care. Changing settings
     * after the VM has started may result in unpredictable behavior, including
     * crashes and data loss; or it may simply do nothing.
     *
     * The V8 options available for a version of Node.js may be determined by
     * running `node --v8-options`.
     * @since v1.0.0
     */
    function setFlagsFromString(flags: string): void;
    /**
     * Generates a snapshot of the current V8 heap and returns a Readable
     * Stream that may be used to read the JSON serialized representation.
     * This JSON stream format is intended to be used with tools such as
     * Chrome DevTools. The JSON schema is undocumented and specific to the
     * V8 engine, and may change from one version of V8 to the next.
     *
     * Creating a heap snapshot requires memory about twice the size of the heap at
     * the time the snapshot is created. This results in the risk of OOM killers
     * terminating the process. Generating a snapshot is a synchronous operation
     * which blocks the event loop for a duration depending on the heap size.
     * @since v11.13.0
     * @return A Readable Stream containing the V8 heap snapshot
     */
    function getHeapSnapshot(): Readable;
    /**
     * Generates a snapshot of the current V8 heap and writes it to a JSON
     * file. This file is intended to be used with tools such as Chrome
     * DevTools. The JSON schema is undocumented and specific to the V8
     * engine, and may change from one version of V8 to the next.
     *
     * A heap snapshot is specific to a single V8 isolate. When using
     * `worker threads`, a heap snapshot generated from the main thread will
     * not contain any information about the workers, and vice versa
     * (a worker can instead call `writeHeapSnapshot()` itself and post the
     * resulting `${filename}` back to the parent).
     *
     * Creating a heap snapshot requires memory about twice the size of the heap at
     * the time the snapshot is created. This results in the risk of OOM killers
     * terminating the process. Generating a snapshot is a synchronous operation
     * which blocks the event loop for a duration depending on the heap size.
     * @since v11.13.0
     * @param filename The file path where the V8 heap snapshot is to be saved. If not specified, a file name with the pattern `'Heap-${yyyymmdd}-${hhmmss}-${pid}-${thread_id}.heapsnapshot'` will be
     * generated, where `{pid}` will be the PID of the Node.js process, `{thread_id}` will be `0` when `writeHeapSnapshot()` is called from the main Node.js thread or the id of a
     * worker thread.
     * @return The filename where the snapshot was saved.
     */
    function writeHeapSnapshot(filename?: string): string;
    /**
     * Returns an object with code/bytecode/external-source sizes
     * (see `HeapCodeStatistics`).
     * @since v12.8.0
     */
    function getHeapCodeStatistics(): HeapCodeStatistics;
    /**
     * @since v8.0.0
     */
    class Serializer {
        /**
         * Writes out a header, which includes the serialization format version.
         */
        writeHeader(): void;
        /**
         * Serializes a JavaScript value and adds the serialized representation to the
         * internal buffer.
         *
         * This throws an error if `value` cannot be serialized.
         */
        writeValue(val: any): boolean;
        /**
         * Returns the stored internal buffer. This serializer should not be used once
         * the buffer is released. Calling this method results in undefined behavior
         * if a previous write has failed.
         */
        releaseBuffer(): Buffer;
        /**
         * Marks an `ArrayBuffer` as having its contents transferred out of band.
         * Pass the corresponding `ArrayBuffer` in the deserializing context to `deserializer.transferArrayBuffer()`.
         * @param id A 32-bit unsigned integer.
         * @param arrayBuffer An `ArrayBuffer` instance.
         */
        transferArrayBuffer(id: number, arrayBuffer: ArrayBuffer): void;
        /**
         * Write a raw 32-bit unsigned integer.
         * For use inside of a custom `serializer._writeHostObject()`.
         */
        writeUint32(value: number): void;
        /**
         * Write a raw 64-bit unsigned integer, split into high and low 32-bit parts.
         * For use inside of a custom `serializer._writeHostObject()`.
         */
        writeUint64(hi: number, lo: number): void;
        /**
         * Write a JS `number` value.
         * For use inside of a custom `serializer._writeHostObject()`.
         */
        writeDouble(value: number): void;
        /**
         * Write raw bytes into the serializer's internal buffer. The deserializer
         * will require a way to compute the length of the buffer.
         * For use inside of a custom `serializer._writeHostObject()`.
         */
        writeRawBytes(buffer: NodeJS.TypedArray): void;
    }
    /**
     * A subclass of `Serializer` that serializes `TypedArray` (in particular
     * `Buffer`) and `DataView` objects as host objects, and only stores the part
     * of their underlying `ArrayBuffer`s that they are referring to.
     * @since v8.0.0
     */
    class DefaultSerializer extends Serializer {}
    /**
     * @since v8.0.0
     */
    class Deserializer {
        constructor(data: NodeJS.TypedArray);
        /**
         * Reads and validates a header (including the format version).
         * May, for example, reject an invalid or unsupported wire format. In that case,
         * an `Error` is thrown.
         */
        readHeader(): boolean;
        /**
         * Deserializes a JavaScript value from the buffer and returns it.
         */
        readValue(): any;
        /**
         * Marks an `ArrayBuffer` as having its contents transferred out of band.
         * Pass the corresponding `ArrayBuffer` in the serializing context to `serializer.transferArrayBuffer()` (or return the `id` from `serializer._getSharedArrayBufferId()` in the case of
         * `SharedArrayBuffer`s).
         * @param id A 32-bit unsigned integer.
         * @param arrayBuffer An `ArrayBuffer` instance.
         */
        transferArrayBuffer(id: number, arrayBuffer: ArrayBuffer): void;
        /**
         * Reads the underlying wire format version. Likely mostly to be useful to
         * legacy code reading old wire format versions. May not be called before `.readHeader()`.
         */
        getWireFormatVersion(): number;
        /**
         * Read a raw 32-bit unsigned integer and return it.
         * For use inside of a custom `deserializer._readHostObject()`.
         */
        readUint32(): number;
        /**
         * Read a raw 64-bit unsigned integer and return it as an array `[hi, lo]` with two 32-bit unsigned integer entries.
         * For use inside of a custom `deserializer._readHostObject()`.
         */
        readUint64(): [number, number];
        /**
         * Read a JS `number` value.
         * For use inside of a custom `deserializer._readHostObject()`.
         */
        readDouble(): number;
        /**
         * Read raw bytes from the deserializer's internal buffer. The `length` parameter
         * must correspond to the length of the buffer that was passed to `serializer.writeRawBytes()`.
         * For use inside of a custom `deserializer._readHostObject()`.
         */
        readRawBytes(length: number): Buffer;
    }
    /**
     * A subclass of `Deserializer` corresponding to the format written by `DefaultSerializer`.
     * @since v8.0.0
     */
    class DefaultDeserializer extends Deserializer {}
    /**
     * Uses a `DefaultSerializer` to serialize `value` into a buffer.
     *
     * `ERR_BUFFER_TOO_LARGE` will be thrown when trying to
     * serialize a huge object which requires buffer
     * larger than `buffer.constants.MAX_LENGTH`.
     * @since v8.0.0
     */
    function serialize(value: any): Buffer;
    /**
     * Uses a `DefaultDeserializer` with default options to read a JS value
     * from a buffer.
     * @since v8.0.0
     * @param buffer A buffer returned by {@link serialize}.
     */
    function deserialize(buffer: NodeJS.TypedArray): any;
    /**
     * The `v8.takeCoverage()` method allows the user to write the coverage started by `NODE_V8_COVERAGE` to disk on demand. This method can be invoked multiple
     * times during the lifetime of the process. Each time the execution counter will
     * be reset and a new coverage report will be written to the directory specified
     * by `NODE_V8_COVERAGE`.
     *
     * When the process is about to exit, one last coverage will still be written to
     * disk unless {@link stopCoverage} is invoked before the process exits.
     * @since v15.1.0, v14.18.0, v12.22.0
     */
    function takeCoverage(): void;
    /**
     * The `v8.stopCoverage()` method allows the user to stop the coverage collection
     * started by `NODE_V8_COVERAGE`, so that V8 can release the execution count
     * records and optimize code. This can be used in conjunction with {@link takeCoverage} if the user wants to collect the coverage on demand.
     * @since v15.1.0, v14.18.0, v12.22.0
     */
    function stopCoverage(): void;
    /**
     * This API collects GC data in current thread.
     */
    class GCProfiler {
        /**
         * Start collecting GC data.
         */
        start(): void;
        /**
         * Stop collecting GC data and return an object.
         */
        stop(): GCProfilerResult;
    }
    // Result of GCProfiler.stop(): per-GC-cycle statistics with before/after heap state.
    interface GCProfilerResult {
        version: number;
        startTime: number;
        endTime: number;
        statistics: Array<{
            gcType: string;
            cost: number;
            beforeGC: {
                heapStatistics: HeapStatistics;
                heapSpaceStatistics: HeapSpaceStatistics[];
            };
            afterGC: {
                heapStatistics: HeapStatistics;
                heapSpaceStatistics: HeapSpaceStatistics[];
            };
        }>;
    }
    // camelCase variant of HeapInfo used by the GC profiler.
    interface HeapStatistics {
        totalHeapSize: number;
        totalHeapSizeExecutable: number;
        totalPhysicalSize: number;
        totalAvailableSize: number;
        totalGlobalHandlesSize: number;
        usedGlobalHandlesSize: number;
        usedHeapSize: number;
        heapSizeLimit: number;
        mallocedMemory: number;
        externalMemory: number;
        peakMallocedMemory: number;
    }
    // camelCase variant of HeapSpaceInfo used by the GC profiler.
    interface HeapSpaceStatistics {
        spaceName: string;
        spaceSize: number;
        spaceUsedSize: number;
        spaceAvailableSize: number;
        physicalSpaceSize: number;
    }
    /**
     * Called when a promise is constructed. This does not mean that corresponding before/after events will occur, only that the possibility exists. This will
     * happen if a promise is created without ever getting a continuation.
     * @since v17.1.0, v16.14.0
     * @param promise The promise being created.
     * @param parent The promise continued from, if applicable.
     */
    interface Init {
        (promise: Promise<unknown>, parent: Promise<unknown>): void;
    }
    /**
     * Called before a promise continuation executes. This can be in the form of `then()`, `catch()`, or `finally()` handlers or an await resuming.
     *
     * The before callback will be called 0 to N times. The before callback will typically be called 0 times if no continuation was ever made for the promise.
     * The before callback may be called many times in the case where many continuations have been made from the same promise.
     * @since v17.1.0, v16.14.0
     */
    interface Before {
        (promise: Promise<unknown>): void;
    }
    /**
     * Called immediately after a promise continuation executes. This may be after a `then()`, `catch()`, or `finally()` handler or before an await after another await.
     * @since v17.1.0, v16.14.0
     */
    interface After {
        (promise: Promise<unknown>): void;
    }
    /**
     * Called when the promise receives a resolution or rejection value. This may occur synchronously in the case of {@link Promise.resolve()} or
     * {@link Promise.reject()}.
     * @since v17.1.0, v16.14.0
     */
    interface Settled {
        (promise: Promise<unknown>): void;
    }
    /**
     * Key events in the lifetime of a promise have been categorized into four areas: creation of a promise, before/after a continuation handler is called or
     * around an await, and when the promise resolves or rejects.
     *
     * Because promises are asynchronous resources whose lifecycle is tracked via the promise hooks mechanism, the `init()`, `before()`, `after()`, and
     * `settled()` callbacks must not be async functions as they create more promises which would produce an infinite loop.
     * @since v17.1.0, v16.14.0
     */
    interface HookCallbacks {
        init?: Init;
        before?: Before;
        after?: After;
        settled?: Settled;
    }
    interface PromiseHooks {
        /**
         * The `init` hook must be a plain function. Providing an async function will throw as it would produce an infinite microtask loop.
         * @since v17.1.0, v16.14.0
         * @param init The {@link Init | `init` callback} to call when a promise is created.
         * @return Call to stop the hook.
         */
        onInit: (init: Init) => Function;
        /**
         * The `settled` hook must be a plain function. Providing an async function will throw as it would produce an infinite microtask loop.
         * @since v17.1.0, v16.14.0
         * @param settled The {@link Settled | `settled` callback} to call when a promise is created.
         * @return Call to stop the hook.
         */
        onSettled: (settled: Settled) => Function;
        /**
         * The `before` hook must be a plain function. Providing an async function will throw as it would produce an infinite microtask loop.
         * @since v17.1.0, v16.14.0
         * @param before The {@link Before | `before` callback} to call before a promise continuation executes.
         * @return Call to stop the hook.
         */
        onBefore: (before: Before) => Function;
        /**
         * The `after` hook must be a plain function. Providing an async function will throw as it would produce an infinite microtask loop.
         * @since v17.1.0, v16.14.0
         * @param after The {@link After | `after` callback} to call after a promise continuation executes.
         * @return Call to stop the hook.
         */
        onAfter: (after: After) => Function;
        /**
         * Registers functions to be called for different lifetime events of each promise.
         * The callbacks `init()`/`before()`/`after()`/`settled()` are called for the respective events during a promise's lifetime.
         * All callbacks are optional. For example, if only promise creation needs to be tracked, then only the init callback needs to be passed.
         * The hook callbacks must be plain functions. Providing async functions will throw as it would produce an infinite microtask loop.
         * @since v17.1.0, v16.14.0
         * @param callbacks The {@link HookCallbacks | Hook Callbacks} to register
         * @return Used for disabling hooks
         */
        createHook: (callbacks: HookCallbacks) => Function;
    }
    /**
     * The `promiseHooks` interface can be used to track promise lifecycle events.
     * @since v17.1.0, v16.14.0
     */
    const promiseHooks: PromiseHooks;
}
declare module 'node:v8' {
    export * from 'v8';
}
PypiClean
/MetaStalk-2.2.1.tar.gz/MetaStalk-2.2.1/README.md
# MetaStalk [![GitHub](https://img.shields.io/github/license/Cyb3r-Jak3/MetaStalk?style=flat)](https://github.com/Cyb3r-Jak3/MetaStalk/blob/master/LICENSE) ![Gitlab pipeline status (branch)](https://img.shields.io/gitlab/pipeline/Cyb3r-Jak3/MetaStalk/master?label=Build&style=flat) ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/metastalk) ![PyPI](https://img.shields.io/pypi/v/metastalk) [![Maintainability](https://api.codeclimate.com/v1/badges/9b95ea5f0c8a77eab0ed/maintainability)](https://codeclimate.com/github/Cyb3r-Jak3/MetaStalk/maintainability) [![Test Coverage](https://api.codeclimate.com/v1/badges/9b95ea5f0c8a77eab0ed/test_coverage)](https://codeclimate.com/github/Cyb3r-Jak3/MetaStalk/test_coverage) [![codecov](https://codecov.io/gl/Cyb3r-Jak3/metastalk/branch/master/graph/badge.svg)](https://codecov.io/gl/Cyb3r-Jak3/metastalk) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/68c8c8c6b4d5421cb0e8a81f69696944)](https://www.codacy.com/manual/Cyb3r_Jak3/metastalk?utm_source=gitlab.com&amp;utm_medium=referral&amp;utm_content=Cyb3r-Jak3/metastalk&amp;utm_campaign=Badge_Grade) [![Scrutinizer Code Quality](https://scrutinizer-ci.com/g/Cyb3r-Jak3/MetaStalk/badges/quality-score.png?b=master)](https://scrutinizer-ci.com/g/Cyb3r-Jak3/MetaStalk/?branch=master) [![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/Cyb3r-Jak3/MetaStalk.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Cyb3r-Jak3/MetaStalk/context:python) ## About MetaStalk is a tool that can be used to generate graphs from the metadata of JPEG, TIFF, and HEIC images, which are tested. More formats are supported but untested. 
It currently creates graphs for:

- GPS coordinates (map)
- Focal Length, Camera model, Camera manufacturer, Producer information (Pie Chart)
- Timestamp information (Chart)

Example photos from [ianare/exif-samples](https://github.com/ianare/exif-samples/tree/master/jpg/gps), [exiftool](https://owl.phy.queensu.ca/~phil/exiftool/sample_images.html), [drewnoakes/metadata-extractor-images](https://github.com/drewnoakes/metadata-extractor-images).

All development is done on GitLab and mirrored to GitHub. Please read [contributing.md](CONTRIBUTING.md) for development.

Requires Python 3.6 and up.

## How to use

MetaStalk is available as a package on pypi.org or you can do a source install.

```bash
usage: MetaStalk [-h] [-a] [-d] [-e {pdf,svg,webp,jpeg,png,html,html_offline}]
                 [--no-open] [-o OUTPUT] [-t] [-v]
                 [files [files ...]]

Tool to graph image metadata.

positional arguments:
  files                 Path of photos to check.

optional arguments:
  -h, --help            show this help message and exit
  -a, --alphabetic      Sorts charts in alphabetical order rather than the default order
  -d, --debug           Sets logging level to DEBUG.
  -e, --export {pdf,svg,webp,jpeg,png,html,html_offline}
                        Exports the graphs rather than all on one webpage
  --no-open             Will only start the server and not open the browser to view it
  -o OUTPUT, --output OUTPUT
                        The name of the directory to output exports to. Will
                        be created if it does not exist. Defaults to
                        metastalk_exports.
  -t, --test            Does not show the graphs at the end.
  -v, --verbose         Sets logging level to INFO
```

### Extras

MetaStalk has extra installs available for an increased feature set.

- **metastalk[image]** Allows for exporting charts to images in png, jpeg, webp, svg and pdf formats.
  - [Orca](https://github.com/plotly/orca) is required.
- **metastalk[heic]** Allows for parsing of heic images. **Linux or Mac is required** because MetaStalk uses [pyheif](https://github.com/david-poirier-csn/pyheif) and thus [libheif](https://github.com/strukturag/libheif).
- **metastalk[full]** Combination of [image] and [heic]. Requires Linux or Mac and needs [orca](https://github.com/plotly/orca) installed.

### PyPi Install

```bash
pip install metastalk
metastalk <Path to files> #i.e. metastalk ./ExamplePhotos/
```

### Source Install

```bash
git clone https://gitlab.com/Cyb3r-Jak3/metastalk
cd metastalk
python setup.py install
metastalk <Path to files> #i.e. metastalk ./ExamplePhotos/
```

## Disclaimer

This is for educational/proof of concept purposes only. What you do with MetaStalk is **your** responsibility.

[![DeepSource](https://static.deepsource.io/deepsource-badge-light-mini.svg)](https://deepsource.io/gl/Cyb3r-Jak3/MetaStalk/?ref=repository-badge)
PypiClean
/CodeIntel-2.0.0b19-cp34-cp34m-macosx_10_12_x86_64.whl/codeintel/codeintel2/lib_srcs/node.js/0.8/zlib.js
/**
 * API stub of the Node.js 0.8 `zlib` module for the CodeIntel static
 * scanner -- not executable code.  Function bodies are intentionally empty
 * and the numeric constants below are placeholder zeros: only the names,
 * signatures and doc comments are consumed (for completions and calltips).
 */
var zlib = {};

/**
 * Returns a new Gzip object with an options.
 * @param options
 * @returns {zlib.Gzip} a new Gzip object with an options
 */
zlib.createGzip = function(options) {}

/**
 * Returns a new Gunzip object with an options.
 * @param options
 * @returns {zlib.Gunzip} a new Gunzip object with an options
 */
zlib.createGunzip = function(options) {}

/**
 * Returns a new Deflate object with an options.
 * @param options
 * @returns {zlib.Deflate} a new Deflate object with an options
 */
zlib.createDeflate = function(options) {}

/**
 * Returns a new Inflate object with an options.
 * @param options
 * @returns {zlib.Inflate} a new Inflate object with an options
 */
zlib.createInflate = function(options) {}

/**
 * Returns a new DeflateRaw object with an options.
 * @param options
 * @returns {zlib.DeflateRaw} a new DeflateRaw object with an options
 */
zlib.createDeflateRaw = function(options) {}

/**
 * Returns a new InflateRaw object with an options.
 * @param options
 * @returns {zlib.InflateRaw} a new InflateRaw object with an options
 */
zlib.createInflateRaw = function(options) {}

/**
 * Returns a new Unzip object with an options.
 * @param options
 * @returns {zlib.Unzip} a new Unzip object with an options
 */
zlib.createUnzip = function(options) {}

/**
 * Compress a string with Deflate.
 * @param buf
 * @param callback
 */
zlib.deflate = function(buf, callback) {}

/**
 * Compress a string with DeflateRaw.
 * @param buf
 * @param callback
 */
zlib.deflateRaw = function(buf, callback) {}

/**
 * Compress a string with Gzip.
 * @param buf
 * @param callback
 */
zlib.gzip = function(buf, callback) {}

/**
 * Decompress a raw Buffer with Gunzip.
 * @param buf
 * @param callback
 */
zlib.gunzip = function(buf, callback) {}

/**
 * Decompress a raw Buffer with Inflate.
 * @param buf
 * @param callback
 */
zlib.inflate = function(buf, callback) {}

/**
 * Decompress a raw Buffer with InflateRaw.
 * @param buf
 * @param callback
 */
zlib.inflateRaw = function(buf, callback) {}

/**
 * Decompress a raw Buffer with Unzip.
 * @param buf
 * @param callback
 */
zlib.unzip = function(buf, callback) {}

/**
 * Compress data using gzip.
 * @constructor
 */
zlib.Gzip = function() {}
// Double prototype assignment is the stub convention for "both readable
// and writable"; in real JS the second assignment would win.
zlib.Gzip.prototype = new stream.ReadableStream();
zlib.Gzip.prototype = new stream.WritableStream();

/**
 * Decompress a gzip stream.
 * @constructor
 */
zlib.Gunzip = function() {}
zlib.Gunzip.prototype = new stream.ReadableStream();
zlib.Gunzip.prototype = new stream.WritableStream();

/**
 * Compress data using deflate.
 * @constructor
 */
zlib.Deflate = function() {}
zlib.Deflate.prototype = new stream.ReadableStream();
zlib.Deflate.prototype = new stream.WritableStream();

/**
 * Decompress a deflate stream.
 * @constructor
 */
zlib.Inflate = function() {}
zlib.Inflate.prototype = new stream.ReadableStream();
zlib.Inflate.prototype = new stream.WritableStream();

/**
 * Compress data using deflate, and do not append a zlib header.
 * @constructor
 */
zlib.DeflateRaw = function() {}
zlib.DeflateRaw.prototype = new stream.ReadableStream();
zlib.DeflateRaw.prototype = new stream.WritableStream();

/**
 * Decompress a raw deflate stream.
 * @constructor
 */
zlib.InflateRaw = function() {}
zlib.InflateRaw.prototype = new stream.ReadableStream();
zlib.InflateRaw.prototype = new stream.WritableStream();

/**
 * Decompress either a Gzip- or Deflate-compressed stream by auto-detecting
 * the header.
 * @constructor
 */
zlib.Unzip = function() {}
zlib.Unzip.prototype = new stream.ReadableStream();
zlib.Unzip.prototype = new stream.WritableStream();

/* constants */
// Placeholder values: the real constants come from the native binding at
// runtime; the scanner only needs the names.
zlib.Z_OK = 0;
zlib.Z_STREAM_END = 0;
zlib.Z_NEED_DICT = 0;
zlib.Z_ERRNO = 0;
zlib.Z_STREAM_ERROR = 0;
zlib.Z_DATA_ERROR = 0;
zlib.Z_MEM_ERROR = 0;
zlib.Z_BUF_ERROR = 0;
zlib.Z_VERSION_ERROR = 0;
zlib.Z_NO_COMPRESSION = 0;
zlib.Z_BEST_SPEED = 0;
zlib.Z_BEST_COMPRESSION = 0;
zlib.Z_DEFAULT_COMPRESSION = 0;
zlib.Z_FILTERED = 0;
zlib.Z_HUFFMAN_ONLY = 0;
zlib.Z_RLE = 0;
zlib.Z_FIXED = 0;
zlib.Z_DEFAULT_STRATEGY = 0;
zlib.Z_BINARY = 0;
zlib.Z_TEXT = 0;
zlib.Z_ASCII = 0;
zlib.Z_UNKNOWN = 0;
zlib.Z_DEFLATED = 0;
zlib.Z_NULL = 0;

var stream = require('stream');

// NOTE(review): `exports = zlib;` is the stub-file convention consumed by
// the scanner; a runnable Node module would use `module.exports = zlib;`.
exports = zlib;
PypiClean
/ChadBot3-0.1-py3-none-any.whl/ChadBot/generation/rajat_work/qgen/generator/fpm/fpm.py
import itertools
import string

from tqdm import tqdm

from ...util import nlp
from .matcher import FuzzyMatcher
from .pattern import pattern_specs
from ..base import BaseGenerator

# 5W1H question words.
WH = {'who', 'what', 'why', 'where', 'when', 'how'}
# Coordinating conjunction (the FANBOYS mnemonic).
FANBOYS = {'for', 'and', 'nor', 'but', 'or', 'yet', 'so'}


class FPMGenerator(BaseGenerator):
    """
    Generate questions via fuzzy pattern matching on existing question patterns.

    An input question is matched against a library of templates expanded from
    ``pattern_specs``; the tokens captured by the match (``<sbj>``, ``<obj>``,
    ``<act>``, ``<st>``) are substituted into the *other* templates of the
    matched group to produce paraphrased questions.
    """

    def __init__(self):
        super().__init__("Fuzzy Question Pattern Matching")
        # One list of fully-expanded concrete patterns per spec group.
        self.patterns = self._generate_patterns(pattern_specs)
        self.matcher = FuzzyMatcher(self.patterns)

    @staticmethod
    def _generate_patterns(specs):
        """Expand every ``{token}`` placeholder in *specs* into concrete patterns.

        Each pattern containing placeholders is replaced by the cross product of
        all substitutions listed under the group's ``substitution_keys``.
        NOTE: mutates each group's ``patterns`` list in place and returns the
        expanded lists (one per group).
        """
        patterns = []
        for group in specs:
            for _ in range(len(group['patterns'])):  # iterate through all original patterns
                pattern = group['patterns'].pop(0).strip()
                tokens_list = []  # list of tokens that require substitution
                for word in pattern.split():
                    if word.startswith('{') and word.endswith('}'):
                        tokens_list.append(word[1:-1])
                temp = [pattern]
                for token in tokens_list:
                    for _ in range(len(temp)):  # iterate through all sub-patterns
                        curr_pattern = temp.pop(0)
                        for substitute in group['substitution_keys'][token]:
                            temp.append(curr_pattern.replace('{' + token + '}', substitute))
                group['patterns'].extend(temp)
            patterns.append(group['patterns'])
        return patterns

    @staticmethod
    def _format_input(sentence: str) -> str:
        """ Add a space after specific punctuation if it is not followed by a space """
        punctuations = "!),.:;?]}"
        char_array = []
        for i, s in enumerate(sentence):
            char_array.append(s)
            # Insert a space only when the next char is neither punctuation nor a space.
            if i < len(sentence) - 1 and s in punctuations and sentence[i + 1] not in punctuations + " ":
                char_array.append(" ")
        return "".join(char_array)

    @staticmethod
    def _format_output(sentence: str) -> str:
        """Strip trailing punctuation, capitalize the first word and end with '?'."""
        words = sentence.strip("!,.:;?").split()
        words[0] = words[0][0].upper() + words[0][1:]  # capitalize first letter
        words[-1] = words[-1] + "?"
        return " ".join(words)

    @staticmethod
    def _has_multiple_question(question: str) -> bool:
        """ Simple heuristic to determine whether a `question` contains multiple sub-questions. """
        if question.count('?') > 1:
            return True
        else:
            # "what and how ..." style compounds also count as multiple questions.
            combined_wh = [["{} and {}".format(w1, w2), "{} & {}".format(w1, w2)] for w1 in WH for w2 in WH]
            return any([c in question.lower() for c in itertools.chain.from_iterable(combined_wh)])

    @staticmethod
    def _split_question(question):
        """Split a compound question into a list of self-contained questions.

        Handles "If so"/"If not" follow-ups by re-stating the subject of the
        first split question, promotes a declarative sentence to shared
        context, and splits "<wh> and <wh>" compounds into two questions
        sharing the same tail.
        """
        spacy_nlp = nlp.get_spacy_model()

        def _resolve_followup_question(prev_question, followup_question):
            """ Resolve sentences start with "If so", "If not", etc. """
            sub_question_lower = followup_question.lower()
            if any(sub_question_lower.startswith(pre) for pre in ['if so', 'if not']):
                # Positions (counted over non-case/punct tokens) of the last
                # auxiliary and last subject seen before the root verb.
                last_aux_index = -1
                last_sub_index = -1
                index = 0
                with spacy_nlp.disable_pipes('ner'):
                    for token in spacy_nlp(prev_question):
                        if token.dep_ == 'aux':
                            last_aux_index = index
                        elif token.dep_ in ['nsubj', 'nsubjpass']:
                            last_sub_index = index
                        elif token.dep_ == 'ROOT':
                            break
                        if token.dep_ not in ['case', 'punct']:
                            index += 1
                tokens = prev_question.rstrip(".!?, ").split()
                sub = " ".join(tokens[last_aux_index + 1:last_sub_index + 1])
                aux = tokens[last_aux_index].lower()
                sub_sent = " ".join(tokens[last_sub_index + 1:]) + ","
                # Drop the leading two words ("If so" / "If not") before re-stating.
                followup_question = " ".join(followup_question.split()[2:])
                if sub_question_lower.startswith('if so'):
                    followup_question = " ".join(["If", sub, aux, sub_sent, followup_question])
                elif sub_question_lower.startswith('if not'):
                    followup_question = " ".join(["If", sub, aux, "not", sub_sent, followup_question])
            return followup_question

        if not FPMGenerator._has_multiple_question(question):
            return [question]

        result = []
        with spacy_nlp.disable_pipes('ner'):
            doc = spacy_nlp(question)
            for sent in doc.sents:
                # remove leading coordinating conjunction
                for cc in FANBOYS:
                    if sent.text.lower().startswith(cc):
                        result.append(sent.text[len(cc) + 1:])
                        break
                else:  # no conjunction matched (for/else)
                    if len(result) >= 1 and any(sent.text.lower().startswith(pre) for pre in ['if so', 'if not']):
                        result.append(_resolve_followup_question(result[0], sent.text))
                    else:
                        result.append(sent.text)
        context = ""
        combined_wh = [["{} and {}".format(w1, w2), "{} & {}".format(w1, w2)] for w1 in WH for w2 in WH]
        # Iterate over a copy because `result` is mutated inside the loop.
        for q in result.copy():
            # A declarative sentence becomes shared context for the questions.
            if q.endswith('.'):
                context = q + ' '
                result.remove(q)
                continue
            for phrase in itertools.chain.from_iterable(combined_wh):
                if phrase in q.lower():
                    # Split "<wh1> and <wh2> <tail>" into two questions with the same tail.
                    result.remove(q)
                    start_index = q.lower().index(phrase) + len(phrase)
                    sub_question = q[start_index:]
                    temp = phrase.split()
                    result.extend([temp[0].title() + sub_question, temp[-1].title() + sub_question])
                    break
        if context:
            results = [context + s for s in result]
        else:
            results = [nlp.resolve_coref(s, question) for s in result]
        return results

    @staticmethod
    def _preprocess(question):
        """Normalize *question* and split it into a list of single questions."""
        # 1. Convert contraction to regular form (e.g. "What's" to "What is")
        question = nlp.convert_contraction(FPMGenerator._format_input(question).strip())
        # 2. Separate multiple questions from sentence
        sentences = [question]
        if FPMGenerator._has_multiple_question(question):
            sentences = FPMGenerator._split_question(question)
        return sentences

    def generate(self, sentence):
        """Generate paraphrased questions for *sentence*.

        Returns a de-duplicated list of reformulations produced by substituting
        the matched tokens into sibling templates; empty if no template matches.
        """
        inputs = self._preprocess(sentence)
        result = []
        for sentence in inputs:
            # Extract question pattern
            matched_result = self.matcher.match(sentence)
            if not matched_result:
                continue
            else:
                tokens = matched_result.tokens
                if len(tokens['<st>']) > 1:
                    # concatenate multiple statements into single statement
                    tokens['<st>'] = [
                        ' and '.join(tokens['<st>']).translate(str.maketrans('', '', string.punctuation))]
                # Substitute tokens into other question patterns
                patterns_list = self.patterns[matched_result.group_id]
                for pattern in patterns_list:
                    if pattern.lower() == matched_result.pattern.lower():
                        continue
                    pattern_tokens = pattern.split()
                    # Only substitute when the captured tokens can fill every slot.
                    if (len(tokens['<sbj>']) == pattern_tokens.count('<sbj>') and
                            len(tokens['<obj>']) >= pattern_tokens.count('<obj>') and
                            len(tokens['<act>']) == pattern_tokens.count('<act>') and
                            len(tokens['<st>']) >= pattern_tokens.count('<st>')):
                        for i in range(pattern_tokens.count('<obj>')):
                            pattern_tokens[pattern_tokens.index('<obj>')] = tokens['<obj>'][i].strip('?')
                        for i in range(pattern_tokens.count('<sbj>')):
                            pattern_tokens[pattern_tokens.index('<sbj>')] = tokens['<sbj>'][i].strip('?')
                        for i in range(pattern_tokens.count('<act>')):
                            pattern_tokens[pattern_tokens.index('<act>')] = tokens['<act>'][i].strip('?')
                        for i in range(pattern_tokens.count('<st>')):
                            pattern_tokens[pattern_tokens.index('<st>')] = tokens['<st>'][i].strip('?')
                        # Coreference resolution (i.e. Find out what 'it' in a sentence is referring to)
                        permuted = ' '.join(pattern_tokens)
                        if tokens['<st>'] and pattern.split().count('<st>') == 0:
                            result.append(self._format_output(
                                nlp.resolve_coref(permuted, sentence))
                            )
                        else:
                            result.append(self._format_output(permuted))
        # NOTE(review): `sentence` here is the loop variable (the parameter is
        # shadowed), so only the *last* preprocessed input is excluded from the
        # output -- confirm this is intended.
        result = list(set(result).difference({sentence}))
        return result

    def batch_generate(self, sentences):
        """Run :meth:`generate` over *sentences*; returns {sentence: paraphrases}."""
        results = dict()
        for sentence in tqdm(sentences):
            results[sentence] = self.generate(sentence)
        return results
PypiClean
/BornAgain-21.0-cp310-cp310-macosx_13_0_arm64.whl/bornagain/lib/libBornAgainBase.py
from sys import version_info as _swig_python_version_info import _libBornAgainBase try: import builtins as __builtin__ except ImportError: import __builtin__ def _swig_repr(self): try: strthis = "proxy of " + self.this.__repr__() except __builtin__.Exception: strthis = "" return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) def _swig_setattr_nondynamic_instance_variable(set): def set_instance_attr(self, name, value): if name == "this": set(self, name, value) elif name == "thisown": self.this.own(value) elif hasattr(self, name) and isinstance(getattr(type(self), name), property): set(self, name, value) else: raise AttributeError("You cannot add instance attributes to %s" % self) return set_instance_attr def _swig_setattr_nondynamic_class_variable(set): def set_class_attr(cls, name, value): if hasattr(cls, name) and not isinstance(getattr(cls, name), property): set(cls, name, value) else: raise AttributeError("You cannot add class attributes to %s" % cls) return set_class_attr def _swig_add_metaclass(metaclass): """Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass""" def wrapper(cls): return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy()) return wrapper class _SwigNonDynamicMeta(type): """Meta class to enforce nondynamic attributes (no new attributes) for a class""" __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__) import weakref class SwigPyIterator(object): r"""Proxy of C++ swig::SwigPyIterator class.""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag") def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract") __repr__ = _swig_repr __swig_destroy__ = _libBornAgainBase.delete_SwigPyIterator def value(self): r"""value(SwigPyIterator self) -> PyObject *""" return _libBornAgainBase.SwigPyIterator_value(self) def incr(self, n=1): r"""incr(SwigPyIterator 
self, size_t n=1) -> SwigPyIterator""" return _libBornAgainBase.SwigPyIterator_incr(self, n) def decr(self, n=1): r"""decr(SwigPyIterator self, size_t n=1) -> SwigPyIterator""" return _libBornAgainBase.SwigPyIterator_decr(self, n) def distance(self, x): r"""distance(SwigPyIterator self, SwigPyIterator x) -> ptrdiff_t""" return _libBornAgainBase.SwigPyIterator_distance(self, x) def equal(self, x): r"""equal(SwigPyIterator self, SwigPyIterator x) -> bool""" return _libBornAgainBase.SwigPyIterator_equal(self, x) def copy(self): r"""copy(SwigPyIterator self) -> SwigPyIterator""" return _libBornAgainBase.SwigPyIterator_copy(self) def next(self): r"""next(SwigPyIterator self) -> PyObject *""" return _libBornAgainBase.SwigPyIterator_next(self) def __next__(self): r"""__next__(SwigPyIterator self) -> PyObject *""" return _libBornAgainBase.SwigPyIterator___next__(self) def previous(self): r"""previous(SwigPyIterator self) -> PyObject *""" return _libBornAgainBase.SwigPyIterator_previous(self) def advance(self, n): r"""advance(SwigPyIterator self, ptrdiff_t n) -> SwigPyIterator""" return _libBornAgainBase.SwigPyIterator_advance(self, n) def __eq__(self, x): r"""__eq__(SwigPyIterator self, SwigPyIterator x) -> bool""" return _libBornAgainBase.SwigPyIterator___eq__(self, x) def __ne__(self, x): r"""__ne__(SwigPyIterator self, SwigPyIterator x) -> bool""" return _libBornAgainBase.SwigPyIterator___ne__(self, x) def __iadd__(self, n): r"""__iadd__(SwigPyIterator self, ptrdiff_t n) -> SwigPyIterator""" return _libBornAgainBase.SwigPyIterator___iadd__(self, n) def __isub__(self, n): r"""__isub__(SwigPyIterator self, ptrdiff_t n) -> SwigPyIterator""" return _libBornAgainBase.SwigPyIterator___isub__(self, n) def __add__(self, n): r"""__add__(SwigPyIterator self, ptrdiff_t n) -> SwigPyIterator""" return _libBornAgainBase.SwigPyIterator___add__(self, n) def __sub__(self, *args): r""" __sub__(SwigPyIterator self, ptrdiff_t n) -> SwigPyIterator __sub__(SwigPyIterator self, SwigPyIterator 
x) -> ptrdiff_t """ return _libBornAgainBase.SwigPyIterator___sub__(self, *args) def __iter__(self): return self # Register SwigPyIterator in _libBornAgainBase: _libBornAgainBase.SwigPyIterator_swigregister(SwigPyIterator) SHARED_PTR_DISOWN = _libBornAgainBase.SHARED_PTR_DISOWN import warnings def deprecated(message): def deprecated_decorator(func): def deprecated_func(*args, **kwargs): warnings.simplefilter('always', DeprecationWarning) # turn off filter warnings.warn("{} is a deprecated function. {}".format(func.__name__, message), category=DeprecationWarning, stacklevel=2) warnings.simplefilter('default', DeprecationWarning) # reset filter return func(*args, **kwargs) return deprecated_func return deprecated_decorator class vdouble1d_t(object): r"""Proxy of C++ std::vector< double > class.""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag") __repr__ = _swig_repr def iterator(self): r"""iterator(vdouble1d_t self) -> SwigPyIterator""" return _libBornAgainBase.vdouble1d_t_iterator(self) def __iter__(self): return self.iterator() def __nonzero__(self): r"""__nonzero__(vdouble1d_t self) -> bool""" return _libBornAgainBase.vdouble1d_t___nonzero__(self) def __bool__(self): r"""__bool__(vdouble1d_t self) -> bool""" return _libBornAgainBase.vdouble1d_t___bool__(self) def __len__(self): r"""__len__(vdouble1d_t self) -> std::vector< double >::size_type""" return _libBornAgainBase.vdouble1d_t___len__(self) def __getslice__(self, i, j): r"""__getslice__(vdouble1d_t self, std::vector< double >::difference_type i, std::vector< double >::difference_type j) -> vdouble1d_t""" return _libBornAgainBase.vdouble1d_t___getslice__(self, i, j) def __setslice__(self, *args): r""" __setslice__(vdouble1d_t self, std::vector< double >::difference_type i, std::vector< double >::difference_type j) __setslice__(vdouble1d_t self, std::vector< double >::difference_type i, std::vector< double >::difference_type j, vdouble1d_t v) """ return 
_libBornAgainBase.vdouble1d_t___setslice__(self, *args) def __delslice__(self, i, j): r"""__delslice__(vdouble1d_t self, std::vector< double >::difference_type i, std::vector< double >::difference_type j)""" return _libBornAgainBase.vdouble1d_t___delslice__(self, i, j) def __delitem__(self, *args): r""" __delitem__(vdouble1d_t self, std::vector< double >::difference_type i) __delitem__(vdouble1d_t self, PySliceObject * slice) """ return _libBornAgainBase.vdouble1d_t___delitem__(self, *args) def __getitem__(self, *args): r""" __getitem__(vdouble1d_t self, PySliceObject * slice) -> vdouble1d_t __getitem__(vdouble1d_t self, std::vector< double >::difference_type i) -> std::vector< double >::value_type const & """ return _libBornAgainBase.vdouble1d_t___getitem__(self, *args) def __setitem__(self, *args): r""" __setitem__(vdouble1d_t self, PySliceObject * slice, vdouble1d_t v) __setitem__(vdouble1d_t self, PySliceObject * slice) __setitem__(vdouble1d_t self, std::vector< double >::difference_type i, std::vector< double >::value_type const & x) """ return _libBornAgainBase.vdouble1d_t___setitem__(self, *args) def pop(self): r"""pop(vdouble1d_t self) -> std::vector< double >::value_type""" return _libBornAgainBase.vdouble1d_t_pop(self) def append(self, x): r"""append(vdouble1d_t self, std::vector< double >::value_type const & x)""" return _libBornAgainBase.vdouble1d_t_append(self, x) def empty(self): r"""empty(vdouble1d_t self) -> bool""" return _libBornAgainBase.vdouble1d_t_empty(self) def size(self): r"""size(vdouble1d_t self) -> std::vector< double >::size_type""" return _libBornAgainBase.vdouble1d_t_size(self) def swap(self, v): r"""swap(vdouble1d_t self, vdouble1d_t v)""" return _libBornAgainBase.vdouble1d_t_swap(self, v) def begin(self): r"""begin(vdouble1d_t self) -> std::vector< double >::iterator""" return _libBornAgainBase.vdouble1d_t_begin(self) def end(self): r"""end(vdouble1d_t self) -> std::vector< double >::iterator""" return 
_libBornAgainBase.vdouble1d_t_end(self) def rbegin(self): r"""rbegin(vdouble1d_t self) -> std::vector< double >::reverse_iterator""" return _libBornAgainBase.vdouble1d_t_rbegin(self) def rend(self): r"""rend(vdouble1d_t self) -> std::vector< double >::reverse_iterator""" return _libBornAgainBase.vdouble1d_t_rend(self) def clear(self): r"""clear(vdouble1d_t self)""" return _libBornAgainBase.vdouble1d_t_clear(self) def get_allocator(self): r"""get_allocator(vdouble1d_t self) -> std::vector< double >::allocator_type""" return _libBornAgainBase.vdouble1d_t_get_allocator(self) def pop_back(self): r"""pop_back(vdouble1d_t self)""" return _libBornAgainBase.vdouble1d_t_pop_back(self) def erase(self, *args): r""" erase(vdouble1d_t self, std::vector< double >::iterator pos) -> std::vector< double >::iterator erase(vdouble1d_t self, std::vector< double >::iterator first, std::vector< double >::iterator last) -> std::vector< double >::iterator """ return _libBornAgainBase.vdouble1d_t_erase(self, *args) def __init__(self, *args): r""" __init__(vdouble1d_t self) -> vdouble1d_t __init__(vdouble1d_t self, vdouble1d_t other) -> vdouble1d_t __init__(vdouble1d_t self, std::vector< double >::size_type size) -> vdouble1d_t __init__(vdouble1d_t self, std::vector< double >::size_type size, std::vector< double >::value_type const & value) -> vdouble1d_t """ _libBornAgainBase.vdouble1d_t_swiginit(self, _libBornAgainBase.new_vdouble1d_t(*args)) def push_back(self, x): r"""push_back(vdouble1d_t self, std::vector< double >::value_type const & x)""" return _libBornAgainBase.vdouble1d_t_push_back(self, x) def front(self): r"""front(vdouble1d_t self) -> std::vector< double >::value_type const &""" return _libBornAgainBase.vdouble1d_t_front(self) def back(self): r"""back(vdouble1d_t self) -> std::vector< double >::value_type const &""" return _libBornAgainBase.vdouble1d_t_back(self) def assign(self, n, x): r"""assign(vdouble1d_t self, std::vector< double >::size_type n, std::vector< double 
>::value_type const & x)""" return _libBornAgainBase.vdouble1d_t_assign(self, n, x) def resize(self, *args): r""" resize(vdouble1d_t self, std::vector< double >::size_type new_size) resize(vdouble1d_t self, std::vector< double >::size_type new_size, std::vector< double >::value_type const & x) """ return _libBornAgainBase.vdouble1d_t_resize(self, *args) def insert(self, *args): r""" insert(vdouble1d_t self, std::vector< double >::iterator pos, std::vector< double >::value_type const & x) -> std::vector< double >::iterator insert(vdouble1d_t self, std::vector< double >::iterator pos, std::vector< double >::size_type n, std::vector< double >::value_type const & x) """ return _libBornAgainBase.vdouble1d_t_insert(self, *args) def reserve(self, n): r"""reserve(vdouble1d_t self, std::vector< double >::size_type n)""" return _libBornAgainBase.vdouble1d_t_reserve(self, n) def capacity(self): r"""capacity(vdouble1d_t self) -> std::vector< double >::size_type""" return _libBornAgainBase.vdouble1d_t_capacity(self) __swig_destroy__ = _libBornAgainBase.delete_vdouble1d_t # Register vdouble1d_t in _libBornAgainBase: _libBornAgainBase.vdouble1d_t_swigregister(vdouble1d_t) class vdouble2d_t(object): r"""Proxy of C++ std::vector< std::vector< double > > class.""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag") __repr__ = _swig_repr def iterator(self): r"""iterator(vdouble2d_t self) -> SwigPyIterator""" return _libBornAgainBase.vdouble2d_t_iterator(self) def __iter__(self): return self.iterator() def __nonzero__(self): r"""__nonzero__(vdouble2d_t self) -> bool""" return _libBornAgainBase.vdouble2d_t___nonzero__(self) def __bool__(self): r"""__bool__(vdouble2d_t self) -> bool""" return _libBornAgainBase.vdouble2d_t___bool__(self) def __len__(self): r"""__len__(vdouble2d_t self) -> std::vector< std::vector< double > >::size_type""" return _libBornAgainBase.vdouble2d_t___len__(self) def __getslice__(self, i, j): 
r"""__getslice__(vdouble2d_t self, std::vector< std::vector< double > >::difference_type i, std::vector< std::vector< double > >::difference_type j) -> vdouble2d_t""" return _libBornAgainBase.vdouble2d_t___getslice__(self, i, j) def __setslice__(self, *args): r""" __setslice__(vdouble2d_t self, std::vector< std::vector< double > >::difference_type i, std::vector< std::vector< double > >::difference_type j) __setslice__(vdouble2d_t self, std::vector< std::vector< double > >::difference_type i, std::vector< std::vector< double > >::difference_type j, vdouble2d_t v) """ return _libBornAgainBase.vdouble2d_t___setslice__(self, *args) def __delslice__(self, i, j): r"""__delslice__(vdouble2d_t self, std::vector< std::vector< double > >::difference_type i, std::vector< std::vector< double > >::difference_type j)""" return _libBornAgainBase.vdouble2d_t___delslice__(self, i, j) def __delitem__(self, *args): r""" __delitem__(vdouble2d_t self, std::vector< std::vector< double > >::difference_type i) __delitem__(vdouble2d_t self, PySliceObject * slice) """ return _libBornAgainBase.vdouble2d_t___delitem__(self, *args) def __getitem__(self, *args): r""" __getitem__(vdouble2d_t self, PySliceObject * slice) -> vdouble2d_t __getitem__(vdouble2d_t self, std::vector< std::vector< double > >::difference_type i) -> vdouble1d_t """ return _libBornAgainBase.vdouble2d_t___getitem__(self, *args) def __setitem__(self, *args): r""" __setitem__(vdouble2d_t self, PySliceObject * slice, vdouble2d_t v) __setitem__(vdouble2d_t self, PySliceObject * slice) __setitem__(vdouble2d_t self, std::vector< std::vector< double > >::difference_type i, vdouble1d_t x) """ return _libBornAgainBase.vdouble2d_t___setitem__(self, *args) def pop(self): r"""pop(vdouble2d_t self) -> vdouble1d_t""" return _libBornAgainBase.vdouble2d_t_pop(self) def append(self, x): r"""append(vdouble2d_t self, vdouble1d_t x)""" return _libBornAgainBase.vdouble2d_t_append(self, x) def empty(self): r"""empty(vdouble2d_t self) -> bool""" 
return _libBornAgainBase.vdouble2d_t_empty(self) def size(self): r"""size(vdouble2d_t self) -> std::vector< std::vector< double > >::size_type""" return _libBornAgainBase.vdouble2d_t_size(self) def swap(self, v): r"""swap(vdouble2d_t self, vdouble2d_t v)""" return _libBornAgainBase.vdouble2d_t_swap(self, v) def begin(self): r"""begin(vdouble2d_t self) -> std::vector< std::vector< double > >::iterator""" return _libBornAgainBase.vdouble2d_t_begin(self) def end(self): r"""end(vdouble2d_t self) -> std::vector< std::vector< double > >::iterator""" return _libBornAgainBase.vdouble2d_t_end(self) def rbegin(self): r"""rbegin(vdouble2d_t self) -> std::vector< std::vector< double > >::reverse_iterator""" return _libBornAgainBase.vdouble2d_t_rbegin(self) def rend(self): r"""rend(vdouble2d_t self) -> std::vector< std::vector< double > >::reverse_iterator""" return _libBornAgainBase.vdouble2d_t_rend(self) def clear(self): r"""clear(vdouble2d_t self)""" return _libBornAgainBase.vdouble2d_t_clear(self) def get_allocator(self): r"""get_allocator(vdouble2d_t self) -> std::vector< std::vector< double > >::allocator_type""" return _libBornAgainBase.vdouble2d_t_get_allocator(self) def pop_back(self): r"""pop_back(vdouble2d_t self)""" return _libBornAgainBase.vdouble2d_t_pop_back(self) def erase(self, *args): r""" erase(vdouble2d_t self, std::vector< std::vector< double > >::iterator pos) -> std::vector< std::vector< double > >::iterator erase(vdouble2d_t self, std::vector< std::vector< double > >::iterator first, std::vector< std::vector< double > >::iterator last) -> std::vector< std::vector< double > >::iterator """ return _libBornAgainBase.vdouble2d_t_erase(self, *args) def __init__(self, *args): r""" __init__(vdouble2d_t self) -> vdouble2d_t __init__(vdouble2d_t self, vdouble2d_t other) -> vdouble2d_t __init__(vdouble2d_t self, std::vector< std::vector< double > >::size_type size) -> vdouble2d_t __init__(vdouble2d_t self, std::vector< std::vector< double > >::size_type size, 
class vector_integer_t(object):
    r"""Proxy of C++ std::vector< int > class."""

    # SWIG-generated proxy: every method delegates to the compiled
    # _libBornAgainBase extension module. Do not hand-edit behavior here;
    # regenerate from the .i interface file instead.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def iterator(self):
        r"""iterator(vector_integer_t self) -> SwigPyIterator"""
        return _libBornAgainBase.vector_integer_t_iterator(self)

    def __iter__(self):
        return self.iterator()

    def __nonzero__(self):
        r"""__nonzero__(vector_integer_t self) -> bool"""
        return _libBornAgainBase.vector_integer_t___nonzero__(self)

    def __bool__(self):
        r"""__bool__(vector_integer_t self) -> bool"""
        return _libBornAgainBase.vector_integer_t___bool__(self)

    def __len__(self):
        r"""__len__(vector_integer_t self) -> std::vector< int >::size_type"""
        return _libBornAgainBase.vector_integer_t___len__(self)

    def __getslice__(self, i, j):
        r"""__getslice__(vector_integer_t self, std::vector< int >::difference_type i, std::vector< int >::difference_type j) -> vector_integer_t"""
        return _libBornAgainBase.vector_integer_t___getslice__(self, i, j)

    def __setslice__(self, *args):
        r"""
        __setslice__(vector_integer_t self, std::vector< int >::difference_type i, std::vector< int >::difference_type j)
        __setslice__(vector_integer_t self, std::vector< int >::difference_type i, std::vector< int >::difference_type j, vector_integer_t v)
        """
        return _libBornAgainBase.vector_integer_t___setslice__(self, *args)

    def __delslice__(self, i, j):
        r"""__delslice__(vector_integer_t self, std::vector< int >::difference_type i, std::vector< int >::difference_type j)"""
        return _libBornAgainBase.vector_integer_t___delslice__(self, i, j)

    def __delitem__(self, *args):
        r"""
        __delitem__(vector_integer_t self, std::vector< int >::difference_type i)
        __delitem__(vector_integer_t self, PySliceObject * slice)
        """
        return _libBornAgainBase.vector_integer_t___delitem__(self, *args)

    def __getitem__(self, *args):
        r"""
        __getitem__(vector_integer_t self, PySliceObject * slice) -> vector_integer_t
        __getitem__(vector_integer_t self, std::vector< int >::difference_type i) -> std::vector< int >::value_type const &
        """
        return _libBornAgainBase.vector_integer_t___getitem__(self, *args)

    def __setitem__(self, *args):
        r"""
        __setitem__(vector_integer_t self, PySliceObject * slice, vector_integer_t v)
        __setitem__(vector_integer_t self, PySliceObject * slice)
        __setitem__(vector_integer_t self, std::vector< int >::difference_type i, std::vector< int >::value_type const & x)
        """
        return _libBornAgainBase.vector_integer_t___setitem__(self, *args)

    def pop(self):
        r"""pop(vector_integer_t self) -> std::vector< int >::value_type"""
        return _libBornAgainBase.vector_integer_t_pop(self)

    def append(self, x):
        r"""append(vector_integer_t self, std::vector< int >::value_type const & x)"""
        return _libBornAgainBase.vector_integer_t_append(self, x)

    def empty(self):
        r"""empty(vector_integer_t self) -> bool"""
        return _libBornAgainBase.vector_integer_t_empty(self)

    def size(self):
        r"""size(vector_integer_t self) -> std::vector< int >::size_type"""
        return _libBornAgainBase.vector_integer_t_size(self)

    def swap(self, v):
        r"""swap(vector_integer_t self, vector_integer_t v)"""
        return _libBornAgainBase.vector_integer_t_swap(self, v)

    def begin(self):
        r"""begin(vector_integer_t self) -> std::vector< int >::iterator"""
        return _libBornAgainBase.vector_integer_t_begin(self)

    def end(self):
        r"""end(vector_integer_t self) -> std::vector< int >::iterator"""
        return _libBornAgainBase.vector_integer_t_end(self)

    def rbegin(self):
        r"""rbegin(vector_integer_t self) -> std::vector< int >::reverse_iterator"""
        return _libBornAgainBase.vector_integer_t_rbegin(self)

    def rend(self):
        r"""rend(vector_integer_t self) -> std::vector< int >::reverse_iterator"""
        return _libBornAgainBase.vector_integer_t_rend(self)

    def clear(self):
        r"""clear(vector_integer_t self)"""
        return _libBornAgainBase.vector_integer_t_clear(self)

    def get_allocator(self):
        r"""get_allocator(vector_integer_t self) -> std::vector< int >::allocator_type"""
        return _libBornAgainBase.vector_integer_t_get_allocator(self)

    def pop_back(self):
        r"""pop_back(vector_integer_t self)"""
        return _libBornAgainBase.vector_integer_t_pop_back(self)

    def erase(self, *args):
        r"""
        erase(vector_integer_t self, std::vector< int >::iterator pos) -> std::vector< int >::iterator
        erase(vector_integer_t self, std::vector< int >::iterator first, std::vector< int >::iterator last) -> std::vector< int >::iterator
        """
        return _libBornAgainBase.vector_integer_t_erase(self, *args)

    def __init__(self, *args):
        r"""
        __init__(vector_integer_t self) -> vector_integer_t
        __init__(vector_integer_t self, vector_integer_t other) -> vector_integer_t
        __init__(vector_integer_t self, std::vector< int >::size_type size) -> vector_integer_t
        __init__(vector_integer_t self, std::vector< int >::size_type size, std::vector< int >::value_type const & value) -> vector_integer_t
        """
        _libBornAgainBase.vector_integer_t_swiginit(self, _libBornAgainBase.new_vector_integer_t(*args))

    def push_back(self, x):
        r"""push_back(vector_integer_t self, std::vector< int >::value_type const & x)"""
        return _libBornAgainBase.vector_integer_t_push_back(self, x)

    def front(self):
        r"""front(vector_integer_t self) -> std::vector< int >::value_type const &"""
        return _libBornAgainBase.vector_integer_t_front(self)

    def back(self):
        r"""back(vector_integer_t self) -> std::vector< int >::value_type const &"""
        return _libBornAgainBase.vector_integer_t_back(self)

    def assign(self, n, x):
        r"""assign(vector_integer_t self, std::vector< int >::size_type n, std::vector< int >::value_type const & x)"""
        return _libBornAgainBase.vector_integer_t_assign(self, n, x)

    def resize(self, *args):
        r"""
        resize(vector_integer_t self, std::vector< int >::size_type new_size)
        resize(vector_integer_t self, std::vector< int >::size_type new_size, std::vector< int >::value_type const & x)
        """
        return _libBornAgainBase.vector_integer_t_resize(self, *args)

    def insert(self, *args):
        r"""
        insert(vector_integer_t self, std::vector< int >::iterator pos, std::vector< int >::value_type const & x) -> std::vector< int >::iterator
        insert(vector_integer_t self, std::vector< int >::iterator pos, std::vector< int >::size_type n, std::vector< int >::value_type const & x)
        """
        return _libBornAgainBase.vector_integer_t_insert(self, *args)

    def reserve(self, n):
        r"""reserve(vector_integer_t self, std::vector< int >::size_type n)"""
        return _libBornAgainBase.vector_integer_t_reserve(self, n)

    def capacity(self):
        r"""capacity(vector_integer_t self) -> std::vector< int >::size_type"""
        return _libBornAgainBase.vector_integer_t_capacity(self)

    __swig_destroy__ = _libBornAgainBase.delete_vector_integer_t

# Register vector_integer_t in _libBornAgainBase:
_libBornAgainBase.vector_integer_t_swigregister(vector_integer_t)
class vinteger2d_t(object):
    r"""Proxy of C++ std::vector< std::vector< int > > class."""

    # SWIG-generated proxy for a 2-D integer vector; elements are
    # vector_integer_t proxies. All calls delegate to _libBornAgainBase.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def iterator(self):
        r"""iterator(vinteger2d_t self) -> SwigPyIterator"""
        return _libBornAgainBase.vinteger2d_t_iterator(self)

    def __iter__(self):
        return self.iterator()

    def __nonzero__(self):
        r"""__nonzero__(vinteger2d_t self) -> bool"""
        return _libBornAgainBase.vinteger2d_t___nonzero__(self)

    def __bool__(self):
        r"""__bool__(vinteger2d_t self) -> bool"""
        return _libBornAgainBase.vinteger2d_t___bool__(self)

    def __len__(self):
        r"""__len__(vinteger2d_t self) -> std::vector< std::vector< int > >::size_type"""
        return _libBornAgainBase.vinteger2d_t___len__(self)

    def __getslice__(self, i, j):
        r"""__getslice__(vinteger2d_t self, std::vector< std::vector< int > >::difference_type i, std::vector< std::vector< int > >::difference_type j) -> vinteger2d_t"""
        return _libBornAgainBase.vinteger2d_t___getslice__(self, i, j)

    def __setslice__(self, *args):
        r"""
        __setslice__(vinteger2d_t self, std::vector< std::vector< int > >::difference_type i, std::vector< std::vector< int > >::difference_type j)
        __setslice__(vinteger2d_t self, std::vector< std::vector< int > >::difference_type i, std::vector< std::vector< int > >::difference_type j, vinteger2d_t v)
        """
        return _libBornAgainBase.vinteger2d_t___setslice__(self, *args)

    def __delslice__(self, i, j):
        r"""__delslice__(vinteger2d_t self, std::vector< std::vector< int > >::difference_type i, std::vector< std::vector< int > >::difference_type j)"""
        return _libBornAgainBase.vinteger2d_t___delslice__(self, i, j)

    def __delitem__(self, *args):
        r"""
        __delitem__(vinteger2d_t self, std::vector< std::vector< int > >::difference_type i)
        __delitem__(vinteger2d_t self, PySliceObject * slice)
        """
        return _libBornAgainBase.vinteger2d_t___delitem__(self, *args)

    def __getitem__(self, *args):
        r"""
        __getitem__(vinteger2d_t self, PySliceObject * slice) -> vinteger2d_t
        __getitem__(vinteger2d_t self, std::vector< std::vector< int > >::difference_type i) -> vector_integer_t
        """
        return _libBornAgainBase.vinteger2d_t___getitem__(self, *args)

    def __setitem__(self, *args):
        r"""
        __setitem__(vinteger2d_t self, PySliceObject * slice, vinteger2d_t v)
        __setitem__(vinteger2d_t self, PySliceObject * slice)
        __setitem__(vinteger2d_t self, std::vector< std::vector< int > >::difference_type i, vector_integer_t x)
        """
        return _libBornAgainBase.vinteger2d_t___setitem__(self, *args)

    def pop(self):
        r"""pop(vinteger2d_t self) -> vector_integer_t"""
        return _libBornAgainBase.vinteger2d_t_pop(self)

    def append(self, x):
        r"""append(vinteger2d_t self, vector_integer_t x)"""
        return _libBornAgainBase.vinteger2d_t_append(self, x)

    def empty(self):
        r"""empty(vinteger2d_t self) -> bool"""
        return _libBornAgainBase.vinteger2d_t_empty(self)

    def size(self):
        r"""size(vinteger2d_t self) -> std::vector< std::vector< int > >::size_type"""
        return _libBornAgainBase.vinteger2d_t_size(self)

    def swap(self, v):
        r"""swap(vinteger2d_t self, vinteger2d_t v)"""
        return _libBornAgainBase.vinteger2d_t_swap(self, v)

    def begin(self):
        r"""begin(vinteger2d_t self) -> std::vector< std::vector< int > >::iterator"""
        return _libBornAgainBase.vinteger2d_t_begin(self)

    def end(self):
        r"""end(vinteger2d_t self) -> std::vector< std::vector< int > >::iterator"""
        return _libBornAgainBase.vinteger2d_t_end(self)

    def rbegin(self):
        r"""rbegin(vinteger2d_t self) -> std::vector< std::vector< int > >::reverse_iterator"""
        return _libBornAgainBase.vinteger2d_t_rbegin(self)

    def rend(self):
        r"""rend(vinteger2d_t self) -> std::vector< std::vector< int > >::reverse_iterator"""
        return _libBornAgainBase.vinteger2d_t_rend(self)

    def clear(self):
        r"""clear(vinteger2d_t self)"""
        return _libBornAgainBase.vinteger2d_t_clear(self)

    def get_allocator(self):
        r"""get_allocator(vinteger2d_t self) -> std::vector< std::vector< int > >::allocator_type"""
        return _libBornAgainBase.vinteger2d_t_get_allocator(self)

    def pop_back(self):
        r"""pop_back(vinteger2d_t self)"""
        return _libBornAgainBase.vinteger2d_t_pop_back(self)

    def erase(self, *args):
        r"""
        erase(vinteger2d_t self, std::vector< std::vector< int > >::iterator pos) -> std::vector< std::vector< int > >::iterator
        erase(vinteger2d_t self, std::vector< std::vector< int > >::iterator first, std::vector< std::vector< int > >::iterator last) -> std::vector< std::vector< int > >::iterator
        """
        return _libBornAgainBase.vinteger2d_t_erase(self, *args)

    def __init__(self, *args):
        r"""
        __init__(vinteger2d_t self) -> vinteger2d_t
        __init__(vinteger2d_t self, vinteger2d_t other) -> vinteger2d_t
        __init__(vinteger2d_t self, std::vector< std::vector< int > >::size_type size) -> vinteger2d_t
        __init__(vinteger2d_t self, std::vector< std::vector< int > >::size_type size, vector_integer_t value) -> vinteger2d_t
        """
        _libBornAgainBase.vinteger2d_t_swiginit(self, _libBornAgainBase.new_vinteger2d_t(*args))

    def push_back(self, x):
        r"""push_back(vinteger2d_t self, vector_integer_t x)"""
        return _libBornAgainBase.vinteger2d_t_push_back(self, x)

    def front(self):
        r"""front(vinteger2d_t self) -> vector_integer_t"""
        return _libBornAgainBase.vinteger2d_t_front(self)

    def back(self):
        r"""back(vinteger2d_t self) -> vector_integer_t"""
        return _libBornAgainBase.vinteger2d_t_back(self)

    def assign(self, n, x):
        r"""assign(vinteger2d_t self, std::vector< std::vector< int > >::size_type n, vector_integer_t x)"""
        return _libBornAgainBase.vinteger2d_t_assign(self, n, x)

    def resize(self, *args):
        r"""
        resize(vinteger2d_t self, std::vector< std::vector< int > >::size_type new_size)
        resize(vinteger2d_t self, std::vector< std::vector< int > >::size_type new_size, vector_integer_t x)
        """
        return _libBornAgainBase.vinteger2d_t_resize(self, *args)

    def insert(self, *args):
        r"""
        insert(vinteger2d_t self, std::vector< std::vector< int > >::iterator pos, vector_integer_t x) -> std::vector< std::vector< int > >::iterator
        insert(vinteger2d_t self, std::vector< std::vector< int > >::iterator pos, std::vector< std::vector< int > >::size_type n, vector_integer_t x)
        """
        return _libBornAgainBase.vinteger2d_t_insert(self, *args)

    def reserve(self, n):
        r"""reserve(vinteger2d_t self, std::vector< std::vector< int > >::size_type n)"""
        return _libBornAgainBase.vinteger2d_t_reserve(self, n)

    def capacity(self):
        r"""capacity(vinteger2d_t self) -> std::vector< std::vector< int > >::size_type"""
        return _libBornAgainBase.vinteger2d_t_capacity(self)

    __swig_destroy__ = _libBornAgainBase.delete_vinteger2d_t

# Register vinteger2d_t in _libBornAgainBase:
_libBornAgainBase.vinteger2d_t_swigregister(vinteger2d_t)
class vector_longinteger_t(object):
    r"""Proxy of C++ std::vector< unsigned long > class."""

    # SWIG-generated proxy: every method delegates to the compiled
    # _libBornAgainBase extension module.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def iterator(self):
        r"""iterator(vector_longinteger_t self) -> SwigPyIterator"""
        return _libBornAgainBase.vector_longinteger_t_iterator(self)

    def __iter__(self):
        return self.iterator()

    def __nonzero__(self):
        r"""__nonzero__(vector_longinteger_t self) -> bool"""
        return _libBornAgainBase.vector_longinteger_t___nonzero__(self)

    def __bool__(self):
        r"""__bool__(vector_longinteger_t self) -> bool"""
        return _libBornAgainBase.vector_longinteger_t___bool__(self)

    def __len__(self):
        r"""__len__(vector_longinteger_t self) -> std::vector< unsigned long >::size_type"""
        return _libBornAgainBase.vector_longinteger_t___len__(self)

    def __getslice__(self, i, j):
        r"""__getslice__(vector_longinteger_t self, std::vector< unsigned long >::difference_type i, std::vector< unsigned long >::difference_type j) -> vector_longinteger_t"""
        return _libBornAgainBase.vector_longinteger_t___getslice__(self, i, j)

    def __setslice__(self, *args):
        r"""
        __setslice__(vector_longinteger_t self, std::vector< unsigned long >::difference_type i, std::vector< unsigned long >::difference_type j)
        __setslice__(vector_longinteger_t self, std::vector< unsigned long >::difference_type i, std::vector< unsigned long >::difference_type j, vector_longinteger_t v)
        """
        return _libBornAgainBase.vector_longinteger_t___setslice__(self, *args)

    def __delslice__(self, i, j):
        r"""__delslice__(vector_longinteger_t self, std::vector< unsigned long >::difference_type i, std::vector< unsigned long >::difference_type j)"""
        return _libBornAgainBase.vector_longinteger_t___delslice__(self, i, j)

    def __delitem__(self, *args):
        r"""
        __delitem__(vector_longinteger_t self, std::vector< unsigned long >::difference_type i)
        __delitem__(vector_longinteger_t self, PySliceObject * slice)
        """
        return _libBornAgainBase.vector_longinteger_t___delitem__(self, *args)

    def __getitem__(self, *args):
        r"""
        __getitem__(vector_longinteger_t self, PySliceObject * slice) -> vector_longinteger_t
        __getitem__(vector_longinteger_t self, std::vector< unsigned long >::difference_type i) -> std::vector< unsigned long >::value_type const &
        """
        return _libBornAgainBase.vector_longinteger_t___getitem__(self, *args)

    def __setitem__(self, *args):
        r"""
        __setitem__(vector_longinteger_t self, PySliceObject * slice, vector_longinteger_t v)
        __setitem__(vector_longinteger_t self, PySliceObject * slice)
        __setitem__(vector_longinteger_t self, std::vector< unsigned long >::difference_type i, std::vector< unsigned long >::value_type const & x)
        """
        return _libBornAgainBase.vector_longinteger_t___setitem__(self, *args)

    def pop(self):
        r"""pop(vector_longinteger_t self) -> std::vector< unsigned long >::value_type"""
        return _libBornAgainBase.vector_longinteger_t_pop(self)

    def append(self, x):
        r"""append(vector_longinteger_t self, std::vector< unsigned long >::value_type const & x)"""
        return _libBornAgainBase.vector_longinteger_t_append(self, x)

    def empty(self):
        r"""empty(vector_longinteger_t self) -> bool"""
        return _libBornAgainBase.vector_longinteger_t_empty(self)

    def size(self):
        r"""size(vector_longinteger_t self) -> std::vector< unsigned long >::size_type"""
        return _libBornAgainBase.vector_longinteger_t_size(self)

    def swap(self, v):
        r"""swap(vector_longinteger_t self, vector_longinteger_t v)"""
        return _libBornAgainBase.vector_longinteger_t_swap(self, v)

    def begin(self):
        r"""begin(vector_longinteger_t self) -> std::vector< unsigned long >::iterator"""
        return _libBornAgainBase.vector_longinteger_t_begin(self)

    def end(self):
        r"""end(vector_longinteger_t self) -> std::vector< unsigned long >::iterator"""
        return _libBornAgainBase.vector_longinteger_t_end(self)

    def rbegin(self):
        r"""rbegin(vector_longinteger_t self) -> std::vector< unsigned long >::reverse_iterator"""
        return _libBornAgainBase.vector_longinteger_t_rbegin(self)

    def rend(self):
        r"""rend(vector_longinteger_t self) -> std::vector< unsigned long >::reverse_iterator"""
        return _libBornAgainBase.vector_longinteger_t_rend(self)

    def clear(self):
        r"""clear(vector_longinteger_t self)"""
        return _libBornAgainBase.vector_longinteger_t_clear(self)

    def get_allocator(self):
        r"""get_allocator(vector_longinteger_t self) -> std::vector< unsigned long >::allocator_type"""
        return _libBornAgainBase.vector_longinteger_t_get_allocator(self)

    def pop_back(self):
        r"""pop_back(vector_longinteger_t self)"""
        return _libBornAgainBase.vector_longinteger_t_pop_back(self)

    def erase(self, *args):
        r"""
        erase(vector_longinteger_t self, std::vector< unsigned long >::iterator pos) -> std::vector< unsigned long >::iterator
        erase(vector_longinteger_t self, std::vector< unsigned long >::iterator first, std::vector< unsigned long >::iterator last) -> std::vector< unsigned long >::iterator
        """
        return _libBornAgainBase.vector_longinteger_t_erase(self, *args)

    def __init__(self, *args):
        r"""
        __init__(vector_longinteger_t self) -> vector_longinteger_t
        __init__(vector_longinteger_t self, vector_longinteger_t other) -> vector_longinteger_t
        __init__(vector_longinteger_t self, std::vector< unsigned long >::size_type size) -> vector_longinteger_t
        __init__(vector_longinteger_t self, std::vector< unsigned long >::size_type size, std::vector< unsigned long >::value_type const & value) -> vector_longinteger_t
        """
        _libBornAgainBase.vector_longinteger_t_swiginit(self, _libBornAgainBase.new_vector_longinteger_t(*args))

    def push_back(self, x):
        r"""push_back(vector_longinteger_t self, std::vector< unsigned long >::value_type const & x)"""
        return _libBornAgainBase.vector_longinteger_t_push_back(self, x)

    def front(self):
        r"""front(vector_longinteger_t self) -> std::vector< unsigned long >::value_type const &"""
        return _libBornAgainBase.vector_longinteger_t_front(self)

    def back(self):
        r"""back(vector_longinteger_t self) -> std::vector< unsigned long >::value_type const &"""
        return _libBornAgainBase.vector_longinteger_t_back(self)

    def assign(self, n, x):
        r"""assign(vector_longinteger_t self, std::vector< unsigned long >::size_type n, std::vector< unsigned long >::value_type const & x)"""
        return _libBornAgainBase.vector_longinteger_t_assign(self, n, x)

    def resize(self, *args):
        r"""
        resize(vector_longinteger_t self, std::vector< unsigned long >::size_type new_size)
        resize(vector_longinteger_t self, std::vector< unsigned long >::size_type new_size, std::vector< unsigned long >::value_type const & x)
        """
        return _libBornAgainBase.vector_longinteger_t_resize(self, *args)

    def insert(self, *args):
        r"""
        insert(vector_longinteger_t self, std::vector< unsigned long >::iterator pos, std::vector< unsigned long >::value_type const & x) -> std::vector< unsigned long >::iterator
        insert(vector_longinteger_t self, std::vector< unsigned long >::iterator pos, std::vector< unsigned long >::size_type n, std::vector< unsigned long >::value_type const & x)
        """
        return _libBornAgainBase.vector_longinteger_t_insert(self, *args)

    def reserve(self, n):
        r"""reserve(vector_longinteger_t self, std::vector< unsigned long >::size_type n)"""
        return _libBornAgainBase.vector_longinteger_t_reserve(self, n)

    def capacity(self):
        r"""capacity(vector_longinteger_t self) -> std::vector< unsigned long >::size_type"""
        return _libBornAgainBase.vector_longinteger_t_capacity(self)

    __swig_destroy__ = _libBornAgainBase.delete_vector_longinteger_t

# Register vector_longinteger_t in _libBornAgainBase:
_libBornAgainBase.vector_longinteger_t_swigregister(vector_longinteger_t)
class vector_complex_t(object):
    r"""Proxy of C++ std::vector< std::complex< double > > class."""

    # SWIG-generated proxy: every method delegates to the compiled
    # _libBornAgainBase extension module.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def iterator(self):
        r"""iterator(vector_complex_t self) -> SwigPyIterator"""
        return _libBornAgainBase.vector_complex_t_iterator(self)

    def __iter__(self):
        return self.iterator()

    def __nonzero__(self):
        r"""__nonzero__(vector_complex_t self) -> bool"""
        return _libBornAgainBase.vector_complex_t___nonzero__(self)

    def __bool__(self):
        r"""__bool__(vector_complex_t self) -> bool"""
        return _libBornAgainBase.vector_complex_t___bool__(self)

    def __len__(self):
        r"""__len__(vector_complex_t self) -> std::vector< std::complex< double > >::size_type"""
        return _libBornAgainBase.vector_complex_t___len__(self)

    def __getslice__(self, i, j):
        r"""__getslice__(vector_complex_t self, std::vector< std::complex< double > >::difference_type i, std::vector< std::complex< double > >::difference_type j) -> vector_complex_t"""
        return _libBornAgainBase.vector_complex_t___getslice__(self, i, j)

    def __setslice__(self, *args):
        r"""
        __setslice__(vector_complex_t self, std::vector< std::complex< double > >::difference_type i, std::vector< std::complex< double > >::difference_type j)
        __setslice__(vector_complex_t self, std::vector< std::complex< double > >::difference_type i, std::vector< std::complex< double > >::difference_type j, vector_complex_t v)
        """
        return _libBornAgainBase.vector_complex_t___setslice__(self, *args)

    def __delslice__(self, i, j):
        r"""__delslice__(vector_complex_t self, std::vector< std::complex< double > >::difference_type i, std::vector< std::complex< double > >::difference_type j)"""
        return _libBornAgainBase.vector_complex_t___delslice__(self, i, j)

    def __delitem__(self, *args):
        r"""
        __delitem__(vector_complex_t self, std::vector< std::complex< double > >::difference_type i)
        __delitem__(vector_complex_t self, PySliceObject * slice)
        """
        return _libBornAgainBase.vector_complex_t___delitem__(self, *args)

    def __getitem__(self, *args):
        r"""
        __getitem__(vector_complex_t self, PySliceObject * slice) -> vector_complex_t
        __getitem__(vector_complex_t self, std::vector< std::complex< double > >::difference_type i) -> std::vector< std::complex< double > >::value_type const &
        """
        return _libBornAgainBase.vector_complex_t___getitem__(self, *args)

    def __setitem__(self, *args):
        r"""
        __setitem__(vector_complex_t self, PySliceObject * slice, vector_complex_t v)
        __setitem__(vector_complex_t self, PySliceObject * slice)
        __setitem__(vector_complex_t self, std::vector< std::complex< double > >::difference_type i, std::vector< std::complex< double > >::value_type const & x)
        """
        return _libBornAgainBase.vector_complex_t___setitem__(self, *args)

    def pop(self):
        r"""pop(vector_complex_t self) -> std::vector< std::complex< double > >::value_type"""
        return _libBornAgainBase.vector_complex_t_pop(self)

    def append(self, x):
        r"""append(vector_complex_t self, std::vector< std::complex< double > >::value_type const & x)"""
        return _libBornAgainBase.vector_complex_t_append(self, x)

    def empty(self):
        r"""empty(vector_complex_t self) -> bool"""
        return _libBornAgainBase.vector_complex_t_empty(self)

    def size(self):
        r"""size(vector_complex_t self) -> std::vector< std::complex< double > >::size_type"""
        return _libBornAgainBase.vector_complex_t_size(self)

    def swap(self, v):
        r"""swap(vector_complex_t self, vector_complex_t v)"""
        return _libBornAgainBase.vector_complex_t_swap(self, v)

    def begin(self):
        r"""begin(vector_complex_t self) -> std::vector< std::complex< double > >::iterator"""
        return _libBornAgainBase.vector_complex_t_begin(self)

    def end(self):
        r"""end(vector_complex_t self) -> std::vector< std::complex< double > >::iterator"""
        return _libBornAgainBase.vector_complex_t_end(self)

    def rbegin(self):
        r"""rbegin(vector_complex_t self) -> std::vector< std::complex< double > >::reverse_iterator"""
        return _libBornAgainBase.vector_complex_t_rbegin(self)

    def rend(self):
        r"""rend(vector_complex_t self) -> std::vector< std::complex< double > >::reverse_iterator"""
        return _libBornAgainBase.vector_complex_t_rend(self)

    def clear(self):
        r"""clear(vector_complex_t self)"""
        return _libBornAgainBase.vector_complex_t_clear(self)

    def get_allocator(self):
        r"""get_allocator(vector_complex_t self) -> std::vector< std::complex< double > >::allocator_type"""
        return _libBornAgainBase.vector_complex_t_get_allocator(self)

    def pop_back(self):
        r"""pop_back(vector_complex_t self)"""
        return _libBornAgainBase.vector_complex_t_pop_back(self)

    def erase(self, *args):
        r"""
        erase(vector_complex_t self, std::vector< std::complex< double > >::iterator pos) -> std::vector< std::complex< double > >::iterator
        erase(vector_complex_t self, std::vector< std::complex< double > >::iterator first, std::vector< std::complex< double > >::iterator last) -> std::vector< std::complex< double > >::iterator
        """
        return _libBornAgainBase.vector_complex_t_erase(self, *args)

    def __init__(self, *args):
        r"""
        __init__(vector_complex_t self) -> vector_complex_t
        __init__(vector_complex_t self, vector_complex_t other) -> vector_complex_t
        __init__(vector_complex_t self, std::vector< std::complex< double > >::size_type size) -> vector_complex_t
        __init__(vector_complex_t self, std::vector< std::complex< double > >::size_type size, std::vector< std::complex< double > >::value_type const & value) -> vector_complex_t
        """
        _libBornAgainBase.vector_complex_t_swiginit(self, _libBornAgainBase.new_vector_complex_t(*args))

    def push_back(self, x):
        r"""push_back(vector_complex_t self, std::vector< std::complex< double > >::value_type const & x)"""
        return _libBornAgainBase.vector_complex_t_push_back(self, x)

    def front(self):
        r"""front(vector_complex_t self) -> std::vector< std::complex< double > >::value_type const &"""
        return _libBornAgainBase.vector_complex_t_front(self)

    def back(self):
        r"""back(vector_complex_t self) -> std::vector< std::complex< double > >::value_type const &"""
        return _libBornAgainBase.vector_complex_t_back(self)

    def assign(self, n, x):
        r"""assign(vector_complex_t self, std::vector< std::complex< double > >::size_type n, std::vector< std::complex< double > >::value_type const & x)"""
        return _libBornAgainBase.vector_complex_t_assign(self, n, x)

    def resize(self, *args):
        r"""
        resize(vector_complex_t self, std::vector< std::complex< double > >::size_type new_size)
        resize(vector_complex_t self, std::vector< std::complex< double > >::size_type new_size, std::vector< std::complex< double > >::value_type const & x)
        """
        return _libBornAgainBase.vector_complex_t_resize(self, *args)

    def insert(self, *args):
        r"""
        insert(vector_complex_t self, std::vector< std::complex< double > >::iterator pos, std::vector< std::complex< double > >::value_type const & x) -> std::vector< std::complex< double > >::iterator
        insert(vector_complex_t self, std::vector< std::complex< double > >::iterator pos, std::vector< std::complex< double > >::size_type n, std::vector< std::complex< double > >::value_type const & x)
        """
        return _libBornAgainBase.vector_complex_t_insert(self, *args)

    def reserve(self, n):
        r"""reserve(vector_complex_t self, std::vector< std::complex< double > >::size_type n)"""
        return _libBornAgainBase.vector_complex_t_reserve(self, n)

    def capacity(self):
        r"""capacity(vector_complex_t self) -> std::vector< std::complex< double > >::size_type"""
        return _libBornAgainBase.vector_complex_t_capacity(self)

    __swig_destroy__ = _libBornAgainBase.delete_vector_complex_t

# Register vector_complex_t in _libBornAgainBase:
_libBornAgainBase.vector_complex_t_swigregister(vector_complex_t)
class vector_string_t(object):
    r"""Proxy of C++ std::vector< std::string > class."""

    # SWIG-generated proxy: every method delegates to the compiled
    # _libBornAgainBase extension module.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def iterator(self):
        r"""iterator(vector_string_t self) -> SwigPyIterator"""
        return _libBornAgainBase.vector_string_t_iterator(self)

    def __iter__(self):
        return self.iterator()

    def __nonzero__(self):
        r"""__nonzero__(vector_string_t self) -> bool"""
        return _libBornAgainBase.vector_string_t___nonzero__(self)

    def __bool__(self):
        r"""__bool__(vector_string_t self) -> bool"""
        return _libBornAgainBase.vector_string_t___bool__(self)

    def __len__(self):
        r"""__len__(vector_string_t self) -> std::vector< std::string >::size_type"""
        return _libBornAgainBase.vector_string_t___len__(self)

    def __getslice__(self, i, j):
        r"""__getslice__(vector_string_t self, std::vector< std::string >::difference_type i, std::vector< std::string >::difference_type j) -> vector_string_t"""
        return _libBornAgainBase.vector_string_t___getslice__(self, i, j)

    def __setslice__(self, *args):
        r"""
        __setslice__(vector_string_t self, std::vector< std::string >::difference_type i, std::vector< std::string >::difference_type j)
        __setslice__(vector_string_t self, std::vector< std::string >::difference_type i, std::vector< std::string >::difference_type j, vector_string_t v)
        """
        return _libBornAgainBase.vector_string_t___setslice__(self, *args)

    def __delslice__(self, i, j):
        r"""__delslice__(vector_string_t self, std::vector< std::string >::difference_type i, std::vector< std::string >::difference_type j)"""
        return _libBornAgainBase.vector_string_t___delslice__(self, i, j)

    def __delitem__(self, *args):
        r"""
        __delitem__(vector_string_t self, std::vector< std::string >::difference_type i)
        __delitem__(vector_string_t self, PySliceObject * slice)
        """
        return _libBornAgainBase.vector_string_t___delitem__(self, *args)

    def __getitem__(self, *args):
        r"""
        __getitem__(vector_string_t self, PySliceObject * slice) -> vector_string_t
        __getitem__(vector_string_t self, std::vector< std::string >::difference_type i) -> std::vector< std::string >::value_type const &
        """
        return _libBornAgainBase.vector_string_t___getitem__(self, *args)

    def __setitem__(self, *args):
        r"""
        __setitem__(vector_string_t self, PySliceObject * slice, vector_string_t v)
        __setitem__(vector_string_t self, PySliceObject * slice)
        __setitem__(vector_string_t self, std::vector< std::string >::difference_type i, std::vector< std::string >::value_type const & x)
        """
        return _libBornAgainBase.vector_string_t___setitem__(self, *args)

    def pop(self):
        r"""pop(vector_string_t self) -> std::vector< std::string >::value_type"""
        return _libBornAgainBase.vector_string_t_pop(self)

    def append(self, x):
        r"""append(vector_string_t self, std::vector< std::string >::value_type const & x)"""
        return _libBornAgainBase.vector_string_t_append(self, x)

    def empty(self):
        r"""empty(vector_string_t self) -> bool"""
        return _libBornAgainBase.vector_string_t_empty(self)

    def size(self):
        r"""size(vector_string_t self) -> std::vector< std::string >::size_type"""
        return _libBornAgainBase.vector_string_t_size(self)

    def swap(self, v):
        r"""swap(vector_string_t self, vector_string_t v)"""
        return _libBornAgainBase.vector_string_t_swap(self, v)

    def begin(self):
        r"""begin(vector_string_t self) -> std::vector< std::string >::iterator"""
        return _libBornAgainBase.vector_string_t_begin(self)

    def end(self):
        r"""end(vector_string_t self) -> std::vector< std::string >::iterator"""
        return _libBornAgainBase.vector_string_t_end(self)

    def rbegin(self):
        r"""rbegin(vector_string_t self) -> std::vector< std::string >::reverse_iterator"""
        return _libBornAgainBase.vector_string_t_rbegin(self)

    def rend(self):
        r"""rend(vector_string_t self) -> std::vector< std::string >::reverse_iterator"""
        return _libBornAgainBase.vector_string_t_rend(self)

    def clear(self):
        r"""clear(vector_string_t self)"""
        return _libBornAgainBase.vector_string_t_clear(self)

    def get_allocator(self):
        r"""get_allocator(vector_string_t self) -> std::vector< std::string >::allocator_type"""
        return _libBornAgainBase.vector_string_t_get_allocator(self)

    def pop_back(self):
        r"""pop_back(vector_string_t self)"""
        return _libBornAgainBase.vector_string_t_pop_back(self)

    def erase(self, *args):
        r"""
        erase(vector_string_t self, std::vector< std::string >::iterator pos) -> std::vector< std::string >::iterator
        erase(vector_string_t self, std::vector< std::string >::iterator first, std::vector< std::string >::iterator last) -> std::vector< std::string >::iterator
        """
        return _libBornAgainBase.vector_string_t_erase(self, *args)

    def __init__(self, *args):
        r"""
        __init__(vector_string_t self) -> vector_string_t
        __init__(vector_string_t self, vector_string_t other) -> vector_string_t
        __init__(vector_string_t self, std::vector< std::string >::size_type size) -> vector_string_t
        __init__(vector_string_t self, std::vector< std::string >::size_type size, std::vector< std::string >::value_type const & value) -> vector_string_t
        """
        _libBornAgainBase.vector_string_t_swiginit(self, _libBornAgainBase.new_vector_string_t(*args))

    def push_back(self, x):
        r"""push_back(vector_string_t self, std::vector< std::string >::value_type const & x)"""
        return _libBornAgainBase.vector_string_t_push_back(self, x)

    def front(self):
        r"""front(vector_string_t self) -> std::vector< std::string >::value_type const &"""
        return _libBornAgainBase.vector_string_t_front(self)

    def back(self):
        r"""back(vector_string_t self) -> std::vector< std::string >::value_type const &"""
        return _libBornAgainBase.vector_string_t_back(self)

    def assign(self, n, x):
        r"""assign(vector_string_t self, std::vector< std::string >::size_type n, std::vector< std::string >::value_type const & x)"""
        return _libBornAgainBase.vector_string_t_assign(self, n, x)

    def resize(self, *args):
        r"""
        resize(vector_string_t self, std::vector< std::string >::size_type new_size)
        resize(vector_string_t self, std::vector< std::string >::size_type new_size, std::vector< std::string >::value_type const & x)
        """
        return _libBornAgainBase.vector_string_t_resize(self, *args)

    def insert(self, *args):
        r"""
        insert(vector_string_t self, std::vector< std::string >::iterator pos, std::vector< std::string >::value_type const & x) -> std::vector< std::string >::iterator
        insert(vector_string_t self, std::vector< std::string >::iterator pos, std::vector< std::string >::size_type n, std::vector< std::string >::value_type const & x)
        """
        return _libBornAgainBase.vector_string_t_insert(self, *args)

    def reserve(self, n):
        r"""reserve(vector_string_t self, std::vector< std::string >::size_type n)"""
        return _libBornAgainBase.vector_string_t_reserve(self, n)

    def capacity(self):
        r"""capacity(vector_string_t self) -> std::vector< std::string >::size_type"""
        return _libBornAgainBase.vector_string_t_capacity(self)

    __swig_destroy__ = _libBornAgainBase.delete_vector_string_t

# Register vector_string_t in _libBornAgainBase:
_libBornAgainBase.vector_string_t_swigregister(vector_string_t)
__bool__(self): r"""__bool__(map_string_double_t self) -> bool""" return _libBornAgainBase.map_string_double_t___bool__(self) def __len__(self): r"""__len__(map_string_double_t self) -> std::map< std::string,double >::size_type""" return _libBornAgainBase.map_string_double_t___len__(self) def __iter__(self): return self.key_iterator() def iterkeys(self): return self.key_iterator() def itervalues(self): return self.value_iterator() def iteritems(self): return self.iterator() def __getitem__(self, key): r"""__getitem__(map_string_double_t self, std::map< std::string,double >::key_type const & key) -> std::map< std::string,double >::mapped_type const &""" return _libBornAgainBase.map_string_double_t___getitem__(self, key) def __delitem__(self, key): r"""__delitem__(map_string_double_t self, std::map< std::string,double >::key_type const & key)""" return _libBornAgainBase.map_string_double_t___delitem__(self, key) def has_key(self, key): r"""has_key(map_string_double_t self, std::map< std::string,double >::key_type const & key) -> bool""" return _libBornAgainBase.map_string_double_t_has_key(self, key) def keys(self): r"""keys(map_string_double_t self) -> PyObject *""" return _libBornAgainBase.map_string_double_t_keys(self) def values(self): r"""values(map_string_double_t self) -> PyObject *""" return _libBornAgainBase.map_string_double_t_values(self) def items(self): r"""items(map_string_double_t self) -> PyObject *""" return _libBornAgainBase.map_string_double_t_items(self) def __contains__(self, key): r"""__contains__(map_string_double_t self, std::map< std::string,double >::key_type const & key) -> bool""" return _libBornAgainBase.map_string_double_t___contains__(self, key) def key_iterator(self): r"""key_iterator(map_string_double_t self) -> SwigPyIterator""" return _libBornAgainBase.map_string_double_t_key_iterator(self) def value_iterator(self): r"""value_iterator(map_string_double_t self) -> SwigPyIterator""" return 
_libBornAgainBase.map_string_double_t_value_iterator(self) def __setitem__(self, *args): r""" __setitem__(map_string_double_t self, std::map< std::string,double >::key_type const & key) __setitem__(map_string_double_t self, std::map< std::string,double >::key_type const & key, std::map< std::string,double >::mapped_type const & x) """ return _libBornAgainBase.map_string_double_t___setitem__(self, *args) def asdict(self): r"""asdict(map_string_double_t self) -> PyObject *""" return _libBornAgainBase.map_string_double_t_asdict(self) def __init__(self, *args): r""" __init__(map_string_double_t self, std::less< std::string > const & other) -> map_string_double_t __init__(map_string_double_t self) -> map_string_double_t __init__(map_string_double_t self, map_string_double_t other) -> map_string_double_t """ _libBornAgainBase.map_string_double_t_swiginit(self, _libBornAgainBase.new_map_string_double_t(*args)) def empty(self): r"""empty(map_string_double_t self) -> bool""" return _libBornAgainBase.map_string_double_t_empty(self) def size(self): r"""size(map_string_double_t self) -> std::map< std::string,double >::size_type""" return _libBornAgainBase.map_string_double_t_size(self) def swap(self, v): r"""swap(map_string_double_t self, map_string_double_t v)""" return _libBornAgainBase.map_string_double_t_swap(self, v) def begin(self): r"""begin(map_string_double_t self) -> std::map< std::string,double >::iterator""" return _libBornAgainBase.map_string_double_t_begin(self) def end(self): r"""end(map_string_double_t self) -> std::map< std::string,double >::iterator""" return _libBornAgainBase.map_string_double_t_end(self) def rbegin(self): r"""rbegin(map_string_double_t self) -> std::map< std::string,double >::reverse_iterator""" return _libBornAgainBase.map_string_double_t_rbegin(self) def rend(self): r"""rend(map_string_double_t self) -> std::map< std::string,double >::reverse_iterator""" return _libBornAgainBase.map_string_double_t_rend(self) def clear(self): 
r"""clear(map_string_double_t self)""" return _libBornAgainBase.map_string_double_t_clear(self) def get_allocator(self): r"""get_allocator(map_string_double_t self) -> std::map< std::string,double >::allocator_type""" return _libBornAgainBase.map_string_double_t_get_allocator(self) def count(self, x): r"""count(map_string_double_t self, std::map< std::string,double >::key_type const & x) -> std::map< std::string,double >::size_type""" return _libBornAgainBase.map_string_double_t_count(self, x) def erase(self, *args): r""" erase(map_string_double_t self, std::map< std::string,double >::key_type const & x) -> std::map< std::string,double >::size_type erase(map_string_double_t self, std::map< std::string,double >::iterator position) erase(map_string_double_t self, std::map< std::string,double >::iterator first, std::map< std::string,double >::iterator last) """ return _libBornAgainBase.map_string_double_t_erase(self, *args) def find(self, x): r"""find(map_string_double_t self, std::map< std::string,double >::key_type const & x) -> std::map< std::string,double >::iterator""" return _libBornAgainBase.map_string_double_t_find(self, x) def lower_bound(self, x): r"""lower_bound(map_string_double_t self, std::map< std::string,double >::key_type const & x) -> std::map< std::string,double >::iterator""" return _libBornAgainBase.map_string_double_t_lower_bound(self, x) def upper_bound(self, x): r"""upper_bound(map_string_double_t self, std::map< std::string,double >::key_type const & x) -> std::map< std::string,double >::iterator""" return _libBornAgainBase.map_string_double_t_upper_bound(self, x) __swig_destroy__ = _libBornAgainBase.delete_map_string_double_t # Register map_string_double_t in _libBornAgainBase: _libBornAgainBase.map_string_double_t_swigregister(map_string_double_t) class pvacuum_double_t(object): r"""Proxy of C++ std::pair< double,double > class.""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag") __repr__ = 
_swig_repr def __init__(self, *args): r""" __init__(pvacuum_double_t self) -> pvacuum_double_t __init__(pvacuum_double_t self, double first, double second) -> pvacuum_double_t __init__(pvacuum_double_t self, pvacuum_double_t other) -> pvacuum_double_t """ _libBornAgainBase.pvacuum_double_t_swiginit(self, _libBornAgainBase.new_pvacuum_double_t(*args)) first = property(_libBornAgainBase.pvacuum_double_t_first_get, _libBornAgainBase.pvacuum_double_t_first_set, doc=r"""first : double""") second = property(_libBornAgainBase.pvacuum_double_t_second_get, _libBornAgainBase.pvacuum_double_t_second_set, doc=r"""second : double""") def __len__(self): return 2 def __repr__(self): return str((self.first, self.second)) def __getitem__(self, index): if not (index % 2): return self.first else: return self.second def __setitem__(self, index, val): if not (index % 2): self.first = val else: self.second = val __swig_destroy__ = _libBornAgainBase.delete_pvacuum_double_t # Register pvacuum_double_t in _libBornAgainBase: _libBornAgainBase.pvacuum_double_t_swigregister(pvacuum_double_t) class vector_pvacuum_double_t(object): r"""Proxy of C++ std::vector< std::pair< double,double > > class.""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag") __repr__ = _swig_repr def iterator(self): r"""iterator(vector_pvacuum_double_t self) -> SwigPyIterator""" return _libBornAgainBase.vector_pvacuum_double_t_iterator(self) def __iter__(self): return self.iterator() def __nonzero__(self): r"""__nonzero__(vector_pvacuum_double_t self) -> bool""" return _libBornAgainBase.vector_pvacuum_double_t___nonzero__(self) def __bool__(self): r"""__bool__(vector_pvacuum_double_t self) -> bool""" return _libBornAgainBase.vector_pvacuum_double_t___bool__(self) def __len__(self): r"""__len__(vector_pvacuum_double_t self) -> std::vector< std::pair< double,double > >::size_type""" return _libBornAgainBase.vector_pvacuum_double_t___len__(self) def __getslice__(self, i, j): 
r"""__getslice__(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::difference_type i, std::vector< std::pair< double,double > >::difference_type j) -> vector_pvacuum_double_t""" return _libBornAgainBase.vector_pvacuum_double_t___getslice__(self, i, j) def __setslice__(self, *args): r""" __setslice__(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::difference_type i, std::vector< std::pair< double,double > >::difference_type j) __setslice__(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::difference_type i, std::vector< std::pair< double,double > >::difference_type j, vector_pvacuum_double_t v) """ return _libBornAgainBase.vector_pvacuum_double_t___setslice__(self, *args) def __delslice__(self, i, j): r"""__delslice__(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::difference_type i, std::vector< std::pair< double,double > >::difference_type j)""" return _libBornAgainBase.vector_pvacuum_double_t___delslice__(self, i, j) def __delitem__(self, *args): r""" __delitem__(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::difference_type i) __delitem__(vector_pvacuum_double_t self, PySliceObject * slice) """ return _libBornAgainBase.vector_pvacuum_double_t___delitem__(self, *args) def __getitem__(self, *args): r""" __getitem__(vector_pvacuum_double_t self, PySliceObject * slice) -> vector_pvacuum_double_t __getitem__(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::difference_type i) -> pvacuum_double_t """ return _libBornAgainBase.vector_pvacuum_double_t___getitem__(self, *args) def __setitem__(self, *args): r""" __setitem__(vector_pvacuum_double_t self, PySliceObject * slice, vector_pvacuum_double_t v) __setitem__(vector_pvacuum_double_t self, PySliceObject * slice) __setitem__(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::difference_type i, pvacuum_double_t x) """ return 
_libBornAgainBase.vector_pvacuum_double_t___setitem__(self, *args) def pop(self): r"""pop(vector_pvacuum_double_t self) -> pvacuum_double_t""" return _libBornAgainBase.vector_pvacuum_double_t_pop(self) def append(self, x): r"""append(vector_pvacuum_double_t self, pvacuum_double_t x)""" return _libBornAgainBase.vector_pvacuum_double_t_append(self, x) def empty(self): r"""empty(vector_pvacuum_double_t self) -> bool""" return _libBornAgainBase.vector_pvacuum_double_t_empty(self) def size(self): r"""size(vector_pvacuum_double_t self) -> std::vector< std::pair< double,double > >::size_type""" return _libBornAgainBase.vector_pvacuum_double_t_size(self) def swap(self, v): r"""swap(vector_pvacuum_double_t self, vector_pvacuum_double_t v)""" return _libBornAgainBase.vector_pvacuum_double_t_swap(self, v) def begin(self): r"""begin(vector_pvacuum_double_t self) -> std::vector< std::pair< double,double > >::iterator""" return _libBornAgainBase.vector_pvacuum_double_t_begin(self) def end(self): r"""end(vector_pvacuum_double_t self) -> std::vector< std::pair< double,double > >::iterator""" return _libBornAgainBase.vector_pvacuum_double_t_end(self) def rbegin(self): r"""rbegin(vector_pvacuum_double_t self) -> std::vector< std::pair< double,double > >::reverse_iterator""" return _libBornAgainBase.vector_pvacuum_double_t_rbegin(self) def rend(self): r"""rend(vector_pvacuum_double_t self) -> std::vector< std::pair< double,double > >::reverse_iterator""" return _libBornAgainBase.vector_pvacuum_double_t_rend(self) def clear(self): r"""clear(vector_pvacuum_double_t self)""" return _libBornAgainBase.vector_pvacuum_double_t_clear(self) def get_allocator(self): r"""get_allocator(vector_pvacuum_double_t self) -> std::vector< std::pair< double,double > >::allocator_type""" return _libBornAgainBase.vector_pvacuum_double_t_get_allocator(self) def pop_back(self): r"""pop_back(vector_pvacuum_double_t self)""" return _libBornAgainBase.vector_pvacuum_double_t_pop_back(self) def erase(self, 
*args): r""" erase(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::iterator pos) -> std::vector< std::pair< double,double > >::iterator erase(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::iterator first, std::vector< std::pair< double,double > >::iterator last) -> std::vector< std::pair< double,double > >::iterator """ return _libBornAgainBase.vector_pvacuum_double_t_erase(self, *args) def __init__(self, *args): r""" __init__(vector_pvacuum_double_t self) -> vector_pvacuum_double_t __init__(vector_pvacuum_double_t self, vector_pvacuum_double_t other) -> vector_pvacuum_double_t __init__(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::size_type size) -> vector_pvacuum_double_t __init__(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::size_type size, pvacuum_double_t value) -> vector_pvacuum_double_t """ _libBornAgainBase.vector_pvacuum_double_t_swiginit(self, _libBornAgainBase.new_vector_pvacuum_double_t(*args)) def push_back(self, x): r"""push_back(vector_pvacuum_double_t self, pvacuum_double_t x)""" return _libBornAgainBase.vector_pvacuum_double_t_push_back(self, x) def front(self): r"""front(vector_pvacuum_double_t self) -> pvacuum_double_t""" return _libBornAgainBase.vector_pvacuum_double_t_front(self) def back(self): r"""back(vector_pvacuum_double_t self) -> pvacuum_double_t""" return _libBornAgainBase.vector_pvacuum_double_t_back(self) def assign(self, n, x): r"""assign(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::size_type n, pvacuum_double_t x)""" return _libBornAgainBase.vector_pvacuum_double_t_assign(self, n, x) def resize(self, *args): r""" resize(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::size_type new_size) resize(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::size_type new_size, pvacuum_double_t x) """ return _libBornAgainBase.vector_pvacuum_double_t_resize(self, *args) 
    def insert(self, *args):
        r"""
        insert(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::iterator pos, pvacuum_double_t x) -> std::vector< std::pair< double,double > >::iterator
        insert(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::iterator pos, std::vector< std::pair< double,double > >::size_type n, pvacuum_double_t x)
        """
        return _libBornAgainBase.vector_pvacuum_double_t_insert(self, *args)

    def reserve(self, n):
        r"""reserve(vector_pvacuum_double_t self, std::vector< std::pair< double,double > >::size_type n)"""
        return _libBornAgainBase.vector_pvacuum_double_t_reserve(self, n)

    def capacity(self):
        r"""capacity(vector_pvacuum_double_t self) -> std::vector< std::pair< double,double > >::size_type"""
        return _libBornAgainBase.vector_pvacuum_double_t_capacity(self)
    __swig_destroy__ = _libBornAgainBase.delete_vector_pvacuum_double_t

# Register vector_pvacuum_double_t in _libBornAgainBase:
_libBornAgainBase.vector_pvacuum_double_t_swigregister(vector_pvacuum_double_t)


# Free functions on complex numbers, delegating to the C++ implementations.
def mul_I(z):
    r"""mul_I(complex_t z) -> complex_t"""
    return _libBornAgainBase.mul_I(z)

def exp_I(z):
    r"""exp_I(complex_t z) -> complex_t"""
    return _libBornAgainBase.exp_I(z)

def isfinite(z):
    r"""isfinite(complex_t z) -> bool"""
    return _libBornAgainBase.isfinite(z)


# Abstract base proxy: direct construction is forbidden; concrete subclasses
# are created on the C++ side and cloned through clone().
class ICloneable(object):
    r"""Proxy of C++ ICloneable class."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _libBornAgainBase.delete_ICloneable

    def clone(self):
        r"""clone(ICloneable self) -> ICloneable"""
        return _libBornAgainBase.ICloneable_clone(self)

    def transferToCPP(self):
        r"""transferToCPP(ICloneable self)"""
        # __disown__ hands ownership of the underlying object to C++.
        return self.__disown__()

# Register ICloneable in _libBornAgainBase:
_libBornAgainBase.ICloneable_swigregister(ICloneable)

# Module-level constants exported from the C++ library.
cvar = _libBornAgainBase.cvar
I = cvar.I

# Closed 1D interval [low, hig] with union and containment tests.
class Span(object):
    r"""Proxy of C++ Span class."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        r"""
        __init__(Span self) -> Span
        __init__(Span self, double low, double hig) -> Span
        """
        _libBornAgainBase.Span_swiginit(self, _libBornAgainBase.new_Span(*args))

    def __add__(self, increment):
        r"""__add__(Span self, double increment) -> Span"""
        return _libBornAgainBase.Span___add__(self, increment)

    def low(self):
        r"""low(Span self) -> double"""
        return _libBornAgainBase.Span_low(self)

    def hig(self):
        r"""hig(Span self) -> double"""
        return _libBornAgainBase.Span_hig(self)

    def contains(self, z):
        r"""contains(Span self, double z) -> bool"""
        return _libBornAgainBase.Span_contains(self, z)

    def pair(self):
        r"""pair(Span self) -> pvacuum_double_t"""
        return _libBornAgainBase.Span_pair(self)

    @staticmethod
    def unite(left, right):
        r"""unite(Span left, Span right) -> Span"""
        return _libBornAgainBase.Span_unite(left, right)
    __swig_destroy__ = _libBornAgainBase.delete_Span

# Register Span in _libBornAgainBase:
_libBornAgainBase.Span_swigregister(Span)


# Plain data holder for multithreaded-computation settings; all three fields
# are unsigned ints mapped as read/write properties.
class ThreadInfo(object):
    r"""Proxy of C++ ThreadInfo class."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    n_threads = property(_libBornAgainBase.ThreadInfo_n_threads_get, _libBornAgainBase.ThreadInfo_n_threads_set, doc=r"""n_threads : unsigned int""")
    n_batches = property(_libBornAgainBase.ThreadInfo_n_batches_get, _libBornAgainBase.ThreadInfo_n_batches_set, doc=r"""n_batches : unsigned int""")
    current_batch = property(_libBornAgainBase.ThreadInfo_current_batch_get, _libBornAgainBase.ThreadInfo_current_batch_set, doc=r"""current_batch : unsigned int""")

    def __init__(self):
        r"""__init__(ThreadInfo self) -> ThreadInfo"""
        _libBornAgainBase.ThreadInfo_swiginit(self, _libBornAgainBase.new_ThreadInfo())
    __swig_destroy__ = _libBornAgainBase.delete_ThreadInfo

# Register ThreadInfo in _libBornAgainBase:
_libBornAgainBase.ThreadInfo_swigregister(ThreadInfo)


# Angle-unit conversions and spherical-coordinate helpers on R3 vectors.
def rad2deg(angle):
    r"""rad2deg(double angle) -> double"""
    return _libBornAgainBase.rad2deg(angle)

def deg2rad(angle):
    r"""deg2rad(double angle) -> double"""
    return _libBornAgainBase.deg2rad(angle)

def theta(a):
    r"""theta(R3 a) -> double"""
    return _libBornAgainBase.theta(a)

def phi(a):
    r"""phi(R3 a) -> double"""
    return _libBornAgainBase.phi(a)

def cosTheta(a):
    r"""cosTheta(R3 a) -> double"""
    return _libBornAgainBase.cosTheta(a)

def sin2Theta(a):
    r"""sin2Theta(R3 a) -> double"""
    return _libBornAgainBase.sin2Theta(a)

def angle(a, b):
    r"""angle(R3 a, R3 b) -> double"""
    return _libBornAgainBase.angle(a, b)


# One bin of a 1D axis; constructed only through the FromTo/At factory
# methods (the plain constructor is disabled).
class Bin1D(object):
    r"""Proxy of C++ Bin1D class."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    @staticmethod
    def FromTo(lower, upper):
        r"""FromTo(double lower, double upper) -> Bin1D"""
        return _libBornAgainBase.Bin1D_FromTo(lower, upper)

    @staticmethod
    def At(*args):
        r"""
        At(double center) -> Bin1D
        At(double center, double halfwidth) -> Bin1D
        """
        return _libBornAgainBase.Bin1D_At(*args)

    def lowerBound(self):
        r"""lowerBound(Bin1D self) -> double"""
        return _libBornAgainBase.Bin1D_lowerBound(self)

    def upperBound(self):
        r"""upperBound(Bin1D self) -> double"""
        return _libBornAgainBase.Bin1D_upperBound(self)

    def center(self):
        r"""center(Bin1D self) -> double"""
        return _libBornAgainBase.Bin1D_center(self)

    def binSize(self):
        r"""binSize(Bin1D self) -> double"""
        return _libBornAgainBase.Bin1D_binSize(self)

    def __eq__(self, other):
        r"""__eq__(Bin1D self, Bin1D other) -> bool"""
        return _libBornAgainBase.Bin1D___eq__(self, other)

    def clipped_or_nil(self, lower, upper):
        r"""clipped_or_nil(Bin1D self, double lower, double upper) -> std::optional< Bin1D >"""
        return _libBornAgainBase.Bin1D_clipped_or_nil(self, lower, upper)
    __swig_destroy__ = _libBornAgainBase.delete_Bin1D

# Register Bin1D in _libBornAgainBase:
_libBornAgainBase.Bin1D_swigregister(Bin1D)

# Physical unit constants exported from the C++ library.
nanometer = cvar.nanometer
angstrom = cvar.angstrom
micrometer = cvar.micrometer
millimeter = cvar.millimeter
nm = cvar.nm
nm2 = cvar.nm2
rad = cvar.rad
deg = cvar.deg
tesla = cvar.tesla
gauss = cvar.gauss


# A named 1D axis made of Bin1D bins.
class Scale(object):
    r"""Proxy of C++ Scale class."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, name, bins):
        r"""__init__(Scale self, std::string name, std::vector< Bin1D,std::allocator< Bin1D > > const & bins) -> Scale"""
        _libBornAgainBase.Scale_swiginit(self, _libBornAgainBase.new_Scale(name, bins))

    def clone(self):
        r"""clone(Scale self) -> Scale"""
        return _libBornAgainBase.Scale_clone(self)

    def setAxisName(self, name):
        r"""setAxisName(Scale self, std::string name)"""
        return _libBornAgainBase.Scale_setAxisName(self, name)

    def axisName(self):
        r"""axisName(Scale self) -> std::string"""
        return _libBornAgainBase.Scale_axisName(self)

    def size(self):
        r"""size(Scale self) -> size_t"""
        return _libBornAgainBase.Scale_size(self)

    def min(self):
        r"""min(Scale self) -> double"""
        return _libBornAgainBase.Scale_min(self)

    def max(self):
        r"""max(Scale self) -> double"""
        return _libBornAgainBase.Scale_max(self)

    def bounds(self):
        r"""bounds(Scale self) -> pvacuum_double_t"""
        return _libBornAgainBase.Scale_bounds(self)

    def rangeComprises(self, value):
        r"""rangeComprises(Scale self, double value) -> bool"""
        return _libBornAgainBase.Scale_rangeComprises(self, value)

    def span(self):
        r"""span(Scale self) -> double"""
        return _libBornAgainBase.Scale_span(self)

    def center(self):
        r"""center(Scale self) -> double"""
        return _libBornAgainBase.Scale_center(self)

    def bin(self, i):
        r"""bin(Scale self, size_t i) -> Bin1D"""
        return _libBornAgainBase.Scale_bin(self, i)

    def binCenter(self, i):
        r"""binCenter(Scale self, size_t i) -> double"""
        return _libBornAgainBase.Scale_binCenter(self, i)
    def bins(self):
        r"""bins(Scale self) -> std::vector< Bin1D,std::allocator< Bin1D > > const &"""
        return _libBornAgainBase.Scale_bins(self)

    def binCenters(self):
        r"""binCenters(Scale self) -> vdouble1d_t"""
        return _libBornAgainBase.Scale_binCenters(self)

    def closestIndex(self, value):
        r"""closestIndex(Scale self, double value) -> size_t"""
        return _libBornAgainBase.Scale_closestIndex(self, value)

    def isEquiDivision(self):
        r"""isEquiDivision(Scale self) -> bool"""
        return _libBornAgainBase.Scale_isEquiDivision(self)

    def isScan(self):
        r"""isScan(Scale self) -> bool"""
        return _libBornAgainBase.Scale_isScan(self)

    def clipped(self, *args):
        r"""
        clipped(Scale self, double lower, double upper) -> Scale
        clipped(Scale self, pvacuum_double_t bounds) -> Scale
        """
        return _libBornAgainBase.Scale_clipped(self, *args)

    def __eq__(self, right):
        r"""__eq__(Scale self, Scale right) -> bool"""
        return _libBornAgainBase.Scale___eq__(self, right)
    __swig_destroy__ = _libBornAgainBase.delete_Scale

# Register Scale in _libBornAgainBase:
_libBornAgainBase.Scale_swigregister(Scale)


# Factory functions producing Scale instances.
def GenericScale(name, limits):
    r"""GenericScale(std::string const & name, vdouble1d_t limits) -> Scale"""
    return _libBornAgainBase.GenericScale(name, limits)

def ListScan(name, points):
    r"""ListScan(std::string const & name, vdouble1d_t points) -> Scale"""
    return _libBornAgainBase.ListScan(name, points)

def EquiDivision(name, nbins, start, end):
    r"""EquiDivision(std::string const & name, size_t nbins, double start, double end) -> Scale"""
    return _libBornAgainBase.EquiDivision(name, nbins, start, end)


# Multi-axis coordinate frame; supports conversion between a flat index
# and per-axis indices/coordinates.
class Frame(ICloneable):
    r"""Proxy of C++ Frame class."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, axes):
        r"""__init__(Frame self, std::vector< Scale const *,std::allocator< Scale const * > > && axes) -> Frame"""
        _libBornAgainBase.Frame_swiginit(self, _libBornAgainBase.new_Frame(axes))
    __swig_destroy__ = _libBornAgainBase.delete_Frame

    def clone(self):
        r"""clone(Frame self) -> Frame"""
        return _libBornAgainBase.Frame_clone(self)

    def rank(self):
        r"""rank(Frame self) -> size_t"""
        return _libBornAgainBase.Frame_rank(self)

    def size(self):
        r"""size(Frame self) -> size_t"""
        return _libBornAgainBase.Frame_size(self)

    def projectedSize(self, k_axis):
        r"""projectedSize(Frame self, size_t k_axis) -> size_t"""
        return _libBornAgainBase.Frame_projectedSize(self, k_axis)

    def axis(self, k_axis):
        r"""axis(Frame self, size_t k_axis) -> Scale"""
        return _libBornAgainBase.Frame_axis(self, k_axis)

    def xAxis(self):
        r"""xAxis(Frame self) -> Scale"""
        return _libBornAgainBase.Frame_xAxis(self)

    def yAxis(self):
        r"""yAxis(Frame self) -> Scale"""
        return _libBornAgainBase.Frame_yAxis(self)

    def projectedCoord(self, i_flat, k_axis):
        r"""projectedCoord(Frame self, size_t i_flat, size_t k_axis) -> double"""
        return _libBornAgainBase.Frame_projectedCoord(self, i_flat, k_axis)

    def allIndices(self, i_flat):
        r"""allIndices(Frame self, size_t i_flat) -> vector_integer_t"""
        return _libBornAgainBase.Frame_allIndices(self, i_flat)

    def projectedIndex(self, i_flat, k_axis):
        r"""projectedIndex(Frame self, size_t i_flat, size_t k_axis) -> size_t"""
        return _libBornAgainBase.Frame_projectedIndex(self, i_flat, k_axis)

    def toGlobalIndex(self, axes_indices):
        r"""toGlobalIndex(Frame self, std::vector< unsigned int,std::allocator< unsigned int > > const & axes_indices) -> size_t"""
        return _libBornAgainBase.Frame_toGlobalIndex(self, axes_indices)

    def hasSameSizes(self, arg2):
        r"""hasSameSizes(Frame self, Frame arg2) -> bool"""
        return _libBornAgainBase.Frame_hasSameSizes(self, arg2)

    def __eq__(self, arg2):
        r"""__eq__(Frame self, Frame arg2) -> bool"""
        return _libBornAgainBase.Frame___eq__(self, arg2)

# Register Frame in _libBornAgainBase:
_libBornAgainBase.Frame_swigregister(Frame)


# 3D vector of doubles (Vec3<double>) with the usual geometric operations.
class R3(object):
    r"""Proxy of C++ Vec3< double > class."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        r"""
        __init__(R3 self, double const x_, double const y_, double const z_) -> R3
        __init__(R3 self) -> R3
        """
        _libBornAgainBase.R3_swiginit(self, _libBornAgainBase.new_R3(*args))

    def x(self):
        r"""x(R3 self) -> double"""
        return _libBornAgainBase.R3_x(self)

    def y(self):
        r"""y(R3 self) -> double"""
        return _libBornAgainBase.R3_y(self)

    def z(self):
        r"""z(R3 self) -> double"""
        return _libBornAgainBase.R3_z(self)

    def setX(self, a):
        r"""setX(R3 self, double const & a)"""
        return _libBornAgainBase.R3_setX(self, a)

    def setY(self, a):
        r"""setY(R3 self, double const & a)"""
        return _libBornAgainBase.R3_setY(self, a)

    def setZ(self, a):
        r"""setZ(R3 self, double const & a)"""
        return _libBornAgainBase.R3_setZ(self, a)

    def __iadd__(self, v):
        r"""__iadd__(R3 self, R3 v) -> R3"""
        return _libBornAgainBase.R3___iadd__(self, v)

    def __isub__(self, v):
        r"""__isub__(R3 self, R3 v) -> R3"""
        return _libBornAgainBase.R3___isub__(self, v)

    def conj(self):
        r"""conj(R3 self) -> R3"""
        return _libBornAgainBase.R3_conj(self)

    def mag2(self):
        r"""mag2(R3 self) -> double"""
        return _libBornAgainBase.R3_mag2(self)

    def mag(self):
        r"""mag(R3 self) -> double"""
        return _libBornAgainBase.R3_mag(self)

    def magxy2(self):
        r"""magxy2(R3 self) -> double"""
        return _libBornAgainBase.R3_magxy2(self)

    def magxy(self):
        r"""magxy(R3 self) -> double"""
        return _libBornAgainBase.R3_magxy(self)

    def unit_or_throw(self):
        r"""unit_or_throw(R3 self) -> R3"""
        return _libBornAgainBase.R3_unit_or_throw(self)

    def unit_or_null(self):
        r"""unit_or_null(R3 self) -> R3"""
        return _libBornAgainBase.R3_unit_or_null(self)

    def complex(self):
        r"""complex(R3 self) -> C3"""
        return _libBornAgainBase.R3_complex(self)

    def real(self):
        r"""real(R3 self) -> R3"""
        return _libBornAgainBase.R3_real(self)

    def __eq__(self, other):
        r"""__eq__(R3 self, R3 other) -> bool"""
        return _libBornAgainBase.R3___eq__(self, other)

    def __ne__(self, other):
        r"""__ne__(R3 self, R3 other) -> bool"""
        return _libBornAgainBase.R3___ne__(self, other)

    def rotatedY(self, a):
        r"""rotatedY(R3 self, double a) -> R3"""
        return _libBornAgainBase.R3_rotatedY(self, a)

    def rotatedZ(self, a):
        r"""rotatedZ(R3 self, double a) -> R3"""
        return _libBornAgainBase.R3_rotatedZ(self, a)
    __swig_destroy__ = _libBornAgainBase.delete_R3

# Register R3 in _libBornAgainBase:
_libBornAgainBase.R3_swigregister(R3)


# 3D vector of complex doubles (Vec3<std::complex<double>>); mirrors the R3
# interface, with real() projecting back to an R3.
class C3(object):
    r"""Proxy of C++ Vec3< std::complex< double > > class."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        r"""
        __init__(C3 self, std::complex< double > const x_, std::complex< double > const y_, std::complex< double > const z_) -> C3
        __init__(C3 self) -> C3
        """
        _libBornAgainBase.C3_swiginit(self, _libBornAgainBase.new_C3(*args))

    def x(self):
        r"""x(C3 self) -> std::complex< double >"""
        return _libBornAgainBase.C3_x(self)

    def y(self):
        r"""y(C3 self) -> std::complex< double >"""
        return _libBornAgainBase.C3_y(self)

    def z(self):
        r"""z(C3 self) -> std::complex< double >"""
        return _libBornAgainBase.C3_z(self)

    def setX(self, a):
        r"""setX(C3 self, std::complex< double > const & a)"""
        return _libBornAgainBase.C3_setX(self, a)

    def setY(self, a):
        r"""setY(C3 self, std::complex< double > const & a)"""
        return _libBornAgainBase.C3_setY(self, a)

    def setZ(self, a):
        r"""setZ(C3 self, std::complex< double > const & a)"""
        return _libBornAgainBase.C3_setZ(self, a)

    def __iadd__(self, v):
        r"""__iadd__(C3 self, C3 v) -> C3"""
        return _libBornAgainBase.C3___iadd__(self, v)

    def __isub__(self, v):
        r"""__isub__(C3 self, C3 v) -> C3"""
        return _libBornAgainBase.C3___isub__(self, v)

    def conj(self):
        r"""conj(C3 self) -> C3"""
        return _libBornAgainBase.C3_conj(self)

    def mag2(self):
        r"""mag2(C3 self) -> double"""
        return _libBornAgainBase.C3_mag2(self)

    def mag(self):
        r"""mag(C3 self) -> double"""
        return _libBornAgainBase.C3_mag(self)

    def magxy2(self):
        r"""magxy2(C3 self) -> double"""
        return _libBornAgainBase.C3_magxy2(self)

    def magxy(self):
        r"""magxy(C3 self) -> double"""
        return _libBornAgainBase.C3_magxy(self)

    def unit_or_throw(self):
        r"""unit_or_throw(C3 self) -> C3"""
        return _libBornAgainBase.C3_unit_or_throw(self)

    def unit_or_null(self):
        r"""unit_or_null(C3 self) -> C3"""
        return _libBornAgainBase.C3_unit_or_null(self)

    def complex(self):
        r"""complex(C3 self) -> C3"""
        return _libBornAgainBase.C3_complex(self)

    def real(self):
        r"""real(C3 self) -> R3"""
        return _libBornAgainBase.C3_real(self)

    def __eq__(self, other):
        r"""__eq__(C3 self, C3 other) -> bool"""
        return _libBornAgainBase.C3___eq__(self, other)

    def __ne__(self, other):
        r"""__ne__(C3 self, C3 other) -> bool"""
        return _libBornAgainBase.C3___ne__(self, other)

    def rotatedY(self, a):
        r"""rotatedY(C3 self, double a) -> C3"""
        return _libBornAgainBase.C3_rotatedY(self, a)

    def rotatedZ(self, a):
        r"""rotatedZ(C3 self, double a) -> C3"""
        return _libBornAgainBase.C3_rotatedZ(self, a)
    __swig_destroy__ = _libBornAgainBase.delete_C3

# Register C3 in _libBornAgainBase:
_libBornAgainBase.C3_swigregister(C3)
PypiClean
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/chardet/chardistribution.py
from .euctwfreq import (EUCTW_CHAR_TO_FREQ_ORDER, EUCTW_TABLE_SIZE,
                        EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKR_CHAR_TO_FREQ_ORDER, EUCKR_TABLE_SIZE,
                        EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312_CHAR_TO_FREQ_ORDER, GB2312_TABLE_SIZE,
                         GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (BIG5_CHAR_TO_FREQ_ORDER, BIG5_TABLE_SIZE,
                       BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JIS_CHAR_TO_FREQ_ORDER, JIS_TABLE_SIZE,
                      JIS_TYPICAL_DISTRIBUTION_RATIO)


class CharDistributionAnalysis(object):
    """Base class for character-frequency-distribution analysers.

    Subclasses supply a language-specific "char order -> frequency order"
    table and override :meth:`get_order`; this base class accumulates the
    counts fed to it and converts them into a confidence value that the
    input text is in the analysed encoding.
    """
    # Once this many 2-byte characters have been seen, we have enough data
    # to draw a conclusion without waiting for more input.
    ENOUGH_DATA_THRESHOLD = 1024
    SURE_YES = 0.99
    SURE_NO = 0.01
    # With fewer frequent characters than this, the answer is always SURE_NO.
    MINIMUM_DATA_THRESHOLD = 3

    def __init__(self):
        # Mapping table to get frequency order from char order (get from
        # get_order())
        self._char_to_freq_order = None
        self._table_size = None  # Size of above table
        # This is a constant value which varies from language to language,
        # used in calculating confidence.  See
        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
        # for further detail.
        self.typical_distribution_ratio = None
        self._done = None
        self._total_chars = None
        self._freq_chars = None
        self.reset()

    def reset(self):
        """reset analyser, clear any state"""
        # If this flag is set to True, detection is done and conclusion has
        # been made
        self._done = False
        self._total_chars = 0  # Total characters encountered
        # The number of characters whose frequency order is less than 512
        self._freq_chars = 0

    def feed(self, char, char_len):
        """feed a character with known length"""
        if char_len == 2:
            # we only care about 2-bytes character in our distribution analysis
            order = self.get_order(char)
        else:
            order = -1
        if order >= 0:
            self._total_chars += 1
            # order is valid
            if order < self._table_size:
                # Frequency order below 512 means a "frequent" character
                # for this language.
                if 512 > self._char_to_freq_order[order]:
                    self._freq_chars += 1

    def get_confidence(self):
        """return confidence based on existing data"""
        # if we didn't receive any character in our consideration range,
        # return negative answer
        if self._total_chars <= 0 or self._freq_chars <= self.MINIMUM_DATA_THRESHOLD:
            return self.SURE_NO
        if self._total_chars != self._freq_chars:
            # Ratio of frequent to infrequent characters, scaled by the
            # language's typical distribution ratio.
            r = (self._freq_chars / ((self._total_chars - self._freq_chars)
                 * self.typical_distribution_ratio))
            if r < self.SURE_YES:
                return r
        # normalize confidence (we don't want to be 100% sure)
        return self.SURE_YES

    def got_enough_data(self):
        # It is not necessary to receive all data to draw conclusion.
        # For charset detection, certain amount of data is enough
        return self._total_chars > self.ENOUGH_DATA_THRESHOLD

    def get_order(self, byte_str):
        # We do not handle characters based on the original encoding string,
        # but convert this encoding string to a number, here called order.
        # This allows multiple encodings of a language to share one frequency
        # table.
        # Base class returns -1 (invalid); subclasses implement the mapping.
        return -1


class EUCTWDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for the EUC-TW (Taiwan) encoding."""

    def __init__(self):
        super(EUCTWDistributionAnalysis, self).__init__()
        self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER
        self._table_size = EUCTW_TABLE_SIZE
        self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str):
        # for euc-TW encoding, we are interested
        #   first  byte range: 0xc4 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        first_char = byte_str[0]
        if first_char >= 0xC4:
            return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1
        else:
            return -1


class EUCKRDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for the EUC-KR (Korean) encoding."""

    def __init__(self):
        super(EUCKRDistributionAnalysis, self).__init__()
        self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
        self._table_size = EUCKR_TABLE_SIZE
        self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str):
        # for euc-KR encoding, we are interested
        #   first  byte range: 0xb0 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        first_char = byte_str[0]
        if first_char >= 0xB0:
            return 94 * (first_char - 0xB0) + byte_str[1] - 0xA1
        else:
            return -1


class GB2312DistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for the GB2312 (simplified Chinese) encoding."""

    def __init__(self):
        super(GB2312DistributionAnalysis, self).__init__()
        self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER
        self._table_size = GB2312_TABLE_SIZE
        self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str):
        # for GB2312 encoding, we are interested
        #   first  byte range: 0xb0 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        first_char, second_char = byte_str[0], byte_str[1]
        if (first_char >= 0xB0) and (second_char >= 0xA1):
            return 94 * (first_char - 0xB0) + second_char - 0xA1
        else:
            return -1


class Big5DistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for the Big5 (traditional Chinese) encoding."""

    def __init__(self):
        super(Big5DistributionAnalysis, self).__init__()
        self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER
        self._table_size = BIG5_TABLE_SIZE
        self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str):
        # for big5 encoding, we are interested
        #   first  byte range: 0xa4 -- 0xfe
        #   second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        first_char, second_char = byte_str[0], byte_str[1]
        if first_char >= 0xA4:
            if second_char >= 0xA1:
                # second byte in the upper range: offset past the 63 slots
                # of the 0x40--0x7e sub-range
                return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
            else:
                return 157 * (first_char - 0xA4) + second_char - 0x40
        else:
            return -1


class SJISDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for the Shift-JIS (Japanese) encoding."""

    def __init__(self):
        super(SJISDistributionAnalysis, self).__init__()
        self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
        self._table_size = JIS_TABLE_SIZE
        self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str):
        # for sjis encoding, we are interested
        #   first  byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
        #   second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
        # no validation needed here. State machine has done that
        first_char, second_char = byte_str[0], byte_str[1]
        if (first_char >= 0x81) and (first_char <= 0x9F):
            order = 188 * (first_char - 0x81)
        elif (first_char >= 0xE0) and (first_char <= 0xEF):
            # second lead-byte range continues the row numbering at 31
            order = 188 * (first_char - 0xE0 + 31)
        else:
            return -1
        order = order + second_char - 0x40
        if second_char > 0x7F:
            # 0x7f itself is not a valid trail byte; skip over it
            order = -1
        return order


class EUCJPDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for the EUC-JP (Japanese) encoding."""

    def __init__(self):
        super(EUCJPDistributionAnalysis, self).__init__()
        self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
        self._table_size = JIS_TABLE_SIZE
        self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str):
        # for euc-JP encoding, we are interested
        #   first  byte range: 0xa0 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        char = byte_str[0]
        if char >= 0xA0:
            # NOTE(review): the guard accepts 0xA0 but the formula subtracts
            # 0xA1 — matches upstream chardet; a 0xA0 lead byte yields a
            # negative order which feed() then discards.
            return 94 * (char - 0xA1) + byte_str[1] - 0xa1
        else:
            return -1
PypiClean
/Gammalearn-0.11.0.tar.gz/Gammalearn-0.11.0/gammalearn/data/example_settings/experiment_settings_file_spawn.py
"""GammaLearn experiment settings file (example, multiprocessing 'spawn' start method).

This module is loaded by the GammaLearn experiment runner; every top-level
name is a configuration value.  The bare triple-quoted strings after each
assignment are the project's convention for documenting individual settings.
"""
import collections
import os
import importlib
from pathlib import Path
import math

import numpy as np
import torch
from torch.optim import lr_scheduler
from torchmetrics.classification import Accuracy, AUROC
from pytorch_lightning.profiler import SimpleProfiler, AdvancedProfiler, PyTorchProfiler

import gammalearn.criterions as criterions
import gammalearn.optimizers as optimizers
import gammalearn.steps as steps
from gammalearn.callbacks import (LogGradientNorm, LogModelWeightNorm, LogModelParameters,
                                  LogUncertaintyLogVars, LogUncertaintyPrecisions, LogGradNormWeights,
                                  LogReLUActivations, LogLinearGradient, LogFeatures, WriteDL2Files)
import gammalearn.utils as utils
import gammalearn.datasets as dsets
from gammalearn.data_handlers import GLearnDataModule
from gammalearn.constants import GAMMA_ID, PROTON_ID, ELECTRON_ID
import gammalearn.data.nets as nets

# Experiment settings
main_directory = str(Path.home()) + '/gammalearn_experiments'  # TODO change directory if needed
"""str: mandatory, where the experiments are stored"""
experiment_name = 'test_install'
"""str: mandatory, the name of the experiment. Should be different
for each experiment, except if one wants to resume an old experiment
"""
info = ''
"""str: optional"""
gpus = 1
"""int or list: mandatory, the number of gpus to use. If -1, run on all GPUS,
if None/0 run on CPU. If list, run on GPUS of list.
"""
log_every_n_steps = 3
"""int: optional, the interval in term of iterations for on screen
data printing during experiment. A small value may lead to a very large log file size.
"""
window_size = 100
"""int: optional, the interval in term of stored values for metric moving computation"""
checkpointing_options = dict(every_n_epochs=1, save_top_k=-1, save_last=True)
"""dict: optional, specific options for model checkpointing. See
https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.callbacks.ModelCheckpoint.html
for details.
"""
random_seed = 1
"""int: optional, the manual seed to make experiments more reproducible"""
monitor_device = True
"""bool: optional, whether or not monitoring the gpu utilization"""
particle_dict = {GAMMA_ID: 0,
                 PROTON_ID: 1,
                 # ELECTRON_ID: 2,
                 }
"""particle_dict is mandatory and maps cta particle types with class id.
e.g. gamma (0) is class 0"""
targets = collections.OrderedDict({
    'energy': {
        'output_shape': 1,
        'loss': torch.nn.L1Loss(reduction='none'),
        'loss_weight': 1,
        'metrics': {
            # 'functions': ,
        },
        'mt_balancing': True
    },
    'impact': {
        'output_shape': 2,
        'loss': torch.nn.L1Loss(reduction='none'),
        'loss_weight': 1,
        'metrics': {},
        'mt_balancing': True
    },
    'direction': {
        'output_shape': 2,
        'loss': torch.nn.L1Loss(reduction='none'),
        'loss_weight': 1,
        'metrics': {},
        'mt_balancing': True
    },
    'class': {
        'label_shape': 1,
        'output_shape': len(particle_dict),
        'loss': torch.nn.CrossEntropyLoss(),
        'loss_weight': 1,
        'metrics': {
            'Accuracy_particle': Accuracy(threshold=0.5),
            'AUC_particle': AUROC(pos_label=particle_dict[GAMMA_ID],
                                  num_classes=len(particle_dict),
                                  compute_on_step=True
                                  )
        },
        'mt_balancing': True
    }
})
"""dict: mandatory, defines for every objectives of the experiment
the loss function and its weight
"""
# dataset_class = dsets.MemoryLSTDataset
dataset_class = dsets.FileLSTDataset
# dataset_class = dsets.MockLSTDataset
"""Dataset: mandatory, the Dataset class to load the data.
Currently 2 classes are available, MemoryLSTDataset that loads images in memory,
and FileLSTDataset that loads images from files during training.
"""
dataset_parameters = {
    'camera_type': 'LST_LSTCam',
    'group_by': 'image',
    'use_time': True,
    'particle_dict': particle_dict,
    'targets': list(targets.keys()),
    # 'subarray': [1],
}
"""dict: mandatory, the parameters of the dataset.
camera_type is mandatory and can be:
'LST_LSTCam', 'MST_NectarCam', 'MST_FlashCam', 'SST_ASTRICam',
'SST1M_DigiCam', 'SST_CHEC', 'MST-SCT_SCTCam'.
group_by is mandatory and can be 'image', 'event_all_tels', 'event_triggered_tels'.
particle_dict is mandatory and maps cta particle types with class id.
e.g. gamma (0) is class 0, proton (101) is class 1 and electron (1) is class 2.
use_time (optional): whether or not to use time information
subarray (optional): the list of telescope ids to select as a subarray
"""
preprocessing_workers = 4
"""int: optional, the max number of workers to create dataset."""
dataloader_workers = 4
"""int: optional, the max number of workers for the data loaders.
If 0, data are loaded from the main thread."""
mp_start_method = 'spawn'
"""str: optional, the method to start new process in [fork, spawn]"""

# Net settings
# Uncomment following lines to import your network from an external file
# net_definition_file = utils.nets_definition_path()
# """str: mandatory, the file where to find the net definition to use"""
# # Load the network definitions module #
# spec = importlib.util.spec_from_file_location("nets", net_definition_file)
# nets = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(nets)

net_parameters_dic = {
    'model': nets.GammaPhysNet,
    'parameters': {
        'backbone': {
            'model': nets.ResNetAttentionIndexed,
            'parameters': {
                'num_layers': 3,
                'init': 'kaiming',
                'batch_norm': True,
                # 'init': 'orthogonal',
                'num_channels': 2,
                'block_features': [16, 32, 64],
                'attention_layer': (nets.DualAttention, {'ratio': 16}),
                # 'attention_layer': (nets.SqueezeExcite, {'ratio': 4}),
                # 'attention_layer': None,
                'non_linearity': torch.nn.ReLU,
            }
        },
        'fc_width': 256,
        'non_linearity': torch.nn.ReLU,
        'last_bias_init': None,
        'targets': {k: v.get('output_shape', 0) for k, v in targets.items()}
    }
}
"""dict: mandatory, the parameters of the network. Depends on the network chosen.
Must include at least a model and a parameters field.
"""
# checkpoint_path = main_directory + '/test_install/checkpoint_epoch=1.ckpt'
"""str: optional, the path where to find the backup of the model to resume"""

profiler = None
# profiler = {'profiler': SimpleProfiler,
#             'options': dict(extended=True)
#             }
"""str: optional, the profiler to use"""

######################################################################################################################
train = True
"""bool: mandatory, whether or not to train the model"""
# Data settings
data_module_train = {
    'module': GLearnDataModule,
    'paths': [
        Path(__file__).parent.absolute().joinpath('../../../share/data/MC_data').resolve().as_posix(),
    ],  # TODO fill your folder path
    'image_filter': {
        # utils.intensity_filter: {'intensity': [50, np.inf]},
        # utils.cleaning_filter: {'picture_thresh': 6, 'boundary_thresh': 3,
        #                         'keep_isolated_pixels': False, 'min_number_picture_neighbors': 2},
        # utils.leakage_filter: {'leakage2_cut': 0.2, 'picture_thresh': 6, 'boundary_thresh': 3,
        #                        'keep_isolated_pixels': False, 'min_number_picture_neighbors': 2},
    },
    'event_filter': {
        # utils.energyband_filter: {'energy': [0.02, 2], 'filter_only_gammas': True},  # in TeV
        # utils.emission_cone_filter: {'max_angle': 0.0698},
        # utils.impact_distance_filter: {'max_distance': 200},
        # utils.telescope_multiplicity_filter: {'multiplicity': 2},
    },
    'transform': None,
    'target_transform': None
}
"""paths->list: mandatory, the folders where to find the hdf5 data files"""
"""image_filter->dict: optional, the filter(s) to apply to the dataset at image level"""
"""event_filter->dict: optional, the filter(s) to apply to the dataset"""

validating_ratio = 0.2
"""float: mandatory, the ratio of data to create the validating set"""
max_epochs = 1
"""int: mandatory, the maximum number of epochs for the experiment"""
batch_size = 4
"""int: mandatory, the size of the mini-batch"""
# train_files_max_number = 1
"""int: optional, the max number of files to use for the dataset"""
pin_memory = True
"""bool: optional, whether or not to pin memory in dataloader"""

# Training settings
loss_options = {
    'conditional': True,
    'gamma_class': dataset_parameters['particle_dict'][0],
}
loss_balancing_options = {
    'logvar_coeff': [2, 2, 2, 0.5],  # for uncertainty
    'penalty': 0,  # for uncertainty
}
"""dict: mandatory, defines for every objectives of the experiment
the loss function and its weight
"""
loss_balancing = criterions.MultilossBalancing(targets, **loss_balancing_options)
"""function: mandatory, the function to compute the loss"""
optimizer_dic = {
    'network': optimizers.load_sgd,
    'loss_balancing': optimizers.load_adam
}
"""dict: mandatory, the optimizers to use for the experiment.
One may want to use several optimizers in case of GAN for example
"""
optimizer_parameters = {
    'network': {'lr': 1e-4,
                'weight_decay': 1e-7,
                'momentum': 0.9,
                'nesterov': True
                },
    'loss_balancing': {'lr': 0.025,
                       'weight_decay': 1e-4,
                       },
}
"""dict: mandatory, defines the parameters for every optimizers to use"""
# regularization = {'function': 'gradient_penalty',
#                   'weight': 10}
"""dict: optional, regularization to use during the training process.
See in optimizers.py for available regularization functions.
If `function` is set to 'gradient_penalty', the training step must be
`training_step_mt_gradient_penalty`."""
experiment_hparams = {
    'add_pointing': False
}
training_step = steps.get_training_step_mt(**experiment_hparams)
# training_step = steps.training_step_gradnorm
# training_step = steps.training_step_mt_gradient_penalty
"""function: mandatory, the function to compute the training step"""
eval_step = steps.get_eval_step_mt(**experiment_hparams)
"""function: mandatory, the function to compute the validating step"""
check_val_every_n_epoch = 1
"""int: optional, the interval in term of epoch for validating the model"""
lr_schedulers = {
    'network': {
        lr_scheduler.StepLR: {
            'gamma': 0.1,
            'step_size': 10,
        }
    },
    # 'network': {
    #     lr_scheduler.ReduceLROnPlateau: {
    #         'factor': 0.1,
    #         'patience': 30,
    #     }
    # },
    # 'network': {
    #     lr_scheduler.MultiStepLR: {
    #         'gamma': 0.1,
    #         'milestones': [10, 15, 18],
    #     }
    # },
    # 'network': {
    #     lr_scheduler.ExponentialLR: {
    #         'gamma': 0.9,
    #     }
    # },
}
"""dict: optional, defines the learning rate schedulers"""
# callbacks
training_callbacks = [
    LogGradientNorm(),
    LogModelWeightNorm(),
    LogModelParameters(),
    LogUncertaintyLogVars(),
    LogUncertaintyPrecisions(),
    # LogGradNormWeights(),
    LogReLUActivations(),
    LogLinearGradient(),
    # LogFeatures(),  # Do not use during training !! Very costly !!
]
"""dict: list of callbacks
"""

######################################################################################################################
# Testing settings
test = True
"""bool: mandatory, whether or not to test the model at the end of training"""
merge_test_datasets = False
"""bool: optional, whether or not to merge test datasets"""
data_module_test = {
    'module': GLearnDataModule,
    'paths': [
        Path(__file__).parent.absolute().joinpath('../../../share/data/MC_data').resolve().as_posix(),
    ],
    'image_filter': {
        utils.intensity_filter: {'intensity': [10, np.inf]},
        # # utils.cleaning_filter: {'picture_thresh': 6, 'boundary_thresh': 3,
        # #                         'keep_isolated_pixels': False, 'min_number_picture_neighbors': 2},
        # utils.leakage_filter: {'leakage2_cut': 0.2, 'picture_thresh': 6, 'boundary_thresh': 3,
        #                        'keep_isolated_pixels': False, 'min_number_picture_neighbors': 2},
    },
    'event_filter': {
        # utils.energyband_filter: {'energy': [0.02, 2], 'filter_only_gammas': True},  # in TeV
        # utils.emission_cone_filter: {'max_angle': 0.0698},
        # utils.impact_distance_filter: {'max_distance': 200},
        # utils.telescope_multiplicity_filter: {'multiplicity': 2},
    },
    'transform': None,
    'target_transform': None
}
"""
dict: optional, must at least contain a non-empty 'source':{'paths:[]'}
path->list of str: optional, the folders containing the hdf5 data files for the test
image_filter->dict: optional, filter(s) to apply to the test set at image level
event_filter->dict: optional, filter(s) to apply to the test set
"""
test_step = steps.get_test_step_mt(**experiment_hparams)
"""function: mandatory, the function to compute the validating step"""
dl2_path = ''
"""str: optional, path to store dl2 files"""
test_dataset_parameters = {
    # 'subarray': [1],
}
"""dict: optional, the parameters of the dataset specific to the test operation."""
test_batch_size = 10
"""int: optional, the size of the mini-batch for the test"""
test_callbacks = [
    # WriteDL2Files()
]
"""dict: list of callbacks"""
PypiClean
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/extract_psortb.py
import sys
from annogesiclib.gff3 import Gff3Parser


def import_psortb(seq_name, psortbs, local_name, local_score, type_, results):
    """Parse a PSORTb sequence header and append one record to *psortbs*.

    ``seq_name`` is expected to be ``"<genome>__<locus>_<strand>_<start>_<end>"``
    (double underscore between genome and feature part).  For ``type_ ==
    "multi"`` the localization names/scores come from *local_name*/*local_score*
    (joined with '/'); otherwise they come from the raw *results* fields.
    Returns the split header parts for the caller's table output.
    """
    seq_datas = seq_name.split("__")
    seq_id = seq_datas[0]
    features = seq_datas[1].split("_")
    # everything before the last three fields (strand, start, end) is the
    # protein id, which may itself contain underscores
    prot_id = "_".join(features[:-3])
    if type_ == "multi":
        psortbs.append({"seq_id": seq_id,
                        "protein_id": prot_id,
                        "strand": features[-3],
                        "start": int(features[-2]),
                        "end": int(features[-1]),
                        "local": "/".join(local_name),
                        "score": "/".join(local_score)})
    else:
        psortbs.append({"seq_id": seq_id,
                        "protein_id": prot_id,
                        "strand": features[-3],
                        "start": int(features[-2]),
                        "end": int(features[-1]),
                        "local": results[0],
                        "score": results[-1]})
    return {"datas": seq_datas, "features": features, "prot_id": prot_id}


def get_results(line, scores, psortbs, out_p, seq_name, fuzzy):
    '''Print the results of PSORTb for one prediction line.

    *line* is the text after "Final Prediction:".  A multi-localization
    prediction keeps every score within *fuzzy* of the best one; a unique
    prediction is written as-is.  Appends to *psortbs* via import_psortb and
    writes one table row to *out_p*.
    '''
    local_name = []
    local_score = []
    if len(line) == 0:
        pass
    elif "(This protein may have multiple localization sites.)" in line:
        results = line.split(" ")
        # best score first, then collect all candidates within the fuzzy range
        sort_scores = sorted(scores, key=lambda x: (x["score"]), reverse=True)
        first = True
        high_scores = []
        for score in sort_scores:
            if first:
                high_scores.append(score)
                first = False
                best_score = score
            else:
                if score["local"] != results[0]:
                    if score["score"] < (best_score["score"] - fuzzy):
                        # sorted descending: everything after is worse too
                        break
                    else:
                        high_scores.append(score)
        for high_score in high_scores:
            local_name.append(high_score["local"])
            local_score.append(str(high_score["score"]))
        seq_datas = import_psortb(seq_name, psortbs, local_name,
                                  local_score, "multi", results)
        out_p.write("\t".join([seq_datas["datas"][0], seq_datas["prot_id"],
                               "\t".join(seq_datas["features"][-3:]),
                               "/".join(local_name),
                               "/".join(local_score)]) + "\n")
    else:
        results = line.split(" ")
        seq_datas = import_psortb(seq_name, psortbs, None, None,
                                  "unique", results)
        out_p.write("\t".join([seq_datas["datas"][0], seq_datas["prot_id"],
                               "\t".join(seq_datas["features"][-3:]),
                               results[0], results[-1]]) + "\n")
    return local_name, local_score


def get_information(psortb_table, out_p, fuzzy):
    '''Get the information of PSORTb.

    Streams the raw PSORTb report *psortb_table* with a small state machine:
    "Localization Scores:" starts score collection, "Final Prediction:" stops
    it and marks the next line(s) as results, "SeqID:" resets for a new
    sequence.  Writes the tabular summary to *out_p* and returns the list of
    parsed records.
    '''
    scores = []
    psortbs = []
    seq_name = None
    # state flags: "score" = inside the Localization Scores block,
    # "result" = the next line(s) carry the final prediction
    detects = {"score": False, "result": False}
    with open(psortb_table, "r") as p_h:
        for line in p_h:
            line = line.strip()
            if (line.startswith("--")) or \
                    (line.startswith("Secondary localization(s):")):
                detects["result"] = False
            if detects["score"]:
                if "Final Prediction:" not in line:
                    datas = line.split(" ")
                    scores.append({"local": datas[0],
                                   "score": float(datas[-1])})
            if detects["result"]:
                local_name, local_score = get_results(
                    line, scores, psortbs, out_p, seq_name, fuzzy)
            if line.startswith("Final Prediction:"):
                detects["score"] = False
                detects["result"] = True
            if line.startswith("SeqID:"):
                seq_name = line.replace("SeqID: ", "")
                scores = []
            if line.startswith("Localization Scores:"):
                detects["score"] = True
    return psortbs


def print_gff(gffs, psortbs, out_m):
    """Write *gffs* to *out_m*, tagging matching CDS entries.

    A CDS matches a PSORTb record when strand/start/end agree and the
    protein id matches protein_id, locus_tag or ID (checked in that order);
    matched entries get a ``subcellular_localization`` attribute appended.
    """
    for gff in gffs:
        detect = False
        for psortb in psortbs:
            if (gff.feature == "CDS") and \
                    (gff.start == psortb["start"]) and \
                    (gff.end == psortb["end"]) and \
                    (gff.strand == psortb["strand"]):
                if "protein_id" in gff.attributes.keys():
                    if gff.attributes["protein_id"] == psortb["protein_id"]:
                        detect = True
                        break
                elif "locus_tag" in gff.attributes.keys():
                    if gff.attributes["locus_tag"] == psortb["protein_id"]:
                        detect = True
                        break
                else:
                    if gff.attributes["ID"] == psortb["protein_id"]:
                        detect = True
                        break
        if detect:
            gff.attribute_string = gff.attribute_string + \
                ";subcellular_localization=" + \
                psortb["local"]
            out_m.write("\t".join([gff.info_without_attributes,
                                   gff.attribute_string + "\n"]))
        else:
            out_m.write(gff.info + "\n")


def extract_psortb(psortb_table, out_psortb, merge_gff, out_merge, fuzzy):
    '''Extract and re-generate the output information of PSORTb.

    Parses *psortb_table* into *out_psortb* (tab-separated summary).  If
    *merge_gff* is given, the localizations are also merged into that
    annotation and written to *out_merge* (which is then mandatory).
    '''
    gffs = []
    if merge_gff:
        if out_merge is None:
            print("Error: Assign a name of output merged annotation file.")
            sys.exit()
        out_m = open(out_merge, "w")
        for entry in Gff3Parser().entries(open(merge_gff)):
            gffs.append(entry)
        gffs = sorted(gffs, key=lambda k: (k.seq_id, k.start, k.end,
                                           k.strand))
    else:
        out_m = None
    out_p = open(out_psortb, "w")
    out_p.write("#Genome\tProtein\tStrand\tStart\tEnd\tLocation\tScore\n")
    psortbs = get_information(psortb_table, out_p, fuzzy)
    if merge_gff:
        print_gff(gffs, psortbs, out_m)
PypiClean
/GTW-1.2.6.tar.gz/GTW-1.2.6/__test__/Attr.py
from __future__ import division, print_function from __future__ import absolute_import, unicode_literals ### enforce import order import _GTW._OMP._Auth.import_Auth import _GTW._OMP._PAP.import_PAP import _GTW._OMP._EVT.import_EVT import _GTW._OMP._SWP.import_SWP import _GTW._OMP._SRM.import_SRM from _GTW.__test__.model import * from _MOM.inspect import children_trans_iter from _TFL.pyk import pyk def _attr_map (Top) : result = TFL.defaultdict (list) for T, l in children_trans_iter (Top) : ET = T.E_Type for name, kind in sorted (pyk.iteritems (ET.attributes)) : if kind.show_in_ui and (ET.children_np or not ET.is_partial) : if not ET.type_name.startswith ("Auth") : k = (name, kind.DET_Root) result [k].append ((kind, ET)) return result # end def _attr_map _test_DET = """ >>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS Creating new scope MOMT__... >>> MOM = scope.MOM >>> nl = pyk.unichr (10) >>> a_map = _attr_map (MOM.Id_Entity) >>> for (name, DT), xs in sorted (pyk.iteritems (a_map)) : ... if len (xs) > 1 : ... print ("%%s [%%s]" %% (name, DT)) ... for kind, ET in sorted (xs, key = lambda x : x [1].i_rank) : ... flag = "*" if not ET.is_partial else "" ... 
print (" %%-30s%%-2s %%s" %% (ET.type_name, flag, kind.e_type.type_name)) addresses [PAP.Subject] PAP.Subject PAP.Subject PAP.Group PAP.Subject PAP.Legal_Entity PAP.Subject boat_class [SRM.Regatta] SRM.Regatta SRM.Regatta SRM.Regatta_C * SRM.Regatta_C SRM.Regatta_H * SRM.Regatta_H clips [SWP.Object_PN] SWP.Object_PN SWP.Object_PN SWP.Page * SWP.Object_PN SWP.Page_Y * SWP.Object_PN SWP.Clip_X * SWP.Object_PN SWP.Gallery * SWP.Object_PN SWP.Referral * SWP.Object_PN SRM.Page * SWP.Object_PN contents [SWP.Page_Mixin] SWP.Page * SWP.Page_Mixin SWP.Page_Y * SWP.Page_Mixin SWP.Clip_X * SWP.Page_Mixin SRM.Page * SWP.Page_Mixin creation [MOM.Id_Entity] MOM.Id_Entity MOM.Id_Entity MOM.Link MOM.Id_Entity MOM.Link1 MOM.Id_Entity MOM._Link_n_ MOM.Id_Entity MOM.Link2 MOM.Id_Entity MOM.Object MOM.Id_Entity PAP.Id_Entity MOM.Id_Entity PAP.Object MOM.Id_Entity PAP.Property MOM.Id_Entity PAP.Address * MOM.Id_Entity PAP.Subject MOM.Id_Entity PAP.Group MOM.Id_Entity PAP.Legal_Entity MOM.Id_Entity PAP.Company * MOM.Id_Entity PAP.Email * MOM.Id_Entity PAP.Phone * MOM.Id_Entity PAP.Person * MOM.Id_Entity PAP.Url * MOM.Id_Entity PAP.Link MOM.Id_Entity PAP.Link1 MOM.Id_Entity PAP.Address_Position * MOM.Id_Entity PAP._Link_n_ MOM.Id_Entity PAP.Link2 MOM.Id_Entity PAP.Subject_has_Property MOM.Id_Entity PAP.Person_has_Account * MOM.Id_Entity EVT.Id_Entity MOM.Id_Entity EVT.Object MOM.Id_Entity EVT.Calendar * MOM.Id_Entity EVT.Link MOM.Id_Entity EVT.Link1 MOM.Id_Entity EVT.Event * MOM.Id_Entity EVT.Event_occurs * MOM.Id_Entity EVT._Recurrence_Mixin_ MOM.Id_Entity EVT.Recurrence_Spec * MOM.Id_Entity EVT.Recurrence_Rule * MOM.Id_Entity SWP.Id_Entity MOM.Id_Entity SWP.Object MOM.Id_Entity SWP.Object_PN MOM.Id_Entity SWP.Page * MOM.Id_Entity SWP.Page_Y * MOM.Id_Entity SWP.Link MOM.Id_Entity SWP.Link1 MOM.Id_Entity SWP.Clip_O * MOM.Id_Entity SWP.Clip_X * MOM.Id_Entity SWP.Gallery * MOM.Id_Entity SWP.Picture * MOM.Id_Entity SWP.Referral * MOM.Id_Entity SRM.Id_Entity MOM.Id_Entity SRM.Object 
MOM.Id_Entity SRM._Boat_Class_ MOM.Id_Entity SRM.Boat_Class * MOM.Id_Entity SRM.Handicap * MOM.Id_Entity SRM.Link MOM.Id_Entity SRM.Link1 MOM.Id_Entity SRM.Boat * MOM.Id_Entity SRM.Club * MOM.Id_Entity SRM.Regatta_Event * MOM.Id_Entity SRM.Page * MOM.Id_Entity SRM.Regatta MOM.Id_Entity SRM.Regatta_C * MOM.Id_Entity SRM.Regatta_H * MOM.Id_Entity SRM.Sailor * MOM.Id_Entity SRM._Link_n_ MOM.Id_Entity SRM.Link2 MOM.Id_Entity SRM.Boat_in_Regatta * MOM.Id_Entity SRM.Race_Result * MOM.Id_Entity SRM.Team * MOM.Id_Entity SRM.Crew_Member * MOM.Id_Entity SRM.Team_has_Boat_in_Regatta * MOM.Id_Entity PAP.Subject_has_Address MOM.Id_Entity PAP.Subject_has_Email MOM.Id_Entity PAP.Subject_has_Phone MOM.Id_Entity PAP.Subject_has_Url MOM.Id_Entity PAP.Company_has_Url * MOM.Id_Entity PAP.Person_has_Url * MOM.Id_Entity PAP.Company_has_Phone * MOM.Id_Entity PAP.Person_has_Phone * MOM.Id_Entity PAP.Company_has_Email * MOM.Id_Entity PAP.Person_has_Email * MOM.Id_Entity PAP.Company_has_Address * MOM.Id_Entity PAP.Person_has_Address * MOM.Id_Entity date [SWP.Object_PN] SWP.Object_PN SWP.Object_PN SWP.Page * SWP.Object_PN SWP.Page_Y * SWP.Object_PN SWP.Clip_X * SWP.Object_PN SWP.Gallery * SWP.Object_PN SWP.Referral * SWP.Object_PN SRM.Page * SWP.Object_PN desc [PAP.Property] PAP.Property PAP.Property PAP.Address * PAP.Address PAP.Email * PAP.Email PAP.Phone * PAP.Phone PAP.Url * PAP.Url desc [PAP.Subject_has_Property] PAP.Subject_has_Property PAP.Subject_has_Property PAP.Subject_has_Address PAP.Subject_has_Property PAP.Subject_has_Email PAP.Subject_has_Property PAP.Subject_has_Phone PAP.Subject_has_Property PAP.Subject_has_Url PAP.Subject_has_Property PAP.Company_has_Url * PAP.Subject_has_Property PAP.Person_has_Url * PAP.Subject_has_Property PAP.Company_has_Phone * PAP.Subject_has_Property PAP.Person_has_Phone * PAP.Subject_has_Property PAP.Company_has_Email * PAP.Subject_has_Property PAP.Person_has_Email * PAP.Subject_has_Property PAP.Company_has_Address * PAP.Subject_has_Property 
PAP.Person_has_Address * PAP.Subject_has_Property discards [SRM.Regatta] SRM.Regatta SRM.Regatta SRM.Regatta_C * SRM.Regatta SRM.Regatta_H * SRM.Regatta emails [PAP.Subject] PAP.Subject PAP.Subject PAP.Group PAP.Subject PAP.Legal_Entity PAP.Subject events [MOM.Id_Entity] MOM.Id_Entity MOM.Id_Entity MOM.Link MOM.Id_Entity MOM.Link1 MOM.Id_Entity MOM._Link_n_ MOM.Id_Entity MOM.Link2 MOM.Id_Entity MOM.Object MOM.Id_Entity PAP.Id_Entity MOM.Id_Entity PAP.Object MOM.Id_Entity PAP.Property MOM.Id_Entity PAP.Address * MOM.Id_Entity PAP.Subject MOM.Id_Entity PAP.Group MOM.Id_Entity PAP.Legal_Entity MOM.Id_Entity PAP.Company * MOM.Id_Entity PAP.Email * MOM.Id_Entity PAP.Phone * MOM.Id_Entity PAP.Person * MOM.Id_Entity PAP.Url * MOM.Id_Entity PAP.Link MOM.Id_Entity PAP.Link1 MOM.Id_Entity PAP.Address_Position * MOM.Id_Entity PAP._Link_n_ MOM.Id_Entity PAP.Link2 MOM.Id_Entity PAP.Subject_has_Property MOM.Id_Entity PAP.Person_has_Account * MOM.Id_Entity EVT.Id_Entity MOM.Id_Entity EVT.Object MOM.Id_Entity EVT.Calendar * MOM.Id_Entity EVT.Link MOM.Id_Entity EVT.Link1 MOM.Id_Entity EVT.Event * MOM.Id_Entity EVT.Event_occurs * MOM.Id_Entity EVT._Recurrence_Mixin_ MOM.Id_Entity EVT.Recurrence_Spec * MOM.Id_Entity EVT.Recurrence_Rule * MOM.Id_Entity SWP.Id_Entity MOM.Id_Entity SWP.Object MOM.Id_Entity SWP.Object_PN MOM.Id_Entity SWP.Page * MOM.Id_Entity SWP.Page_Y * MOM.Id_Entity SWP.Link MOM.Id_Entity SWP.Link1 MOM.Id_Entity SWP.Clip_O * MOM.Id_Entity SWP.Clip_X * MOM.Id_Entity SWP.Gallery * MOM.Id_Entity SWP.Picture * MOM.Id_Entity SWP.Referral * MOM.Id_Entity SRM.Id_Entity MOM.Id_Entity SRM.Object MOM.Id_Entity SRM._Boat_Class_ MOM.Id_Entity SRM.Boat_Class * MOM.Id_Entity SRM.Handicap * MOM.Id_Entity SRM.Link MOM.Id_Entity SRM.Link1 MOM.Id_Entity SRM.Boat * MOM.Id_Entity SRM.Club * MOM.Id_Entity SRM.Regatta_Event * MOM.Id_Entity SRM.Page * MOM.Id_Entity SRM.Regatta MOM.Id_Entity SRM.Regatta_C * MOM.Id_Entity SRM.Regatta_H * MOM.Id_Entity SRM.Sailor * MOM.Id_Entity SRM._Link_n_ 
MOM.Id_Entity SRM.Link2 MOM.Id_Entity SRM.Boat_in_Regatta * MOM.Id_Entity SRM.Race_Result * MOM.Id_Entity SRM.Team * MOM.Id_Entity SRM.Crew_Member * MOM.Id_Entity SRM.Team_has_Boat_in_Regatta * MOM.Id_Entity PAP.Subject_has_Address MOM.Id_Entity PAP.Subject_has_Email MOM.Id_Entity PAP.Subject_has_Phone MOM.Id_Entity PAP.Subject_has_Url MOM.Id_Entity PAP.Company_has_Url * MOM.Id_Entity PAP.Person_has_Url * MOM.Id_Entity PAP.Company_has_Phone * MOM.Id_Entity PAP.Person_has_Phone * MOM.Id_Entity PAP.Company_has_Email * MOM.Id_Entity PAP.Person_has_Email * MOM.Id_Entity PAP.Company_has_Address * MOM.Id_Entity PAP.Person_has_Address * MOM.Id_Entity extension [PAP.Subject_has_Phone] PAP.Subject_has_Phone PAP.Subject_has_Phone PAP.Company_has_Phone * PAP.Subject_has_Phone PAP.Person_has_Phone * PAP.Subject_has_Phone format [SWP.Page_Mixin] SWP.Page * SWP.Page_Mixin SWP.Page_Y * SWP.Page_Mixin SWP.Clip_X * SWP.Page_Mixin SRM.Page * SWP.Page_Mixin head_line [SWP.Page_Mixin] SWP.Page * SWP.Page_Mixin SWP.Page_Y * SWP.Page_Mixin SWP.Clip_X * SWP.Page_Mixin SRM.Page * SWP.Page_Mixin hidden [SWP.Object_PN] SWP.Object_PN SWP.Object_PN SWP.Page * SWP.Object_PN SWP.Page_Y * SWP.Object_PN SWP.Clip_X * SWP.Object_PN SWP.Gallery * SWP.Object_PN SWP.Referral * SWP.Object_PN SRM.Page * SWP.Object_PN is_cancelled [SRM.Regatta] SRM.Regatta SRM.Regatta SRM.Regatta_C * SRM.Regatta SRM.Regatta_H * SRM.Regatta kind [SRM.Regatta] SRM.Regatta SRM.Regatta SRM.Regatta_C * SRM.Regatta SRM.Regatta_H * SRM.Regatta last_change [MOM.Id_Entity] MOM.Id_Entity MOM.Id_Entity MOM.Link MOM.Id_Entity MOM.Link1 MOM.Id_Entity MOM._Link_n_ MOM.Id_Entity MOM.Link2 MOM.Id_Entity MOM.Object MOM.Id_Entity PAP.Id_Entity MOM.Id_Entity PAP.Object MOM.Id_Entity PAP.Property MOM.Id_Entity PAP.Address * MOM.Id_Entity PAP.Subject MOM.Id_Entity PAP.Group MOM.Id_Entity PAP.Legal_Entity MOM.Id_Entity PAP.Company * MOM.Id_Entity PAP.Email * MOM.Id_Entity PAP.Phone * MOM.Id_Entity PAP.Person * MOM.Id_Entity PAP.Url * 
MOM.Id_Entity PAP.Link MOM.Id_Entity PAP.Link1 MOM.Id_Entity PAP.Address_Position * MOM.Id_Entity PAP._Link_n_ MOM.Id_Entity PAP.Link2 MOM.Id_Entity PAP.Subject_has_Property MOM.Id_Entity PAP.Person_has_Account * MOM.Id_Entity EVT.Id_Entity MOM.Id_Entity EVT.Object MOM.Id_Entity EVT.Calendar * MOM.Id_Entity EVT.Link MOM.Id_Entity EVT.Link1 MOM.Id_Entity EVT.Event * MOM.Id_Entity EVT.Event_occurs * MOM.Id_Entity EVT._Recurrence_Mixin_ MOM.Id_Entity EVT.Recurrence_Spec * MOM.Id_Entity EVT.Recurrence_Rule * MOM.Id_Entity SWP.Id_Entity MOM.Id_Entity SWP.Object MOM.Id_Entity SWP.Object_PN MOM.Id_Entity SWP.Page * MOM.Id_Entity SWP.Page_Y * MOM.Id_Entity SWP.Link MOM.Id_Entity SWP.Link1 MOM.Id_Entity SWP.Clip_O * MOM.Id_Entity SWP.Clip_X * MOM.Id_Entity SWP.Gallery * MOM.Id_Entity SWP.Picture * MOM.Id_Entity SWP.Referral * MOM.Id_Entity SRM.Id_Entity MOM.Id_Entity SRM.Object MOM.Id_Entity SRM._Boat_Class_ MOM.Id_Entity SRM.Boat_Class * MOM.Id_Entity SRM.Handicap * MOM.Id_Entity SRM.Link MOM.Id_Entity SRM.Link1 MOM.Id_Entity SRM.Boat * MOM.Id_Entity SRM.Club * MOM.Id_Entity SRM.Regatta_Event * MOM.Id_Entity SRM.Page * MOM.Id_Entity SRM.Regatta MOM.Id_Entity SRM.Regatta_C * MOM.Id_Entity SRM.Regatta_H * MOM.Id_Entity SRM.Sailor * MOM.Id_Entity SRM._Link_n_ MOM.Id_Entity SRM.Link2 MOM.Id_Entity SRM.Boat_in_Regatta * MOM.Id_Entity SRM.Race_Result * MOM.Id_Entity SRM.Team * MOM.Id_Entity SRM.Crew_Member * MOM.Id_Entity SRM.Team_has_Boat_in_Regatta * MOM.Id_Entity PAP.Subject_has_Address MOM.Id_Entity PAP.Subject_has_Email MOM.Id_Entity PAP.Subject_has_Phone MOM.Id_Entity PAP.Subject_has_Url MOM.Id_Entity PAP.Company_has_Url * MOM.Id_Entity PAP.Person_has_Url * MOM.Id_Entity PAP.Company_has_Phone * MOM.Id_Entity PAP.Person_has_Phone * MOM.Id_Entity PAP.Company_has_Email * MOM.Id_Entity PAP.Person_has_Email * MOM.Id_Entity PAP.Company_has_Address * MOM.Id_Entity PAP.Person_has_Address * MOM.Id_Entity last_cid [MOM.Id_Entity] MOM.Id_Entity MOM.Id_Entity MOM.Link MOM.Id_Entity 
MOM.Link1 MOM.Id_Entity MOM._Link_n_ MOM.Id_Entity MOM.Link2 MOM.Id_Entity MOM.Object MOM.Id_Entity PAP.Id_Entity MOM.Id_Entity PAP.Object MOM.Id_Entity PAP.Property MOM.Id_Entity PAP.Address * MOM.Id_Entity PAP.Subject MOM.Id_Entity PAP.Group MOM.Id_Entity PAP.Legal_Entity MOM.Id_Entity PAP.Company * MOM.Id_Entity PAP.Email * MOM.Id_Entity PAP.Phone * MOM.Id_Entity PAP.Person * MOM.Id_Entity PAP.Url * MOM.Id_Entity PAP.Link MOM.Id_Entity PAP.Link1 MOM.Id_Entity PAP.Address_Position * MOM.Id_Entity PAP._Link_n_ MOM.Id_Entity PAP.Link2 MOM.Id_Entity PAP.Subject_has_Property MOM.Id_Entity PAP.Person_has_Account * MOM.Id_Entity EVT.Id_Entity MOM.Id_Entity EVT.Object MOM.Id_Entity EVT.Calendar * MOM.Id_Entity EVT.Link MOM.Id_Entity EVT.Link1 MOM.Id_Entity EVT.Event * MOM.Id_Entity EVT.Event_occurs * MOM.Id_Entity EVT._Recurrence_Mixin_ MOM.Id_Entity EVT.Recurrence_Spec * MOM.Id_Entity EVT.Recurrence_Rule * MOM.Id_Entity SWP.Id_Entity MOM.Id_Entity SWP.Object MOM.Id_Entity SWP.Object_PN MOM.Id_Entity SWP.Page * MOM.Id_Entity SWP.Page_Y * MOM.Id_Entity SWP.Link MOM.Id_Entity SWP.Link1 MOM.Id_Entity SWP.Clip_O * MOM.Id_Entity SWP.Clip_X * MOM.Id_Entity SWP.Gallery * MOM.Id_Entity SWP.Picture * MOM.Id_Entity SWP.Referral * MOM.Id_Entity SRM.Id_Entity MOM.Id_Entity SRM.Object MOM.Id_Entity SRM._Boat_Class_ MOM.Id_Entity SRM.Boat_Class * MOM.Id_Entity SRM.Handicap * MOM.Id_Entity SRM.Link MOM.Id_Entity SRM.Link1 MOM.Id_Entity SRM.Boat * MOM.Id_Entity SRM.Club * MOM.Id_Entity SRM.Regatta_Event * MOM.Id_Entity SRM.Page * MOM.Id_Entity SRM.Regatta MOM.Id_Entity SRM.Regatta_C * MOM.Id_Entity SRM.Regatta_H * MOM.Id_Entity SRM.Sailor * MOM.Id_Entity SRM._Link_n_ MOM.Id_Entity SRM.Link2 MOM.Id_Entity SRM.Boat_in_Regatta * MOM.Id_Entity SRM.Race_Result * MOM.Id_Entity SRM.Team * MOM.Id_Entity SRM.Crew_Member * MOM.Id_Entity SRM.Team_has_Boat_in_Regatta * MOM.Id_Entity PAP.Subject_has_Address MOM.Id_Entity PAP.Subject_has_Email MOM.Id_Entity PAP.Subject_has_Phone MOM.Id_Entity 
PAP.Subject_has_Url MOM.Id_Entity PAP.Company_has_Url * MOM.Id_Entity PAP.Person_has_Url * MOM.Id_Entity PAP.Company_has_Phone * MOM.Id_Entity PAP.Person_has_Phone * MOM.Id_Entity PAP.Company_has_Email * MOM.Id_Entity PAP.Person_has_Email * MOM.Id_Entity PAP.Company_has_Address * MOM.Id_Entity PAP.Person_has_Address * MOM.Id_Entity left [MOM.Link] MOM.Link MOM.Link MOM.Link1 MOM.Link1 MOM._Link_n_ MOM.Link MOM.Link2 MOM.Link PAP.Link MOM.Link PAP.Link1 MOM.Link PAP.Address_Position * PAP.Address_Position PAP._Link_n_ MOM.Link PAP.Link2 MOM.Link PAP.Subject_has_Property PAP.Subject_has_Property PAP.Person_has_Account * PAP.Person_has_Account EVT.Link MOM.Link EVT.Link1 MOM.Link EVT.Event * EVT.Event EVT.Event_occurs * EVT.Event_occurs EVT._Recurrence_Mixin_ MOM.Link EVT.Recurrence_Spec * EVT.Recurrence_Spec EVT.Recurrence_Rule * EVT.Recurrence_Rule SWP.Link MOM.Link SWP.Link1 MOM.Link SWP.Clip_O * SWP.Clip_O SWP.Picture * SWP.Picture SRM.Link MOM.Link SRM.Link1 MOM.Link SRM.Boat * SRM.Boat SRM.Regatta SRM.Regatta SRM.Regatta_C * SRM.Regatta SRM.Regatta_H * SRM.Regatta SRM.Sailor * SRM.Sailor SRM._Link_n_ MOM.Link SRM.Link2 MOM.Link SRM.Boat_in_Regatta * SRM.Boat_in_Regatta SRM.Race_Result * SRM.Race_Result SRM.Team * SRM.Team SRM.Crew_Member * SRM.Crew_Member SRM.Team_has_Boat_in_Regatta * SRM.Team_has_Boat_in_Regatta PAP.Subject_has_Address PAP.Subject_has_Address PAP.Subject_has_Email PAP.Subject_has_Email PAP.Subject_has_Phone PAP.Subject_has_Phone PAP.Subject_has_Url PAP.Subject_has_Url PAP.Company_has_Url * PAP.Company_has_Url PAP.Person_has_Url * PAP.Person_has_Url PAP.Company_has_Phone * PAP.Company_has_Phone PAP.Person_has_Phone * PAP.Person_has_Phone PAP.Company_has_Email * PAP.Company_has_Email PAP.Person_has_Email * PAP.Person_has_Email PAP.Company_has_Address * PAP.Company_has_Address PAP.Person_has_Address * PAP.Person_has_Address lifetime [PAP.Subject] PAP.Subject PAP.Subject PAP.Group PAP.Subject PAP.Legal_Entity PAP.Subject PAP.Company * PAP.Subject 
PAP.Person * PAP.Subject name [PAP.Group] PAP.Group PAP.Group PAP.Legal_Entity PAP.Legal_Entity PAP.Company * PAP.Company name [SRM._Boat_Class_] SRM._Boat_Class_ SRM._Boat_Class_ SRM.Boat_Class * SRM.Boat_Class SRM.Handicap * SRM.Handicap perma_name [SRM.Regatta] SRM.Regatta SRM.Regatta SRM.Regatta_C * SRM.Regatta SRM.Regatta_H * SRM.Regatta perma_name [SWP.Object_PN] SWP.Object_PN SWP.Object_PN SWP.Page * SWP.Object_PN SWP.Page_Y * SWP.Object_PN SWP.Clip_X * SWP.Object_PN SWP.Gallery * SWP.Object_PN SWP.Referral * SWP.Object_PN SRM.Page * SWP.Object_PN phones [PAP.Subject] PAP.Subject PAP.Subject PAP.Group PAP.Subject PAP.Legal_Entity PAP.Subject pid [MOM.Id_Entity] MOM.Id_Entity MOM.Id_Entity MOM.Link MOM.Id_Entity MOM.Link1 MOM.Id_Entity MOM._Link_n_ MOM.Id_Entity MOM.Link2 MOM.Id_Entity MOM.Object MOM.Id_Entity PAP.Id_Entity MOM.Id_Entity PAP.Object MOM.Id_Entity PAP.Property MOM.Id_Entity PAP.Address * MOM.Id_Entity PAP.Subject MOM.Id_Entity PAP.Group MOM.Id_Entity PAP.Legal_Entity MOM.Id_Entity PAP.Company * MOM.Id_Entity PAP.Email * MOM.Id_Entity PAP.Phone * MOM.Id_Entity PAP.Person * MOM.Id_Entity PAP.Url * MOM.Id_Entity PAP.Link MOM.Id_Entity PAP.Link1 MOM.Id_Entity PAP.Address_Position * MOM.Id_Entity PAP._Link_n_ MOM.Id_Entity PAP.Link2 MOM.Id_Entity PAP.Subject_has_Property MOM.Id_Entity PAP.Person_has_Account * MOM.Id_Entity EVT.Id_Entity MOM.Id_Entity EVT.Object MOM.Id_Entity EVT.Calendar * MOM.Id_Entity EVT.Link MOM.Id_Entity EVT.Link1 MOM.Id_Entity EVT.Event * MOM.Id_Entity EVT.Event_occurs * MOM.Id_Entity EVT._Recurrence_Mixin_ MOM.Id_Entity EVT.Recurrence_Spec * MOM.Id_Entity EVT.Recurrence_Rule * MOM.Id_Entity SWP.Id_Entity MOM.Id_Entity SWP.Object MOM.Id_Entity SWP.Object_PN MOM.Id_Entity SWP.Page * MOM.Id_Entity SWP.Page_Y * MOM.Id_Entity SWP.Link MOM.Id_Entity SWP.Link1 MOM.Id_Entity SWP.Clip_O * MOM.Id_Entity SWP.Clip_X * MOM.Id_Entity SWP.Gallery * MOM.Id_Entity SWP.Picture * MOM.Id_Entity SWP.Referral * MOM.Id_Entity SRM.Id_Entity 
MOM.Id_Entity SRM.Object MOM.Id_Entity SRM._Boat_Class_ MOM.Id_Entity SRM.Boat_Class * MOM.Id_Entity SRM.Handicap * MOM.Id_Entity SRM.Link MOM.Id_Entity SRM.Link1 MOM.Id_Entity SRM.Boat * MOM.Id_Entity SRM.Club * MOM.Id_Entity SRM.Regatta_Event * MOM.Id_Entity SRM.Page * MOM.Id_Entity SRM.Regatta MOM.Id_Entity SRM.Regatta_C * MOM.Id_Entity SRM.Regatta_H * MOM.Id_Entity SRM.Sailor * MOM.Id_Entity SRM._Link_n_ MOM.Id_Entity SRM.Link2 MOM.Id_Entity SRM.Boat_in_Regatta * MOM.Id_Entity SRM.Race_Result * MOM.Id_Entity SRM.Team * MOM.Id_Entity SRM.Crew_Member * MOM.Id_Entity SRM.Team_has_Boat_in_Regatta * MOM.Id_Entity PAP.Subject_has_Address MOM.Id_Entity PAP.Subject_has_Email MOM.Id_Entity PAP.Subject_has_Phone MOM.Id_Entity PAP.Subject_has_Url MOM.Id_Entity PAP.Company_has_Url * MOM.Id_Entity PAP.Person_has_Url * MOM.Id_Entity PAP.Company_has_Phone * MOM.Id_Entity PAP.Person_has_Phone * MOM.Id_Entity PAP.Company_has_Email * MOM.Id_Entity PAP.Person_has_Email * MOM.Id_Entity PAP.Company_has_Address * MOM.Id_Entity PAP.Person_has_Address * MOM.Id_Entity prio [SWP.Object_PN] SWP.Object_PN SWP.Object_PN SWP.Page * SWP.Object_PN SWP.Page_Y * SWP.Object_PN SWP.Clip_X * SWP.Object_PN SWP.Gallery * SWP.Object_PN SWP.Referral * SWP.Object_PN SRM.Page * SWP.Object_PN races [SRM.Regatta] SRM.Regatta SRM.Regatta SRM.Regatta_C * SRM.Regatta SRM.Regatta_H * SRM.Regatta races_counted [SRM.Regatta] SRM.Regatta SRM.Regatta SRM.Regatta_C * SRM.Regatta SRM.Regatta_H * SRM.Regatta result [SRM.Regatta] SRM.Regatta SRM.Regatta SRM.Regatta_C * SRM.Regatta SRM.Regatta_H * SRM.Regatta right [MOM._Link_n_] MOM._Link_n_ MOM._Link_n_ MOM.Link2 MOM._Link_n_ PAP._Link_n_ MOM._Link_n_ PAP.Link2 MOM._Link_n_ PAP.Subject_has_Property PAP.Subject_has_Property PAP.Person_has_Account * PAP.Person_has_Account SRM._Link_n_ MOM._Link_n_ SRM.Link2 MOM._Link_n_ SRM.Boat_in_Regatta * SRM.Boat_in_Regatta SRM.Crew_Member * SRM.Crew_Member SRM.Team_has_Boat_in_Regatta * SRM.Team_has_Boat_in_Regatta 
PAP.Subject_has_Address PAP.Subject_has_Address PAP.Subject_has_Email PAP.Subject_has_Email PAP.Subject_has_Phone PAP.Subject_has_Phone PAP.Subject_has_Url PAP.Subject_has_Url PAP.Company_has_Url * PAP.Company_has_Url PAP.Person_has_Url * PAP.Person_has_Url PAP.Company_has_Phone * PAP.Company_has_Phone PAP.Person_has_Phone * PAP.Person_has_Phone PAP.Company_has_Email * PAP.Company_has_Email PAP.Person_has_Email * PAP.Person_has_Email PAP.Company_has_Address * PAP.Company_has_Address PAP.Person_has_Address * PAP.Person_has_Address short_name [PAP.Group] PAP.Group PAP.Group PAP.Legal_Entity PAP.Legal_Entity PAP.Company * PAP.Company short_title [SWP.Object_PN] SWP.Object_PN SWP.Object_PN SWP.Page * SWP.Object_PN SWP.Page_Y * SWP.Object_PN SWP.Clip_X * SWP.Object_PN SWP.Gallery * SWP.Object_PN SWP.Referral * SWP.Object_PN starters_rl [SRM.Regatta] SRM.Regatta SRM.Regatta SRM.Regatta_C * SRM.Regatta SRM.Regatta_H * SRM.Regatta text [SWP.Page_Mixin] SWP.Page * SWP.Page_Mixin SWP.Page_Y * SWP.Page_Mixin SWP.Clip_X * SWP.Clip_X SRM.Page * SWP.Page_Mixin title [SWP.Object_PN] SWP.Object_PN SWP.Object_PN SWP.Page * SWP.Object_PN SWP.Page_Y * SWP.Object_PN SWP.Clip_X * SWP.Clip_X SWP.Gallery * SWP.Object_PN SWP.Referral * SWP.Object_PN type_name [MOM.Id_Entity] MOM.Id_Entity MOM.Id_Entity MOM.Link MOM.Id_Entity MOM.Link1 MOM.Id_Entity MOM._Link_n_ MOM.Id_Entity MOM.Link2 MOM.Id_Entity MOM.Object MOM.Id_Entity PAP.Id_Entity MOM.Id_Entity PAP.Object MOM.Id_Entity PAP.Property MOM.Id_Entity PAP.Address * MOM.Id_Entity PAP.Subject MOM.Id_Entity PAP.Group MOM.Id_Entity PAP.Legal_Entity MOM.Id_Entity PAP.Company * MOM.Id_Entity PAP.Email * MOM.Id_Entity PAP.Phone * MOM.Id_Entity PAP.Person * MOM.Id_Entity PAP.Url * MOM.Id_Entity PAP.Link MOM.Id_Entity PAP.Link1 MOM.Id_Entity PAP.Address_Position * MOM.Id_Entity PAP._Link_n_ MOM.Id_Entity PAP.Link2 MOM.Id_Entity PAP.Subject_has_Property MOM.Id_Entity PAP.Person_has_Account * MOM.Id_Entity EVT.Id_Entity MOM.Id_Entity EVT.Object 
MOM.Id_Entity EVT.Calendar * MOM.Id_Entity EVT.Link MOM.Id_Entity EVT.Link1 MOM.Id_Entity EVT.Event * MOM.Id_Entity EVT.Event_occurs * MOM.Id_Entity EVT._Recurrence_Mixin_ MOM.Id_Entity EVT.Recurrence_Spec * MOM.Id_Entity EVT.Recurrence_Rule * MOM.Id_Entity SWP.Id_Entity MOM.Id_Entity SWP.Object MOM.Id_Entity SWP.Object_PN MOM.Id_Entity SWP.Page * MOM.Id_Entity SWP.Page_Y * MOM.Id_Entity SWP.Link MOM.Id_Entity SWP.Link1 MOM.Id_Entity SWP.Clip_O * MOM.Id_Entity SWP.Clip_X * MOM.Id_Entity SWP.Gallery * MOM.Id_Entity SWP.Picture * MOM.Id_Entity SWP.Referral * MOM.Id_Entity SRM.Id_Entity MOM.Id_Entity SRM.Object MOM.Id_Entity SRM._Boat_Class_ MOM.Id_Entity SRM.Boat_Class * MOM.Id_Entity SRM.Handicap * MOM.Id_Entity SRM.Link MOM.Id_Entity SRM.Link1 MOM.Id_Entity SRM.Boat * MOM.Id_Entity SRM.Club * MOM.Id_Entity SRM.Regatta_Event * MOM.Id_Entity SRM.Page * MOM.Id_Entity SRM.Regatta MOM.Id_Entity SRM.Regatta_C * MOM.Id_Entity SRM.Regatta_H * MOM.Id_Entity SRM.Sailor * MOM.Id_Entity SRM._Link_n_ MOM.Id_Entity SRM.Link2 MOM.Id_Entity SRM.Boat_in_Regatta * MOM.Id_Entity SRM.Race_Result * MOM.Id_Entity SRM.Team * MOM.Id_Entity SRM.Crew_Member * MOM.Id_Entity SRM.Team_has_Boat_in_Regatta * MOM.Id_Entity PAP.Subject_has_Address MOM.Id_Entity PAP.Subject_has_Email MOM.Id_Entity PAP.Subject_has_Phone MOM.Id_Entity PAP.Subject_has_Url MOM.Id_Entity PAP.Company_has_Url * MOM.Id_Entity PAP.Person_has_Url * MOM.Id_Entity PAP.Company_has_Phone * MOM.Id_Entity PAP.Person_has_Phone * MOM.Id_Entity PAP.Company_has_Email * MOM.Id_Entity PAP.Person_has_Email * MOM.Id_Entity PAP.Company_has_Address * MOM.Id_Entity PAP.Person_has_Address * MOM.Id_Entity urls [PAP.Subject] PAP.Subject PAP.Subject PAP.Group PAP.Subject PAP.Legal_Entity PAP.Subject year [SRM.Regatta] SRM.Regatta SRM.Regatta SRM.Regatta_C * SRM.Regatta SRM.Regatta_H * SRM.Regatta >>> name, DT = ("left", "MOM.Link") >>> xs = a_map [name, DT] >>> for kind, ET in sorted (xs, key = lambda x : x [1].i_rank) : ### left ... 
print ("%%-30s %%-30s %%s" %% (ET.type_name, kind.attr.DET, kind.attr.DET_Base)) MOM.Link MOM.Link None MOM.Link1 MOM.Link1 MOM.Link MOM._Link_n_ MOM.Link None MOM.Link2 MOM.Link None PAP.Link MOM.Link None PAP.Link1 MOM.Link None PAP.Address_Position PAP.Address_Position MOM.Link1 PAP._Link_n_ MOM.Link None PAP.Link2 MOM.Link None PAP.Subject_has_Property PAP.Subject_has_Property MOM.Link PAP.Person_has_Account PAP.Person_has_Account MOM.Link EVT.Link MOM.Link None EVT.Link1 MOM.Link None EVT.Event EVT.Event MOM.Link1 EVT.Event_occurs EVT.Event_occurs MOM.Link1 EVT._Recurrence_Mixin_ MOM.Link None EVT.Recurrence_Spec EVT.Recurrence_Spec MOM.Link1 EVT.Recurrence_Rule EVT.Recurrence_Rule MOM.Link1 SWP.Link MOM.Link None SWP.Link1 MOM.Link None SWP.Clip_O SWP.Clip_O MOM.Link1 SWP.Picture SWP.Picture MOM.Link1 SRM.Link MOM.Link None SRM.Link1 MOM.Link None SRM.Boat SRM.Boat MOM.Link1 SRM.Regatta SRM.Regatta MOM.Link1 SRM.Regatta_C SRM.Regatta MOM.Link1 SRM.Regatta_H SRM.Regatta MOM.Link1 SRM.Sailor SRM.Sailor MOM.Link1 SRM._Link_n_ MOM.Link None SRM.Link2 MOM.Link None SRM.Boat_in_Regatta SRM.Boat_in_Regatta MOM.Link SRM.Race_Result SRM.Race_Result MOM.Link1 SRM.Team SRM.Team MOM.Link1 SRM.Crew_Member SRM.Crew_Member MOM.Link SRM.Team_has_Boat_in_Regatta SRM.Team_has_Boat_in_Regatta MOM.Link PAP.Subject_has_Address PAP.Subject_has_Property MOM.Link PAP.Subject_has_Email PAP.Subject_has_Property MOM.Link PAP.Subject_has_Phone PAP.Subject_has_Property MOM.Link PAP.Subject_has_Url PAP.Subject_has_Property MOM.Link PAP.Company_has_Url PAP.Company_has_Url PAP.Subject_has_Property PAP.Person_has_Url PAP.Person_has_Url PAP.Subject_has_Property PAP.Company_has_Phone PAP.Company_has_Phone PAP.Subject_has_Property PAP.Person_has_Phone PAP.Person_has_Phone PAP.Subject_has_Property PAP.Company_has_Email PAP.Company_has_Email PAP.Subject_has_Property PAP.Person_has_Email PAP.Person_has_Email PAP.Subject_has_Property PAP.Company_has_Address PAP.Company_has_Address 
PAP.Subject_has_Property PAP.Person_has_Address PAP.Person_has_Address PAP.Subject_has_Property >>> name, DT = ("left", "MOM.Link") >>> xs = a_map [name, DT] >>> for ak, ET in sorted (xs, key = lambda x : x [1].i_rank) : ### left, no Link1 ... if len (ET.Roles) > 1 : ... db = ak.det_base.type_name if ak.det_base else None ... print ("%%-30s %%-30s %%s" %% (ET.type_name, ak.e_type.type_name, db)) MOM._Link_n_ MOM.Link None MOM.Link2 MOM.Link None PAP._Link_n_ MOM.Link None PAP.Link2 MOM.Link None PAP.Subject_has_Property PAP.Subject_has_Property MOM.Link PAP.Person_has_Account PAP.Person_has_Account MOM.Link SRM._Link_n_ MOM.Link None SRM.Link2 MOM.Link None SRM.Boat_in_Regatta SRM.Boat_in_Regatta MOM.Link SRM.Crew_Member SRM.Crew_Member MOM.Link SRM.Team_has_Boat_in_Regatta SRM.Team_has_Boat_in_Regatta MOM.Link PAP.Subject_has_Address PAP.Subject_has_Address MOM.Link PAP.Subject_has_Email PAP.Subject_has_Email MOM.Link PAP.Subject_has_Phone PAP.Subject_has_Phone MOM.Link PAP.Subject_has_Url PAP.Subject_has_Url MOM.Link PAP.Company_has_Url PAP.Company_has_Url PAP.Subject_has_Property PAP.Person_has_Url PAP.Person_has_Url PAP.Subject_has_Property PAP.Company_has_Phone PAP.Company_has_Phone PAP.Subject_has_Property PAP.Person_has_Phone PAP.Person_has_Phone PAP.Subject_has_Property PAP.Company_has_Email PAP.Company_has_Email PAP.Subject_has_Property PAP.Person_has_Email PAP.Person_has_Email PAP.Subject_has_Property PAP.Company_has_Address PAP.Company_has_Address PAP.Subject_has_Property PAP.Person_has_Address PAP.Person_has_Address PAP.Subject_has_Property >>> name, DT = ("right", "MOM._Link_n_") >>> xs = a_map [name, DT] >>> for ak, ET in sorted (xs, key = lambda x : x [1].i_rank) : ### right ... db = ak.det_base.type_name if ak.det_base else None ... 
print ("%%-30s %%-30s %%s" %% (ET.type_name, ak.e_type.type_name, db)) MOM._Link_n_ MOM._Link_n_ None MOM.Link2 MOM._Link_n_ None PAP._Link_n_ MOM._Link_n_ None PAP.Link2 MOM._Link_n_ None PAP.Subject_has_Property PAP.Subject_has_Property MOM._Link_n_ PAP.Person_has_Account PAP.Person_has_Account MOM._Link_n_ SRM._Link_n_ MOM._Link_n_ None SRM.Link2 MOM._Link_n_ None SRM.Boat_in_Regatta SRM.Boat_in_Regatta MOM._Link_n_ SRM.Crew_Member SRM.Crew_Member MOM._Link_n_ SRM.Team_has_Boat_in_Regatta SRM.Team_has_Boat_in_Regatta MOM._Link_n_ PAP.Subject_has_Address PAP.Subject_has_Address PAP.Subject_has_Property PAP.Subject_has_Email PAP.Subject_has_Email PAP.Subject_has_Property PAP.Subject_has_Phone PAP.Subject_has_Phone PAP.Subject_has_Property PAP.Subject_has_Url PAP.Subject_has_Url PAP.Subject_has_Property PAP.Company_has_Url PAP.Company_has_Url PAP.Subject_has_Property PAP.Person_has_Url PAP.Person_has_Url PAP.Subject_has_Property PAP.Company_has_Phone PAP.Company_has_Phone PAP.Subject_has_Property PAP.Person_has_Phone PAP.Person_has_Phone PAP.Subject_has_Property PAP.Company_has_Email PAP.Company_has_Email PAP.Subject_has_Property PAP.Person_has_Email PAP.Person_has_Email PAP.Subject_has_Property PAP.Company_has_Address PAP.Company_has_Address PAP.Subject_has_Property PAP.Person_has_Address PAP.Person_has_Address PAP.Subject_has_Property >>> name, DT = ("left", "MOM.Link") >>> xs = a_map [name, DT] >>> for ak, ET in sorted (xs, key = lambda x : x [1].i_rank) : ### left ... flag = "*" if not ET.is_partial else "" ... rt = ak.E_Type.type_name if ak.E_Type else None ... 
print ("%%-30s%%-2s %%s" %% (ET.type_name, flag, rt)) MOM.Link None MOM.Link1 None MOM._Link_n_ None MOM.Link2 None PAP.Link None PAP.Link1 None PAP.Address_Position * PAP.Address PAP._Link_n_ None PAP.Link2 None PAP.Subject_has_Property PAP.Subject PAP.Person_has_Account * PAP.Person EVT.Link None EVT.Link1 None EVT.Event * MOM.Id_Entity EVT.Event_occurs * EVT.Event EVT._Recurrence_Mixin_ None EVT.Recurrence_Spec * EVT.Event EVT.Recurrence_Rule * EVT.Recurrence_Spec SWP.Link None SWP.Link1 None SWP.Clip_O * SWP.Object_PN SWP.Picture * SWP.Gallery SRM.Link None SRM.Link1 None SRM.Boat * SRM.Boat_Class SRM.Regatta SRM.Regatta_Event SRM.Regatta_C * SRM.Regatta_Event SRM.Regatta_H * SRM.Regatta_Event SRM.Sailor * PAP.Person SRM._Link_n_ None SRM.Link2 None SRM.Boat_in_Regatta * SRM.Boat SRM.Race_Result * SRM.Boat_in_Regatta SRM.Team * SRM.Regatta_C SRM.Crew_Member * SRM.Boat_in_Regatta SRM.Team_has_Boat_in_Regatta * SRM.Team PAP.Subject_has_Address PAP.Subject PAP.Subject_has_Email PAP.Subject PAP.Subject_has_Phone PAP.Subject PAP.Subject_has_Url PAP.Subject PAP.Company_has_Url * PAP.Company PAP.Person_has_Url * PAP.Person PAP.Company_has_Phone * PAP.Company PAP.Person_has_Phone * PAP.Person PAP.Company_has_Email * PAP.Company PAP.Person_has_Email * PAP.Person PAP.Company_has_Address * PAP.Company PAP.Person_has_Address * PAP.Person >>> name, DT = ("right", "MOM._Link_n_") >>> xs = a_map [name, DT] >>> for ak, ET in sorted (xs, key = lambda x : x [1].i_rank) : ### right ... flag = "*" if not ET.is_partial else "" ... rt = ak.E_Type.type_name if ak.E_Type else None ... 
print ("%%-30s%%-2s %%s" %% (ET.type_name, flag, rt)) MOM._Link_n_ None MOM.Link2 None PAP._Link_n_ None PAP.Link2 None PAP.Subject_has_Property PAP.Property PAP.Person_has_Account * Auth.Account SRM._Link_n_ None SRM.Link2 None SRM.Boat_in_Regatta * SRM.Regatta SRM.Crew_Member * SRM.Sailor SRM.Team_has_Boat_in_Regatta * SRM.Boat_in_Regatta PAP.Subject_has_Address PAP.Address PAP.Subject_has_Email PAP.Email PAP.Subject_has_Phone PAP.Phone PAP.Subject_has_Url PAP.Url PAP.Company_has_Url * PAP.Url PAP.Person_has_Url * PAP.Url PAP.Company_has_Phone * PAP.Phone PAP.Person_has_Phone * PAP.Phone PAP.Company_has_Email * PAP.Email PAP.Person_has_Email * PAP.Email PAP.Company_has_Address * PAP.Address PAP.Person_has_Address * PAP.Address >>> name, DT = ("left", "MOM.Link") >>> xs = a_map [name, DT] >>> for i, (kind, ET) in enumerate (sorted (xs, key = lambda x : x [1].i_rank)) : ### left ... if not i : ... print ("%%-30s %%-30s %%-30s %%s" %% ("E_Type", "det", "det_base", "attr.E_Type")) ... print ("=" * 110) ... if len (ET.Roles) > 1 : ... det = kind.attr.det.type_name ... deb = kind.attr.det_base ... db = deb.type_name if deb else "<Undef/value>" ... at = kind.E_Type.type_name if kind.E_Type else "<Undef/value>" ... 
print ("%%-30s %%-30s %%-30s %%s" %% (ET.type_name, det, db, at)) E_Type det det_base attr.E_Type ============================================================================================================== MOM._Link_n_ MOM.Link <Undef/value> <Undef/value> MOM.Link2 MOM.Link <Undef/value> <Undef/value> PAP._Link_n_ MOM.Link <Undef/value> <Undef/value> PAP.Link2 MOM.Link <Undef/value> <Undef/value> PAP.Subject_has_Property PAP.Subject_has_Property MOM.Link PAP.Subject PAP.Person_has_Account PAP.Person_has_Account MOM.Link PAP.Person SRM._Link_n_ MOM.Link <Undef/value> <Undef/value> SRM.Link2 MOM.Link <Undef/value> <Undef/value> SRM.Boat_in_Regatta SRM.Boat_in_Regatta MOM.Link SRM.Boat SRM.Crew_Member SRM.Crew_Member MOM.Link SRM.Boat_in_Regatta SRM.Team_has_Boat_in_Regatta SRM.Team_has_Boat_in_Regatta MOM.Link SRM.Team PAP.Subject_has_Address PAP.Subject_has_Property MOM.Link PAP.Subject PAP.Subject_has_Email PAP.Subject_has_Property MOM.Link PAP.Subject PAP.Subject_has_Phone PAP.Subject_has_Property MOM.Link PAP.Subject PAP.Subject_has_Url PAP.Subject_has_Property MOM.Link PAP.Subject PAP.Company_has_Url PAP.Company_has_Url PAP.Subject_has_Property PAP.Company PAP.Person_has_Url PAP.Person_has_Url PAP.Subject_has_Property PAP.Person PAP.Company_has_Phone PAP.Company_has_Phone PAP.Subject_has_Property PAP.Company PAP.Person_has_Phone PAP.Person_has_Phone PAP.Subject_has_Property PAP.Person PAP.Company_has_Email PAP.Company_has_Email PAP.Subject_has_Property PAP.Company PAP.Person_has_Email PAP.Person_has_Email PAP.Subject_has_Property PAP.Person PAP.Company_has_Address PAP.Company_has_Address PAP.Subject_has_Property PAP.Company PAP.Person_has_Address PAP.Person_has_Address PAP.Subject_has_Property PAP.Person >>> name, DT = ("right", "MOM._Link_n_") >>> xs = a_map [name, DT] >>> for i, (kind, ET) in enumerate (sorted (xs, key = lambda x : x [1].i_rank)) : ### right ... if not i : ... 
print ("%%-30s %%-30s %%-30s %%s" %% ("E_Type", "det", "det_base", "attr.E_Type")) ... print ("=" * 110) ... det = kind.attr.det.type_name ... deb = kind.attr.det_base ... db = deb.type_name if deb else "<Undef/value>" ... at = kind.E_Type.type_name if kind.E_Type else "<Undef/value>" ... print ("%%-30s %%-30s %%-30s %%s" %% (ET.type_name, det, db, at)) E_Type det det_base attr.E_Type ============================================================================================================== MOM._Link_n_ MOM._Link_n_ <Undef/value> <Undef/value> MOM.Link2 MOM._Link_n_ <Undef/value> <Undef/value> PAP._Link_n_ MOM._Link_n_ <Undef/value> <Undef/value> PAP.Link2 MOM._Link_n_ <Undef/value> <Undef/value> PAP.Subject_has_Property PAP.Subject_has_Property MOM._Link_n_ PAP.Property PAP.Person_has_Account PAP.Person_has_Account MOM._Link_n_ Auth.Account SRM._Link_n_ MOM._Link_n_ <Undef/value> <Undef/value> SRM.Link2 MOM._Link_n_ <Undef/value> <Undef/value> SRM.Boat_in_Regatta SRM.Boat_in_Regatta MOM._Link_n_ SRM.Regatta SRM.Crew_Member SRM.Crew_Member MOM._Link_n_ SRM.Sailor SRM.Team_has_Boat_in_Regatta SRM.Team_has_Boat_in_Regatta MOM._Link_n_ SRM.Boat_in_Regatta PAP.Subject_has_Address PAP.Subject_has_Address PAP.Subject_has_Property PAP.Address PAP.Subject_has_Email PAP.Subject_has_Email PAP.Subject_has_Property PAP.Email PAP.Subject_has_Phone PAP.Subject_has_Phone PAP.Subject_has_Property PAP.Phone PAP.Subject_has_Url PAP.Subject_has_Url PAP.Subject_has_Property PAP.Url PAP.Company_has_Url PAP.Subject_has_Url PAP.Subject_has_Property PAP.Url PAP.Person_has_Url PAP.Subject_has_Url PAP.Subject_has_Property PAP.Url PAP.Company_has_Phone PAP.Subject_has_Phone PAP.Subject_has_Property PAP.Phone PAP.Person_has_Phone PAP.Subject_has_Phone PAP.Subject_has_Property PAP.Phone PAP.Company_has_Email PAP.Subject_has_Email PAP.Subject_has_Property PAP.Email PAP.Person_has_Email PAP.Subject_has_Email PAP.Subject_has_Property PAP.Email PAP.Company_has_Address PAP.Subject_has_Address 
PAP.Subject_has_Property PAP.Address PAP.Person_has_Address PAP.Subject_has_Address PAP.Subject_has_Property PAP.Address >>> name, DT = ("left", "MOM.Link") >>> xs = a_map [name, DT] >>> for i, (kind, ET) in enumerate (sorted (xs, key = lambda x : x [1].i_rank)) : ### left ... if not i : ... print ("%%-30s %%-30s %%s" %% ("E_Type", "det", "det_kind")) ... print ("=" * 80) ... if len (ET.Roles) > 1 : ... det = kind.attr.det.type_name ... dek = kind.attr.det_kind ... print ("%%-30s %%-30s %%s" %% (ET.type_name, det, dek)) E_Type det det_kind ================================================================================ MOM._Link_n_ MOM.Link Left `left` MOM.Link2 MOM.Link Left `left` PAP._Link_n_ MOM.Link Left `left` PAP.Link2 MOM.Link Left `left` PAP.Subject_has_Property PAP.Subject_has_Property Subject `left` PAP.Person_has_Account PAP.Person_has_Account Person `left` SRM._Link_n_ MOM.Link Left `left` SRM.Link2 MOM.Link Left `left` SRM.Boat_in_Regatta SRM.Boat_in_Regatta Boat `left` SRM.Crew_Member SRM.Crew_Member Boat_in_Regatta `left` SRM.Team_has_Boat_in_Regatta SRM.Team_has_Boat_in_Regatta Team `left` PAP.Subject_has_Address PAP.Subject_has_Property Subject `left` PAP.Subject_has_Email PAP.Subject_has_Property Subject `left` PAP.Subject_has_Phone PAP.Subject_has_Property Subject `left` PAP.Subject_has_Url PAP.Subject_has_Property Subject `left` PAP.Company_has_Url PAP.Company_has_Url Company `left` PAP.Person_has_Url PAP.Person_has_Url Person `left` PAP.Company_has_Phone PAP.Company_has_Phone Company `left` PAP.Person_has_Phone PAP.Person_has_Phone Person `left` PAP.Company_has_Email PAP.Company_has_Email Company `left` PAP.Person_has_Email PAP.Person_has_Email Person `left` PAP.Company_has_Address PAP.Company_has_Address Company `left` PAP.Person_has_Address PAP.Person_has_Address Person `left` >>> name, DT = ("right", "MOM._Link_n_") >>> xs = a_map [name, DT] >>> for i, (kind, ET) in enumerate (sorted (xs, key = lambda x : x [1].i_rank)) : ### right ... 
if not i : ... print ("%%-30s %%-30s %%s" %% ("E_Type", "det", "det_kind")) ... print ("=" * 80) ... det = kind.attr.det.type_name ... dek = kind.attr.det_kind ... print ("%%-30s %%-30s %%s" %% (ET.type_name, det, dek)) E_Type det det_kind ================================================================================ MOM._Link_n_ MOM._Link_n_ Right `right` MOM.Link2 MOM._Link_n_ Right `right` PAP._Link_n_ MOM._Link_n_ Right `right` PAP.Link2 MOM._Link_n_ Right `right` PAP.Subject_has_Property PAP.Subject_has_Property Property `right` PAP.Person_has_Account PAP.Person_has_Account Account `right` SRM._Link_n_ MOM._Link_n_ Right `right` SRM.Link2 MOM._Link_n_ Right `right` SRM.Boat_in_Regatta SRM.Boat_in_Regatta Regatta `right` SRM.Crew_Member SRM.Crew_Member Sailor `right` SRM.Team_has_Boat_in_Regatta SRM.Team_has_Boat_in_Regatta Boat_in_Regatta `right` PAP.Subject_has_Address PAP.Subject_has_Address Address `right` PAP.Subject_has_Email PAP.Subject_has_Email Email `right` PAP.Subject_has_Phone PAP.Subject_has_Phone Phone `right` PAP.Subject_has_Url PAP.Subject_has_Url Url `right` PAP.Company_has_Url PAP.Subject_has_Url Url `right` PAP.Person_has_Url PAP.Subject_has_Url Url `right` PAP.Company_has_Phone PAP.Subject_has_Phone Phone `right` PAP.Person_has_Phone PAP.Subject_has_Phone Phone `right` PAP.Company_has_Email PAP.Subject_has_Email Email `right` PAP.Person_has_Email PAP.Subject_has_Email Email `right` PAP.Company_has_Address PAP.Subject_has_Address Address `right` PAP.Person_has_Address PAP.Subject_has_Address Address `right` """ _test_pickled_types = """ >>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS Creating new scope MOMT__... >>> MOM = scope.MOM >>> nl = pyk.unichr (10) >>> a_map = _attr_map (MOM.Id_Entity) >>> p_types = set () >>> for (name, DT), xs in sorted (pyk.iteritems (a_map)) : ... kind, ET = xs [0] ... if kind.save_to_db and not kind.is_composite : ... pt = kind.Pickled_Type ... if pt : ... at = kind.attr ... pn = pt.__name__ ... 
pn = portable_repr.Type_Name_Map.get (pn, pn) ... p_types.add (pn) ... print ("%%-20s %%-15s %%-15s %%6s %%5d %%5d" %% (kind.name, at.typ, pn, pt.max_length, pt.max_ui_length, pt.length)) abstract Text text-string 0 120 0 address Email text-string 80 81 80 beam Float float None 22 22 boat_class Entity _Boat_Class_ None 20 20 calendar Entity Calendar None 20 20 cc Numeric_String text-string 3 4 3 city String text-string 30 31 30 club Entity Club None 20 20 club Entity Club None 20 20 club Entity Club None 20 20 contents Text text-string 0 120 0 contents Text text-string 0 120 0 count Int int None 1 1 country String text-string 20 21 20 date Date date None 12 12 date_exceptions Date_List byte-string 0 20 0 dates Date_List byte-string 0 20 0 desc String text-string 80 81 80 desc String text-string 20 21 20 desc String text-string 20 21 20 desc String text-string 20 21 20 desc String text-string 30 31 30 desc String text-string 160 161 160 desc String text-string 160 161 160 detail String text-string 160 161 160 directory Directory byte-string 0 120 0 discarded Boolean bool None 5 5 discards Int int None 3 3 download_name String text-string 64 65 64 easter_offset Int_List byte-string 0 20 0 event Entity Regatta_Event None 20 20 extension Numeric_String text-string 5 6 5 finish Date date None 12 12 first_name String text-string 32 33 32 format Format text-string 8 9 8 head_line String text-string 256 257 256 hidden Boolean bool None 5 5 is_cancelled Boolean bool None 5 5 is_cancelled Boolean bool None 5 5 is_exception Boolean bool None 5 5 is_team_race Boolean bool None 5 5 key Int int None 20 20 kind String text-string 32 33 32 last_cid Int int None 20 20 last_name String text-string 48 49 48 leader Entity Person None 20 20 link_to Url text-string 160 161 160 loa Float float None 22 22 long_name String text-string 64 65 64 max_crew Int int None 2 2 middle_name String text-string 32 33 32 mna_number Int int None 7 7 month Int_List byte-string 0 20 0 month_day Int_List 
byte-string 0 20 0 name Name text-string 32 33 32 name String text-string 64 65 64 name String text-string 48 49 48 name String text-string 8 9 8 name String text-string 64 65 64 name String text-string 64 65 64 name String text-string 48 49 48 name String text-string 100 101 100 nation Nation text-string 3 20 3 nation Nation text-string 3 20 3 ndc Numeric_String text-string 5 6 5 number Int int None 20 20 parent_url Url text-string 160 161 160 period Int int None 1 1 perma_name String text-string 64 65 64 perma_name String text-string 64 65 64 perma_name Date-Slug text-string 80 81 80 pid Surrogate int None 20 20 place Int int None 1 1 place Int int None 1 1 points Int int None 1 1 points Int int None 1 1 prio Int int None 20 20 prio Int int None 20 20 race Int int None 3 3 races Int int None 3 3 rank Int int None 20 20 region String text-string 20 21 20 registered_in String text-string 64 65 64 registration_date Date date None 12 12 registration_date Date date None 12 12 restrict_pos Int_List byte-string 0 20 0 role String text-string 32 33 32 sail_area Float float None 22 22 sail_number Int int None 7 7 sail_number_x String text-string 8 9 8 sex Sex text-string 1 2 1 short_name String text-string 12 13 12 short_title String text-string 30 31 30 short_title String text-string 30 31 30 skipper Entity Sailor None 20 20 sn Numeric_String text-string 14 15 14 start Date date None 12 12 starters_rl Int int None 20 20 status String text-string 8 9 8 street String text-string 60 61 60 target_url Url text-string 160 161 160 text Text text-string 0 120 0 title String text-string 20 21 20 title String text-string 120 121 120 type_name String text-string 0 120 0 unit Unit int None 20 20 value Url text-string 160 161 160 week Int_List byte-string 0 20 0 week_day Weekday_RR_List byte-string 0 20 0 yardstick Int int None 3 3 year Int int None 5 5 year_day Int_List byte-string 0 20 0 zip String text-string 6 7 6 >>> for p in sorted (p_types) : ... 
print (p) Calendar Club Person Regatta_Event Sailor _Boat_Class_ bool byte-string date float int text-string >>> for (name, DT), xs in sorted (pyk.iteritems (a_map)) : ... kind, ET = xs [0] ... if kind.save_to_db and not kind.is_composite : ... pt = kind.Pickled_Type_Raw ... if pt : ... pn = pt.__name__ ... pn = portable_repr.Type_Name_Map.get (pn, pn) ... print ("%%-20s %%-15s %%-15s %%6s %%5d %%5d" %% (kind.name, kind.typ, pn, pt.max_length, pt.max_ui_length, pt.length)) address Email text-string 80 81 80 city String text-string 30 31 30 country String text-string 20 21 20 first_name String text-string 32 33 32 last_name String text-string 48 49 48 middle_name String text-string 32 33 32 mna_number Int text-string None 7 7 name String text-string 64 65 64 name String text-string 8 9 8 name String text-string 64 65 64 name String text-string 64 65 64 name String text-string 48 49 48 region String text-string 20 21 20 registered_in String text-string 64 65 64 sail_number Int text-string None 7 7 sail_number_x String text-string 8 9 8 short_name String text-string 12 13 12 street String text-string 60 61 60 title String text-string 20 21 20 zip String text-string 6 7 6 """ _test_types = """ >>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS Creating new scope MOMT__... >>> MOM = scope.MOM >>> for T, l in children_trans_iter (MOM.Id_Entity) : ... ET = T.E_Type ... 
print ("%%s%%s" %% (" " * l, ET.type_name)) MOM.Id_Entity MOM.Link MOM.Link1 Auth.Link1 Auth._Account_Action_ Auth.Account_Activation Auth.Account_Password_Change_Required Auth._Account_Token_Action_ Auth.Account_EMail_Verification Auth.Account_Password_Reset PAP.Link1 PAP.Address_Position EVT.Link1 EVT.Event EVT.Event_occurs EVT._Recurrence_Mixin_ EVT.Recurrence_Spec EVT.Recurrence_Rule SWP.Link1 SWP.Clip_O SWP.Picture SRM.Link1 SRM.Boat SRM.Regatta SRM.Regatta_C SRM.Regatta_H SRM.Sailor SRM.Race_Result SRM.Team MOM._Link_n_ MOM.Link2 Auth.Link2 Auth.Account_in_Group PAP.Link2 PAP.Subject_has_Property PAP.Subject_has_Address PAP.Company_has_Address PAP.Person_has_Address PAP.Subject_has_Email PAP.Company_has_Email PAP.Person_has_Email PAP.Subject_has_Phone PAP.Company_has_Phone PAP.Person_has_Phone PAP.Subject_has_Url PAP.Company_has_Url PAP.Person_has_Url PAP.Person_has_Account SRM.Link2 SRM.Boat_in_Regatta SRM.Crew_Member SRM.Team_has_Boat_in_Regatta MOM.Link3 Auth._Link_n_ PAP._Link_n_ SRM._Link_n_ Auth.Link PAP.Link EVT.Link SWP.Link SRM.Link MOM.Object Auth.Object Auth._Account_ Auth.Account_Anonymous Auth.Account Auth.Certificate Auth.Group PAP.Object PAP.Property PAP.Address PAP.Email PAP.Phone PAP.Url PAP.Subject PAP.Group PAP.Legal_Entity PAP.Company PAP.Person EVT.Object EVT.Calendar SWP.Object SWP.Object_PN SWP.Page SWP.Page_Y SWP.Clip_X SRM.Page SWP.Gallery SWP.Referral SRM.Object SRM._Boat_Class_ SRM.Boat_Class SRM.Handicap SRM.Club SRM.Regatta_Event Auth.Id_Entity PAP.Id_Entity EVT.Id_Entity SWP.Id_Entity SRM.Id_Entity """ __test__ = Scaffold.create_test_dict \ ( dict ( test_DET = _test_DET , test_pickled_types = _test_pickled_types , test_types = _test_types ) ) ### __END__ GTW.__test__.Attr
PypiClean
/EVE-SRP-0.12.11.tar.gz/EVE-SRP-0.12.11/src/evesrp/auth/models.py
from __future__ import absolute_import
from base64 import urlsafe_b64encode
from itertools import groupby
import os
import pickle

from flask import url_for
import six
from six.moves import filter
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm.collections import attribute_mapped_collection, collection

from .. import db
from . import AuthMethod, PermissionType
from ..util import AutoID, Timestamped, AutoName, unistr, ensure_unicode
from ..models import Action, Modifier, Request

# NOTE(review): `pickle`, `collection`, `AuthMethod` and `Modifier` are not
# referenced in this module's visible code — presumably kept for re-export or
# historical reasons; TODO confirm before removing.

if six.PY3:
    unicode = str


# Association table implementing the many-to-many User <-> Group membership
# relation (see User.groups and Group.users below).
users_groups = db.Table('users_groups', db.Model.metadata,
        db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
        db.Column('group_id', db.Integer, db.ForeignKey('group.id')))


@unistr
class Entity(db.Model, AutoID, AutoName):
    """Private class for shared functionality between :py:class:`User` and
    :py:class:`Group`.

    This class defines a number of helper methods used indirectly by User and
    Group subclasses such as automatically defining the table name and mapper
    arguments.

    This class should `not` be inherited from directly, instead either
    :py:class:`User` or :py:class:`Group` should be used.
    """

    #: The name of the entity. Usually a nickname.
    name = db.Column(db.String(100, convert_unicode=True), nullable=False)

    #: Polymorphic discriminator column.
    type_ = db.Column(db.String(50, convert_unicode=True))

    #: :py:class:`Permission`\s associated specifically with this entity.
    entity_permissions = db.relationship('Permission', back_populates='entity',
            collection_class=set,
            cascade='save-update,merge,refresh-expire,expunge',
            lazy='dynamic')

    #: The name of the :py:class:`AuthMethod` for this entity.
    authmethod = db.Column(db.String(50, convert_unicode=True),
            nullable=False)

    @declared_attr
    def __mapper_args__(cls):
        """SQLAlchemy late-binding attribute to set mapper arguments.

        Obviates subclasses from having to specify polymorphic identities.
        """
        # The polymorphic identity is simply the subclass's name; only the
        # root Entity class carries the discriminator column itself.
        cls_name = unicode(cls.__name__)
        args = {'polymorphic_identity': cls_name}
        if cls_name == u'Entity':
            args['polymorphic_on'] = cls.type_
        return args

    def __init__(self, name, authmethod, **kwargs):
        self.name = ensure_unicode(name)
        self.authmethod = ensure_unicode(authmethod)
        super(Entity, self).__init__(**kwargs)

    def __repr__(self):
        return "{x.__class__.__name__}('{x.name}')".format(x=self)

    def __unicode__(self):
        return u"{x.name}".format(x=self)

    def has_permission(self, permissions, division_or_request=None):
        """Returns if this entity has been granted a permission in a division.

        If ``division_or_request`` is ``None``, this method checks if this
        group has the given permission in `any` division.

        :param permissions: The series of permissions to check
        :type permissions: iterable
        :param division_or_request: The division to check. May also be
            ``None`` or an SRP request.
        :type division: :py:class:`Division` or :py:class:`~.models.Request`
        :rtype: bool
        """
        # A single PermissionType may be passed directly; normalize it to a
        # one-element tuple so the in_() filter below works either way.
        if permissions in PermissionType.all:
            permissions = (permissions,)
        # admin permission includes the reviewer and payer privileges
        if PermissionType.admin not in permissions and \
                PermissionType.elevated.issuperset(permissions):
            if self.has_permission(PermissionType.admin, division_or_request):
                return True
        perms = self.permissions.filter(Permission.permission.in_(permissions))
        if division_or_request is not None:
            # requests have a 'division' attribute, so we check for that
            if hasattr(division_or_request, 'division'):
                division = division_or_request.division
            else:
                division = division_or_request
            perms = perms.filter_by(division=division)
        # EXISTS subquery: returns a single (bool,) row.
        return db.session.query(perms.exists()).all()[0][0]

    def _json(self, extended=False):
        # Cooperatively extend whatever dict representation a mixin higher in
        # the MRO produced (or start fresh if none does).
        try:
            parent = super(Entity, self)._json(extended)
        except AttributeError:
            parent = {}
        parent[u'name'] = self.name
        parent[u'source'] = self.authmethod
        return parent


class APIKey(db.Model, AutoID, AutoName, Timestamped):
    """Represents an API key for use with the :ref:`external-api`."""

    #: The id of the :py:class:`User` this key belongs to.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    #: The :py:class:`User` this key belongs to.
    user = db.relationship('User', back_populates='api_keys',
            cascade='save-update,merge,refresh-expire,expunge')

    #: The raw key data.
    key = db.Column(db.LargeBinary(32), nullable=False)

    def __init__(self, user):
        self.user = user
        # 32 bytes of CSPRNG data; matches the LargeBinary(32) column.
        self.key = os.urandom(32)

    @property
    def hex_key(self):
        """The key data in a modified base-64 format safe for use in URLs."""
        # '=' padding is swapped for ',' so the key survives URL routing.
        return urlsafe_b64encode(self.key).decode('utf-8').replace(u'=', u',')

    def _json(self, extended=False):
        try:
            parent = super(APIKey, self)._json(extended)
        except AttributeError:
            parent = {}
        parent[u'key'] = self.hex_key
        parent[u'timestamp'] = self.timestamp
        return parent


class User(Entity):
    """User base class.

    Represents users who can submit, review and/or pay out requests. It also
    supplies a number of convenience methods for subclasses.
    """

    id = db.Column(db.Integer, db.ForeignKey('entity.id'), primary_key=True)

    #: If the user is an administrator. This allows the user to create and
    #: administer divisions.
    admin = db.Column(db.Boolean(name='admin_bool'), nullable=False,
            default=False)

    #: :py:class:`~.Request`\s this user has submitted.
    requests = db.relationship(Request, back_populates='submitter')

    #: :py:class:`~.Action`\s this user has performed on requests.
    actions = db.relationship(Action, back_populates='user')

    #: :py:class:`~.Pilot`\s associated with this user.
    pilots = db.relationship('Pilot', back_populates='user',
            collection_class=set)

    #: :py:class:`Group`\s this user is a member of
    groups = db.relationship('Group', secondary=users_groups,
            back_populates='users', collection_class=set)

    #: :py:class:`Note`\s written about this user, newest first.
    notes = db.relationship('Note', back_populates='user',
            order_by='desc(Note.timestamp)', foreign_keys='Note.user_id')

    #: :py:class:`Note`\s this user has written about other users, newest
    #: first.
    notes_made = db.relationship('Note', back_populates='noter',
            order_by='desc(Note.timestamp)', foreign_keys='Note.noter_id')

    #: :py:class:`APIKey`\s belonging to this user.
    api_keys = db.relationship(APIKey, back_populates='user')

    @hybrid_property
    def permissions(self):
        """All :py:class:`Permission` objects associated with this user."""
        # Union of permissions granted directly to the user and those granted
        # to any group the user belongs to.
        groups = db.session.query(users_groups.c.group_id.label('group_id'))\
                .filter(users_groups.c.user_id==self.id).subquery()
        group_perms = db.session.query(Permission)\
                .join(groups, groups.c.group_id==Permission.entity_id)
        user_perms = db.session.query(Permission)\
                .join(User)\
                .filter(User.id==self.id)
        perms = user_perms.union(group_perms)
        return perms

    @permissions.expression
    def permissions(cls):
        # Class-level (SQL expression) counterpart of the hybrid property
        # above, for use inside larger queries.
        groups = db.select([users_groups.c.group_id])\
                .where(users_groups.c.user_id==cls.id).alias()
        group_permissions = db.select([Permission])\
                .where(Permission.entity_id.in_(groups)).alias()
        user_permissions = db.select([Permission])\
                .where(Permission.entity_id==cls.id)
        return user_permissions.union(group_permissions)

    @property
    def is_authenticated(self):
        """Part of the interface for Flask-Login."""
        return True

    @property
    def is_active(self):
        """Part of the interface for Flask-Login."""
        return True

    @property
    def is_anonymous(self):
        """Part of the interface for Flask-Login."""
        return False

    def get_id(self):
        """Part of the interface for Flask-Login."""
        return str(self.id)

    def submit_divisions(self):
        """Get a list of the divisions this user is able to submit requests
        to.

        :returns: A list of tuples. The tuples are in the form (division.id,
            division.name)
        :rtype: list
        """
        submit_perms = self.permissions\
                .filter_by(permission=PermissionType.submit)\
                .subquery()
        divisions = db.session.query(Division).join(submit_perms)\
                .order_by(Division.name)
        # Remove duplicates and sort divisions by name
        choices = []
        for name, group in groupby(divisions, lambda d: d.name):
            choices.append((six.next(group).id, name))
        return choices

    def _json(self, extended=False):
        try:
            parent = super(User, self)._json(extended)
        except AttributeError:
            parent = {}
        parent[u'href'] = url_for('api.user_detail', user_id=self.id)
        return parent


class Note(db.Model, AutoID, Timestamped, AutoName):
    """A note about a particular :py:class:`User`."""

    #: The id of the :py:class:`User` this note refers to.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    #: The :py:class:`User` this note refers to.
    user = db.relationship(User, back_populates='notes',
            cascade='save-update,merge,refresh-expire,expunge',
            foreign_keys=[user_id])

    #: The actual contents of this note.
    content = db.Column(db.Text(convert_unicode=True), nullable=False)

    #: The id of the :py:class:`User` who wrote this note.
    noter_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    #: The author of this note.
    noter = db.relationship(User, back_populates='notes_made',
            cascade='save-update,merge,refresh-expire,expunge',
            foreign_keys=[noter_id])

    def __init__(self, user, noter, note):
        self.user = user
        self.noter = noter
        self.content = ensure_unicode(note)


@unistr
class Pilot(db.Model, AutoID, AutoName):
    """Represents an in-game character."""

    # Character names in Eve are restricted to ASCII, but use unicode for
    # consistency with the rest of the database columns (and if they ever
    # decide to lift this restriction).
    #: The name of the character
    name = db.Column(db.String(150, convert_unicode=True), nullable=False)

    #: The id of the User this character belongs to.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=True)

    #: The User this character belongs to.
    user = db.relationship(User, back_populates='pilots')

    #: The Requests filed with lossmails from this character.
    requests = db.relationship(Request, back_populates='pilot',
            collection_class=list, order_by=Request.timestamp.desc())

    def __init__(self, user, name, id_):
        """Create a new Pilot instance.

        :param user: The user this character belongs to.
        :type user: :py:class:`~.User`
        :param str name: The name of this character.
        :param int id_: The CCP-given characterID number.
        """
        self.user = user
        self.name = ensure_unicode(name)
        # The CCP character ID is used directly as the primary key.
        self.id = id_

    def __repr__(self):
        return "{x.__class__.__name__}({x.user}, '{x.name}', {x.id})".format(
                x=self)

    def __unicode__(self):
        return self.name

    def _json(self, extended=False):
        try:
            parent = super(Pilot, self)._json(extended)
        except AttributeError:
            parent = {}
        parent[u'name'] = self.name
        if extended:
            parent[u'user'] = self.user
            parent[u'requests'] = self.requests
        return parent


class Group(Entity):
    """Base class for a group of users.

    Represents a group of users. Usable for granting permissions to submit,
    evaluate and pay.
    """

    id = db.Column(db.Integer, db.ForeignKey('entity.id'), primary_key=True)

    #: :py:class:`User` s that belong to this group.
    users = db.relationship(User, secondary=users_groups,
            back_populates='groups', collection_class=set)

    #: Synonym for :py:attr:`entity_permissions`
    permissions = db.synonym('entity_permissions')

    def _json(self, extended=False):
        try:
            parent = super(Group, self)._json(extended)
        except AttributeError:
            parent = {}
        parent[u'href'] = url_for('api.group_detail', group_id=self.id)
        if extended:
            parent[u'count'] = len(self.users)
        return parent


class Permission(db.Model, AutoID, AutoName):
    """Grants an :py:class:`Entity` one permission in a
    :py:class:`Division`.
    """

    # Each (division, entity, permission) triple may only be granted once.
    __table_args__ = (
            db.UniqueConstraint('division_id', 'entity_id', 'permission',
                    name='division_entity_permission'),
    )

    #: The id of the :py:class:`Division` this permission applies to.
    division_id = db.Column(db.Integer, db.ForeignKey('division.id'),
            nullable=False)

    #: The division this permission is granting access to
    division = db.relationship('Division',
            back_populates='division_permissions',
            cascade='save-update,merge,refresh-expire,expunge')

    #: The id of the :py:class:`Entity` being granted access.
    entity_id = db.Column(db.Integer, db.ForeignKey('entity.id'),
            nullable=False)

    #: The :py:class:`Entity` being granted access
    entity = db.relationship(Entity, back_populates='entity_permissions',
            cascade='save-update,merge,refresh-expire,expunge')

    #: The permission being granted.
    permission = db.Column(PermissionType.db_type(), nullable=False)

    def __init__(self, division, permission, entity):
        """Create a Permission object granting an entity access to a division.
        """
        self.division = division
        self.entity = entity
        self.permission = permission

    def __repr__(self):
        return ("{x.__class__.__name__}('{x.permission}', {x.entity}, "
                "{x.division})").format(x=self)


class TransformerRef(db.Model, AutoID, AutoName):
    """Stores associations between :py:class:`~.Transformer`\s and
    :py:class:`.Division`\s.
    """

    # Only one transformer per (division, attribute) pair.
    __table_args__ = (
            db.UniqueConstraint('division_id', 'attribute_name',
                    name='division_transformer'),
    )

    #: The attribute this transformer is applied to.
    attribute_name = db.Column(db.String(50), nullable=False)

    #: The transformer instance.
    # Force pickle protocol to version 2, to support the same DB across Py3 and
    # Py2.
    transformer = db.Column(db.PickleType(protocol=2), nullable=True)

    #: The id of the :py:class:`Division` the transformer is associated with.
    division_id = db.Column(db.Integer, db.ForeignKey('division.id'),
            nullable=False)

    #: The division the transformer is associated with
    division = db.relationship('Division',
            back_populates='division_transformers',
            cascade='save-update,merge,refresh-expire,expunge')

    @db.validates('transformer')
    def prune_null_transformers(self, attr, transformer):
        """Removes :py:class:`TransformerRef`\s when :py:attr:`.transformer`
        is removed.
        """
        # Detaching from the division lets the delete-orphan cascade on
        # Division.division_transformers reap this row.
        if transformer is None:
            self.division = None
        return transformer


@unistr
class Division(db.Model, AutoID, AutoName):
    """A reimbursement division.

    A division has (possibly non-intersecting) groups of people that can
    submit requests, review requests, and pay out requests.
    """

    #: The name of this division.
    name = db.Column(db.String(128, convert_unicode=True), nullable=False)

    #: All :py:class:`Permission`\s associated with this division.
    division_permissions = db.relationship(Permission,
            back_populates='division',
            cascade='all,delete-orphan',
            collection_class=set)

    #: :py:class:`Request` s filed under this division.
    requests = db.relationship(Request, back_populates='division',
            cascade='all,delete-orphan')

    #: :py:class:`TransformerRef`\s for this division, keyed by attribute
    #: name.
    division_transformers = db.relationship(TransformerRef,
            collection_class=attribute_mapped_collection('attribute_name'),
            back_populates='division',
            cascade='all,delete-orphan')

    #: A mapping of attribute names to :py:class:`~.transformer.Transformer`
    #: instances.
    transformers = association_proxy(
            'division_transformers', 'transformer',
            creator=lambda attr, trans: TransformerRef(attribute_name=attr,
                    transformer=trans))

    @property
    def permissions(self):
        """The permissions objects for this division, mapped via their
        permission names.
        """
        class _PermProxy(object):
            # Read-only mapping: a PermissionType key yields the set of
            # Permission rows in this division with that permission.
            def __init__(self, perms):
                self.perms = perms
            def __getitem__(self, key):
                return set(filter(lambda x: x.permission == key, self.perms))
        return _PermProxy(self.division_permissions)

    def __init__(self, name):
        self.name = ensure_unicode(name)

    def __repr__(self):
        return "{x.__class__.__name__}('{x.name}')".format(x=self)

    def __unicode__(self):
        return u"{}".format(self.name)

    def _json(self, extended=False):
        try:
            parent = super(Division, self)._json(extended)
        except AttributeError:
            parent = {}
        parent[u'href'] = url_for('divisions.get_division_details',
                division_id=self.id)
        parent[u'name'] = self.name
        if extended:
            # Group the granted entities by permission name for the API.
            entities = {}
            for perm in PermissionType.all:
                members = []
                for member in [p.entity for p in self.permissions[perm]]:
                    members.append(member._json(extended))
                entities[perm.name] = members
            parent[u'entities'] = entities
        return parent
PypiClean
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/node_modules/node-fetch/README.md
node-fetch ========== [![npm version][npm-image]][npm-url] [![build status][travis-image]][travis-url] [![coverage status][codecov-image]][codecov-url] [![install size][install-size-image]][install-size-url] [![Discord][discord-image]][discord-url] A light-weight module that brings `window.fetch` to Node.js (We are looking for [v2 maintainers and collaborators](https://github.com/bitinn/node-fetch/issues/567)) [![Backers][opencollective-image]][opencollective-url] <!-- TOC --> - [Motivation](#motivation) - [Features](#features) - [Difference from client-side fetch](#difference-from-client-side-fetch) - [Installation](#installation) - [Loading and configuring the module](#loading-and-configuring-the-module) - [Common Usage](#common-usage) - [Plain text or HTML](#plain-text-or-html) - [JSON](#json) - [Simple Post](#simple-post) - [Post with JSON](#post-with-json) - [Post with form parameters](#post-with-form-parameters) - [Handling exceptions](#handling-exceptions) - [Handling client and server errors](#handling-client-and-server-errors) - [Advanced Usage](#advanced-usage) - [Streams](#streams) - [Buffer](#buffer) - [Accessing Headers and other Meta data](#accessing-headers-and-other-meta-data) - [Extract Set-Cookie Header](#extract-set-cookie-header) - [Post data using a file stream](#post-data-using-a-file-stream) - [Post with form-data (detect multipart)](#post-with-form-data-detect-multipart) - [Request cancellation with AbortSignal](#request-cancellation-with-abortsignal) - [API](#api) - [fetch(url[, options])](#fetchurl-options) - [Options](#options) - [Class: Request](#class-request) - [Class: Response](#class-response) - [Class: Headers](#class-headers) - [Interface: Body](#interface-body) - [Class: FetchError](#class-fetcherror) - [License](#license) - [Acknowledgement](#acknowledgement) <!-- /TOC --> ## Motivation Instead of implementing `XMLHttpRequest` in Node.js to run browser-specific [Fetch polyfill](https://github.com/github/fetch), why not go from 
native `http` to `fetch` API directly? Hence, `node-fetch`, minimal code for a `window.fetch` compatible API on Node.js runtime. See Matt Andrews' [isomorphic-fetch](https://github.com/matthew-andrews/isomorphic-fetch) or Leonardo Quixada's [cross-fetch](https://github.com/lquixada/cross-fetch) for isomorphic usage (exports `node-fetch` for server-side, `whatwg-fetch` for client-side). ## Features - Stay consistent with `window.fetch` API. - Make conscious trade-off when following [WHATWG fetch spec][whatwg-fetch] and [stream spec](https://streams.spec.whatwg.org/) implementation details, document known differences. - Use native promise but allow substituting it with [insert your favorite promise library]. - Use native Node streams for body on both request and response. - Decode content encoding (gzip/deflate) properly and convert string output (such as `res.text()` and `res.json()`) to UTF-8 automatically. - Useful extensions such as timeout, redirect limit, response size limit, [explicit errors](ERROR-HANDLING.md) for troubleshooting. ## Difference from client-side fetch - See [Known Differences](LIMITS.md) for details. - If you happen to use a missing feature that `window.fetch` offers, feel free to open an issue. - Pull requests are welcomed too! ## Installation Current stable release (`2.x`) ```sh $ npm install node-fetch ``` ## Loading and configuring the module We suggest you load the module via `require` until the stabilization of ES modules in node: ```js const fetch = require('node-fetch'); ``` If you are using a Promise library other than native, set it through `fetch.Promise`: ```js const Bluebird = require('bluebird'); fetch.Promise = Bluebird; ``` ## Common Usage NOTE: The documentation below is up-to-date with `2.x` releases; see the [`1.x` readme](https://github.com/bitinn/node-fetch/blob/1.x/README.md), [changelog](https://github.com/bitinn/node-fetch/blob/1.x/CHANGELOG.md) and [2.x upgrade guide](UPGRADE-GUIDE.md) for the differences. 
#### Plain text or HTML ```js fetch('https://github.com/') .then(res => res.text()) .then(body => console.log(body)); ``` #### JSON ```js fetch('https://api.github.com/users/github') .then(res => res.json()) .then(json => console.log(json)); ``` #### Simple Post ```js fetch('https://httpbin.org/post', { method: 'POST', body: 'a=1' }) .then(res => res.json()) // expecting a json response .then(json => console.log(json)); ``` #### Post with JSON ```js const body = { a: 1 }; fetch('https://httpbin.org/post', { method: 'post', body: JSON.stringify(body), headers: { 'Content-Type': 'application/json' }, }) .then(res => res.json()) .then(json => console.log(json)); ``` #### Post with form parameters `URLSearchParams` is available in Node.js as of v7.5.0. See [official documentation](https://nodejs.org/api/url.html#url_class_urlsearchparams) for more usage methods. NOTE: The `Content-Type` header is only set automatically to `x-www-form-urlencoded` when an instance of `URLSearchParams` is given as such: ```js const { URLSearchParams } = require('url'); const params = new URLSearchParams(); params.append('a', 1); fetch('https://httpbin.org/post', { method: 'POST', body: params }) .then(res => res.json()) .then(json => console.log(json)); ``` #### Handling exceptions NOTE: 3xx-5xx responses are *NOT* exceptions and should be handled in `then()`; see the next section for more information. Adding a catch to the fetch promise chain will catch *all* exceptions, such as errors originating from node core libraries, network errors and operational errors, which are instances of FetchError. See the [error handling document](ERROR-HANDLING.md) for more details. 
```js fetch('https://domain.invalid/') .catch(err => console.error(err)); ``` #### Handling client and server errors It is common to create a helper function to check that the response contains no client (4xx) or server (5xx) error responses: ```js function checkStatus(res) { if (res.ok) { // res.status >= 200 && res.status < 300 return res; } else { throw MyCustomError(res.statusText); } } fetch('https://httpbin.org/status/400') .then(checkStatus) .then(res => console.log('will not get here...')) ``` ## Advanced Usage #### Streams The "Node.js way" is to use streams when possible: ```js fetch('https://assets-cdn.github.com/images/modules/logos_page/Octocat.png') .then(res => { const dest = fs.createWriteStream('./octocat.png'); res.body.pipe(dest); }); ``` In Node.js 14 you can also use async iterators to read `body`; however, be careful to catch errors -- the longer a response runs, the more likely it is to encounter an error. ```js const fetch = require('node-fetch'); const response = await fetch('https://httpbin.org/stream/3'); try { for await (const chunk of response.body) { console.dir(JSON.parse(chunk.toString())); } } catch (err) { console.error(err.stack); } ``` In Node.js 12 you can also use async iterators to read `body`; however, async iterators with streams did not mature until Node.js 14, so you need to do some extra work to ensure you handle errors directly from the stream and wait on it response to fully close. ```js const fetch = require('node-fetch'); const read = async body => { let error; body.on('error', err => { error = err; }); for await (const chunk of body) { console.dir(JSON.parse(chunk.toString())); } return new Promise((resolve, reject) => { body.on('close', () => { error ? reject(error) : resolve(); }); }); }; try { const response = await fetch('https://httpbin.org/stream/3'); await read(response.body); } catch (err) { console.error(err.stack); } ``` #### Buffer If you prefer to cache binary data in full, use buffer(). 
(NOTE: `buffer()` is a `node-fetch`-only API) ```js const fileType = require('file-type'); fetch('https://assets-cdn.github.com/images/modules/logos_page/Octocat.png') .then(res => res.buffer()) .then(buffer => fileType(buffer)) .then(type => { /* ... */ }); ``` #### Accessing Headers and other Meta data ```js fetch('https://github.com/') .then(res => { console.log(res.ok); console.log(res.status); console.log(res.statusText); console.log(res.headers.raw()); console.log(res.headers.get('content-type')); }); ``` #### Extract Set-Cookie Header Unlike browsers, you can access raw `Set-Cookie` headers manually using `Headers.raw()`. This is a `node-fetch` only API. ```js fetch(url).then(res => { // returns an array of values, instead of a string of comma-separated values console.log(res.headers.raw()['set-cookie']); }); ``` #### Post data using a file stream ```js const { createReadStream } = require('fs'); const stream = createReadStream('input.txt'); fetch('https://httpbin.org/post', { method: 'POST', body: stream }) .then(res => res.json()) .then(json => console.log(json)); ``` #### Post with form-data (detect multipart) ```js const FormData = require('form-data'); const form = new FormData(); form.append('a', 1); fetch('https://httpbin.org/post', { method: 'POST', body: form }) .then(res => res.json()) .then(json => console.log(json)); // OR, using custom headers // NOTE: getHeaders() is non-standard API const form = new FormData(); form.append('a', 1); const options = { method: 'POST', body: form, headers: form.getHeaders() } fetch('https://httpbin.org/post', options) .then(res => res.json()) .then(json => console.log(json)); ``` #### Request cancellation with AbortSignal > NOTE: You may cancel streamed requests only on Node >= v8.0.0 You may cancel requests with `AbortController`. A suggested implementation is [`abort-controller`](https://www.npmjs.com/package/abort-controller). 
An example of timing out a request after 150ms could be achieved as the following: ```js import AbortController from 'abort-controller'; const controller = new AbortController(); const timeout = setTimeout( () => { controller.abort(); }, 150, ); fetch(url, { signal: controller.signal }) .then(res => res.json()) .then( data => { useData(data) }, err => { if (err.name === 'AbortError') { // request was aborted } }, ) .finally(() => { clearTimeout(timeout); }); ``` See [test cases](https://github.com/bitinn/node-fetch/blob/master/test/test.js) for more examples. ## API ### fetch(url[, options]) - `url` A string representing the URL for fetching - `options` [Options](#fetch-options) for the HTTP(S) request - Returns: <code>Promise&lt;[Response](#class-response)&gt;</code> Perform an HTTP(S) fetch. `url` should be an absolute url, such as `https://example.com/`. A path-relative URL (`/file/under/root`) or protocol-relative URL (`//can-be-http-or-https.com/`) will result in a rejected `Promise`. <a id="fetch-options"></a> ### Options The default values are shown after each option key. ```js { // These properties are part of the Fetch Standard method: 'GET', headers: {}, // request headers. format is the identical to that accepted by the Headers constructor (see below) body: null, // request body. can be null, a string, a Buffer, a Blob, or a Node.js Readable stream redirect: 'follow', // set to `manual` to extract redirect headers, `error` to reject redirect signal: null, // pass an instance of AbortSignal to optionally abort requests // The following properties are node-fetch extensions follow: 20, // maximum redirect count. 0 to not follow redirect timeout: 0, // req/res timeout in ms, it resets on redirect. 0 to disable (OS limit applies). Signal is recommended instead. compress: true, // support gzip/deflate content encoding. false to disable size: 0, // maximum response body size in bytes. 
0 to disable agent: null // http(s).Agent instance or function that returns an instance (see below) } ``` ##### Default Headers If no values are set, the following request headers will be sent automatically: Header | Value ------------------- | -------------------------------------------------------- `Accept-Encoding` | `gzip,deflate` _(when `options.compress === true`)_ `Accept` | `*/*` `Connection` | `close` _(when no `options.agent` is present)_ `Content-Length` | _(automatically calculated, if possible)_ `Transfer-Encoding` | `chunked` _(when `req.body` is a stream)_ `User-Agent` | `node-fetch/1.0 (+https://github.com/bitinn/node-fetch)` Note: when `body` is a `Stream`, `Content-Length` is not set automatically. ##### Custom Agent The `agent` option allows you to specify networking related options which are out of the scope of Fetch, including and not limited to the following: - Support self-signed certificate - Use only IPv4 or IPv6 - Custom DNS Lookup See [`http.Agent`](https://nodejs.org/api/http.html#http_new_agent_options) for more information. In addition, the `agent` option accepts a function that returns `http`(s)`.Agent` instance given current [URL](https://nodejs.org/api/url.html), this is useful during a redirection chain across HTTP and HTTPS protocol. ```js const httpAgent = new http.Agent({ keepAlive: true }); const httpsAgent = new https.Agent({ keepAlive: true }); const options = { agent: function (_parsedURL) { if (_parsedURL.protocol == 'http:') { return httpAgent; } else { return httpsAgent; } } } ``` <a id="class-request"></a> ### Class: Request An HTTP(S) request containing information about URL, method, headers, and the body. This class implements the [Body](#iface-body) interface. 
Due to the nature of Node.js, the following properties are not implemented at this moment: - `type` - `destination` - `referrer` - `referrerPolicy` - `mode` - `credentials` - `cache` - `integrity` - `keepalive` The following node-fetch extension properties are provided: - `follow` - `compress` - `counter` - `agent` See [options](#fetch-options) for exact meaning of these extensions. #### new Request(input[, options]) <small>*(spec-compliant)*</small> - `input` A string representing a URL, or another `Request` (which will be cloned) - `options` [Options][#fetch-options] for the HTTP(S) request Constructs a new `Request` object. The constructor is identical to that in the [browser](https://developer.mozilla.org/en-US/docs/Web/API/Request/Request). In most cases, directly `fetch(url, options)` is simpler than creating a `Request` object. <a id="class-response"></a> ### Class: Response An HTTP(S) response. This class implements the [Body](#iface-body) interface. The following properties are not implemented in node-fetch at this moment: - `Response.error()` - `Response.redirect()` - `type` - `trailer` #### new Response([body[, options]]) <small>*(spec-compliant)*</small> - `body` A `String` or [`Readable` stream][node-readable] - `options` A [`ResponseInit`][response-init] options dictionary Constructs a new `Response` object. The constructor is identical to that in the [browser](https://developer.mozilla.org/en-US/docs/Web/API/Response/Response). Because Node.js does not implement service workers (for which this class was designed), one rarely has to construct a `Response` directly. #### response.ok <small>*(spec-compliant)*</small> Convenience property representing if the request ended normally. Will evaluate to true if the response status was greater than or equal to 200 but smaller than 300. #### response.redirected <small>*(spec-compliant)*</small> Convenience property representing if the request has been redirected at least once. 
Will evaluate to true if the internal redirect counter is greater than 0. <a id="class-headers"></a> ### Class: Headers This class allows manipulating and iterating over a set of HTTP headers. All methods specified in the [Fetch Standard][whatwg-fetch] are implemented. #### new Headers([init]) <small>*(spec-compliant)*</small> - `init` Optional argument to pre-fill the `Headers` object Construct a new `Headers` object. `init` can be either `null`, a `Headers` object, an key-value map object or any iterable object. ```js // Example adapted from https://fetch.spec.whatwg.org/#example-headers-class const meta = { 'Content-Type': 'text/xml', 'Breaking-Bad': '<3' }; const headers = new Headers(meta); // The above is equivalent to const meta = [ [ 'Content-Type', 'text/xml' ], [ 'Breaking-Bad', '<3' ] ]; const headers = new Headers(meta); // You can in fact use any iterable objects, like a Map or even another Headers const meta = new Map(); meta.set('Content-Type', 'text/xml'); meta.set('Breaking-Bad', '<3'); const headers = new Headers(meta); const copyOfHeaders = new Headers(headers); ``` <a id="iface-body"></a> ### Interface: Body `Body` is an abstract interface with methods that are applicable to both `Request` and `Response` classes. The following methods are not yet implemented in node-fetch at this moment: - `formData()` #### body.body <small>*(deviation from spec)*</small> * Node.js [`Readable` stream][node-readable] Data are encapsulated in the `Body` object. Note that while the [Fetch Standard][whatwg-fetch] requires the property to always be a WHATWG `ReadableStream`, in node-fetch it is a Node.js [`Readable` stream][node-readable]. #### body.bodyUsed <small>*(spec-compliant)*</small> * `Boolean` A boolean property for if this body has been consumed. Per the specs, a consumed body cannot be used again. 
#### body.arrayBuffer()
#### body.blob()
#### body.json()
#### body.text()

<small>*(spec-compliant)*</small>

* Returns: <code>Promise</code>

Consume the body and return a promise that will resolve to one of these formats.

#### body.buffer()

<small>*(node-fetch extension)*</small>

* Returns: <code>Promise&lt;Buffer&gt;</code>

Consume the body and return a promise that will resolve to a Buffer.

#### body.textConverted()

<small>*(node-fetch extension)*</small>

* Returns: <code>Promise&lt;String&gt;</code>

Identical to `body.text()`, except instead of always converting to UTF-8, encoding sniffing will be performed and text converted to UTF-8 if possible.

(This API requires an optional dependency of the npm package [encoding](https://www.npmjs.com/package/encoding), which you need to install manually. `webpack` users may see [a warning message](https://github.com/bitinn/node-fetch/issues/412#issuecomment-379007792) due to this optional dependency.)

<a id="class-fetcherror"></a>
### Class: FetchError

<small>*(node-fetch extension)*</small>

An operational error in the fetching process. See [ERROR-HANDLING.md][] for more info.

<a id="class-aborterror"></a>
### Class: AbortError

<small>*(node-fetch extension)*</small>

An Error thrown when the request is aborted in response to an `AbortSignal`'s `abort` event. It has a `name` property of `AbortError`. See [ERROR-HANDLING.md][] for more info.

## Acknowledgement

Thanks to [github/fetch](https://github.com/github/fetch) for providing a solid implementation reference.

`node-fetch` v1 was maintained by [@bitinn](https://github.com/bitinn); v2 was maintained by [@TimothyGu](https://github.com/timothygu), [@bitinn](https://github.com/bitinn) and [@jimmywarting](https://github.com/jimmywarting); v2 readme is written by [@jkantr](https://github.com/jkantr).
## License MIT [npm-image]: https://flat.badgen.net/npm/v/node-fetch [npm-url]: https://www.npmjs.com/package/node-fetch [travis-image]: https://flat.badgen.net/travis/bitinn/node-fetch [travis-url]: https://travis-ci.org/bitinn/node-fetch [codecov-image]: https://flat.badgen.net/codecov/c/github/bitinn/node-fetch/master [codecov-url]: https://codecov.io/gh/bitinn/node-fetch [install-size-image]: https://flat.badgen.net/packagephobia/install/node-fetch [install-size-url]: https://packagephobia.now.sh/result?p=node-fetch [discord-image]: https://img.shields.io/discord/619915844268326952?color=%237289DA&label=Discord&style=flat-square [discord-url]: https://discord.gg/Zxbndcm [opencollective-image]: https://opencollective.com/node-fetch/backers.svg [opencollective-url]: https://opencollective.com/node-fetch [whatwg-fetch]: https://fetch.spec.whatwg.org/ [response-init]: https://fetch.spec.whatwg.org/#responseinit [node-readable]: https://nodejs.org/api/stream.html#stream_readable_streams [mdn-headers]: https://developer.mozilla.org/en-US/docs/Web/API/Headers [LIMITS.md]: https://github.com/bitinn/node-fetch/blob/master/LIMITS.md [ERROR-HANDLING.md]: https://github.com/bitinn/node-fetch/blob/master/ERROR-HANDLING.md [UPGRADE-GUIDE.md]: https://github.com/bitinn/node-fetch/blob/master/UPGRADE-GUIDE.md
PypiClean
/CC-dbgen-0.2.0.tar.gz/CC-dbgen-0.2.0/dbgen/scripts/Pure/Graph/quickgraph.py
from typing import Tuple,List,Dict
from gpaw import GPAW #type: ignore
from gpaw.utilities import h2gpts #type: ignore
from ase.io import read,write #type: ignore
from ase import Atoms,Atom #type: ignore
from ase.units import Bohr #type: ignore
import os
import tempfile
from collections import defaultdict
import numpy as np #type: ignore
from json import dumps
# NOTE(review): `read` and `defaultdict` are imported but never used here;
# left in place since only part of the project is visible.


def quickgraph(atoms: Atoms) -> str:
    """Run a quick LCAO GPAW calculation on *atoms*, hand the all-electron
    density to Chargemol (DDEC6), parse the resulting bond-order file and
    return the surviving bonds serialized as a JSON string.

    A bond survives the filter when its DDEC6 bond order exceeds 0.04 and
    only one direction of each pair (fromNode <= toNode) is kept.

    Side effects: changes the process working directory to a fresh temp dir,
    writes 'total_density.cube' there and invokes the external binary named
    by the CHARGEMOL_BINARY environment variable.

    :param atoms: ASE Atoms object to analyze
    :return: JSON string, a list of [fromNode, toNode, bondorder,
             pbc_x, pbc_y, pbc_z] entries, one per retained bond
    """
    # Cheap fixed-density LCAO run: convergence thresholds are disabled
    # (inf) because only the density on the grid is needed, not the energy.
    calc = GPAW(mode='lcao', basis='dzp'
                , gpts=h2gpts(0.15, atoms.get_cell(), idiv=8)
                , txt='log'
                , fixdensity=True
                , convergence={'energy': float('inf')
                               , 'density': float('inf')})
    stordir = tempfile.mkdtemp()
    os.chdir(stordir)  # Chargemol reads/writes its files in the CWD
    atoms.set_calculator(calc)
    atoms.get_potential_energy()  # triggers the actual GPAW calculation
    # GPAW returns e/Bohr^3; convert to e/Angstrom^3 for the cube file
    density = calc.get_all_electron_density() * Bohr**3
    write(stordir + '/total_density.cube', atoms, data=density)
    # Chargemol job-control content: DDEC6 charges, fully periodic, with BOs
    job_control = '\n'.join(['<net charge>', "0.0", "</net charge>"
                             , "<periodicity along A, B, and C vectors>"
                             , ".true.", ".true.", ".true."
                             , "</periodicity along A, B, and C vectors>"
                             , '<compute BOs>', '.true.', '</compute BOs>'
                             , '<atomic densities directory complete path>'
                             , os.environ['CHARGEMOL_DENSITIES']
                             , '</atomic densities directory complete path>'
                             , '<charge type>', 'DDEC6', '</charge type>'])
    # NOTE(review): job_control is built but never written to a file before
    # the binary is invoked -- confirm Chargemol picks up its settings some
    # other way, otherwise a write of job_control.txt is missing here.
    os.system(os.environ['CHARGEMOL_BINARY'])
    ###############################################################

    def parse_line(line: str) -> Tuple[int, float, List[int]]:
        """ Get bonded atom, bond order, and offset """
        assert line[:16] == ' Bonded to the (', "No parse line ->" + line + '<- %d' % len(line)
        offStr, indStr, boStr = line[16:].split(')')
        offset = [int(x) for x in offStr.split(',')]  # Chunk containing offset info
        ind = int(indStr.split()[-3]) - 1  # Chunk containing index info (chargemol starts at 1, not 0)
        bo = float(boStr.split()[4])  # Chunk containing B.O. info
        return (ind, bo, offset)

    class PotentialEdge(object):
        """ Container for information we need to decide later if it's a
            graph-worthy bond. This class is nothing more than a dictionary. """

        def __init__(self
                     , fromNode: int
                     , toNode: int
                     , bond_order: float
                     , offset: List[int]
                     ) -> None:
            self.fromNode = fromNode
            self.toNode = toNode
            self.bondorder = bond_order
            self.pbc_x = offset[0]
            self.pbc_y = offset[1]
            self.pbc_z = offset[2]

    class BondOrderSection(object):
        """ Process one section of the Bond Order output of Chargemol """

        def __init__(self
                     , ind: int
                     , raw_lines: List[str]
                     , pbcdict: dict
                     ) -> None:
            self.ind = ind
            self.bonds = [parse_line(x) for x in raw_lines]
            self.pbcdict = pbcdict

        def _relative_shift(self
                            , i: int
                            , j: int
                            ) -> np.array:
            """ Given a pbc_dict and two indices, return the original pbc
                shift for a bond from i to j """
            pi, pj = [np.array(self.pbcdict[x]) for x in [i, j]]
            return pj - pi

        def makeEdge(self
                     , tup: Tuple[int, float, List[int]]
                     ) -> PotentialEdge:
            """ Creates an Edge instance from the result of a parsed
                Bond Order log line """
            (toInd, bo, offset) = tup
            fromInd = self.ind
            # correct for WRAPPING atoms
            offset = (np.array(offset) + self._relative_shift(fromInd, toInd)).tolist()
            return PotentialEdge(fromInd, toInd, bo, offset)

        def make_edges(self) -> List[PotentialEdge]:
            """Apply edgemaker to result of parsing logfile lines"""
            return [self.makeEdge(b) for b in self.bonds]

    def mk_pbc_dict(atoms: Atoms
                    ) -> Dict[int, Tuple[int, int, int]]:
        """Map each atom index to a (-1/0/1, -1/0/1, -1/0/1) tuple saying in
        which periodic image its unwrapped scaled position lies."""

        def g(tup: tuple) -> Tuple[int, int, int]:
            """ Helper function to yield tuples for pbc_dict """

            def f(x: float) -> int:
                """Classify one scaled coordinate into -1 / 0 / +1 image."""
                if x < 0:
                    return -1
                elif x < 1:
                    return 0
                else:
                    return 1
            x, y, z = tup
            return (f(x), f(y), f(z))

        scaled_pos = atoms.get_scaled_positions(wrap=False).tolist()
        scaled_pos_ = zip(range(len(atoms)), scaled_pos)
        pbc_dict = {i: g(p) for i, p in scaled_pos_}
        return pbc_dict

    def parse_chargemol_pbc(header_lines: List[str]
                            , cell: List[float]
                            ) -> Dict[int, Tuple[int, int, int]]:
        """Rebuild an Atoms object from the Chargemol xyz header and return
        its PBC-image dict (same format as mk_pbc_dict)."""
        atoms = Atoms(cell=cell)
        for i, l in enumerate(header_lines[2:]):
            try:
                if '*' in l:
                    l = l.replace('*', '') + ' ???'  # Asterisks are touching the z value
                s, x, y, z, _ = l.split()
                p = [float(q) for q in [x, y, z]]
                atoms.append(Atom(s, position=p))
            except Exception as e:
                pass  # malformed header lines are silently skipped
        return mk_pbc_dict(atoms)

    def dict_diff(d1: Dict[int, np.array]
                  , d2: Dict[int, np.array]
                  ) -> Dict[int, np.array]:
        """Element-wise d2[i] - d1[i] for every key of d1."""
        return {i: np.array(d2[i]) - np.array(d1[i]) for i in d1.keys()}

    header, content = [], []  # type: Tuple[List[str],List[str]]
    head_flag, counter = True, -1  # Initialize
    filepath = stordir + '/DDEC6_even_tempered_bond_orders.xyz'  # File to parse
    pbcdict = mk_pbc_dict(atoms)  # the 'true' PBC coordinate of each atom
    filtedges = []
    with open(filepath, 'r') as f:
        for line in f:
            if not (line.isspace() or line[:4] == ' 201'):  # remove blank lines and calendar date
                if line[1] == '=':
                    head_flag = False  # no longer in header section
                    chargemol_pbc = parse_chargemol_pbc(header, atoms.get_cell())  # PBC of chargemol atoms
                elif head_flag:
                    header.append(line)  # we're still in the header section
                elif 'Printing' in line:  # we're describing a new atom
                    content = []  # reset buffer content
                    counter += 1  # update index of our from_index atom
                elif 'sum' in line:  # summary line at end of a section
                    dic = dict_diff(pbcdict, chargemol_pbc)
                    edgs = BondOrderSection(counter, content, dic).make_edges()
                    filtedges.extend([e for e in edgs
                                      if (e.bondorder > 0.04) and (e.fromNode <= e.toNode)])
                else:
                    content.append(line)  # business-as-usual, add to buffer
    # FIX: dict_values is not JSON-serializable -- json.dumps used to raise
    # TypeError here; materialize each edge's attribute values as a list.
    return dumps([list(e.__dict__.values()) for e in filtedges])


if __name__ == '__main__':
    print("GIVE UP")
PypiClean
/ApiamticPackage5-1.5.6-py3-none-any.whl/swaggerpetstore/models/pet.py
from swaggerpetstore.api_helper import APIHelper
from swaggerpetstore.models.category import Category
from swaggerpetstore.models.tag import Tag


class Pet(object):
    """Representation of the 'Pet' API model.

    Attributes:
        id (long|int): numeric identifier of the pet.
        category (Category): category the pet belongs to.
        name (string): name of the pet.
        photo_urls (list of string): URLs of the pet's photos.
        tags (list of Tag): tags attached to the pet.
        status (StatusEnum): pet status in the store

    """

    # Mapping from Model property names to API property names
    _names = {
        "name": 'name',
        "photo_urls": 'photoUrls',
        "id": 'id',
        "category": 'category',
        "tags": 'tags',
        "status": 'status'
    }

    # Properties that may be absent from the payload
    _optionals = [
        'id',
        'category',
        'tags',
        'status',
    ]

    def __init__(self,
                 name=None,
                 photo_urls=None,
                 id=APIHelper.SKIP,
                 category=APIHelper.SKIP,
                 tags=APIHelper.SKIP,
                 status=APIHelper.SKIP):
        """Constructor for the Pet class.

        Optional members default to the APIHelper.SKIP sentinel and are only
        stored on the instance when a real value was supplied; the attribute
        assignment order is deliberately kept stable.
        """
        self._store_optional('id', id)
        self._store_optional('category', category)
        self.name = name
        self.photo_urls = photo_urls
        self._store_optional('tags', tags)
        self._store_optional('status', status)

    def _store_optional(self, attribute, value):
        """Assign *value* to *attribute* unless it is the SKIP sentinel."""
        if value is not APIHelper.SKIP:
            setattr(self, attribute, value)

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build a Pet instance from a deserialized API response.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None when no
                dictionary was given.

        """
        if dictionary is None:
            return None

        # Required fields: falsy payload values are normalized to None,
        # matching the behaviour of the generated SDK code.
        raw_name = dictionary.get("name")
        name = raw_name if raw_name else None
        raw_photo_urls = dictionary.get("photoUrls")
        photo_urls = raw_photo_urls if raw_photo_urls else None

        # Optional fields fall back to the SKIP sentinel when missing/falsy.
        raw_id = dictionary.get("id")
        id = raw_id if raw_id else APIHelper.SKIP
        if 'category' in dictionary.keys():
            category = Category.from_dictionary(dictionary.get('category'))
        else:
            category = APIHelper.SKIP
        raw_tags = dictionary.get('tags')
        if raw_tags is not None:
            tags = [Tag.from_dictionary(element) for element in raw_tags]
        else:
            tags = APIHelper.SKIP
        raw_status = dictionary.get("status")
        status = raw_status if raw_status else APIHelper.SKIP

        return cls(name, photo_urls, id, category, tags, status)
PypiClean
/ECmean4-0.1.4.tar.gz/ECmean4-0.1.4/ecmean/libs/support.py
import os
from glob import glob
import logging
import xarray as xr
import xesmf as xe
import numpy as np

from ecmean.libs.ncfixers import xr_preproc
from ecmean.libs.files import inifiles_priority
from ecmean.libs.areas import area_cell

loggy = logging.getLogger(__name__)

# candidate variable names searched when looking for a land-sea mask
atm_mask_names = ['lsm', 'sftlf']
oce_mask_names = ['lsm', 'sftof', 'mask_opensea']


class Supporter():
    """Support class for ECmean4: holds cell areas, land-sea masks and
    (optionally) xESMF interpolation weights for both the atmospheric and
    the oceanic component, as used by global mean and performance indices."""

    def __init__(self, component, atmdict, ocedict, areas=True, remap=False, targetgrid=None):
        """Set up masks, areas and interpolation (xESMF-based) for both
        atmospheric and oceanic component.

        :param component: dict with 'atm' and 'oce' component names
        :param atmdict: atmospheric config dict (area/mask file entries)
        :param ocedict: oceanic config dict (area/mask file entries)
        :param areas: if True, compute cell-area weights
        :param remap: if True (and targetgrid given), build regridders
        :param targetgrid: xarray grid to interpolate onto (optional)
        """

        # define the basics
        self.atmareafile = inifiles_priority(atmdict)
        self.oceareafile = inifiles_priority(ocedict)
        self.atmmaskfile = atmdict['maskfile']
        self.ocemaskfile = ocedict['maskfile']
        self.atmcomponent = component['atm']
        self.ocecomponent = component['oce']
        self.targetgrid = targetgrid

        # remapping default: no fixer/regridder until explicitly built
        self.ocefix, self.oceremap = None, None
        self.atmfix, self.atmremap = None, None

        # areas and mask default to None for the AMIP (atmosphere-only) case
        self.ocemask = None
        self.ocearea = None

        # loading and examining atmospheric file
        self.atmfield = self.load_area_field(self.atmareafile, comp='atm')
        self.atmgridtype = identify_grid(self.atmfield)
        # NOTE(review): duplicated "is is" in this runtime log string
        # (left untouched here, it is program output)
        loggy.warning('Atmosphere grid is is a %s grid!', self.atmgridtype)

        # compute atmospheric area weights
        if areas:
            self.atmarea = self.make_areas(self.atmgridtype, self.atmfield)

        # initialize the interpolation for atmosphere
        if self.targetgrid and remap:
            self.atmfix, self.atmremap = self.make_atm_interp_weights(self.atmfield)

        # init the land-sea mask for atm (mandatory)
        self.atmmask = self.make_atm_masks()

        # do the same if oceanic file is found
        if self.oceareafile:
            self.ocefield = self.load_area_field(self.oceareafile, comp='oce')
            self.ocegridtype = identify_grid(self.ocefield)
            loggy.warning('Oceanic grid is is a %s grid!', self.ocegridtype)

            # compute oceanic area
            if areas:
                self.ocearea = self.make_areas(self.ocegridtype, self.ocefield)

            # init the ocean interpolation
            if self.targetgrid and remap:
                self.ocefix, self.oceremap = self.make_oce_interp_weights(self.ocefield)

            # ocean mask
            if self.ocemaskfile:
                self.ocemask = self.make_oce_masks()
            else:
                # if it is missing, when remapping we can reuse the
                # (already remapped) atmospheric mask on the common grid
                if self.targetgrid and remap:
                    self.ocemask = self.atmmask
                # otherwise there is nothing we can do
                else:
                    loggy.warning('No mask available for oceanic vars, this might lead to inconsistent results...')
        else:
            loggy.warning("Ocereafile cannot be found, assuming this is an AMIP run")

    def make_atm_masks(self):
        """Create the land-sea mask for the atmospheric model and, when a
        regridder is configured, return it on the target grid."""

        # prepare ATM LSM
        loggy.info('maskatmfile is %s', self.atmmaskfile)
        self.atmmaskfile = check_file_exist(self.atmmaskfile)
        if self.atmcomponent == 'oifs':
            # create mask: opening a grib and loading only lsm to avoid
            # inconsistencies in the grib structure ->
            # see here https://github.com/ecmwf/cfgrib/issues/13
            mask = xr.open_mfdataset(
                self.atmmaskfile,
                engine="cfgrib",
                indexpath=None,
                filter_by_keys={
                    'shortName': 'lsm'},
                preprocess=xr_preproc)['lsm']
        elif self.atmcomponent in ['cmoratm', 'globo']:
            dmask = xr.open_mfdataset(self.atmmaskfile, preprocess=xr_preproc)
            # look for any of the known mask variable names
            mvar = [var for var in dmask.data_vars if var in atm_mask_names]
            # the case we cannot find the variable we are looking for in the required file
            if len(mvar)>0:
                mask = fix_mask_values(dmask[mvar[0]])
            else:
                raise KeyError(f"ERROR: make_atm_masks -> Cannot find mask variable in {self.atmmaskfile}")
        else:
            raise KeyError("ERROR: make_atm_masks -> Atmospheric component not supported!")

        # interp the mask if required (fixer first, then regridder)
        if self.atmremap is not None:
            if self.atmfix:
                mask = self.atmfix(mask, keep_attrs=True)
            mask = self.atmremap(mask, keep_attrs=True)
        return mask

    def make_oce_masks(self):
        """Create the land-sea mask for the oceanic model (CMIP only);
        returns None when no known mask variable is found in the file."""

        # prepare ocean LSM:
        loggy.info('maskocefile is %s', self.ocemaskfile)
        self.ocemaskfile = check_file_exist(self.ocemaskfile)
        if self.ocecomponent in ['cmoroce', 'nemo']:
            dmask = xr.open_mfdataset(self.ocemaskfile, preprocess=xr_preproc)
            mvar = [var for var in dmask.data_vars if var in oce_mask_names]
            # the case we cannot find the variable we are looking for in the required file
            if len(mvar)>0:
                mask = fix_mask_values(dmask[mvar[0]])
            else:
                loggy.warning('No mask array found in %s for oceanic vars, this might lead to inconsistent results...', self.ocemaskfile)
                return None
        else:
            raise KeyError("ERROR: make_oce_masks -> Oceanic component not supported!")

        # interp the mask if required
        if self.oceremap is not None:
            #if self.ocefix is not None:
            #    mask = self.ocefix(mask, keep_attrs=True)
            mask = self.oceremap(mask, keep_attrs=True)
        return mask

    def load_area_field(self, areafile, comp):
        """Open (and eagerly load) the file used for areas and interpolation.

        NOTE(review): the log message renders as e.g. 'atmmareafile is ...';
        'mareafile' looks like a typo in the runtime string -- confirm.
        """
        loggy.info(f'{comp}mareafile is ' + areafile)
        areafile = check_file_exist(areafile)
        return xr.open_mfdataset(areafile, preprocess=xr_preproc).load()

    def make_areas(self, gridtype, xfield):
        """Create weights for area operations. Minimal structure: prefer a
        precomputed area variable, fall back to computing it from the grid."""

        # this might be universal, but keep this as for supported components only
        if 'areacello' in xfield.data_vars:  # as oceanic CMOR case
            area = xfield['areacello']
        elif 'cell_area' in xfield.data_vars:  # as ECE4 NEMO case for nemo-initial-state.nc
            area = xfield['cell_area']
        elif 'e1t' in xfield.data_vars:  # ECE4 NEMO case for domaing_cfg.nc
            area = xfield['e1t'] * xfield['e2t']
        else:  # automatic solution, wish you luck!
            area = area_cell(xfield, gridtype)
        return area

    def make_atm_interp_weights(self, xfield):
        """Create atmospheric interpolator weights.

        For OIFS (reduced gaussian grid) a two-step scheme is used:
        nearest-neighbour onto a regular gaussian grid, then bilinear
        onto the target grid. Returns (fix, remap), fix may be None."""
        if self.atmcomponent == 'oifs':
            # this is to get lon and lat from the Equator
            xname = list(xfield.data_vars)[-1]
            m = xfield[xname].isel(time=0).load()
            # use numpy since it is faster
            g = np.unique(m.lat.data)
            f = np.unique(m.sel(cell=m.lat == g[int(len(g) / 2)]).lon.data)
            # this creates a gaussian non-reduced grid
            gaussian_regular = xr.Dataset({"lon": (["lon"], f), "lat": (["lat"], g)})
            # use nearest neighbour to remap to gaussian regular
            fix = xe.Regridder(
                xfield[xname],
                gaussian_regular,
                method="nearest_s2d",
                locstream_in=True,
                periodic=True)
            # create bilinear interpolator
            remap = xe.Regridder(
                fix(xfield[xname]),
                self.targetgrid,
                periodic=True,
                method="bilinear")
        elif self.atmcomponent in ['cmoratm', 'globo']:
            fix = None
            remap = xe.Regridder(
                xfield, self.targetgrid, periodic=True, method="bilinear")
        else:
            raise KeyError(
                "ERROR: Atm weights not defined for this component, this cannot be handled!")

        return fix, remap

    def make_oce_interp_weights(self, xfield):
        """Create oceanic interpolator weights.

        Picks a representative variable from the dataset, then builds a
        nearest-neighbour regridder for unstructured grids or a bilinear
        one otherwise. Returns (fix, remap), fix is always None here."""
        if self.ocecomponent in ['nemo', 'cmoroce']:
            if 'areacello' in xfield.data_vars:  # CMOR case
                xname = 'areacello'
            elif 'cell_area' in xfield.data_vars:  # ECE4 NEMO case for nemo-initial-state.nc
                xname = 'cell_area'
            else:  # tentative extraction
                xname = list(xfield.data_vars)[-1]
        else:
            raise KeyError(
                "ERROR: Oce weights not defined for this component, this cannot be handled!")

        if self.ocegridtype in ['unstructured']:
            # unstructured grid: only nearest neighbour is possible
            fix = None
            remap = xe.Regridder(
                xfield[xname],
                self.targetgrid,
                method="nearest_s2d",
                locstream_in=True,
                periodic=True)
        else:
            # regular or curvilinear grid: bilinear is safe
            fix = None
            remap = xe.Regridder(
                xfield[xname],
                self.targetgrid,
                method="bilinear",
                periodic=True,
                ignore_degenerate=True)

        return fix, remap


def check_file_exist(file):
    """Simple check to verify that a file to be loaded is defined and found
    on disk; globbing is applied, the first match is returned.

    NOTE(review): when the glob pattern matches nothing, glob(file)[0]
    raises IndexError rather than the KeyError used elsewhere -- confirm
    whether that is intended."""
    if file is None:
        raise KeyError("ERROR: file not defined!")
    file = glob(file)[0]
    if not os.path.isfile(file):
        raise KeyError(f"ERROR: {file} cannot be found")
    return file


def fix_mask_values(mask):
    """Normalize a land-sea mask whatever its input format.

    By convention in ECmean masks are 1 over land and 0 over the ocean:
    percentage masks (max > 99) are rescaled to [0, 1] and ocean-centric
    masks (mean > 0.5) are flipped. It might cause a bit of slowdown since
    we need to load the data."""

    if 'time' in mask.dims:
        mask = mask.isel(time=0).squeeze()

    # safety filler
    mask = mask.fillna(0)

    # if it is a percentage
    if mask.max() > 99:
        loggy.info('%s is being normalized', mask.name)
        mask = mask/100

    # if it is an ocean mask (heuristic: mostly ones means sea points are 1)
    if mask.mean() > 0.5:
        loggy.info('%s is being flipped', mask.name)
        mask = abs(1 - mask)

    return mask


def identify_grid(xfield):
    """Receiving an xarray object (DataArray or Dataset) investigates its
    coordinates and dimensions and provide the grid type (regular, gaussian,
    curvilinear, gaussian reduced, unstructured).
    It assumes that data is defined by 'lon' and 'lat' dimensions

    Args:
        xfield: xarray object carrying 'lon' and 'lat' coordinates

    Returns:
        string with the grid type

    Raises:
        ValueError: when no lon/lat coordinates can be found
    """

    # if the coordinates are lon/lat proceed
    if all(x in xfield.coords for x in ['lon', 'lat']):

        # if dimensions are lon/lat as well, this is a regular grid
        if all(x in xfield.dims for x in ['lon', 'lat']):
            lat = xfield.coords['lat']
            # if lat grid spacing is equal, is regular lonlat, otherwise gaussian
            if (lat[3] - lat[2]) == (lat[1] - lat[0]):
                gridtype = 'lonlat'
            else:
                gridtype = 'gaussian'
        else:
            # if the coords are 2D, we are curvilinear
            # NOTE(review): 'lon' is tested twice here; the second operand
            # was presumably meant to check 'lat' -- confirm before fixing
            if xfield.coords['lon'].ndim == 2 and xfield.coords['lon'].ndim == 2:
                gridtype = 'curvilinear'
            else:
                # check the first four elements of the grid (arbitrary)
                lat = xfield.coords['lat'].values[0:5]
                # if they are all the same, we have a gaussian reduced, else unstructured
                if (lat == lat[0]).all():
                    gridtype = 'gaussian_reduced'
                else:
                    gridtype = 'unstructured'
    else:
        raise ValueError("Cannot find any lon/lat dimension, aborting...")

    return gridtype
PypiClean
/CWR-API-0.0.40.tar.gz/CWR-API-0.0.40/cwr/parser/encoder/cwrjson.py
import json
import sys

from cwr.parser.encoder.dictionary import FileDictionaryEncoder
from cwr.parser.encoder.common import Encoder

"""
Classes for encoding CWR classes into JSON dictionaries.

It just consists of a single parser, the JSONEncoder, which delegates most of
the work to an instance of the CWRDictionaryEncoder.
"""

__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'


class JSONEncoder(Encoder):
    """
    Encodes a CWR class instance into a JSON.

    For this, first the instance is transformed into a dictionary, then dumped
    into a JSON.

    A bit of additional work is done for handling the dates, which are
    transformed into the ISO format (see :func:`_iso_handler`).
    """

    def __init__(self):
        super(JSONEncoder, self).__init__()
        # delegate that flattens the domain-model instance into a plain dict
        self._dict_encoder = FileDictionaryEncoder()

    def encode(self, entity):
        """
        Encodes the data, creating a JSON structure from an instance from the
        domain model.

        :param entity: the instance to encode
        :return: a JSON structure created from the received data
        """
        encoded = self._dict_encoder.encode(entity)

        if sys.version_info[0] == 2:
            # Python 2's json.dumps accepts an ``encoding`` keyword
            result = json.dumps(encoded, ensure_ascii=False,
                                default=_iso_handler, encoding='latin1')
        else:
            # For Python 3
            result = json.dumps(encoded, ensure_ascii=False,
                                default=_iso_handler)

        return result


def _unicode_handler(obj):
    """
    Transforms an object into its ISO format, if possible.

    NOTE(review): despite its name (and an earlier docstring that claimed a
    unicode-to-UTF-8 conversion), this function is an exact behavioral
    duplicate of :func:`_iso_handler` and is not referenced by this module.

    :param obj: object to transform into its ISO format
    :return: the ISO format of the object
    """
    try:
        result = obj.isoformat()
    except AttributeError:
        raise TypeError("Unserializable object {} of type {}".format(obj, type(obj)))
    return result


def _iso_handler(obj):
    """
    Transforms an object into it's ISO format, if possible.

    If the object can't be transformed, then an error is raised for the JSON
    parser.

    This is meant to be used on datetime instances, but will work with any
    object having a method called isoformat.

    :param obj: object to transform into it's ISO format
    :return: the ISO format of the object
    """
    if hasattr(obj, 'isoformat'):
        result = obj.isoformat()
    else:
        raise TypeError("Unserializable object {} of type {}".format(obj, type(obj)))
    return result
PypiClean
/Netfoll_TL-2.0.1-py3-none-any.whl/netfoll_tl/client/messageparse.py
import itertools
import re
import typing

from .. import helpers, utils
from ..tl import types

if typing.TYPE_CHECKING:
    from .telegramclient import TelegramClient


class MessageParseMethods:
    """Mixin providing message-text parsing and request/response mapping."""

    # region Public properties

    @property
    def parse_mode(self: 'TelegramClient'):
        """
        This property is the default parse mode used when sending messages.
        Defaults to `telethon.extensions.markdown`.

        It will always be either `None` or an object with
        ``parse`` and ``unparse`` methods.

        When setting a different value it should be one of:

        * Object with ``parse`` and ``unparse`` methods.
        * A ``callable`` to act as the parse method.
        * A `str` indicating the ``parse_mode``. For Markdown ``'md'``
          or ``'markdown'`` may be used. For HTML, ``'htm'`` or ``'html'``
          may be used.

        The ``parse`` method should be a function accepting a single
        parameter, the text to parse, and returning a tuple consisting
        of ``(parsed message str, [MessageEntity instances])``.

        The ``unparse`` method should be the inverse of ``parse`` such
        that ``assert text == unparse(*parse(text))``.

        See :tl:`MessageEntity` for allowed message entities.

        Example
            .. code-block:: python

                # Disabling default formatting
                client.parse_mode = None

                # Enabling HTML as the default format
                client.parse_mode = 'html'
        """
        return self._parse_mode

    @parse_mode.setter
    def parse_mode(self: 'TelegramClient', mode: str):
        # normalize the user-supplied mode (str/callable/object/None)
        self._parse_mode = utils.sanitize_parse_mode(mode)

    # endregion

    # region Private methods

    async def _replace_with_mention(self: 'TelegramClient', entities, i, user):
        """
        Helper method to replace ``entities[i]`` to mention ``user``,
        or do nothing if it can't be found.
        """
        try:
            entities[i] = types.InputMessageEntityMentionName(
                entities[i].offset, entities[i].length,
                await self.get_input_entity(user)
            )
            return True
        except (ValueError, TypeError):
            # the entity could not be resolved; caller decides what to do
            return False

    async def _parse_message_text(self: 'TelegramClient', message, parse_mode):
        """
        Returns a (parsed message, entities) tuple depending on ``parse_mode``.
        """
        # an empty tuple is the sentinel for "use the client default"
        if parse_mode == ():
            parse_mode = self._parse_mode
        else:
            parse_mode = utils.sanitize_parse_mode(parse_mode)

        if not parse_mode:
            return message, []

        original_message = message
        message, msg_entities = parse_mode.parse(message)
        if original_message and not message and not msg_entities:
            raise ValueError("Failed to parse message")

        # iterate in reverse so deleting entries does not shift pending indices
        for i in reversed(range(len(msg_entities))):
            e = msg_entities[i]
            if not e.length:
                # 0-length MessageEntity is no longer valid #3884.
                # Because the user can provide their own parser (with reasonable 0-length
                # entities), strip them here rather than fixing the built-in parsers.
                del msg_entities[i]
            elif isinstance(e, types.MessageEntityTextUrl):
                # URLs beginning with '@', '+' or 'tg://user?id=N' are mentions
                m = re.match(r'^@|\+|tg://user\?id=(\d+)', e.url)
                if m:
                    user = int(m.group(1)) if m.group(1) else e.url
                    is_mention = await self._replace_with_mention(msg_entities, i, user)
                    if not is_mention:
                        del msg_entities[i]
            elif isinstance(e, (types.MessageEntityMentionName,
                                types.InputMessageEntityMentionName)):
                is_mention = await self._replace_with_mention(msg_entities, i, e.user_id)
                if not is_mention:
                    del msg_entities[i]

        return message, msg_entities

    def _get_response_message(self: 'TelegramClient', request, result, input_chat):
        """
        Extracts the response message known a request and Update result.
        The request may also be the ID of the message to match.

        If ``request is None`` this method returns ``{id: message}``.

        If ``request.random_id`` is a list, this method returns a list too.
        """
        # normalize the result into (updates, known entities)
        if isinstance(result, types.UpdateShort):
            updates = [result.update]
            entities = {}
        elif isinstance(result, (types.Updates, types.UpdatesCombined)):
            updates = result.updates
            entities = {utils.get_peer_id(x): x
                        for x in itertools.chain(result.users, result.chats)}
        else:
            return None

        random_to_id = {}
        id_to_message = {}
        for update in updates:
            if isinstance(update, types.UpdateMessageID):
                random_to_id[update.random_id] = update.id

            elif isinstance(update, (
                    types.UpdateNewChannelMessage, types.UpdateNewMessage)):
                update.message._finish_init(self, entities, input_chat)

                # Pinning a message with `updatePinnedMessage` seems to
                # always produce a service message we can't map so return
                # it directly. The same happens for kicking users.
                #
                # It could also be a list (e.g. when sending albums).
                #
                # TODO this method is getting messier and messier as time goes on
                if hasattr(request, 'random_id') or utils.is_list_like(request):
                    id_to_message[update.message.id] = update.message
                else:
                    return update.message

            elif (isinstance(update, types.UpdateEditMessage)
                  and helpers._entity_type(request.peer) != helpers._EntityType.CHANNEL):
                update.message._finish_init(self, entities, input_chat)

                # Live locations use `sendMedia` but Telegram responds with
                # `updateEditMessage`, which means we won't have `id` field.
                if hasattr(request, 'random_id'):
                    id_to_message[update.message.id] = update.message
                elif request.id == update.message.id:
                    return update.message

            elif (isinstance(update, types.UpdateEditChannelMessage)
                  and utils.get_peer_id(request.peer) ==
                  utils.get_peer_id(update.message.peer_id)):
                if request.id == update.message.id:
                    update.message._finish_init(self, entities, input_chat)
                    return update.message

            elif isinstance(update, types.UpdateNewScheduledMessage):
                update.message._finish_init(self, entities, input_chat)
                # Scheduled IDs may collide with normal IDs. However, for a
                # single request there *shouldn't* be a mix between "some
                # scheduled and some not".
                id_to_message[update.message.id] = update.message

            elif isinstance(update, types.UpdateMessagePoll):
                if request.media.poll.id == update.poll_id:
                    # synthesize a message carrying the poll result
                    m = types.Message(
                        id=request.id,
                        peer_id=utils.get_peer(request.peer),
                        media=types.MessageMediaPoll(
                            poll=update.poll,
                            results=update.results
                        )
                    )
                    m._finish_init(self, entities, input_chat)
                    return m

        if request is None:
            return id_to_message

        random_id = request if isinstance(request, (int, list)) else getattr(request, 'random_id', None)
        if random_id is None:
            # Can happen when pinning a message does not actually produce a service message.
            self._log[__name__].warning(
                'No random_id in %s to map to, returning None message for %s', request, result)
            return None

        if not utils.is_list_like(random_id):
            msg = id_to_message.get(random_to_id.get(random_id))
            if not msg:
                self._log[__name__].warning(
                    'Request %s had missing message mapping %s', request, result)
            return msg

        try:
            return [id_to_message[random_to_id[rnd]] for rnd in random_id]
        except KeyError:
            # Sometimes forwards fail (`MESSAGE_ID_INVALID` if a message gets
            # deleted or `WORKER_BUSY_TOO_LONG_RETRY` if there are issues at
            # Telegram), in which case we get some "missing" message mappings.
            # Log them with the hope that we can better work around them.
            #
            # This also happens when trying to forward messages that can't
            # be forwarded because they don't exist (0, service, deleted)
            # among others which could be (like deleted or existing).
            self._log[__name__].warning(
                'Request %s had missing message mappings %s', request, result)

        return [
            id_to_message.get(random_to_id[rnd])
            if rnd in random_to_id
            else None
            for rnd in random_id
        ]

    # endregion
PypiClean
/NudeNetClassifier-2.1.1.tar.gz/NudeNetClassifier-2.1.1/README.md
# NudeNet: Neural Nets for Nudity Classification, Detection and selective censoring [![DOI](https://zenodo.org/badge/173154449.svg)](https://zenodo.org/badge/latestdoi/173154449) ![Upload Python package](https://github.com/notAI-tech/NudeNet/actions/workflows/python-publish.yml/badge.svg) ## Fork differences: - Only the default classifier is available. - The classifier no longer throws the `Initializer block1_conv1_bn/keras_learning_phase:0 appears in graph inputs and will not be treated as constant value/weight. etc.` warning. - It only works on images. - The classifier is included in the project itself. - Only the v2 model is available (the original repo's default). So `v2` from original is `main` here. Uncensored version of the following image can be found at https://i.imgur.com/rga6845.jpg (NSFW) ![](https://i.imgur.com/0KPJbl9.jpg) **Classifier classes:** |class name | Description | |--------|:--------------: |safe | Image is not sexually explicit | |unsafe | Image is sexually explicit| # As self-hostable API service ```bash # Classifier docker run -it -p8080:8080 notaitech/nudenet:classifier # See fastDeploy-file_client.py for running predictions via fastDeploy's REST endpoints wget https://raw.githubusercontent.com/notAI-tech/fastDeploy/master/cli/fastDeploy-file_client.py # Single input python fastDeploy-file_client.py --file PATH_TO_YOUR_IMAGE # Client side batching python fastDeploy-file_client.py --dir PATH_TO_FOLDER --ext jpg ``` **Note: golang example https://github.com/notAI-tech/NudeNet/issues/63#issuecomment-729555360**, thanks to [Preetham Kamidi](https://github.com/preetham) # As Python module **Installation**: ```bash pip install -U git+https://github.com/platelminto/NudeNet ``` **Classifier Usage**: ```python # Import module from nudenet import NudeClassifier # initialize classifier (downloads the checkpoint file automatically the first time) classifier = NudeClassifier() # Classify single image classifier.classify('path_to_image_1') # Returns 
{'path_to_image_1': {'safe': PROBABILITY, 'unsafe': PROBABILITY}} # Classify multiple images (batch prediction) # batch_size is optional; defaults to 4 classifier.classify(['path_to_image_1', 'path_to_image_2'], batch_size=BATCH_SIZE) # Returns {'path_to_image_1': {'safe': PROBABILITY, 'unsafe': PROBABILITY}, # 'path_to_image_2': {'safe': PROBABILITY, 'unsafe': PROBABILITY}} ``` # Notes: - The current version of NudeDetector is trained on 160,000 entirely auto-labelled (using classification heat maps and various other hybrid techniques) images. - The entire data for the classifier is available at https://archive.org/details/NudeNet_classifier_dataset_v1 - A part of the auto-labelled data (Images are from the classifier dataset above) used to train the base Detector is available at https://github.com/notAI-tech/NudeNet/releases/download/v0/DETECTOR_AUTO_GENERATED_DATA.zip
PypiClean
/DACBench-0.2.0.tar.gz/DACBench-0.2.0/dacbench/benchmarks/luby_benchmark.py
import csv
import os

import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np

from dacbench.abstract_benchmark import AbstractBenchmark, objdict
from dacbench.envs import LubyEnv, luby_gen
from dacbench.wrappers import RewardNoiseWrapper

MAX_STEPS = 2**6
LUBY_SEQUENCE = np.log2([next(luby_gen(i)) for i in range(1, 2 * MAX_STEPS + 2)])
HISTORY_LENGTH = 5

DEFAULT_CFG_SPACE = CS.ConfigurationSpace()
SEQ = CSH.UniformIntegerHyperparameter(
    name="sequence_element", lower=0, upper=np.log2(MAX_STEPS)
)
DEFAULT_CFG_SPACE.add_hyperparameter(SEQ)

INFO = {
    "identifier": "Luby",
    "name": "Luby Sequence Approximation",
    # BUGFIX: "sucess" -> "success"
    "reward": "Boolean success indication",
    "state_description": [
        "Action t-2",
        "Step t-2",
        "Action t-1",
        "Step t-1",
        "Action t (current)",
        "Step t (current)",
    ],
}

LUBY_DEFAULTS = objdict(
    {
        "config_space": DEFAULT_CFG_SPACE,
        "observation_space_class": "Box",
        "observation_space_type": np.float32,
        "observation_space_args": [
            np.array([-1 for _ in range(HISTORY_LENGTH + 1)]),
            np.array([2 ** max(LUBY_SEQUENCE + 1) for _ in range(HISTORY_LENGTH + 1)]),
        ],
        "reward_range": (-1, 0),
        "cutoff": MAX_STEPS,
        "hist_length": HISTORY_LENGTH,
        "min_steps": 2**3,
        "seed": 0,
        "instance_set_path": "../instance_sets/luby/luby_default.csv",
        "benchmark_info": INFO,
    }
)


class LubyBenchmark(AbstractBenchmark):
    """
    Benchmark with default configuration & relevant functions for Luby
    """

    def __init__(self, config_path=None, config=None):
        """
        Initialize Luby Benchmark

        Parameters
        -------
        config_path : str
            Path to config file (optional)
        """
        super(LubyBenchmark, self).__init__(config_path, config)
        if not self.config:
            self.config = objdict(LUBY_DEFAULTS.copy())

        # fill any missing keys with the benchmark defaults
        for key in LUBY_DEFAULTS:
            if key not in self.config:
                self.config[key] = LUBY_DEFAULTS[key]

    def get_environment(self):
        """
        Return Luby env with current configuration

        Returns
        -------
        LubyEnv
            Luby environment
        """
        if "instance_set" not in self.config.keys():
            self.read_instance_set()

        # Read test set if path is specified
        if (
            "test_set" not in self.config.keys()
            and "test_set_path" in self.config.keys()
        ):
            self.read_instance_set(test=True)

        env = LubyEnv(self.config)
        for func in self.wrap_funcs:
            env = func(env)

        return env

    def set_cutoff(self, steps):
        """
        Set cutoff and adapt dependencies

        Parameters
        -------
        int
            Maximum number of steps
        """
        self.config.cutoff = steps
        self.config.action_space_args = [int(np.log2(steps))]
        # BUGFIX: the original assigned to a local named LUBY_SEQUENCE, shadowing
        # the module-level constant. Use a distinct local name to make clear that
        # only the observation space of this benchmark is updated.
        # NOTE(review): the module-level LUBY_SEQUENCE is intentionally left
        # unchanged (the original never updated it either); set_history_length
        # therefore still uses the sequence computed for MAX_STEPS — confirm.
        luby_sequence = np.log2([next(luby_gen(i)) for i in range(1, 2 * steps + 2)])
        self.config.observation_space_args = [
            np.array([-1 for _ in range(self.config.hist_length + 1)]),
            np.array(
                [
                    2 ** max(luby_sequence + 1)
                    for _ in range(self.config.hist_length + 1)
                ]
            ),
        ]

    def set_history_length(self, length):
        """
        Set history length and adapt dependencies

        Parameters
        -------
        int
            History length
        """
        self.config.hist_length = length
        self.config.observation_space_args = [
            np.array([-1 for _ in range(length + 1)]),
            np.array([2 ** max(LUBY_SEQUENCE + 1) for _ in range(length + 1)]),
        ]

    def read_instance_set(self, test=False):
        """Read instance set from file"""
        if test:
            path = (
                os.path.dirname(os.path.abspath(__file__))
                + "/"
                + self.config.test_set_path
            )
            keyword = "test_set"
        else:
            path = (
                os.path.dirname(os.path.abspath(__file__))
                + "/"
                + self.config.instance_set_path
            )
            keyword = "instance_set"

        self.config[keyword] = {}
        with open(path, "r") as fh:
            reader = csv.DictReader(fh)
            for row in reader:
                # an instance is start shift(s) followed by sticky slope(s)
                self.config[keyword][int(row["ID"])] = [
                    float(shift) for shift in row["start"].split(",")
                ] + [float(slope) for slope in row["sticky"].split(",")]

    def get_benchmark(self, L=8, fuzziness=1.5, seed=0):
        """
        Get Benchmark from DAC paper

        Parameters
        -------
        L : int
            Minimum sequence length, was 8, 16 or 32 in the paper
        fuzziness : float
            Amount of noise applied. Was 1.5 for most of the experiments
        seed : int
            Environment seed

        Returns
        -------
        env : LubyEnv
            Luby environment
        """
        self.config = objdict(LUBY_DEFAULTS.copy())
        self.config.min_steps = L
        self.config.seed = seed
        self.config.instance_set = {0: [0, 0]}
        self.config.reward_range = (-10, 10)
        env = LubyEnv(self.config)
        rng = np.random.RandomState(self.config.seed)

        def fuzz():
            # noise drawn around -1 so rewards are penalized on average
            return rng.normal(-1, fuzziness)

        fuzzy_env = RewardNoiseWrapper(env, noise_function=fuzz)
        return fuzzy_env
PypiClean
/AiBot.py-1.3.0-py3-none-any.whl/AiBot/_WinBot.py
import abc import socket import socketserver import subprocess import sys import threading import time import re from ast import literal_eval from typing import Optional, List, Tuple from loguru import logger from ._utils import _protect, Point, _Region, _Algorithm, _SubColors from urllib import request, parse import json import base64 class _ThreadingTCPServer(socketserver.ThreadingTCPServer): daemon_threads = True allow_reuse_address = True class WinBotMain(socketserver.BaseRequestHandler, metaclass=_protect("handle", "execute")): raise_err = False wait_timeout = 3 # seconds interval_timeout = 0.5 # seconds log_path = "" log_level = "INFO" log_format = "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | " \ "<level>{level: <8}</level> | " \ "{thread.name: <8} | " \ "<cyan>{module}.{function}:{line}</cyan> | " \ "<level>{message}</level>" # 日志内容 def __init__(self, request, client_address, server): self._lock = threading.Lock() self.log = logger self.log.remove() self.log.add(sys.stdout, level=self.log_level.upper(), format=self.log_format) if self.log_path: self.log.add(self.log_path, level=self.log_level.upper(), format=self.log_format, rotation='5 MB', retention='2 days') super().__init__(request, client_address, server) def __send_data(self, *args) -> str: args_len = "" args_text = "" for argv in args: if argv is None: argv = "" elif isinstance(argv, bool) and argv: argv = "true" elif isinstance(argv, bool) and not argv: argv = "false" argv = str(argv) args_text += argv args_len += str(len(bytes(argv, 'utf8'))) + "/" data = (args_len.strip("/") + "\n" + args_text).encode("utf8") try: with self._lock: self.log.debug(rf"->-> {data}") self.request.sendall(data) response = self.request.recv(65535) if response == b"": raise ConnectionAbortedError(f"{self.client_address[0]}:{self.client_address[1]} 客户端断开链接。") data_length, data = response.split(b"/", 1) while int(data_length) > len(data): data += self.request.recv(65535) self.log.debug(rf"<-<- {data}") return 
data.decode("utf8").strip() except Exception as e: self.log.error("send/read tcp data error: " + str(e)) raise e # ############# # 窗口操作 # # ############# def find_window(self, class_name: str = None, window_name: str = None) -> Optional[str]: """ 查找窗口句柄,仅查找顶级窗口,不包含子窗口 :param class_name: 窗口类名 :param window_name: 窗口名 :return: """ response = self.__send_data("findWindow", class_name, window_name) if response == "null": return None return response def find_windows(self, class_name: str = None, window_name: str = None) -> List[str]: """ 查找窗口句柄数组,仅查找顶级窗口,不包含子窗口 class_name 和 window_name 都为 None,则返回所有窗口句柄 :param class_name: 窗口类名 :param window_name: 窗口名 :return: 窗口句柄的列表 """ response = self.__send_data("findWindows", class_name, window_name) if response == "null": return [] return response.split("|") def find_sub_window(self, hwnd: str, class_name: str = None, window_name: str = None) -> Optional[str]: """ 查找子窗口句柄 :param hwnd: 当前窗口句柄 :param class_name: 窗口类名 :param window_name: 窗口名 :return: 子窗口句柄或 None """ response = self.__send_data("findSubWindow", hwnd, class_name, window_name) if response == "null": return None return response def find_parent_window(self, hwnd: str) -> Optional[str]: """ 查找父窗口句柄 :param hwnd: 当前窗口句柄 :return: 父窗口句柄或 None """ response = self.__send_data("findParentWindow", hwnd) if response == "null": return None return response def find_desktop_window(self) -> Optional[str]: """ 查找桌面窗口句柄 :return: 桌面窗口句柄或 None """ response = self.__send_data("findDesktopWindow") if response == "null": return None return response def get_window_name(self, hwnd: str) -> Optional[str]: """ 获取窗口名称 :param hwnd: 当前窗口句柄 :return: 窗口名称或 None """ response = self.__send_data("getWindowName", hwnd) if response == "null": return None return response def show_window(self, hwnd: str, show: bool) -> bool: """ 显示/隐藏窗口 :param hwnd: 当前窗口句柄 :param show: 是否显示窗口 :return: """ return self.__send_data("showWindow", hwnd, show) == "true" def set_window_top(self, hwnd: str, top: bool) -> bool: """ 
设置窗口到最顶层 :param hwnd: 当前窗口句柄 :param top: 是否置顶,True 置顶, False 取消置顶 :return: """ return self.__send_data("setWindowTop", hwnd, top) == "true" def get_window_pos(self, hwnd: str, wait_time: float = None, interval_time: float = None) -> Optional[ Tuple[Point, Point]]: """ 获取窗口位置 :param hwnd: 窗口句柄 :return: """ if wait_time is None: wait_time = self.wait_timeout if interval_time is None: interval_time = self.interval_timeout end_time = time.time() + wait_time while time.time() < end_time: response = self.__send_data("getWindowPos", hwnd) if response == "-1|-1|-1|-1": time.sleep(interval_time) continue else: x1, y1, x2, y2 = response.split("|") return Point(x=float(x1), y=float(y1)), Point(x=float(x2), y=float(y2)) # 超时 return None def set_window_pos(self, hwnd: str, left: float, top: float, width: float, height: float) -> bool: """ 设置窗口位置 :param hwnd: 当前窗口句柄 :param left: 左上角横坐标 :param top: 左上角纵坐标 :param width: 窗口宽度 :param height: 窗口高度 :return: """ return self.__send_data("setWindowPos", hwnd, left, top, width, height) == "true" # ############# # 键鼠操作 # # ############# def move_mouse(self, hwnd: str, x: float, y: float, mode: bool = False, ele_hwnd: str = "0") -> bool: """ 移动鼠标 :param hwnd: 当前窗口句柄 :param x: 横坐标 :param y: 纵坐标 :param mode: 操作模式,后台 true,前台 false, 默认前台操作 :param ele_hwnd: 元素句柄,如果 mode=True 且目标控件有单独的句柄,则需要通过 get_element_window 获得元素句柄,指定 ele_hwnd 的值(极少应用窗口由父窗口响应消息,则无需指定); :return: """ return self.__send_data("moveMouse", hwnd, x, y, mode, ele_hwnd) == "true" def move_mouse_relative(self, hwnd: str, x: float, y: float, mode: bool = False) -> bool: """ 移动鼠标(相对坐标) :param hwnd: 当前窗口句柄 :param x: 相对横坐标 :param y: 相对纵坐标 :param mode: 操作模式,后台 true,前台 false, 默认前台操作 :return: """ return self.__send_data("moveMouseRelative", hwnd, x, y, mode) == "true" def scroll_mouse(self, hwnd: str, x: float, y: float, count: int, mode: bool = False) -> bool: """ 滚动鼠标 :param hwnd: 当前窗口句柄 :param x: 横坐标 :param y: 纵坐标 :param count: 鼠标滚动次数, 负数下滚鼠标, 正数上滚鼠标 :param mode: 操作模式,后台 true,前台 false, 
默认前台操作 :return: """ return self.__send_data("rollMouse", hwnd, x, y, count, mode) == "true" def click_mouse(self, hwnd: str, x: float, y: float, typ: int, mode: bool = False, ele_hwnd: str = "0") -> bool: """ 鼠标点击 :param hwnd: 当前窗口句柄 :param x: 横坐标 :param y: 纵坐标 :param typ: 点击类型,单击左键:1 单击右键:2 按下左键:3 弹起左键:4 按下右键:5 弹起右键:6 双击左键:7 双击右键:8 :param mode: 操作模式,后台 true,前台 false, 默认前台操作 :param ele_hwnd: 元素句柄,如果 mode=True 且目标控件有单独的句柄,则需要通过 get_element_window 获得元素句柄,指定 ele_hwnd 的值(极少应用窗口由父窗口响应消息,则无需指定); :return: """ return self.__send_data("clickMouse", hwnd, x, y, typ, mode, ele_hwnd) == "true" def send_keys(self, text: str) -> bool: """ 输入文本 :param text: 输入的文本 :return: """ return self.__send_data("sendKeys", text) == "true" def send_keys_by_hwnd(self, hwnd: str, text: str) -> bool: """ 后台输入文本(杀毒软件可能会拦截) :param hwnd: 窗口句柄 :param text: 输入的文本 :return: """ return self.__send_data("sendKeysByHwnd", hwnd, text) == "true" def send_vk(self, vk: int, typ: int) -> bool: """ 输入虚拟键值(VK) :param vk: VK键值 :param typ: 输入类型,按下弹起:1 按下:2 弹起:3 :return: """ return self.__send_data("sendVk", vk, typ) == "true" def send_vk_by_hwnd(self, hwnd: str, vk: int, typ: int) -> bool: """ 后台输入虚拟键值(VK) :param hwnd: 窗口句柄 :param vk: VK键值 :param typ: 输入类型,按下弹起:1 按下:2 弹起:3 :return: """ return self.__send_data("sendVkByHwnd", hwnd, vk, typ) == "true" # ############# # 图色操作 # # ############# def save_screenshot(self, hwnd: str, save_path: str, region: _Region = None, algorithm: _Algorithm = None, mode: bool = False) -> bool: """ 截图 :param hwnd: 窗口句柄 :param save_path: 图片存储路径 :param region: 截图区域,默认全屏,``region = (起点x、起点y、终点x、终点y)``,得到一个矩形 :param algorithm: 处理截图所用算法和参数,默认保存原图, ``algorithm = (algorithm_type, threshold, max_val)`` 按元素顺序分别代表: 0. ``algorithm_type`` 算法类型 1. ``threshold`` 阈值 2. ``max_val`` 最大值 ``threshold`` 和 ``max_val`` 同为 255 时灰度处理. ``algorithm_type`` 算法类型说明: 0. ``THRESH_BINARY`` 算法,当前点值大于阈值 `threshold` 时,取最大值 ``max_val``,否则设置为 0; 1. ``THRESH_BINARY_INV`` 算法,当前点值大于阈值 `threshold` 时,设置为 0,否则设置为最大值 max_val; 2. 
``THRESH_TOZERO`` 算法,当前点值大于阈值 `threshold` 时,不改变,否则设置为 0; 3. ``THRESH_TOZERO_INV`` 算法,当前点值大于阈值 ``threshold`` 时,设置为 0,否则不改变; 4. ``THRESH_TRUNC`` 算法,当前点值大于阈值 ``threshold`` 时,设置为阈值 ``threshold``,否则不改变; 5. ``ADAPTIVE_THRESH_MEAN_C`` 算法,自适应阈值; 6. ``ADAPTIVE_THRESH_GAUSSIAN_C`` 算法,自适应阈值; :param mode: 操作模式,后台 true,前台 false, 默认前台操作 :return: """ if not region: region = [0, 0, 0, 0] if not algorithm: algorithm_type, threshold, max_val = [0, 0, 0] else: algorithm_type, threshold, max_val = algorithm if algorithm_type in (5, 6): threshold = 127 max_val = 255 return self.__send_data("saveScreenshot", hwnd, save_path, *region, algorithm_type, threshold, max_val, mode) == "true" def get_color(self, hwnd: str, x: float, y: float, mode: bool = False) -> Optional[str]: """ 获取指定坐标点的色值,返回色值字符串(#008577)或者 None :param hwnd: 窗口句柄; :param x: x 坐标; :param y: y 坐标; :param mode: 操作模式,后台 true,前台 false, 默认前台操作; :return: """ response = self.__send_data("getColor", hwnd, x, y, mode) if response == "null": return None return response def find_color(self, hwnd: str, color: str, sub_colors: _SubColors = None, region: _Region = None, similarity: float = 0.9, mode: bool = False, wait_time: float = None, interval_time: float = None): """ 获取指定色值的坐标点,返回坐标或者 None :param hwnd: 窗口句柄; :param color: 颜色字符串,必须以 # 开头,例如:#008577; :param sub_colors: 辅助定位的其他颜色; :param region: 在指定区域内找色,默认全屏; :param similarity: 相似度,0-1 的浮点数,默认 0.9; :param mode: 操作模式,后台 true,前台 false, 默认前台操作; :param wait_time: 等待时间,默认取 self.wait_timeout; :param interval_time: 轮询间隔时间,默认取 self.interval_timeout; :return: .. 
seealso:: :meth:`save_screenshot`: ``region`` 和 ``algorithm`` 的参数说明 """ if wait_time is None: wait_time = self.wait_timeout if interval_time is None: interval_time = self.interval_timeout if not region: region = [0, 0, 0, 0] if sub_colors: sub_colors_str = "" for sub_color in sub_colors: offset_x, offset_y, color_str = sub_color sub_colors_str += f"{offset_x}/{offset_y}/{color_str}\n" # 去除最后一个 \n sub_colors_str = sub_colors_str.strip() else: sub_colors_str = "null" end_time = time.time() + wait_time while time.time() < end_time: response = self.__send_data("findColor", hwnd, color, sub_colors_str, *region, similarity, mode) # 找色失败 if response == "-1.0|-1.0": time.sleep(interval_time) else: # 找色成功 x, y = response.split("|") return Point(x=float(x), y=float(y)) # 超时 return None def compare_color(self, hwnd: str, main_x: float, main_y: float, color: str, sub_colors: _SubColors = None, region: _Region = None, similarity: float = 0.9, mode: bool = False, wait_time: float = None, interval_time: float = None, raise_err: bool = None) -> Optional[Point]: """ 比较指定坐标点的颜色值 :param hwnd: 窗口句柄; :param main_x: 主颜色所在的X坐标; :param main_y: 主颜色所在的Y坐标; :param color: 颜色字符串,必须以 # 开头,例如:#008577; :param sub_colors: 辅助定位的其他颜色; :param region: 截图区域,默认全屏,``region = (起点x、起点y、终点x、终点y)``,得到一个矩形 :param similarity: 相似度,0-1 的浮点数,默认 0.9; :param mode: 操作模式,后台 true,前台 false, 默认前台操作; :param wait_time: 等待时间,默认取 self.wait_timeout; :param interval_time: 轮询间隔时间,默认取 self.interval_timeout; :param raise_err: 超时是否抛出异常; :return: True或者 False """ if wait_time is None: wait_time = self.wait_timeout if interval_time is None: interval_time = self.interval_timeout if raise_err is None: raise_err = self.raise_err if not region: region = [0, 0, 0, 0] if sub_colors: sub_colors_str = "" for sub_color in sub_colors: offset_x, offset_y, color_str = sub_color sub_colors_str += f"{offset_x}/{offset_y}/{color_str}\n" # 去除最后一个 \n sub_colors_str = sub_colors_str.strip() else: sub_colors_str = "null" end_time = time.time() + 
wait_time while time.time() < end_time: return self.__send_data("compareColor", hwnd, main_x, main_y, color, sub_colors_str, *region, similarity, mode) == "true" # 超时 if raise_err: raise TimeoutError("`compare_color` 操作超时") return None def extract_image_by_video(self, video_path: str, save_folder: str, jump_frame: int = 1) -> bool: """ 提取视频帧 :param video_path: 视频路径 :param save_folder: 提取的图片保存的文件夹目录 :param jump_frame: 跳帧,默认为1 不跳帧 :return: True或者False """ return self.__send_data("extractImageByVideo", video_path, save_folder, jump_frame) == "true" def crop_image(self, image_path, save_path, left, top, rigth, bottom) -> bool: """ 裁剪图片 :param image_path: 图片路径 :param save_path: 裁剪后保存的图片路径 :param left: 裁剪的左上角横坐标 :param top: 裁剪的左上角纵坐标 :param rigth: 裁剪的右下角横坐标 :param bottom: 裁剪的右下角纵坐标 :return: True或者False """ return self.__send_data("cropImage", image_path, save_path, left, top, rigth, bottom) == "true" def find_images(self, hwnd: str, image_path: str, region: _Region = None, algorithm: _Algorithm = None, similarity: float = 0.9, mode: bool = False, multi: int = 1, wait_time: float = None, interval_time: float = None) -> List[Point]: """ 寻找图片坐标,在当前屏幕中寻找给定图片中心点的坐标,返回坐标列表 :param hwnd: 窗口句柄; :param image_path: 图片的绝对路径; :param region: 从指定区域中找图,默认全屏; :param algorithm: 处理屏幕截图所用的算法,默认原图,注意:给定图片处理时所用的算法,应该和此方法的算法一致; :param similarity: 相似度,0-1 的浮点数,默认 0.9; :param mode: 操作模式,后台 true,前台 false, 默认前台操作; :param multi: 返回图片数量,默认1张; :param wait_time: 等待时间,默认取 self.wait_timeout; :param interval_time: 轮询间隔时间,默认取 self.interval_timeout; :return: .. 
seealso:: :meth:`save_screenshot`: ``region`` 和 ``algorithm`` 的参数说明 """ if wait_time is None: wait_time = self.wait_timeout if interval_time is None: interval_time = self.interval_timeout if not region: region = [0, 0, 0, 0] if not algorithm: algorithm_type, threshold, max_val = [0, 0, 0] else: algorithm_type, threshold, max_val = algorithm if algorithm_type in (5, 6): threshold = 127 max_val = 255 end_time = time.time() + wait_time while time.time() < end_time: response = self.__send_data("findImage", hwnd, image_path, *region, similarity, algorithm_type, threshold, max_val, multi, mode) # 找图失败 if response in ["-1.0|-1.0", "-1|-1"]: time.sleep(interval_time) continue else: # 找图成功,返回图片左上角坐标 # 分割出多个图片的坐标 image_points = response.split("/") point_list = [] for point_str in image_points: x, y = point_str.split("|") point_list.append(Point(x=float(x), y=float(y))) return point_list # 超时 return [] def find_dynamic_image(self, hwnd: str, interval_ti: int, region: _Region = None, mode: bool = False, wait_time: float = None, interval_time: float = None) -> List[Point]: """ 找动态图,对比同一张图在不同时刻是否发生变化,返回坐标列表 :param hwnd: 窗口句柄; :param interval_ti: 前后时刻的间隔时间,单位毫秒; :param region: 在指定区域找图,默认全屏; :param mode: 操作模式,后台 true,前台 false, 默认前台操作; :param wait_time: 等待时间,默认取 self.wait_timeout; :param interval_time: 轮询间隔时间,默认取 self.interval_timeout; :return: .. 
seealso:: :meth:`save_screenshot`: ``region`` 的参数说明
        """
        if wait_time is None:
            wait_time = self.wait_timeout

        if interval_time is None:
            interval_time = self.interval_timeout

        if not region:
            region = [0, 0, 0, 0]

        # Poll the driver until a match is reported or `wait_time` elapses.
        end_time = time.time() + wait_time
        while time.time() < end_time:
            # NOTE(review): `interval_ti` is not defined in this visible chunk; it is
            # presumably a parameter of the enclosing method (the animation sampling
            # interval sent to the driver) -- confirm against the full signature.
            response = self.__send_data("findAnimation", hwnd, interval_ti, *region, mode)
            if response == "-1.0|-1.0":
                # Not found yet -- sleep and poll again.
                time.sleep(interval_time)
                continue
            else:
                # Found. The driver returns "x|y/x|y/..." -- one top-left
                # coordinate pair per matched image.
                image_points = response.split("/")
                point_list = []
                for point_str in image_points:
                    x, y = point_str.split("|")
                    point_list.append(Point(x=float(x), y=float(y)))
                return point_list
        # Timed out.
        return []

    # ##############
    #     OCR      #
    # ##############
    @staticmethod
    def __parse_ocr(text: str) -> list:
        """Parse the raw OCR response string into Python objects.

        The OCR service replies with a Python-literal string (nested lists of
        corner points plus ``(text, confidence)`` tuples), so it is evaluated
        with :func:`ast.literal_eval`, which is safe for literals only.

        :param text: raw response text from the OCR service
        :return: parsed list of text-info entries
        """
        return literal_eval(text)

    def __ocr_server(self, hwnd: str, region: _Region = None, algorithm: _Algorithm = None,
                     mode: bool = False) -> list:
        """Recognize on-screen text in a window via the driver's "ocr" command.

        :param hwnd: target window handle
        :param region: region to recognize; default (falsy) means full window
        :param algorithm: ``(type, threshold, max_val)`` pre-processing triple;
            default (falsy) means no processing (0, 0, 0)
        :param mode: True = background operation, False = foreground (default)
        :return: parsed text-info list, or [] when nothing was recognized
        """
        if not region:
            region = [0, 0, 0, 0]

        if not algorithm:
            algorithm_type, threshold, max_val = [0, 0, 0]
        else:
            algorithm_type, threshold, max_val = algorithm
            if algorithm_type in (5, 6):
                # Algorithm types 5/6 use fixed binarization parameters.
                threshold = 127
                max_val = 255

        response = self.__send_data("ocr", hwnd, *region, algorithm_type, threshold, max_val, mode)
        if response == "null" or response == "":
            return []
        return self.__parse_ocr(response)

    def __ocr_server_by_file(self, image_path: str, region: _Region = None,
                             algorithm: _Algorithm = None) -> list:
        """Recognize text in an image file via the driver's "ocrByFile" command.

        :param image_path: path of the image to recognize
        :param region: region to recognize; default (falsy) means whole image
        :param algorithm: ``(type, threshold, max_val)`` pre-processing triple;
            default (falsy) means no processing (0, 0, 0)
        :return: parsed text-info list, or [] when nothing was recognized
        """
        if not region:
            region = [0, 0, 0, 0]

        if not algorithm:
            algorithm_type, threshold, max_val = [0, 0, 0]
        else:
            algorithm_type, threshold, max_val = algorithm
            if algorithm_type in (5, 6):
                # Algorithm types 5/6 use fixed binarization parameters.
                threshold = 127
                max_val = 255

        response = self.__send_data("ocrByFile", image_path, *region, algorithm_type, threshold, max_val)
        if response == "null" or response == "":
            return []
        return self.__parse_ocr(response)

    def init_ocr_server(self, ip: str, port: int = 9752) -> bool:
        """Initialize the OCR service.

        :param ip: OCR server IP address
        :param port: OCR server port, default 9752
        :return: True on success
        """
        return self.__send_data("initOcr", ip, port) == "true"

    def get_text(self, hwnd_or_image_path: str, region: _Region = None,
                 algorithm: _Algorithm = None, mode: bool = False) -> List[str]:
        """OCR a window or an image file and return the recognized strings.

        :param hwnd_or_image_path: window handle (all digits) or image file path
        :param region: recognition region, default full screen
        :param algorithm: image/screen pre-processing algorithm triple, default none
        :param mode: True = background operation, False = foreground (default);
            only used for the window-handle case
        :return: list of recognized text strings

        .. seealso:: :meth:`save_screenshot` for the ``region`` and
           ``algorithm`` parameters
        """
        if hwnd_or_image_path.isdigit():
            # All-digit argument is treated as a window handle.
            text_info_list = self.__ocr_server(hwnd_or_image_path, region, algorithm, mode)
        else:
            # Otherwise it is an image file path.
            text_info_list = self.__ocr_server_by_file(hwnd_or_image_path, region, algorithm)

        text_list = []
        for text_info in text_info_list:
            # Each entry ends with a (text, confidence) tuple.
            text = text_info[-1][0]
            text_list.append(text)
        return text_list

    def find_text(self, hwnd_or_image_path: str, text: str, region: _Region = None,
                  algorithm: _Algorithm = None, mode: bool = False) -> List[Point]:
        """OCR a window or image and return the center points of matched text.

        :param hwnd_or_image_path: window handle (all digits) or image file path
        :param text: the text to look for (substring match)
        :param region: recognition region, default full screen
        :param algorithm: image/screen pre-processing algorithm triple, default none
        :param mode: True = background operation, False = foreground (default)
        :return: list of center points, one per match

        .. seealso:: :meth:`save_screenshot` for the ``region`` and
           ``algorithm`` parameters
        """
        if not region:
            region = [0, 0, 0, 0]

        if hwnd_or_image_path.isdigit():
            # All-digit argument is treated as a window handle.
            text_info_list = self.__ocr_server(hwnd_or_image_path, region, algorithm, mode)
        else:
            # Otherwise it is an image file path.
            text_info_list = self.__ocr_server_by_file(hwnd_or_image_path, region, algorithm)

        text_points = []
        for text_info in text_info_list:
            if text in text_info[-1][0]:
                points, words_tuple = text_info
                left, _, right, _ = points

                # Top-left of the recognized text box.
                start_x = left[0]
                start_y = left[1]
                # Bottom-right of the recognized text box.
                end_x = right[0]
                end_y = right[1]

                # The sought text may be only part of the recognized line, so
                # compute its actual x-position from the per-character width.
                width = end_x - start_x
                height = end_y - start_y

                words: str = words_tuple[0]
                # Average width of one character (assumes fixed-pitch glyphs).
                single_word_width = width / len(words)
                # Offset of the sought substring within the recognized line.
                pos = words.find(text)

                offset_x = single_word_width * (pos + len(text) / 2)
                offset_y = height / 2

                # Center of the matched text, translated back to region coords.
                text_point = Point(
                    x=float(region[0] + start_x + offset_x),
                    y=float(region[1] + start_y + offset_y),
                )
                text_points.append(text_point)

        return text_points

    # ######################
    #  Element operations  #
    # ######################
    def get_element_name(self, hwnd: str, xpath: str, wait_time: float = None,
                         interval_time: float = None) -> Optional[str]:
        """Get an element's name.

        :param hwnd: window handle
        :param xpath: element path
        :param wait_time: total wait time, default self.wait_timeout
        :param interval_time: polling interval, default self.interval_timeout
        :return: the element name, or None on timeout
        """
        if wait_time is None:
            wait_time = self.wait_timeout

        if interval_time is None:
            interval_time = self.interval_timeout

        end_time = time.time() + wait_time
        while time.time() < end_time:
            response = self.__send_data("getElementName", hwnd, xpath)
            if response == "null":
                time.sleep(interval_time)
                continue
            else:
                return response
        # Timed out.
        return None

    def get_element_value(self, hwnd: str, xpath: str, wait_time: float = None,
                          interval_time: float = None) -> Optional[str]:
        """Get an element's text value.

        :param hwnd: window handle
        :param xpath: element path
        :param wait_time: total wait time, default self.wait_timeout
        :param interval_time: polling interval, default self.interval_timeout
        :return: the element text, or None on timeout
        """
        if wait_time is None:
            wait_time = self.wait_timeout

        if interval_time is None:
            interval_time = self.interval_timeout

        end_time = time.time() + wait_time
        while time.time() < end_time:
            response = self.__send_data("getElementValue", hwnd, xpath)
            if response == "null":
                time.sleep(interval_time)
                continue
            else:
                return response
        # Timed out.
        return None

    def get_element_rect(self, hwnd: str, xpath: str, wait_time: float = None,
                         interval_time: float = None) -> Optional[Tuple[Point, Point]]:
        """Get an element's bounding rectangle.

        :param hwnd: window handle
        :param xpath: element path
        :param wait_time: total wait time, default self.wait_timeout
        :param interval_time: polling interval, default self.interval_timeout
        :return: (top-left, bottom-right) points, or None on timeout
        :rtype: Optional[Tuple[Point, Point]]
        """
        if wait_time is None:
            wait_time = self.wait_timeout

        if interval_time is None:
            interval_time = self.interval_timeout

        end_time = time.time() + wait_time
        while time.time() < end_time:
            response = self.__send_data("getElementRect", hwnd, xpath)
            if response == "-1|-1|-1|-1":
                time.sleep(interval_time)
                continue
            else:
                # Response format: "x1|y1|x2|y2".
                x1, y1, x2, y2 = response.split("|")
                return Point(x=float(x1), y=float(y1)), Point(x=float(x2), y=float(y2))
        # Timed out.
        return None

    def get_element_window(self, hwnd: str, xpath: str, wait_time: float = None,
                           interval_time: float = None) -> Optional[str]:
        """Get the window handle that owns an element.

        :param hwnd: window handle
        :param xpath: element path
        :param wait_time: total wait time, default self.wait_timeout
        :param interval_time: polling interval, default self.interval_timeout
        :return: the element's window handle string, or None on timeout
        """
        if wait_time is None:
            wait_time = self.wait_timeout

        if interval_time is None:
            interval_time = self.interval_timeout

        end_time = time.time() + wait_time
        while time.time() < end_time:
            response = self.__send_data("getElementWindow", hwnd, xpath)
            if response == "null":
                time.sleep(interval_time)
                continue
            else:
                return response
        # Timed out.
        return None

    def click_element(self, hwnd: str, xpath: str, typ: int, wait_time: float = None,
                      interval_time: float = None) -> bool:
        """Click an element.

        :param hwnd: window handle
        :param xpath: element path
        :param typ: action type -- 1 left click, 2 right click, 3 left down,
            4 left up, 5 right down, 6 right up, 7 left double click,
            8 right double click
        :param wait_time: total wait time, default self.wait_timeout
        :param interval_time: polling interval, default self.interval_timeout
        :return: True on success, False on timeout
        """
        if wait_time is None:
            wait_time = self.wait_timeout

        if interval_time is None:
            interval_time = self.interval_timeout

        end_time = time.time() + wait_time
        while time.time() < end_time:
            response = self.__send_data('clickElement', hwnd, xpath, typ)
            if response == "false":
                time.sleep(interval_time)
                continue
            else:
                return True
        # Timed out.
        return False

    def invoke_element(self, hwnd: str, xpath: str, wait_time: float = None,
                       interval_time: float = None) -> bool:
        """Invoke an element's default action (usually a click).

        :param hwnd: window handle
        :param xpath: element path
        :param wait_time: total wait time, default self.wait_timeout
        :param interval_time: polling interval, default self.interval_timeout
        :return: True on success, False on timeout
        """
        if wait_time is None:
            wait_time = self.wait_timeout

        if interval_time is None:
            interval_time = self.interval_timeout

        end_time = time.time() + wait_time
        while time.time() < end_time:
            response = self.__send_data('invokeElement', hwnd, xpath)
            if response == "false":
                time.sleep(interval_time)
                continue
            else:
                return True
        # Timed out.
        return False

    def set_element_focus(self, hwnd: str, xpath: str, wait_time: float = None,
                          interval_time: float = None) -> bool:
        """Give an element keyboard focus.

        :param hwnd: window handle
        :param xpath: element path
        :param wait_time: total wait time, default self.wait_timeout
        :param interval_time: polling interval, default self.interval_timeout
        :return: True on success, False on timeout
        """
        if wait_time is None:
            wait_time = self.wait_timeout

        if interval_time is None:
            interval_time = self.interval_timeout

        end_time = time.time() + wait_time
        while time.time() < end_time:
            response = self.__send_data('setElementFocus', hwnd, xpath)
            if response == "false":
                time.sleep(interval_time)
                continue
            else:
                return True
        # Timed out.
        return False

    def set_element_value(self, hwnd: str, xpath: str, value: str, wait_time: float = None,
                          interval_time: float = None) -> bool:
        """Set an element's text value.

        :param hwnd: window handle
        :param xpath: element path
        :param value: the text to set
        :param wait_time: total wait time, default self.wait_timeout
        :param interval_time: polling interval, default self.interval_timeout
        :return: True on success, False on timeout
        """
        if wait_time is None:
            wait_time = self.wait_timeout

        if interval_time is None:
            interval_time = self.interval_timeout

        end_time = time.time() + wait_time
        while time.time() < end_time:
            response = self.__send_data('setElementValue', hwnd, xpath, value)
            if response == "false":
                time.sleep(interval_time)
                continue
            else:
                return True
        # Timed out.
        return False

    def scroll_element(self, hwnd: str, xpath: str, horizontal: int, vertical: int,
                       wait_time: float = None, interval_time: float = None) -> bool:
        """Scroll an element.

        :param hwnd: window handle
        :param xpath: element path
        :param horizontal: horizontal scroll percentage; -1 = no scroll
        :param vertical: vertical scroll percentage; -1 = no scroll
        :param wait_time: total wait time, default self.wait_timeout
        :param interval_time: polling interval, default self.interval_timeout
        :return: True on success, False on timeout
        """
        if wait_time is None:
            wait_time = self.wait_timeout

        if interval_time is None:
            interval_time = self.interval_timeout

        end_time = time.time() + wait_time
        while time.time() < end_time:
            response = self.__send_data('setElementScroll', hwnd, xpath, horizontal, vertical)
            if response == "false":
                time.sleep(interval_time)
                continue
            else:
                return True
        # Timed out.
        return False

    def is_selected(self, hwnd: str, xpath: str, wait_time: float = None,
                    interval_time: float = None) -> bool:
        """Return whether a check box / radio button is selected.

        NOTE(review): a "false" reply is retried until the timeout, so False is
        only returned after the full wait -- verify this is the intended
        semantics for an unselected control.

        :param hwnd: window handle
        :param xpath: element path
        :param wait_time: total wait time, default self.wait_timeout
        :param interval_time: polling interval, default self.interval_timeout
        :return: True if selected, False on timeout
        """
        if wait_time is None:
            wait_time = self.wait_timeout

        if interval_time is None:
            interval_time = self.interval_timeout

        end_time = time.time() + wait_time
        while time.time() < end_time:
            response = self.__send_data('isSelected', hwnd, xpath)
            if response == "false":
                time.sleep(interval_time)
                continue
            else:
                return True
        # Timed out.
        return False

    def close_window(self, hwnd: str, xpath: str) -> bool:
        """Close a window.

        :param hwnd: window handle
        :param xpath: element path
        :return: True on success
        """
        return self.__send_data('closeWindow', hwnd, xpath) == 'true'

    def set_element_state(self, hwnd: str, xpath: str, state: str) -> bool:
        """Set a window's state (driver command is 'setWindowState').

        :param hwnd: window handle
        :param xpath: element path
        :param state: 0 normal, 1 maximized, 2 minimized
        :return: True on success
        """
        return self.__send_data('setWindowState', hwnd, xpath, state) == 'true'

    # ####################
    #  System clipboard  #
    # ####################
    def set_clipboard_text(self, text: str) -> bool:
        """Set the clipboard content.

        :param text: the text to place on the clipboard
        :return: True on success
        """
        return self.__send_data("setClipboardText", text) == 'true'

    def get_clipboard_text(self) -> str:
        """Get the clipboard content.

        :return: the current clipboard text
        """
        return self.__send_data("getClipboardText")

    # ###################
    #  Process control  #
    # ###################
    def start_process(self, cmd: str, show_window=True, is_wait=False) -> bool:
        """Start a process from a command line.

        :param cmd: the command to run
        :param show_window: whether to show its window, default True
        :param is_wait: whether to wait for the process to finish, default False
        :return: True on success
        """
        return self.__send_data("startProcess", cmd, show_window, is_wait) == "true"

    def execute_command(self, command: str, waitTimeout: int = 300) -> str:
        """Execute a cmd command and return its output.

        :param command: the command; must not contain the literal string "cmd"
        :param waitTimeout: timeout for the result, in milliseconds, default 300
        :return: the command's output
        """
        return self.__send_data("executeCommand", command, waitTimeout)

    def download_file(self, url: str, file_path: str, is_wait: bool) -> bool:
        """Download a file.

        :param url: file URL
        :param file_path: local path to save the file to
        :param is_wait: whether to wait for the download to finish
        :return: True on success
        """
        return self.__send_data("downloadFile", url, file_path, is_wait) == "true"

    # ####################
    #  Excel operations  #
    # ####################
    def open_excel(self, excel_path: str) -> Optional[dict]:
        """Open an Excel document.

        :param excel_path: path of the Excel file
        :return: an excel object (dict with 'book' and 'path'), or None
        """
        response = self.__send_data("openExcel", excel_path)
        if response == "null":
            return None
        return json.loads(response)

    def open_excel_sheet(self, excel_object: dict, sheet_name: str) -> Optional[str]:
        """Open a sheet of an Excel document.

        NOTE(review): unlike :meth:`open_excel` this returns the raw response
        string, not a parsed dict -- the annotation has been corrected to match;
        confirm whether callers expect a sheet handle string.

        :param excel_object: excel object from :meth:`open_excel`
        :param sheet_name: sheet name
        :return: a sheet handle string, or None
        """
        response = self.__send_data("openExcelSheet", excel_object['book'], excel_object['path'], sheet_name)
        if response == "null":
            return None
        return response

    def save_excel(self, excel_object: dict) -> bool:
        """Save an Excel document.

        :param excel_object: excel object from :meth:`open_excel`
        :return: True on success
        """
        return self.__send_data("saveExcel", excel_object['book'], excel_object['path']) == "true"

    def write_excel_num(self, excel_object: dict, row: int, col: int, value: int) -> bool:
        """Write a number into a cell.

        NOTE(review): this passes the whole ``excel_object`` to the driver while
        :meth:`save_excel` passes its 'book'/'path' fields -- confirm the
        protocol really differs between the two commands.

        :param excel_object: excel object
        :param row: row index
        :param col: column index
        :param value: the number to write
        :return: True on success
        """
        return self.__send_data("writeExcelNum", excel_object, row, col, value) == "true"

    def write_excel_str(self, excel_object: dict, row: int, col: int, str_value: str) -> bool:
        """Write a string into a cell.

        :param excel_object: excel object
        :param row: row index
        :param col: column index
        :param str_value: the string to write
        :return: True on success
        """
        return self.__send_data("writeExcelStr", excel_object, row, col, str_value) == "true"

    def read_excel_num(self, excel_object: dict, row: int, col: int) -> float:
        """Read a numeric cell.

        :param excel_object: excel object
        :param row: row index
        :param col: column index
        :return: the cell value as a float
        """
        response = self.__send_data("readExcelNum", excel_object, row, col)
        return float(response)

    def read_excel_str(self, excel_object: dict, row: int, col: int) -> str:
        """Read a string cell.

        :param excel_object: excel object
        :param row: row index
        :param col: column index
        :return: the cell text
        """
        return self.__send_data("readExcelStr", excel_object, row, col)

    def remove_excel_row(self, excel_object: dict, row_first: int, row_last: int) -> bool:
        """Delete a range of rows.

        :param excel_object: excel object
        :param row_first: first row to delete
        :param row_last: last row to delete
        :return: True on success
        """
        return self.__send_data("removeExcelRow", excel_object, row_first, row_last) == "true"

    def remove_excel_col(self, excel_object: dict, col_first: int, col_last: int) -> bool:
        """Delete a range of columns.

        :param excel_object: excel object
        :param col_first: first column to delete
        :param col_last: last column to delete
        :return: True on success
        """
        return self.__send_data("removeExcelCol", excel_object, col_first, col_last) == "true"

    # ############
    #  Captcha   #
    # ############
    def get_captcha(self, file_path: str, username: str, password: str, soft_id: str,
                    code_type: str, len_min: str = '0') -> Optional[dict]:
        """Solve a captcha image via the chaojiying.com HTTP service.

        :param file_path: path of the captcha image file
        :param username: chaojiying account name
        :param password: chaojiying account password
        :param soft_id: software ID
        :param code_type: image type, see https://www.chaojiying.com/price.html
        :param len_min: minimum length; '0' (default) disables it, usable only
            for variable-length image types
        :return: JSON dict with:
            err_no (int) -- 0 means OK, other codes per
            https://www.chaojiying.com/api-23.html;
            err_str (str) -- human-readable message;
            pic_id (str) -- image identifier;
            pic_str (str) -- the recognized text;
            md5 (str) -- checksum to validate the reply
        """
        # NOTE(review): prefer `with open(...)` so the handle is closed even if
        # read()/b64encode raise; left as-is to keep behavior byte-identical.
        file = open(file_path, mode="rb")
        file_data = file.read()
        file_base64 = base64.b64encode(file_data)
        file.close()

        url = "http://upload.chaojiying.net/Upload/Processing.php"
        data = {
            'user': username,
            'pass': password,
            'softid': soft_id,
            'codetype': code_type,
            'len_min': len_min,
            'file_base64': file_base64
        }
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20100101 Firefox/24.0',
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        parseData = parse.urlencode(data).encode('utf8')
        req = request.Request(url, parseData, headers)
        response = request.urlopen(req)
        result = response.read().decode()
        return json.loads(result)

    def error_captcha(self, username: str, password: str, soft_id: str, pic_id: str) -> Optional[dict]:
        """Report a wrong captcha answer to get the score refunded.

        :param username: chaojiying account name
        :param password: chaojiying account password
        :param soft_id: software ID
        :param pic_id: image ID, the ``pic_id`` field returned by get_captcha
        :return: JSON dict with err_no (int) and err_str (str)
        """
        url = "http://upload.chaojiying.net/Upload/ReportError.php"
        data = {
            'user': username,
            'pass': password,
            'softid': soft_id,
            'id': pic_id,
        }
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20100101 Firefox/24.0',
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        parseData = parse.urlencode(data).encode('utf8')
        req = request.Request(url, parseData, headers)
        response = request.urlopen(req)
        result = response.read().decode()
        return json.loads(result)

    def score_captcha(self, username: str, password: str) -> Optional[dict]:
        """Query the remaining captcha score of an account.

        :param username: chaojiying account name
        :param password: chaojiying account password
        :return: JSON dict with err_no (int), err_str (str), tifen (score),
            tifen_lock (locked score)
        """
        url = "http://upload.chaojiying.net/Upload/GetScore.php"
        data = {
            'user': username,
            'pass': password,
        }
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20100101 Firefox/24.0',
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        parseData = parse.urlencode(data).encode('utf8')
        req = request.Request(url, parseData, headers)
        response = request.urlopen(req)
        result = response.read().decode()
        return json.loads(result)

    # ###################
    #  Speech services  #
    # ###################
    def activate_speech_service(self, activate_key: str) -> bool:
        """Activate initSpeechService (not supported on Windows 7).

        :param activate_key: activation key, contact the administrator
        :return: True on success
        """
        return self.__send_data("activateSpeechService", activate_key) == "true"

    def init_speech_service(self, speech_key: str, speech_region: str) -> bool:
        """Initialize the speech service (not supported on Windows 7).

        Requires :meth:`activate_speech_service` to have been called first.

        :param speech_key: service key
        :param speech_region: service region
        :return: True on success
        """
        return self.__send_data("initSpeechService", speech_key, speech_region) == "true"

    def audio_file_to_text(self, file_path, language: str) -> Optional[str]:
        """Transcribe an audio file to text.

        :param file_path: audio file path
        :param language: language code, see the developer docs (languages and voices)
        :return: the transcribed text, or None
        """
        response = self.__send_data("audioFileToText", file_path, language)
        if response == "null":
            return None
        return response

    def microphone_to_text(self, language: str) -> Optional[str]:
        """Transcribe the microphone input stream to text.

        :param language: language code, see the developer docs (languages and voices)
        :return: the transcribed text, or None
        """
        response = self.__send_data("microphoneToText", language)
        if response == "null":
            return None
        return response

    def text_to_bullhorn(self, ssmlPath_or_text: str, language: str, voice_name: str) -> bool:
        """Synthesize text to speech on the speaker.

        :param ssmlPath_or_text: text to speak, or a path to an ".xml" SSML file
        :param language: language code, see the developer docs (languages and voices)
        :param voice_name: voice name, see the developer docs (languages and voices)
        :return: True on success
        """
        return self.__send_data("textToBullhorn", ssmlPath_or_text, language, voice_name) == "true"

    def text_to_audio_file(self, ssmlPath_or_text: str, language: str, voice_name: str,
                           audio_path: str) -> bool:
        """Synthesize text to speech and save it to an audio file.

        :param ssmlPath_or_text: text to speak, or a path to an ".xml" SSML file
        :param language: language code, see the developer docs (languages and voices)
        :param voice_name: voice name, see the developer docs (languages and voices)
        :param audio_path: path to save the audio file to
        :return: True on success
        """
        return self.__send_data("textToAudioFile", ssmlPath_or_text, language, voice_name,
                                audio_path) == "true"

    def microphone_translation_text(self, source_language: str, target_language: str) -> Optional[str]:
        """Translate the microphone input stream into text of another language.

        :param source_language: source language, see the developer docs
        :param target_language: target language, see the developer docs
        :return: the translated text, or None
        """
        response = self.__send_data("microphoneTranslationText", source_language, target_language)
        if response == "null":
            return None
        return response

    def audio_file_translation_text(self, audio_path: str, source_language: str,
                                    target_language: str) -> Optional[str]:
        """Translate an audio file into text of another language.

        :param audio_path: path of the audio file to translate
        :param source_language: source language, see the developer docs
        :param target_language: target language, see the developer docs
        :return: the translated text, or None
        """
        response = self.__send_data("audioFileTranslationText", audio_path, source_language,
                                    target_language)
        if response == "null":
            return None
        return response

    # #############
    #  Metahuman  #
    # #############
    def init_metahuman(self, metahuman_mde_path: str, metahuman_scale_value: str,
                       is_update_metahuman: bool = False) -> bool:
        """Initialize the digital human; the first call takes a while.

        :param metahuman_mde_path: digital-human model path
        :param metahuman_scale_value: scale factor; 1 = original size,
            0.5 doubles the size, 2 halves it
        :param is_update_metahuman: force an update, default False; True slows
            down initialization
        :return: True on success
        """
        return self.__send_data("initMetahuman", metahuman_mde_path, metahuman_scale_value,
                                is_update_metahuman) == "true"

    def metahuman_speech(self, save_voice_folder: str, text: str, language: str, voice_name: str,
                         quality: int = 0, wait_play_sound: bool = True, speech_rate: int = 0,
                         voice_style: str = "General") -> bool:
        """Make the digital human speak; requires initSpeechService first.

        :param save_voice_folder: folder for generated .wav files, named from 0 upward
        :param text: the text to speak
        :param language: language code, see the developer docs (languages and voices)
        :param voice_name: voice name, see the developer docs (languages and voices)
        :param quality: audio quality: 0 low (default), 1 medium, 2 high
        :param wait_play_sound: wait until playback finishes, default True
        :param speech_rate: speech rate, default 0, range -100 to 200
        :param voice_style: voice style, default "General"; see the developer docs
        :return: True on success
        """
        return self.__send_data("metahumanSpeech", save_voice_folder, text, language, voice_name,
                                quality, wait_play_sound, speech_rate, voice_style) == "true"

    def metahuman_speech_cache(self, save_voice_folder: str, text: str, language: str,
                               voice_name: str, quality: int = 0, wait_play_sound: bool = True,
                               speech_rate: int = 0, voice_style: str = "General") -> bool:
        """Digital-human speech, cached mode; requires initSpeechService first.

        Intended only for frequently repeated phrases -- using it for one-off
        text leaks memory (each distinct phrase stays cached).

        :param save_voice_folder: folder for generated .wav files, named from 0 upward
        :param text: the text to speak
        :param language: language code, see the developer docs (languages and voices)
        :param voice_name: voice name, see the developer docs (languages and voices)
        :param quality: audio quality: 0 low (default), 1 medium, 2 high
        :param wait_play_sound: wait until playback finishes, default True
        :param speech_rate: speech rate, default 0, range -100 to 200
        :param voice_style: voice style, default "General"; see the developer docs
        :return: True on success
        """
        return self.__send_data("metahumanSpeechCache", save_voice_folder, text, language,
                                voice_name, quality, wait_play_sound, speech_rate,
                                voice_style) == "true"

    def metahuman_insert_video(self, video_file_path: str, audio_file_path: str,
                               wait_play_video: bool = True) -> bool:
        """Insert a video clip into the digital-human stream.

        :param video_file_path: path of the video file to insert
        :param audio_file_path: path of the audio file to insert
        :param wait_play_video: wait until the video finishes, default True
        :return: True on success
        """
        return self.__send_data("metahumanInsertVideo", video_file_path, audio_file_path,
                                wait_play_video) == "true"

    def replace_background(self, bg_file_path: str, replace_red: int = -1,
                           replace_green: int = -1, replace_blue: int = -1,
                           sim_value: int = 0) -> bool:
        """Replace the digital human's background (green-screen models only).

        :param bg_file_path: background image/video path
        :param replace_red: R channel of the background color; -1 (default) auto-detect
        :param replace_green: G channel of the background color; -1 (default) auto-detect
        :param replace_blue: B channel of the background color; -1 (default) auto-detect
        :param sim_value: similarity threshold, default 0, must be >= 0
        :return: True on success
        """
        return self.__send_data("replaceBackground", bg_file_path, replace_red, replace_green,
                                replace_blue, sim_value) == "true"

    def show_speech_text(self, origin_y: int = 0, font_type: str = "Arial", font_size: int = 30,
                         font_red: int = 128, font_green: int = 255, font_blue: int = 0,
                         italic: bool = False, underline: bool = False) -> bool:
        """Display the digital human's speech as on-screen subtitles.

        :param origin_y: Y coordinate where the first character starts;
            0 (default) auto-fits the height
        :param font_type: any installed font, e.g. "Arial", "微软雅黑", "楷体"
        :param font_size: font size, default 30
        :param font_red: R channel of the font color, default 128
        :param font_green: G channel of the font color, default 255
        :param font_blue: B channel of the font color, default 0
        :param italic: italic text, default False
        :param underline: underlined text, default False
        :return: True on success
        """
        return self.__send_data("showSpeechText", origin_y, font_type, font_size, font_red,
                                font_green, font_blue, italic, underline) == "true"

    #################
    #  Driver admin #
    #################
    def get_extend_param(self) -> Optional[str]:
        """Get the WindowsDriver.exe command-line extension parameter.

        :return: the driver's ["extendParam"] command-line field
        """
        return self.__send_data("getExtendParam")

    def close_driver(self) -> None:
        """Shut down the WindowsDriver.exe process.

        The driver's reply is deliberately ignored (the connection may drop
        before one arrives), so this always returns None.
        """
        self.__send_data("closeDriver")
        return

    # #########
    #  Other  #
    # #########
    def handle(self) -> None:
        """socketserver request handler entry point.

        Tunes the connection's socket options, then runs the user script.
        """
        # Blocking mode (left at the default):
        # self.request.setblocking(False)

        # Receive buffer (left at the default):
        # self.request.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 65535)
        # Enlarge the send buffer to 1024*1024 bytes.
        self.request.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024 * 1024)

        # Run the user's script.
        self.script_main()

    @abc.abstractmethod
    def script_main(self):
        """Script entry point; must be overridden by subclasses."""

    @classmethod
    def execute(cls, listen_port: int, local: bool = True):
        """Start the Socket server (multi-threaded) and serve the script.

        :param listen_port: port the script listens on
        :param local: whether the driver runs on this machine; if True,
            WindowsDriver.exe is spawned automatically
        :raises OSError: if listen_port is outside 0-65535
        """
        if listen_port < 0 or listen_port > 65535:
            raise OSError("`listen_port` must be in 0-65535.")

        print("启动服务...")

        # Resolve an IPv4 listening address for the given port.
        address_info = socket.getaddrinfo(None, listen_port, socket.AF_INET, socket.SOCK_STREAM,
                                          0, socket.AI_PASSIVE)[0]
        *_, socket_address = address_info

        # When deployed locally, launch WindowsDriver.exe pointing back at us.
        if local:
            try:
                print("尝试本地启动 WindowsDriver ...")
                subprocess.Popen(["WindowsDriver.exe", "127.0.0.1", str(listen_port)])
                print("本地启动 WindowsDriver 成功,开始执行脚本")
            except FileNotFoundError as e:
                err_msg = "\n异常排除步骤:\n1. 检查 Aibote.exe 路径是否存在中文;\n2. 是否启动 Aibote.exe 初始化环境变量;\n3. 检查电脑环境变量是否初始化成功,环境变量中是否存在 %Aibote% 开头的;\n4. 首次初始化环境变量后,是否重启开发工具;\n5. 是否以管理员权限启动开发工具;\n"
                print("\033[92m", err_msg, "\033[0m")
                raise e
        else:
            print("等待驱动连接...")

        # Start the Socket server and block serving requests forever.
        sock = _ThreadingTCPServer(socket_address, cls, bind_and_activate=True)
        sock.serve_forever()
PypiClean
/Mezzanine-6.0.0.tar.gz/Mezzanine-6.0.0/README.rst
.. image:: https://img.shields.io/pypi/v/mezzanine.svg :target: https://pypi.org/project/mezzanine/ .. image:: https://img.shields.io/pypi/pyversions/mezzanine.svg :target: https://pypi.org/project/mezzanine/ .. image:: https://img.shields.io/pypi/djversions/mezzanine.svg :target: https://pypi.org/project/mezzanine/ .. image:: https://github.com/stephenmcd/mezzanine/workflows/Test%20and%20release/badge.svg :target: https://github.com/stephenmcd/mezzanine/actions?query=workflow%3A%22Test+and+release%22 .. image:: https://img.shields.io/badge/code%20style-black-000000.svg :target: https://github.com/psf/black Created by `Stephen McDonald <http://twitter.com/stephen_mcd>`_ ======== Overview ======== Mezzanine is a powerful, consistent, and flexible content management platform. Built using the `Django`_ framework, Mezzanine provides a simple yet highly extensible architecture that encourages diving in and hacking on the code. Mezzanine is `BSD licensed`_ and supported by a diverse and active community. In some ways, Mezzanine resembles tools such as `Wordpress`_, providing an intuitive interface for managing pages, blog posts, form data, store products, and other types of content. But Mezzanine is also different. Unlike many other platforms that make extensive use of modules or reusable applications, Mezzanine provides most of its functionality by default. This approach yields a more integrated and efficient platform. Visit the `Mezzanine project page`_ to see some of the `great sites people have built using Mezzanine`_. 
Features ======== In addition to the usual features provided by Django such as MVC architecture, ORM, templating, caching and an automatic admin interface, Mezzanine provides the following: * Hierarchical page navigation * Save as draft and preview on site * Scheduled publishing * Drag-and-drop page ordering * WYSIWYG editing * `In-line page editing`_ * Drag-and-drop HTML5 forms builder with CSV export * SEO friendly URLs and meta data * Ecommerce / Shopping cart module (`Cartridge`_) * Configurable `dashboard`_ widgets * Blog engine * Tagging * `Free Themes`_ Marketplace * User accounts and profiles with email verification * Translated to over 35 languages * Sharing via Facebook or Twitter * `Multi-lingual sites`_ * `Custom templates`_ per page or blog post * `Twitter Bootstrap`_ integration * API for `custom content types`_ * `Search engine and API`_ * Seamless integration with third-party Django apps * One step migration from other blogging engines * `Disqus`_ integration, or built-in threaded comments * `Gravatar`_ integration * `Google Analytics`_ integration * `bit.ly`_ integration * `Akismet`_ spam filtering * `JVM`_ compatible (via `Jython`_) The Mezzanine admin dashboard: .. image:: http://mezzanine.jupo.org/docs/_images/dashboard.png Support ======= To **report a security issue**, please send an email privately to `core-team@mezzaninecms.com`_. This gives us a chance to fix the issue and create an official release prior to the issue being made public. For **all other Mezzanine support**, the primary channel is the `mezzanine-users`_ mailing list. Questions, comments, issues, feature requests, and all other related discussions should take place here. If you're **certain** you've come across a bug, then please use the `GitHub issue tracker`_, however it's crucial that enough information is provided to reproduce the bug, ideally with a small code sample repo we can simply fork, run, and see the issue with. 
Other useful information includes things such as the Python stack trace generated by error pages, as well as other aspects of the development environment used, such as operating system, database, and Python version. If **you're not sure you've found a reproducible bug**, then please try the mailing list first. Finally, feel free to drop by the `#mezzanine IRC channel`_ on `Freenode`_, for a chat! Lastly, communications in all Mezzanine spaces are expected to conform to the `Django Code of Conduct`_. Contributing ============ Mezzanine is an open source project managed using both the Git and Mercurial version control systems. These repositories are hosted on both `GitHub`_ and `Bitbucket`_ respectively, so contributing is as easy as forking the project on either of these sites and committing back your enhancements. Donating ======== If you would like to make a donation to continue development of Mezzanine, you can do so via the `Mezzanine Project`_ website. Quotes ====== * "I'm enjoying working with Mezzanine, it's good work" - `Van Lindberg`_, `Python Software Foundation`_ chairman * "Mezzanine looks like it may be Django's killer app" - `Antonio Rodriguez`_, ex CTO of `Hewlett Packard`_, founder of `Tabblo`_ * "Mezzanine looks pretty interesting, tempting to get me off Wordpress" - `Jesse Noller`_, Python core contributor, `Python Software Foundation`_ board member * "I think I'm your newest fan. 
Love these frameworks" - `Emile Petrone`_, integrations engineer at `Urban Airship`_ * "Mezzanine is amazing" - `Audrey Roy`_, founder of `PyLadies`_ and `Django Packages`_ * "Mezzanine convinced me to switch from the Ruby world over to Python" - `Michael Delaney`_, developer * "Like Linux and Python, Mezzanine just feels right" - `Phil Hughes`_, Linux For Dummies author, `The Linux Journal`_ columnist * "Impressed with Mezzanine so far" - `Brad Montgomery`_, founder of `Work For Pie`_ * "From the moment I installed Mezzanine, I have been delighted, both with the initial experience and the community involved in its development" - `John Campbell`_, founder of `Head3 Interactive`_ * "You need to check out the open source project Mezzanine. In one word: Elegant" - `Nick Hagianis`_, developer .. _`Django`: http://djangoproject.com/ .. _`Django Code of Conduct`: https://www.djangoproject.com/conduct/ .. _`Wordpress`: http://wordpress.org/ .. _`BSD licensed`: http://www.linfo.org/bsdlicense.html .. _`great sites people have built using Mezzanine`: http://mezzanine.jupo.org/sites/ .. _`Mezzanine project page`: http://mezzanine.jupo.org .. _`In-line page editing`: http://mezzanine.jupo.org/docs/inline-editing.html .. _`custom content types`: http://mezzanine.jupo.org/docs/content-architecture.html#creating-custom-content-types .. _`Cartridge`: http://cartridge.jupo.org/ .. _`Search engine and API`: http://mezzanine.jupo.org/docs/search-engine.html .. _`dashboard`: http://mezzanine.jupo.org/docs/admin-customization.html#dashboard .. _`Free Themes`: https://github.com/thecodinghouse/mezzanine-themes .. _`Custom templates`: http://mezzanine.jupo.org/docs/content-architecture.html#page-templates .. _`Multi-lingual sites`: http://mezzanine.jupo.org/docs/multi-lingual-sites.html .. _`JVM`: http://en.wikipedia.org/wiki/Java_virtual_machine .. _`Jython`: http://www.jython.org/ .. _`Twitter Bootstrap`: http://getbootstrap.com/ .. _`Disqus`: http://disqus.com/ .. 
_`Gravatar`: http://gravatar.com/ .. _`Google Analytics`: http://www.google.com/analytics/ .. _`bit.ly`: http://bit.ly/ .. _`Akismet`: http://akismet.com/ .. _`GitHub`: http://github.com/stephenmcd/mezzanine/ .. _`Bitbucket`: http://bitbucket.org/stephenmcd/mezzanine/ .. _`mezzanine-users`: http://groups.google.com/group/mezzanine-users/topics .. _`core-team@mezzaninecms.com`: mailto:core-team@mezzaninecms.com?subject=Mezzanine+Security+Issue .. _`GitHub issue tracker`: http://github.com/stephenmcd/mezzanine/issues .. _`#mezzanine IRC channel`: irc://irc.freenode.net/mezzanine .. _`Freenode`: http://freenode.net .. _`Mezzanine Project`: http://mezzanine.jupo.org .. _`Python Software Foundation`: http://www.python.org/psf/ .. _`Urban Airship`: http://urbanairship.com/ .. _`Django Packages`: http://djangopackages.com/ .. _`Hewlett Packard`: http://www.hp.com/ .. _`Tabblo`: http://www.tabblo.com/ .. _`The Linux Journal`: http://www.linuxjournal.com .. _`Work For Pie`: http://workforpie.com/ .. _`Van Lindberg`: http://www.lindbergd.info/ .. _`Antonio Rodriguez`: http://an.ton.io/ .. _`Jesse Noller`: http://jessenoller.com/ .. _`Emile Petrone`: https://twitter.com/emilepetrone .. _`Audrey Roy`: http://cartwheelweb.com/ .. _`Michael Delaney`: http://github.com/fusepilot/ .. _`John Campbell`: http://head3.com/ .. _`Phil Hughes`: http://www.linuxjournal.com/blogs/phil-hughes .. _`Nick Hagianis`: http://hagianis.com .. _`Brad Montgomery`: http://blog.bradmontgomery.net .. _`Head3 Interactive`: http://head3.com .. _`PyLadies`: http://www.pyladies.com
PypiClean
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dijit/nls/dijit-all_ca.js
dojo.provide("dijit.nls.dijit-all_ca");dojo.provide("dojo.nls.colors");dojo.nls.colors._built=true;dojo.provide("dojo.nls.colors.ca");dojo.nls.colors.ca={"lightsteelblue":"blau acer clar","orangered":"taronja vermellós","midnightblue":"blau mitjanit","cadetblue":"blau marí","seashell":"petxina marina","slategrey":"gris pissarra","coral":"corall","darkturquoise":"turquesa fosc","antiquewhite":"blanc antic","mediumspringgreen":"verd primavera mitjà","salmon":"salmó","darkgrey":"gris fosc","ivory":"marbre","greenyellow":"verd grogós","mistyrose":"rosa dens","lightsalmon":"salmó clar","silver":"argent","dimgrey":"gris fosc","orange":"taronja","white":"blanc","navajowhite":"blanc Navajo","royalblue":"blau marí intens","deeppink":"rosa profund","lime":"verd llimona","oldlace":"rosa cremós","chartreuse":"Llimona pàl·lid","darkcyan":"cian fosc","yellow":"groc","linen":"lli","olive":"oliva","gold":"daurat","lawngreen":"verd gespa","lightyellow":"groc clar","tan":"tan","darkviolet":"violeta fosc","lightslategrey":"gris pissarra clar","grey":"gris","darkkhaki":"caqui fosc","green":"verd","deepskyblue":"blau cel profund","aqua":"aigua","sienna":"siena","mintcream":"menta pàl·lid","rosybrown":"marró rosat","mediumslateblue":"blau pissarra mitjà","magenta":"magenta","lightseagreen":"verd marí clar","cyan":"cian","olivedrab":"gris oliva","darkgoldenrod":"ocre fosc","slateblue":"blau pissarra","mediumaquamarine":"aiguamarina mitjana","lavender":"lavanda","mediumseagreen":"verd marí mitjà","maroon":"marró vermellós","darkslategray":"gris pissarra fosc","mediumturquoise":"turquesa mitjana","ghostwhite":"blanc fantasma","darkblue":"blau fosc","mediumvioletred":"vermell violeta mitjà","brown":"marró","lightgray":"gris clar","sandybrown":"marró arenós","pink":"rosa","firebrick":"maó refractari","indigo":"índigo","snow":"neu","darkorchid":"orquídia fosc","turquoise":"turquesa","chocolate":"xocolata","springgreen":"verd de primavera","moccasin":"mocassí","navy":"blau 
marí","lemonchiffon":"groc brisa","teal":"verd blavós","floralwhite":"blanc floral","cornflowerblue":"blau blauet","paleturquoise":"turquesa pàl·lid","purple":"porpra","gainsboro":"gainsboro","plum":"pruna","red":"vermell","blue":"blau","forestgreen":"verd bosc","darkgreen":"verd fosc","honeydew":"rosada de mel","darkseagreen":"verd marí fosc","lightcoral":"corall clar","palevioletred":"vermell porpra pàl·lid","mediumpurple":"porpra mitjana","saddlebrown":"marró mitjà","darkmagenta":"magenta fosc","thistle":"card","whitesmoke":"blanc fumat","wheat":"blat","violet":"violeta","lightskyblue":"blau cel clar","goldenrod":"ocre","mediumblue":"blau mitjà","skyblue":"blau cel","crimson":"carmesí","darksalmon":"salmó fosc","darkred":"vermell fosc","darkslategrey":"gris pissarra fosc","peru":"Perú","lightgrey":"gris clar","lightgoldenrodyellow":"groc ocre clar","blanchedalmond":"ametlla pàl·lid","aliceblue":"blau cian clar","bisque":"crema","slategray":"gris pissarra","palegoldenrod":"ocre pàl·lid","darkorange":"taronja fosc","aquamarine":"aiguamarina","lightgreen":"verd clar","burlywood":"marró arenós","dodgerblue":"blau Dodger","darkgray":"gris fosc","lightcyan":"cian clar","powderblue":"blau grisós","blueviolet":"blau violeta","orchid":"orquídia","dimgray":"gris fosc","beige":"beix","fuchsia":"fúcsia","lavenderblush":"lavanda vermellosa","hotpink":"rosa fúcsia","steelblue":"blau acer","tomato":"tomàquet","lightpink":"rosa clar","limegreen":"verd llimona verda","indianred":"vermell indi","papayawhip":"préssec pastel","lightslategray":"gris pissarra clar","gray":"gris","mediumorchid":"orquídia mitjana","cornsilk":"cru","black":"negre","seagreen":"verd marí","darkslateblue":"blau pissarra fosc","khaki":"caqui","lightblue":"blau clar","palegreen":"verd pàl·lid","azure":"atzur","peachpuff":"préssec","darkolivegreen":"verd oliva fosc","yellowgreen":"verd 
grogós"};dojo.provide("dijit.nls.loading");dijit.nls.loading._built=true;dojo.provide("dijit.nls.loading.ca");dijit.nls.loading.ca={"loadingState":"S'està carregant...","errorState":"Ens sap greu. S'ha produït un error."};dojo.provide("dijit.nls.common");dijit.nls.common._built=true;dojo.provide("dijit.nls.common.ca");dijit.nls.common.ca={"buttonOk":"D'acord","buttonCancel":"Cancel·la","buttonSave":"Desa","itemClose":"Tanca"};dojo.provide("dijit._editor.nls.commands");dijit._editor.nls.commands._built=true;dojo.provide("dijit._editor.nls.commands.ca");dijit._editor.nls.commands.ca={"removeFormat":"Elimina el format","copy":"Copia","paste":"Enganxa","selectAll":"Selecciona-ho tot","insertOrderedList":"Llista numerada","insertTable":"Insereix/edita la taula","underline":"Subratllat","foreColor":"Color de primer pla","htmlToggle":"Font HTML","formatBlock":"Estil de paràgraf","insertHorizontalRule":"Regle horitzontal","delete":"Suprimeix","insertUnorderedList":"Llista de vinyetes","tableProp":"Propietat de taula","insertImage":"Insereix imatge","superscript":"Superíndex","subscript":"Subíndex","createLink":"Crea un enllaç","undo":"Desfés","italic":"Cursiva","fontName":"Nom del tipus de lletra","justifyLeft":"Alinea a la esquerra","unlink":"Elimina l'enllaç","toggleTableBorder":"Inverteix els contorns de taula","ctrlKey":"control+${0}","fontSize":"Cos de la lletra","systemShortcut":"L'acció \"${0}\" és l'única disponible al navegador utilitzant una drecera del teclat. 
Utilitzeu ${1}.","indent":"Sagnat","redo":"Refés","strikethrough":"Ratllat","justifyFull":"Justifica","justifyCenter":"Centra","hiliteColor":"Color de fons","deleteTable":"Suprimeix la taula","outdent":"Sagna a l'esquerra","cut":"Retalla","plainFormatBlock":"Estil de paràgraf","toggleDir":"Inverteix la direcció","bold":"Negreta","tabIndent":"Sagnat","justifyRight":"Alinea a la dreta","print":"Print","newPage":"New Page","appleKey":"⌘${0}","fullScreen":"Toggle Full Screen","viewSource":"View HTML Source"};dojo.provide("dojo.cldr.nls.number");dojo.cldr.nls.number._built=true;dojo.provide("dojo.cldr.nls.number.ca");dojo.cldr.nls.number.ca={"group":".","percentSign":"%","exponential":"E","percentFormat":"#,##0%","scientificFormat":"#E0","list":";","infinity":"∞","patternDigit":"#","minusSign":"-","decimal":",","nan":"NaN","nativeZeroDigit":"0","perMille":"‰","decimalFormat":"#,##0.###","currencyFormat":"#,##0.00 ¤","plusSign":"+","currencySpacing-afterCurrency-currencyMatch":"[:letter:]","currencySpacing-beforeCurrency-surroundingMatch":"[:digit:]","currencySpacing-afterCurrency-insertBetween":" ","currencySpacing-afterCurrency-surroundingMatch":"[:digit:]","currencySpacing-beforeCurrency-currencyMatch":"[:letter:]","currencySpacing-beforeCurrency-insertBetween":" "};dojo.provide("dijit.form.nls.validate");dijit.form.nls.validate._built=true;dojo.provide("dijit.form.nls.validate.ca");dijit.form.nls.validate.ca={"rangeMessage":"Aquest valor és fora de l'interval","invalidMessage":"El valor introduït no és vàlid","missingMessage":"Aquest valor és necessari"};dojo.provide("dojo.cldr.nls.currency");dojo.cldr.nls.currency._built=true;dojo.provide("dojo.cldr.nls.currency.ca");dojo.cldr.nls.currency.ca={"HKD_displayName":"dòlar de Hong Kong","CHF_displayName":"franc suís","CAD_displayName":"dòlar canadenc","CNY_displayName":"iuan renmimbi xinès","AUD_displayName":"dòlar australià","JPY_displayName":"ien japonès","USD_displayName":"dòlar dels Estats 
Units","GBP_displayName":"lliura esterlina britànica","EUR_displayName":"euro","CHF_symbol":"Fr.","JPY_symbol":"JP¥","HKD_symbol":"HK$","USD_symbol":"US$","CAD_symbol":"CA$","EUR_symbol":"€","CNY_symbol":"CN¥","GBP_symbol":"£","AUD_symbol":"AU$"};dojo.provide("dojo.cldr.nls.gregorian");dojo.cldr.nls.gregorian._built=true;dojo.provide("dojo.cldr.nls.gregorian.ca");dojo.cldr.nls.gregorian.ca={"dateFormatItem-yM":"M/yyyy","field-dayperiod":"a.m./p.m.","dateFormatItem-yQ":"Q yyyy","field-minute":"minut","eraNames":["aC","dC"],"dateFormatItem-MMMEd":"E d MMM","field-weekday":"dia de la setmana","dateFormatItem-yQQQ":"QQQ y","days-standAlone-wide":["diumenge","dilluns","dimarts","dimecres","dijous","divendres","dissabte"],"dateFormatItem-MMM":"LLL","months-standAlone-narrow":["g","f","m","a","m","j","j","a","s","o","n","d"],"field-era":"era","field-hour":"hora","quarters-standAlone-abbr":["1T","2T","3T","4T"],"dateFormatItem-y":"y","timeFormat-full":"H:mm:ss zzzz","months-standAlone-abbr":["gen.","febr.","març","abr.","maig","juny","jul.","ag.","set.","oct.","nov.","des."],"dateFormatItem-yMMM":"MMM y","days-standAlone-narrow":["g","l","t","c","j","v","s"],"eraAbbr":["aC","dC"],"dateFormatItem-yyyyMM":"MM/yyyy","dateFormat-long":"d 'de' MMMM 'de' y","timeFormat-medium":"H:mm:ss","dateFormatItem-EEEd":"EEE d","field-zone":"zona","dateFormatItem-Hm":"H:mm","dateFormat-medium":"dd/MM/yyyy","quarters-standAlone-wide":["1r trimestre","2n trimestre","3r trimestre","4t trimestre"],"dateFormatItem-yMMMM":"LLLL 'del' y","dateFormatItem-ms":"mm:ss","field-year":"any","quarters-standAlone-narrow":["1","2","3","4"],"dateFormatItem-HHmmss":"HH:mm:ss","months-standAlone-wide":["gener","febrer","març","abril","maig","juny","juliol","agost","setembre","octubre","novembre","desembre"],"field-week":"setmana","dateFormatItem-MMMMEd":"E d MMMM","dateFormatItem-MMMd":"d MMM","dateFormatItem-yyQ":"Q yy","timeFormat-long":"H:mm:ss 
z","months-format-abbr":["gen.","febr.","març","abr.","maig","juny","jul.","ag.","set.","oct.","nov.","des."],"timeFormat-short":"H:mm","field-month":"mes","dateFormatItem-MMMMd":"d 'de' MMMM","quarters-format-abbr":["1T","2T","3T","4T"],"days-format-abbr":["dg.","dl.","dt.","dc.","dj.","dv.","ds."],"pm":"p.m.","dateFormatItem-mmss":"mm:ss","dateFormatItem-M":"L","days-format-narrow":["g","l","t","c","j","v","s"],"field-second":"segon","field-day":"dia","dateFormatItem-MEd":"E d/M","months-format-narrow":["g","f","m","a","m","j","j","a","s","o","n","d"],"am":"a.m.","days-standAlone-abbr":["dg","dl","dt","dc","dj","dv","ds"],"dateFormat-short":"dd/MM/yy","dateFormatItem-yMMMEd":"EEE d MMM y","dateFormat-full":"EEEE d 'de' MMMM 'de' y","dateFormatItem-Md":"d/M","dateFormatItem-yMEd":"E d/M/yyyy","months-format-wide":["gener","febrer","març","abril","maig","juny","juliol","agost","setembre","octubre","novembre","desembre"],"dateFormatItem-d":"d","quarters-format-wide":["1r trimestre","2n trimestre","3r trimestre","4t trimestre"],"days-format-wide":["diumenge","dilluns","dimarts","dimecres","dijous","divendres","dissabte"],"eraNarrow":["aC","dC"],"dateTimeFormats-appendItem-Day-Of-Week":"{0} {1}","dateTimeFormat-medium":"{1} {0}","dateTimeFormats-appendItem-Second":"{0} ({2}: {1})","dateTimeFormats-appendItem-Era":"{0} {1}","dateTimeFormats-appendItem-Week":"{0} ({2}: {1})","quarters-format-narrow":["1","2","3","4"],"dateTimeFormat-long":"{1} {0}","dateTimeFormat-full":"{1} {0}","dateTimeFormats-appendItem-Day":"{0} ({2}: {1})","dateFormatItem-hm":"h:mm a","dateTimeFormats-appendItem-Year":"{0} {1}","dateTimeFormats-appendItem-Hour":"{0} ({2}: {1})","dateTimeFormats-appendItem-Quarter":"{0} ({2}: {1})","dateTimeFormats-appendItem-Month":"{0} ({2}: {1})","dateTimeFormats-appendItem-Minute":"{0} ({2}: {1})","dateTimeFormats-appendItem-Timezone":"{0} {1}","dateTimeFormat-short":"{1} {0}","dateFormatItem-Hms":"H:mm:ss","dateFormatItem-hms":"h:mm:ss 
a"};dojo.provide("dijit.form.nls.ComboBox");dijit.form.nls.ComboBox._built=true;dojo.provide("dijit.form.nls.ComboBox.ca");dijit.form.nls.ComboBox.ca={"previousMessage":"Opcions anteriors","nextMessage":"Més opcions"};
PypiClean
/Flask-Manifest-0.2.0.tar.gz/Flask-Manifest-0.2.0/flask_manifest.py
"""Flask-Manifest: serve revisioned (content-hashed) static assets.

Reads a gulp-rev style ``rev-manifest.json`` that maps original filenames to
their content-hashed counterparts, overrides Jinja's ``url_for`` so that
``url_for('static', filename=...)`` resolves to the hashed file, and
optionally registers a route that serves those files with a far-future
cache timeout.
"""
import os

from flask import (json, send_from_directory, current_app,
                   url_for as flask_url_for)

__all__ = ['Manifest']

# All extension settings live in ``app.config`` under this prefix.
CONFIG_PREFIX = 'MANIFEST_'
DEFAULT_CONFIG = {
    'MANIFEST_DEBUG': False,
    'MANIFEST_ROOT': 'dist',
    'MANIFEST_SERVE_ROOT': True,
    'MANIFEST_FILENAME': 'rev-manifest.json',
    'MANIFEST_ENDPOINT': 'manifest-dist',
    'MANIFEST_URL_PATH': '/dist',
    'MANIFEST_DOMAIN': None,
}

# Cache timeout (seconds) for revisioned assets: ten years, i.e. effectively
# "forever".  Safe because the filenames are content-hashed: a new build
# produces a new URL, so stale caches can never be served.
FOREVER = 10 * 365 * 24 * 60 * 60

# Key under which the state object is stored in ``app.extensions``.
EXTENSION_KEY = 'manifest'


def get_state():
    """Return the :class:`ManifestState` registered on the current app."""
    return current_app.extensions[EXTENSION_KEY]


def send_static_file(filename):
    """View function serving *filename* from the manifest root directory
    with a far-future cache timeout."""
    return send_from_directory(get_state().root, filename,
                               cache_timeout=FOREVER)


def url_for(endpoint, **values):
    """Drop-in replacement for :func:`flask.url_for` installed into the
    Jinja environment.

    For the ``static`` endpoint, rewrites ``filename`` to its revisioned
    name when it appears in the manifest; filenames not in the manifest
    fall through to the regular static route unchanged.
    """
    if endpoint == 'static':
        state = get_state()
        try:
            rev_file = state.manifest_contents[values['filename']]
        except KeyError:
            # Not a revisioned asset -- leave the request untouched.
            pass
        else:
            values['filename'] = rev_file
            if state.serve_root:
                endpoint = state.endpoint
            else:
                # TODO: Handle external domains (MANIFEST_DOMAIN is unused).
                values['_external'] = True
    return flask_url_for(endpoint, **values)


class ManifestState(object):
    """Per-application snapshot of the extension's configuration.

    Each ``MANIFEST_*`` config key becomes a lowercase attribute
    (e.g. ``MANIFEST_ROOT`` -> ``state.root``).
    """

    def __init__(self, app):
        for key, value in DEFAULT_CONFIG.items():
            # Strip only the *leading* prefix.  The previous
            # ``key.replace(CONFIG_PREFIX, '')`` would have removed the
            # substring anywhere in the key, not just at the start.
            attr_name = key[len(CONFIG_PREFIX):].lower()
            setattr(self, attr_name, app.config.get(key, value))
        self.manifest = {}


class Manifest(object):
    """The Flask extension object.

    Usage::

        manifest = Manifest(app)
        # or the application-factory pattern:
        manifest = Manifest()
        manifest.init_app(app)
    """

    def __init__(self, app=None):
        self.app = app
        if self.app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Load the manifest file and wire the extension into *app*."""
        state = ManifestState(app)
        # In debug mode the extension is disabled unless MANIFEST_DEBUG
        # explicitly opts in -- assets are then served unrevisioned.
        if app.debug and not state.debug:
            return
        app.extensions[EXTENSION_KEY] = state
        path = os.path.join(state.root, state.filename)
        with app.open_resource(path) as manifest:
            state.manifest_contents = json.load(manifest)
        if state.serve_root:
            app.add_url_rule(state.url_path + '/<path:filename>',
                             endpoint=state.endpoint,
                             view_func=send_static_file)
        # Shadow Jinja's built-in url_for with the manifest-aware version.
        app.jinja_env.globals['url_for'] = url_for
PypiClean
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/codemirror/mode/fortran/fortran.js
// CodeMirror mode for Fortran source code.  Tokenizes comments ("!" to end
// of line), single- and double-quoted strings (with backslash continuation),
// numbers, operators, keywords, intrinsic procedures and intrinsic/C-interop
// type names.
(function(mod) {
  if (typeof exports == "object" && typeof module == "object") // CommonJS
    mod(require("../../lib/codemirror"));
  else if (typeof define == "function" && define.amd) // AMD
    define(["../../lib/codemirror"], mod);
  else // Plain browser env
    mod(CodeMirror);
})(function(CodeMirror) {
"use strict";

CodeMirror.defineMode("fortran", function() {
  // Build a constant-time membership table (word -> true) from a word list.
  function words(array) {
    var keys = {};
    for (var i = 0; i < array.length; ++i) {
      keys[array[i]] = true;
    }
    return keys;
  }

  // Fortran statement keywords.
  var keywords = words([
    "abstract", "accept", "allocatable", "allocate",
    "array", "assign", "asynchronous", "backspace",
    "bind", "block", "byte", "call", "case", "class",
    "close", "common", "contains", "continue", "cycle",
    "data", "deallocate", "decode", "deferred",
    "dimension", "do", "elemental", "else", "encode",
    "end", "endif", "entry", "enumerator", "equivalence",
    "exit", "external", "extrinsic", "final", "forall",
    "format", "function", "generic", "go", "goto", "if",
    "implicit", "import", "include", "inquire", "intent",
    "interface", "intrinsic", "module", "namelist",
    "non_intrinsic", "non_overridable", "none", "nopass",
    "nullify", "open", "optional", "options", "parameter",
    "pass", "pause", "pointer", "print", "private",
    "program", "protected", "public", "pure", "read",
    "recursive", "result", "return", "rewind", "save",
    "select", "sequence", "stop", "subroutine", "target",
    "then", "to", "type", "use", "value", "volatile",
    "where", "while", "write"]);
  // Intrinsic procedures (styled as "builtin").
  var builtins = words(["abort", "abs", "access", "achar", "acos",
    "adjustl", "adjustr", "aimag", "aint", "alarm", "all",
    "allocated", "alog", "amax", "amin", "amod", "and",
    "anint", "any", "asin", "associated", "atan", "besj",
    "besjn", "besy", "besyn", "bit_size", "btest", "cabs",
    "ccos", "ceiling", "cexp", "char", "chdir", "chmod",
    "clog", "cmplx", "command_argument_count", "complex",
    "conjg", "cos", "cosh", "count", "cpu_time", "cshift",
    "csin", "csqrt", "ctime", "c_funloc", "c_loc",
    "c_associated", "c_null_ptr", "c_null_funptr",
    "c_f_pointer", "c_null_char", "c_alert", "c_backspace",
    "c_form_feed", "c_new_line", "c_carriage_return",
    "c_horizontal_tab", "c_vertical_tab", "dabs", "dacos",
    "dasin", "datan", "date_and_time", "dbesj", "dbesj",
    "dbesjn", "dbesy", "dbesy", "dbesyn", "dble", "dcos",
    "dcosh", "ddim", "derf", "derfc", "dexp", "digits",
    "dim", "dint", "dlog", "dlog", "dmax", "dmin", "dmod",
    "dnint", "dot_product", "dprod", "dsign", "dsinh",
    "dsin", "dsqrt", "dtanh", "dtan", "dtime", "eoshift",
    "epsilon", "erf", "erfc", "etime", "exit", "exp",
    "exponent", "extends_type_of", "fdate", "fget", "fgetc",
    "float", "floor", "flush", "fnum", "fputc", "fput",
    "fraction", "fseek", "fstat", "ftell", "gerror",
    "getarg", "get_command", "get_command_argument",
    "get_environment_variable", "getcwd", "getenv",
    "getgid", "getlog", "getpid", "getuid", "gmtime",
    "hostnm", "huge", "iabs", "iachar", "iand", "iargc",
    "ibclr", "ibits", "ibset", "ichar", "idate", "idim",
    "idint", "idnint", "ieor", "ierrno", "ifix", "imag",
    "imagpart", "index", "int", "ior", "irand", "isatty",
    "ishft", "ishftc", "isign", "iso_c_binding",
    "is_iostat_end", "is_iostat_eor", "itime", "kill",
    "kind", "lbound", "len", "len_trim", "lge", "lgt",
    "link", "lle", "llt", "lnblnk", "loc", "log",
    "logical", "long", "lshift", "lstat", "ltime",
    "matmul", "max", "maxexponent", "maxloc", "maxval",
    "mclock", "merge", "move_alloc", "min", "minexponent",
    "minloc", "minval", "mod", "modulo", "mvbits",
    "nearest", "new_line", "nint", "not", "or", "pack",
    "perror", "precision", "present", "product", "radix",
    "rand", "random_number", "random_seed", "range",
    "real", "realpart", "rename", "repeat", "reshape",
    "rrspacing", "rshift", "same_type_as", "scale",
    "scan", "second", "selected_int_kind",
    "selected_real_kind", "set_exponent", "shape",
    "short", "sign", "signal", "sinh", "sin", "sleep",
    "sngl", "spacing", "spread", "sqrt", "srand", "stat",
    "sum", "symlnk", "system", "system_clock", "tan",
    "tanh", "time", "tiny", "transfer", "transpose",
    "trim", "ttynam", "ubound", "umask", "unlink",
    "unpack", "verify", "xor", "zabs", "zcos", "zexp",
    "zlog", "zsin", "zsqrt"]);
  // Intrinsic and C-interoperability type names (also styled "builtin").
  var dataTypes = words(["c_bool", "c_char", "c_double",
    "c_double_complex", "c_float", "c_float_complex",
    "c_funptr", "c_int", "c_int16_t", "c_int32_t",
    "c_int64_t", "c_int8_t", "c_int_fast16_t",
    "c_int_fast32_t", "c_int_fast64_t", "c_int_fast8_t",
    "c_int_least16_t", "c_int_least32_t",
    "c_int_least64_t", "c_int_least8_t", "c_intmax_t",
    "c_intptr_t", "c_long", "c_long_double",
    "c_long_double_complex", "c_long_long", "c_ptr",
    "c_short", "c_signed_char", "c_size_t", "character",
    "complex", "double", "integer", "logical", "real"]);
  var isOperatorChar = /[+\-*&=<>\/\:]/;
  // Dotted logical/relational operators, e.g. ".and.", ".eq." -- matched
  // case-insensitively before single-char tokenization.
  var litOperator = /^\.(and|or|eq|lt|le|gt|ge|ne|not|eqv|neqv)\./i;

  // Default tokenizer (used when not inside a string).
  function tokenBase(stream, state) {
    if (stream.match(litOperator)) {
      return 'operator';
    }

    var ch = stream.next();
    if (ch == "!") {
      // "!" starts a comment running to end of line.
      stream.skipToEnd();
      return "comment";
    }
    if (ch == '"' || ch == "'") {
      // Switch to the string tokenizer until the matching quote is found.
      state.tokenize = tokenString(ch);
      return state.tokenize(stream, state);
    }
    if (/[\[\]\(\),]/.test(ch)) {
      return null;
    }
    if (/\d/.test(ch)) {
      stream.eatWhile(/[\w\.]/);
      return "number";
    }
    if (isOperatorChar.test(ch)) {
      stream.eatWhile(isOperatorChar);
      return "operator";
    }
    stream.eatWhile(/[\w\$_]/);
    var word = stream.current().toLowerCase();

    if (keywords.hasOwnProperty(word)) {
      return 'keyword';
    }
    if (builtins.hasOwnProperty(word) || dataTypes.hasOwnProperty(word)) {
      return 'builtin';
    }
    return "variable";
  }

  // Returns a tokenizer consuming a string delimited by `quote`; a trailing
  // backslash continues the string onto the next line.
  function tokenString(quote) {
    return function(stream, state) {
      var escaped = false, next, end = false;
      while ((next = stream.next()) != null) {
        if (next == quote && !escaped) {
          end = true;
          break;
        }
        escaped = !escaped && next == "\\";
      }
      if (end || !escaped) state.tokenize = null;
      return "string";
    };
  }

  // Interface
  return {
    startState: function() {
      return {tokenize: null};
    },

    token: function(stream, state) {
      if (stream.eatSpace()) return null;
      // The original ended with `if (style == "comment" || style == "meta")
      // return style; return style;` -- both branches returned the same
      // value, so the dead conditional is collapsed into a single return.
      return (state.tokenize || tokenBase)(stream, state);
    }
  };
});

CodeMirror.defineMIME("text/x-fortran", "fortran");

});
PypiClean
/DXC-RL-1.0.3.5.tar.gz/DXC-RL-1.0.3.5/dxc/ai/publish_microservice/publish_microservice.py
import Algorithmia
from Algorithmia.errors import AlgorithmException
import shutil #serializing models
import urllib.parse #input data
from git import Git, Repo, remote
import os
import pickle
from IPython.display import YouTubeVideo
from IPython.core.magic import register_line_cell_magic
import urllib.request, json
from dxc.ai.global_variables import globals_file
from dxc.ai.logging import microservice_logging


def publish_microservice(microservice_design, trained_model, verbose=False):
    """Package *trained_model* and deploy it as an Algorithmia microservice.

    Steps: create the algorithm on Algorithmia (if missing), pickle the model
    (plus any feature/target encoders recorded in ``globals_file``) into the
    Algorithmia data collection, clone the algorithm's git repo, generate the
    serving source and requirements files, push, publish, and return the REST
    endpoint URL.

    Parameters:
        microservice_design (dict): keys include ``api_key``,
            ``execution_environment_username``, ``microservice_name``,
            ``model_path`` and ``api_namespace``.
        trained_model: any picklable model object exposing ``predict``.
        verbose (bool): when True, echo git progress lines.

    Returns:
        str: the versioned API endpoint URL of the published microservice.
    """
    #Capture microservice_design in log
    microservice_logging.microservice_design_log(microservice_design)

    # create a connection to algorithmia
    client = Algorithmia.client(microservice_design["api_key"])
    api = client.algo(microservice_design["execution_environment_username"] + "/" + microservice_design["microservice_name"])

    # create the algorithm if it doesn't exist
    try:
        api.create(
            details = {
                "label": microservice_design["microservice_name"],
            },
            settings = {
                "language": "python3-1",
                "source_visibility": "closed",
                "license": "apl",
                "network_access": "full",
                "pipeline_enabled": True,
                "environment": "cpu"
            }
        )
    except Exception as error:
        # Most commonly "algorithm already exists" -- report and continue.
        print(error)

    # create data collection if it doesn't exist
    if not client.dir(microservice_design["model_path"]).exists():
        client.dir(microservice_design["model_path"]).create()

    # define a local work directory
    local_dir = microservice_design["microservice_name"]

    # delete local directory if it already exists
    if os.path.exists(local_dir):
        shutil.rmtree(local_dir)

    # create local work directory
    os.makedirs(local_dir)

    # serialize the model locally; the with-block guarantees the file is
    # closed even if pickling raises (previously open/close without finally)
    local_model = "{}/{}".format(local_dir, "mdl")
    with open(local_model, 'wb') as file:
        pickle.dump(trained_model, file)

    # upload our model file to our data collection
    api_model = "{}/{}".format(microservice_design["model_path"], microservice_design["microservice_name"])
    client.file(api_model).putFile(local_model)

    # upload the feature encoder, if one was recorded by run_experiment
    if globals_file.run_experiment_encoder_used:
        encode_model = 'encode_file.pkl'
        with open(encode_model, 'wb') as encode_output:
            pickle.dump(globals_file.run_experiment_encoder, encode_output)
        encode_folder = microservice_design["microservice_name"] + '_encoder'
        encode_path = "{}/{}".format(microservice_design["model_path"], encode_folder)
        client.file(encode_path).putFile(encode_model)

    # upload the target encoder, if one was recorded by run_experiment
    if globals_file.run_experiment_target_encoder_used:
        target_encode_model = 'target_encode_file.pkl'
        with open(target_encode_model, 'wb') as target_encode_output:
            pickle.dump(globals_file.run_experiment_target_encoder, target_encode_output)
        target_encode_folder = microservice_design["microservice_name"] + '_target_encoder'
        target_encode_path = "{}/{}".format(microservice_design["model_path"], target_encode_folder)
        client.file(target_encode_path).putFile(target_encode_model)

    # encode API key, so we can use it in the git URL
    encoded_api_key = urllib.parse.quote_plus(microservice_design["api_key"])

    algo_repo = "https://{}:{}@git.algorithmia.com/git/{}/{}.git".format(
        microservice_design["execution_environment_username"],
        encoded_api_key,
        microservice_design["execution_environment_username"],
        microservice_design["microservice_name"]
    )

    class Progress(remote.RemoteProgress):
        # NOTE: `verbose` is evaluated once, when this class body executes
        # inside the enclosing call.  Quiet mode installs no-op callbacks.
        if not verbose:
            def line_dropped(self, line):
                pass
            def update(self, *args):
                pass
        else:
            def line_dropped(self, line):
                print(line)
            def update(self, *args):
                print(self._cur_line)

    p = Progress()

    try:
        Repo.clone_from(algo_repo, "{}/{}".format(local_dir, microservice_design["microservice_name"]), progress=p)
        cloned_repo = Repo("{}/{}".format(local_dir, microservice_design["microservice_name"]))
    except Exception as error:
        # (removed leftover debug print("here"))
        print(error)

    api_script_path = "{}/{}/src/{}.py".format(local_dir, microservice_design["microservice_name"], microservice_design["microservice_name"])
    dependency_file_path = "{}/{}/{}".format(local_dir, microservice_design["microservice_name"], "requirements.txt")

    # defines the source for the microservice
    results = "{'results':prediction}"
    file_path = "'" + api_model + "'"
    if globals_file.run_experiment_encoder_used:
        encodefile_path = "'" + encode_path + "'"
    if globals_file.run_experiment_target_encoder_used:
        target_encodefile_path = "'" + target_encode_path + "'"

    ##Don't change the structure of below docstring
    ##this is the source code needed for the microservice
    src_code_content = """import Algorithmia
import auto_ml
import pandas as pd
import pickle

# create an Algorithmia client
client = Algorithmia.client()

def load_model():
# Get file by name
# Open file and load model
\tfile_path = {file_path}
\tmodel_path = client.file(file_path).getFile().name
# Open file and load model
\twith open(model_path, 'rb') as f:
\t\tmodel = pickle.load(f)
\t\treturn model

trained_model = load_model()

def apply(input):
\tprediction = trained_model.predict(input)
\treturn {results}"""

    ## source code for customized model
    src_code_generalized = """import Algorithmia
import auto_ml
import pandas as pd
import pickle
import json
import numpy as np

# create an Algorithmia client
client = Algorithmia.client()

def load_model():
# Get file by name
# Open file and load model
\tfile_path = {file_path}
\tmodel_path = client.file(file_path).getFile().name
# Open file and load model
\twith open(model_path, 'rb') as f:
\t\tmodel = pickle.load(f)
\t\treturn model

trained_model = load_model()

def default(obj):
    if type(obj).__module__ == np.__name__:
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return obj.item()
    raise TypeError('Unknown type:', type(obj))

def apply(input):
\tprediction = trained_model.predict(input)
\tprediction = json.dumps(prediction, default=default)
\treturn {results}"""

    ## source code for generalized tpot model
    src_code_generalized_encode = """import Algorithmia
import auto_ml
import pandas as pd
import pickle
import json
import numpy as np
import feature_engine

# create an Algorithmia client
client = Algorithmia.client()

def load_model():
# Get file by name
# Open file and load model
\tfile_path = {file_path}
\tmodel_path = client.file(file_path).getFile().name
# Open file and load model
\twith open(model_path, 'rb') as f:
\t\tmodel = pickle.load(f)
\t\treturn model

trained_model = load_model()

def load_encode():
# Get file by name
# Open file and load encoder
\tencodefile_path = {encodefile_path}
\tencode_path = client.file(encodefile_path).getFile().name
# Open file and load encoder
\twith open(encode_path, 'rb') as f:
\t\tencoder = pickle.load(f)
\t\treturn encoder

encode = load_encode()

def default(obj):
    if type(obj).__module__ == np.__name__:
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return obj.item()
    raise TypeError('Unknown type:', type(obj))

def apply(input):
\tinput = pd.DataFrame([input])
\ttry:
\t\tinput = encode.transform(input)
\texcept:
\t\tpass
\tprediction = trained_model.predict(input)
\tprediction = json.dumps(prediction[0], default=default)
\treturn {results}"""

    ## source code for generalized tpot model
    src_code_generalized_target_encode = """import Algorithmia
import auto_ml
import pandas as pd
import pickle
import json
import numpy as np
import feature_engine

# create an Algorithmia client
client = Algorithmia.client()

def load_model():
# Get file by name
# Open file and load model
\tfile_path = {file_path}
\tmodel_path = client.file(file_path).getFile().name
# Open file and load model
\twith open(model_path, 'rb') as f:
\t\tmodel = pickle.load(f)
\t\treturn model

trained_model = load_model()

def load_encode():
# Get file by name
# Open file and load encoder
\tencodefile_path = {encodefile_path}
\tencode_path = client.file(encodefile_path).getFile().name
# Open file and load encoder
\twith open(encode_path, 'rb') as f:
\t\tencoder = pickle.load(f)
\t\treturn encoder

encode = load_encode()

def load_target_encode():
# Get file by name
# Open file and load target encoder
\ttarget_encodefile_path = {target_encodefile_path}
\ttarget_encode_path = client.file(target_encodefile_path).getFile().name
# Open file and load target encoder
\twith open(target_encode_path, 'rb') as f:
\t\ttarget_encoder = pickle.load(f)
\t\treturn target_encoder

target_encode = load_target_encode()

def default(obj):
    if type(obj).__module__ == np.__name__:
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return obj.item()
    raise TypeError('Unknown type:', type(obj))

def apply(input):
\tinput = pd.DataFrame([input])
\ttry:
\t\tinput = encode.transform(input)
\texcept:
\t\tpass
\tprediction = trained_model.predict(input)
\ttry:
\t\tprediction = target_encode.inverse_transform(prediction)
\t\tprediction = prediction[0]
\texcept:
\t\tprediction = json.dumps(prediction[0], default=default)
\treturn {results}"""

    # pick the template matching how the model was trained
    if globals_file.run_experiment_used:
        src_code_content = src_code_generalized
    if globals_file.run_experiment_encoder_used:
        src_code_content = src_code_generalized_encode
    if globals_file.run_experiment_target_encoder_used:
        src_code_content = src_code_generalized_target_encode

    splitted = src_code_content.split('\n')

    ##writes the source into the local, cloned GitHub repository
    # placeholder lines are replaced with the concrete data-collection paths
    with open(api_script_path, "w") as f:
        for line in splitted:
            if line.strip() == "file_path = {file_path}":
                line = "\tfile_path = {}".format(file_path)
            if line.strip() == "encodefile_path = {encodefile_path}":
                line = "\tencodefile_path = {}".format(encodefile_path)
            if line.strip() == "target_encodefile_path = {target_encodefile_path}":
                line = "\ttarget_encodefile_path = {}".format(target_encodefile_path)
            if line.strip() == "return {results}":
                line = "\treturn {}".format(results)
            f.write(line + '\n')

    ##Don't change the structure of below docstring
    ##this is the requirements needed for microservice
    requirements_file_content = """algorithmia>=1.0.0,<2.0
six
auto_ml
pandas
numpy
feature-engine
bottleneck==1.2.1"""

    post_split = requirements_file_content.split('\n')

    #writes the requirements file into the local, cloned GitHub repository.
    with open(dependency_file_path, "w") as f:
        for line in post_split:
            line = line.lstrip()
            f.write(line + '\n')

    # Publish the microservice
    files = ["src/{}.py".format(microservice_design["microservice_name"]), "requirements.txt"]
    cloned_repo.index.add(files)
    cloned_repo.index.commit("Add algorithm files")
    origin = cloned_repo.remote(name='origin')
    p = Progress()
    origin.push(progress=p)

    # publish/deploy our algorithm
    client.algo(microservice_design["api_namespace"]).publish()

    # code generates the api endpoint for the newly published microservice
    latest_version = client.algo(microservice_design["api_namespace"]).info().version_info.semantic_version
    api_url = "https://api.algorithmia.com/v1/algo/{}/{}".format(microservice_design["api_namespace"], latest_version)
    return api_url
PypiClean
/NagAconda-0.2.1.tar.gz/NagAconda-0.2.1/docs/_build/html/_static/doctools.js
/**
 * doctools.js — Sphinx JavaScript utilities for documentation pages.
 *
 * Provides small jQuery helpers (URL encode/decode, query-string parsing,
 * text highlighting) and the `Documentation` module that wires up i18n,
 * search-term highlighting and index-table toggling on page load.
 *
 * Requires: jQuery ($), underscore (_), and the page-injected
 * DOCUMENTATION_OPTIONS global.
 */

/* select a different prefix for underscore */
$u = _.noConflict();

/**
 * make the code below compatible with browsers without
 * an installed firebug like debugger
if (!window.console || !console.firebug) {
  var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
    "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
    "profile", "profileEnd"];
  window.console = {};
  for (var i = 0; i < names.length; ++i)
    window.console[names[i]] = function() {};
}
 */

/**
 * small helper function to urldecode strings
 */
jQuery.urldecode = function(x) {
  return decodeURIComponent(x).replace(/\+/g, ' ');
};

/**
 * small helper function to urlencode strings
 */
jQuery.urlencode = encodeURIComponent;

/**
 * This function returns the parsed url parameters of the
 * current request. Multiple values per key are supported,
 * it will always return arrays of strings for the value parts.
 */
jQuery.getQueryParameters = function(s) {
  if (typeof s == 'undefined')
    s = document.location.search;
  var parts = s.substr(s.indexOf('?') + 1).split('&');
  var result = {};
  for (var i = 0; i < parts.length; i++) {
    var tmp = parts[i].split('=', 2);
    var key = jQuery.urldecode(tmp[0]);
    var value = jQuery.urldecode(tmp[1]);
    if (key in result)
      result[key].push(value);
    else
      result[key] = [value];
  }
  return result;
};

/**
 * small function to check if an array contains
 * a given item.
 */
jQuery.contains = function(arr, item) {
  for (var i = 0; i < arr.length; i++) {
    if (arr[i] == item)
      return true;
  }
  return false;
};

/**
 * highlight a given string on a jquery object by wrapping it in
 * span elements with the given class name.
 */
jQuery.fn.highlightText = function(text, className) {
  function highlight(node) {
    if (node.nodeType == 3) {
      // text node: wrap the first case-insensitive match in a styled <span>,
      // splitting the original node into before/match/after pieces
      var val = node.nodeValue;
      var pos = val.toLowerCase().indexOf(text);
      if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) {
        var span = document.createElement("span");
        span.className = className;
        span.appendChild(document.createTextNode(val.substr(pos, text.length)));
        node.parentNode.insertBefore(span, node.parentNode.insertBefore(
          document.createTextNode(val.substr(pos + text.length)),
          node.nextSibling));
        node.nodeValue = val.substr(0, pos);
      }
    }
    else if (!jQuery(node).is("button, select, textarea")) {
      jQuery.each(node.childNodes, function() {
        highlight(this);
      });
    }
  }
  return this.each(function() {
    highlight(this);
  });
};

/**
 * Small JavaScript module for the documentation.
 */
var Documentation = {

  init : function() {
    this.fixFirefoxAnchorBug();
    this.highlightSearchWords();
    this.initIndexTable();
  },

  /**
   * i18n support
   */
  TRANSLATIONS : {},
  PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; },
  LOCALE : 'unknown',

  // gettext and ngettext don't access this so that the functions
  // can safely bound to a different name (_ = Documentation.gettext)
  gettext : function(string) {
    var translated = Documentation.TRANSLATIONS[string];
    if (typeof translated == 'undefined')
      return string;
    return (typeof translated == 'string') ? translated : translated[0];
  },

  ngettext : function(singular, plural, n) {
    var translated = Documentation.TRANSLATIONS[singular];
    if (typeof translated == 'undefined')
      return (n == 1) ? singular : plural;
    // BUGFIX: was Documentation.PLURALEXPR(n), which is undefined and threw a
    // TypeError whenever a translation catalog supplied plural forms; the
    // property is named PLURAL_EXPR (see above / addTranslations).
    return translated[Documentation.PLURAL_EXPR(n)];
  },

  addTranslations : function(catalog) {
    for (var key in catalog.messages)
      this.TRANSLATIONS[key] = catalog.messages[key];
    this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
    this.LOCALE = catalog.locale;
  },

  /**
   * add context elements like header anchor links
   */
  addContextElements : function() {
    $('div[id] > :header:first').each(function() {
      $('<a class="headerlink">\u00B6</a>').
      attr('href', '#' + this.id).
      attr('title', _('Permalink to this headline')).
      appendTo(this);
    });
    $('dt[id]').each(function() {
      $('<a class="headerlink">\u00B6</a>').
      attr('href', '#' + this.id).
      attr('title', _('Permalink to this definition')).
      appendTo(this);
    });
  },

  /**
   * workaround a firefox stupidity
   */
  fixFirefoxAnchorBug : function() {
    if (document.location.hash && $.browser.mozilla)
      window.setTimeout(function() {
        document.location.href += '';
      }, 10);
  },

  /**
   * highlight the search words provided in the url in the text
   */
  highlightSearchWords : function() {
    var params = $.getQueryParameters();
    var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
    if (terms.length) {
      var body = $('div.body');
      // defer so the body is fully rendered before we walk its text nodes
      window.setTimeout(function() {
        $.each(terms, function() {
          body.highlightText(this.toLowerCase(), 'highlighted');
        });
      }, 10);
      $('<p class="highlight-link"><a href="javascript:Documentation.' +
        'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
        .appendTo($('#searchbox'));
    }
  },

  /**
   * init the domain index toggle buttons
   */
  initIndexTable : function() {
    var togglers = $('img.toggler').click(function() {
      var src = $(this).attr('src');
      var idnum = $(this).attr('id').substr(7);
      $('tr.cg-' + idnum).toggle();
      if (src.substr(-9) == 'minus.png')
        $(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
      else
        $(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
    }).css('display', '');
    if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
      togglers.click();
    }
  },

  /**
   * helper function to hide the search marks again
   */
  hideSearchWords : function() {
    $('#searchbox .highlight-link').fadeOut(300);
    $('span.highlighted').removeClass('highlighted');
  },

  /**
   * make the url absolute
   */
  makeURL : function(relativeURL) {
    return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
  },

  /**
   * get the current relative url
   */
  getCurrentURL : function() {
    var path = document.location.pathname;
    var parts = path.split(/\//);
    $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
      if (this == '..')
        parts.pop();
    });
    var url = parts.join('/');
    return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
  }
};

// quick alias for translations
_ = Documentation.gettext;

$(document).ready(function() {
  Documentation.init();
});
PypiClean
/GlobalPayments.Api-1.0.9.tar.gz/GlobalPayments.Api-1.0.9/LICENSE.md
The GNU General Public License, Version 2, June 1991 (GPLv2) ============================================================ > Copyright (C) 1989, 1991 Free Software Foundation, Inc. > 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble -------- The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. 
We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. Terms And Conditions For Copying, Distribution And Modification --------------------------------------------------------------- **0.** This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. 
The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. **1.** You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. **2.** You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: * **a)** You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. * **b)** You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. * **c)** If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. 
(Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 
**3.** You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: * **a)** Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, * **b)** Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, * **c)** Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. **4.** You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. **5.** You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. **6.** Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 
**7.** If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 
**8.** If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. **9.** The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. **10.** If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. No Warranty ----------- **11.** BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. **12.** IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
PypiClean
/NESTML-5.3.0-py3-none-any.whl/NESTML-5.3.0.data/data/doc/models_library/iaf_cond_beta.rst
iaf_cond_beta
#############

iaf_cond_beta - Simple conductance based leaky integrate-and-fire neuron model

Description
+++++++++++

iaf_cond_beta is an implementation of a spiking neuron using IAF dynamics with conductance-based synapses. Incoming spike events induce a post-synaptic change of conductance modelled by a beta function. The beta function is normalised such that an event of weight 1.0 results in a peak conductance of 1 nS at :math:`t = \tau_{rise\_[ex|in]}`.

References
++++++++++

.. [1] Meffin H, Burkitt AN, Grayden DB (2004). An analytical model for the large, fluctuating synaptic conductance state typical of neocortical neurons in vivo. Journal of Computational Neuroscience, 16:159-175. DOI: https://doi.org/10.1023/B:JCNS.0000014108.03012.81

.. [2] Bernander O, Douglas RJ, Martin KAC, Koch C (1991). Synaptic background activity influences spatiotemporal integration in single pyramidal cells. Proceedings of the National Academy of Science USA, 88(24):11569-11573. DOI: https://doi.org/10.1073/pnas.88.24.11569

.. [3] Kuhn A, Rotter S (2004) Neuronal integration of synaptic input in the fluctuation-driven regime. Journal of Neuroscience, 24(10):2345-2356 DOI: https://doi.org/10.1523/JNEUROSCI.3349-03.2004

.. [4] Rotter S, Diesmann M (1999). Exact simulation of time-invariant linear systems with applications to neuronal modeling. Biological Cybernetics 81:381-402. DOI: https://doi.org/10.1007/s004220050570

.. [5] Roth A and van Rossum M (2010). Chapter 6: Modeling synapses. in De Schutter, Computational Modeling Methods for Neuroscientists, MIT Press.

See also
++++++++

iaf_cond_exp, iaf_cond_alpha

Parameters
++++++++++

..
csv-table::
    :header: "Name", "Physical unit", "Default value", "Description"
    :widths: auto

    "E_L", "mV", "-70mV", "Leak reversal potential (aka resting potential)"
    "C_m", "pF", "250pF", "Capacitance of the membrane"
    "t_ref", "ms", "2ms", "Refractory period"
    "V_th", "mV", "-55mV", "Threshold potential"
    "V_reset", "mV", "-60mV", "Reset potential"
    "E_ex", "mV", "0mV", "Excitatory reversal potential"
    "E_in", "mV", "-85mV", "Inhibitory reversal potential"
    "g_L", "nS", "16.6667nS", "Leak conductance"
    "tau_syn_rise_I", "ms", "0.2ms", "Rise time constant of the inhibitory synaptic conductance"
    "tau_syn_decay_I", "ms", "2ms", "Decay time constant of the inhibitory synaptic conductance"
    "tau_syn_rise_E", "ms", "0.2ms", "Rise time constant of the excitatory synaptic conductance"
    "tau_syn_decay_E", "ms", "2ms", "Decay time constant of the excitatory synaptic conductance"
    "F_E", "nS", "0nS", "Constant external input conductance (excitatory)."
    "F_I", "nS", "0nS", "Constant external input conductance (inhibitory)."
    "I_e", "pA", "0pA", "Constant external input current"


State variables
+++++++++++++++

.. csv-table::
    :header: "Name", "Physical unit", "Default value", "Description"
    :widths: auto

    "r", "integer", "0", "counts number of ticks during the refractory period"
    "V_m", "mV", "E_L", "membrane potential"
    "g_in", "real", "0", "inputs from the inhibitory conductance"
    "g_in$", "real", "g_I_const * (1 / tau_syn_rise_I - 1 / tau_syn_decay_I)", ""
    "g_ex", "real", "0", "inputs from the excitatory conductance"
    "g_ex$", "real", "g_E_const * (1 / tau_syn_rise_E - 1 / tau_syn_decay_E)", ""


Equations
+++++++++

.. math::
   \frac{ dV_{m} } { dt }= \frac 1 { C_{m} } \left( { (-I_{leak} - I_{syn,exc} - I_{syn,inh} + I_{e} + I_{stim}) } \right)


Source code
+++++++++++

The model source code can be found in the NESTML models repository here: `iaf_cond_beta <https://github.com/nest/nestml/tree/master/models/neurons/iaf_cond_beta.nestml>`_.


Characterisation
++++++++++++++++

.. include:: iaf_cond_beta_characterisation.rst

..
footer:: Generated at 2023-03-22 17:48:48.761456
PypiClean
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/data/AtomReadStore.js.uncompressed.js
define("dojox/data/AtomReadStore", ["dojo", "dojox", "dojo/data/util/filter", "dojo/data/util/simpleFetch", "dojo/date/stamp"], function(dojo, dojox) { dojo.experimental("dojox.data.AtomReadStore"); dojo.declare("dojox.data.AtomReadStore", null, { // summary: // A read only data store for Atom XML based services or documents // description: // A data store for Atom XML based services or documents. This store is still under development // and doesn't support wildcard filtering yet. Attribute filtering is limited to category or id. constructor: function(/* object */ args){ // summary: // Constructor for the AtomRead store. // args: // An anonymous object to initialize properties. It expects the following values: // url: The url to a service or an XML document that represents the store // unescapeHTML: A boolean to specify whether or not to unescape HTML text // sendQuery: A boolean indicate to add a query string to the service URL if(args){ this.url = args.url; this.rewriteUrl = args.rewriteUrl; this.label = args.label || this.label; this.sendQuery = (args.sendQuery || args.sendquery || this.sendQuery); this.unescapeHTML = args.unescapeHTML; if("urlPreventCache" in args){ this.urlPreventCache = args.urlPreventCache?true:false; } } if(!this.url){ throw new Error("AtomReadStore: a URL must be specified when creating the data store"); } }, //Values that may be set by the parser. //Ergo, have to be instantiated to something //So the parser knows how to set them. url: "", label: "title", sendQuery: false, unescapeHTML: false, //Configurable preventCache option for the URL. urlPreventCache: false, /* dojo.data.api.Read */ getValue: function(/* item */ item, /* attribute || attribute-name-string */ attribute, /* value? */ defaultValue){ // summary: // Return an attribute value // description: // 'item' must be an instance of an object created by the AtomReadStore instance. 
// Accepted attributes are id, subtitle, title, summary, content, author, updated, // published, category, link and alternate // item: // An item returned by a call to the 'fetch' method. // attribute: // A attribute of the Atom Entry // defaultValue: // A default value // returns: // An attribute value found, otherwise 'defaultValue' this._assertIsItem(item); this._assertIsAttribute(attribute); this._initItem(item); attribute = attribute.toLowerCase(); //If the attribute has previously been retrieved, then return it if(!item._attribs[attribute] && !item._parsed){ this._parseItem(item); item._parsed = true; } var retVal = item._attribs[attribute]; if(!retVal && attribute == "summary"){ var content = this.getValue(item, "content"); var regexp = new RegExp("/(<([^>]+)>)/g", "i"); var text = content.text.replace(regexp,""); retVal = { text: text.substring(0, Math.min(400, text.length)), type: "text" }; item._attribs[attribute] = retVal; } if(retVal && this.unescapeHTML){ if((attribute == "content" || attribute == "summary" || attribute == "subtitle") && !item["_"+attribute+"Escaped"]){ retVal.text = this._unescapeHTML(retVal.text); item["_"+attribute+"Escaped"] = true; } } return retVal ? dojo.isArray(retVal) ? retVal[0]: retVal : defaultValue; }, getValues: function(/* item */ item, /* attribute || attribute-name-string */ attribute){ // summary: // Return an attribute value // description: // 'item' must be an instance of an object created by the AtomReadStore instance. // Accepted attributes are id, subtitle, title, summary, content, author, updated, // published, category, link and alternate // item: // An item returned by a call to the 'fetch' method. 
// attribute: // A attribute of the Atom Entry // returns: // An array of values for the attribute value found, otherwise 'defaultValue' this._assertIsItem(item); this._assertIsAttribute(attribute); this._initItem(item); attribute = attribute.toLowerCase(); //If the attribute has previously been retrieved, then return it if(!item._attribs[attribute]){ this._parseItem(item); } var retVal = item._attribs[attribute]; return retVal ? ((retVal.length !== undefined && typeof(retVal) !== "string") ? retVal : [retVal]) : undefined; }, getAttributes: function(/* item */ item){ // summary: // Return an array of attribute names // description: // 'item' must be have been created by the AtomReadStore instance. // tag names of child elements and XML attribute names of attributes // specified to the element are returned along with special attribute // names applicable to the element including "tagName", "childNodes" // if the element has child elements, "text()" if the element has // child text nodes, and attribute names in '_attributeMap' that match // the tag name of the element. // item: // An XML element // returns: // An array of attributes found this._assertIsItem(item); if(!item._attribs){ this._initItem(item); this._parseItem(item); } var attrNames = []; for(var x in item._attribs){ attrNames.push(x); } return attrNames; //array }, hasAttribute: function(/* item */ item, /* attribute || attribute-name-string */ attribute){ // summary: // Check whether an element has the attribute // item: // 'item' must be created by the AtomReadStore instance. // attribute: // An attribute of an Atom Entry item. 
// returns: // True if the element has the attribute, otherwise false return (this.getValue(item, attribute) !== undefined); //boolean }, containsValue: function(/* item */ item, /* attribute || attribute-name-string */ attribute, /* anything */ value){ // summary: // Check whether the attribute values contain the value // item: // 'item' must be an instance of a dojox.data.XmlItem from the store instance. // attribute: // A tag name of a child element, An XML attribute name or one of // special names // returns: // True if the attribute values contain the value, otherwise false var values = this.getValues(item, attribute); for(var i = 0; i < values.length; i++){ if((typeof value === "string")){ if(values[i].toString && values[i].toString() === value){ return true; } }else if(values[i] === value){ return true; //boolean } } return false;//boolean }, isItem: function(/* anything */ something){ // summary: // Check whether the object is an item (XML element) // item: // An object to check // returns: // True if the object is an XML element, otherwise false if(something && something.element && something.store && something.store === this){ return true; //boolean } return false; //boolran }, isItemLoaded: function(/* anything */ something){ // summary: // Check whether the object is an item (XML element) and loaded // item: // An object to check // returns: // True if the object is an XML element, otherwise false return this.isItem(something); //boolean }, loadItem: function(/* object */ keywordArgs){ // summary: // Load an item (XML element) // keywordArgs: // object containing the args for loadItem. 
See dojo.data.api.Read.loadItem() }, getFeatures: function(){ // summary: // Return supported data APIs // returns: // "dojo.data.api.Read" and "dojo.data.api.Write" var features = { "dojo.data.api.Read": true }; return features; //array }, getLabel: function(/* item */ item){ // summary: // See dojo.data.api.Read.getLabel() if((this.label !== "") && this.isItem(item)){ var label = this.getValue(item,this.label); if(label && label.text){ return label.text; }else if(label){ return label.toString(); }else{ return undefined; } } return undefined; //undefined }, getLabelAttributes: function(/* item */ item){ // summary: // See dojo.data.api.Read.getLabelAttributes() if(this.label !== ""){ return [this.label]; //array } return null; //null }, getFeedValue: function(attribute, defaultValue){ // summary: // Non-API method for retrieving values regarding the Atom feed, // rather than the Atom entries. var values = this.getFeedValues(attribute, defaultValue); if(dojo.isArray(values)){ return values[0]; } return values; }, getFeedValues: function(attribute, defaultValue){ // summary: // Non-API method for retrieving values regarding the Atom feed, // rather than the Atom entries. if(!this.doc){ return defaultValue; } if(!this._feedMetaData){ this._feedMetaData = { element: this.doc.getElementsByTagName("feed")[0], store: this, _attribs: {} }; this._parseItem(this._feedMetaData); } return this._feedMetaData._attribs[attribute] || defaultValue; }, _initItem: function(item){ // summary: // Initializes an item before it can be parsed. if(!item._attribs){ item._attribs = {}; } }, _fetchItems: function(request, fetchHandler, errorHandler){ // summary: // Retrieves the items from the Atom XML document. var url = this._getFetchUrl(request); if(!url){ errorHandler(new Error("No URL specified.")); return; } var localRequest = (!this.sendQuery ? 
request : null); // use request for _getItems() var _this = this; var docHandler = function(data){ _this.doc = data; var items = _this._getItems(data, localRequest); var query = request.query; if(query){ if(query.id){ items = dojo.filter(items, function(item){ return (_this.getValue(item, "id") == query.id); }); }else if(query.category){ items = dojo.filter(items, function(entry){ var cats = _this.getValues(entry, "category"); if(!cats){ return false; } return dojo.some(cats, "return item.term=='"+query.category+"'"); }); } } if(items && items.length > 0){ fetchHandler(items, request); }else{ fetchHandler([], request); } }; if(this.doc){ docHandler(this.doc); }else{ var getArgs = { url: url, handleAs: "xml", preventCache: this.urlPreventCache }; var getHandler = dojo.xhrGet(getArgs); getHandler.addCallback(docHandler); getHandler.addErrback(function(data){ errorHandler(data, request); }); } }, _getFetchUrl: function(request){ if(!this.sendQuery){ return this.url; } var query = request.query; if(!query){ return this.url; } if(dojo.isString(query)){ return this.url + query; } var queryString = ""; for(var name in query){ var value = query[name]; if(value){ if(queryString){ queryString += "&"; } queryString += (name + "=" + value); } } if(!queryString){ return this.url; } //Check to see if the URL already has query params or not. var fullUrl = this.url; if(fullUrl.indexOf("?") < 0){ fullUrl += "?"; }else{ fullUrl += "&"; } return fullUrl + queryString; }, _getItems: function(document, request){ // summary: // Parses the document in a first pass if(this._items){ return this._items; } var items = []; var nodes = []; if(document.childNodes.length < 1){ this._items = items; console.log("dojox.data.AtomReadStore: Received an invalid Atom document. 
Check the content type header"); return items; } var feedNodes = dojo.filter(document.childNodes, "return item.tagName && item.tagName.toLowerCase() == 'feed'"); var query = request.query; if(!feedNodes || feedNodes.length != 1){ console.log("dojox.data.AtomReadStore: Received an invalid Atom document, number of feed tags = " + (feedNodes? feedNodes.length : 0)); return items; } nodes = dojo.filter(feedNodes[0].childNodes, "return item.tagName && item.tagName.toLowerCase() == 'entry'"); if(request.onBegin){ request.onBegin(nodes.length, this.sendQuery ? request : {}); } for(var i = 0; i < nodes.length; i++){ var node = nodes[i]; if(node.nodeType != 1 /*ELEMENT_NODE*/){ continue; } items.push(this._getItem(node)); } this._items = items; return items; }, close: function(/*dojo.data.api.Request || keywordArgs || null */ request){ // summary: // See dojo.data.api.Read.close() }, /* internal API */ _getItem: function(element){ return { element: element, store: this }; }, _parseItem: function(item){ var attribs = item._attribs; var _this = this; var text, type; function getNodeText(node){ var txt = node.textContent || node.innerHTML || node.innerXML; if(!txt && node.childNodes[0]){ var child = node.childNodes[0]; if(child && (child.nodeType == 3 || child.nodeType == 4)){ txt = node.childNodes[0].nodeValue; } } return txt; } function parseTextAndType(node){ return {text: getNodeText(node),type: node.getAttribute("type")}; } dojo.forEach(item.element.childNodes, function(node){ var tagName = node.tagName ? 
node.tagName.toLowerCase() : ""; switch(tagName){ case "title": attribs[tagName] = { text: getNodeText(node), type: node.getAttribute("type") }; break; case "subtitle": case "summary": case "content": attribs[tagName] = parseTextAndType(node); break; case "author": var nameNode ,uriNode; dojo.forEach(node.childNodes, function(child){ if(!child.tagName){ return; } switch(child.tagName.toLowerCase()){ case "name": nameNode = child; break; case "uri": uriNode = child; break; } }); var author = {}; if(nameNode && nameNode.length == 1){ author.name = getNodeText(nameNode[0]); } if(uriNode && uriNode.length == 1){ author.uri = getNodeText(uriNode[0]); } attribs[tagName] = author; break; case "id": attribs[tagName] = getNodeText(node); break; case "updated": attribs[tagName] = dojo.date.stamp.fromISOString(getNodeText(node) ); break; case "published": attribs[tagName] = dojo.date.stamp.fromISOString(getNodeText(node)); break; case "category": if(!attribs[tagName]){ attribs[tagName] = []; } attribs[tagName].push({scheme:node.getAttribute("scheme"), term: node.getAttribute("term")}); break; case "link": if(!attribs[tagName]){ attribs[tagName] = []; } var link = { rel: node.getAttribute("rel"), href: node.getAttribute("href"), type: node.getAttribute("type")}; attribs[tagName].push(link); if(link.rel == "alternate"){ attribs["alternate"] = link; } break; default: break; } }); }, _unescapeHTML : function(text){ //Replace HTML character codes with their unencoded equivalents, e.g. &#8217; with ' text = text.replace(/&#8217;/m , "'").replace(/&#8243;/m , "\"").replace(/&#60;/m,">").replace(/&#62;/m,"<").replace(/&#38;/m,"&"); return text; }, _assertIsItem: function(/* item */ item){ // summary: // This function tests whether the item passed in is indeed an item in the store. // item: // The item to test for being contained by the store. 
if(!this.isItem(item)){ throw new Error("dojox.data.AtomReadStore: Invalid item argument."); } }, _assertIsAttribute: function(/* attribute-name-string */ attribute){ // summary: // This function tests whether the item passed in is indeed a valid 'attribute' like type for the store. // attribute: // The attribute to test for being contained by the store. if(typeof attribute !== "string"){ throw new Error("dojox.data.AtomReadStore: Invalid attribute argument."); } } }); dojo.extend(dojox.data.AtomReadStore,dojo.data.util.simpleFetch); return dojox.data.AtomReadStore; });
PypiClean
/CMinx-1.1.9.tar.gz/CMinx-1.1.9/docs/source/documenting/types_of_comments.rst
.. Copyright 2021 CMakePP
..
.. Licensed under the Apache License, Version 2.0 (the "License");
.. you may not use this file except in compliance with the License.
.. You may obtain a copy of the License at
..
..     http://www.apache.org/licenses/LICENSE-2.0
..
.. Unless required by applicable law or agreed to in writing, software
.. distributed under the License is distributed on an "AS IS" BASIS,
.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
.. See the License for the specific language governing permissions and
.. limitations under the License.

#################
Types of Comments
#################

According to CMinx, the content of a CMake file can be broken into three
categories:

#. Documentation comments - what CMinx extracts
#. Annotation comments - ordinary comments that CMinx should not extract
#. CMake source code - Everything else

To distinguish between the documentation and annotation comments, CMinx
borrows the convention that documentation comments start with an additional
comment character. Thus, to indicate that a CMake comment is documentation,
use :code:`#[[[` (the block comment should still end with :code:`#]]`). For
example:

.. literalinclude:: ../../../tests/examples/example.cmake
   :language: cmake
   :lines: 7-20

For comparison, an annotation comment looks like:

.. literalinclude:: ../../../tests/examples/example.cmake
   :language: cmake
   :lines: 1-5

Sometimes documentation comments need additional information in their headers
to inform CMinx about the type of documentation the comment represents. This
information is contained in the first line of the documentation comment (the
line with :code:`#[[[`) and is marked by a special directive start character
:code:`@`. For example, a module documentation comment contains the
:code:`@module` directive:

.. literalinclude:: ../../../tests/test_samples/basic_module.cmake
   :language: cmake
   :lines: 1-9

.. note:: Doccomment directives marked by :code:`@` are different from RST directives.
They are hints to CMinx for how to parse the doccomment and how to format it in the output, they *are not* passed through to the generated RST and therefore Sphinx is entirely unaware of them. Consider them akin to preprocessor directives.
PypiClean
/AICloudSDK-1.0.7.tar.gz/AICloudSDK-1.0.7/aicloud/cloudapi.py
from aicloud.rest import get, post, put
import json
import os


class AIPlatform(object):
    """Thin client for the AI cloud platform REST API.

    Wraps model-file upload/download and training-task bookkeeping.
    Every public method returns whatever the ``aicloud.rest`` helper
    (``get``/``post``/``put``) returns for the underlying HTTP call.
    """

    # Endpoint paths, appended to the versioned base URL built in __init__.
    model_upload_path = '/model/file'
    model_download_path = '/model/download/modelfile'
    create_training_task = '/training/training/form'
    create_training_duration = '/training/finish/form/'

    def __init__(self, base_url, version='v1', authorization=None, auth_token=None):
        """
        :param base_url: platform host, e.g. ``http://host:port``; a
            trailing slash is tolerated
        :param version: API version path segment (default ``v1``)
        :param authorization: value sent in the ``Authorization`` header
        :param auth_token: value sent in the ``auth-token`` header
        """
        self.authorization = authorization
        self.auth_token = auth_token
        self.url = base_url.rstrip('/') + '/ai/api/' + version
        # Written by the platform inside the training container; when it
        # exists, its first line is the id of the current training run and
        # takes precedence over any caller-supplied training id.
        self.train_id_file = '/workspace/trainid.txt'

    def _make_token_headers(self, content_type=None):
        """Return the authentication headers, optionally with a Content-Type.

        :param content_type: value for the ``Content-Type`` header; omitted
            from the result when falsy
        :return: dict of HTTP headers
        """
        headers = {
            'Authorization': self.authorization,
            'auth-token': self.auth_token
        }
        if content_type:
            headers['Content-Type'] = content_type
        return headers

    def _read_train_id(self, default=None):
        """Read the training id from :attr:`train_id_file` if it exists.

        The line is stripped because ``readline()`` keeps the trailing
        newline, which would otherwise end up inside request URLs and
        form payloads.

        :param default: value returned when the file does not exist
        :return: the training id string, or ``default``
        """
        if os.path.exists(self.train_id_file):
            with open(self.train_id_file) as f:
                return f.readline().strip()
        return default

    def upload_model_file(self, user_id, train_id, nbid, model_file):
        """Upload a model file produced by training to cloud storage.

        :param user_id: user id
        :param train_id: training id; overridden by the id stored in
            :attr:`train_id_file` when that file exists
        :param nbid: notebook id
        :param model_file: file object (or content) of the trained model
        :return: response of the upload request
        """
        train_id = self._read_train_id(default=train_id)
        url = self.url + self.model_upload_path
        file = {'multipartFile': model_file}
        data = {
            'trainingId': train_id,
            'userId': user_id,
            'nbid': nbid
        }
        return post(url, headers=self._make_token_headers(), data=data, files=file)

    def download_model_file(self, train_id):
        """Download a model file from cloud storage.

        :param train_id: training id identifying the stored model
        :return: response of the download request
        """
        url = self.url + self.model_download_path
        params = {'trainingId': train_id}
        return get(url, headers=self._make_token_headers(), params=params)

    def save_model_info(self, training_name, log_path, nbid):
        """Register a training task with the platform.

        :param training_name: display name of the training run
        :param log_path: path of the training log
        :param nbid: notebook id
        :return: response of the create request
        """
        url = self.url + self.create_training_task
        data = {
            'trainingName': training_name,
            'notebookId': nbid,
            'logAddr': log_path
        }
        return post(url, headers=self._make_token_headers(content_type='application/json'),
                    data=json.dumps(data))

    def training_duration_info(self):
        """Tell the platform that the current training run has finished.

        :return: response of the finish request
        :raises FileNotFoundError: if :attr:`train_id_file` does not exist.
            (The previous implementation crashed here with an unrelated
            ``NameError`` because ``train_id`` was never assigned when the
            file was missing.)
        """
        train_id = self._read_train_id()
        if train_id is None:
            raise FileNotFoundError(
                'training id file not found: ' + self.train_id_file)
        url = self.url + self.create_training_duration + str(train_id)
        return put(url, headers=self._make_token_headers())


class StorageEngine(object):
    """Placeholder for a future storage backend abstraction."""
    pass
PypiClean
/Bluebook-0.0.1.tar.gz/Bluebook-0.0.1/pylot/component/static/pylot/vendor/mdeditor/bower_components/codemirror/CONTRIBUTING.md
# How to contribute - [Getting help](#getting-help-) - [Submitting bug reports](#submitting-bug-reports-) - [Contributing code](#contributing-code-) ## Getting help [^](#how-to-contribute) Community discussion, questions, and informal bug reporting is done on the [CodeMirror Google group](http://groups.google.com/group/codemirror). ## Submitting bug reports [^](#how-to-contribute) The preferred way to report bugs is to use the [GitHub issue tracker](http://github.com/marijnh/CodeMirror/issues). Before reporting a bug, read these pointers. **Note:** The issue tracker is for *bugs*, not requests for help. Questions should be asked on the [CodeMirror Google group](http://groups.google.com/group/codemirror) instead. ### Reporting bugs effectively - CodeMirror is maintained by volunteers. They don't owe you anything, so be polite. Reports with an indignant or belligerent tone tend to be moved to the bottom of the pile. - Include information about **the browser in which the problem occurred**. Even if you tested several browsers, and the problem occurred in all of them, mention this fact in the bug report. Also include browser version numbers and the operating system that you're on. - Mention which release of CodeMirror you're using. Preferably, try also with the current development snapshot, to ensure the problem has not already been fixed. - Mention very precisely what went wrong. "X is broken" is not a good bug report. What did you expect to happen? What happened instead? Describe the exact steps a maintainer has to take to make the problem occur. We can not fix something that we can not observe. - If the problem can not be reproduced in any of the demos included in the CodeMirror distribution, please provide an HTML document that demonstrates the problem. The best way to do this is to go to [jsbin.com](http://jsbin.com/ihunin/edit), enter it there, press save, and include the resulting link in your bug report. 
## Contributing code [^](#how-to-contribute) - Make sure you have a [GitHub Account](https://github.com/signup/free) - Fork [CodeMirror](https://github.com/marijnh/CodeMirror/) ([how to fork a repo](https://help.github.com/articles/fork-a-repo)) - Make your changes - If your changes are easy to test or likely to regress, add tests. Tests for the core go into `test/test.js`, some modes have their own test suite under `mode/XXX/test.js`. Feel free to add new test suites to modes that don't have one yet (be sure to link the new tests into `test/index.html`). - Follow the general code style of the rest of the project (see below). Run `bin/lint` to verify that the linter is happy. - Make sure all tests pass. Visit `test/index.html` in your browser to run them. - Submit a pull request ([how to create a pull request](https://help.github.com/articles/fork-a-repo)) ### Coding standards - 2 spaces per indentation level, no tabs. - Include semicolons after statements. - Note that the linter (`bin/lint`) which is run after each commit complains about unused variables and functions. Prefix their names with an underscore to muffle it.
PypiClean
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/node_modules/@types/node/repl.d.ts
declare module 'repl' { import { Interface, Completer, AsyncCompleter } from 'node:readline'; import { Context } from 'node:vm'; import { InspectOptions } from 'node:util'; interface ReplOptions { /** * The input prompt to display. * @default "> " */ prompt?: string | undefined; /** * The `Readable` stream from which REPL input will be read. * @default process.stdin */ input?: NodeJS.ReadableStream | undefined; /** * The `Writable` stream to which REPL output will be written. * @default process.stdout */ output?: NodeJS.WritableStream | undefined; /** * If `true`, specifies that the output should be treated as a TTY terminal, and have * ANSI/VT100 escape codes written to it. * Default: checking the value of the `isTTY` property on the output stream upon * instantiation. */ terminal?: boolean | undefined; /** * The function to be used when evaluating each given line of input. * Default: an async wrapper for the JavaScript `eval()` function. An `eval` function can * error with `repl.Recoverable` to indicate the input was incomplete and prompt for * additional lines. * * @see https://nodejs.org/dist/latest-v10.x/docs/api/repl.html#repl_default_evaluation * @see https://nodejs.org/dist/latest-v10.x/docs/api/repl.html#repl_custom_evaluation_functions */ eval?: REPLEval | undefined; /** * Defines if the repl prints output previews or not. * @default `true` Always `false` in case `terminal` is falsy. */ preview?: boolean | undefined; /** * If `true`, specifies that the default `writer` function should include ANSI color * styling to REPL output. If a custom `writer` function is provided then this has no * effect. * Default: the REPL instance's `terminal` value. */ useColors?: boolean | undefined; /** * If `true`, specifies that the default evaluation function will use the JavaScript * `global` as the context as opposed to creating a new separate context for the REPL * instance. The node CLI REPL sets this value to `true`. * Default: `false`. 
*/ useGlobal?: boolean | undefined; /** * If `true`, specifies that the default writer will not output the return value of a * command if it evaluates to `undefined`. * Default: `false`. */ ignoreUndefined?: boolean | undefined; /** * The function to invoke to format the output of each command before writing to `output`. * Default: a wrapper for `util.inspect`. * * @see https://nodejs.org/dist/latest-v10.x/docs/api/repl.html#repl_customizing_repl_output */ writer?: REPLWriter | undefined; /** * An optional function used for custom Tab auto completion. * * @see https://nodejs.org/dist/latest-v11.x/docs/api/readline.html#readline_use_of_the_completer_function */ completer?: Completer | AsyncCompleter | undefined; /** * A flag that specifies whether the default evaluator executes all JavaScript commands in * strict mode or default (sloppy) mode. * Accepted values are: * - `repl.REPL_MODE_SLOPPY` - evaluates expressions in sloppy mode. * - `repl.REPL_MODE_STRICT` - evaluates expressions in strict mode. This is equivalent to * prefacing every repl statement with `'use strict'`. */ replMode?: typeof REPL_MODE_SLOPPY | typeof REPL_MODE_STRICT | undefined; /** * Stop evaluating the current piece of code when `SIGINT` is received, i.e. `Ctrl+C` is * pressed. This cannot be used together with a custom `eval` function. * Default: `false`. */ breakEvalOnSigint?: boolean | undefined; } type REPLEval = (this: REPLServer, evalCmd: string, context: Context, file: string, cb: (err: Error | null, result: any) => void) => void; type REPLWriter = (this: REPLServer, obj: any) => string; /** * This is the default "writer" value, if none is passed in the REPL options, * and it can be overridden by custom print functions. */ const writer: REPLWriter & { options: InspectOptions; }; type REPLCommandAction = (this: REPLServer, text: string) => void; interface REPLCommand { /** * Help text to be displayed when `.help` is entered. 
*/ help?: string | undefined; /** * The function to execute, optionally accepting a single string argument. */ action: REPLCommandAction; } /** * Instances of `repl.REPLServer` are created using the {@link start} method * or directly using the JavaScript `new` keyword. * * ```js * const repl = require('repl'); * * const options = { useColors: true }; * * const firstInstance = repl.start(options); * const secondInstance = new repl.REPLServer(options); * ``` * @since v0.1.91 */ class REPLServer extends Interface { /** * The `vm.Context` provided to the `eval` function to be used for JavaScript * evaluation. */ readonly context: Context; /** * @deprecated since v14.3.0 - Use `input` instead. */ readonly inputStream: NodeJS.ReadableStream; /** * @deprecated since v14.3.0 - Use `output` instead. */ readonly outputStream: NodeJS.WritableStream; /** * The `Readable` stream from which REPL input will be read. */ readonly input: NodeJS.ReadableStream; /** * The `Writable` stream to which REPL output will be written. */ readonly output: NodeJS.WritableStream; /** * The commands registered via `replServer.defineCommand()`. */ readonly commands: NodeJS.ReadOnlyDict<REPLCommand>; /** * A value indicating whether the REPL is currently in "editor mode". * * @see https://nodejs.org/dist/latest-v10.x/docs/api/repl.html#repl_commands_and_special_keys */ readonly editorMode: boolean; /** * A value indicating whether the `_` variable has been assigned. * * @see https://nodejs.org/dist/latest-v10.x/docs/api/repl.html#repl_assignment_of_the_underscore_variable */ readonly underscoreAssigned: boolean; /** * The last evaluation result from the REPL (assigned to the `_` variable inside of the REPL). * * @see https://nodejs.org/dist/latest-v10.x/docs/api/repl.html#repl_assignment_of_the_underscore_variable */ readonly last: any; /** * A value indicating whether the `_error` variable has been assigned. 
* * @since v9.8.0 * @see https://nodejs.org/dist/latest-v10.x/docs/api/repl.html#repl_assignment_of_the_underscore_variable */ readonly underscoreErrAssigned: boolean; /** * The last error raised inside the REPL (assigned to the `_error` variable inside of the REPL). * * @since v9.8.0 * @see https://nodejs.org/dist/latest-v10.x/docs/api/repl.html#repl_assignment_of_the_underscore_variable */ readonly lastError: any; /** * Specified in the REPL options, this is the function to be used when evaluating each * given line of input. If not specified in the REPL options, this is an async wrapper * for the JavaScript `eval()` function. */ readonly eval: REPLEval; /** * Specified in the REPL options, this is a value indicating whether the default * `writer` function should include ANSI color styling to REPL output. */ readonly useColors: boolean; /** * Specified in the REPL options, this is a value indicating whether the default `eval` * function will use the JavaScript `global` as the context as opposed to creating a new * separate context for the REPL instance. */ readonly useGlobal: boolean; /** * Specified in the REPL options, this is a value indicating whether the default `writer` * function should output the result of a command if it evaluates to `undefined`. */ readonly ignoreUndefined: boolean; /** * Specified in the REPL options, this is the function to invoke to format the output of * each command before writing to `outputStream`. If not specified in the REPL options, * this will be a wrapper for `util.inspect`. */ readonly writer: REPLWriter; /** * Specified in the REPL options, this is the function to use for custom Tab auto-completion. */ readonly completer: Completer | AsyncCompleter; /** * Specified in the REPL options, this is a flag that specifies whether the default `eval` * function should execute all JavaScript commands in strict mode or default (sloppy) mode. * Possible values are: * - `repl.REPL_MODE_SLOPPY` - evaluates expressions in sloppy mode. 
* - `repl.REPL_MODE_STRICT` - evaluates expressions in strict mode. This is equivalent to * prefacing every repl statement with `'use strict'`. */ readonly replMode: typeof REPL_MODE_SLOPPY | typeof REPL_MODE_STRICT; /** * NOTE: According to the documentation: * * > Instances of `repl.REPLServer` are created using the `repl.start()` method and * > _should not_ be created directly using the JavaScript `new` keyword. * * `REPLServer` cannot be subclassed due to implementation specifics in NodeJS. * * @see https://nodejs.org/dist/latest-v10.x/docs/api/repl.html#repl_class_replserver */ private constructor(); /** * The `replServer.defineCommand()` method is used to add new `.`\-prefixed commands * to the REPL instance. Such commands are invoked by typing a `.` followed by the`keyword`. The `cmd` is either a `Function` or an `Object` with the following * properties: * * The following example shows two new commands added to the REPL instance: * * ```js * const repl = require('repl'); * * const replServer = repl.start({ prompt: '> ' }); * replServer.defineCommand('sayhello', { * help: 'Say hello', * action(name) { * this.clearBufferedCommand(); * console.log(`Hello, ${name}!`); * this.displayPrompt(); * } * }); * replServer.defineCommand('saybye', function saybye() { * console.log('Goodbye!'); * this.close(); * }); * ``` * * The new commands can then be used from within the REPL instance: * * ```console * > .sayhello Node.js User * Hello, Node.js User! * > .saybye * Goodbye! * ``` * @since v0.3.0 * @param keyword The command keyword (_without_ a leading `.` character). * @param cmd The function to invoke when the command is processed. */ defineCommand(keyword: string, cmd: REPLCommandAction | REPLCommand): void; /** * The `replServer.displayPrompt()` method readies the REPL instance for input * from the user, printing the configured `prompt` to a new line in the `output`and resuming the `input` to accept new input. 
* * When multi-line input is being entered, an ellipsis is printed rather than the * 'prompt'. * * When `preserveCursor` is `true`, the cursor placement will not be reset to `0`. * * The `replServer.displayPrompt` method is primarily intended to be called from * within the action function for commands registered using the`replServer.defineCommand()` method. * @since v0.1.91 */ displayPrompt(preserveCursor?: boolean): void; /** * The `replServer.clearBufferedCommand()` method clears any command that has been * buffered but not yet executed. This method is primarily intended to be * called from within the action function for commands registered using the`replServer.defineCommand()` method. * @since v9.0.0 */ clearBufferedCommand(): void; /** * Initializes a history log file for the REPL instance. When executing the * Node.js binary and using the command-line REPL, a history file is initialized * by default. However, this is not the case when creating a REPL * programmatically. Use this method to initialize a history log file when working * with REPL instances programmatically. * @since v11.10.0 * @param historyPath the path to the history file * @param callback called when history writes are ready or upon error */ setupHistory(path: string, callback: (err: Error | null, repl: this) => void): void; /** * events.EventEmitter * 1. close - inherited from `readline.Interface` * 2. line - inherited from `readline.Interface` * 3. pause - inherited from `readline.Interface` * 4. resume - inherited from `readline.Interface` * 5. SIGCONT - inherited from `readline.Interface` * 6. SIGINT - inherited from `readline.Interface` * 7. SIGTSTP - inherited from `readline.Interface` * 8. exit * 9. 
reset */ addListener(event: string, listener: (...args: any[]) => void): this; addListener(event: 'close', listener: () => void): this; addListener(event: 'line', listener: (input: string) => void): this; addListener(event: 'pause', listener: () => void): this; addListener(event: 'resume', listener: () => void): this; addListener(event: 'SIGCONT', listener: () => void): this; addListener(event: 'SIGINT', listener: () => void): this; addListener(event: 'SIGTSTP', listener: () => void): this; addListener(event: 'exit', listener: () => void): this; addListener(event: 'reset', listener: (context: Context) => void): this; emit(event: string | symbol, ...args: any[]): boolean; emit(event: 'close'): boolean; emit(event: 'line', input: string): boolean; emit(event: 'pause'): boolean; emit(event: 'resume'): boolean; emit(event: 'SIGCONT'): boolean; emit(event: 'SIGINT'): boolean; emit(event: 'SIGTSTP'): boolean; emit(event: 'exit'): boolean; emit(event: 'reset', context: Context): boolean; on(event: string, listener: (...args: any[]) => void): this; on(event: 'close', listener: () => void): this; on(event: 'line', listener: (input: string) => void): this; on(event: 'pause', listener: () => void): this; on(event: 'resume', listener: () => void): this; on(event: 'SIGCONT', listener: () => void): this; on(event: 'SIGINT', listener: () => void): this; on(event: 'SIGTSTP', listener: () => void): this; on(event: 'exit', listener: () => void): this; on(event: 'reset', listener: (context: Context) => void): this; once(event: string, listener: (...args: any[]) => void): this; once(event: 'close', listener: () => void): this; once(event: 'line', listener: (input: string) => void): this; once(event: 'pause', listener: () => void): this; once(event: 'resume', listener: () => void): this; once(event: 'SIGCONT', listener: () => void): this; once(event: 'SIGINT', listener: () => void): this; once(event: 'SIGTSTP', listener: () => void): this; once(event: 'exit', listener: () => void): 
this; once(event: 'reset', listener: (context: Context) => void): this; prependListener(event: string, listener: (...args: any[]) => void): this; prependListener(event: 'close', listener: () => void): this; prependListener(event: 'line', listener: (input: string) => void): this; prependListener(event: 'pause', listener: () => void): this; prependListener(event: 'resume', listener: () => void): this; prependListener(event: 'SIGCONT', listener: () => void): this; prependListener(event: 'SIGINT', listener: () => void): this; prependListener(event: 'SIGTSTP', listener: () => void): this; prependListener(event: 'exit', listener: () => void): this; prependListener(event: 'reset', listener: (context: Context) => void): this; prependOnceListener(event: string, listener: (...args: any[]) => void): this; prependOnceListener(event: 'close', listener: () => void): this; prependOnceListener(event: 'line', listener: (input: string) => void): this; prependOnceListener(event: 'pause', listener: () => void): this; prependOnceListener(event: 'resume', listener: () => void): this; prependOnceListener(event: 'SIGCONT', listener: () => void): this; prependOnceListener(event: 'SIGINT', listener: () => void): this; prependOnceListener(event: 'SIGTSTP', listener: () => void): this; prependOnceListener(event: 'exit', listener: () => void): this; prependOnceListener(event: 'reset', listener: (context: Context) => void): this; } /** * A flag passed in the REPL options. Evaluates expressions in sloppy mode. */ const REPL_MODE_SLOPPY: unique symbol; /** * A flag passed in the REPL options. Evaluates expressions in strict mode. * This is equivalent to prefacing every repl statement with `'use strict'`. */ const REPL_MODE_STRICT: unique symbol; /** * The `repl.start()` method creates and starts a {@link REPLServer} instance. 
* * If `options` is a string, then it specifies the input prompt: * * ```js * const repl = require('repl'); * * // a Unix style prompt * repl.start('$ '); * ``` * @since v0.1.91 */ function start(options?: string | ReplOptions): REPLServer; /** * Indicates a recoverable error that a `REPLServer` can use to support multi-line input. * * @see https://nodejs.org/dist/latest-v10.x/docs/api/repl.html#repl_recoverable_errors */ class Recoverable extends SyntaxError { err: Error; constructor(err: Error); } } declare module 'node:repl' { export * from 'repl'; }
PypiClean
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/rmic.py
__revision__ = "src/engine/SCons/Tool/rmic.py 2014/07/05 09:42:21 garyo"

import os.path

import SCons.Action
import SCons.Builder
import SCons.Node.FS
import SCons.Util


def emit_rmic_classes(target, source, env):
    """Create and return lists of Java RMI stub and skeleton class
    files to be created from a set of class files.

    Emitter for the RMIC builder: derives the stub (and, for JDK 1.4,
    skeleton) class-file targets from the source class files, and
    annotates nodes with their class directory and class name.
    """
    class_suffix = env.get('JAVACLASSSUFFIX', '.class')
    classdir = env.get('JAVACLASSDIR')

    if not classdir:
        try:
            s = source[0]
        except IndexError:
            classdir = '.'
        else:
            try:
                # Prefer the class dir recorded by the javac emitter.
                classdir = s.attributes.java_classdir
            except AttributeError:
                classdir = '.'
    classdir = env.Dir(classdir).rdir()
    if str(classdir) == '.':
        c_ = None
    else:
        c_ = str(classdir) + os.sep

    slist = []
    for src in source:
        try:
            classname = src.attributes.java_classname
        except AttributeError:
            # No recorded class name: derive it from the file path by
            # stripping the class directory prefix and the class suffix.
            classname = str(src)
            if c_ and classname[:len(c_)] == c_:
                classname = classname[len(c_):]
            # Bug fix: compare the *tail* of the name against the suffix and
            # strip it from the end.  The original code had the slices
            # swapped (compared the head and kept only the tail), so the
            # ".class" suffix was never removed correctly.
            if class_suffix and classname[-len(class_suffix):] == class_suffix:
                classname = classname[:-len(class_suffix)]
        s = src.rfile()
        s.attributes.java_classdir = classdir
        s.attributes.java_classname = classname
        slist.append(s)

    stub_suffixes = ['_Stub']
    if env.get('JAVAVERSION') == '1.4':
        # JDK 1.4 rmic also generates skeleton classes.
        stub_suffixes.append('_Skel')

    tlist = []
    for s in source:
        for suff in stub_suffixes:
            fname = s.attributes.java_classname.replace('.', os.sep) + \
                    suff + class_suffix
            t = target[0].File(fname)
            t.attributes.java_lookupdir = target[0]
            tlist.append(t)

    return tlist, source

RMICAction = SCons.Action.Action('$RMICCOM', '$RMICCOMSTR')

RMICBuilder = SCons.Builder.Builder(action = RMICAction,
                                    emitter = emit_rmic_classes,
                                    src_suffix = '$JAVACLASSSUFFIX',
                                    target_factory = SCons.Node.FS.Dir,
                                    source_factory = SCons.Node.FS.File)


def generate(env):
    """Add Builders and construction variables for rmic to an Environment."""
    env['BUILDERS']['RMIC'] = RMICBuilder

    env['RMIC'] = 'rmic'
    env['RMICFLAGS'] = SCons.Util.CLVar('')
    env['RMICCOM'] = '$RMIC $RMICFLAGS -d ${TARGET.attributes.java_lookupdir} -classpath ${SOURCE.attributes.java_classdir} ${SOURCES.attributes.java_classname}'
    env['JAVACLASSSUFFIX'] = '.class'


def exists(env):
    # As reported by Jan Nijtmans in issue #2730, the simple
    #    return env.Detect('rmic')
    # doesn't always work during initialization. For now, we
    # stop trying to detect an executable (analogous to the
    # javac Builder).
    # TODO: Come up with a proper detect() routine...and enable it.
    return 1

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
PypiClean
/EnergyCapSdk-8.2304.4743.tar.gz/EnergyCapSdk-8.2304.4743/energycap/sdk/models/bill_account_meter_statistics_response.py
from msrest.serialization import Model


class BillAccountMeterStatisticsResponse(Model):
    """Per-bill usage/cost/demand statistics for an account meter.

    :param use_per_day:
    :type use_per_day: ~energycap.sdk.models.StatisticsResponse
    :param cost_per_day:
    :type cost_per_day: ~energycap.sdk.models.StatisticsResponse
    :param demand_per_day:
    :type demand_per_day: ~energycap.sdk.models.StatisticsResponse
    :param bill_id: The bill identifier
    :type bill_id: int
    :param billing_period: The bill's billing period
    :type billing_period: int
    :param begin_date: The bill's begin date
    :type begin_date: datetime
    :param end_date: The bill's end date
    :type end_date: datetime
    :param days: The number of days the bill covers
    :type days: int
    :param account:
    :type account: ~energycap.sdk.models.AccountChild
    :param meter:
    :type meter: ~energycap.sdk.models.MeterChild
    """

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'use_per_day': {'key': 'usePerDay', 'type': 'StatisticsResponse'},
        'cost_per_day': {'key': 'costPerDay', 'type': 'StatisticsResponse'},
        'demand_per_day': {'key': 'demandPerDay', 'type': 'StatisticsResponse'},
        'bill_id': {'key': 'billId', 'type': 'int'},
        'billing_period': {'key': 'billingPeriod', 'type': 'int'},
        'begin_date': {'key': 'beginDate', 'type': 'iso-8601'},
        'end_date': {'key': 'endDate', 'type': 'iso-8601'},
        'days': {'key': 'days', 'type': 'int'},
        'account': {'key': 'account', 'type': 'AccountChild'},
        'meter': {'key': 'meter', 'type': 'MeterChild'},
    }

    def __init__(self, **kwargs):
        """Populate fields from keyword arguments; anything not supplied
        defaults to None."""
        super(BillAccountMeterStatisticsResponse, self).__init__(**kwargs)
        # Same assignments as spelling each field out, just driven by a tuple.
        for field_name in ('use_per_day', 'cost_per_day', 'demand_per_day',
                           'bill_id', 'billing_period', 'begin_date',
                           'end_date', 'days', 'account', 'meter'):
            setattr(self, field_name, kwargs.get(field_name))
PypiClean
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/utils/Execution.py
import os
from contextlib import contextmanager

from nuitka.__past__ import subprocess
from nuitka.Tracing import general

from .Download import getCachedDownloadedMinGW64
from .FileOperations import getExternalUsePath
from .Utils import getArchitecture, isWin32OrPosixWindows, isWin32Windows

# Cache, so we avoid repeated command lookups.
_executable_command_cache = {}

# We emulate and use APIs of stdlib, spell-checker: ignore popenargs,creationflags,preexec_fn,setsid


def _getExecutablePath(filename, search_path):
    """Scan 'search_path' elements for 'filename', shell-style.

    Returns the first executable candidate found, or None.
    """
    # Append ".exe" suffix on Windows if not already present.
    if isWin32OrPosixWindows() and not filename.lower().endswith((".exe", ".cmd")):
        filename += ".exe"

    # Now check in each path element, much like the shell will.
    path_elements = search_path.split(os.pathsep)

    for path_element in path_elements:
        path_element = path_element.strip('"')
        path_element = os.path.expanduser(path_element)

        candidate = None

        # A path element may itself name a file; it matches only if its
        # basename is the wanted filename.
        if os.path.isfile(path_element):
            if os.path.normcase(os.path.basename(path_element)) == os.path.normcase(
                filename
            ):
                candidate = path_element
        else:
            full = os.path.join(path_element, filename)

            if os.path.exists(full):
                candidate = full

        if candidate is not None:
            # Only accept candidates we are actually allowed to execute.
            if os.access(candidate, os.X_OK):
                return candidate


def getExecutablePath(filename, extra_dir=None):
    """Find an execute in PATH environment."""

    # Search in PATH environment.
    search_path = os.environ.get("PATH", "")

    if extra_dir is not None:
        search_path = extra_dir + os.pathsep + search_path

    key = (filename, search_path)

    if key not in _executable_command_cache:
        _executable_command_cache[key] = _getExecutablePath(filename, search_path)

    return _executable_command_cache[key]


def isExecutableCommand(command):
    """Return True if 'command' resolves to an executable in PATH."""
    return getExecutablePath(command) is not None


class NuitkaCalledProcessError(subprocess.CalledProcessError):
    """CalledProcessError variant that includes output and stderr in its message."""

    def __init__(self, exit_code, cmd, output, stderr):
        # False alarm, pylint: disable=super-init-not-called
        # Bug fix: initialize the base class in place.  The previous code
        # called the constructor "subprocess.CalledProcessError(self, ...)",
        # which created and discarded a fresh instance instead.
        subprocess.CalledProcessError.__init__(self, exit_code, cmd)

        # Python2 doesn't have this otherwise, but needs it.
        self.stderr = stderr
        self.output = output
        self.cmd = cmd
        self.returncode = exit_code

    def __str__(self):
        result = subprocess.CalledProcessError.__str__(self)

        if self.output:
            result += " Output was %r." % self.output.strip()

        if self.stderr:
            result += " Error was %r." % self.stderr.strip()

        return result


def check_output(*popenargs, **kwargs):
    """Call a process and check result code.

    This is for Python 2.6 compatibility, which doesn't have that in its
    standard library.

    Note: We use same name as in Python stdlib, violating our rules to
    make it more recognizable what this does.
    """
    if "stdout" in kwargs:
        raise ValueError("stdout argument not allowed, it will be overridden.")

    if "stderr" not in kwargs:
        kwargs["stderr"] = subprocess.PIPE

    process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    output, stderr = process.communicate()
    exit_code = process.poll()

    if exit_code:
        cmd = kwargs.get("args")

        if cmd is None:
            cmd = popenargs[0]

        raise NuitkaCalledProcessError(exit_code, cmd, output=output, stderr=stderr)

    return output


def check_call(*popenargs, **kwargs):
    """Call a process and check result code.

    Note: This catches the error, and makes it nicer, and an error exit.
    So this is for tooling only.

    Note: We use same name as in Python stdlib, violating our rules to
    make it more recognizable what this does.
    """
    logger = kwargs.pop("logger", None)

    if logger is not None:
        logger.info("Executing command '%s'." % popenargs[0])

    try:
        subprocess.check_call(*popenargs, **kwargs)
    except OSError:
        general.sysexit(
            "Error, failed to execute '%s'. Is it installed?" % popenargs[0]
        )


def callProcess(*popenargs, **kwargs):
    """Call a process and return result code."""
    logger = kwargs.pop("logger", None)

    if logger is not None:
        logger.info("Executing command '%s'." % popenargs[0])

    return subprocess.call(*popenargs, **kwargs)


@contextmanager
def withEnvironmentPathAdded(env_var_name, *paths):
    """Temporarily append 'paths' to a PATH-like environment variable.

    Robustness fix: the previous value is now restored via try/finally,
    so an exception in the body no longer leaks the modified variable.
    """
    assert os.path.sep not in env_var_name

    paths = [path for path in paths if path]
    path = os.pathsep.join(paths)

    if path:
        # Python3 wants str for os.environ, decode bytes values.
        if str is not bytes and type(path) is bytes:
            path = path.decode("utf8")

        if env_var_name in os.environ:
            old_path = os.environ[env_var_name]
            os.environ[env_var_name] += os.pathsep + path
        else:
            old_path = None
            os.environ[env_var_name] = path

    try:
        yield
    finally:
        if path:
            if old_path is None:
                del os.environ[env_var_name]
            else:
                os.environ[env_var_name] = old_path


@contextmanager
def withEnvironmentVarOverridden(env_var_name, value):
    """Change an environment and restore it after context.

    Robustness fix: restoration now happens in a finally clause, so it
    also occurs when the body raises.
    """

    if env_var_name in os.environ:
        old_value = os.environ[env_var_name]
    else:
        old_value = None

    # A value of None means "ensure the variable is unset".
    if value is not None:
        os.environ[env_var_name] = value
    elif old_value is not None:
        del os.environ[env_var_name]

    try:
        yield
    finally:
        if old_value is None:
            if value is not None:
                del os.environ[env_var_name]
        else:
            os.environ[env_var_name] = old_value


@contextmanager
def withEnvironmentVarsOverridden(mapping):
    """Change multiple environment variables and restore them after context.

    Robustness fix: restoration now happens in a finally clause, so it
    also occurs when the body raises.
    """
    old_values = {}

    for env_var_name, value in mapping.items():
        if env_var_name in os.environ:
            old_values[env_var_name] = os.environ[env_var_name]
        else:
            old_values[env_var_name] = None

        # A value of None means "ensure the variable is unset".
        if value is not None:
            os.environ[env_var_name] = value
        elif old_values[env_var_name] is not None:
            del os.environ[env_var_name]

    try:
        yield
    finally:
        for env_var_name, value in mapping.items():
            if old_values[env_var_name] is None:
                if value is not None:
                    del os.environ[env_var_name]
            else:
                os.environ[env_var_name] = old_values[env_var_name]


def wrapCommandForDebuggerForExec(*args):
    """Wrap a command for system debugger to call exec

    Args:
        args: (list of str) args for call to be debugged
    Returns:
        args tuple with debugger command inserted

    Notes:
        Currently only gdb and lldb are supported, but adding more
        debuggers would be very welcome.
    """
    gdb_path = getExecutablePath("gdb")
    lldb_path = None

    # Windows extra ball, attempt the downloaded one.
    if isWin32Windows() and gdb_path is None:
        from nuitka.Options import assumeYesForDownloads

        mingw64_gcc_path = getCachedDownloadedMinGW64(
            target_arch=getArchitecture(),
            assume_yes_for_downloads=assumeYesForDownloads(),
        )

        # NOTE(review): this searches the MinGW64 directory for "lldb";
        # "gdb" may have been intended here — confirm against upstream.
        with withEnvironmentPathAdded("PATH", os.path.dirname(mingw64_gcc_path)):
            lldb_path = getExecutablePath("lldb")

    if gdb_path is None and lldb_path is None:
        lldb_path = getExecutablePath("lldb")

        if lldb_path is None:
            general.sysexit("Error, no 'gdb' or 'lldb' binary found in path.")

    if gdb_path is not None:
        args = (gdb_path, "gdb", "-ex=run", "-ex=where", "-ex=quit", "--args") + args
    else:
        args = (lldb_path, "lldb", "-o", "run", "-o", "bt", "-o", "quit", "--") + args

    return args


def wrapCommandForDebuggerForSubprocess(*args):
    """Wrap a command for system debugger with subprocess module.

    Args:
        args: (list of str) args for call to be debugged
    Returns:
        args tuple with debugger command inserted

    Notes:
        Currently only gdb and lldb are supported, but adding more
        debuggers would be very welcome.
    """
    args = wrapCommandForDebuggerForExec(*args)

    # Discard exec only argument, i.e. the display name the exec form
    # passes as argv[0]; subprocess must not receive it as an argument.
    args = args[0:1] + args[2:]

    return args


def getNullOutput():
    """File object suitable for discarding output.

    NOTE(review): if getNullInput() ran first, this returns its cached
    read-mode handle — confirm writes to it are never attempted.
    """
    try:
        return subprocess.NULLDEV
    except AttributeError:
        return open(os.devnull, "wb")


def getNullInput():
    """File object providing no input, cached on the subprocess module."""
    # spell-checker: ignore NULLDEV
    try:
        return subprocess.NULLDEV
    except AttributeError:
        # File is supposed to stay open, pylint: disable=consider-using-with
        subprocess.NULLDEV = open(os.devnull, "rb")
        return subprocess.NULLDEV


def executeToolChecked(
    logger, command, absence_message, stderr_filter=None, optional=False
):
    """Execute external tool, checking for success and no error outputs, returning result."""

    command = list(command)
    tool = command[0]

    if not isExecutableCommand(tool):
        if optional:
            logger.warning(absence_message)
            # NOTE(review): this path returns a 3-tuple while the success
            # path returns stdout only — callers must cope with both shapes.
            return 0, b"", b""
        else:
            logger.sysexit(absence_message)

    # Allow to avoid repeated scans in PATH for the tool.
    command[0] = getExecutablePath(tool)

    process = subprocess.Popen(
        command,
        stdin=getNullInput(),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=False,
    )
    stdout, stderr = process.communicate()
    result = process.poll()

    # The filter may both rewrite stderr and override the exit code.
    if stderr_filter is not None:
        new_result, stderr = stderr_filter(stderr)

        if new_result is not None:
            result = new_result

    if result != 0:
        logger.sysexit(
            "Error, call to '%s' failed: %s -> %s." % (tool, command, stderr)
        )
    elif stderr:
        logger.sysexit(
            "Error, call to '%s' gave warnings: %s -> %s." % (tool, command, stderr)
        )

    return stdout


def createProcess(
    command,
    env=None,
    stdin=False,
    stdout=None,
    stderr=None,
    shell=False,
    external_cwd=False,
    new_group=False,
):
    """Create a subprocess.Popen with the usual defaults of this module."""
    if not env:
        env = os.environ

    kw_args = {}
    if new_group:
        if isWin32Windows():
            kw_args["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
        else:
            kw_args["preexec_fn"] = os.setsid

    process = subprocess.Popen(
        command,
        # Note: Empty string should also be allowed for stdin, therefore check
        # for default "False" and "None" precisely.
        stdin=subprocess.PIPE if stdin not in (False, None) else getNullInput(),
        stdout=subprocess.PIPE if stdout is None else stdout,
        stderr=subprocess.PIPE if stderr is None else stderr,
        shell=shell,
        # On Windows, closing file descriptions is not working with capturing outputs.
        close_fds=not isWin32Windows(),
        env=env,
        # For tools that want short paths to work.
        cwd=getExternalUsePath(os.getcwd()) if external_cwd else None,
        **kw_args
    )

    return process


def executeProcess(
    command, env=None, stdin=False, shell=False, external_cwd=False, timeout=None
):
    """Run a command to completion, returning (stdout, stderr, exit_code)."""
    process = createProcess(
        command=command, env=env, stdin=stdin, shell=shell, external_cwd=external_cwd
    )

    if stdin is True:
        process_input = None
    elif stdin is not False:
        process_input = stdin
    else:
        process_input = None

    kw_args = {}
    if timeout is not None:
        # Apply timeout if possible, old Python versions do not support it.
        if "timeout" in subprocess.Popen.communicate.__code__.co_varnames:
            kw_args["timeout"] = timeout

    # Bug fix: the computed "timeout" keyword was previously never passed
    # to communicate(), so the timeout argument was silently ignored.
    stdout, stderr = process.communicate(input=process_input, **kw_args)
    exit_code = process.wait()

    return stdout, stderr, exit_code
PypiClean
/GenBankQC-0.2a0.tar.gz/GenBankQC-0.2a0/genbankqc/Species.py
import os
import re
from subprocess import DEVNULL, Popen

import pandas as pd
from ete3 import Tree


class Species:
    """Represents a collection of genomes in `path`

    :Parameters:
        path : str
            The path to the directory of related genomes you wish to analyze.
    """

    def __init__(self, path, max_unknowns=200, contigs=3.0,
                 assembly_size=3.0, mash=3.0):
        self.path = path
        self.species = path
        self.max_unknowns = max_unknowns
        self.contigs = contigs
        self.assembly_size = assembly_size
        self.mash = mash
        self.qc_dir = os.path.join(self.path, "qc")
        # Directory name encodes the tolerance settings used for this run.
        self.label = '{}-{}-{}-{}'.format(
            max_unknowns, contigs, assembly_size, mash)
        self.qc_results_dir = os.path.join(self.qc_dir, self.label)
        self.stats_path = os.path.join(self.qc_dir, 'stats.csv')
        self.nw_path = os.path.join(self.qc_dir, 'tree.nw')
        self.dmx_path = os.path.join(self.qc_dir, 'dmx.csv')
        self.failed_path = os.path.join(self.qc_results_dir, "failed.csv")
        self.tree_img = os.path.join(self.qc_results_dir, "tree.svg")
        self.summary_path = os.path.join(self.qc_results_dir, "summary.txt")
        self.allowed_path = os.path.join(self.qc_results_dir, "allowed.p")
        self.paste_file = os.path.join(self.qc_dir, 'all.msh')
        self.tree = None
        self.stats = None
        self.dmx = None
        # Species name is the last path component.
        if '/' in self.species:
            self.species = path.strip('/').split('/')[-1]
        if not os.path.isdir(self.qc_dir):
            os.mkdir(self.qc_dir)
        if not os.path.isdir(self.qc_results_dir):
            os.mkdir(self.qc_results_dir)
        # Resume from previous results where present.
        if os.path.isfile(self.stats_path):
            self.stats = pd.read_csv(self.stats_path, index_col=0)
        if os.path.isfile(self.nw_path):
            self.tree = Tree(self.nw_path, 1)
        if os.path.isfile(self.dmx_path):
            self.dmx = pd.read_csv(self.dmx_path, index_col=0, sep="\t")
        if os.path.isfile(self.failed_path):
            self.failed_report = pd.read_csv(self.failed_path, index_col=0)
        self.criteria = ["unknowns", "contigs", "assembly_size", "distance"]
        self.tolerance = {"unknowns": max_unknowns,
                          "contigs": contigs,
                          "assembly_size": assembly_size,
                          "distance": mash}
        self.passed = self.stats
        self.failed = {}
        self.med_abs_devs = {}
        self.dev_refs = {}
        self.allowed = {"unknowns": max_unknowns}
        # Enable user defined colors
        self.colors = {"unknowns": "red",
                       "contigs": "green",
                       "distance": "purple",
                       "assembly_size": "orange"}
        self.assess_tree()

    def __str__(self):
        self.message = [
            "Species: {}".format(self.species),
            "Tolerance Levels:",
            "Unknown bases: {}".format(self.max_unknowns),
            "Contigs: {}".format(self.contigs),
            "Assembly Size: {}".format(self.assembly_size),
            "MASH: {}".format(self.mash)]
        return '\n'.join(self.message)

    def assess(f):
        """Decorator: skip `f` when cached QC results are complete and
        consistent, otherwise run it and persist the results."""
        import pickle
        from functools import wraps

        @wraps(f)
        def wrapper(self):
            try:
                assert self.stats is not None
                assert os.path.isfile(self.allowed_path)
                assert (sorted(self.genome_ids().tolist()) ==
                        sorted(self.stats.index.tolist()))
                self.complete = True
                with open(self.allowed_path, 'rb') as p:
                    self.allowed = pickle.load(p)
            except AssertionError:
                self.complete = False
                f(self)
                with open(self.allowed_path, 'wb') as p:
                    pickle.dump(self.allowed, p)
                # Only write summary/report after a fresh run; on the cached
                # path self.failed is empty and these would fail.
                self.summary()
                self.write_failed_report()
        return wrapper

    def assess_tree(self):
        """Set self.tree_complete: tree, stats, and genome ids all agree."""
        try:
            assert self.tree is not None
            assert self.stats is not None
            assert (sorted(self.tree.get_leaf_names()) ==
                    sorted(self.stats.index.tolist()) ==
                    sorted(self.genome_ids().tolist()))
            self.tree_complete = True
        except AssertionError:
            self.tree_complete = False

    def genomes(self, ext="fasta"):
        # TODO: Maybe this should return a tuple (genome-path, genome-id)
        """Returns a generator for every file ending with `ext`

        :param ext: File extension of genomes in species directory
        :returns: Generator of Genome objects for all genomes in species dir
        :rtype: generator
        """
        from genbankqc import Genome
        genomes = (Genome(os.path.join(self.path, f))
                   for f in os.listdir(self.path) if f.endswith(ext))
        return genomes

    def sketches(self):
        """Generator of the MASH sketch path of every genome."""
        return (i.msh for i in self.genomes())

    def genome_ids(self):
        """Index of all genome names in the species directory."""
        ids = [i.name for i in self.genomes()]
        return pd.Index(ids)

    def sketch(self):
        """Create a MASH sketch for every genome."""
        for genome in self.genomes():
            genome.sketch()

    def mash_paste(self):
        """Combine all individual sketches into a single paste file."""
        if os.path.isfile(self.paste_file):
            os.remove(self.paste_file)
        sketches = os.path.join(self.qc_dir, "GCA*msh")
        cmd = "mash paste {} {}".format(self.paste_file, sketches)
        # Previously shell="True" (a truthy string); use the real boolean.
        Popen(cmd, shell=True, stdout=DEVNULL).wait()
        if not os.path.isfile(self.paste_file):
            self.paste_file = None

    def mash_dist(self):
        """Compute the all-vs-all MASH distance matrix and tidy its labels."""
        cmd = "mash dist -t '{}' '{}' > '{}'".format(
            self.paste_file, self.paste_file, self.dmx_path)
        # Previously shell="True" (a truthy string); use the real boolean.
        Popen(cmd, shell=True, stdout=DEVNULL).wait()
        self.dmx = pd.read_csv(self.dmx_path, index_col=0, sep="\t")
        # Make distance matrix more readable
        p = re.compile(r'.*(GCA_\d+\.\d.*)(.fasta)')
        names = [re.match(p, i).group(1) for i in self.dmx.index]
        self.dmx.index = names
        self.dmx.columns = names
        self.dmx.to_csv(self.dmx_path, sep="\t")

    def run_mash(self):
        """Run all mash related functions."""
        self.sketch()
        self.mash_paste()
        self.mash_dist()

    def get_tree(self):
        """Build, midpoint-root, and save a tree from the distance matrix."""
        if self.tree_complete is False:
            import numpy as np
            # import matplotlib as mpl
            # mpl.use('TkAgg')
            from skbio.tree import TreeNode
            from scipy.cluster.hierarchy import weighted
            ids = self.dmx.index.tolist()
            triu = np.triu(self.dmx.as_matrix())
            hclust = weighted(triu)
            t = TreeNode.from_linkage_matrix(hclust, ids)
            nw = t.__str__().replace("'", "")
            self.tree = Tree(nw)
            # midpoint root tree
            self.tree.set_outgroup(self.tree.get_midpoint_outgroup())
            self.tree.write(outfile=self.nw_path)

    def get_stats(self):
        """Get stats for all genomes.

        Concat the results into a DataFrame
        """
        dmx_mean = self.dmx.mean()
        for genome in self.genomes():
            genome.get_stats(dmx_mean)
        species_stats = [genome.stats_df for genome in self.genomes()]
        self.stats = pd.concat(species_stats)
        self.stats.to_csv(self.stats_path)

    def MAD(self, df, col):
        """Get the median absolute deviation for col

        NOTE(review): this is the *mean* of absolute deviations from the
        median, not the textbook median absolute deviation — kept as-is to
        match the filtering methods below.
        """
        MAD = abs(df[col] - df[col].median()).mean()
        return MAD

    @staticmethod
    def MAD_ref(MAD, tolerance):
        """Get the reference value for median absolute deviation

        Made a staticmethod: the original definition lacked `self`, so
        calling it on an instance would have bound `self` to `MAD`.
        """
        dev_ref = MAD * tolerance
        return dev_ref

    @staticmethod
    def bound(df, col, dev_ref):
        """Lower/upper bounds around the median of `col`.

        Made a staticmethod: the original definition lacked `self`, so
        calling it on an instance would have bound `self` to `df`.
        """
        lower = df[col].median() - dev_ref
        upper = df[col].median() + dev_ref
        return lower, upper

    def filter_unknown_bases(self):
        """Filter out genomes with too many unknown bases."""
        self.failed["unknowns"] = self.stats.index[
            self.stats["unknowns"] > self.tolerance["unknowns"]]
        self.passed = self.stats.drop(self.failed["unknowns"])

    def filter_contigs(self):
        """Filter genomes by contig count using absolute deviation from
        the median; genomes with <= 10 contigs always pass."""
        # Only look at genomes with > 10 contigs to avoid throwing off the
        # median absolute deviation
        # Extract genomes with < 10 contigs to add them back in later.
        eligible_contigs = self.passed.contigs[self.passed.contigs > 10]
        not_enough_contigs = self.passed.contigs[self.passed.contigs <= 10]
        # Median absolute deviation - Average absolute difference between
        # number of contigs and the median for all genomes
        # TODO Define separate function for this
        med_abs_dev = abs(eligible_contigs - eligible_contigs.median()).mean()
        self.med_abs_devs["contigs"] = med_abs_dev
        # Define separate function for this
        # The "deviation reference"
        dev_ref = med_abs_dev * self.contigs
        self.dev_refs["contigs"] = dev_ref
        self.allowed["contigs"] = eligible_contigs.median() + dev_ref
        self.failed["contigs"] = eligible_contigs[
            abs(eligible_contigs - eligible_contigs.median()) > dev_ref].index
        eligible_contigs = eligible_contigs[
            abs(eligible_contigs - eligible_contigs.median()) <= dev_ref]
        # Add genomes with < 10 contigs back in
        eligible_contigs = pd.concat([eligible_contigs, not_enough_contigs])
        eligible_contigs = eligible_contigs.index
        self.passed = self.passed.loc[eligible_contigs]

    def filter_MAD_range(self, criteria):
        """Filter based on median absolute deviation.
        Passing values fall within a lower and upper bound."""
        # Get the median absolute deviation
        med_abs_dev = abs(self.passed[criteria] -
                          self.passed[criteria].median()).mean()
        dev_ref = med_abs_dev * self.tolerance[criteria]
        lower = self.passed[criteria].median() - dev_ref
        upper = self.passed[criteria].median() + dev_ref
        # Record the accepted range as "lower-upper" for the summary/legend.
        allowed_range = (str(int(x)) for x in [lower, upper])
        allowed_range = '-'.join(allowed_range)
        self.allowed[criteria] = allowed_range
        self.failed[criteria] = self.passed[
            abs(self.passed[criteria] -
                self.passed[criteria].median()) > dev_ref].index
        self.passed = self.passed[
            abs(self.passed[criteria] -
                self.passed[criteria].median()) <= dev_ref]

    def filter_MAD_upper(self, criteria):
        """Filter based on median absolute deviation.
        Passing values fall under the upper bound."""
        # Get the median absolute deviation
        med_abs_dev = abs(self.passed[criteria] -
                          self.passed[criteria].median()).mean()
        dev_ref = med_abs_dev * self.tolerance[criteria]
        upper = self.passed[criteria].median() + dev_ref
        self.failed[criteria] = self.passed[
            self.passed[criteria] > upper].index
        self.passed = self.passed[
            self.passed[criteria] <= upper]
        upper = "{:.4f}".format(upper)
        self.allowed[criteria] = upper

    def base_node_style(self):
        """Apply the default node style and label GCA leaves."""
        from ete3 import NodeStyle, AttrFace
        nstyle = NodeStyle()
        nstyle["shape"] = "sphere"
        nstyle["size"] = 2
        nstyle["fgcolor"] = "black"
        for n in self.tree.traverse():
            n.set_style(nstyle)
            if re.match(r'^GCA', n.name):
                nf = AttrFace('name', fsize=8)
                nf.margin_right = 150
                nf.margin_left = 3
                n.add_face(nf, column=0)
        # Might be better in a layout function

    def style_and_render_tree(self, file_types=("svg", "pdf")):
        """Render the tree with a legend of criteria, allowed values,
        tolerances, and filtered counts.

        Note: default changed from a mutable list to a tuple; it is only
        iterated, so behavior is unchanged.
        """
        from ete3 import TreeStyle, TextFace, CircleFace
        ts = TreeStyle()
        title_face = TextFace(self.species.replace('_', ' '), fsize=20)
        title_face.margin_bottom = 10
        ts.title.add_face(title_face, column=0)
        ts.branch_vertical_margin = 10
        ts.show_leaf_name = False
        # Legend
        ts.legend.add_face(TextFace(""), column=1)
        for category in ["Allowed", "Tolerance", "Filtered", "Color"]:
            category = TextFace(category, fsize=8, bold=True)
            category.margin_bottom = 2
            category.margin_right = 40
            ts.legend.add_face(category, column=1)
        for i, criteria in enumerate(self.criteria, 2):
            title = criteria.replace("_", " ").title()
            title = TextFace(title, fsize=8, bold=True)
            title.margin_bottom = 2
            title.margin_right = 40
            cf = CircleFace(4, self.colors[criteria], style="sphere")
            cf.margin_bottom = 5
            filtered_count = len(list(
                filter(None, self.failed_report.criteria == criteria)))
            filtered = TextFace(filtered_count, fsize=8)
            filtered.margin_bottom = 5
            allowed = TextFace(self.allowed[criteria], fsize=8)
            allowed.margin_bottom = 5
            allowed.margin_right = 25
            tolerance = TextFace(self.tolerance[criteria], fsize=8)
            tolerance.margin_bottom = 5
            ts.legend.add_face(title, column=i)
            ts.legend.add_face(allowed, column=i)
            ts.legend.add_face(tolerance, column=i)
            ts.legend.add_face(filtered, column=i)
            ts.legend.add_face(cf, column=i)
        for f in file_types:
            out_tree = os.path.join(self.qc_results_dir, 'tree.{}'.format(f))
            self.tree.render(out_tree, tree_style=ts)

    def color_tree(self):
        """Color failed genomes by the criteria they failed, then render."""
        from ete3 import NodeStyle
        self.base_node_style()
        for genome in self.failed_report.index:
            n = self.tree.get_leaves_by_name(genome).pop()
            nstyle = NodeStyle()
            nstyle["fgcolor"] = self.colors[
                self.failed_report.loc[genome, 'criteria']]
            nstyle["size"] = 9
            n.set_style(nstyle)
        self.style_and_render_tree()

    @assess
    def filter(self):
        """Apply every filter in order; stop refining once too few remain."""
        self.filter_unknown_bases()
        if check_df_len(self.passed, "unknowns"):
            self.filter_contigs()
        if check_df_len(self.passed, "assembly_size"):
            self.filter_MAD_range("assembly_size")
        if check_df_len(self.passed, "distance"):
            self.filter_MAD_upper("distance")

    def write_failed_report(self):
        """Write a CSV of every failed genome and the criteria it failed."""
        from itertools import chain
        if os.path.isfile(self.failed_path):
            os.remove(self.failed_path)
        ixs = chain.from_iterable([i for i in self.failed.values()])
        self.failed_report = pd.DataFrame(index=ixs, columns=["criteria"])
        for criteria in self.failed.keys():
            self.failed_report.loc[self.failed[criteria], 'criteria'] = criteria
        self.failed_report.to_csv(self.failed_path)

    def summary(self):
        """Write and return a plain-text summary of filtering results."""
        summary = [
            "Unknown Bases",
            "Allowed: {}".format(self.allowed["unknowns"]),
            "Tolerance: {}".format(self.tolerance["unknowns"]),
            "Filtered: {}".format(len(self.failed["unknowns"])),
            "Contigs",
            "Allowed: {}".format(self.allowed["contigs"]),
            "Tolerance: {}".format(self.tolerance["contigs"]),
            "Filtered: {}".format(len(self.failed["contigs"])),
            "Assembly Size",
            "Allowed: {}".format(self.allowed["assembly_size"]),
            "Tolerance: {}".format(self.tolerance["assembly_size"]),
            "Filtered: {}".format(len(self.failed["assembly_size"])),
            "MASH",
            "Allowed: {}".format(self.allowed["distance"]),
            "Tolerance: {}".format(self.tolerance["distance"]),
            "Filtered: {}".format(len(self.failed["distance"]))]
        summary = '\n'.join(summary)
        # os.path.join() with a single argument was a no-op; use the path.
        with open(self.summary_path, "w") as f:
            f.write(summary)
        return summary

    def qc(self):
        """Run the complete QC pipeline."""
        self.run_mash()
        self.get_stats()
        self.filter()
        self.get_tree()
        self.color_tree()


def check_df_len(df, criteria, num=5):
    """ Verify that df has > than num genomes """
    if len(df) > num:
        return True
    else:
        # TODO: Just pass and return false here.
        # info in this print statement will be apparent in summary
        # Bug fix: the format placeholder was never substituted; call
        # .format() so the criteria actually appears in the message.
        print("Filtering based on {} resulted in less than 5 genomes.".format(
            criteria))
        return False
PypiClean
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/scripts/alignqc_to_novel_transcriptome.py
import argparse, sys, os, gzip, inspect
from shutil import rmtree, copy
from multiprocessing import cpu_count
from tempfile import mkdtemp, gettempdir
from subprocess import PIPE, Popen
from Bio.Format.GPD import GPDStream
from Bio.Stream import LocusStream

import classify_reads

# bring in the folder to the path for our utilities
#pythonfolder_loc = "../pyutil"
pythonfolder_loc = "../../Au-public/iron/utilities"
cmd_subfolder = os.path.realpath(
    os.path.abspath(os.path.join(os.path.split(inspect.getfile(
        inspect.currentframe()))[0], pythonfolder_loc)))
if cmd_subfolder not in sys.path:
    sys.path.insert(0, cmd_subfolder)

import gpd_to_nr
import gpd_annotate


def main():
    """Build non-redundant novel-isoform and novel-locus transcriptomes
    from AlignQC annotation/alignment outputs.

    Pipeline: classify reads, split them into novel-isoform / novel-locus
    sets, collapse each set to a non-redundant GPD, reannotate the loci
    against the new isoforms (promoting isoforms of newly found genes),
    name the remaining novel loci, and finally screen both outputs against
    the reference annotation to drop anything that is really a subset of a
    known gene.
    """
    args = do_inputs()

    # first we need to run the classify
    classify_reads.external_cmd('classify_reads.py '+args.input_annot+' '+args.input_gpd+' -o '+args.tempdir+'/classify.txt.gz')
    get_novel_sets(args.tempdir+'/classify.txt.gz', args.input_gpd,
                   args.tempdir+'/novel_isoform_reads.gpd.gz',
                   args.tempdir+'/novel_locus_reads.gpd.gz', args)

    # Now we can make a new non-redundant set of genepreds from the novel isoforms
    sys.stderr.write("making NR novel isoforms\n")
    cmd = 'gpd_to_nr.py '+args.tempdir+'/novel_isoform_reads.gpd.gz '+\
          ' -j '+str(args.junction_tolerance)+' --threads '+str(args.threads)+\
          ' --minimum_junction_end_support '+str(args.minimum_junction_end_support)+\
          ' --minimum_support '+str(args.minimum_support)+\
          ' --gene_names '+\
          ' -o '+args.tempdir+'/novel_isoforms_nr.gpd.gz'
    gpd_to_nr.external_cmd(cmd)

    sys.stderr.write("reannotating novel based on our new gpd\n")
    # Now we reannotate the novel based on the these newly annotated isoforms.
    # BUGFIX: the script name was misspelled 'gpd_anntotate.py' in the
    # original (presumably argv[0] is ignored by external_cmd -- verify).
    cmd = 'gpd_annotate.py '+args.tempdir+'/novel_locus_reads.gpd.gz '+\
          ' --threads '+str(1)+' '+\
          ' -r '+args.tempdir+'/novel_isoforms_nr.gpd.gz '+\
          ' -o '+args.tempdir+'/novel_locus_reads.annot.txt.gz'
    gpd_annotate.external_cmd(cmd)

    # now this new annotation should be classified
    # the new isoform will be in novel_isoform_reads.gpd.gz
    cmd = 'classify_reads.py '+args.tempdir+'/novel_locus_reads.annot.txt.gz '+args.tempdir+'/novel_locus_reads.gpd.gz -o '+args.tempdir+'/classify_novel.txt.gz'
    sys.stderr.write(cmd+"\n")
    classify_reads.external_cmd(cmd)
    get_novel_sets(args.tempdir+'/classify_novel.txt.gz',
                   args.tempdir+'/novel_locus_reads.gpd.gz',
                   args.tempdir+'/novel_isoform_reads2.gpd.gz',
                   args.tempdir+'/novel_locus_reads2.gpd.gz', args)

    # now lets combine our novel isoform reads making sure to sort them
    _gzip_sorted_gpd([args.tempdir+'/novel_isoform_reads.gpd.gz',
                      args.tempdir+'/novel_isoform_reads2.gpd.gz'],
                     args.tempdir+'/new_novel_isoform_reads.gpd.gz')

    # Now we can make a new non-redundant set of genepreds from the novel isoforms
    sys.stderr.write("making NR novel isoforms\n")
    cmd = 'gpd_to_nr.py '+args.tempdir+'/new_novel_isoform_reads.gpd.gz '+\
          ' -j '+str(args.junction_tolerance)+' --threads '+str(args.threads)+\
          ' --minimum_junction_end_support '+str(args.minimum_junction_end_support)+\
          ' --minimum_support '+str(args.minimum_support)+\
          ' --gene_names '+\
          ' -o '+args.tempdir+'/novel_isoforms_nr2.gpd.gz'
    gpd_to_nr.external_cmd(cmd)

    sys.stderr.write("now work on the novel loci\n")
    _gzip_sorted_gpd([args.tempdir+'/novel_locus_reads2.gpd.gz'],
                     args.tempdir+'/sorted_novel_locus_reads.gpd.gz')

    sys.stderr.write("making NR novel loci\n")
    cmd = 'gpd_to_nr.py '+args.tempdir+'/sorted_novel_locus_reads.gpd.gz '+\
          ' -j '+str(args.junction_tolerance)+' --threads '+str(args.threads)+\
          ' --minimum_junction_end_support '+str(args.minimum_junction_end_support)+\
          ' --minimum_support '+str(args.minimum_support)+\
          ' -o '+args.tempdir+'/novel_locus_nr.gpd.gz'
    gpd_to_nr.external_cmd(cmd)

    sys.stderr.write("sort the novel isoforms\n")
    _gzip_sorted_gpd([args.tempdir+'/novel_isoforms_nr2.gpd.gz'],
                     args.tempdir+'/novel_isoforms_nr.sorted.gpd.gz')

    sys.stderr.write("sort the novel loci\n")
    _gzip_sorted_gpd([args.tempdir+'/novel_locus_nr.gpd.gz'],
                     args.tempdir+'/novel_loci_nr.sorted.gpd.gz')

    # Now we can rename totally novel genes based on locus overlap
    _name_novel_loci(args.tempdir+'/novel_loci_nr.sorted.gpd.gz',
                     args.tempdir+'/novel_loci_nr_named.sorted.gpd.gz')

    # we are almost done but we need to make sure these genepreds
    # aren't subsets of known genes
    sys.stderr.write("reannotating novel-isoform by reference\n")
    cmd = 'gpd_annotate.py '+args.tempdir+'/novel_isoforms_nr.sorted.gpd.gz '+\
          ' --threads '+str(1)+' '+\
          ' -r '+args.reference_annotation_gpd+\
          ' -o '+args.tempdir+'/novel_isoforms_nr.annot.txt.gz'
    gpd_annotate.external_cmd(cmd)
    cmd = 'classify_reads.py '+args.tempdir+'/novel_isoforms_nr.annot.txt.gz '+args.tempdir+'/novel_isoforms_nr.sorted.gpd.gz -o '+args.tempdir+'/classify_novel_isoform_ref.txt.gz'
    sys.stderr.write(cmd+"\n")
    classify_reads.external_cmd(cmd)
    # now we can screen to make sure things in the novel isoform file
    # really are novel isoforms
    _remove_reference_matches(args.tempdir+'/classify_novel_isoform_ref.txt.gz',
                              args.tempdir+'/novel_isoforms_nr.sorted.gpd.gz',
                              args.tempdir+'/novel_isoforms_nr.filtered.sorted.gpd.gz')

    sys.stderr.write("reannotating novel-locus by reference\n")
    cmd = 'gpd_annotate.py '+args.tempdir+'/novel_loci_nr_named.sorted.gpd.gz '+\
          ' --threads '+str(1)+' '+\
          ' -r '+args.reference_annotation_gpd+\
          ' -o '+args.tempdir+'/novel_loci_nr_named.annot.txt.gz'
    gpd_annotate.external_cmd(cmd)
    cmd = 'classify_reads.py '+args.tempdir+'/novel_loci_nr_named.annot.txt.gz '+args.tempdir+'/novel_loci_nr_named.sorted.gpd.gz -o '+args.tempdir+'/classify_novel_loci.txt.gz'
    sys.stderr.write(cmd+"\n")
    classify_reads.external_cmd(cmd)
    # same screen for the novel loci
    _remove_reference_matches(args.tempdir+'/classify_novel_loci.txt.gz',
                              args.tempdir+'/novel_loci_nr_named.sorted.gpd.gz',
                              args.tempdir+'/novel_loci_nr_named.filtered.sorted.gpd.gz')

    if not os.path.exists(args.output):
        os.makedirs(args.output)
    copy(args.tempdir+'/novel_loci_nr_named.filtered.sorted.gpd.gz',
         args.output+'/novel_loci_nr_named.sorted.gpd.gz')
    copy(args.tempdir+'/novel_isoforms_nr.filtered.sorted.gpd.gz',
         args.output+'/novel_isoforms_nr.sorted.gpd.gz')

    # Temporary working directory step 3 of 3 - Cleanup
    if not args.specific_tempdir:
        rmtree(args.tempdir)


def _gzip_sorted_gpd(input_paths, output_path):
    """Concatenate one or more gzipped GPD files, sort them by chromosome
    and coordinates (columns 3, 5, 6), and write the result gzipped.

    Replaces the four copy-pasted 'sort | gzip' subprocess pipelines from
    the original main().
    """
    of = open(output_path, 'w')
    p2 = Popen('gzip'.split(), stdout=of, stdin=PIPE)
    p1 = Popen('sort -k3,3 -k5,5n -k6,6n'.split(), stdout=p2.stdin, stdin=PIPE)
    for path in input_paths:
        inf = gzip.open(path)
        for line in inf:
            p1.stdin.write(line)
        inf.close()
    p1.communicate()
    p2.communicate()
    of.close()


def _name_novel_loci(in_path, out_path):
    """Assign a gene name 'LOC<locus#>|<entry count>|<range>' to every GPD
    entry of each overlapping locus, writing a sorted gzipped GPD."""
    of = open(out_path, 'w')
    p2 = Popen('gzip'.split(), stdout=of, stdin=PIPE)
    p1 = Popen('sort -k3,3 -k5,5n -k6,6n'.split(), stdout=p2.stdin, stdin=PIPE)
    inf = gzip.open(in_path)
    ls = LocusStream(GPDStream(inf))
    z = 0
    for rng in ls:
        z += 1
        rng_string = rng.get_range_string()
        gpds = rng.get_payload()
        for gpd in gpds:
            gene_name = 'LOC'+str(z)+'|'+str(len(gpds))+'|'+rng_string
            f = gpd.get_gpd_line().rstrip().split("\t")
            f[0] = gene_name  # overwrite the gene-name column
            p1.stdin.write("\t".join(f)+"\n")
    inf.close()
    p1.communicate()
    p2.communicate()
    of.close()


def _remove_reference_matches(classify_path, in_path, out_path):
    """Drop GPD entries whose name was classified as 'subset' or 'full'
    (i.e. already explained by the reference annotation).

    classify_path -- gzipped classify_reads output (name, gene, class)
    in_path       -- gzipped GPD to screen (name is column 2)
    out_path      -- gzipped GPD of surviving entries
    """
    blacklist = set()
    finf = gzip.open(classify_path)
    for line in finf:
        f = line.rstrip().split("\t")
        if f[2] == 'subset' or f[2] == 'full':
            blacklist.add(f[0])
    finf.close()
    fof = gzip.open(out_path, 'w')
    finf = gzip.open(in_path)
    for line in finf:
        f = line.rstrip().split("\t")
        if f[1] in blacklist:
            continue
        fof.write(line)
    finf.close()
    fof.close()


def get_novel_sets(classification, input_gpd, out_iso, out_locus, args):
    """Split the reads of input_gpd into novel-isoform and novel-locus GPDs
    according to a classify_reads output file.

    classification -- gzipped classify_reads output; column 1 is the read
                      name, column 2 the gene name, column 3 the class
    input_gpd      -- GPD of reads (gzipped when the name ends in .gz);
                      the read name is GPD column 2
    out_iso        -- gzipped GPD of novel-isoform reads, gene column
                      replaced with the annotated gene name
    out_locus      -- gzipped GPD of novel-locus reads, unchanged
    """
    # now we want to create a non redundant version of the novel isoforms
    novel_isoforms = set()
    novel_isoform_genes = {}
    novel_loci = set()
    inf = gzip.open(classification)
    for line in inf:
        f = line.rstrip().split("\t")
        if f[2] == 'novel-isoform':
            novel_isoforms.add(f[0])
            novel_isoform_genes[f[0]] = f[1]  # save the gene name
        elif f[2] == 'novel-locus':
            novel_loci.add(f[0])
    inf.close()
    sys.stderr.write("outputing novel isoforms to a file\n")
    tof = gzip.open(out_iso, 'w')
    lof = gzip.open(out_locus, 'w')
    if input_gpd[-3:] == '.gz':
        inf_gpd = gzip.open(input_gpd)
    else:
        inf_gpd = open(input_gpd)
    z = 0
    for line in inf_gpd:
        z += 1
        if z % 1000 == 0:
            sys.stderr.write(str(z)+" reads processed\r")
        f = line.rstrip().split("\t")
        if f[1] in novel_isoforms:
            # BUGFIX: novel_isoform_genes is keyed by read name, which is
            # GPD column 2 (f[1]); the original looked it up with the gene
            # column (f[0]) and would KeyError whenever they differ.
            f[0] = novel_isoform_genes[f[1]]
            tof.write("\t".join(f)+"\n")
        elif f[1] in novel_loci:
            lof.write(line)
    inf_gpd.close()
    tof.close()
    lof.close()
    sys.stderr.write("\n")


def do_inputs():
    """Parse command line arguments and set up the temporary directory."""
    parser = argparse.ArgumentParser(
        description="",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('input_annot', help="<input annotbest.txt>")
    parser.add_argument('input_gpd', help="<input best.sorted.gpd>")
    parser.add_argument('-a', '--reference_annotation_gpd', required=True,
                        help="Reference annotation GPD")
    parser.add_argument('-o', '--output', required=True,
                        help="OUTPUT DIRECTORY")
    parser.add_argument('--threads', type=int, default=cpu_count(),
                        help="INT number of threads to run. Default is system cpu count")
    # Run parameters
    parser.add_argument('-j', '--junction_tolerance', type=int, default=10,
                        help="number of bp to tolerate junction mismatch on either side")
    parser.add_argument('--minimum_junction_end_support', type=int, default=2,
                        help="minimum coverage of end exons")
    parser.add_argument('--minimum_support', type=int, default=2,
                        help="minimum supporting reads")
    # Temporary working directory step 1 of 3 - Definition
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--tempdir', default=gettempdir(),
                       help="The temporary directory is made and destroyed here.")
    group.add_argument('--specific_tempdir',
                       help="This temporary directory will be used, but will remain after executing.")
    args = parser.parse_args()
    # Temporary working directory step 2 of 3 - Creation
    setup_tempdir(args)
    return args


def setup_tempdir(args):
    """Create args.tempdir: either the user-specified persistent directory
    or a fresh mkdtemp directory that main() removes at the end."""
    if args.specific_tempdir:
        if not os.path.exists(args.specific_tempdir):
            os.makedirs(args.specific_tempdir.rstrip('/'))
        args.tempdir = args.specific_tempdir.rstrip('/')
        if not os.path.exists(args.specific_tempdir.rstrip('/')):
            sys.stderr.write("ERROR: Problem creating temporary directory\n")
            sys.exit()
    else:
        args.tempdir = mkdtemp(prefix="weirathe.", dir=args.tempdir.rstrip('/'))
        if not os.path.exists(args.tempdir.rstrip('/')):
            sys.stderr.write("ERROR: Problem creating temporary directory\n")
            sys.exit()
    if not os.path.exists(args.tempdir):
        sys.stderr.write("ERROR: Problem creating temporary directory\n")
        sys.exit()
    return


if __name__ == "__main__":
    main()
PypiClean
/Flask-Scaffold-0.5.1.tar.gz/Flask-Scaffold-0.5.1/app/templates/static/node_modules/angular-ui-router/CHANGELOG.md
<a name="0.2.14"></a> ### 0.2.14 (2015-04-23) #### Bug Fixes * **$StateRefDirective:** resolve missing support for svg anchor elements #1667 ([0149a7bb](https://github.com/angular-ui/ui-router/commit/0149a7bb38b7af99388a1ad7cc9909a7b7c4439d)) * **$urlMatcherFactory:** * regex params should respect case-sensitivity ([1e10519f](https://github.com/angular-ui/ui-router/commit/1e10519f3be6bbf0cefdcce623cd2ade06e649e5), closes [#1671](https://github.com/angular-ui/ui-router/issues/1671)) * unquote all dashes from array params ([06664d33](https://github.com/angular-ui/ui-router/commit/06664d330f882390655dcfa83e10276110d0d0fa)) * add Type.$normalize function ([b0c6aa23](https://github.com/angular-ui/ui-router/commit/b0c6aa2350fdd3ce8483144774adc12f5a72b7e9)) * make optional params regex grouping optional ([06f73794](https://github.com/angular-ui/ui-router/commit/06f737945e83e668d09cfc3bcffd04a500ff1963), closes [#1576](https://github.com/angular-ui/ui-router/issues/1576)) * **$state:** allow about.*.** glob patterns ([e39b27a2](https://github.com/angular-ui/ui-router/commit/e39b27a2cb7d88525c446a041f9fbf1553202010)) * **uiSref:** * use Object's toString instead of Window's toString ([2aa7f4d1](https://github.com/angular-ui/ui-router/commit/2aa7f4d139dbd5b9fcc4afdcf2ab6642c87f5671)) * add absolute to allowed transition options ([ae1b3c4e](https://github.com/angular-ui/ui-router/commit/ae1b3c4eedc37983400d830895afb50457c63af4)) * **uiSrefActive:** Apply active classes on lazy loaded states ([f0ddbe7b](https://github.com/angular-ui/ui-router/commit/f0ddbe7b4a91daf279c3b7d0cee732bb1f3be5b4)) * **uiView:** add `$element` to locals for view controller ([db68914c](https://github.com/angular-ui/ui-router/commit/db68914cd6c821e7dec8155bd33142a3a97f5453)) #### Features * **$state:** * support URLs with #fragments ([3da0a170](https://github.com/angular-ui/ui-router/commit/3da0a17069e27598c0f9d9164e104dd5ce05cdc6)) * inject resolve params into controllerProvider 
([b380c223](https://github.com/angular-ui/ui-router/commit/b380c223fe12e2fde7582c0d6b1ed7b15a23579b), closes [#1131](https://github.com/angular-ui/ui-router/issues/1131)) * added 'state' to state reload method (feat no.1612) - modiefied options.reload ([b8f04575](https://github.com/angular-ui/ui-router/commit/b8f04575a8557035c1858c4d5c8dbde3e1855aaa)) * broadcast $stateChangeCancel event when event.preventDefault() is called in $sta ([ecefb758](https://github.com/angular-ui/ui-router/commit/ecefb758cb445e41620b62a272aafa3638613d7a)) * **$uiViewScroll:** change function to return promise ([c2a9a311](https://github.com/angular-ui/ui-router/commit/c2a9a311388bb212e5a2e820536d1d739f829ccd), closes [#1702](https://github.com/angular-ui/ui-router/issues/1702)) * **uiSrefActive:** Added support for multiple nested uiSref directives ([b1844948](https://github.com/angular-ui/ui-router/commit/b18449481d152b50705abfce2493a444eb059fa5)) <a name="0.2.13"></a> ### 0.2.13 (2014-11-20) This release primarily fixes issues reported against 0.2.12 #### Bug Fixes * **$state:** fix $state.includes/.is to apply param types before comparisions fix(uiSref): ma ([19715d15](https://github.com/angular-ui/ui-router/commit/19715d15e3cbfff724519e9febedd05b49c75baa), closes [#1513](https://github.com/angular-ui/ui-router/issues/1513)) * Avoid re-synchronizing from url after .transitionTo ([b267ecd3](https://github.com/angular-ui/ui-router/commit/b267ecd348e5c415233573ef95ebdbd051875f52), closes [#1573](https://github.com/angular-ui/ui-router/issues/1573)) * **$urlMatcherFactory:** * Built-in date type uses local time zone ([d726bedc](https://github.com/angular-ui/ui-router/commit/d726bedcbb5f70a5660addf43fd52ec730790293)) * make date type fn check .is before running ([aa94ce3b](https://github.com/angular-ui/ui-router/commit/aa94ce3b86632ad05301530a2213099da73a3dc0), closes [#1564](https://github.com/angular-ui/ui-router/issues/1564)) * early binding of array handler bypasses type resolution 
([ada4bc27](https://github.com/angular-ui/ui-router/commit/ada4bc27df5eff3ba3ab0de94a09bd91b0f7a28c)) * add 'any' Type for non-encoding non-url params ([3bfd75ab](https://github.com/angular-ui/ui-router/commit/3bfd75ab445ee2f1dd55275465059ed116b10b27), closes [#1562](https://github.com/angular-ui/ui-router/issues/1562)) * fix encoding slashes in params ([0c983a08](https://github.com/angular-ui/ui-router/commit/0c983a08e2947f999683571477debd73038e95cf), closes [#1119](https://github.com/angular-ui/ui-router/issues/1119)) * fix mixed path/query params ordering problem ([a479fbd0](https://github.com/angular-ui/ui-router/commit/a479fbd0b8eb393a94320973e5b9a62d83912ee2), closes [#1543](https://github.com/angular-ui/ui-router/issues/1543)) * **ArrayType:** * specify empty array mapping corner case ([74aa6091](https://github.com/angular-ui/ui-router/commit/74aa60917e996b0b4e27bbb4eb88c3c03832021d), closes [#1511](https://github.com/angular-ui/ui-router/issues/1511)) * fix .equals for array types ([5e6783b7](https://github.com/angular-ui/ui-router/commit/5e6783b77af9a90ddff154f990b43dbb17eeda6e), closes [#1538](https://github.com/angular-ui/ui-router/issues/1538)) * **Param:** fix default value shorthand declaration ([831d812a](https://github.com/angular-ui/ui-router/commit/831d812a524524c71f0ee1c9afaf0487a5a66230), closes [#1554](https://github.com/angular-ui/ui-router/issues/1554)) * **common:** fixed the _.filter clone to not create sparse arrays ([750f5cf5](https://github.com/angular-ui/ui-router/commit/750f5cf5fd91f9ada96f39e50d39aceb2caf22b6), closes [#1563](https://github.com/angular-ui/ui-router/issues/1563)) * **ie8:** fix calls to indexOf and filter ([dcb31b84](https://github.com/angular-ui/ui-router/commit/dcb31b843391b3e61dee4de13f368c109541813e), closes [#1556](https://github.com/angular-ui/ui-router/issues/1556)) #### Features * add json parameter Type ([027f1fcf](https://github.com/angular-ui/ui-router/commit/027f1fcf9c0916cea651e88981345da6f9ff214a)) <a 
name="0.2.12"></a> ### 0.2.12 (2014-11-13) #### Bug Fixes * **$resolve:** use resolve fn result, not parent resolved value of same name ([67f5e00c](https://github.com/angular-ui/ui-router/commit/67f5e00cc9aa006ce3fe6cde9dff261c28eab70a), closes [#1317], [#1353]) * **$state:** * populate default params in .transitionTo. ([3f60fbe6](https://github.com/angular-ui/ui-router/commit/3f60fbe6d65ebeca8d97952c05aa1d269f1b7ba1), closes [#1396]) * reload() now reinvokes controllers ([73443420](https://github.com/angular-ui/ui-router/commit/7344342018847902594dc1fc62d30a5c30f01763), closes [#582]) * do not emit $viewContentLoading if notify: false ([74255feb](https://github.com/angular-ui/ui-router/commit/74255febdf48ae082a02ca1e735165f2c369a463), closes [#1387](https://github.com/angular-ui/ui-router/issues/1387)) * register states at config-time ([4533fe36](https://github.com/angular-ui/ui-router/commit/4533fe36e0ab2f0143edd854a4145deaa013915a)) * handle parent.name when parent is obj ([4533fe36](https://github.com/angular-ui/ui-router/commit/4533fe36e0ab2f0143edd854a4145deaa013915a)) * **$urlMatcherFactory:** * register types at config ([4533fe36](https://github.com/angular-ui/ui-router/commit/4533fe36e0ab2f0143edd854a4145deaa013915a), closes [#1476]) * made path params default value "" for backwards compat ([8f998e71](https://github.com/angular-ui/ui-router/commit/8f998e71e43a0b31293331c981f5db0f0097b8ba)) * Pre-replace certain param values for better mapping ([6374a3e2](https://github.com/angular-ui/ui-router/commit/6374a3e29ab932014a7c77d2e1ab884cc841a2e3)) * fixed ParamSet.$$keys() ordering ([9136fecb](https://github.com/angular-ui/ui-router/commit/9136fecbc2bfd4fda748a9914f0225a46c933860)) * empty string policy now respected in Param.value() ([db12c85c](https://github.com/angular-ui/ui-router/commit/db12c85c16f2d105415f9bbbdeb11863f64728e0)) * "string" type now encodes/decodes slashes 
([3045e415](https://github.com/angular-ui/ui-router/commit/3045e41577a8b8b8afc6039f42adddf5f3c061ec), closes [#1119]) * allow arrays in both path and query params ([fdd2f2c1](https://github.com/angular-ui/ui-router/commit/fdd2f2c191c4a67c874fdb9ec9a34f8dde9ad180), closes [#1073], [#1045], [#1486], [#1394]) * typed params in search ([8d4cab69](https://github.com/angular-ui/ui-router/commit/8d4cab69dd67058e1a716892cc37b7d80a57037f), closes [#1488](https://github.com/angular-ui/ui-router/issues/1488)) * no longer generate unroutable urls ([cb9fd9d8](https://github.com/angular-ui/ui-router/commit/cb9fd9d8943cb26c7223f6990db29c82ae8740f8), closes [#1487](https://github.com/angular-ui/ui-router/issues/1487)) * handle optional parameter followed by required parameter in url format. ([efc72106](https://github.com/angular-ui/ui-router/commit/efc72106ddcc4774b48ea176a505ef9e95193b41)) * default to parameter string coersion. ([13a468a7](https://github.com/angular-ui/ui-router/commit/13a468a7d54c2fb0751b94c0c1841d580b71e6dc), closes [#1414](https://github.com/angular-ui/ui-router/issues/1414)) * concat respects strictMode/caseInsensitive ([dd72e103](https://github.com/angular-ui/ui-router/commit/dd72e103edb342d9cf802816fe127e1bbd68fd5f), closes [#1395]) * **ui-sref:** * Allow sref state options to take a scope object ([b5f7b596](https://github.com/angular-ui/ui-router/commit/b5f7b59692ce4933e2d63eb5df3f50a4ba68ccc0)) * replace raw href modification with attrs. 
([08c96782](https://github.com/angular-ui/ui-router/commit/08c96782faf881b0c7ab00afc233ee6729548fa0)) * nagivate to state when url is "" fix($state.href): generate href for state with ([656b5aab](https://github.com/angular-ui/ui-router/commit/656b5aab906e5749db9b5a080c6a83b95f50fd91), closes [#1363](https://github.com/angular-ui/ui-router/issues/1363)) * Check that state is defined in isMatch() ([92aebc75](https://github.com/angular-ui/ui-router/commit/92aebc7520f88babdc6e266536086e07263514c3), closes [#1314](https://github.com/angular-ui/ui-router/issues/1314), [#1332](https://github.com/angular-ui/ui-router/issues/1332)) * **uiView:** * allow inteprolated ui-view names ([81f6a19a](https://github.com/angular-ui/ui-router/commit/81f6a19a432dac9198fd33243855bfd3b4fea8c0), closes [#1324](https://github.com/angular-ui/ui-router/issues/1324)) * Made anim work with angular 1.3 ([c3bb7ad9](https://github.com/angular-ui/ui-router/commit/c3bb7ad903da1e1f3c91019cfd255be8489ff4ef), closes [#1367](https://github.com/angular-ui/ui-router/issues/1367), [#1345](https://github.com/angular-ui/ui-router/issues/1345)) * **urlRouter:** html5Mode accepts an object from angular v1.3.0-rc.3 ([7fea1e9d](https://github.com/angular-ui/ui-router/commit/7fea1e9d0d8c6e09cc6c895ecb93d4221e9adf48)) * **stateFilters:** mark state filters as stateful. 
([a00b353e](https://github.com/angular-ui/ui-router/commit/a00b353e3036f64a81245c4e7898646ba218f833), closes [#1479]) * **ui-router:** re-add IE8 compatibility for map/filter/keys ([8ce69d9f](https://github.com/angular-ui/ui-router/commit/8ce69d9f7c886888ab53eca7e53536f36b428aae), closes [#1518], [#1383]) * **package:** point 'main' to a valid filename ([ac903350](https://github.com/angular-ui/ui-router/commit/ac9033501debb63364539d91fbf3a0cba4579f8e)) * **travis:** make CI build faster ([0531de05](https://github.com/angular-ui/ui-router/commit/0531de052e414a8d839fbb4e7635e923e94865b3)) #### Features ##### Default and Typed params This release includes a lot of bug fixes around default/optional and typed parameters. As such, 0.2.12 is the first release where we recommend those features be used. * **$state:** * add state params validation ([b1379e6a](https://github.com/angular-ui/ui-router/commit/b1379e6a4d38f7ed7436e05873932d7c279af578), closes [#1433](https://github.com/angular-ui/ui-router/issues/1433)) * is/includes/get work on relative stateOrName ([232e94b3](https://github.com/angular-ui/ui-router/commit/232e94b3c2ca2c764bb9510046e4b61690c87852)) * .reload() returns state transition promise ([639e0565](https://github.com/angular-ui/ui-router/commit/639e0565dece9d5544cc93b3eee6e11c99bd7373)) * **$templateFactory:** request templateURL as text/html ([ccd60769](https://github.com/angular-ui/ui-router/commit/ccd6076904a4b801d77b47f6e2de4c06ce9962f8), closes [#1287]) * **$urlMatcherFactory:** Made a Params and ParamSet class ([0cc1e6cc](https://github.com/angular-ui/ui-router/commit/0cc1e6cc461a4640618e2bb594566551c54834e2)) <a name="0.2.11"></a> ### 0.2.11 (2014-08-26) #### Bug Fixes * **$resolve:** Resolves only inherit from immediate parent fixes #702 ([df34e20c](https://github.com/angular-ui/ui-router/commit/df34e20c576299e7a3c8bd4ebc68d42341c0ace9)) * **$state:** * change $state.href default options.inherit to true 
([deea695f](https://github.com/angular-ui/ui-router/commit/deea695f5cacc55de351ab985144fd233c02a769)) * sanity-check state lookups ([456fd5ae](https://github.com/angular-ui/ui-router/commit/456fd5aec9ea507518927bfabd62b4afad4cf714), closes [#980](https://github.com/angular-ui/ui-router/issues/980)) * didn't comply to inherit parameter ([09836781](https://github.com/angular-ui/ui-router/commit/09836781f126c1c485b06551eb9cfd4fa0f45c35)) * allow view content loading broadcast ([7b78edee](https://github.com/angular-ui/ui-router/commit/7b78edeeb52a74abf4d3f00f79534033d5a08d1a)) * **$urlMatcherFactory:** * detect injected functions ([91f75ae6](https://github.com/angular-ui/ui-router/commit/91f75ae66c4d129f6f69e53bd547594e9661f5d5)) * syntax ([1ebed370](https://github.com/angular-ui/ui-router/commit/1ebed37069bae8614d41541d56521f5c45f703f3)) * **UrlMatcher:** * query param function defaults ([f9c20530](https://github.com/angular-ui/ui-router/commit/f9c205304f10d8a4ebe7efe9025e642016479a51)) * don't decode default values ([63607bdb](https://github.com/angular-ui/ui-router/commit/63607bdbbcb432d3fb37856a1cb3da0cd496804e)) * **travis:** update Node version to fix build ([d6b95ef2](https://github.com/angular-ui/ui-router/commit/d6b95ef23d9dacb4eba08897f5190a0bcddb3a48)) * **uiSref:** * Generate an href for states with a blank url. closes #1293 ([691745b1](https://github.com/angular-ui/ui-router/commit/691745b12fa05d3700dd28f0c8d25f8a105074ad)) * should inherit params by default ([b973dad1](https://github.com/angular-ui/ui-router/commit/b973dad155ad09a7975e1476bd096f7b2c758eeb)) * cancel transition if preventDefault() has been called ([2e6d9167](https://github.com/angular-ui/ui-router/commit/2e6d9167d3afbfbca6427e53e012f94fb5fb8022)) * **uiView:** Fixed infinite loop when is called .go() from a controller. 
([e13988b8](https://github.com/angular-ui/ui-router/commit/e13988b8cd6231d75c78876ee9d012cc87f4a8d9), closes [#1194](https://github.com/angular-ui/ui-router/issues/1194)) * **docs:** * Fixed link to milestones ([6c0ae500](https://github.com/angular-ui/ui-router/commit/6c0ae500cc238ea9fc95adcc15415c55fc9e1f33)) * fix bug in decorator example ([4bd00af5](https://github.com/angular-ui/ui-router/commit/4bd00af50b8b88a49d1545a76290731cb8e0feb1)) * Removed an incorrect semi-colon ([af97cef8](https://github.com/angular-ui/ui-router/commit/af97cef8b967f2e32177e539ef41450dca131a7d)) * Explain return value of rule as function ([5e887890](https://github.com/angular-ui/ui-router/commit/5e8878900a6ffe59a81aed531a3925e34a297377)) #### Features * **$state:** * allow parameters to pass unharmed ([8939d057](https://github.com/angular-ui/ui-router/commit/8939d0572ab1316e458ef016317ecff53131a822)) * **BREAKING CHANGE**: state parameters are no longer automatically coerced to strings, and unspecified parameter values are now set to undefined rather than null. * allow prevent syncUrl on failure ([753060b9](https://github.com/angular-ui/ui-router/commit/753060b910d5d2da600a6fa0757976e401c33172)) * **typescript:** Add typescript definitions for component builds ([521ceb3f](https://github.com/angular-ui/ui-router/commit/521ceb3fd7850646422f411921e21ce5e7d82e0f)) * **uiSref:** extend syntax for ui-sref ([71cad3d6](https://github.com/angular-ui/ui-router/commit/71cad3d636508b5a9fe004775ad1f1adc0c80c3e)) * **uiSrefActive:** * Also activate for child states. ([bf163ad6](https://github.com/angular-ui/ui-router/commit/bf163ad6ce176ce28792696c8302d7cdf5c05a01), closes [#818](https://github.com/angular-ui/ui-router/issues/818)) * **BREAKING CHANGE** Since ui-sref-active now activates even when child states are active you may need to swap out your ui-sref-active with ui-sref-active-eq, thought typically we think devs want the auto inheritance. 
* uiSrefActiveEq: new directive with old ui-sref-active behavior * **$urlRouter:** * defer URL change interception ([c72d8ce1](https://github.com/angular-ui/ui-router/commit/c72d8ce11916d0ac22c81b409c9e61d7048554d7)) * force URLs to have valid params ([d48505cd](https://github.com/angular-ui/ui-router/commit/d48505cd328d83e39d5706e085ba319715f999a6)) * abstract $location handling ([08b4636b](https://github.com/angular-ui/ui-router/commit/08b4636b294611f08db35f00641eb5211686fb50)) * **$urlMatcherFactory:** * fail on bad parameters ([d8f124c1](https://github.com/angular-ui/ui-router/commit/d8f124c10d00c7e5dde88c602d966db261aea221)) * date type support ([b7f074ff](https://github.com/angular-ui/ui-router/commit/b7f074ff65ca150a3cdbda4d5ad6cb17107300eb)) * implement type support ([450b1f0e](https://github.com/angular-ui/ui-router/commit/450b1f0e8e03c738174ff967f688b9a6373290f4)) * **UrlMatcher:** * handle query string arrays ([9cf764ef](https://github.com/angular-ui/ui-router/commit/9cf764efab45fa9309368688d535ddf6e96d6449), closes [#373](https://github.com/angular-ui/ui-router/issues/373)) * injectable functions as defaults ([00966ecd](https://github.com/angular-ui/ui-router/commit/00966ecd91fb745846039160cab707bfca8b3bec)) * default values & type decoding for query params ([a472b301](https://github.com/angular-ui/ui-router/commit/a472b301389fbe84d1c1fa9f24852b492a569d11)) * allow shorthand definitions ([5b724304](https://github.com/angular-ui/ui-router/commit/5b7243049793505e44b6608ea09878c37c95b1f5)) * validates whole interface ([32b27db1](https://github.com/angular-ui/ui-router/commit/32b27db173722e9194ef1d5c0ea7d93f25a98d11)) * implement non-strict matching ([a3e21366](https://github.com/angular-ui/ui-router/commit/a3e21366bee0475c9795a1ec76f70eec41c5b4e3)) * add per-param config support ([07b3029f](https://github.com/angular-ui/ui-router/commit/07b3029f4d409cf955780113df92e36401b47580)) * **BREAKING CHANGE**: the `params` option in state configurations must now be 
an object keyed by parameter name. ### 0.2.10 (2014-03-12) #### Bug Fixes * **$state:** use $browser.baseHref() when generating urls with .href() ([cbcc8488](https://github.com/angular-ui/ui-router/commit/cbcc84887d6b6d35258adabb97c714cd9c1e272d)) * **bower.json:** JS files should not be ignored ([ccdab193](https://github.com/angular-ui/ui-router/commit/ccdab193315f304eb3be5f5b97c47a926c79263e)) * **dev:** karma:background task is missing, can't run grunt:dev. ([d9f7b898](https://github.com/angular-ui/ui-router/commit/d9f7b898e8e3abb8c846b0faa16a382913d7b22b)) * **sample:** Contacts menu button not staying active when navigating to detail states. Need t ([2fcb8443](https://github.com/angular-ui/ui-router/commit/2fcb84437cb43ade12682a92b764f13cac77dfe7)) * **uiSref:** support mock-clicks/events with no data ([717d3ff7](https://github.com/angular-ui/ui-router/commit/717d3ff7d0ba72d239892dee562b401cdf90e418)) * **uiView:** * Do NOT autoscroll when autoscroll attr is missing ([affe5bd7](https://github.com/angular-ui/ui-router/commit/affe5bd785cdc3f02b7a9f64a52e3900386ec3a0), closes [#807](https://github.com/angular-ui/ui-router/issues/807)) * Refactoring uiView directive to copy ngView logic ([548fab6a](https://github.com/angular-ui/ui-router/commit/548fab6ab9debc9904c5865c8bc68b4fc3271dd0), closes [#857](https://github.com/angular-ui/ui-router/issues/857), [#552](https://github.com/angular-ui/ui-router/issues/552)) #### Features * **$state:** includes() allows glob patterns for state matching. 
([2d5f6b37](https://github.com/angular-ui/ui-router/commit/2d5f6b37191a3135f4a6d9e8f344c54edcdc065b)) * **UrlMatcher:** Add support for case insensitive url matching ([642d5247](https://github.com/angular-ui/ui-router/commit/642d524799f604811e680331002feec7199a1fb5)) * **uiSref:** add support for transition options ([2ed7a728](https://github.com/angular-ui/ui-router/commit/2ed7a728cee6854b38501fbc1df6139d3de5b28a)) * **uiView:** add controllerAs config with function ([1ee7334a](https://github.com/angular-ui/ui-router/commit/1ee7334a73efeccc9b95340e315cdfd59944762d)) ### 0.2.9 (2014-01-17) This release is identical to 0.2.8. 0.2.8 was re-tagged in git to fix a problem with bower. ### 0.2.8 (2014-01-16) #### Bug Fixes * **$state:** allow null to be passed as 'params' param ([094dc30e](https://github.com/angular-ui/ui-router/commit/094dc30e883e1bd14e50a475553bafeaade3b178)) * **$state.go:** param inheritance shouldn't inherit from siblings ([aea872e0](https://github.com/angular-ui/ui-router/commit/aea872e0b983cb433436ce5875df10c838fccedb)) * **bower.json:** fixes bower.json ([eed3cc4d](https://github.com/angular-ui/ui-router/commit/eed3cc4d4dfef1d3ef84b9fd063127538ebf59d3)) * **uiSrefActive:** annotate controller injection ([85921422](https://github.com/angular-ui/ui-router/commit/85921422ff7fb0effed358136426d616cce3d583), closes [#671](https://github.com/angular-ui/ui-router/issues/671)) * **uiView:** * autoscroll tests pass on 1.2.4 & 1.1.5 ([86eacac0](https://github.com/angular-ui/ui-router/commit/86eacac09ca5e9000bd3b9c7ba6e2cc95d883a3a)) * don't animate initial load ([83b6634d](https://github.com/angular-ui/ui-router/commit/83b6634d27942ca74766b2b1244a7fc52c5643d9)) * test pass against 1.0.8 and 1.2.4 ([a402415a](https://github.com/angular-ui/ui-router/commit/a402415a2a28b360c43b9fe8f4f54c540f6c33de)) * it should autoscroll when expr is missing. 
([8bb9e27a](https://github.com/angular-ui/ui-router/commit/8bb9e27a2986725f45daf44c4c9f846385095aff)) #### Features * **uiSref:** add target attribute behaviour ([c12bf9a5](https://github.com/angular-ui/ui-router/commit/c12bf9a520d30d70294e3d82de7661900f8e394e)) * **uiView:** * merge autoscroll expression test. ([b89e0f87](https://github.com/angular-ui/ui-router/commit/b89e0f871d5cc35c10925ede986c10684d5c9252)) * cache and test autoscroll expression ([ee262282](https://github.com/angular-ui/ui-router/commit/ee2622828c2ce83807f006a459ac4e11406d9258))
PypiClean
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Debug.py
__revision__ = "src/engine/SCons/Debug.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"

import os
import sys
import time
import weakref
import inspect

# Global variable that gets set to 'True' by the Main script,
# when the creation of class instances should get tracked.
track_instances = False
# Maps a class name to the list of tracked references: weakrefs normally,
# or the raw instances for slots-only classes (see logInstanceCreation).
tracked_classes = {}


def logInstanceCreation(instance, name=None):
    """Record the creation of *instance* under *name* (defaults to the
    instance's class name) in the module-level ``tracked_classes`` dict."""
    if name is None:
        name = instance.__class__.__name__
    if name not in tracked_classes:
        tracked_classes[name] = []
    if hasattr(instance, '__dict__'):
        tracked_classes[name].append(weakref.ref(instance))
    else:
        # weakref doesn't seem to work when the instance
        # contains only slots...
        tracked_classes[name].append(instance)


def string_to_classes(s):
    """Translate a class-selection string into a list of class names.

    The special string '*' selects every currently tracked class (sorted);
    anything else is treated as a whitespace-separated list of names.
    """
    if s == '*':
        return sorted(tracked_classes.keys())
    else:
        return s.split()


def fetchLoggedInstances(classes="*"):
    """Return a list of (classname, instance_count) tuples for *classes*."""
    classnames = string_to_classes(classes)
    return [(cn, len(tracked_classes[cn])) for cn in classnames]


def countLoggedInstances(classes, file=sys.stdout):
    """Write one "<classname>: <count>" line per selected class to *file*."""
    for classname in string_to_classes(classes):
        file.write("%s: %d\n" % (classname, len(tracked_classes[classname])))


def listLoggedInstances(classes, file=sys.stdout):
    """Write the repr() of each tracked instance of *classes* to *file*.

    NOTE(review): entries are usually weakref.ref objects, and
    ``inspect.isclass(ref)`` is False for those, so the weakref itself
    (not the referent) is printed for __dict__-based instances.  This
    mirrors the original behavior and is preserved here — confirm
    whether dereferencing (as dumpLoggedInstances does) was intended.
    """
    for classname in string_to_classes(classes):
        file.write('\n%s:\n' % classname)
        for ref in tracked_classes[classname]:
            if inspect.isclass(ref):
                obj = ref()
            else:
                obj = ref
            if obj is not None:
                file.write(' %s\n' % repr(obj))


def dumpLoggedInstances(classes, file=sys.stdout):
    """Write each live tracked instance of *classes*, followed by its
    attribute dictionary, to *file*."""
    for classname in string_to_classes(classes):
        file.write('\n%s:\n' % classname)
        for ref in tracked_classes[classname]:
            obj = ref()
            if obj is not None:
                file.write(' %s:\n' % obj)
                for key, value in obj.__dict__.items():
                    file.write(' %20s : %s\n' % (key, value))


if sys.platform[:5] == "linux":
    # Linux doesn't actually support memory usage stats from getrusage().
    def memory():
        """Return this process's virtual memory size in bytes, read from
        the vsize field (index 22) of /proc/self/stat."""
        with open('/proc/self/stat') as f:
            mstr = f.read()
        mstr = mstr.split()[22]
        return int(mstr)
elif sys.platform[:6] == 'darwin':
    #TODO really get memory stats for OS X
    def memory():
        """Memory statistics are not implemented on macOS; always 0."""
        return 0
else:
    try:
        import resource
    except ImportError:
        try:
            import win32process
            import win32api
        except ImportError:
            def memory():
                """No usable memory API on this platform; always 0."""
                return 0
        else:
            def memory():
                """Return the peak working-set size in bytes via Win32."""
                process_handle = win32api.GetCurrentProcess()
                memory_info = win32process.GetProcessMemoryInfo( process_handle )
                return memory_info['PeakWorkingSetSize']
    else:
        def memory():
            """Return ru_maxrss from getrusage() (units vary by platform)."""
            res = resource.getrusage(resource.RUSAGE_SELF)
            return res[4]


# returns caller's stack
def caller_stack():
    """Return the caller's stack as a list of 'file:line(function)'
    strings, outermost frame first, excluding this function and its
    immediate caller."""
    import traceback
    tb = traceback.extract_stack()
    # strip itself and the caller from the output
    tb = tb[:-2]
    result = []
    for back in tb:
        # (filename, line number, function name, text)
        key = back[:3]
        result.append('%s:%d(%s)' % func_shorten(key))
    return result


caller_bases = {}
caller_dicts = {}


def caller_trace(back=0):
    """
    Trace caller stack and save info into global dicts, which are
    printed automatically at the end of SCons execution.
    """
    global caller_bases, caller_dicts
    import traceback
    tb = traceback.extract_stack(limit=3+back)
    tb.reverse()
    callee = tb[1][:3]
    caller_bases[callee] = caller_bases.get(callee, 0) + 1
    for caller in tb[2:]:
        caller = callee + caller[:3]
        try:
            entry = caller_dicts[callee]
        except KeyError:
            caller_dicts[callee] = entry = {}
        entry[caller] = entry.get(caller, 0) + 1
        callee = caller


# print a single caller and its callers, if any
def _dump_one_caller(key, file, level=0):
    """Recursively write the callers recorded for *key*, most frequent
    first, indenting one level per recursion step."""
    leader = ' '*level
    for v,c in sorted([(-v,c) for c,v in caller_dicts[key].items()]):
        file.write("%s %6d %s:%d(%s)\n" % ((leader,-v) + func_shorten(c[-3:])))
        if c in caller_dicts:
            _dump_one_caller(c, file, level+1)


# print each call tree
def dump_caller_counts(file=sys.stdout):
    """Write every call tree recorded by caller_trace() to *file*."""
    for k in sorted(caller_bases.keys()):
        file.write("Callers of %s:%d(%s), %d calls:\n"
                    % (func_shorten(k) + (caller_bases[k],)))
        _dump_one_caller(k, file)


# Well-known path prefixes to strip from file names in trace output.
# The flag says whether to keep only the part after the prefix (1) or
# everything starting at the prefix (0).
shorten_list = [
    ( '/scons/SCons/',      1),
    ( '/src/engine/SCons/', 1),
    ( '/usr/lib/python',    0),
]

if os.sep != '/':
    shorten_list = [(t[0].replace('/', os.sep), t[1]) for t in shorten_list]


def func_shorten(func_tuple):
    """Shorten the file-name element of a (file, line, func) tuple by
    stripping the first matching prefix found in shorten_list."""
    f = func_tuple[0]
    for t in shorten_list:
        i = f.find(t[0])
        if i >= 0:
            if t[1]:
                i = i + len(t[0])
            return (f[i:],)+func_tuple[1:]
    return func_tuple


# Cache of file handles opened by Trace(), keyed by the `file` argument,
# so repeated Trace() calls to the same destination reuse one handle.
TraceFP = {}
if sys.platform == 'win32':
    TraceDefault = 'con'
else:
    TraceDefault = '/dev/tty'
TimeStampDefault = None
StartTime = time.time()
PreviousTime = StartTime


def Trace(msg, file=None, mode='w', tstamp=None):
    """Write a trace message to a file.  Whenever a file is specified,
    it becomes the default for the next call to Trace().

    *file* may be a path name (opened once with *mode* and cached in
    TraceFP) or an already-open file-like object.  When *tstamp* is
    true, each message is prefixed with the elapsed time since module
    load and since the previous timestamped message; a given tstamp
    setting also becomes the default for subsequent calls.
    """
    global TraceDefault
    global TimeStampDefault
    global PreviousTime
    if file is None:
        file = TraceDefault
    else:
        TraceDefault = file
    if tstamp is None:
        tstamp = TimeStampDefault
    else:
        TimeStampDefault = tstamp
    try:
        fp = TraceFP[file]
    except KeyError:
        try:
            fp = TraceFP[file] = open(file, mode)
        except TypeError:
            # Assume we were passed an open file pointer.
            fp = file
    if tstamp:
        now = time.time()
        fp.write('%8.4f %8.4f: ' % (now - StartTime, now - PreviousTime))
        PreviousTime = now
    fp.write(msg)
    fp.flush()
    # BUG FIX: the handle must NOT be closed here.  Handles opened by
    # Trace() are cached in TraceFP and reused by later calls; closing
    # one made every subsequent Trace() to the same destination fail
    # with "I/O operation on closed file", and it also closed file
    # objects owned by the caller.  The flush() above already ensures
    # the message reaches the file.

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
PypiClean
/AsynQueue-0.9.8.tar.gz/AsynQueue-0.9.8/asynqueue/interfaces.py
from zope.interface import invariant, Interface, Attribute
from twisted.internet.interfaces import IConsumer

from asynqueue import errors


class IWorker(Interface):
    """
    Provided by worker objects that can have tasks assigned to them for
    processing. All worker objects are considered qualified to run tasks
    of the default C{None} series. To indicate that subclasses or
    subclass instances are qualified to run tasks of user-defined series
    in addition to the default, the hashable object that identifies the
    additional series must be listed in the C{cQualified} or
    C{iQualified} class or instance attributes, respectively.
    """
    cQualified = Attribute(
        """
        A class-attribute list containing all series for which all
        instances of the subclass are qualified to run tasks.
        """)
    iQualified = Attribute(
        """
        An instance-attribute list containing all series for which the
        subclass instance is qualified to run tasks.
        """)

    def _check_qualifications(ob):
        """
        Qualification attributes must be present as lists.
        """
        for attrName in ('cQualified', 'iQualified'):
            x = getattr(ob, attrName, None)
            if not isinstance(x, list):
                raise errors.InvariantError(ob)
    # Register the check above as a zope.interface invariant; it is run
    # whenever this interface is validated against a provider object.
    invariant(_check_qualifications)

    def setResignator(callableObject):
        """
        Registers the supplied I{callableObject} to be called if the
        worker deems it necessary to resign, e.g., a remote connection
        has been lost.
        """

    def run(task):
        """
        Adds the task represented by the specified I{task} object to the
        list of tasks pending for this worker, to be run however and
        whenever the worker sees fit.  However, workers are expected to
        run highest-priority tasks before anything else they have lined
        up in their mini-queues.

        Unless the worker is constructed with a C{raw=True} keyword or
        the task includes C{raw=True}, an iterator resulting from the
        task is converted into an instance of L{iteration.Deferator}.
        The underlying iteration (possibly across a pipe or wire) must
        be handled transparently to the user.

        If the task has a I{consumer} keyword set to an implementor of
        C{IConsumer}, an L{iteration.IterationProducer} coupled to that
        consumer will be the end result instead.

        Make sure that any callbacks you add to the task's internal
        deferred object C{task.d} return the callback argument.
        Otherwise, the result of your task will be lost in the callback
        chain.

        @return: A C{Deferred} that fires when the worker is ready to be
          assigned another task.
        """

    def stop():
        """
        Attempts to gracefully shut down the worker, returning a
        C{Deferred} that fires when the worker is done with all assigned
        tasks and will not cause any errors if the reactor is stopped or
        its object is deleted.

        The C{Deferred} returned by your implementation of this method
        must not fire until B{after} the results of all pending tasks
        have been obtained.  Thus the deferred must be chained to each
        C{task.d} somehow.

        Make sure that any callbacks you add to the task's internal
        deferred object C{task.d} return the callback argument.
        Otherwise, the result of your task will be lost in the callback
        chain.
        """

    def crash():
        """
        Takes drastic action to shut down the worker, rudely and
        synchronously.

        @return: A list of I{task} objects, one for each task left
          uncompleted. You shouldn't have to call this method if no
          tasks are left pending; the L{stop} method should be enough in
          that case.
        """
PypiClean
/NeuroRuler-1.7.tar.gz/NeuroRuler-1.7/src/GUI/main.py
import importlib import sys import os import webbrowser from pathlib import Path from typing import Union import SimpleITK as sitk import numpy as np from PyQt6 import QtGui, QtCore from PyQt6.QtGui import QPixmap, QAction, QImage, QIcon, QResizeEvent from PyQt6.QtWidgets import ( QApplication, QDialog, QLabel, QMainWindow, QFileDialog, QMenu, QVBoxLayout, QWidget, QMessageBox, ) from PyQt6.uic.load_ui import loadUi from PyQt6.QtCore import Qt, QSize import pprint from src.utils.constants import View, ThresholdFilter import src.utils.constants as constants # Note, do not use imports like # from src.utils.global_vars import IMAGE_DICT # This would make the global variables not work import src.utils.global_vars as global_vars import src.utils.imgproc as imgproc import src.utils.user_settings as user_settings from src.GUI.helpers import ( string_to_QColor, mask_QImage, sitk_slice_to_qimage, ErrorMessageBox, InformationMessageBox, InformationDialog, ) from src.utils.img_helpers import ( initialize_globals, update_images, get_curr_image, get_curr_image_size, get_curr_rotated_slice, get_curr_smooth_slice, get_curr_metadata, get_curr_binary_thresholded_slice, get_curr_otsu_slice, get_curr_physical_units, get_curr_path, get_curr_properties_tuple, get_middle_dimension, ) import src.utils.img_helpers as img_helpers PATH_TO_UI_FILE: Path = Path("src") / "GUI" / "mainwindow.ui" PATH_TO_HCT_LOGO: Path = Path("src") / "GUI" / "static" / "hct_logo.png" SETTINGS_VIEW_ENABLED: bool = True """Whether the user is able to adjust settings (settings screen) or not (circumference and contoured image screen).""" DEFAULT_CIRCUMFERENCE_LABEL_TEXT: str = "Calculated Circumference: N/A" DEFAULT_IMAGE_PATH_LABEL_TEXT: str = "Image path" GITHUB_LINK: str = "https://github.com/COMP523TeamD/HeadCircumferenceTool" DOCUMENTATION_LINK: str = "https://headcircumferencetool.readthedocs.io/en/latest/" DEFAULT_IMAGE_TEXT: str = "Select images using File > Open!" 
DEFAULT_IMAGE_NUM_LABEL_TEXT: str = "Image 0 of 0" DEFAULT_IMAGE_STATUS_TEXT: str = "Image path is displayed here." # We assume units are millimeters if we can't find units in metadata MESSAGE_TO_SHOW_IF_UNITS_NOT_FOUND: str = "millimeters (mm)" UNSCALED_QPIXMAP: QPixmap """Unscaled QPixmap from which the scaled version is rendered in the GUI. When any slice (rotated, smoothed, previewed) is rendered from an unscaled QImage, this variable is set to the QPixmap generated from that unscaled QImage. This variable will not change on resizeEvent. resizeEvent will scale this. Otherwise, if scaling self.image's pixmap (which is already scaled), there would be loss of detail.""" class MainWindow(QMainWindow): """Main window of the application. Settings mode and circumference mode.""" def __init__(self): """Load main file and connect GUI events to methods/functions. Sets window title and icon.""" super(MainWindow, self).__init__() loadUi(str(PATH_TO_UI_FILE), self) self.setWindowTitle("NeuroRuler") self.action_open.triggered.connect(lambda: self.browse_files(False)) self.action_add_images.triggered.connect(lambda: self.browse_files(True)) self.action_remove_image.triggered.connect(self.remove_curr_img) self.action_exit.triggered.connect(exit) self.action_github.triggered.connect(lambda: webbrowser.open(GITHUB_LINK)) self.action_documentation.triggered.connect( lambda: webbrowser.open(DOCUMENTATION_LINK) ) self.action_show_credits.triggered.connect( lambda: information_dialog( "Credits", 'Credit to Jesse Wei, Madison Lester, Peifeng "Hank" He, Eric Schneider, and Martin Styner.', ) ) self.action_test_stuff.triggered.connect(self.test_stuff) self.action_print_metadata.triggered.connect(display_metadata) self.action_print_dimensions.triggered.connect(display_dimensions) self.action_print_properties.triggered.connect(display_properties) self.action_print_direction.triggered.connect(display_direction) self.action_print_spacing.triggered.connect(display_spacing) 
self.action_export_png.triggered.connect( lambda: self.export_curr_slice_as_img("png") ) self.action_export_jpg.triggered.connect( lambda: self.export_curr_slice_as_img("jpg") ) self.action_export_bmp.triggered.connect( lambda: self.export_curr_slice_as_img("bmp") ) self.action_export_ppm.triggered.connect( lambda: self.export_curr_slice_as_img("ppm") ) self.action_export_xbm.triggered.connect( lambda: self.export_curr_slice_as_img("xbm") ) self.action_export_xpm.triggered.connect( lambda: self.export_curr_slice_as_img("xpm") ) self.next_button.clicked.connect(self.next_img) self.previous_button.clicked.connect(self.previous_img) self.apply_button.clicked.connect(self.settings_export_view_toggle) self.x_slider.valueChanged.connect(self.rotate_x) self.y_slider.valueChanged.connect(self.rotate_y) self.z_slider.valueChanged.connect(self.rotate_z) self.slice_slider.valueChanged.connect(self.slice_update) self.reset_button.clicked.connect(self.reset_settings) self.smoothing_preview_button.clicked.connect(self.render_smooth_slice) self.otsu_radio_button.clicked.connect(self.disable_binary_threshold_inputs) self.binary_radio_button.clicked.connect(self.enable_binary_threshold_inputs) self.threshold_preview_button.clicked.connect(self.render_threshold) self.x_view_radio_button.clicked.connect(self.update_view) self.y_view_radio_button.clicked.connect(self.update_view) self.z_view_radio_button.clicked.connect(self.update_view) def enable_elements(self) -> None: """Called after File > Open. Enables GUI elements. Explicitly disables some (e.g., Export CSV menu item and binary threshold inputs, since Otsu is default). 
:return: None """ # findChildren searches recursively by default for widget in self.findChildren(QWidget): widget.setEnabled(True) # Menu stuff for widget in self.findChildren(QAction): widget.setEnabled(True) self.action_export_csv.setEnabled(not SETTINGS_VIEW_ENABLED) self.export_button.setEnabled(not SETTINGS_VIEW_ENABLED) self.disable_binary_threshold_inputs() def enable_binary_threshold_inputs(self) -> None: """Called when Binary filter button is clicked. Restore binary input box. :return: None """ self.upper_threshold_input.setEnabled(True) self.lower_threshold_input.setEnabled(True) def settings_export_view_toggle(self) -> None: """Called when clicking Apply (in settings mode) or Adjust (in circumference mode). Toggle SETTINGS_VIEW_ENABLED, change apply button text, render stuff depending on the current mode. Enables/disables GUI elements depending on the value of SETTINGS_VIEW_ENABLED. :return: None """ # Unsure sure why this is necessary here but nowhere else... global SETTINGS_VIEW_ENABLED SETTINGS_VIEW_ENABLED = not SETTINGS_VIEW_ENABLED settings_view_enabled = SETTINGS_VIEW_ENABLED if settings_view_enabled: self.apply_button.setText("Apply") self.circumference_label.setText(DEFAULT_CIRCUMFERENCE_LABEL_TEXT) # Render uncontoured slice after pressing adjust self.render_curr_slice() else: self.apply_button.setText("Adjust") # When clicking Apply, the circumference should be calculated only for the axial slice. # Edge case: If the user does a huge rotation, then switching to axial view may not display an # axial slice. For example, after clicking coronal and setting X rotation to 90, # clicking axial will not show an axial slice. # We could just not change the view and re-orient when clicking Apply if this is a valid use case (probably isn't). # TODO: Check with Styner self.set_view_z() self.orient_curr_image() self.update_smoothing_settings() self.update_binary_filter_settings() self.apply_button.setText("Adjust") # Ignore the type annotation warning here. 
# render_curr_slice() must return np.ndarray since not settings_view_enabled here binary_contour_slice: np.ndarray = self.render_curr_slice() self.render_circumference(binary_contour_slice) # TODO: Call enable_elements and then a disable method (code another one, and it'd be short) # If not settings_view_enabled self.action_open.setEnabled(settings_view_enabled) self.action_add_images.setEnabled(settings_view_enabled) self.action_remove_image.setEnabled(settings_view_enabled) self.x_slider.setEnabled(settings_view_enabled) self.y_slider.setEnabled(settings_view_enabled) self.z_slider.setEnabled(settings_view_enabled) self.slice_slider.setEnabled(settings_view_enabled) self.x_rotation_label.setEnabled(settings_view_enabled) self.y_rotation_label.setEnabled(settings_view_enabled) self.z_rotation_label.setEnabled(settings_view_enabled) self.slice_num_label.setEnabled(settings_view_enabled) self.reset_button.setEnabled(settings_view_enabled) self.smoothing_preview_button.setEnabled(settings_view_enabled) self.otsu_radio_button.setEnabled(settings_view_enabled) self.binary_radio_button.setEnabled(settings_view_enabled) self.lower_threshold.setEnabled(settings_view_enabled) self.lower_threshold_input.setEnabled(settings_view_enabled) self.upper_threshold.setEnabled(settings_view_enabled) self.upper_threshold_input.setEnabled(settings_view_enabled) self.threshold_preview_button.setEnabled(settings_view_enabled) self.action_export_csv.setEnabled(not settings_view_enabled) self.circumference_label.setEnabled(not settings_view_enabled) self.export_button.setEnabled(not settings_view_enabled) self.smoothing_preview_button.setEnabled(settings_view_enabled) self.conductance_parameter_label.setEnabled(settings_view_enabled) self.conductance_parameter_input.setEnabled(settings_view_enabled) self.smoothing_iterations_label.setEnabled(settings_view_enabled) self.smoothing_iterations_input.setEnabled(settings_view_enabled) self.time_step_label.setEnabled(settings_view_enabled) 
self.time_step_input.setEnabled(settings_view_enabled) self.x_view_radio_button.setEnabled(settings_view_enabled) self.y_view_radio_button.setEnabled(settings_view_enabled) self.z_view_radio_button.setEnabled(settings_view_enabled) self.lower_threshold_input.setEnabled( settings_view_enabled and self.binary_radio_button.isChecked() ) self.upper_threshold_input.setEnabled( settings_view_enabled and self.binary_radio_button.isChecked() ) def disable_binary_threshold_inputs(self) -> None: """Called when Otsu filter button is clicked. Disable binary threshold input boxes. :return: None """ self.upper_threshold_input.setEnabled(False) self.lower_threshold_input.setEnabled(False) def disable_elements(self) -> None: """Called when the list is now empty, i.e. just removed from list of length 1. Explicitly enables elements that should never be disabled and sets default text. :return: None """ central_widget = self.findChildren(QWidget, "centralwidget")[0] menubar = self.menuBar() for gui_element in central_widget.findChildren(QWidget): gui_element.setEnabled(False) # findChildren searches recursively by default for menu in menubar.findChildren(QMenu): for action in menu.actions(): action.setEnabled(False) self.action_open.setEnabled(True) self.circumference_label.setText(DEFAULT_CIRCUMFERENCE_LABEL_TEXT) self.image.setEnabled(True) self.image.clear() self.image.setText(DEFAULT_IMAGE_TEXT) self.image.setStatusTip(DEFAULT_IMAGE_STATUS_TEXT) self.image_path_label.setText(DEFAULT_IMAGE_PATH_LABEL_TEXT) self.image_num_label.setText(DEFAULT_IMAGE_NUM_LABEL_TEXT) self.apply_button.setText("Apply") self.z_view_radio_button.setChecked(True) def browse_files(self, extend: bool) -> None: """Called after File > Open or File > Add Images. If `extend`, then `IMAGE_DICT` will be updated with new images. Else, `IMAGE_DICT` will be cleared and (re)initialized (e.g. when choosing files for the first time or re-opening). Opens file menu. 
Renders various elements depending on the value of `extend`. :param extend: Whether to clear IMAGE_DICT and (re)initialize or add images to it. Determines which GUI elements are rendered. :type extend: bool :return: None""" file_filter: str = "MRI images " + str(constants.SUPPORTED_EXTENSIONS).replace( "'", "" ).replace(",", "") files = QFileDialog.getOpenFileNames( self, "Open files", str(user_settings.FILE_BROWSER_START_DIR), file_filter ) # list[str] path_list = files[0] if len(path_list) == 0: return # Convert to list[Path]. Slight inefficiency but worth. path_list = list(map(Path, path_list)) differing_images: list[Path] if not extend: differing_images = initialize_globals(path_list) # Set view to z because initialize_globals calls update_images, which orients loaded images # for the axial view self.set_view_z() self.render_all_sliders() self.enable_elements() self.render_curr_slice() if differing_images: newline: str = "\n" error_message_box( f"The image(s) you uploaded have differing properties.\n" f"The first one and all images with properties matching the first one have been loaded.\n" f"The name(s) of the ones with differing properties are\n\n" f"{newline.join([path.name for path in differing_images])}" ) else: # Doesn't need to re-render sliders to set max value of slice slider. # update_images won't change max value of slice slicer. # Does not need to render current slice. Images are added to the end of the dict. # And adding duplicate key doesn't change key order. differing_images = update_images(path_list) if differing_images: newline: str = "\n" error_message_box( f"You have uploaded image(s) with properties that differ from those of the currently loaded ones.\n" f"These image(s) have not been loaded:\n\n" f"{newline.join([path.name for path in differing_images])}" ) # When extending, image num must be updated self.render_image_num_and_path() def update_view(self) -> None: """Called when clicking on any of the three view radio buttons. 
Sets global_vars.VIEW to the correct value. Then orients the current image and renders. :return: None """ # The three buttons are in a button group in the file # And all have autoExclusive=True if self.x_view_radio_button.isChecked(): global_vars.VIEW = constants.View.X elif self.y_view_radio_button.isChecked(): global_vars.VIEW = constants.View.Y else: global_vars.VIEW = constants.View.Z self.orient_curr_image() self.render_curr_slice() def set_view_z(self) -> None: """Set global_vars.VIEW to View.Z and set the z radio button to checked. :return: None """ global_vars.VIEW = constants.View.Z # TODO: Uncheck x and y are technically unnecessary since these 3 buttons in the view_button_group have # autoExclusive=True self.x_view_radio_button.setChecked(False) self.y_view_radio_button.setChecked(False) self.z_view_radio_button.setChecked(True) def update_smoothing_settings(self) -> None: """Updates global smoothing settings. :return: None """ conductance: str = self.conductance_parameter_input.displayText() try: global_vars.CONDUCTANCE_PARAMETER = float(conductance) except ValueError: if user_settings.DEBUG: print("Conductance must be a float!") self.conductance_parameter_input.setText(str(global_vars.CONDUCTANCE_PARAMETER)) self.conductance_parameter_input.setPlaceholderText( str(global_vars.CONDUCTANCE_PARAMETER) ) global_vars.SMOOTHING_FILTER.SetConductanceParameter( global_vars.CONDUCTANCE_PARAMETER ) iterations: str = self.smoothing_iterations_input.displayText() try: global_vars.SMOOTHING_ITERATIONS = int(iterations) except ValueError: if user_settings.DEBUG: print("Iterations must be an integer!") self.smoothing_iterations_input.setText(str(global_vars.SMOOTHING_ITERATIONS)) self.smoothing_iterations_input.setPlaceholderText( str(global_vars.SMOOTHING_ITERATIONS) ) global_vars.SMOOTHING_FILTER.SetNumberOfIterations( global_vars.SMOOTHING_ITERATIONS ) time_step: str = self.time_step_input.displayText() try: global_vars.TIME_STEP = float(time_step) except 
ValueError: if user_settings.DEBUG: print("Time step must be a float!") self.time_step_input.setText(str(global_vars.TIME_STEP)) self.time_step_input.setPlaceholderText(str(global_vars.TIME_STEP)) global_vars.SMOOTHING_FILTER.SetTimeStep(global_vars.TIME_STEP) def update_binary_filter_settings(self) -> None: """Updates global binary filter settings. :return: None """ lower_threshold: str = self.lower_threshold_input.displayText() try: global_vars.LOWER_THRESHOLD = float(lower_threshold) except ValueError: pass self.lower_threshold_input.setText(str(global_vars.LOWER_THRESHOLD)) self.lower_threshold_input.setPlaceholderText(str(global_vars.LOWER_THRESHOLD)) global_vars.BINARY_THRESHOLD_FILTER.SetLowerThreshold( global_vars.LOWER_THRESHOLD ) upper_threshold: str = self.upper_threshold_input.displayText() try: global_vars.UPPER_THRESHOLD = float(upper_threshold) except ValueError: pass self.upper_threshold_input.setText(str(global_vars.UPPER_THRESHOLD)) self.upper_threshold_input.setPlaceholderText(str(global_vars.UPPER_THRESHOLD)) global_vars.BINARY_THRESHOLD_FILTER.SetUpperThreshold( global_vars.UPPER_THRESHOLD ) def render_scaled_qpixmap_from_qimage(self, q_img: QImage) -> None: """Convert q_img to QPixmap and set self.image's pixmap to that pixmap scaled to self.image's size. Sets UNSCALED_PIXMAP to the unscaled pixmap generated from the q_img. :param q_img: :type q_img: QImage :return: None""" global UNSCALED_QPIXMAP UNSCALED_QPIXMAP = QPixmap(q_img) self.image.setPixmap( UNSCALED_QPIXMAP.scaled( self.image.size(), aspectRatioMode=Qt.AspectRatioMode.KeepAspectRatio, transformMode=Qt.TransformationMode.SmoothTransformation, ) ) def resizeEvent(self, event: QResizeEvent) -> None: """This method is called every time the window is resized. Overrides PyQt6's resizeEvent. Sets pixmap to UNSCALED_QPIXMAP scaled to self.image's size. 
:param event: :type event: QResizeEvent :return: None""" if global_vars.IMAGE_DICT: self.image.setPixmap( UNSCALED_QPIXMAP.scaled( self.image.size(), aspectRatioMode=Qt.AspectRatioMode.KeepAspectRatio, transformMode=Qt.TransformationMode.SmoothTransformation, ) ) QMainWindow.resizeEvent(self, event) def render_curr_slice(self) -> Union[np.ndarray, None]: """Resamples the currently selected image using its rotation and slice settings, then renders the resulting slice (scaled to the size of self.image) in the GUI. DOES NOT set text for `image_num_label` and file path labels. If `not SETTINGS_VIEW_ENABLED`, also calls `imgproc.contour()` and outlines the contour of the QImage (mutating it). Additionally, also returns a view of the binary contoured slice if `not SETTINGS_VIEW_ENABLED`. This saves work when computing circumference. :return: np.ndarray if `not SETTINGS_VIEW_ENABLED` else None :rtype: np.ndarray or None""" if not SETTINGS_VIEW_ENABLED: self.set_view_z() rotated_slice: sitk.Image = get_curr_rotated_slice() q_img: QImage = sitk_slice_to_qimage(rotated_slice) rv_dummy_var: np.ndarray = np.zeros(0) if not SETTINGS_VIEW_ENABLED: if self.otsu_radio_button.isChecked(): binary_contour_slice: np.ndarray = imgproc.contour( rotated_slice, ThresholdFilter.Otsu ) else: binary_contour_slice: np.ndarray = imgproc.contour( rotated_slice, ThresholdFilter.Binary ) rv_dummy_var = binary_contour_slice mask_QImage( q_img, np.transpose(binary_contour_slice), string_to_QColor(user_settings.CONTOUR_COLOR), ) elif global_vars.VIEW != constants.View.Z: z_indicator: np.ndarray = np.zeros( (rotated_slice.GetSize()[1], rotated_slice.GetSize()[0]) ) z_indicator[get_curr_image_size()[2] - global_vars.SLICE - 1, :] = 1 mask_QImage( q_img, np.transpose(z_indicator), string_to_QColor(user_settings.CONTOUR_COLOR), ) self.render_scaled_qpixmap_from_qimage(q_img) if not SETTINGS_VIEW_ENABLED: return rv_dummy_var def render_smooth_slice(self) -> None: """Renders smooth slice in GUI. 
Allows user to preview result of smoothing settings. :return: None""" self.update_smoothing_settings() # Preview should apply filter only on axial slice self.set_view_z() smooth_slice: sitk.Image = get_curr_smooth_slice() q_img: QImage = sitk_slice_to_qimage(smooth_slice) self.render_scaled_qpixmap_from_qimage(q_img) def render_threshold(self) -> None: """Render filtered image slice on UI. :return: None""" # Preview should apply filter only on axial slice self.set_view_z() if self.otsu_radio_button.isChecked(): filter_img: sitk.Image = get_curr_otsu_slice() else: self.update_binary_filter_settings() filter_img: sitk.Image = get_curr_binary_thresholded_slice() q_img: QImage = sitk_slice_to_qimage(filter_img) self.render_scaled_qpixmap_from_qimage(q_img) def render_circumference(self, binary_contour_slice: np.ndarray) -> None: """Called after pressing Apply or when (not SETTINGS_VIEW_ENABLED and (pressing Next or Previous or Remove Image)) Computes circumference from binary_contour_slice and renders circumference label. binary_contour_slice is always the return value of render_curr_slice since render_curr_slice must have already been called. If calling this function, render_curr_slice must have been called first. :param binary_contour_slice: Result of previously calling render_curr_slice when `not SETTINGS_VIEW_ENABLED` :type binary_contour_slice: np.ndarray :return: None""" if SETTINGS_VIEW_ENABLED: raise Exception("Rendering circumference label when SETTINGS_VIEW_ENABLED") units: Union[str, None] = get_curr_physical_units() circumference: float = imgproc.length_of_contour(binary_contour_slice) self.circumference_label.setText( f"Calculated Circumference: {round(circumference, constants.NUM_DIGITS_TO_ROUND_TO)} {units if units is not None else MESSAGE_TO_SHOW_IF_UNITS_NOT_FOUND}" ) def render_image_num_and_path(self) -> None: """Set image_num_label, image_path_label, and status tip of the image. 
Called when pressing Next or Previous (next_img, prev_img), and after File > Open (browse_files). Also called when removing an image. :return: None""" self.image_num_label.setText( f"Image {global_vars.CURR_IMAGE_INDEX + 1} of {len(global_vars.IMAGE_DICT)}" ) self.image_path_label.setText(str(get_curr_path().name)) self.image_path_label.setStatusTip(str(get_curr_path())) self.image.setStatusTip(str(get_curr_path())) def render_all_sliders(self) -> None: """Sets all slider values to the global rotation and slice values. Also updates maximum value of slice slider. Called on reset. Will need to be called when updating batch index, if we implement this. Not called when the user updates a slider. Also updates rotation and slice num labels. :return: None""" self.x_slider.setValue(global_vars.THETA_X) self.y_slider.setValue(global_vars.THETA_Y) self.z_slider.setValue(global_vars.THETA_Z) self.slice_slider.setMaximum(get_curr_image().GetSize()[View.Z.value] - 1) self.slice_slider.setValue(global_vars.SLICE) self.x_rotation_label.setText(f"X rotation: {global_vars.THETA_X}°") self.y_rotation_label.setText(f"Y rotation: {global_vars.THETA_Y}°") self.z_rotation_label.setText(f"Z rotation: {global_vars.THETA_Z}°") self.slice_num_label.setText(f"Slice: {global_vars.SLICE}") def rotate_x(self) -> None: """Called when the user updates the x slider. Render image and set `x_rotation_label`. :return: None""" x_slider_val: int = self.x_slider.value() global_vars.THETA_X = x_slider_val self.render_curr_slice() self.x_rotation_label.setText(f"X rotation: {x_slider_val}°") def rotate_y(self) -> None: """Called when the user updates the y slider. Render image and set `y_rotation_label`. :return: None""" y_slider_val: int = self.y_slider.value() global_vars.THETA_Y = y_slider_val self.render_curr_slice() self.y_rotation_label.setText(f"Y rotation: {y_slider_val}°") def rotate_z(self) -> None: """Called when the user updates the z slider. Render image and set `z_rotation_label`. 
:return: None""" z_slider_val: int = self.z_slider.value() global_vars.THETA_Z = z_slider_val self.render_curr_slice() self.z_rotation_label.setText(f"Z rotation: {z_slider_val}°") def slice_update(self) -> None: """Called when the user updates the slice slider. Render image and set `slice_num_label`. :return: None""" slice_slider_val: int = self.slice_slider.value() global_vars.SLICE = slice_slider_val self.render_curr_slice() self.slice_num_label.setText(f"Slice: {slice_slider_val}") def reset_settings(self) -> None: """Called when Reset is clicked. Resets rotation values to 0 and slice num to the default `int((z-1)/2)` for the current image, then renders current image and sliders. :return: None""" global_vars.THETA_X = 0 global_vars.THETA_Y = 0 global_vars.THETA_Z = 0 global_vars.SLICE = get_middle_dimension(get_curr_image(), View.Z) self.render_curr_slice() self.render_all_sliders() def next_img(self) -> None: """Called when Next button is clicked. Advance index and render. :return: None""" img_helpers.next_img() # TODO: This feels inefficient... self.orient_curr_image() binary_contour_or_none: Union[np.ndarray, None] = self.render_curr_slice() self.render_image_num_and_path() if not SETTINGS_VIEW_ENABLED: # Ignore the type annotation warning. binary_contour_or_none must be binary_contour since not SETTINGS_VIEW_ENABLED self.render_circumference(binary_contour_or_none) def previous_img(self) -> None: """Called when Previous button is clicked. Decrement index and render. :return: None""" img_helpers.previous_img() # TODO: This feels inefficient... self.orient_curr_image() binary_contour_or_none: Union[np.ndarray, None] = self.render_curr_slice() self.render_image_num_and_path() if not SETTINGS_VIEW_ENABLED: # Ignore the type annotation warning. 
binary_contour_or_none must be binary_contour since not SETTINGS_VIEW_ENABLED self.render_circumference(binary_contour_or_none) # TODO: Due to the images now being a dict, we can # easily let the user remove a range of images if they want def remove_curr_img(self) -> None: """Called after File > Remove File. Removes current image from `IMAGE_DICT`. Since `IMAGE_DICT` is a reference to an image dict in `IMAGE_GROUPS`, it's removed from `IMAGE_GROUPS` as well. :return: None""" img_helpers.del_curr_img() if len(global_vars.IMAGE_DICT) == 0: self.disable_elements() return binary_contour_or_none: Union[np.ndarray, None] = self.render_curr_slice() self.render_image_num_and_path() if not SETTINGS_VIEW_ENABLED: # Ignore the type annotation warning. binary_contour_or_none must be binary_contour since not SETTINGS_VIEW_ENABLED self.render_circumference(binary_contour_or_none) def test_stuff(self) -> None: """Connected to Debug > Test stuff. Dummy button and function for easily testing stuff. Assume that anything you put here will be overwritten freely. :return: None""" self.image.setPixmap(QPixmap(f":/{user_settings.THEME_NAME}/help.svg")) self.image.setStatusTip( "This is intentional, if it's a question mark then that's good :), means we can display icons" ) # TODO: File name should also include circumference when not SETTINGS_VIEW_ENABLED? def export_curr_slice_as_img(self, extension: str) -> None: """Called when an Export as image menu item is clicked. Exports `self.image` to `settings.OUTPUT_DIR/img/`. Thus, calling this when `SETTINGS_VIEW_ENABLED` will save a non-contoured image. Calling this when `not SETTINGS_VIEW_ENABLED` will save a contoured image. Filename has format <file_name>_[contoured_]<theta_x>_<theta_y>_<theta_z>_<slice_num>.<extension> contoured_ will be in the name if `not SETTINGS_VIEW_ENABLED`. Supported formats in this function are the ones supported by QPixmap, namely BMP, JPG, JPEG, PNG, PPM, XBM, XPM. 
:param extension: BMP, JPG, JPEG, PNG, PPM, XBM, XPM :type extension: str :return: `None`""" file_name = ( global_vars.CURR_IMAGE_INDEX + 1 if user_settings.EXPORTED_FILE_NAMES_USE_INDEX else get_curr_path().name ) path: str = str( constants.IMG_DIR / f"{file_name}_{'contoured_' if not SETTINGS_VIEW_ENABLED else ''}{global_vars.THETA_X}_{global_vars.THETA_Y}_{global_vars.THETA_Z}_{global_vars.SLICE}.{extension}" ) self.image.pixmap().save(path, extension) def orient_curr_image(self) -> None: """Orient the current image for the current view (global_vars.VIEW) by applying ORIENT_FILTER on it. This mutates the image. :return: None""" img_helpers.orient_curr_image(global_vars.VIEW) def error_message_box(message: str) -> None: """Creates a message box with an error message and red warning icon. :param message: the error message to be displayed :type message: str :return: None""" ErrorMessageBox(message).exec() def information_dialog(title: str, message: str) -> None: """Create an informational dialog QDialog window with title and message. :param title: :type title: str :param message: :type message: str :return: None""" InformationDialog(title, message).exec() # TODO: Broken def display_metadata() -> None: """Display metadata in window or terminal. Internally, uses sitk.GetMetaData, which doesn't return all metadata (e.g., doesn't return spacing values whereas sitk.GetSpacing does). Typically, this returns less metadata for NRRD than for NIfTI. :return: None""" if not len(global_vars.IMAGE_DICT): print("Can't print metadata when there's no image!") return message: str = pprint.pformat(get_curr_metadata()) if user_settings.DISPLAY_ADVANCED_MENU_MESSAGES_IN_TERMINAL: print(message) else: information_dialog("Metadata", message) def display_dimensions() -> None: """Display current image's dimensions in window or terminal. 
:return: None""" if not len(global_vars.IMAGE_DICT): print("Can't print dimensions when there's no image!") return message: str = pprint.pformat(get_curr_image().GetSize()) if user_settings.DISPLAY_ADVANCED_MENU_MESSAGES_IN_TERMINAL: print(message) else: information_dialog("Dimensions", message) # TODO: If updating img_helpers.get_properties(), this needs to be slightly adjusted! def display_properties() -> None: """Display properties in window or terminal. Internally, the properties tuple is a tuple of values only and doesn't contain field names. This function creates a dictionary with field names for printing. But the dictionary doesn't exist in the program. :return: None""" if not len(global_vars.IMAGE_DICT): print("No loaded image!") return curr_properties: tuple = get_curr_properties_tuple() fields: tuple = ("center of rotation", "dimensions", "spacing") if len(fields) != len(curr_properties): print( "Update src/GUI/main.print_properties() !\nNumber of fields and number of properties don't match." ) exit(1) # Pretty sure the dict(zip(...)) goes through fields in alphabetical order message: str = pprint.pformat(dict(zip(fields, curr_properties))) if user_settings.DISPLAY_ADVANCED_MENU_MESSAGES_IN_TERMINAL: print(message) else: information_dialog("Properties", message) def display_direction() -> None: """Display current image's direction in window or terminal. :return: None""" if not len(global_vars.IMAGE_DICT): print("Can't print direction when there's no image!") return message: str = pprint.pformat(get_curr_image().GetDirection()) if user_settings.DISPLAY_ADVANCED_MENU_MESSAGES_IN_TERMINAL: print(message) else: information_dialog("Direction", message) def display_spacing() -> None: """Display current image's spacing in window or terminal. 
:return: None""" if not len(global_vars.IMAGE_DICT): print("Can't print spacing when there's no image!") return message: str = pprint.pformat(get_curr_image().GetSpacing()) if user_settings.DISPLAY_ADVANCED_MENU_MESSAGES_IN_TERMINAL: print(message) else: information_dialog("Spacing", message) def main() -> None: """Main entrypoint of GUI.""" # This import can't go at the top of the file # because gui.py.parse_gui_cli() has to set THEME_NAME before the import occurs # This imports globally # For example, src/GUI/helpers.py can access resource files without having to import there importlib.import_module(f"src.GUI.themes.{user_settings.THEME_NAME}.resources") app = QApplication(sys.argv) # On macOS, sets the application logo in the dock (but no window icon on macOS) # TODO # On Windows, sets the window icon at the top left of the window (but no dock icon on Windows) app.setWindowIcon(QIcon(str(PATH_TO_HCT_LOGO))) # TODO: Put arrow buttons on the left and right endpoints of the sliders # These arrow buttons already show up if commenting in app.setStyle("Fusion") # And commenting out with open stylesheet and app.setStyleSheet # We should figure out how to get arrow buttons on sliders for (+, -) 1 precise adjustments. # Currently, the sliders allow this (left click on the left or right end), but the arrow buttons # are not in the GUI. 
# app.setStyle("Fusion") MAIN_WINDOW: MainWindow = MainWindow() with open( constants.THEME_DIR / user_settings.THEME_NAME / "stylesheet.qss", "r" ) as f: MAIN_WINDOW.setStyleSheet(f.read()) # Non-zero min width and height is needed to prevent # this bug https://github.com/COMP523TeamD/HeadCircumferenceTool/issues/42 # However, this also seems to affect startup GUI size or at least GUI element spacing MAIN_WINDOW.setMinimumSize(QSize(1, 1)) MAIN_WINDOW.resize( int( user_settings.STARTUP_WIDTH_RATIO * constants.PRIMARY_MONITOR_DIMENSIONS[0] ), int( user_settings.STARTUP_HEIGHT_RATIO * constants.PRIMARY_MONITOR_DIMENSIONS[1] ), ) MAIN_WINDOW.show() try: # sys.exit will cause a bug when running from terminal # After importing the GUI runner function from __init__, clicking the close window button # (not the menu button) will not close the window # because the Python process wouldn't end os._exit(app.exec()) except: if user_settings.DEBUG: print("Exiting") if __name__ == "__main__": import src.utils.parser as parser parser.parse_config_json() parser.parse_gui_cli() main()
PypiClean
/Flask-HAL-1.0.4.tar.gz/Flask-HAL-1.0.4/flask_hal/__init__.py
# Third Party Libs
from flask import Response

# First Party Libs
from flask_hal.document import Document


class HAL(object):
    """Integrates Flask-HAL into a Flask application.

    Works both with direct initialisation and with the Application
    Factory Pattern.  Binding the extension installs a custom
    ``response_class`` on the application, so views that return a
    ``flask_hal.document.Document`` are automatically serialised to
    their ``JSON`` representation.
    """

    def __init__(self, app=None, response_class=None):
        """Create the extension, optionally binding it to an application.

        Acts as a proxy to :meth:`flask_hal.HAL.init_app` when an
        application instance is supplied.

        Example:
            >>> from flask import Flask
            >>> from flask_hal import HAL
            >>> app = Flask(__name__)
            >>> HAL(app=app)

        Keyword Args:
            app (flask.app.Flask): Optional Flask application instance
            response_class (class): Optional custom ``response_class``
        """
        # Guard clause: nothing to do until an application is supplied
        # (Application Factory Pattern defers this to init_app()).
        if app is None:
            return
        self.init_app(app, response_class=response_class)

    def init_app(self, app, response_class=None):
        """Bind Flask-HAL to a Flask application.

        Designed for use with the Flask Application Factory Pattern.

        Example:
            >>> from flask import Flask
            >>> from flask_hal import HAL
            >>> app = Flask(__name__)
            >>> HAL().init_app(app)

        Args:
            app (flask.app.Flask): Flask application instance

        Keyword Args:
            response_class (class): Optional custom ``response_class``
        """
        # Fall back to the bundled HALResponse unless a custom class is given.
        app.response_class = HALResponse if response_class is None else response_class


class HALResponse(Response):
    """Response class that knows how to render HAL documents.

    Overrides the default Flask ``Response`` wrapper so that HAL
    documents returned from views are serialised with the correct
    content type.

    Example:
        >>> from flask import Flask()
        >>> from flask_hal import HALResponse
        >>> app = Flask(__name__)
        >>> app.response_class = HALResponse
    """

    @staticmethod
    def force_type(rv, env):
        """Coerce a view return value into a standard ``Response``.

        Invoked by ``flask.make_response`` when a view returns something
        other than a byte, string or unicode value.

        Args:
            rv (flask_hal.document.Document): View return value
            env (dict): Request environment

        Returns:
            flask.wrappers.Response: A standard Flask response
        """
        # Anything that isn't a HAL Document is delegated to Flask's
        # default coercion logic.
        if not isinstance(rv, Document):
            return Response.force_type(rv, env)
        return Response(
            rv.to_json(),
            headers={
                'Content-Type': 'application/hal+json'
            })
PypiClean
/BittyTax-0.5.1.tar.gz/BittyTax-0.5.1/src/bittytax/conv/output_excel.py
import platform
import re
import sys

import xlsxwriter
from colorama import Fore
from xlsxwriter.utility import xl_rowcol_to_cell

from ..config import config
from ..version import __version__
from .out_record import TransactionOutRecord
from .output_csv import OutputBase

if platform.system() == "Darwin":
    # Default size for MacOS
    FONT_SIZE = 12
else:
    FONT_SIZE = 11


class OutputExcel(OutputBase):  # pylint: disable=too-many-instance-attributes
    """Writes converted transaction records to a styled Excel workbook.

    One worksheet is created per parser (see Worksheet); this class owns
    the workbook object and all shared cell formats.
    """

    FILE_EXTENSION = "xlsx"
    DATE_FORMAT = "yyyy-mm-dd hh:mm:ss"
    DATE_FORMAT_MS = "yyyy-mm-dd hh:mm:ss.000"  # Excel can only display milliseconds
    STR_FORMAT_MS = "%Y-%m-%dT%H:%M:%S.%f"
    FONT_COLOR_IN_DATA = "#808080"
    TITLE = "BittyTax Records"
    PROJECT_URL = "https://github.com/BittyTax/BittyTax"

    def __init__(self, progname, data_files, args):
        """Create the workbook, set its document properties and build
        every cell format used by the worksheets.

        :param progname: program name, embedded in the workbook's author property
        :param data_files: parsed data files to output
        :param args: parsed command-line arguments (uses args.output_filename)
        """
        super(OutputExcel, self).__init__(data_files)
        self.filename = self.get_output_filename(args.output_filename, self.FILE_EXTENSION)

        self.workbook = xlsxwriter.Workbook(self.filename)
        self.workbook.set_size(1800, 1200)
        self.workbook.formats[0].set_font_size(FONT_SIZE)
        self.workbook.set_properties(
            {
                "title": self.TITLE,
                "author": "%s v%s" % (progname, __version__),
                "comments": self.PROJECT_URL,
            }
        )

        # Shared cell formats: "out" formats are for the generated
        # BittyTax columns, "in" formats (grey) for the original data.
        self.format_out_header = self.workbook.add_format(
            {
                "font_size": FONT_SIZE,
                "font_color": "white",
                "bold": True,
                "bg_color": "black",
            }
        )
        self.format_in_header = self.workbook.add_format(
            {
                "font_size": FONT_SIZE,
                "font_color": "white",
                "bold": True,
                "bg_color": self.FONT_COLOR_IN_DATA,
            }
        )
        self.format_out_data = self.workbook.add_format(
            {"font_size": FONT_SIZE, "font_color": "black"}
        )
        self.format_out_data_err = self.workbook.add_format(
            {"font_size": FONT_SIZE, "font_color": "red"}
        )
        self.format_in_data = self.workbook.add_format(
            {"font_size": FONT_SIZE, "font_color": self.FONT_COLOR_IN_DATA}
        )
        # Failing input cells are marked with a red diagonal strike.
        self.format_in_data_err = self.workbook.add_format(
            {
                "font_size": FONT_SIZE,
                "font_color": self.FONT_COLOR_IN_DATA,
                "diag_type": 3,
                "diag_border": 7,
                "diag_color": "red",
            }
        )
        self.format_num_float = self.workbook.add_format(
            {
                "font_size": FONT_SIZE,
                "font_color": "black",
                "num_format": "#,##0." + "#" * 30,
            }
        )
        self.format_num_int = self.workbook.add_format({"num_format": "#,##0"})
        self.format_num_string = self.workbook.add_format(
            {"font_size": FONT_SIZE, "font_color": "black", "align": "right"}
        )
        self.format_currency = self.workbook.add_format(
            {
                "font_size": FONT_SIZE,
                "font_color": "black",
                "num_format": '"' + config.sym() + '"#,##0.00',
            }
        )
        self.format_timestamp = self.workbook.add_format(
            {
                "font_size": FONT_SIZE,
                "font_color": "black",
                "num_format": self.DATE_FORMAT,
            }
        )
        self.format_timestamp_ms = self.workbook.add_format(
            {
                "font_size": FONT_SIZE,
                "font_color": "black",
                "num_format": self.DATE_FORMAT_MS,
            }
        )

    def write_excel(self):
        """Write every data file to its own worksheet and close the workbook.

        Worksheets are ordered by parser name, rows by timestamp.
        """
        data_files = sorted(self.data_files, key=lambda df: df.parser.worksheet_name, reverse=False)

        for data_file in data_files:
            worksheet = Worksheet(self, data_file)
            data_rows = sorted(data_file.data_rows, key=lambda dr: dr.timestamp, reverse=False)
            for i, data_row in enumerate(data_rows):
                worksheet.add_row(data_row, i + 1)

            if data_rows:
                worksheet.make_table(len(data_rows), data_file.parser.worksheet_name)
            else:
                # Just add headings
                for i, columns in enumerate(worksheet.columns):
                    worksheet.worksheet.write(0, i, columns["header"], columns["header_format"])
            worksheet.autofit()

        self.workbook.close()
        sys.stderr.write(
            "%soutput EXCEL file created: %s%s\n" % (Fore.WHITE, Fore.YELLOW, self.filename)
        )


class Worksheet(object):
    """A single worksheet holding one parser's transaction records.

    Sheet and table names are deduplicated across instances via the
    class-level sheet_names/table_names registries.
    """

    SHEETNAME_MAX_LEN = 31  # Excel's hard limit on sheet name length
    MAX_COL_WIDTH = 30

    # Class-level registries so names stay unique across all worksheets
    # in the workbook (keyed case-insensitively, as Excel requires).
    sheet_names = {}
    table_names = {}

    def __init__(self, output, data_file):
        self.output = output
        self.worksheet = output.workbook.add_worksheet(
            self._sheet_name(data_file.parser.worksheet_name)
        )
        self.col_width = {}
        self.columns = self._make_columns(data_file.parser.in_header)
        # NOTE: the helper's return names are themselves swapped, so this
        # "crossed" assignment is deliberate and yields the correct flags:
        # self.microseconds = sub-millisecond precision present,
        # self.milliseconds = any fractional seconds present.
        self.microseconds, self.milliseconds = self._is_microsecond_timestamp(data_file.data_rows)

        self.worksheet.freeze_panes(1, len(self.output.BITTYTAX_OUT_HEADER))

    def _sheet_name(self, parser_name):
        """Return a unique, Excel-legal sheet name derived from the parser name."""
        # Remove special characters
        name = re.sub(r"[/\\\?\*\[\]:]", "", parser_name)
        name = name[: self.SHEETNAME_MAX_LEN] if len(name) > self.SHEETNAME_MAX_LEN else name

        if name.lower() not in self.sheet_names:
            self.sheet_names[name.lower()] = 1
            sheet_name = name
        else:
            self.sheet_names[name.lower()] += 1
            sheet_name = "%s(%s)" % (name, self.sheet_names[name.lower()])
            if len(sheet_name) > self.SHEETNAME_MAX_LEN:
                # Truncate the base name so the "(n)" suffix still fits.
                sheet_name = "%s(%s)" % (
                    name[: len(name) - (len(sheet_name) - self.SHEETNAME_MAX_LEN)],
                    self.sheet_names[name.lower()],
                )
        return sheet_name

    def _table_name(self, parser_name):
        """Return a unique, Excel-legal table name derived from the parser name."""
        # Remove characters which are not allowed
        name = parser_name.replace(" ", "_")
        name = re.sub(r"[^a-zA-Z0-9\._]", "", name)

        if name.lower() not in self.table_names:
            self.table_names[name.lower()] = 1
        else:
            self.table_names[name.lower()] += 1
            name += str(self.table_names[name.lower()])
        return name

    def _make_columns(self, in_header):
        """Build the table column definitions: BittyTax output columns
        followed by the original input columns, with duplicate header
        names made unique by a numeric suffix."""
        if sys.version_info[0] < 3:
            in_header = [h.decode("utf8") for h in in_header]

        col_names = {}
        columns = []

        for col_num, col_name in enumerate(self.output.BITTYTAX_OUT_HEADER + in_header):
            if col_name.lower() not in col_names:
                col_names[col_name.lower()] = 1
            else:
                col_names[col_name.lower()] += 1
                col_name += str(col_names[col_name.lower()])

            if col_num < len(self.output.BITTYTAX_OUT_HEADER):
                columns.append({"header": col_name, "header_format": self.output.format_out_header})
            else:
                columns.append({"header": col_name, "header_format": self.output.format_in_header})
            self._autofit_calc(col_num, len(col_name))
        return columns

    @staticmethod
    def _is_microsecond_timestamp(data_rows):
        """Detect the finest timestamp precision present in the rows.

        Returns (sub-millisecond present, any fractional seconds present).
        The local variable names here are historically swapped; see the
        matching note in __init__.
        """
        milliseconds = bool(
            [
                dr.t_record.timestamp
                for dr in data_rows
                if dr.t_record and dr.t_record.timestamp.microsecond % 1000
            ]
        )
        microseconds = bool(
            [
                dr.t_record.timestamp
                for dr in data_rows
                if dr.t_record and dr.t_record.timestamp.microsecond
            ]
        )
        return milliseconds, microseconds

    def add_row(self, data_row, row_num):
        """Write one data row: the generated transaction record columns
        followed by the original input columns."""
        self.worksheet.set_row(row_num, None, self.output.format_out_data)

        # Add transaction record
        if data_row.t_record:
            self._xl_type(data_row.t_record.t_type, row_num, 0, data_row.t_record)
            self._xl_quantity(data_row.t_record.buy_quantity, row_num, 1)
            self._xl_asset(data_row.t_record.buy_asset, row_num, 2)
            self._xl_value(data_row.t_record.buy_value, row_num, 3)
            self._xl_quantity(data_row.t_record.sell_quantity, row_num, 4)
            self._xl_asset(data_row.t_record.sell_asset, row_num, 5)
            self._xl_value(data_row.t_record.sell_value, row_num, 6)
            self._xl_quantity(data_row.t_record.fee_quantity, row_num, 7)
            self._xl_asset(data_row.t_record.fee_asset, row_num, 8)
            self._xl_value(data_row.t_record.fee_value, row_num, 9)
            self._xl_wallet(data_row.t_record.wallet, row_num, 10)
            self._xl_timestamp(data_row.t_record.timestamp, row_num, 11)
            self._xl_note(data_row.t_record.note, row_num, 12)

        if sys.version_info[0] < 3:
            in_row = [r.decode("utf8") for r in data_row.row]
        else:
            in_row = data_row.row

        # Add original data
        for col_num, col_data in enumerate(in_row):
            if data_row.failure and data_row.failure.col_num == col_num:
                self.worksheet.write(
                    row_num,
                    len(self.output.BITTYTAX_OUT_HEADER) + col_num,
                    col_data,
                    self.output.format_in_data_err,
                )
            else:
                self.worksheet.write(
                    row_num,
                    len(self.output.BITTYTAX_OUT_HEADER) + col_num,
                    col_data,
                    self.output.format_in_data,
                )
            self._autofit_calc(len(self.output.BITTYTAX_OUT_HEADER) + col_num, len(col_data))

    def _xl_type(self, t_type, row_num, col_num, t_record):
        """Write the transaction type cell with an appropriate dropdown
        (data validation) list, flagging unrecognised types in red."""
        if t_type == TransactionOutRecord.TYPE_TRADE or t_record.buy_asset and t_record.sell_asset:
            self.worksheet.data_validation(
                row_num,
                col_num,
                row_num,
                col_num,
                {"validate": "list", "source": [TransactionOutRecord.TYPE_TRADE]},
            )
        elif (
            t_type in TransactionOutRecord.BUY_TYPES
            or t_record.buy_asset
            and not t_record.sell_asset
        ):
            self.worksheet.data_validation(
                row_num,
                col_num,
                row_num,
                col_num,
                {"validate": "list", "source": list(TransactionOutRecord.BUY_TYPES)},
            )
        elif (
            t_type in TransactionOutRecord.SELL_TYPES
            or t_record.sell_asset
            and not t_record.buy_asset
        ):
            self.worksheet.data_validation(
                row_num,
                col_num,
                row_num,
                col_num,
                {"validate": "list", "source": list(TransactionOutRecord.SELL_TYPES)},
            )

        self.worksheet.write_string(row_num, col_num, t_type)
        if t_type not in TransactionOutRecord.ALL_TYPES:
            self.worksheet.conditional_format(
                row_num,
                col_num,
                row_num,
                col_num,
                {
                    "type": "text",
                    "criteria": "begins with",
                    "value": "_",
                    "format": self.output.format_out_data_err,
                },
            )
        self._autofit_calc(col_num, len(t_type))

    def _xl_quantity(self, quantity, row_num, col_num):
        """Write a Decimal quantity; as a string when it exceeds Excel's
        numeric precision, otherwise as a number (integers reformatted
        via a conditional format)."""
        if quantity is not None:
            if len(quantity.normalize().as_tuple().digits) > OutputBase.EXCEL_PRECISION:
                self.worksheet.write_string(
                    row_num,
                    col_num,
                    "{0:f}".format(quantity.normalize()),
                    self.output.format_num_string,
                )
            else:
                self.worksheet.write_number(
                    row_num, col_num, quantity.normalize(), self.output.format_num_float
                )
                cell = xl_rowcol_to_cell(row_num, col_num)
                self.worksheet.conditional_format(
                    row_num,
                    col_num,
                    row_num,
                    col_num,
                    {
                        "type": "formula",
                        "criteria": "=INT(" + cell + ")=" + cell,
                        "format": self.output.format_num_int,
                    },
                )
            self._autofit_calc(col_num, len("{:0,f}".format(quantity.normalize())))

    def _xl_asset(self, asset, row_num, col_num):
        if sys.version_info[0] < 3:
            asset = asset.decode("utf8")

        self.worksheet.write_string(row_num, col_num, asset)
        self._autofit_calc(col_num, len(asset))

    def _xl_value(self, value, row_num, col_num):
        """Write a currency value (or a blank currency-formatted cell)."""
        if value is not None:
            self.worksheet.write_number(
                row_num, col_num, value.normalize(), self.output.format_currency
            )
            self._autofit_calc(col_num, len("£{:0,.2f}".format(value)))
        else:
            self.worksheet.write_blank(row_num, col_num, None, self.output.format_currency)

    def _xl_wallet(self, wallet, row_num, col_num):
        if sys.version_info[0] < 3:
            wallet = wallet.decode("utf8")

        self.worksheet.write_string(row_num, col_num, wallet)
        self._autofit_calc(col_num, len(wallet))

    def _xl_timestamp(self, timestamp, row_num, col_num):
        """Write a timestamp as naive UTC.

        Excel has no timezone support and can only display millisecond
        precision, so microsecond timestamps are written as strings.
        """
        utc_timestamp = timestamp.astimezone(config.TZ_UTC)
        # Bug fix: was `timestamp.replace(tzinfo=None)`, which discarded
        # the UTC conversion above and wrote the original local time.
        # Strip tzinfo from the *converted* value instead.
        utc_timestamp = utc_timestamp.replace(tzinfo=None)

        if self.microseconds:
            # Excel datetime can only display milliseconds
            self.worksheet.write_string(
                row_num,
                col_num,
                utc_timestamp.strftime(self.output.STR_FORMAT_MS),
                self.output.format_num_string,
            )
            self._autofit_calc(col_num, len(utc_timestamp.strftime(self.output.STR_FORMAT_MS)))
        elif self.milliseconds:
            self.worksheet.write_datetime(
                row_num, col_num, utc_timestamp, self.output.format_timestamp_ms
            )
            self._autofit_calc(col_num, len(self.output.DATE_FORMAT_MS))
        else:
            self.worksheet.write_datetime(
                row_num, col_num, utc_timestamp, self.output.format_timestamp
            )
            self._autofit_calc(col_num, len(self.output.DATE_FORMAT))

    def _xl_note(self, note, row_num, col_num):
        if sys.version_info[0] < 3:
            note = note.decode("utf8")

        self.worksheet.write_string(row_num, col_num, note)
        self._autofit_calc(col_num, len(note) if note else self.MAX_COL_WIDTH)

    def _autofit_calc(self, col_num, width):
        """Track the widest content seen per column, capped at MAX_COL_WIDTH."""
        if width > self.MAX_COL_WIDTH:
            width = self.MAX_COL_WIDTH

        if col_num in self.col_width:
            if width > self.col_width[col_num]:
                self.col_width[col_num] = width
        else:
            self.col_width[col_num] = width

    def autofit(self):
        """Apply the tracked widths to the worksheet columns."""
        for col_num in self.col_width:
            self.worksheet.set_column(col_num, col_num, self.col_width[col_num])

    def make_table(self, rows, parser_name):
        """Wrap the written rows in an Excel table for styling/filtering."""
        self.worksheet.add_table(
            0,
            0,
            rows,
            len(self.columns) - 1,
            {
                "autofilter": False,
                "style": "Table Style Medium 13",
                "columns": self.columns,
                "name": self._table_name(parser_name),
            },
        )
PypiClean
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/xdcalls/groups/group_call_participant.py
from ..py_object import PyObject


class GroupCallParticipant(PyObject):
    """Info about a group call participant.

    Attributes:
        user_id (``int``):
            Unique identifier of participant.
        muted (``bool``):
            Whether the participant is muted.
        muted_by_admin (``bool``):
            Whether the participant is muted by an admin.
        video (``bool``):
            Whether this participant is currently broadcasting a video stream.
        screen_sharing (``bool``):
            Whether this participant is currently sharing their screen.
        video_camera (``bool``):
            Whether this participant is currently broadcasting a video camera.
        raised_hand (``bool``):
            Whether this participant has raised their hand.
        volume (``int``):
            Volume level of the participant.

    Parameters:
        user_id (``int``):
            Telegram API parameter.
        muted (``bool``):
            Telegram API parameter.
        muted_by_admin (``bool``):
            Telegram API parameter.
        video (``bool``):
            Telegram API parameter.
        screen_sharing (``bool``):
            Telegram API parameter.
        video_camera (``bool``):
            Telegram API parameter.
        raised_hand (``bool``):
            Telegram API parameter.
        volume (``int``):
            Telegram API parameter.
    """

    def __init__(
        self,
        user_id: int,
        muted: bool,
        muted_by_admin: bool,
        video: bool,
        screen_sharing: bool,
        video_camera: bool,
        raised_hand: bool,
        volume: int,
    ):
        # Copy every constructor argument onto the instance verbatim;
        # this class is a plain data holder.
        fields = {
            "user_id": user_id,
            "muted": muted,
            "muted_by_admin": muted_by_admin,
            "video": video,
            "screen_sharing": screen_sharing,
            "video_camera": video_camera,
            "raised_hand": raised_hand,
            "volume": volume,
        }
        for attr, value in fields.items():
            setattr(self, attr, value)
PypiClean
/HEBO-0.3.2-py3-none-any.whl/hebo/optimizers/bo.py
# This program is free software; you can redistribute it and/or modify it under
# the terms of the MIT license.

# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the MIT License for more details.

import numpy as np
import pandas as pd
import torch

from hebo.design_space.design_space import DesignSpace
from hebo.models.model_factory import get_model
from hebo.acquisitions.acq import LCB
from hebo.acq_optimizers.evolution_optimizer import EvolutionOpt

from .abstract_optimizer import AbstractOptimizer


class BO(AbstractOptimizer):
    """Basic Bayesian optimisation loop.

    Fits a surrogate model to the observed data, minimises an LCB-style
    acquisition with an evolutionary optimizer, and falls back to random
    sampling until enough observations have been collected.
    """

    support_combinatorial = True
    support_contextual = True

    def __init__(
            self,
            space : DesignSpace,
            model_name = 'gpy',
            rand_sample = None,
            acq_cls = None,
            acq_conf = None):
        """
        Parameters
        ----------
        space : DesignSpace
            Search space definition.
        model_name : str
            Surrogate model registered in the model factory.
        rand_sample : int, optional
            Number of initial random suggestions before model-based
            search starts; defaults to num_paras + 1 (and never below 2).
        acq_cls : class, optional
            Acquisition function class; defaults to LCB.
        acq_conf : dict, optional
            Acquisition configuration; defaults to {'kappa': 2.0}.
        """
        super().__init__(space)
        self.space = space
        self.X = pd.DataFrame(columns = self.space.para_names)
        self.y = np.zeros((0, 1))
        self.model_name = model_name
        self.rand_sample = 1 + self.space.num_paras if rand_sample is None else max(2, rand_sample)
        self.acq_cls = LCB if acq_cls is None else acq_cls
        self.acq_conf = {'kappa' : 2.0} if acq_conf is None else acq_conf

    def suggest(self, n_suggestions = 1, fix_input = None):
        """Suggest the next point to evaluate.

        Random sampling is used until `rand_sample` observations exist;
        afterwards the surrogate + acquisition pipeline is used.

        Parameters
        ----------
        n_suggestions : int
            Must be 1; batch suggestion is not supported here.
        fix_input : dict, optional
            Parameter values to hold fixed in the suggestion.

        Returns
        -------
        pandas.DataFrame
            One suggested configuration.
        """
        assert n_suggestions == 1
        if self.X.shape[0] < self.rand_sample:
            sample = self.space.sample(n_suggestions)
            if fix_input is not None:
                for k, v in fix_input.items():
                    sample[k] = v
            return sample
        else:
            X, Xe = self.space.transform(self.X)
            y = torch.FloatTensor(self.y)
            num_uniqs = None if Xe.shape[1] == 0 else [len(self.space.paras[name].categories) for name in self.space.enum_names]
            model = get_model(self.model_name, X.shape[1], Xe.shape[1], y.shape[1], num_uniqs = num_uniqs, warp = False)
            model.fit(X, Xe, y)

            acq = self.acq_cls(model, **self.acq_conf)
            opt = EvolutionOpt(self.space, acq, pop = 100, iters = 100)
            # Seed the acquisition optimizer with the incumbent (best observed point)
            suggest = self.X.iloc[[np.argmin(self.y.reshape(-1))]]
            return opt.optimize(initial_suggest = suggest, fix_input = fix_input)

    def observe(self, X, y):
        """Feed an observation back.

        Rows with non-finite objective values are discarded.

        Parameters
        ----------
        X : pandas DataFrame
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,1)
            Corresponding values where objective has been evaluated
        """
        assert(y.shape[1] == 1)
        valid_id = np.where(np.isfinite(y.reshape(-1)))[0].tolist()
        XX = X.iloc[valid_id]
        yy = y[valid_id].reshape(-1, 1)
        # Fix: DataFrame.append was deprecated in pandas 1.4 and removed in
        # pandas 2.0; pd.concat is the behavior-identical replacement.
        self.X = pd.concat([self.X, XX], axis = 0, ignore_index = True)
        self.y = np.vstack([self.y, yy])

    @property
    def best_x(self)->pd.DataFrame:
        """Best observed configuration (raises if nothing observed yet)."""
        if self.X.shape[0] == 0:
            raise RuntimeError('No data has been observed!')
        else:
            return self.X.iloc[[self.y.argmin()]]

    @property
    def best_y(self)->float:
        """Best (minimum) observed objective value (raises if nothing observed yet)."""
        if self.X.shape[0] == 0:
            raise RuntimeError('No data has been observed!')
        else:
            return self.y.min()
PypiClean
/FastCNN2-1.23.425.1716.tar.gz/FastCNN2-1.23.425.1716/FastCNN/api/camera_.py
from flask_restful import Resource
from flask import request

# Restored import: the setModel/predict*/setYoloModel handlers below all call
# ``insValider``; with this import commented out every one of those handlers
# raised NameError at request time.
from FastCNN.prx.ValidProxy2 import insValider
from FastCNN.prx.PathProxy import PathProxy
from FastCNN.prx.CameraProxy import CV2Camera

# Single camera instance shared by all requests served by this module.
cam0 = CV2Camera()


class FastCNNApi(Resource):
    """Command-style REST resource.

    A POST request carries a ``cmd`` form field that selects the handler to
    run; each handler reads its own form fields and returns a dict with a
    ``success`` flag plus ``data`` or ``error``.
    """

    def __init__(self):
        pass

    def getStatus():
        """Health probe; always reports success."""
        rtn = {'success': False}
        rtn['success'] = True
        return rtn

    def getProjectNames():
        """List all known project names."""
        rtn = {'success': False}
        rtn['data'] = PathProxy.getProjectNames()
        rtn['success'] = True
        return rtn

    def getProjectTags():
        """List the model tags of the hard-coded "GongDa" project."""
        rtn = {'success': False}
        rtn['data'] = PathProxy.getProjectTags("GongDa")
        rtn['success'] = True
        return rtn

    def setModel():
        """Load a classification checkpoint (projectid/modeltag/ckpt form fields)."""
        rtn = {'success': False}

        projectid = request.form.get("projectid")
        if not projectid:
            rtn["error"] = "projectid is nesserary"
            return rtn

        modeltag = request.form.get("modeltag")
        if not modeltag:
            rtn["error"] = "modelid is nesserary"
            return rtn

        ckpt = request.form.get("ckpt")
        if not ckpt:
            rtn["error"] = "ckpt type is nesserary"
            return rtn

        try:
            rtn['data'] = insValider.setModel(projectid, modeltag, ckpt)
        except Exception as err:
            rtn['data'] = str(err)
            return rtn

        rtn['success'] = True
        return rtn

    def predictSingle():
        """Classify an image already on disk (imagepath form field)."""
        rtn = {'success': False}

        imagepath = request.form.get("imagepath")
        if not imagepath:
            rtn["error"] = "imagepath is nesserary"
            return rtn

        try:
            rtn['data'] = insValider.predictSingle(imagepath)
        except Exception as err:
            rtn['data'] = str(err)
            return rtn

        rtn['success'] = True
        return rtn

    def setYoloModel():
        """Load a YOLO detection model (projectid/modeltag form fields)."""
        rtn = {'success': False}

        projectid = request.form.get("projectid")
        if not projectid:
            rtn["error"] = "projectid is nesserary"
            return rtn

        modeltag = request.form.get("modeltag")
        if not modeltag:
            rtn["error"] = "modelid is nesserary"
            return rtn

        try:
            rtn['data'] = insValider.setYoloModel(projectid, modeltag)
        except Exception as err:
            rtn['data'] = str(err)
            return rtn

        rtn['success'] = True
        return rtn

    def predictYolo():
        """Run YOLO detection on an image already on disk (imagepath form field)."""
        rtn = {'success': False}

        imagepath = request.form.get("imagepath")
        if not imagepath:
            rtn["error"] = "imagepath is nesserary"
            return rtn

        try:
            rtn['data'] = insValider.predictYolo(imagepath)
        except Exception as err:
            rtn['data'] = str(err)
            return rtn

        rtn['success'] = True
        return rtn

    def predictPicture():
        """Classify an image with an explicitly selected project/model/checkpoint."""
        rtn = {'success': False}

        projectid = request.form.get("projectid")
        if not projectid:
            rtn["error"] = "projectid is nesserary"
            return rtn

        imagepath = request.form.get("imagepath")
        if not imagepath:
            rtn["error"] = "imagepath is nesserary"
            return rtn

        modeltag = request.form.get("modeltag")
        if not modeltag:
            rtn["error"] = "modeltag is nesserary"
            return rtn

        ckpt = request.form.get("ckpt")
        if not ckpt:
            rtn["error"] = "ckpt type is nesserary"
            return rtn

        try:
            rtn['data'] = insValider.predictPicture(imagepath, projectid, modeltag, ckpt)
        except Exception as err:
            rtn['data'] = str(err)
            print(err)
            return rtn

        rtn['success'] = True
        return rtn

    def startGrabbing():
        """Start continuous capture; returns the camera's grabbing state."""
        cam0.startCapture()
        return cam0.isGrabbing()

    def stopGrabbing():
        """Stop continuous capture; returns the camera's grabbing state."""
        cam0.stopCapture()
        return cam0.isGrabbing()

    def grabCapture():
        """Fetch the latest frame from the camera, if it is grabbing."""
        rtn = {"success": False}
        if cam0:
            if cam0.isGrabbing():
                rtn["success"] = True
                rtn["data"] = cam0.getCapture()
            else:
                rtn["error"] = "camera is not grabbing"
        else:
            rtn["error"] = "camera is not exist"
        return rtn

    def post(self):
        """Dispatch on the ``cmd`` form field; unknown commands return None."""
        handlers = {
            "getProjectNames": FastCNNApi.getProjectNames,
            "getProjectTags": FastCNNApi.getProjectTags,
            "predictPicture": FastCNNApi.predictPicture,
            "setModel": FastCNNApi.setModel,
            "predictSingle": FastCNNApi.predictSingle,
            "setYoloModel": FastCNNApi.setYoloModel,
            "predictYolo": FastCNNApi.predictYolo,
            "startGrabbing": FastCNNApi.startGrabbing,
            "stopGrabbing": FastCNNApi.stopGrabbing,
            "grabCapture": FastCNNApi.grabCapture,
        }
        handler = handlers.get(request.form.get('cmd'))
        if handler is not None:
            return handler()
PypiClean
/MJOLNIR-1.3.1.tar.gz/MJOLNIR-1.3.1/docs/InDepthDocumentation/VoronoiTessellation.rst
Voronoi tessellation and plotting functionality
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

With all of the effort put into building an instrument acquiring as much data and as many data points as possible, it is sensible to have a plotting algorithm that shows all of these. This is exactly what the two methods plotA3A4 and plotQPatches seek to do. However, performing calculations on and plotting all of the measured points makes the methods computationally heavy and slow, and also presents challenges for the visualization. Below is a list of difficulties encountered while building the two methods.

Difficulties:

 - Handling (almost) duplicate point positions
 - Generating a suitable patch around all points
 - The number of points to handle

The methods do address some of the above challenges; the almost-duplicate points are handled by truncating the precision of the floating point values holding the position. That is, :math:`\vec{q}=\left(0.1423,2.1132\right)` is by default rounded to :math:`\vec{q}=\left(0.142,2.113\right)` and binned with other points at the same position. This rounding is highly relevant when generating patches in :math:`A3`-:math:`A4` coordinates, as the discretization is intrinsic to the measurement scans performed. What is of real interest is the generation of a suitable patchwork around all of the points, to which this page is dedicated. The method must be agile and robust in order to handle all of the different scenarios encountered. For these requirements to be met, the Voronoi tessellation has been chosen.

Voronoi tessellation
--------------------

First of all, the Voronoi diagram is defined as the splitting of a given space into regions, where all points in one region are closer to one point than to any other point. That is, given an initial list of points, the Voronoi algorithm splits the space into just as many regions, such that all points in a given region are closer to the initial point inside it than to any other.
This method is suitable in many different areas of data treatment, e.g. to divide a city map into districts depending on which hospital is nearest. The method can, however, also be used for the specific task of creating pixels around each measurement point in a neutron scattering dataset. The method works in n-dimensional spaces, where hyper-volumes are created, and one can also change the distance metric from the normal Euclidean :math:`d = \sqrt{\Delta x^2+\Delta y^2+\Delta z^2 \cdots}` to other metrics, e.g. the so-called Manhattan distance :math:`d = |\Delta x|+|\Delta y|+|\Delta z|+\cdots`. It has, however, been judged that using multi-dimensional and non-Euclidean tessellations obscures the visualization of the data rather than enhancing it. Furthermore, the SciPy package spatial_ used here does not natively support changes of metric, and a rewrite is far outside the scope of this software suite.

.. _CombiVoronoi:

.. image:: Figures/RandomVoronoiCombi.png
   :width: 98 %

**Left**: 50 random points generated and plotted in 2D. **Right**: Voronoi diagram created for the 50 random points. Blue points are initial positions, orange are intersections, full lines are edges (denoted ridges) connecting two intersections, dashed lines go to infinity.

As seen above, for a randomly generated set of points, the Voronoi tessellation is also going to produce a somewhat random set of edges. This would of course be different if one instead had a structured set of points, as in StructuredVoronoi_ below. However, some of the edges still go to infinity, creating infinitely large pixels for all of the outer measurements. This is trivially un-physical and is to be dealt with by cutting or in another way limiting the outer pixels.

.. _StructuredVoronoi:

.. image:: Figures/StructuredVoronoi.png
   :width: 49 %

Voronoi diagram generated for a regular set of data points, as for instance an :math:`A3` rotation scan with equidistant :math:`A4` points.
From the above, it is even clearer that the edge pixels extend to infinity. This is to be taken care of, and two approaches come to mind. First, one could define a boundary such that the pixel edges intersecting this boundary are cut in a suitable manner. Second, one could add an extra set of data points around the actual measurement points in such a way that all of the wanted pixels remain finite. Both of these methods deal with the issue in part, but each creates new difficulties of its own: when cutting at the boundary, it still remains to figure out how and where the infinite lines intersect it and how best to cut; adding more points is in principle simple, but it is unclear how to choose them suitably in all cases. In reality, a combination of the two is used. That is, first extra points are added all around the measurement area, generating a bigger Voronoi diagram; secondly, the outer pixels are cut by the boundary. Thus the requirement on the position of the additional points is loosened, as one is free to add only a small number of extra points (specifically 8 extra points are added: above, below, left, right, and diagonally, with respect to the mean position).

.. _spatial: https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.spatial.Voronoi.html
PypiClean
/HOPP-0.0.5-py3-none-any.whl/tools/optimization/problem_parametrization.py
from abc import abstractmethod from typing import ( Tuple, Type, ) class ProblemParametrization: """ Interface between optimization drivers and the simulations being optimized that uses a parametrization of the original problem. """ def __init__(self, inner_problem, inner_problem_type: Type, candidate_type: Type ) -> None: self.inner_problem: inner_problem_type = inner_problem self.candidate_type: Type = candidate_type @abstractmethod def get_prior_params(self, distribution_type: Type ) -> dict: """ Returns the parameters for each parameter's distribution for a given distribution type :param distribution_type: str identifier ("Gaussian", "Bernoulli", ...) :return: dictionary of parameters """ pass @abstractmethod def make_inner_candidate_from_parameters(self, candidate: object, ) -> Tuple[float, any]: """ Transforms parametrized into inner problem candidate :param candidate: :return: a penalty and an inner candidate """ pass @abstractmethod def make_conforming_candidate_and_get_penalty(self, candidate: object ) -> tuple: """ Modifies a candidate's parameters so that it falls within range :param candidate: optimization candidate :return: conforming candidate, parameter error values """ pass def objective(self, parameters: object ) -> (float, float): """ Returns inner problem performance of parametrized candidate :param parameters: parametrized candidate :return: performance """ penalty, inner_candidate = self.make_inner_candidate_from_parameters(parameters) if isinstance(inner_candidate, list) or isinstance(inner_candidate, tuple): inner_candidate = inner_candidate[0] # print('ProblemParametrization::objective() inner_candidate', inner_candidate) evaluation = self.inner_problem.objective(inner_candidate) score = evaluation - penalty # print('ProblemParametrization::objective()', evaluation, penalty, evaluation - penalty) return score, evaluation def plot_candidate(self, parameters: object, *args, **kwargs) -> None: pass
PypiClean
/GoTermAnalysis-0.2.6.tar.gz/GoTermAnalysis-0.2.6/README.txt
This package is for gene ontology analysis. It has 2 main functions:
1. It receives a gene list and gives back the enrichment and merge results.
2. It can update to the newest gene ontology database.

First import this package:
from gotermanalysis import *

1. ############################Update#############################
How to update?
(1). #################update database##################
Before updating the database, the user must complete the following steps:
a. Download the newest database dump: http://archive.geneontology.org/latest-lite/
b. Add .sql to the current database dump file, for example: change "go_20151003-assocdb-data" to "go_20151003-assocdb-data.sql"
c. Log into the database on the server and type the following commands:
DROP DATABASE IF EXISTS assocdb
CREATE DATABASE assocdb
quit
d. Type the following command:
mysql -h localhost -u username -p assocdb < dbdump
for example:
mysql -h localhost -u username -p assocdb < go_20151003-assocdb-data.sql
e. Download the newest NCBI homo gene file: http://www.ncbi.nlm.nih.gov/gene/
Click Download/FTP in the left column; the directory is Data —> GENE_INFO —> Mammalia —> Homo_sapiens.gene_info.gz. After downloading it, change the file type to .txt.

Then create an instance for updating the database and call the function to update.
Parameters:
a. homo_gene_directory is the directory of the previously downloaded NCBI homo gene txt file.
Example of updating the database:
mydb = updateDB.UpdateDB(host, username, password, "assocdb", homo_gene_directory)
mydb.update()

(2) #################update pubmeds##################
###download and parse###
Parameters:
a. pubmed_directory is the directory where the user wants to store the pubmed articles
b. parsed_pubmed_directory is the directory where the user wants to store the parsed pubmed articles
Example of updating pubmeds:
tool = downloadPubMed.DownloadPubMed(host, username, password, "assocdb", pubmed_directory, parsed_pubmed_directory)
tool.parse()

###Named entity recognition process###
The named entity recognition tool this package uses is ABNER. It was developed by Burr Settles, Department of Computer Sciences, University of Wisconsin-Madison, and is written in Java. For more information, see: http://pages.cs.wisc.edu/~bsettles/abner/
Steps to use ABNER:
a. Find these 3 files: abner.jar, Tagging.java, Tagging.class. They are shipped as extra files in the package.
b. Once you have located them, enter the following command in a terminal:
java -cp .:abner.jar Tagging inputpath outputpath
inputpath indicates where your pubmeds are; outputpath indicates where you want to store the pubmeds after ABNER analysis.
An example of using ABNER:
java -cp .:abner.jar Tagging /Users/YUFAN/Desktop/parsedPubMeds /Users/YUFAN/Desktop/files.xml

(3). #################update weights##################
This part builds a GO term graph structure and calculates the new weights in this structure.
Parameters:
a. input_filepath: parsed pubmeds with ABNER
b. output_filepath: directory to store the output file; the output file is a GO term graph structure
Example of how to update weights:
g=goStructure.GoStructure(host, username, password, "assocdb", input_filepath, output_filepath)
g.updateWeights()

2. ############################Analysis############################
How to do gene ontology term analysis?
(1). ######enrichment######
Parameters:
a. inputfile: gene lists in a csv file: every row is a list, and the first column is the driver of that gene list.
b. outputfile_path: directory to store the enrichment results. The number of output files is the same as the number of gene lists in the input file. Each output file is named by the driver of its gene list.
c. p_value: minimum p-value required for go terms to be enriched
d. top: an optional parameter for picking the top number of enrichment results (e.g. top 5 or top 10); by default it is none.
Create an instance of the enrichment class, then call the function.
Example of how to use this class:
tool = enrichment.Enrichment(host, username, password, "assocdb", inputfile, outputfile_path, 0.01)
tool.enrich_csv(top = none)

(2) ######merge######
Parameters:
a. weightGographData: an xml file which represents the Gene Ontology structure, for example "weightedGoGraph.xml"
b. genelist: a csv file containing a gene list (each input csv file must contain only one gene list, which means it has only one row!!)
c. output: output_directory
d. p_value: minimum p-value required for go terms to be enriched
e. subGenelistNo: minimum number of genes required for go terms to be enriched
#Create a GoGraph object (Note: every time you use gotermSummarization(), you need to create a new object)
gograph = merge.GoGraph(weightGographData, genelist, output, p_value, subGenelistNo, host, username, password, "assocdb")
gograph.gotermSummarization()
Result is in the output directory.
PypiClean
/Argonaut-0.3.4.tar.gz/Argonaut-0.3.4/argonaut/public/ckeditor/lang/da.js
/* Copyright (c) 2003-2010, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.html or http://ckeditor.com/license */ CKEDITOR.lang.da={dir:'ltr',editorTitle:'Rich text editor, %1, press ALT 0 for help.',toolbar:'Toolbar',editor:'Rich Text Editor',source:'Kilde',newPage:'Ny side',save:'Gem',preview:'Vis eksempel',cut:'Klip',copy:'Kopiér',paste:'Indsæt',print:'Udskriv',underline:'Understreget',bold:'Fed',italic:'Kursiv',selectAll:'Vælg alt',removeFormat:'Fjern formatering',strike:'Gennemstreget',subscript:'Sænket skrift',superscript:'Hævet skrift',horizontalrule:'Indsæt vandret streg',pagebreak:'Indsæt sideskift',unlink:'Fjern hyperlink',undo:'Fortryd',redo:'Annullér fortryd',common:{browseServer:'Gennemse...',url:'URL',protocol:'Protokol',upload:'Upload',uploadSubmit:'Upload',image:'Indsæt billede',flash:'Indsæt Flash',form:'Indsæt formular',checkbox:'Indsæt afkrydsningsfelt',radio:'Indsæt alternativknap',textField:'Indsæt tekstfelt',textarea:'Indsæt tekstboks',hiddenField:'Indsæt skjult felt',button:'Indsæt knap',select:'Indsæt liste',imageButton:'Indsæt billedknap',notSet:'<intet valgt>',id:'Id',name:'Navn',langDir:'Tekstretning',langDirLtr:'Fra venstre mod højre (LTR)',langDirRtl:'Fra højre mod venstre (RTL)',langCode:'Sprogkode',longDescr:'Udvidet beskrivelse',cssClass:'Typografiark (CSS)',advisoryTitle:'Titel',cssStyle:'Typografi (CSS)',ok:'OK',cancel:'Annullér',close:'Close',preview:'Preview',generalTab:'Generelt',advancedTab:'Avanceret',validateNumberFailed:'Værdien er ikke et tal.',confirmNewPage:'Alt indhold, der ikke er blevet gemt, vil gå tabt. Er du sikker på, at du vil indlæse en ny side?',confirmCancel:'Nogle af indstillingerne er blevet ændret. 
Er du sikker på, at du vil lukke vinduet?',options:'Options',target:'Target',targetNew:'New Window (_blank)',targetTop:'Topmost Window (_top)',targetSelf:'Same Window (_self)',targetParent:'Parent Window (_parent)',langDirLTR:'Left to Right (LTR)',langDirRTL:'Right to Left (RTL)',styles:'Style',cssClasses:'Stylesheet Classes',unavailable:'%1<span class="cke_accessibility">, ikke tilgængelig</span>'},contextmenu:{options:'Context Menu Options'},specialChar:{toolbar:'Indsæt symbol',title:'Vælg symbol',options:'Special Character Options'},link:{toolbar:'Indsæt/redigér hyperlink',other:'<anden>',menu:'Redigér hyperlink',title:'Egenskaber for hyperlink',info:'Generelt',target:'Mål',upload:'Upload',advanced:'Avanceret',type:'Type',toUrl:'URL',toAnchor:'Bogmærke på denne side',toEmail:'E-mail',targetFrame:'<ramme>',targetPopup:'<popup vindue>',targetFrameName:'Destinationsvinduets navn',targetPopupName:'Popup vinduets navn',popupFeatures:'Egenskaber for popup',popupResizable:'Justérbar',popupStatusBar:'Statuslinje',popupLocationBar:'Adresselinje',popupToolbar:'Værktøjslinje',popupMenuBar:'Menulinje',popupFullScreen:'Fuld skærm (IE)',popupScrollBars:'Scrollbar',popupDependent:'Koblet/dependent (Netscape)',popupWidth:'Bredde',popupLeft:'Position fra venstre',popupHeight:'Højde',popupTop:'Position fra toppen',id:'Id',langDir:'Tekstretning',langDirLTR:'Fra venstre mod højre (LTR)',langDirRTL:'Fra højre mod venstre (RTL)',acccessKey:'Genvejstast',name:'Navn',langCode:'Tekstretning',tabIndex:'Tabulator indeks',advisoryTitle:'Titel',advisoryContentType:'Indholdstype',cssClasses:'Typografiark',charset:'Tegnsæt',styles:'Typografi',selectAnchor:'Vælg et anker',anchorName:'Efter anker navn',anchorId:'Efter element Id',emailAddress:'E-mail adresse',emailSubject:'Emne',emailBody:'Besked',noAnchors:'(Ingen bogmærker i dokumentet)',noUrl:'Indtast hyperlink URL!',noEmail:'Indtast e-mail adresse!'},anchor:{toolbar:'Indsæt/redigér bogmærke',menu:'Egenskaber for bogmærke',title:'Egenskaber 
for bogmærke',name:'Bogmærke navn',errorName:'Indtast bogmærke navn'},list:{numberedTitle:'Numbered List Properties',bulletedTitle:'Bulleted List Properties',type:'Type',start:'Start',validateStartNumber:'List start number must be a whole number.',circle:'Circle',disc:'Disc',square:'Square',none:'None',notset:'<not set>',armenian:'Armenian numbering',georgian:'Georgian numbering (an, ban, gan, etc.)',lowerRoman:'Lower Roman (i, ii, iii, iv, v, etc.)',upperRoman:'Upper Roman (I, II, III, IV, V, etc.)',lowerAlpha:'Lower Alpha (a, b, c, d, e, etc.)',upperAlpha:'Upper Alpha (A, B, C, D, E, etc.)',lowerGreek:'Lower Greek (alpha, beta, gamma, etc.)',decimal:'Decimal (1, 2, 3, etc.)',decimalLeadingZero:'Decimal leading zero (01, 02, 03, etc.)'},findAndReplace:{title:'Søg og erstat',find:'Søg',replace:'Erstat',findWhat:'Søg efter:',replaceWith:'Erstat med:',notFoundMsg:'Søgeteksten blev ikke fundet',matchCase:'Forskel på store og små bogstaver',matchWord:'Kun hele ord',matchCyclic:'Match cyklisk',replaceAll:'Erstat alle',replaceSuccessMsg:'%1 forekomst(er) erstattet.'},table:{toolbar:'Tabel',title:'Egenskaber for tabel',menu:'Egenskaber for tabel',deleteTable:'Slet tabel',rows:'Rækker',columns:'Kolonner',border:'Rammebredde',align:'Justering',alignLeft:'Venstrestillet',alignCenter:'Centreret',alignRight:'Højrestillet',width:'Bredde',widthPx:'pixels',widthPc:'procent',widthUnit:'width unit',height:'Højde',cellSpace:'Celleafstand',cellPad:'Cellemargen',caption:'Titel',summary:'Resumé',headers:'Header',headersNone:'Ingen',headersColumn:'Første kolonne',headersRow:'Første række',headersBoth:'Begge',invalidRows:'Antallet af rækker skal være større end 0.',invalidCols:'Antallet af kolonner skal være større end 0.',invalidBorder:'Rammetykkelse skal være et tal.',invalidWidth:'Tabelbredde skal være et tal.',invalidHeight:'Tabelhøjde skal være et tal.',invalidCellSpacing:'Celleafstand skal være et tal.',invalidCellPadding:'Cellemargen skal være et 
tal.',cell:{menu:'Celle',insertBefore:'Indsæt celle før',insertAfter:'Indsæt celle efter',deleteCell:'Slet celle',merge:'Flet celler',mergeRight:'Flet til højre',mergeDown:'Flet nedad',splitHorizontal:'Del celle vandret',splitVertical:'Del celle lodret',title:'Celleegenskaber',cellType:'Celletype',rowSpan:'Række span (rows span)',colSpan:'Kolonne span (columns span)',wordWrap:'Tekstombrydning',hAlign:'Vandret justering',vAlign:'Lodret justering',alignTop:'Top',alignMiddle:'Midt',alignBottom:'Bund',alignBaseline:'Grundlinje',bgColor:'Baggrundsfarve',borderColor:'Rammefarve',data:'Data',header:'Header',yes:'Ja',no:'Nej',invalidWidth:'Cellebredde skal være et tal.',invalidHeight:'Cellehøjde skal være et tal.',invalidRowSpan:'Række span skal være et heltal.',invalidColSpan:'Kolonne span skal være et heltal.',chooseColor:'Choose'},row:{menu:'Række',insertBefore:'Indsæt række før',insertAfter:'Indsæt række efter',deleteRow:'Slet række'},column:{menu:'Kolonne',insertBefore:'Indsæt kolonne før',insertAfter:'Indsæt kolonne efter',deleteColumn:'Slet kolonne'}},button:{title:'Egenskaber for knap',text:'Tekst',type:'Type',typeBtn:'Knap',typeSbm:'Send',typeRst:'Nulstil'},checkboxAndRadio:{checkboxTitle:'Egenskaber for afkrydsningsfelt',radioTitle:'Egenskaber for alternativknap',value:'Værdi',selected:'Valgt'},form:{title:'Egenskaber for formular',menu:'Egenskaber for formular',action:'Handling',method:'Metode',encoding:'Kodning (encoding)'},select:{title:'Egenskaber for liste',selectInfo:'Generelt',opAvail:'Valgmuligheder',value:'Værdi',size:'Størrelse',lines:'Linjer',chkMulti:'Tillad flere valg',opText:'Tekst',opValue:'Værdi',btnAdd:'Tilføj',btnModify:'Redigér',btnUp:'Op',btnDown:'Ned',btnSetValue:'Sæt som valgt',btnDelete:'Slet'},textarea:{title:'Egenskaber for tekstboks',cols:'Kolonner',rows:'Rækker'},textfield:{title:'Egenskaber for tekstfelt',name:'Navn',value:'Værdi',charWidth:'Bredde (tegn)',maxChars:'Max. 
antal tegn',type:'Type',typeText:'Tekst',typePass:'Adgangskode'},hidden:{title:'Egenskaber for skjult felt',name:'Navn',value:'Værdi'},image:{title:'Egenskaber for billede',titleButton:'Egenskaber for billedknap',menu:'Egenskaber for billede',infoTab:'Generelt',btnUpload:'Upload',upload:'Upload',alt:'Alternativ tekst',width:'Bredde',height:'Højde',lockRatio:'Lås størrelsesforhold',unlockRatio:'Unlock Ratio',resetSize:'Nulstil størrelse',border:'Ramme',hSpace:'Vandret margen',vSpace:'Lodret margen',align:'Justering',alignLeft:'Venstre',alignRight:'Højre',alertUrl:'Indtast stien til billedet',linkTab:'Hyperlink',button2Img:'Vil du lave billedknappen om til et almindeligt billede?',img2Button:'Vil du lave billedet om til en billedknap?',urlMissing:'Image source URL is missing.',validateWidth:'Width must be a whole number.',validateHeight:'Height must be a whole number.',validateBorder:'Border must be a whole number.',validateHSpace:'HSpace must be a whole number.',validateVSpace:'VSpace must be a whole number.'},flash:{properties:'Egenskaber for Flash',propertiesTab:'Egenskaber',title:'Egenskaber for Flash',chkPlay:'Automatisk afspilning',chkLoop:'Gentagelse',chkMenu:'Vis Flash menu',chkFull:'Tillad fuldskærm',scale:'Skalér',scaleAll:'Vis alt',scaleNoBorder:'Ingen ramme',scaleFit:'Tilpas størrelse',access:'Script adgang',accessAlways:'Altid',accessSameDomain:'Samme domæne',accessNever:'Aldrig',align:'Justering',alignLeft:'Venstre',alignAbsBottom:'Absolut nederst',alignAbsMiddle:'Absolut centreret',alignBaseline:'Grundlinje',alignBottom:'Nederst',alignMiddle:'Centreret',alignRight:'Højre',alignTextTop:'Toppen af teksten',alignTop:'Øverst',quality:'Kvalitet',qualityBest:'Bedste',qualityHigh:'Høj',qualityAutoHigh:'Auto høj',qualityMedium:'Medium',qualityAutoLow:'Auto lav',qualityLow:'Lav',windowModeWindow:'Vindue',windowModeOpaque:'Gennemsigtig (opaque)',windowModeTransparent:'Transparent',windowMode:'Vinduestilstand',flashvars:'Variabler for 
Flash',bgcolor:'Baggrundsfarve',width:'Bredde',height:'Højde',hSpace:'Vandret margen',vSpace:'Lodret margen',validateSrc:'Indtast hyperlink URL!',validateWidth:'Bredde skal være et tal.',validateHeight:'Højde skal være et tal.',validateHSpace:'Vandret margen skal være et tal.',validateVSpace:'Lodret margen skal være et tal.'},spellCheck:{toolbar:'Stavekontrol',title:'Stavekontrol',notAvailable:'Stavekontrol er desværre ikke tilgængelig.',errorLoading:'Fejl ved indlæsning af host: %s.',notInDic:'Ikke i ordbogen',changeTo:'Forslag',btnIgnore:'Ignorér',btnIgnoreAll:'Ignorér alle',btnReplace:'Erstat',btnReplaceAll:'Erstat alle',btnUndo:'Tilbage',noSuggestions:'(ingen forslag)',progress:'Stavekontrollen arbejder...',noMispell:'Stavekontrol færdig: Ingen fejl fundet',noChanges:'Stavekontrol færdig: Ingen ord ændret',oneChange:'Stavekontrol færdig: Et ord ændret',manyChanges:'Stavekontrol færdig: %1 ord ændret',ieSpellDownload:'Stavekontrol ikke installeret. Vil du installere den nu?'},smiley:{toolbar:'Smiley',title:'Vælg smiley',options:'Smiley Options'},elementsPath:{eleLabel:'Elements path',eleTitle:'%1 element'},numberedlist:'Talopstilling',bulletedlist:'Punktopstilling',indent:'Forøg indrykning',outdent:'Formindsk indrykning',justify:{left:'Venstrestillet',center:'Centreret',right:'Højrestillet',block:'Lige margener'},blockquote:'Blokcitat',clipboard:{title:'Indsæt',cutError:'Din browsers sikkerhedsindstillinger tillader ikke editoren at få automatisk adgang til udklipsholderen.<br><br>Brug i stedet tastaturet til at klippe teksten (Ctrl/Cmd+X).',copyError:'Din browsers sikkerhedsindstillinger tillader ikke editoren at få automatisk adgang til udklipsholderen.<br><br>Brug i stedet tastaturet til at kopiere teksten (Ctrl/Cmd+C).',pasteMsg:'Indsæt i feltet herunder (<STRONG>Ctrl/Cmd+V</STRONG>) og klik på <STRONG>OK</STRONG>.',securityMsg:'Din browsers sikkerhedsindstillinger tillader ikke editoren at få automatisk adgang til udklipsholderen.<br><br>Du skal indsætte 
udklipsholderens indhold i dette vindue igen.',pasteArea:'Paste Area'},pastefromword:{confirmCleanup:'Den tekst du forsøger at indsætte ser ud til at komme fra Word. Vil du rense teksten før den indsættes?',toolbar:'Indsæt fra Word',title:'Indsæt fra Word',error:'It was not possible to clean up the pasted data due to an internal error'},pasteText:{button:'Indsæt som ikke-formateret tekst',title:'Indsæt som ikke-formateret tekst'},templates:{button:'Skabeloner',title:'Indholdsskabeloner',options:'Template Options',insertOption:'Erstat det faktiske indhold',selectPromptMsg:'Vælg den skabelon, som skal åbnes i editoren (nuværende indhold vil blive overskrevet):',emptyListMsg:'(Der er ikke defineret nogen skabelon)'},showBlocks:'Vis afsnitsmærker',stylesCombo:{label:'Typografi',panelTitle:'Formatting Styles',panelTitle1:'Block typografi',panelTitle2:'Inline typografi',panelTitle3:'Object typografi'},format:{label:'Formatering',panelTitle:'Formatering',tag_p:'Normal',tag_pre:'Formateret',tag_address:'Adresse',tag_h1:'Overskrift 1',tag_h2:'Overskrift 2',tag_h3:'Overskrift 3',tag_h4:'Overskrift 4',tag_h5:'Overskrift 5',tag_h6:'Overskrift 6',tag_div:'Normal (DIV)'},div:{title:'Create Div Container',toolbar:'Create Div Container',cssClassInputLabel:'Stylesheet Classes',styleSelectLabel:'Style',IdInputLabel:'Id',languageCodeInputLabel:' Language Code',inlineStyleInputLabel:'Inline Style',advisoryTitleInputLabel:'Advisory Title',langDirLabel:'Language Direction',langDirLTRLabel:'Left to Right (LTR)',langDirRTLLabel:'Right to Left (RTL)',edit:'Edit Div',remove:'Remove Div'},font:{label:'Skrifttype',voiceLabel:'Skrifttype',panelTitle:'Skrifttype'},fontSize:{label:'Skriftstørrelse',voiceLabel:'Skriftstørrelse',panelTitle:'Skriftstørrelse'},colorButton:{textColorTitle:'Tekstfarve',bgColorTitle:'Baggrundsfarve',panelTitle:'Colors',auto:'Automatisk',more:'Flere farver...'},colors:{'000':'Black',800000:'Maroon','8B4513':'Saddle Brown','2F4F4F':'Dark Slate 
Gray','008080':'Teal','000080':'Navy','4B0082':'Indigo',696969:'Dark Gray',B22222:'Fire Brick',A52A2A:'Brown',DAA520:'Golden Rod','006400':'Dark Green','40E0D0':'Turquoise','0000CD':'Medium Blue',800080:'Purple',808080:'Gray',F00:'Red',FF8C00:'Dark Orange',FFD700:'Gold','008000':'Green','0FF':'Cyan','00F':'Blue',EE82EE:'Violet',A9A9A9:'Dim Gray',FFA07A:'Light Salmon',FFA500:'Orange',FFFF00:'Yellow','00FF00':'Lime',AFEEEE:'Pale Turquoise',ADD8E6:'Light Blue',DDA0DD:'Plum',D3D3D3:'Light Grey',FFF0F5:'Lavender Blush',FAEBD7:'Antique White',FFFFE0:'Light Yellow',F0FFF0:'Honeydew',F0FFFF:'Azure',F0F8FF:'Alice Blue',E6E6FA:'Lavender',FFF:'White'},scayt:{title:'Stavekontrol mens du skriver',opera_title:'Not supported by Opera',enable:'Aktivér SCAYT',disable:'Deaktivér SCAYT',about:'Om SCAYT',toggle:'Skift/toggle SCAYT',options:'Indstillinger',langs:'Sprog',moreSuggestions:'Flere forslag',ignore:'Ignorér',ignoreAll:'Ignorér alle',addWord:'Tilføj ord',emptyDic:'Ordbogsnavn må ikke være tom.',optionsTab:'Indstillinger',allCaps:'Ignore All-Caps Words',ignoreDomainNames:'Ignore Domain Names',mixedCase:'Ignore Words with Mixed Case',mixedWithDigits:'Ignore Words with Numbers',languagesTab:'Sprog',dictionariesTab:'Ordbøger',dic_field_name:'Dictionary name',dic_create:'Create',dic_restore:'Restore',dic_delete:'Delete',dic_rename:'Rename',dic_info:"Initially the User Dictionary is stored in a Cookie. However, Cookies are limited in size. When the User Dictionary grows to a point where it cannot be stored in a Cookie, then the dictionary may be stored on our server. To store your personal dictionary on our server you should specify a name for your dictionary. If you already have a stored dictionary, please type it's name and click the Restore button.",aboutTab:'Om'},about:{title:'Om CKEditor',dlgTitle:'Om CKEditor',moreInfo:'For informationer omkring licens, se venligst vores hjemmeside (på engelsk):',copy:'Copyright &copy; $1. 
Alle rettigheder forbeholdes.'},maximize:'Maximér',minimize:'Minimize',fakeobjects:{anchor:'Anker',flash:'Flashanimation',div:'Sideskift',unknown:'Ukendt objekt'},resize:'Træk for at skalere',colordialog:{title:'Select color',options:'Color Options',highlight:'Highlight',selected:'Selected Color',clear:'Clear'},toolbarCollapse:'Collapse Toolbar',toolbarExpand:'Expand Toolbar',bidi:{ltr:'Text direction from left to right',rtl:'Text direction from right to left'}};
PypiClean
/Apiamtic_python-1.6.9-py3-none-any.whl/verizon5gmecvnspapi/models/__init__.py
__all__ = [ 'tag', 'service_tag', 'service_onboarding_additional_params', 'service_onboarding_helm_yaml_git_tag', 'workflow', 'service_swagger_spec_id', 'services', 'compatibility', 'resource_base', 'service_error', 'category', 'repository_credential', 'azure_csp_profile', 'csp_profile_data', 'service_onboarding_helm_git_branch', 'service_onboarding_helm_helmrepo', 'service_onboarding_yaml_git_branch', 'service_onboarding_terraform_git_tag', 'operations_wf', 'installation_wf', 'workload', 'service_delete_result', 'running_instance', 'edge_service_onboarding_delete_result', 'claim_status_request', 'aws_csp_profile', 'service_management_result', 'observability_template', 'service_resource', 'category_list', 'repository', 'service_onboarding_helm_git_tag', 'selected_service', 'claim', 'edge_service_onboarding_result', 'csp_profile', 'csp_profile_id_request', 'current_status', 'boundary', 'service_file', 'service', 'associate_cloud_credential_result', 'service_handler_id', 'service_dependency', 'service_onboarding_terraform_git_branch', 'custom_wf', 'dependent_service', 'service_claims', 'default_location', 'cluster_info_details', 'o_auth_token', 'service_state_enum', 'workflow_type_enum', 'workload_revision_type_enum', 'service_status_enum', 'service_type_enum', 'hook_type_enum', 'claim_type_enum', 'repository_reacheability_enum', 'csp_profile_type_enum', 'aws_csp_profile_cred_type_enum', 'sand_box_state_enum', 'sort_direction_enum', 'service_dependency_package_type_enum', 'csp_compatibility_enum', 'source_code_type_enum', 'event_type_enum', 'sand_box_status_enum', 'category_type_enum', 'repository_credential_type_enum', 'edge_service_repository_type_enum', 'upload_type_enum', 'workload_repository_type_enum', 'dependent_services_type_enum', 'claim_status_enum', 'o_auth_scope_enum', 'o_auth_provider_error_enum', ]
PypiClean
/OBP_reliability_pillar_3-0.0.13.tar.gz/OBP_reliability_pillar_3-0.0.13/OBP_reliability_pillar_4/elastic_beanstalk/enhanced_health_reporting_enabled.py
import botocore
import logging

from botocore.exceptions import ClientError

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()


def enhanced_health_reporting_enabled(self) -> dict:
    """
    Compliance check: is every Elastic Beanstalk environment configured
    for enhanced health reporting?

    Iterates every region available for the 'elasticbeanstalk' service and
    pages through ``describe_environments``.  An environment is flagged as
    an offender when its ``HealthStatus`` field is empty or absent
    (the field is populated when enhanced health reporting is in effect --
    TODO confirm against the DescribeEnvironments API reference).

    :param self: object exposing a boto3 ``session`` attribute
    :return: dict with Result, failReason, Offenders and control metadata
    """
    logger.info(" ---Inside elastic_beanstalk :: enhanced_health_reporting_enabled()")

    regions = self.session.get_available_regions('elasticbeanstalk')

    result = True
    failReason = ''
    offenders = []
    control_id = 'Id3.13'
    compliance_type = "Beanstalk Enhanced Health Reporting Enabled"
    description = "Checks if an AWS Elastic Beanstalk environment is configured for enhanced health reporting. The rule is COMPLIANT if the environment is configured for enhanced health reporting."
    resource_type = "Elastic Beanstalk"
    risk_level = 'Medium'

    for region in regions:
        try:
            client = self.session.client('elasticbeanstalk', region_name=region)
            marker = None
            while True:
                # First page is requested without NextToken; subsequent pages
                # pass the token returned by the previous call.
                if marker:
                    response_describe_eb = client.describe_environments(
                        NextToken=marker
                    )
                else:
                    response_describe_eb = client.describe_environments()

                for env in response_describe_eb['Environments']:
                    # Bug fix: 'HealthStatus' may be missing entirely when
                    # enhanced health is not enabled; the original direct
                    # indexing raised an uncaught KeyError (only ClientError
                    # is handled below).  Missing or empty => offender.
                    if not env.get('HealthStatus'):
                        result = False
                        failReason = 'AWS Elastic Beanstalk environment is not configured for enhanced health reporting.'
                        offenders.append(env['EnvironmentId'])

                marker = response_describe_eb.get('NextToken')
                if not marker:
                    break

        except ClientError as e:
            logger.error("Something went wrong with region {}: {}".format(region, e))

    return {
        'Result': result,
        'failReason': failReason,
        'resource_type': resource_type,
        'ControlId': control_id,
        'Offenders': offenders,
        'Compliance_type': compliance_type,
        'Description': description,
        'Risk Level': risk_level
    }
PypiClean
/AX3%20OTP%20Auth-1.0.5.tar.gz/AX3 OTP Auth-1.0.5/ax3_OTP_Auth/hotp.py
from secrets import token_urlsafe

from django.core.cache import cache
from django.utils.module_loading import import_string

import boto3
import pyotp

from . import settings


class HOTP:
    """HMAC-based one-time-password (HOTP) flow backed by the Django cache.

    Per ``unique_id`` the cache stores: the HOTP secret, the HOTP counter
    and (after a successful verification) a token -> phone-number mapping.
    All entries share the same TTL, ``settings.OTP_AUTH_TTL``.
    """

    def __init__(self, unique_id: str, digits: int = 6):
        # unique_id namespaces the cache keys so concurrent users don't clash.
        self._unique_id = unique_id
        self._digits = digits
        self._ttl = settings.OTP_AUTH_TTL

    def _create_secret(self, secret: str) -> str:
        """Store *secret* under '<unique_id>.secret' and return it unchanged."""
        cache.set('{}.secret'.format(self._unique_id), secret, timeout=self._ttl)
        return secret

    def _create_counter(self) -> int:
        """Increment (or initialise to 1) the per-user HOTP counter.

        ``cache.incr`` raises ValueError when the key does not exist yet;
        in that case the counter is created with value 1.  Returns the
        current counter value.
        """
        try:
            cache.incr('{}.counter'.format(self._unique_id))
        except ValueError:
            cache.set('{}.counter'.format(self._unique_id), 1, timeout=self._ttl)

        return cache.get('{}.counter'.format(self._unique_id))

    def _create_token(self, phone_number: int) -> str:
        """Create a random URL-safe token that maps to *phone_number*."""
        token = token_urlsafe()
        cache.set(token, phone_number, timeout=self._ttl)
        return token

    def _get_secret(self):
        # May return None when the secret expired or was never created.
        return cache.get('{}.secret'.format(self._unique_id))

    def _get_counter(self):
        # May return None when the counter expired or was never created.
        return cache.get('{}.counter'.format(self._unique_id))

    def _send_sms(self, sms_code: int, country_code: str, phone_number: int):
        """Deliver *sms_code* via a custom gateway if configured, else AWS SNS."""
        message = settings.OTP_AUTH_MESSAGE.format(sms_code)

        if settings.OTP_CUSTOM_SMS_GATEWAY:
            # Dotted-path callable supplied by the host project.
            gateway = import_string(settings.OTP_CUSTOM_SMS_GATEWAY)
            gateway(country_code=country_code, phone_number=phone_number, message=message)
        else:
            sns = boto3.client(
                'sns',
                aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
                aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
                region_name=settings.AWS_DEFAULT_REGION
            )
            sns.publish(
                PhoneNumber=f'+{country_code}{phone_number}',
                Message=message,
                MessageAttributes={
                    'AWS.SNS.SMS.SMSType': {
                        'DataType': 'String',
                        'StringValue': 'Transactional'
                    }
                }
            )

    def create(self, country_code: str, phone_number: int):
        """Generate a fresh secret/counter pair and SMS the one-time code."""
        secret = self._create_secret(secret=pyotp.random_base32(length=32))
        counter = self._create_counter()

        hotp = pyotp.HOTP(secret, digits=self._digits)
        self._send_sms(
            sms_code=hotp.at(counter),
            country_code=country_code,
            phone_number=phone_number
        )

    def verify(self, sms_code: int, phone_number: int) -> "str | None":
        """Check *sms_code* against the stored secret/counter.

        Returns a one-shot token on success, None when the code is wrong or
        the secret/counter expired (annotation corrected from ``str``).
        """
        secret = self._get_secret()
        count = self._get_counter()

        if count and secret:
            hotp = pyotp.HOTP(secret, digits=self._digits)
            if hotp.verify(sms_code, count):
                return self._create_token(phone_number=phone_number)
        return None

    def get_phone_number(self, token: str) -> int:
        """Resolve *token* to its phone number and purge all related keys."""
        phone_number = cache.get(token)
        cache.delete(token)
        # NOTE(review): delete_pattern is not part of Django's core cache
        # API; it is provided by backends such as django-redis -- confirm
        # the configured backend supports it.
        cache.delete_pattern('{}.*'.format(self._unique_id))
        return phone_number
PypiClean
/IDS_XS_TOOLS_UiS-0.3-py3-none-any.whl/ids_xs_tools/pyueye_example_camera.py
# ------------------------------------------------------------------------------ # PyuEye example - camera modul # # Copyright (c) 2017 by IDS Imaging Development Systems GmbH. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# ------------------------------------------------------------------------------

from pyueye import ueye
from ids_xs_tools.pyueye_example_utils import (uEyeException, Rect, get_bits_per_pixel,
                                               ImageBuffer, check)


class Camera:
    """Thin wrapper around an IDS uEye camera handle.

    Manages the camera lifetime (usable as a context manager) and a list of
    allocated image buffers for sequence capture.
    """

    def __init__(self, device_id=0):
        # Camera handle; set to None again once the camera is closed/invalid.
        self.h_cam = ueye.HIDS(device_id)
        # ImageBuffer objects currently registered in the capture sequence.
        self.img_buffers = []

    def __enter__(self):
        """Open the camera on `with` entry."""
        self.init()
        return self

    def __exit__(self, _type, value, traceback):
        """Release the camera on `with` exit."""
        self.exit()

    def handle(self):
        """Return the raw uEye camera handle."""
        return self.h_cam

    def alloc(self, buffer_count=3):
        """Free any previously allocated buffers, then allocate and enqueue
        *buffer_count* image buffers sized to the current AOI and colormode."""
        rect = self.get_aoi()
        bpp = get_bits_per_pixel(self.get_colormode())

        for buff in self.img_buffers:
            check(ueye.is_FreeImageMem(self.h_cam, buff.mem_ptr, buff.mem_id))

        for i in range(buffer_count):
            buff = ImageBuffer()
            # NOTE(review): unlike the surrounding calls, the return code of
            # is_AllocImageMem is not passed through check() -- an allocation
            # failure would go unnoticed here.
            ueye.is_AllocImageMem(self.h_cam,
                                  rect.width, rect.height, bpp,
                                  buff.mem_ptr, buff.mem_id)
            check(ueye.is_AddToSequence(self.h_cam, buff.mem_ptr, buff.mem_id))
            self.img_buffers.append(buff)
        # ueye.is_InitImageQueue(self.h_cam, 0)
        return

    def init(self):
        """Initialise the camera; raise uEyeException on failure."""
        ret = ueye.is_InitCamera(self.h_cam, None)
        if ret != ueye.IS_SUCCESS:
            self.h_cam = None
            raise uEyeException(ret)
        # return ret

    def exit(self):
        """Close the camera if it is open; clears the handle on success."""
        ret = None
        if self.h_cam is not None:
            ret = ueye.is_ExitCamera(self.h_cam)
        if ret == ueye.IS_SUCCESS:
            self.h_cam = None
        return

    def get_aoi(self):
        """Query the current area of interest as a Rect(x, y, width, height)."""
        rect_aoi = ueye.IS_RECT()
        ueye.is_AOI(self.h_cam, ueye.IS_AOI_IMAGE_GET_AOI, rect_aoi, ueye.sizeof(rect_aoi))

        return Rect(rect_aoi.s32X.value,
                    rect_aoi.s32Y.value,
                    rect_aoi.s32Width.value,
                    rect_aoi.s32Height.value)

    def set_aoi(self, x, y, width, height):
        """Set the area of interest; returns the uEye status code."""
        rect_aoi = ueye.IS_RECT()
        rect_aoi.s32X = ueye.int(x)
        rect_aoi.s32Y = ueye.int(y)
        rect_aoi.s32Width = ueye.int(width)
        rect_aoi.s32Height = ueye.int(height)

        return ueye.is_AOI(self.h_cam, ueye.IS_AOI_IMAGE_SET_AOI, rect_aoi, ueye.sizeof(rect_aoi))

    def capture_video(self, wait=False):
        """Start live video capture; optionally wait for the first frame."""
        wait_param = ueye.IS_WAIT if wait else ueye.IS_DONT_WAIT
        # The line above is Python's version of C's ternary operator "?:".
        # See the Python 3.6 documentation, ch. 6.12, Conditional expressions:
        # https://docs.python.org/3.6/reference/expressions.html#grammar-token-or_test
        return ueye.is_CaptureVideo(self.h_cam, wait_param)

    def stop_video(self):
        """Stop live video capture immediately."""
        return ueye.is_StopLiveVideo(self.h_cam, ueye.IS_FORCE_VIDEO_STOP)

    def freeze_video(self, wait=False):
        """Capture a single frame; optionally wait for it to complete."""
        wait_param = ueye.IS_WAIT if wait else ueye.IS_DONT_WAIT
        return ueye.is_FreezeVideo(self.h_cam, wait_param)

    def set_colormode(self, colormode):
        """Set the camera colour mode; raises via check() on failure."""
        check(ueye.is_SetColorMode(self.h_cam, colormode))

    def get_colormode(self):
        """Return the currently active colour mode."""
        ret = ueye.is_SetColorMode(self.h_cam, ueye.IS_GET_COLOR_MODE)
        return ret

    def get_format_list(self):
        """Query and return the list of supported image formats."""
        count = ueye.UINT()
        check(ueye.is_ImageFormat(self.h_cam, ueye.IMGFRMT_CMD_GET_NUM_ENTRIES, count, ueye.sizeof(count)))
        format_list = ueye.IMAGE_FORMAT_LIST(ueye.IMAGE_FORMAT_INFO * count.value)
        format_list.nSizeOfListEntry = ueye.sizeof(ueye.IMAGE_FORMAT_INFO)
        format_list.nNumListElements = count.value
        check(ueye.is_ImageFormat(self.h_cam, ueye.IMGFRMT_CMD_GET_LIST,
                                  format_list, ueye.sizeof(format_list)))
        return format_list
PypiClean
/Hikka_TL-1.24.14-py3-none-any.whl/telethon/network/authenticator.py
import os
import time
from hashlib import sha1

from ..tl.types import (
    ResPQ, PQInnerData, ServerDHParamsFail, ServerDHParamsOk,
    ServerDHInnerData, ClientDHInnerData, DhGenOk, DhGenRetry, DhGenFail,
)
from .. import helpers
from ..crypto import AES, AuthKey, Factorization, rsa
from ..errors import SecurityError
from ..extensions import BinaryReader
from ..tl.functions import (
    ReqPqMultiRequest, ReqDHParamsRequest, SetClientDHParamsRequest,
)


async def do_authentication(sender):
    """
    Executes the authentication process with the Telegram servers.

    :param sender: a connected `MTProtoPlainSender`.
    :return: returns a (authorization key, time offset) tuple.
    """
    # Step 1 sending: PQ Request, endianness doesn't matter since it's random
    nonce = int.from_bytes(os.urandom(16), "big", signed=True)
    res_pq = await sender.send(ReqPqMultiRequest(nonce))
    assert isinstance(res_pq, ResPQ), "Step 1 answer was %s" % res_pq

    if res_pq.nonce != nonce:
        raise SecurityError("Step 1 invalid nonce from server")

    pq = get_int(res_pq.pq)

    # Step 2 sending: DH Exchange
    p, q = Factorization.factorize(pq)
    p, q = rsa.get_byte_array(p), rsa.get_byte_array(q)
    new_nonce = int.from_bytes(os.urandom(32), "little", signed=True)

    pq_inner_data = bytes(
        PQInnerData(
            pq=rsa.get_byte_array(pq),
            p=p,
            q=q,
            nonce=res_pq.nonce,
            server_nonce=res_pq.server_nonce,
            new_nonce=new_nonce,
        )
    )

    # sha_digest + data + random_bytes
    cipher_text, target_fingerprint = None, None
    for fingerprint in res_pq.server_public_key_fingerprints:
        cipher_text = rsa.encrypt(fingerprint, pq_inner_data)
        if cipher_text is not None:
            target_fingerprint = fingerprint
            break

    if cipher_text is None:
        # Second attempt, but now we're allowed to use old keys
        for fingerprint in res_pq.server_public_key_fingerprints:
            cipher_text = rsa.encrypt(fingerprint, pq_inner_data, use_old=True)
            if cipher_text is not None:
                target_fingerprint = fingerprint
                break

        if cipher_text is None:
            raise SecurityError(
                "Step 2 could not find a valid key for fingerprints: {}".format(
                    ", ".join([str(f) for f in res_pq.server_public_key_fingerprints])
                )
            )

    server_dh_params = await sender.send(
        ReqDHParamsRequest(
            nonce=res_pq.nonce,
            server_nonce=res_pq.server_nonce,
            p=p,
            q=q,
            public_key_fingerprint=target_fingerprint,
            encrypted_data=cipher_text,
        )
    )

    assert isinstance(server_dh_params, (ServerDHParamsOk, ServerDHParamsFail)), (
        "Step 2.1 answer was %s" % server_dh_params
    )

    if server_dh_params.nonce != res_pq.nonce:
        raise SecurityError("Step 2 invalid nonce from server")

    if server_dh_params.server_nonce != res_pq.server_nonce:
        raise SecurityError("Step 2 invalid server nonce from server")

    if isinstance(server_dh_params, ServerDHParamsFail):
        nnh = int.from_bytes(
            sha1(new_nonce.to_bytes(32, "little", signed=True)).digest()[4:20],
            "little",
            signed=True,
        )
        if server_dh_params.new_nonce_hash != nnh:
            raise SecurityError("Step 2 invalid DH fail nonce from server")

    assert isinstance(server_dh_params, ServerDHParamsOk), (
        "Step 2.2 answer was %s" % server_dh_params
    )

    # Step 3 sending: Complete DH Exchange
    key, iv = helpers.generate_key_data_from_nonce(res_pq.server_nonce, new_nonce)
    if len(server_dh_params.encrypted_answer) % 16 != 0:
        # See PR#453
        raise SecurityError("Step 3 AES block size mismatch")

    plain_text_answer = AES.decrypt_ige(server_dh_params.encrypted_answer, key, iv)

    with BinaryReader(plain_text_answer) as reader:
        reader.read(20)  # hash sum
        server_dh_inner = reader.tgread_object()

    assert isinstance(server_dh_inner, ServerDHInnerData), (
        "Step 3 answer was %s" % server_dh_inner
    )

    if server_dh_inner.nonce != res_pq.nonce:
        raise SecurityError("Step 3 Invalid nonce in encrypted answer")

    if server_dh_inner.server_nonce != res_pq.server_nonce:
        raise SecurityError("Step 3 Invalid server nonce in encrypted answer")

    dh_prime = get_int(server_dh_inner.dh_prime, signed=False)
    g = server_dh_inner.g
    g_a = get_int(server_dh_inner.g_a, signed=False)
    time_offset = server_dh_inner.server_time - int(time.time())

    b = get_int(os.urandom(256), signed=False)
    g_b = pow(g, b, dh_prime)
    gab = pow(g_a, b, dh_prime)

    # IMPORTANT: Apart from the conditions on the Diffie-Hellman prime
    # dh_prime and generator g, both sides are to check that g, g_a and
    # g_b are greater than 1 and less than dh_prime - 1. We recommend
    # checking that g_a and g_b are between 2^{2048-64} and
    # dh_prime - 2^{2048-64} as well.
    # (https://core.telegram.org/mtproto/auth_key#dh-key-exchange-complete)
    if not (1 < g < (dh_prime - 1)):
        # Bug fix: this check validates g, but the original message blamed
        # g_a, which made failures here impossible to diagnose correctly.
        raise SecurityError("g is not within (1, dh_prime - 1)")

    if not (1 < g_a < (dh_prime - 1)):
        raise SecurityError("g_a is not within (1, dh_prime - 1)")

    if not (1 < g_b < (dh_prime - 1)):
        raise SecurityError("g_b is not within (1, dh_prime - 1)")

    safety_range = 2 ** (2048 - 64)
    if not (safety_range <= g_a <= (dh_prime - safety_range)):
        raise SecurityError("g_a is not within (2^{2048-64}, dh_prime - 2^{2048-64})")

    if not (safety_range <= g_b <= (dh_prime - safety_range)):
        raise SecurityError("g_b is not within (2^{2048-64}, dh_prime - 2^{2048-64})")

    # Prepare client DH Inner Data
    client_dh_inner = bytes(
        ClientDHInnerData(
            nonce=res_pq.nonce,
            server_nonce=res_pq.server_nonce,
            retry_id=0,  # TODO Actual retry ID
            g_b=rsa.get_byte_array(g_b),
        )
    )

    client_dh_inner_hashed = sha1(client_dh_inner).digest() + client_dh_inner

    # Encryption
    client_dh_encrypted = AES.encrypt_ige(client_dh_inner_hashed, key, iv)

    # Prepare Set client DH params
    dh_gen = await sender.send(
        SetClientDHParamsRequest(
            nonce=res_pq.nonce,
            server_nonce=res_pq.server_nonce,
            encrypted_data=client_dh_encrypted,
        )
    )

    nonce_types = (DhGenOk, DhGenRetry, DhGenFail)
    assert isinstance(dh_gen, nonce_types), "Step 3.1 answer was %s" % dh_gen
    name = dh_gen.__class__.__name__
    if dh_gen.nonce != res_pq.nonce:
        raise SecurityError("Step 3 invalid {} nonce from server".format(name))

    if dh_gen.server_nonce != res_pq.server_nonce:
        raise SecurityError("Step 3 invalid {} server nonce from server".format(name))

    auth_key = AuthKey(rsa.get_byte_array(gab))
    # The response type (Ok/Retry/Fail) selects which new_nonce_hashN field
    # to validate: 1, 2 or 3 respectively.
    nonce_number = 1 + nonce_types.index(type(dh_gen))
    new_nonce_hash = auth_key.calc_new_nonce_hash(new_nonce, nonce_number)

    dh_hash = getattr(dh_gen, "new_nonce_hash{}".format(nonce_number))
    if dh_hash != new_nonce_hash:
        raise SecurityError("Step 3 invalid new nonce hash")

    if not isinstance(dh_gen, DhGenOk):
        raise AssertionError("Step 3.2 answer was %s" % dh_gen)

    return auth_key, time_offset


def get_int(byte_array, signed=True):
    """
    Gets the specified integer from its byte array.
    This should be used by this module alone, as it works with big endian.

    :param byte_array: the byte array representing th integer.
    :param signed: whether the number is signed or not.
    :return: the integer representing the given byte array.
    """
    return int.from_bytes(byte_array, byteorder="big", signed=signed)
PypiClean
/Hikka_TL-1.24.14-py3-none-any.whl/telethon/tl/custom/adminlogevent.py
from ...tl import types
from ...utils import get_input_peer


class AdminLogEvent:
    """
    Represents a more friendly interface for admin log events.

    Members:
        original (:tl:`ChannelAdminLogEvent`):
            The original :tl:`ChannelAdminLogEvent`.

        entities (`dict`):
            A dictionary mapping user IDs to :tl:`User`.

            When `old` and `new` are :tl:`ChannelParticipant`, you can
            use this dictionary to map the ``user_id``, ``kicked_by``,
            ``inviter_id`` and ``promoted_by`` IDs to their :tl:`User`.

        user (:tl:`User`):
            The user that caused this action (``entities[original.user_id]``).

        input_user (:tl:`InputPeerUser`):
            Input variant of `user`.
    """

    def __init__(self, original, entities):
        self.original = original
        self.entities = entities
        self.user = entities[original.user_id]
        self.input_user = get_input_peer(self.user)

    @property
    def id(self):
        """
        The ID of this event.
        """
        return self.original.id

    @property
    def date(self):
        """
        The date when this event occurred.
        """
        return self.original.date

    @property
    def user_id(self):
        """
        The ID of the user that triggered this event.
        """
        return self.original.user_id

    @property
    def action(self):
        """
        The original :tl:`ChannelAdminLogEventAction`.
        """
        return self.original.action

    @property
    def old(self):
        """
        The old value from the event.

        Which attribute of the action carries the "old" value depends on
        the concrete action type, hence the dispatch below.
        """
        ori = self.original.action
        if isinstance(
            ori,
            (
                types.ChannelAdminLogEventActionChangeAbout,
                types.ChannelAdminLogEventActionChangeTitle,
                types.ChannelAdminLogEventActionChangeUsername,
                types.ChannelAdminLogEventActionChangeLocation,
                types.ChannelAdminLogEventActionChangeHistoryTTL,
            ),
        ):
            return ori.prev_value
        elif isinstance(ori, types.ChannelAdminLogEventActionChangePhoto):
            return ori.prev_photo
        elif isinstance(ori, types.ChannelAdminLogEventActionChangeStickerSet):
            return ori.prev_stickerset
        elif isinstance(ori, types.ChannelAdminLogEventActionEditMessage):
            return ori.prev_message
        elif isinstance(
            ori,
            (
                types.ChannelAdminLogEventActionParticipantToggleAdmin,
                types.ChannelAdminLogEventActionParticipantToggleBan,
            ),
        ):
            return ori.prev_participant
        elif isinstance(
            ori,
            (
                types.ChannelAdminLogEventActionToggleInvites,
                types.ChannelAdminLogEventActionTogglePreHistoryHidden,
                types.ChannelAdminLogEventActionToggleSignatures,
            ),
        ):
            # Toggle actions only store the new boolean; the old one is
            # necessarily its negation.
            return not ori.new_value
        elif isinstance(ori, types.ChannelAdminLogEventActionDeleteMessage):
            return ori.message
        elif isinstance(ori, types.ChannelAdminLogEventActionDefaultBannedRights):
            return ori.prev_banned_rights
        elif isinstance(ori, types.ChannelAdminLogEventActionDiscardGroupCall):
            return ori.call
        elif isinstance(
            ori,
            (
                types.ChannelAdminLogEventActionExportedInviteDelete,
                types.ChannelAdminLogEventActionExportedInviteRevoke,
                types.ChannelAdminLogEventActionParticipantJoinByInvite,
            ),
        ):
            return ori.invite
        elif isinstance(ori, types.ChannelAdminLogEventActionExportedInviteEdit):
            return ori.prev_invite

    @property
    def new(self):
        """
        The new value present in the event.

        Falls through to None for action types that have no "new" value.
        """
        ori = self.original.action
        if isinstance(
            ori,
            (
                types.ChannelAdminLogEventActionChangeAbout,
                types.ChannelAdminLogEventActionChangeTitle,
                types.ChannelAdminLogEventActionChangeUsername,
                types.ChannelAdminLogEventActionToggleInvites,
                types.ChannelAdminLogEventActionTogglePreHistoryHidden,
                types.ChannelAdminLogEventActionToggleSignatures,
                types.ChannelAdminLogEventActionChangeLocation,
                types.ChannelAdminLogEventActionChangeHistoryTTL,
            ),
        ):
            return ori.new_value
        elif isinstance(ori, types.ChannelAdminLogEventActionChangePhoto):
            return ori.new_photo
        elif isinstance(ori, types.ChannelAdminLogEventActionChangeStickerSet):
            return ori.new_stickerset
        elif isinstance(ori, types.ChannelAdminLogEventActionEditMessage):
            return ori.new_message
        elif isinstance(
            ori,
            (
                types.ChannelAdminLogEventActionParticipantToggleAdmin,
                types.ChannelAdminLogEventActionParticipantToggleBan,
            ),
        ):
            return ori.new_participant
        elif isinstance(
            ori,
            (
                types.ChannelAdminLogEventActionParticipantInvite,
                types.ChannelAdminLogEventActionParticipantVolume,
            ),
        ):
            return ori.participant
        elif isinstance(ori, types.ChannelAdminLogEventActionDefaultBannedRights):
            return ori.new_banned_rights
        elif isinstance(ori, types.ChannelAdminLogEventActionStopPoll):
            return ori.message
        elif isinstance(ori, types.ChannelAdminLogEventActionStartGroupCall):
            return ori.call
        elif isinstance(
            ori,
            (
                types.ChannelAdminLogEventActionParticipantMute,
                types.ChannelAdminLogEventActionParticipantUnmute,
            ),
        ):
            return ori.participant
        elif isinstance(ori, types.ChannelAdminLogEventActionToggleGroupCallSetting):
            return ori.join_muted
        elif isinstance(ori, types.ChannelAdminLogEventActionExportedInviteEdit):
            return ori.new_invite

    @property
    def changed_about(self):
        """
        Whether the channel's about was changed or not.

        If `True`, `old` and `new` will be present as `str`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionChangeAbout
        )

    @property
    def changed_title(self):
        """
        Whether the channel's title was changed or not.

        If `True`, `old` and `new` will be present as `str`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionChangeTitle
        )

    @property
    def changed_username(self):
        """
        Whether the channel's username was changed or not.

        If `True`, `old` and `new` will be present as `str`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionChangeUsername
        )

    @property
    def changed_photo(self):
        """
        Whether the channel's photo was changed or not.

        If `True`, `old` and `new` will be present as :tl:`Photo`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionChangePhoto
        )

    @property
    def changed_sticker_set(self):
        """
        Whether the channel's sticker set was changed or not.

        If `True`, `old` and `new` will be present as :tl:`InputStickerSet`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionChangeStickerSet
        )

    @property
    def changed_message(self):
        """
        Whether a message in this channel was edited or not.

        If `True`, `old` and `new` will be present as
        `Message <telethon.tl.custom.message.Message>`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionEditMessage
        )

    @property
    def deleted_message(self):
        """
        Whether a message in this channel was deleted or not.

        If `True`, `old` will be present as
        `Message <telethon.tl.custom.message.Message>`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionDeleteMessage
        )

    @property
    def changed_admin(self):
        """
        Whether the permissions for an admin in this channel
        changed or not.

        If `True`, `old` and `new` will be present as
        :tl:`ChannelParticipant`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionParticipantToggleAdmin
        )

    @property
    def changed_restrictions(self):
        """
        Whether the restrictions (banned rights) of a participant in this
        channel changed or not.  (Docstring corrected: it previously said
        "a message ... was edited", copy-pasted from `changed_message`.)

        If `True`, `old` and `new` will be present as
        :tl:`ChannelParticipant`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionParticipantToggleBan
        )

    @property
    def changed_invites(self):
        """
        Whether the invites in the channel were toggled or not.

        If `True`, `old` and `new` will be present as `bool`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionToggleInvites
        )

    @property
    def changed_location(self):
        """
        Whether the location setting of the channel has changed or not.

        If `True`, `old` and `new` will be present as :tl:`ChannelLocation`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionChangeLocation
        )

    @property
    def joined(self):
        """
        Whether `user` joined through the channel's
        public username or not.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionParticipantJoin
        )

    @property
    def joined_invite(self):
        """
        Whether a new user joined through an invite
        link to the channel or not.

        If `True`, `new` will be present as
        :tl:`ChannelParticipant`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionParticipantInvite
        )

    @property
    def left(self):
        """
        Whether `user` left the channel or not.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionParticipantLeave
        )

    @property
    def changed_hide_history(self):
        """
        Whether hiding the previous message history for new members
        in the channel was toggled or not.

        If `True`, `old` and `new` will be present as `bool`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionTogglePreHistoryHidden
        )

    @property
    def changed_signatures(self):
        """
        Whether the message signatures in the channel were toggled
        or not.

        If `True`, `old` and `new` will be present as `bool`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionToggleSignatures
        )

    @property
    def changed_pin(self):
        """
        Whether a new message in this channel was pinned or not.

        If `True`, `new` will be present as
        `Message <telethon.tl.custom.message.Message>`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionUpdatePinned
        )

    @property
    def changed_default_banned_rights(self):
        """
        Whether the default banned rights were changed or not.

        If `True`, `old` and `new` will
        be present as :tl:`ChatBannedRights`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionDefaultBannedRights
        )

    @property
    def stopped_poll(self):
        """
        Whether a poll was stopped or not.

        If `True`, `new` will be present as
        `Message <telethon.tl.custom.message.Message>`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionStopPoll
        )

    @property
    def started_group_call(self):
        """
        Whether a group call was started or not.

        If `True`, `new` will be present as :tl:`InputGroupCall`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionStartGroupCall
        )

    @property
    def discarded_group_call(self):
        """
        Whether a group call was discarded (ended) or not.  (Docstring
        corrected: it previously said "started", copy-pasted from
        `started_group_call`.)

        If `True`, `old` will be present as :tl:`InputGroupCall`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionDiscardGroupCall
        )

    @property
    def user_muted(self):
        """
        Whether a participant was muted in the ongoing group call or not.

        If `True`, `new` will be present as :tl:`GroupCallParticipant`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionParticipantMute
        )

    @property
    def user_unmutted(self):
        """
        Whether a participant was unmuted from the ongoing group call
        or not.

        NOTE: the name "unmutted" is a typo kept for backwards
        compatibility with existing callers.

        If `True`, `new` will be present as :tl:`GroupCallParticipant`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionParticipantUnmute
        )

    @property
    def changed_call_settings(self):
        """
        Whether the group call settings were changed or not.

        If `True`, `new` will be `True` if new users are muted on join.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionToggleGroupCallSetting
        )

    @property
    def changed_history_ttl(self):
        """
        Whether the Time To Live of the message history has changed.

        Messages sent after this change will have a ``ttl_period`` in
        seconds indicating how long they should live for before being
        auto-deleted.

        If `True`, `old` will be the old TTL, and `new` the new TTL,
        in seconds.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionChangeHistoryTTL
        )

    @property
    def deleted_exported_invite(self):
        """
        Whether the exported chat invite has been deleted.

        If `True`, `old` will be the deleted :tl:`ExportedChatInvite`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionExportedInviteDelete
        )

    @property
    def edited_exported_invite(self):
        """
        Whether the exported chat invite has been edited.

        If `True`, `old` and `new` will be the old and new
        :tl:`ExportedChatInvite`, respectively.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionExportedInviteEdit
        )

    @property
    def revoked_exported_invite(self):
        """
        Whether the exported chat invite has been revoked.

        If `True`, `old` will be the revoked :tl:`ExportedChatInvite`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionExportedInviteRevoke
        )

    @property
    def joined_by_invite(self):
        """
        Whether a new participant has joined with the use of an invite link.

        If `True`, `old` will be pre-existing (old) :tl:`ExportedChatInvite`
        used to join.
        """
        return isinstance(
            self.original.action,
            types.ChannelAdminLogEventActionParticipantJoinByInvite,
        )

    @property
    def changed_user_volume(self):
        """
        Whether a participant's volume in a call has been changed.

        If `True`, `new` will be the updated :tl:`GroupCallParticipant`.
        """
        return isinstance(
            self.original.action, types.ChannelAdminLogEventActionParticipantVolume
        )

    def __str__(self):
        return str(self.original)

    def stringify(self):
        return self.original.stringify()
PypiClean
/GeoNode-3.2.0-py3-none-any.whl/geonode/maps/models.py
"""Django models for GeoNode maps: the ``Map`` aggregate and its ``MapLayer`` entries."""
import logging
import uuid
from django.conf import settings
from django.db import models
from django.db.models import signals
import json
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse
from django.template.defaultfilters import slugify
from django.core.cache import cache

from geonode.layers.models import Layer, Style
from geonode.compat import ensure_string
from geonode.base.models import ResourceBase, resourcebase_post_save
from geonode.maps.signals import map_changed_signal
from geonode.security.utils import remove_object_permissions
from geonode.client.hooks import hookset
from geonode.utils import (GXPMapBase,
                           GXPLayerBase,
                           layer_from_viewer_config,
                           default_map_config)
from geonode import geoserver  # noqa
from geonode.utils import check_ogc_backend
from deprecated import deprecated
from pinax.ratings.models import OverallRating

logger = logging.getLogger("geonode.maps.models")


class Map(ResourceBase, GXPMapBase):

    """
    A Map aggregates several layers together and annotates them with a viewport
    configuration.
    """

    # viewer configuration
    zoom = models.IntegerField(_('zoom'))
    # The zoom level to use when initially loading this map.  Zoom levels start
    # at 0 (most zoomed out) and each increment doubles the resolution.

    projection = models.CharField(_('projection'), max_length=32)
    # The projection used for this map.  This is stored as a string with the
    # projection's SRID.

    center_x = models.FloatField(_('center X'))
    # The x coordinate to center on when loading this map.  Its interpretation
    # depends on the projection.

    center_y = models.FloatField(_('center Y'))
    # The y coordinate to center on when loading this map.  Its interpretation
    # depends on the projection.

    last_modified = models.DateTimeField(auto_now_add=True)
    # The last time the map was modified.
    # NOTE(review): ``auto_now_add`` stamps the field only at creation, not on
    # every save; if "last modified" is the intent, ``auto_now`` would be the
    # matching option — confirm before changing, since it alters DB behavior.
urlsuffix = models.CharField(_('Site URL'), max_length=255, blank=True) # Alphanumeric alternative to referencing maps by id, appended to end of # URL instead of id, ie http://domain/maps/someview featuredurl = models.CharField( _('Featured Map URL'), max_length=255, blank=True) # Full URL for featured map view, ie http://domain/someview def __str__(self): return f'{self.title} by {(self.owner.username if self.owner else "<Anonymous>")}' @property def center(self): """ A handy shortcut for the center_x and center_y properties as a tuple (read only) """ return (self.center_x, self.center_y) @property def layers(self): layers = MapLayer.objects.filter(map=self.id) return [layer for layer in layers] @property def local_layers(self): layer_names = MapLayer.objects.filter(map__id=self.id).values('name') return Layer.objects.filter(alternate__in=layer_names) | \ Layer.objects.filter(name__in=layer_names) def json(self, layer_filter): """ Get a JSON representation of this map suitable for sending to geoserver for creating a download of all layers """ map_layers = MapLayer.objects.filter(map=self.id) layers = [] for map_layer in map_layers: if map_layer.local: layer = Layer.objects.get(alternate=map_layer.name) layers.append(layer) else: pass if layer_filter: layers = [lyr for lyr in layers if layer_filter(lyr)] # the readme text will appear in a README file in the zip readme = ( f"Title: {self.title}\n" + f"Author: {self.poc}\n" + f"Abstract: {self.abstract}\n" ) if self.license: readme += f"License: {self.license}" if self.license.url: readme += f" ({self.license.url})" readme += "\n" if self.constraints_other: readme += f"Additional constraints: {self.constraints_other}\n" def layer_json(lyr): return { "name": lyr.alternate, "service": lyr.service_type if hasattr(lyr, 'service_type') else "", "serviceURL": "", "metadataURL": "" } map_config = { # the title must be provided and is used for the zip file name "map": {"readme": readme, "title": self.title}, "layers": 
            [layer_json(lyr) for lyr in layers]
        }

        return json.dumps(map_config)

    def update_from_viewer(self, conf, context=None):
        """
        Update this Map's details by parsing a JSON object as produced by
        a GXP Viewer.

        This method automatically persists to the database!
        """
        template_name = hookset.update_from_viewer(conf, context=context)
        # Coerce a JSON-string context into a dict when possible; parse
        # failures are deliberately ignored here.
        if not isinstance(context, dict):
            try:
                context = json.loads(ensure_string(context))
            except Exception:
                pass

        # NOTE(review): the incoming ``conf`` argument is unconditionally
        # replaced by the config embedded in ``context``.  If ``context``
        # could not be parsed into a dict above, this ``.get`` raises —
        # confirm all callers pass a dict-like or JSON-decodable context.
        conf = context.get("config", {})
        if not isinstance(conf, dict) or isinstance(conf, bytes):
            try:
                conf = json.loads(ensure_string(conf))
            except Exception:
                conf = {}

        about = conf.get("about", {})
        self.title = conf.get("title", about.get("title", ""))
        self.abstract = conf.get("abstract", about.get("abstract", ""))

        _map = conf.get("map", {})
        center = _map.get("center", settings.DEFAULT_MAP_CENTER)
        self.zoom = _map.get("zoom", settings.DEFAULT_MAP_ZOOM)

        # The center may arrive as {'x': ..., 'y': ...} or as an (x, y) pair.
        if isinstance(center, dict):
            self.center_x = center.get('x')
            self.center_y = center.get('y')
        else:
            self.center_x, self.center_y = center

        projection = _map.get("projection", settings.DEFAULT_MAP_CRS)
        bbox = _map.get("bbox", None)

        if bbox:
            self.set_bounds_from_bbox(bbox, projection)
        else:
            self.set_bounds_from_center_and_zoom(
                self.center_x,
                self.center_y,
                self.zoom)

        if self.projection is None or self.projection == '':
            self.projection = projection

        if self.uuid is None or self.uuid == '':
            self.uuid = str(uuid.uuid1())

        def source_for(layer):
            # Resolve the layer's source config from the viewer config;
            # fall back to the layer's own URL when the named source is absent.
            try:
                return conf["sources"][layer["source"]]
            except Exception:
                if 'url' in layer:
                    return {'url': layer['url']}
                else:
                    return {}

        layers = [lyr for lyr in _map.get("layers", [])]
        # Snapshot of local layer names, used below to detect whether the
        # rebuild actually changed the map's layer membership.
        layer_names = set(lyr.alternate for lyr in self.local_layers)

        # Rebuild the layer set from scratch based on the viewer config.
        self.layer_set.all().delete()
        self.keywords.add(*_map.get('keywords', []))

        for ordering, layer in enumerate(layers):
            self.layer_set.add(
                layer_from_viewer_config(
                    self.id, MapLayer, layer, source_for(layer), ordering
                ))

        self.save(notify=True)

        if layer_names != set(lyr.alternate for lyr in self.local_layers):
map_changed_signal.send_robust(sender=self, what_changed='layers') return template_name def keyword_list(self): keywords_qs = self.keywords.all() if keywords_qs: return [kw.name for kw in keywords_qs] else: return [] def get_absolute_url(self): return reverse('map_detail', None, [str(self.id)]) @property def embed_url(self): return reverse('map_embed', kwargs={'mapid': self.pk}) def get_bbox_from_layers(self, layers): """ Calculate the bbox from a given list of Layer objects bbox format: [xmin, xmax, ymin, ymax] """ bbox = None for layer in layers: layer_bbox = layer.bbox if bbox is None: bbox = list(layer_bbox[0:4]) else: bbox[0] = min(bbox[0], layer_bbox[0]) bbox[1] = max(bbox[1], layer_bbox[1]) bbox[2] = min(bbox[2], layer_bbox[2]) bbox[3] = max(bbox[3], layer_bbox[3]) return bbox def create_from_layer_list(self, user, layers, title, abstract): self.owner = user self.title = title self.abstract = abstract self.projection = getattr(settings, 'DEFAULT_MAP_CRS', 'EPSG:3857') self.zoom = 0 self.center_x = 0 self.center_y = 0 if self.uuid is None or self.uuid == '': self.uuid = str(uuid.uuid1()) DEFAULT_MAP_CONFIG, DEFAULT_BASE_LAYERS = default_map_config(None) _layers = [] for layer in layers: if not isinstance(layer, Layer): try: layer = Layer.objects.get(alternate=layer) except ObjectDoesNotExist: raise Exception( f'Could not find layer with name {layer}') if not user.has_perm( 'base.view_resourcebase', obj=layer.resourcebase_ptr): # invisible layer, skip inclusion or raise Exception? logger.error( 'User %s tried to create a map with layer %s without having premissions' % (user, layer)) else: _layers.append(layer) # Set bounding box based on all layers extents. # bbox format: [xmin, xmax, ymin, ymax] bbox = self.get_bbox_from_layers(_layers) self.set_bounds_from_bbox(bbox, self.projection) # Save the map in order to create an id in the database # used below for the maplayers. 
self.save() if _layers and len(_layers) > 0: index = 0 for layer in _layers: MapLayer.objects.create( map=self, name=layer.alternate, ows_url=layer.get_ows_url(), stack_order=index, visibility=True ) index += 1 # Save again to persist the zoom and bbox changes and # to generate the thumbnail. self.set_missing_info() self.save(notify=True) @property def sender(self): return None @property def class_name(self): return self.__class__.__name__ @property def is_public(self): """ Returns True if anonymous (public) user can view map. """ from guardian.shortcuts import get_anonymous_user user = get_anonymous_user() return user.has_perm( 'base.view_resourcebase', obj=self.resourcebase_ptr) @property def layer_group(self): """ Returns layer group name from local OWS for this map instance. """ if check_ogc_backend(geoserver.BACKEND_PACKAGE): from geonode.geoserver.helpers import gs_catalog, ogc_server_settings lg_name = f'{slugify(self.title)}_{self.id}' try: return { 'catalog': gs_catalog.get_layergroup(lg_name), 'ows': ogc_server_settings.ows } except Exception: return { 'catalog': None, 'ows': ogc_server_settings.ows } else: return None @deprecated(version='2.10.1', reason="APIs have been changed on geospatial service") def publish_layer_group(self): """ Publishes local map layers as WMS layer group on local OWS. """ if check_ogc_backend(geoserver.BACKEND_PACKAGE): from geonode.geoserver.helpers import gs_catalog from geoserver.layergroup import UnsavedLayerGroup as GsUnsavedLayerGroup else: raise Exception( 'Cannot publish layer group if geonode.geoserver is not in INSTALLED_APPS') # temporary permission workaround: # only allow public maps to be published if not self.is_public: return 'Only public maps can be saved as layer group.' 
map_layers = MapLayer.objects.filter(map=self.id) # Local Group Layer layers and corresponding styles layers = [] lg_styles = [] for ml in map_layers: if ml.local: layer = Layer.objects.get(alternate=ml.name) style = ml.styles or getattr(layer.default_style, 'name', '') layers.append(layer) lg_styles.append(style) lg_layers = [lyr.name for lyr in layers] # Group layer bounds and name lg_bounds = [str(coord) for coord in self.bbox] lg_name = f'{slugify(self.title)}_{self.id}' # Update existing or add new group layer lg = self.layer_group if lg is None: lg = GsUnsavedLayerGroup( gs_catalog, lg_name, lg_layers, lg_styles, lg_bounds) else: lg.layers, lg.styles, lg.bounds = lg_layers, lg_styles, lg_bounds gs_catalog.save(lg) return lg_name class Meta(ResourceBase.Meta): pass class MapLayer(models.Model, GXPLayerBase): """ The MapLayer model represents a layer included in a map. This doesn't just identify the dataset, but also extra options such as which style to load and the file format to use for image tiles. """ map = models.ForeignKey(Map, related_name="layer_set", on_delete=models.CASCADE) # The map containing this layer stack_order = models.IntegerField(_('stack order')) # The z-index of this layer in the map; layers with a higher stack_order will # be drawn on top of others. format = models.TextField( _('format'), null=True, blank=True) # The content_type of the image format to use for tiles (image/png, image/jpeg, # image/gif...) name = models.TextField(_('name'), null=True) # The name of the layer to load. store = models.TextField(_('store'), null=True) # The interpretation of this name depends on the source of the layer (Google # has a fixed set of names, WMS services publish a list of available layers # in their capabilities documents, etc.) opacity = models.FloatField(_('opacity'), default=1.0) # The opacity with which to render this layer, on a scale from 0 to 1. 
styles = models.TextField( _('styles'), null=True, blank=True) # The name of the style to use for this layer (only useful for WMS layers.) transparent = models.BooleanField(_('transparent'), default=False) # A boolean value, true if we should request tiles with a transparent # background. fixed = models.BooleanField(_('fixed'), default=False) # A boolean value, true if we should prevent the user from dragging and # dropping this layer in the layer chooser. group = models.TextField(_('group'), null=True, blank=True) # A group label to apply to this layer. This affects the hierarchy displayed # in the map viewer's layer tree. visibility = models.BooleanField(_('visibility'), default=True) # A boolean value, true if this layer should be visible when the map loads. ows_url = models.URLField(_('ows URL'), null=True, blank=True) # The URL of the OWS service providing this layer, if any exists. layer_params = models.TextField(_('layer params')) # A JSON-encoded dictionary of arbitrary parameters for the layer itself when # passed to the GXP viewer. # If this dictionary conflicts with options that are stored in other fields # (such as format, styles, etc.) then the fields override. source_params = models.TextField(_('source params')) # A JSON-encoded dictionary of arbitrary parameters for the GXP layer source # configuration for this layer. # If this dictionary conflicts with options that are stored in other fields # (such as ows_url) then the fields override. 
local = models.BooleanField(default=False) # True if this layer is served by the local geoserver def layer_config(self, user=None): # Try to use existing user-specific cache of layer config if self.id: cfg = cache.get("layer_config" + str(self.id) + "_" + str(0 if user is None else user.id)) if cfg is not None: return cfg cfg = GXPLayerBase.layer_config(self, user=user) # if this is a local layer, get the attribute configuration that # determines display order & attribute labels if Layer.objects.filter(alternate=self.name).exists(): try: if self.local: layer = Layer.objects.get(store=self.store, alternate=self.name) else: layer = Layer.objects.get( alternate=self.name, remote_service__base_url=self.ows_url) attribute_cfg = layer.attribute_config() if "ftInfoTemplate" in attribute_cfg: cfg["ftInfoTemplate"] = attribute_cfg["ftInfoTemplate"] if "getFeatureInfo" in attribute_cfg: cfg["getFeatureInfo"] = attribute_cfg["getFeatureInfo"] if not user.has_perm( 'base.view_resourcebase', obj=layer.resourcebase_ptr): cfg['disabled'] = True cfg['visibility'] = False except Exception: # shows maplayer with pink tiles, # and signals that there is problem # TODO: clear orphaned MapLayers layer = None if self.id: # Create temporary cache of maplayer config, should not last too long in case # local layer permissions or configuration values change (default # is 5 minutes) cache.set("layer_config" + str(self.id) + "_" + str(0 if user is None else user.id), cfg) return cfg @property def layer_title(self): title = None try: if self.local: if self.store: title = Layer.objects.get( store=self.store, alternate=self.name).title else: title = Layer.objects.get(alternate=self.name).title except Exception: title = None if title is None: title = self.name return title @property def local_link(self): link = None try: if self.local: if self.store: layer = Layer.objects.get( store=self.store, alternate=self.name) else: layer = Layer.objects.get(alternate=self.name) link = f"<a 
href=\"{layer.get_absolute_url()}\">{layer.title}</a>" except Exception: link = None if link is None: link = f"<span>{self.name}</span> " return link @property def get_legend(self): try: layer_params = json.loads(self.layer_params) capability = layer_params.get('capability', {}) # Use '' to represent default layer style style_name = capability.get('style', '') layer_obj = Layer.objects.filter(alternate=self.name).first() if ':' in style_name: style_name = style_name.split(':')[1] elif layer_obj.default_style: style_name = layer_obj.default_style.name href = layer_obj.get_legend_url(style_name=style_name) style = Style.objects.filter(name=style_name).first() if style: # replace map-legend display name if style has a title style_name = style.sld_title or style_name return {style_name: href} except Exception as e: logger.exception(e) return None class Meta: ordering = ["stack_order"] def __str__(self): return f'{self.ows_url}?layers={self.name}' def pre_delete_map(instance, sender, **kwrargs): ct = ContentType.objects.get_for_model(instance) OverallRating.objects.filter( content_type=ct, object_id=instance.id).delete() remove_object_permissions(instance.get_self_resource()) signals.pre_delete.connect(pre_delete_map, sender=Map) signals.post_save.connect(resourcebase_post_save, sender=Map)
PypiClean
/Hyperion-0.9.10.tar.gz/Hyperion-0.9.10/docs/setup/setup_grid.rst
.. _grid: Coordinate grids and physical quantities ======================================== In general, coordinate grids and density grids are set using methods of the form:: from hyperion.model import Model m = Model() m.set_<grid_type>_grid(...) m.add_density_grid(density, dust) where ``<grid_type>`` is the grid type being used, and ``dust`` is a dust file in HDF5 format specified either by filename, or as a dust object. See :doc:`setup_dust` for more details about creating and using dust files. For example, if you are using a dust file named ``kmh.hdf5``, you can specify this with:: m.add_density_grid(density, 'kmh.hdf5') The ``add_density_grid`` method can be called multiple times if multiple density arrays are needed (for example if different dust sizes have different spatial distributions). .. note:: If you haven't already done so, please make sure you read the :doc:`../important/important` to understand whether to specify dust or dust+gas densities! Optionally, a specific energy distribution can also be specified in ``add_density_grid`` using the ``specific_energy=`` argument:: m.add_density_grid(density, dust, specific_energy=specific_energy) where ``specific_energy`` is given in the same format as ``density`` (see sections below). By default, the specific energy specified is the *initial* specific energy used, and if the number of temperature iterations is not zero (see :ref:`convergence`) this specific energy gets replaced with the self-consistently calculated one in later iterations. If instead you want this specific energy to be *added* to the self-consistently computed one after each iteration, see :ref:`initial_specific_energy`. Hyperion currently supports six types of 3-d grids: * Cartesian grids * Spherical polar grids * Cylindrical polar grids * AMR (Adaptive Mesh Refinement) grids * Octree grids * Voronoi grids The following sections show how the different kinds of grids should be set up. 
Regular 3-d grids ----------------- Geometry ^^^^^^^^ In the case of the cartesian and polar grids, you should define the wall position in each of the three directions, using cgs units for the spatial coordinates, and radians for the angular coordinates. These wall positions should be stored in one 1-d NumPy array for each dimension, with one element more than the number of cells defined. The walls can then be used to create a coordinate grid using methods of the form ``set_x_grid(walls_1, walls_2, walls_3)``. The following examples demonstrate how to do this for the various grid types * A 10x10x10 cartesian grid from -1pc to 1pc in each direction:: x = np.linspace(-pc, pc, 11) y = np.linspace(-pc, pc, 11) z = np.linspace(-pc, pc, 11) m.set_cartesian_grid(x, y, z) * A 2-d 400x200x1 spherical polar grid with radial grid cells logarithmically spaced between one solar radius and 100AU, and the first grid cell wall located at 0:: r = np.logspace(np.log10(rsun), np.log10(100 * au), 400) r = np.hstack([0., r]) # add cell wall at r=0 theta = np.linspace(0., pi, 201) phi = np.array([0., 2 * pi]) m.set_spherical_polar_grid(r, theta, phi) * A 3-d 100x100x10 cylindrical polar grid with radial grid cells logarithmically spaced between one solar radius and 100AU, and the first grid cell wall located at 0:: w = np.logspace(np.log10(rsun), np.log10(100 * au), 100) w = np.hstack([0., w]) # add cell wall at w=0 z = np.linspace(-10 * au, 10 * au, 101) phi = np.linspace(0, 2 * pi, 11) m.set_cylindrical_polar_grid(w, z, phi) .. note:: Spherical and cylindrical polar grids do not have to start at ``r=0`` or ``w=0``, but you need to make sure that all sources are located inside the grid. For example, if you place a point source at the origin, you will need the first grid cell wall to be at ``r=0`` or ``w=0``. 
In the above cases, since the grid cell walls are distributed logarithmically, the first grid cell wall has to be added separately, hence the use of ``hstack``, which is used to add a 0 at the start of the array. Density and Specific Energy ^^^^^^^^^^^^^^^^^^^^^^^^^^^ For regular cartesian and polar grids, a 3-d NumPy array containing the density array is required, for example:: m.add_density_grid(np.ones((100,100,100)), 'kmh.hdf5') for a 100x100x100 grid. Due to Numpy array conventions, the dimensions should be specified in reverse order, i.e. ``(n_z, n_y, n_x)`` for a cartesian grid, ``(n_phi, n_theta, n_r)`` for a spherical polar grid, or ``(n_phi, n_z, n_r)`` for a cylindrical polar grid. Note that once you have set the grid geometry on a model, you can access variables that make it easy (if you wish) to set up densities from analytical equations: * ``m.grid.gx``, ``m.grid.gy``, and ``m.grid.gz`` for cartesian grids * ``m.grid.gr``, ``m.grid.gt``, and ``m.grid.gp`` for spherical polar grids * ``m.grid.gw``, ``m.grid.gz``, and ``m.grid.gp`` for cylindrical polar grids These variables are the coordinates of the center of the cells, and each of these variables is a full 3-d array. For example, ``m.grid.gx`` is the x position of the center of *all* the cells, and has the same shape as the density array needs to have. In addition, the ``m.grid.shape`` variable contains the shape of the grid. This makes it easy to use analytical prescriptions for the density. For example, to set up a sphere of dust with radius R in a cartesian grid, you could do:: density = np.zeros(m.grid.shape) density[(m.grid.gx ** 2 + m.grid.gy ** 2 + m.grid.gz ** 2) < R ** 2] = 1. This grid would have a density of 0 outside R, and 1 inside R. Note that of course you should probably be using a spherical polar grid if you want to set up a sphere of dust, but the above example can be applied to more complicated analytical dust structures. 
AMR grids --------- Geometry ^^^^^^^^ AMR grids have to be constructed using the :class:`~hyperion.grid.AMRGrid` class:: from hyperion.grid import AMRGrid amr = AMRGrid() Levels can be added with:: level = amr.add_level() And grids can be added to a level with:: grid = level.add_grid() Grid objects have the following attributes which should be set: * ``xmin`` - lower x position of the grid * ``xmax`` - upper x position of the grid * ``ymin`` - lower y position of the grid * ``ymax`` - upper y position of the grid * ``zmin`` - lower z position of the grid * ``zmax`` - upper z position of the grid * ``nx`` - number of cells in x direction * ``ny`` - number of cells in y direction * ``nz`` - number of cells in z direction * ``quantities`` - a dictionary containing physical quantities (see below) Once we have an AMR grid object, which we call ``amr`` here, the geometry can be set using:: m.set_amr_grid(amr) The ``quantities`` attribute is unimportant for this step, as long as the geometry is correct. For more details on how to create or read in an AMR object, and for a list of requirements and restrictions on the geometry, see :ref:`amr_indepth`. .. note:: If you load in your simulation data with the `yt <http://yt-project.org/>`_ package, you can make use of the :meth:`~hyperion.grid.AMRGrid.from_yt` method to easily convert the simulation into a Hyperion :class:`~hyperion.grid.AMRGrid` object. Density and Specific Energy ^^^^^^^^^^^^^^^^^^^^^^^^^^^ Since AMR grids have a more complex structure than regular 3-d arrays, the density should be added using an :class:`~hyperion.grid.AMRGrid` object. In this case, the ``quantity`` attribute should be set for each grid object. For each physical quantity in the AMR grid, the dictionary should have an entry of the form:: grid.quantities[<quantity>] = quantity_array where ``<quantity>`` is a string containing the name of the quantity (e.g. 
``density``) and ``quantity_array`` should be a Numpy array with dimensions
``(grid.nz, grid.ny, grid.nx)`` (see :ref:`amr_indepth` for more details).

When calling ``add_density_grid``, the density should be specified as an item
of the :class:`~hyperion.grid.AMRGrid` object::

    m.add_density_grid(amr_object['density'], dust_file)

for example::

    m.add_density_grid(amr['density'], 'kmh.hdf5')

Specific energies can be specified using the same kinds of objects and using
the ``specific_energy`` argument::

    m.add_density_grid(amr['density'], dust_file,
                       specific_energy=amr['specific_energy'])

Note that in this example, the ``amr`` object contains the geometry, the
density, and the specific energy (i.e. it is not necessary to create a
separate :class:`~hyperion.grid.AMRGrid` object for each one).

Octree grids
------------

Geometry
^^^^^^^^

An `Octree <http://en.wikipedia.org/wiki/Octree>`_ is a hierarchical grid
format where each cell can be divided into eight children cells. At the top
level is a single cell that covers the whole spatial domain being considered.

To set up an Octree, the following information is needed:

* ``x``, ``y``, ``z`` - the coordinates of the center of the parent cell
* ``dx``, ``dy``, ``dz`` - the size of the parent cell
* ``refined`` - a 1-d sequence of booleans giving the structure of the grid.

The ``refined`` sequence contains all the information regarding the hierarchy
of the grid, and is described in :ref:`indepth_oct`. Once this sequence is
set, the geometry can be set with::

    m.set_octree_grid(x, y, z, dx, dy, dz, refined)

Density and Specific Energy
^^^^^^^^^^^^^^^^^^^^^^^^^^^

Densities (and optionally specific energies) should be specified in the same
manner as the regular grids, but should be specified as a 1-d Numpy array with
the same length as the ``refined`` list, where each density value corresponds
to the equivalent cell in the ``refined`` list.
Density values for cells with ``refined`` set to ``True`` will be ignored, and can be set to zero. .. _voronoi_grid: Voronoi grids ------------- Geometry ^^^^^^^^ A Voronoi grid is based on the concept of 3D `Voronoi diagrams <http://en.wikipedia.org/wiki/Voronoi_diagram>`_. A Voronoi grid is created from a set of user-specified seed points. Each seed point corresponds to a single grid cell, and the cell in which a seed point is located is defined geometrically by the set of all points closer to that seed than to any other. Voronoi cells are always guaranteed to be convex polyhedra. The number and distribution of the seed points are arbitrary (clearly, for best results the values of these two parameters should be chosen following some physical intuition or with a specific goal in mind - e.g., seed points could be more numerous where higher resolution is needed). In order to set up a Voronoi grid, the following information is needed: * ``x``, ``y``, ``z`` - three 1-d Numpy arrays of equal size representing the coordinates of the seed points. The size of these arrays implicitly defines the number of seed points. The geometry can be set with:: m.set_voronoi_grid(x, y, z) Density and Specific Energy ^^^^^^^^^^^^^^^^^^^^^^^^^^^ Densities (and optionally specific energies) should be specified in the same manner as the regular grids, but should be specified as a 1-d Numpy array with the same length as the number of seed points. Each density value in the array refers to the cell containing the corresponding seed point.
PypiClean
/233_misc-0.0.3.tar.gz/233_misc-0.0.3/LICENSE.md
MIT License Copyright (c) 2022 [The_Robin_Hood] Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
PypiClean