content
stringlengths
1
1.04M
input_ids
listlengths
1
774k
ratio_char_token
float64
0.38
22.9
token_count
int64
1
774k
from abc import ABC, abstractmethod from typing import List, Any import logging
[ 6738, 450, 66, 1330, 9738, 11, 12531, 24396, 201, 198, 6738, 19720, 1330, 7343, 11, 4377, 201, 198, 11748, 18931, 201, 198, 201, 198, 201, 198, 201, 198, 201 ]
3.103448
29
# encoding: utf-8 from __future__ import absolute_import, division, print_function import difflib import functools import os import re import sys import tempfile import pkg_resources import py import pytest from _pytest._code.code import ExceptionInfo, TerminalRepr from _pytest.outcomes import skip from hashlib import sha512 pytest_plugins = ["pytester"] _version = pkg_resources.require("pytest-regtest")[0].version.split(".") __version__ = tuple(map(int, _version)) del _version IS_PY3 = sys.version_info.major == 3 IS_WIN = sys.platform == "win32" if IS_PY3: open = functools.partial(open, encoding="utf-8") from io import StringIO else: from cStringIO import StringIO from string import ljust """ the function below is from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python """ textchars = bytearray({7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7f}) _converters_pre = [] _converters_post = [] def pytest_addoption(parser): """Add options to control the timeout plugin""" group = parser.getgroup("regtest", "regression test plugin") group.addoption( "--regtest-reset", action="store_true", help="do not run regtest but record current output", ) group.addoption( "--regtest-tee", action="store_true", default=False, help="print recorded results to console too", ) group.addoption( "--regtest-regard-line-endings", action="store_true", default=False, help="do not strip whitespaces at end of recorded lines", ) group.addoption( "--regtest-nodiff", action="store_true", default=False, help="do not show diff output for failed regresson tests", ) tw = py.io.TerminalWriter() @pytest.fixture @pytest.hookimpl(hookwrapper=True)
[ 2, 21004, 25, 3384, 69, 12, 23, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 11, 7297, 11, 3601, 62, 8818, 198, 198, 11748, 814, 8019, 198, 11748, 1257, 310, 10141, 198, 11748, 28686, 198, 11748, 302, 198, 11748, 25064, 198, ...
2.568336
739
#!/usr/bin/env python # Moves the text content of MathML <annotation> elements into an # attribute in XML files, thus removing the annotations from the file # text content. # This is a component in a pipeline to convert PMC NXML files into # text and standoffs. The whole pipeline can be run as # # python rewritetex.py FILE.xml -s | python rewritemmla.py -s | python rewriteu2a.py - -s | python respace.py - -s | python standoff.py - FILE.{txt,so} from __future__ import with_statement import sys import os import re import codecs from lxml import etree as ET # XML tag to use for elements whose text content has been rewritten # by this script. REWRITTEN_TAG = 'n2t-mmla' # XML attribute to use for storing the original text and tag of # rewritten elements ORIG_TAG_ATTRIBUTE = 'orig-tag' ORIG_TEXT_ATTRIBUTE = 'orig-text' INPUT_ENCODING="UTF-8" OUTPUT_ENCODING="UTF-8" ########## def rewrite_element(e, s): """ Given an XML tree element e and a string s, stores the original text content of the element in an attribute and replaces it with the string, further changing the tag to relect the change. """ # check that the attributes that will be used don't clobber # anything for a in (ORIG_TAG_ATTRIBUTE, ORIG_TEXT_ATTRIBUTE): assert a not in e.attrib, "rewritemmla: error: attribute '%s' already defined!" % a # store original text content and tag as attributes e.attrib[ORIG_TEXT_ATTRIBUTE] = e.text if e.text is not None else '' e.attrib[ORIG_TAG_ATTRIBUTE] = e.tag # swap in the new ones e.text = s e.tag = REWRITTEN_TAG # that's all return True if __name__ == "__main__": sys.exit(main(sys.argv))
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 2, 38213, 262, 2420, 2695, 286, 16320, 5805, 1279, 1236, 14221, 29, 4847, 656, 281, 198, 2, 11688, 287, 23735, 3696, 11, 4145, 10829, 262, 37647, 422, 262, 2393, 198, 2, 2420, 2...
2.843072
599
# -*- coding: utf-8 -*- # Copyright 2016 Yelp Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import absolute_import from __future__ import unicode_literals from time import time from sqlalchemy import Column from sqlalchemy import ForeignKey from sqlalchemy import Integer from sqlalchemy.types import Enum from schematizer.models.base_model import BaseModel from schematizer.models.database import Base from schematizer.models.database import session from schematizer.models.exceptions import EntityNotFoundError class MetaAttributeMappingStore(Base, BaseModel): """This table stores all the mappings of meta attribute registered for each entity. The entities can be Namespace or Source. This table is a source of truth for all mappings currently active in the Data Pipeline. Rows are modified in this table by hitting the meta_attribute_mappings endpoints for each entity. However this table is not used for enforcing the meta attributes in messages. This information is present in SchemaMetaAttributeMapping. """ __tablename__ = 'meta_attribute_mapping_store' id = Column(Integer, primary_key=True) # The name of the entity type, can be namespace or source. entity_type = Column( Enum( MetaAttributeEntity.NAMESPACE, MetaAttributeEntity.SOURCE, name='entity_type' ), nullable=False ) # Id of the entity specified in the entity_type attribute. entity_id = Column(Integer, nullable=False) # The schema_id of the meta attribute to be mapped. 
meta_attr_schema_id = Column(Integer, ForeignKey('avro_schema.id')) # Timestamp when the entry is created created_at = Column( Integer, nullable=False, default=lambda: int(time()) ) # Timestamp when the entry is last updated updated_at = Column( Integer, nullable=False, default=lambda: int(time()), onupdate=lambda: int(time()) ) @classmethod
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 1584, 44628, 3457, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428...
3.12716
810
from django.test import TestCase from django.contrib.auth.models import User from what_apps.people.models import GenericParty from django.test.simple import DjangoTestSuiteRunner from django.conf import settings def test_user_factory(number_of_users_to_create): ''' pseudo factory. takes an integer. returns a list of users. Obviously, don't use this to make production users. Dominick. ''' users = [] while len(users) < number_of_users_to_create: user = User.objects.create(username='test_user_%s' % len(users)) user.set_password('password') user.save() users.append(user) return users
[ 6738, 42625, 14208, 13, 9288, 1330, 6208, 20448, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 11787, 198, 6738, 644, 62, 18211, 13, 15332, 13, 27530, 1330, 42044, 33553, 198, 6738, 42625, 14208, 13, 9288, 13, 3643...
2.576779
267
import pandas as pd import numpy as np from tqdm import tqdm from tqdm.notebook import tqdm as tqdm_notebook from sklearn.base import clone import time import warnings from ..dataset.helpers import verbose_print from ..pipeline.helpers import get_mean_fis from sklearn.utils import Bunch from scipy.stats import t from pandas.util._decorators import doc from .stats_helpers import corrected_std, compute_corrected_ttest from sklearn.metrics._scorer import (_PredictScorer, _ProbaScorer, _ThresholdScorer) from .helpers import clean_str from copy import deepcopy import pickle as pkl from matplotlib import cm from matplotlib import colors from numpy.random import default_rng import seaborn as sns import matplotlib.pyplot as plt from sklearn.metrics._scorer import _MultimetricScorer _base_docs = {} _base_docs['dataset'] = """dataset : :class:`Dataset` The instance of :class:`Dataset` originally passed to :func:`evaluate`. .. note:: If a different dataset is passed, then unexpected behavior may occur. """ # @TODO # 1. Store permutation FI's in object after call # 2. Add methods for plot feature importance's ? # TODO - function to easily export saved results in different formats. class EvalResults(): '''This class is returned from calls to :func:`evaluate`, and can be used to store information from evaluate, or compute additional feature importances. It should typically not be initialized by the user.''' # Add verbose print _print = verbose_print @property def estimator(self): '''This parameter stores the passed saved, unfitted estimator used in this evaluation. 
This is a sklearn style estimator obtained from :func:`get_estimator`.''' return self._estimator @estimator.setter @property def mean_scores(self): '''This parameter stores the mean scores as a dictionary of values, where each dictionary is indexed by the name of the scorer, and the dictionary value is the mean score for that scorer.''' return self._mean_scores @mean_scores.setter @property def std_scores(self): '''This parameter stores the standard deviation scores as a dictionary of values, where each dictionary is indexed by the name of the scorer, and value contains the standard deviation across evaluation folds for that scorer. The default scorer key stores the micro standard deviation, but in the case that macro standard deviation differs, i.e., in the case of multiple repeats in an evaluation, then a separate macro standard deviation will be stored under the name of the scorer with _macro appended to the key. For example if a 3-fold twice repeated evaluation was run, with just r2 as the scorer, this parameter might look something like: :: self.std_scores = {'r2': .5, 'r2_macro': .01} ''' return self._std_scores @std_scores.setter @property def weighted_mean_scores(self): '''This property stores the mean scores across evaluation folds (simmilar to :data:`mean_scores<EvalResults.mean_scores>`), but weighted by the number of subjects / datapoints in each fold. It is scores as a dictionary indexed by the name of the scorer as the key, where values are the weighted mean score. 
''' return self._weighted_mean_scores @weighted_mean_scores.setter @property def scores(self): '''This property stores the scores for each scorer as a dictionary of lists, where the keys are the names of the scorer and the list represents the score obtained for each fold, where each index corresponds to to a fold of cross validation.''' return self._scores @scores.setter @property def score(self): '''This property represents a quick helper for accessing the mean scores of whatever the first scorer is (in the case of multiple scorers). ''' first_scorer = list(self.mean_scores)[0] return self.mean_scores[first_scorer] @property def ps(self): '''A saved and pre-processed version of the problem_spec used (with any extra_params applied) when running this instance of Evaluator.''' return self._ps @ps.setter @property def feat_names(self): '''The features names corresponding to any measures of feature importance, stored as a list of lists, where the top level list represents each fold of cross validation. This parameter may be especially useful when pipeline objects such as transformers or feature selectors are used as these can drastically change the features passed to an eventual model. The values stored here may change based on the passed value of the `decode_feat_names` parameter from :func:`evaluate`. For example the feat_names from a 3-fold cross-validation with input features ['feat1', 'feat2', 'feat3'] with feature selection as a piece of the pipeline may look like: :: self.feat_names = [['feat1', 'feat2'], ['feat2', 'feat3'], ['feat1', 'feat2']] ''' return self._feat_names @feat_names.setter @property def val_subjects(self): '''| This parameter stores the validation subjects / index used in every fold of the cross-validation. It can be useful in some cases to check to see exactly what cross-validation was applied. | This parameter differs from :data:`all_val_subjects<EvalResults.all_val_subjects>` in that even subjects with missing target values are not included. 
''' return self._val_subjects @val_subjects.setter @property def train_subjects(self): '''| This parameter stores the training subjects / index used in every fold of the cross-validation. It can be useful in some cases to check to see exactly what cross-validation was applied. | This parameter differs from :data:`all_train_subjects<EvalResults.all_train_subjects>` in that even subjects with missing target values are not included. ''' return self._train_subjects @train_subjects.setter @property def all_val_subjects(self): '''| This parameter stores the validation subjects / index used in every fold of the cross-validation. | This parameter differs from :data:`val_subjects<EvalResults.val_subjects>` in that even subjects with missing target values are included. ''' return self._all_val_subjects @all_val_subjects.setter @property def all_train_subjects(self): '''| This parameter stores the training subjects / index used in every fold of the cross-validation. | This parameter differs from :data:`train_subjects<EvalResults.train_subjects>` in that even subjects with missing target values are included. ''' return self._all_train_subjects @all_train_subjects.setter @property def n_subjects(self): '''A quicker helper property to get the sum of the length of :data:`train_subjects<EvalResults.train_subjects>` and :data:`val_subjects<EvalResults.val_subjects>`. If this number varies by fold, it will be set to None. This number is supposed to represent the number of subjects with non NaN targets used in the training and testing. ''' lens = [len(self.train_subjects[i]) + len(self.val_subjects[i]) for i in range(len(self.train_subjects))] if len(set(lens)) == 1: return lens[0] return None @property def n_folds(self): '''A quicker helper property to get the number of CV folds this object was evaluated with. 
''' # Just use len of train subjects as proxy for n_folds return len(self.train_subjects) @property def timing(self): '''This property stores information on the fit and scoring times, if requested by the original call to :func:`evaluate`. This parameter is a dictionary with two keys, 'fit' and 'score'. Each key stores the time in seconds as a list of values for each of the evaluation folds. ''' return self._timing @timing.setter @property def mean_timing(self): '''This property stores information on the fit and scoring times, if requested by the original call to :func:`evaluate`. This parameter is a dictionary with two keys, 'fit' and 'score'. Each key stores the mean time in seconds across folds. ''' return self._mean_timing @mean_timing.setter @property def preds(self): '''If the parameter `store_preds` is set to True when calling :func:`evaluate`, then this parameter will store the predictions from every evaluate fold. The parameter preds is a dictionary, where raw predictions made can be accessed by the key 'predict'. Values are stored as list corresponding to each evaluation fold. In the case where other predict-like functions are available, e.g., in the case of a binary problem, where it may be desirable to see the predicted probability, then the those predictions will be made available under the name of the underlying predict function. In this case, that is self.preds['predict_proba']. It will also store results from 'predict' as well. self.preds also will store under 'y_true' a list, where each element of the list corresponds to the corresponding true target values for the predictions made. ''' return self._preds @preds.setter @property def estimators(self): '''If the parameter `store_estimators` is set to True when calling :func:`evaluate`, then this parameter will store the fitted estimator in a list. Where each element of the list corresponds to one of the validation folds. 
For example to access the fitted estimator from this first fold :: first_est = self.estimators[0] ''' return self._estimators @estimators.setter @property def cv(self): '''If set to store CV is true, a deepcopy of the passed cv splitter will be stored''' try: return self._cv except AttributeError: return None @cv.setter def _evaluate(self, X, y, cv): '''cv is passed as raw index, X and y as dataframes.''' # Store a deep copy of cv if requested if self._store_cv: self.cv = deepcopy(cv) # Compute and warn about num nan targets n_nan_targets = pd.isnull(y).sum() if n_nan_targets > 0: self._print('Warning: There are', str(n_nan_targets) + ' missing', 'targets passed to evaluate. Subjects with missing', 'target values will be skipped during training and ' 'scoring.') if self.preds is not None: self._print('Predictions will still be made for any', 'subjects with missing values in', 'any validation folds.') # Verbose info self._print('Predicting target =', str(self.ps.target), level=1) self._print('Using problem_type =', str(self.ps.problem_type), level=1) self._print('Using scope =', str(self.ps.scope), '(defining a total of', str(X.shape[1]), 'features).', level=1) self._print(f'Evaluating {len(X)} total data points.', level=1) # Init scores as dictionary of lists self.scores = {scorer_str: [] for scorer_str in self.ps.scorer} # Save train and test subjs self.all_train_subjects, self.all_val_subjects = [], [] self.train_subjects, self.val_subjects = [], [] # Save final feat names self.feat_names = [] # Init progress bar / save and compute fold info from cv progress_bars = self._init_progress_bars(cv) self._print('Using CV: ', cv, 'to generate evaluation splits.', level=2) self._print(level=1) # Run each split for train_inds, val_inds in cv.split(X, y): # Eval self._eval_fold(X.iloc[train_inds], y.iloc[train_inds], X.iloc[val_inds], y.iloc[val_inds]) # Increment progress bars progress_bars = self._incr_progress_bars(progress_bars) # Clean up progress bars 
self._finish_progress_bars(progress_bars) # Compute and score mean and stds self._compute_summary_scores() def get_preds_dfs(self, drop_nan_targets=False): '''This function can be used to return the raw predictions made during evaluation as a list of pandas Dataframes. Parameters ------------ drop_nan_targets : bool, optional If False (default), then this method will return the DataFrame of predictions including targets with NaN. To skip these, e.g., in this case of plotting against ground truth or computing new metrics, set to True. :: default = False Returns --------- dfs : list of pandas.DataFrame list of dataframe's per fold, where each DataFrame contains predictions made. ''' dfs = [] # For each fold for fold_indx in range(len(self.all_val_subjects)): # Init df df = pd.DataFrame(index=self.all_val_subjects[fold_indx]) # Add each predict type as a column for predict_type in self.preds: ps = self.preds[predict_type][fold_indx] # Either float or multi-class case if isinstance(ps[0], (float, np.floating)): df[predict_type] = ps else: for cls in range(len(ps[0])): df[predict_type + '_' + str(cls)] = ps[:, cls] # Drop nan-cols if not requested if drop_nan_targets: nan_targets = df[df['y_true'].isna()].index df = df.drop(nan_targets) # Add to by fold list dfs.append(df) return dfs @property def feature_importances_(self): '''This property stores the mean values across fitted estimators assuming each fitted estimator has a non empty `feature_importances_` attribute.''' self._estimators_check() return get_mean_fis(self.estimators, 'feature_importances_') def get_feature_importances(self): '''This function returns each `feature_importances_` value across fitted estimators. If None have this parameter, it will return a list of None. Returns -------- feature_importances : list A list of `feature_importances_` where each element in the list refers to a fold from the evaluation. 
''' self._estimators_check() return [estimator.feature_importances_ for estimator in self.estimators] @property def coef_(self): '''This attribute represents the mean `coef_` as a numpy array across all folds. This parameter will only be available if all estimators have a non null `coef_` parameter and each returns the same shape. See `fis_` for a more flexible version of this parameter that can handle when there are differing numbers of features.''' self._estimators_check() return get_mean_fis(self.estimators, 'coef_') def get_coefs(self): '''This function returns each `coef_` value across fitted estimators. If None have this parameter, it will return a list of None. Returns -------- coefs : list A list of `coef_` where each element in the list refers to a fold from the evaluation. ''' self._estimators_check() return [estimator.coef_ for estimator in self.estimators] @property def fis_(self): '''This property stores the mean value across each fold of the CV for either the `coef_` or `feature_importance_` parameter. Warnings --------- If a feature is not present in all folds, then it's mean value will be computed from only the folds in which it was present. When using transformers, for example one hot encoder, since the encoding is done on the fly, there is no guarantee that 'one hot encoder category_1' is actually the same category 1 across folds. If for some reason some folds have a model with feature importances and other `coef_` they will still all be averaged together, so make sure that this parameter is only used when all of the underlying models across folds should have comparable feature importances. ''' # @TODO incoperate in information about the original # class names here // maybe in specific objects like # OneHotEncoder. 
self._estimators_check() # Grab fis as Dataframe or list of fis = self.get_fis() # Base case if isinstance(fis, pd.DataFrame): return fis.mean() # Categorical case return [fi.mean() for fi in fis] def get_fis(self, mean=False, abs=False): '''This method will return a pandas DataFrame with each row a fold, and each column a feature if the underlying model supported either the `coef_` or `feature_importance_` parameters. In the case that the underlying feature importances or `coefs_` are not flat, e.g., in the case of a one versus rest categorical model, then a list multiple DataFrames will be returned, one for each class. The order of the list will correspond to the order of classes. Parameters ----------- mean : bool, optional If True, return the mean value across evaluation folds as a pandas Series. Any features with a mean value of 0 will also be excluded. Otherwise, if default of False, return raw values for each fold as a Dataframe. :: default = False abs : bool, optional If the feature importances should be absolute values or not. :: default = False Returns -------- fis : pandas DataFrame or Series Assuming mean=False, the a pandas DataFrame where each row contains the feature importances from an evaluation fold (unless the underlying feature importances are categorical, in which a list of DataFrames will be returned.) If mean=True, then a pandas Series (or in the case of underlying categorical feature importances, list of) will be returned, with the mean value from each fold and all features with a value of 0 excluded. Note: To get the mean values without zero's excluded, just call .mean() on the result of this method with mean=False. ''' fis = self._get_base_fis_list() base = fis_to_df(fis) # Proc. 
abs arg if abs: if isinstance(base, list): base = [b.abs() for b in base] else: base = base.abs() # If not mean, return as is if not mean: return base # Categorical mean case if isinstance(base, list): return [mean_no_zeros(b) for b in base] # Base mean case return mean_no_zeros(base) def get_inverse_fis(self, fis=None): '''Try to inverse transform stored feature importances (either beta weights or automatically calculated feature importances) to their original space. .. warning:: If there are any underlying non-recoverable transformations in the pipeline, this method will fail! For example, if a PCA was applied, then a reverse transformation cannot be computed. This method can be especially helpful when using :class:`Loader`. Returns ------- inverse_fis : list of pandas Series | The inverse feature importances will be returned as a list, where each index of the list refers to a fold of the cross-validation, and each element of the list is either a pandas Series or a list of pandas Series (in the case of a categorical problem type where separate feature importances were calculated for each class). | If a :class:`Loader` was used, the returned Series may contain multi-dimensional arrays instead of scalar values, representing feature importances as transformed back into the original loaded space / shape. ''' # As list of series or list of list of series if fis is None: fis = self._get_base_fis_list() # If passed in df format, convert first # Drop any NaN also ~ # @ TODO handle categorical case ... 
elif isinstance(fis, pd.DataFrame): fis = [fis.loc[i].dropna() for i in range(len(fis))] # Otherwise, assumes passed inv_trans_fis = [] for i, fi in enumerate(fis): # The estimator for this fold estimator = self.estimators[i] # Non-categorical case if isinstance(fi, pd.Series): inv_trans_fis.append( estimator.inverse_transform_fis(fi)) # Categorical case else: cat_inv_fis =\ [estimator.inverse_transform_fis(f) for f in fi] inv_trans_fis.append(cat_inv_fis) return inv_trans_fis @doc(dataset=_base_docs['dataset']) def permutation_importance(self, dataset=None, n_repeats=10, scorer='default', just_model=True, nested_model=True, return_as='dfs', n_jobs=1, random_state='default'): '''This function computes the permutation feature importances from the base scikit-learn function :func:`sklearn.inspection.permutation_importance` Parameters ----------- {dataset} | If left as default=None, then will try to use a shallow copy of the dataset passed to the original evaluate call (assuming evaluate was run with store_data_ref=True). :: default = None n_repeats : int, optional The number of times to randomly permute each feature. :: default = 10 scorer : sklearn-style scoring, optional Scorer to use. It can be a single sklearn style str, or a callable. If left as 'default' will use the first scorer defined when evaluating the underlying estimator. :: default = 'default' just_model : bool, optional When set to true, the permutation feature importances will be computed using the final set of transformed features as passed when fitting the base model. This is reccomended behavior because it means that the features do not need to be re-transformed through the full pipeline to evaluate each feature. If set to False, will permute the features in the original feature space (which may be useful in some context). 
:: default = True nested_model : bool, optional In the case that `just_model` is set to True, there exists in some cases the further option to use an even more transformed set of features. For example, in the case where in the main pipeline the final estimator is another pipeline, there could be more static transformations applied in this second pipeline. If `nested_model` is set to True, then it will attempt to apply these further nested transformations in the same way as with just_model, feeding in eventually an even further transformed set of features and even more specific final estimator when calculating the permutation feature importances. By default, this value is True, so the calculated feature importances here will correspond to the saved `self.feat_names` in this object. :: default = True return_as : ['dfs', 'raw'], optional This parameter controls if calculated permutation feature importances should be returned as a DataFrame with column names as the corresponding feature names, or if it should be returned as a list with the raw output from each fold, e.g., sklearn Batch's with parameters 'importances_mean', 'importances_std' and 'importances'. If return as DataFrame is requested, then 'importances_mean' and 'importances_std' will be returned, but not the raw 'importances'. :: default = 'dfs' n_jobs : int, optional The number of jobs to use for this function. Note that if the underlying estimator supports multiple jobs during inference (predicting), and the original problem_spec was set with multiple n_jobs then that original behavior will still hold, and you may wish to keep this parameter as 1. On the otherhand, if the base estimator does not use multiple jobs, passing a higher value here could greatly speed up computation. :: default = 1 random_state : int, 'default' or None, optional Pseudo-random number generator to control the permutations of each feature. 
If left as 'default' then use the random state defined during the initial evaluation of the model. Otherwise, you may pass an int for a different fixed random state or None for explicitly no random state. :: default = 'default' ''' # @TODO in case of just_model = False, won't pass along # transform_index correctly to scorer. from sklearn.inspection import permutation_importance # Check dataset dataset = self._dataset_check(dataset) # Check estimators self._estimators_check() # If default scorer, take the first one if scorer == 'default': first = list(self.ps.scorer)[0] scorer = self.ps.scorer[first] self._print('Using scorer:', first, level=1) # If default random_state use the one saved in # original problem spec. if random_state == 'default': random_state = self.ps.random_state # Get X and y from saved problem spec X, y = dataset.get_Xy(self.ps) # For each estimator all_fis, all_feat_names = [], [] for fold, estimator in enumerate(self.estimators): # Get correct estimator, X_val, y_val and feat_names estimator, X_val, y_val, feat_names =\ self._get_val_fold_Xy(estimator, X_df=X, y_df=y, fold=fold, just_model=just_model, nested_model=nested_model) all_feat_names.append(feat_names) # Run the sklearn feature importances. 
fis = permutation_importance(estimator, X_val, y_val, scoring=scorer, n_repeats=n_repeats, n_jobs=n_jobs, random_state=random_state) # Add to all fis all_fis.append(fis) # If raw, return as raw if return_as == 'raw': return all_fis # Otherwise, use return df mean_series, std_series = [], [] for fis, feat_names in zip(all_fis, all_feat_names): mean_series.append( fi_to_series(fis['importances_mean'], feat_names)) std_series.append( fi_to_series(fis['importances_std'], feat_names)) # Return as sklearn bunch of DataFrames return Bunch(importances_mean=fis_to_df(mean_series), importances_std=fis_to_df(std_series)) @doc(dataset=_base_docs['dataset']) def get_X_transform_df(self, dataset=None, fold=0, subjects='tr', nested_model=True, trans_y=False): '''This method is used as a helper for getting the transformed input data for one of the saved models run during evaluate. Parameters ----------- {dataset} | If left as default=None, then will try to use a shallow copy of the dataset passed to the original evaluate call (assuming evaluate was run with store_data_ref=True). :: default = None fold : int, optional The corresponding fold of the trained estimator to use. subjects : 'tr', 'val' or :ref:`Subjects`, optional The subjects data in which to return. As either special strings 'tr' for train subjects in the corresponding fold. Special str 'val' for the validation subjects in the selected for or lastly any valid :ref:`Subjects` style input. :: default = 'tr' nested_model : bool, optional In the case where the final estimator is itself a nested pipeline, the user may want to apply any of those transformations too. If passed as True, then these transformed features will apply to the fitted estimators `self._nested_final_estimator`, which may not be the same a the base `self._final_estimator`. Note: In the case of some complex nested ensemble, this method may break. 
:: default = False trans_y : bool, optional Can optionally try to tranform y along with X, this is experimental designed to work with samplers. Default is False, as not 100% confident will work correctly in all cases. default = False Returns ---------- X_trans_df : pandas DataFrame The transformed features in a DataFrame, according to the saved estimator from a fold, for the specified subjects. If kept as the default of subjects == 'tr', then these represent the feature values as passed to trained the actual model component of the pipeline. ''' # Check dataset dataset = self._dataset_check(dataset) # This method requires that the fitted estimators # were saved. self._estimators_check() # Estimator from the fold estimator = self.estimators[fold] if subjects == 'tr': subjects = self.train_subjects[fold] elif subjects == 'val': subjects = self.val_subjects[fold] # Get as X dataframe, since passing df don't need to worry # about transform_index X_fold, y_fold = dataset.get_Xy(problem_spec=self.ps, subjects=subjects) # Get feature names from fold if nested_model: feat_names = self.feat_names[fold] # If not using nested_model, need to re-calculate else: feat_names = estimator.transform_feat_names(X_fold, encoders=self.encoders_, nested_model=False) # Trans y experimental case if trans_y: X_trans_fold, y_trans_fold, transform_index =\ estimator.transform(X_fold, nested_model=nested_model, trans_y=y_fold) X_trans_df = pd.DataFrame(X_trans_fold, columns=feat_names, index=transform_index) y_series = pd.Series(y_trans_fold, index=transform_index) return X_trans_df, y_series # Transform the data up to right before it gets passed to the # final model X_trans_fold = estimator.transform(X_fold, nested_model=nested_model) # Put the data in a dataframe with associated feature names, and index then return return pd.DataFrame(X_trans_fold, columns=feat_names, index=X_fold.index) def compare(self, other, rope_interval=[-0.01, 0.01]): '''This method is designed to perform a statistical 
comparison between the results from the evaluation stored in this object and another instance of :class:`EvalResults`. The statistics produced here are explained in: https://scikit-learn.org/stable/auto_examples/model_selection/plot_grid_search_stats.html .. note:: In the case that the sizes of the training and validation sets at each fold vary dramatically, it is unclear if this statistics are still valid. In that case, the mean train size and mean validation sizes are employed when computing statistics. Parameters ------------ other : :class:`EvalResults` Another instance of :class:`EvalResults` in which to compare which. The cross-validation used should be the same in both instances, otherwise statistics will not be generated. rope_interval : list or dict of | This parameter allows for passing in a custom region of practical equivalence interval (or rope interval) a concept from bayesian statistics. If passed as a list, this should be a list with two elements, describing the difference in score which should be treated as two models or runs being practically equivalent. | Alternatively, in the case of multiple underlying scorers / metrics. A dictionary, where keys correspond to scorer / metric names can be passed with a separate rope_interval for each. For example: :: rope_interval = {'explained_variance': [-0.01, 0.01], 'neg_mean_squared_error': [-1, 1]} This example would define separate rope regions depending on the metric. :: default = [-0.01, 0.01] Returns ------- compare_df : pandas DataFrame | The returned DataFrame will generate separate rows for all overlapping metrics / scorers between the evaluators being compared. 
Further, columns with statistics of interest will be generated: - 'mean_diff' The mean score minus other's mean score - 'std_diff' The std minus other's std | Further, only in the case that the cross-validation folds are identical between the comparisons, the following additional columns will be generated: - 't_stat' Corrected paired ttest statistic. - 'p_val' The p value for the corrected paired ttest statistic. - 'better_prob' The probability that this evaluated option is better than the other evaluated option under a bayesian framework and the passed value of rope_interval. See sklearn example for more details. - 'worse_prob' The probability that this evaluated option is worse than the other evaluated option under a bayesian framework and the passed value of rope_interval. See sklearn example for more details. - 'rope_prob' The probability that this evaluated option is equivalent to the other evaluated option under a bayesian framework and the passed value of rope_interval. See sklearn example for more details. 
''' equal_cv = True # Make sure same number of folds if len(self.train_subjects) != len(other.train_subjects): equal_cv = False # Make sure subjects from folds line up for fold in range(len(self.train_subjects)): if not np.array_equal(self.train_subjects[fold], other.train_subjects[fold]): equal_cv = False if not np.array_equal(self.val_subjects[fold], other.val_subjects[fold]): equal_cv = False # Only compute for the overlapping metrics overlap_metrics = set(list(self.mean_scores)).intersection(set( list(other.mean_scores))) for metric in overlap_metrics: if np.array_equal(self.scores[metric], other.scores[metric]): raise RuntimeError( f'Cannot compare as scores are identical for {metric}.') # Init difference dataframe dif_df = pd.DataFrame(index=list(overlap_metrics)) # Add base differences for metric in overlap_metrics: dif_df.loc[metric, 'mean_diff'] =\ self.mean_scores[metric] - other.mean_scores[metric] dif_df.loc[metric, 'std_diff'] =\ self.std_scores[metric] - other.std_scores[metric] # Only compute p-values if equal cv if equal_cv: for metric in overlap_metrics: # Grab scores and other info scores1 = np.array(self.scores[metric]) scores2 = np.array(other.scores[metric]) differences = scores1 - scores2 n = len(scores1) df = n - 1 # Use the mean train / test size n_train = np.mean([len(ti) for ti in self.train_subjects]) n_test = np.mean([len(ti) for ti in self.val_subjects]) # Frequentist Approach t_stat, p_val = compute_corrected_ttest(differences, df, n_train, n_test) dif_df.loc[metric, 't_stat'] = t_stat dif_df.loc[metric, 'p_val'] = p_val # Bayesian t_post = t(df, loc=np.mean(differences), scale=corrected_std(differences, n_train, n_test)) # Passed as either list of two values or dict if isinstance(rope_interval, dict): ri = rope_interval[metric] else: ri = rope_interval worse_prob = t_post.cdf(ri[0]) better_prob = 1 - t_post.cdf(ri[1]) rope_prob =\ t_post.cdf(ri[1]) - t_post.cdf(ri[0]) # Add to dif_df dif_df.loc[metric, 'better_prob'] = better_prob 
dif_df.loc[metric, 'worse_prob'] = worse_prob dif_df.loc[metric, 'rope_prob'] = rope_prob return dif_df @doc(dataset=_base_docs['dataset']) def subset_by(self, group, dataset=None, decode_values=True): '''Generate instances of :class:`EvalResultsSubset` based on subsets of subjects based on different unique groups. This method is used to analyze results as broken down by the different unique groups of a column in the passed :class:`Dataset`. Note that the train subjects in resulting breakdown will not change, that only the validation sets will be split by group. Parameters ------------ group : str The name of a column within the passed dataset that defines the different subsets of subjects. This column must be categorical and have no missing values. {dataset} | If left as default=None, then will try to use a shallow copy of the dataset passed to the original evaluate call (assuming evaluate was run with store_data_ref=True). :: default = None decode_values : bool If the original values of the group column were encoded via a :class:`Dataset` function, this if True, this function will try to represent values by their original name rather than the name used internally. If False, then the internal ordinal number value will be used. :: default = True Returns --------- subsets : dict of :class:`EvalResultsSubset` | Returns a dictionary of :class:`EvalResultsSubset`, where keys are generated as a representation of the value stored for each unique group. If decode_values is True, then these values are the original names otherwise they are the internal names. | Saved under each key is an instance of :class:`EvalResultsSubset`, which can be treated the same as an instance of :class:`EvalResults`, except it has a subset of values for val_subjects, and different preds and scores representing this subset. 
''' from .compare import compare_dict_from_existing # Check dataset dataset = self._dataset_check(dataset) if self.preds is None: raise RuntimeError('store_preds must have been set ' 'to True to use this function.') subsets = {} # Make sure exists, is categorical and no NaN dataset._validate_group_key(group, name='group') # Get the values for just this column values = dataset._get_values(group, decode_values=decode_values) # Add a subset for each set of values for value in values.unique(): subset_name = clean_str(f'{group}={value}') # Get all subjects with this value subjs = values[values == value].index # Get evaluator subset subsets[clean_str(value)] =\ EvalResultsSubset(self, subjs, subset_name=subset_name) # Return as compare dict, so we have access to the summary function return compare_dict_from_existing(subsets) def to_pickle(self, loc): '''Quick helper to save as pickle. Parameters ----------- loc : str The location in which to save the results object. ''' with open(loc, 'wb') as f: pkl.dump(self, f) @doc(dataset=_base_docs['dataset']) def run_permutation_test(self, n_perm=100, dataset=None, random_state=None, blocks=None, within_grp=True, plot=False): '''Compute signifigance values for the original results according to a permutation test scheme. In this setup, we estimate the null model by randomly permuting the target variable, and re-evaluating the same pipeline according to the same CV. In this manner, a null distribution of size `n_perm` is generated in which we can compare the real, unpermuted results to. Note: If using a custom scorer, w/ no sign_ attribute, this method will assume that higher values for metrics are better. Parameters ------------ n_perm : int, optional The number of permutations to test. :: default = 100 {dataset} | If left as default=None, then will try to use a shallow copy of the dataset passed to the original evaluate call (assuming evaluate was run with store_data_ref=True). 
:: default = None random_state : int, or None, optional Pseudo-random number generator to control the permutations of each feature. If left as None, then initialize a new random state for each permutation. :: default = None blocks : None, array, pd.Series or pd.DataFrame, optional This parameter is only available when the neurotool library is installed. See: https://github.com/sahahn/neurotools This parameter represents the underlying exchangability-block structure of the data passed. It is also used to constrain the possible permutations in some way. See PALM's documentation for an introduction on how to format ExchangeabilityBlocks: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM/ExchangeabilityBlocks This parameter accepts the same style input as PALM, except it is passed here as an array or DataFrame instead of as a file. The main requirement is that the shape of the structure match the number of subjects / data points in the first dimension. :: default = None within_grp : bool, optional This parameter is only relevant when a permutation structure / blocks is passed, in that case it describes how the left-most exchanability / permutation structure column should act. Specifically, if True, then it specifies that the left-most column should be treated as groups to act in a within group swap only manner. If False, then it will consider the left-most column groups to only be able to swap at the group level with other groups of the same size. :: default = True plot : bool, optional Can optionally add a plot visualizing the true result in comparison to the generated null distribution. :: default = False Returns ------------ p_values : dict of float A dictionary, as indexed by all of the valid metrics, with the computed p-values. p_scores : dict of array The null distribution, as indexed by all of the valid metrics, of scores. 
''' # Check dataset dataset = self._dataset_check(dataset) # Make sure cv is saved if self.cv is None: raise RuntimeError('The original call to evaluate must have had store_cv set to True to use this method.') # Init rng rng = default_rng(random_state) # X stays the same X, _ = dataset.get_Xy(self.ps) p_scores = {} for _ in range(n_perm): # Get the random seed for this permutation try: rng_integers = rng.integers except AttributeError: rng_integers = rng.randint rs = rng_integers(147483648) # Get permuted y y_perm = dataset._get_permuted_y(self.ps, random_state=rs, blocks=blocks, within_grp=within_grp) # Init silent copy to eval with p_eval = EvalResults(estimator=self.estimator, ps=self.ps, encoders=self.encoders_, progress_bar=False, store_preds=False, store_estimators=False, store_timing=False, store_cv=False, store_data_ref=False, eval_verbose=-2, progress_loc=None, mute_warnings=False, compare_bars=None) # Evaluate p_eval._eval(X, y_perm, cv=deepcopy(self.cv)) # Extract scores and add to baseline for metric in p_eval.mean_scores: try: p_scores[metric].append(p_eval.mean_scores[metric]) except KeyError: p_scores[metric] = [p_eval.mean_scores[metric]] # Convert to p-values p_values, null_dist_means, null_dist_stds = {}, {}, {} for metric in p_scores: # Sort actual w/ null dist actual = self.mean_scores[metric] base = np.vstack(p_scores[metric] + [actual]) sorted_base = np.sort(base, axis=0) # Get ind in sorted ind = np.where(sorted_base == actual)[0][0] # Compute p-value, if no info on higher better, # e.g., custom scorer, then we just assume higher better. 
higher_better = True if hasattr(self.ps.scorer[metric], '_sign'): higher_better = bool(self.ps.scorer[metric]._sign) # Use version based on if higher better if higher_better: p_values[metric] = (n_perm - ind + 1) / (n_perm + 1) else: p_values[metric] = (ind + 1) / (n_perm + 1) # Add means and stds null_dist_means[metric] = np.mean(p_scores[metric]) null_dist_stds[metric] = np.std(p_scores[metric]) # Optionally make plot if plot: if len(p_scores) == 1: n_rows, n_cols = 1, 1 else: n_rows, n_cols = (len(p_scores) // 2) + (len(p_scores) % 2), 2 # Init sub plots _, ax = plt.subplots(n_rows, n_cols, figsize=(n_cols * 8, n_rows * 6)) # Plot each metric for row in range(n_rows): for col in range(n_cols): if len(p_scores) == 1: a = ax elif n_rows == 1: a = ax[col] elif n_cols == 1: a = ax[row] else: a = ax[row][col] # Get current metric try: metric = list(p_scores)[col + (row * n_cols)] except IndexError: continue # Base hist sns.histplot(p_scores[metric], ax=a, kde=True, label=f'Null Dist. (Mean): {null_dist_means[metric]:.3f}') # Add vert line a.axvline(self.mean_scores[metric], color='Red', linewidth=6, label=f'Baseline: {self.mean_scores[metric]:.3f} (pval={p_values[metric]:.3f})') # Add legend + title a.legend() a.set_title(metric) # Return p_values and each of the null results return p_values, p_scores class EvalResultsSubset(EvalResults): '''This class represents a subset of :class:`EvalResults` and is returned as a result of calling :func:`EvalResults.subset_by`. This class specifically updates values for a subset of val_subjects, which mean only the following attributes are re-calculated / will be different from the source :class:`EvalResults` :: val_subjects all_val_subjects preds scores mean_scores weighted_mean_scores ''' # TODO -
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 256, 80, 36020, 1330, 256, 80, 36020, 198, 6738, 256, 80, 36020, 13, 11295, 2070, 1330, 256, 80, 36020, 355, 256, 80, 36020, 62, 11295, 2070, 198, 6738, ...
2.251519
23,700
""" This script contains the function loading data from csv file with or without header. the csv file must has the first column as the timestamps, while the other columns are features. """ from dev.util import logger import numpy as np import pandas as pd import copy import time from typing import Dict def load_csv(file, single_realization=False, categorical=False): ''' :param file: str or pandas.dataframe. string is the location of file. :param single_realization: if the dataset contains multiple realizations, then the first column is the number of realization (seq_no). or if the dataset has only one realization, then the first column will be the timestamps. :return: a dataframe with column name seq_no, timestamp, and features. ''' if isinstance(file, str): logger.info('Start to load from csv file...') df = pd.read_csv(file, index_col=0) elif isinstance(file, pd.DataFrame): df = file.copy() else: raise TypeError('file must be a string or a pandas.dataframe') # TODO: check timestamp is ordered or not. if single_realization: col_names = ['timestamp'] + ['x' + str(i) for i in np.arange(df.shape[1] - 1)] df.columns = col_names df['seq_no'] = np.zeros(df.__len__()) df = df[['seq_no'] + col_names] df = df.sort_values(by='timestamp') else: col_names = ['seq_no', 'timestamp'] + ['x' + str(i) for i in np.arange(df.shape[1] - 2)] df.columns = col_names seq_dict_keys = list(df['seq_no'].drop_duplicates()) seq_dict_values = np.arange(seq_dict_keys.__len__()) seq_dict = dict(zip(seq_dict_keys, seq_dict_values)) df['seq_no'] = df['seq_no'].replace(seq_dict) df = df.sort_values(by=['seq_no', 'timestamp']) if categorical: category_keys = list(df['x0'].drop_duplicates()) category_values = np.arange(category_keys.__len__()) cate_dict = dict(zip(category_keys, category_values)) df['x0'] = df['x0'].replace(cate_dict) df.index = np.arange(df.__len__()) return df
[ 37811, 198, 1212, 4226, 4909, 262, 2163, 11046, 1366, 422, 269, 21370, 2393, 351, 393, 1231, 13639, 13, 198, 1169, 269, 21370, 2393, 1276, 468, 262, 717, 5721, 355, 262, 4628, 395, 9430, 11, 981, 262, 584, 15180, 389, 3033, 13, 198, ...
2.510689
842
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Pipeline to load appengine applications into Inventory.""" from google.cloud.security.common.data_access import project_dao as proj_dao from google.cloud.security.common.util import log_util from google.cloud.security.common.util import parser from google.cloud.security.inventory.pipelines import base_pipeline LOGGER = log_util.get_logger(__name__) class LoadAppenginePipeline(base_pipeline.BasePipeline): """Load all AppEngine applications for all projects.""" RESOURCE_NAME = 'appengine' def _retrieve(self): """Retrieve AppEngine applications from GCP. Get all the projects in the current snapshot and retrieve the AppEngine applications for each. Returns: dict: Mapping projects with their AppEngine applications: {project_id: application} """ projects = ( proj_dao .ProjectDao(self.global_configs) .get_projects(self.cycle_timestamp)) apps = {} for project in projects: app = self.safe_api_call('get_app', project.id) if app: apps[project.id] = app return apps def _transform(self, resource_from_api): """Create an iterator of AppEngine applications to load into database. Args: resource_from_api (dict): AppEngine applications, keyed by project id, from GCP API. Yields: iterator: AppEngine applications in a dict. 
""" for project_id, app in resource_from_api.iteritems(): yield {'project_id': project_id, 'name': app.get('name'), 'app_id': app.get('id'), 'dispatch_rules': parser.json_stringify( app.get('dispatchRules', [])), 'auth_domain': app.get('authDomain'), 'location_id': app.get('locationId'), 'code_bucket': app.get('codeBucket'), 'default_cookie_expiration': app.get( 'defaultCookieExpiration'), 'serving_status': app.get('servingStatus'), 'default_hostname': app.get('defaultHostname'), 'default_bucket': app.get('defaultBucket'), 'iap': parser.json_stringify(app.get('iap', {})), 'gcr_domain': app.get('gcrDomain'), 'raw_application': parser.json_stringify(app)} def run(self): """Run the pipeline.""" apps = self._retrieve() loadable_apps = self._transform(apps) self._load(self.RESOURCE_NAME, loadable_apps) self._get_loaded_count()
[ 2, 15069, 2177, 383, 27325, 316, 72, 4765, 46665, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11...
2.395214
1,379
#!/usr/bin/env python # -*- coding: utf-8 -*- """ providing trait objects with an id """ from collections import Hashable import uuid import traitlets as trait import ipywidgets as widgets from matplotlib import colors import pandas as pd class HashableType(trait.TraitType): """ Examples -------- >>> hash = HashableType() >>> hash.validate(object, 1) 1 >>> hash.validate(object, (1,1,1)) (1, 1, 1) >>> try: ... hash.validate(object, {'a':1}) ... print('validated') ... except: ... print('not validated') not validated """ info_text = ('a value that is hashable') default_value = 1 class IDObject(widgets.Widget): """ an object with an id """ id = HashableType() groups = trait.List(trait=trait.CUnicode(),default_value=("all",), help='the groups that this object belongs to') other_info = trait.CUnicode('',help='other information about the object as HTML').tag(sync=True) @trait.default('id') def get_object_trait_names(self): """ get trait names which are only associated with the object, i.e. not from the ipywidgets base class """ base_ipywidget_traits = set(widgets.Widget().trait_names()) all_traits = set(self.trait_names()) return list(all_traits.difference(base_ipywidget_traits)) def trait_series(self): """ create pandas.Series of objects traits Examples -------- >>> obj = IDObject(id=1,other_info='test') >>> obj.trait_series() groups (all,) id 1 other_info test dtype: object """ trait_dict = {} for name in self.get_object_trait_names(): value = getattr(self, name) # might break series if cell value is a list value = tuple(value) if isinstance(value, list) else value trait_dict[name] = value return pd.Series(trait_dict) # def _repr_latex_(self): # """ # """ # return self.trait_series().to_latex() def __repr__(self): """ visualising in jupyter notebook """ return self.trait_series().to_string() class Color(trait.TraitType): """ a trait type that validates a color_like value: hex str, rgb/rgba tuple (0 to 1) or valid html name Examples -------- >>> color = Color() >>> 
color.validate(object, (1,1,1)) (1, 1, 1) >>> color.validate(object, 'red') 'red' >>> color.validate(object, '#ff0000') '#ff0000' >>> try: ... color.validate(object, 1) ... print('validated') ... except: ... print('not validated') not validated """ info_text = ('a color_like value: ' 'hex str, rgb/rgba tuple (0 to 1) or valid html name') default_value = 'red' class Vector3(trait.TraitType): """ converts numpy arrays """ info_text = 'a 3d vector' default_value = (0.,0.,0.) class GeometricObject(IDObject): """ a geometric object x,y,z should represent the centre of volume Examples -------- >>> gobject = GeometricObject() >>> gobject.position (0.0, 0.0, 0.0) """ position = Vector3(default_value=(0,0,0),help='cartesian coordinate of pivot').tag(sync=True) visible = trait.Bool(True).tag(sync=True) color = Color('red').tag(sync=True) transparency = trait.CFloat(1,min=0.0,max=1.0).tag(sync=True) label = trait.CUnicode('-').tag(sync=True).tag(sync=True) label_visible = trait.Bool(False).tag(sync=True) label_color = Color('red').tag(sync=True) label_transparency = trait.CFloat(1,min=0.0,max=1.0).tag(sync=True) def default_viewmap(label_height=None): """ a wrapper to signal that all subclass attributes should be directly linked to the default view mapping Properties ---------- label_height : None or str the attribute to link to label height if None, no label is created """ return decorator @default_viewmap('radius') class Sphere(GeometricObject): """ a spherical object Examples -------- >>> sphere = Sphere() >>> sphere.position (0.0, 0.0, 0.0) >>> sphere.radius 1.0 >>> try: ... sphere.radius = -1 ... except Exception as err: ... 
print(err) The value of the 'radius' trait of a Sphere instance should not be less than 0.0, but a value of -1.0 was specified """ radius = trait.CFloat(1,min=0.).tag(sync=True) # TODO orientation of default geometries @default_viewmap('height') class Box(GeometricObject): """ a spherical object Examples -------- >>> object = Box() >>> object.position (0.0, 0.0, 0.0) """ width = trait.CFloat(1).tag(sync=True) height = trait.CFloat(1).tag(sync=True) depth = trait.CFloat(1).tag(sync=True) @default_viewmap('radius') class Octahedron(GeometricObject): """ a spherical object Examples -------- >>> object = Circle() >>> object.position (0.0, 0.0, 0.0) """ radius = trait.CFloat(1).tag(sync=True) detail = trait.CFloat(0).tag(sync=True) @default_viewmap('radius') class Icosahedron(GeometricObject): """ a spherical object Examples -------- >>> object = Circle() >>> object.position (0.0, 0.0, 0.0) """ radius = trait.CFloat(1).tag(sync=True) detail = trait.CFloat(0).tag(sync=True) @default_viewmap('radius') class Circle(GeometricObject): """ a spherical object Examples -------- >>> object = Circle() >>> object.position (0.0, 0.0, 0.0) """ radius = trait.CFloat(1).tag(sync=True) segments = trait.CFloat(36).tag(sync=True) class Plane(GeometricObject): """ a plane object Examples -------- >>> object = Plane() >>> object.position (0.0, 0.0, 0.0) """ normal = Vector3(default_value=(0,0,1),help='the normal vector of the plane').tag(sync=True) width = trait.CFloat(1,min=0.0).tag(sync=True) @trait.validate('normal') class Line(GeometricObject): """ a line object Examples -------- >>> line = Line() >>> line.position (0.0, 0.0, 0.0) >>> line.end (1.0, 1.0, 1.0) >>> try: ... line.linewidth = -1 ... except Exception as err: ... 
print(err) The value of the 'linewidth' trait of a Line instance should not be less than 0.0, but a value of -1.0 was specified """ end = Vector3(default_value=(1,1,1),help='cartesian coordinate of line end').tag(sync=True) end_color = Color('red').tag(sync=True) linewidth = trait.CFloat(1,min=0.0).tag(sync=True) # TDOD only development version of PlainGeometry exposes face colors class TriclinicSolid(GeometricObject): """ a wireframe object Examples -------- >>> box = TriclinicSolid() >>> box.position (0.0, 0.0, 0.0) >>> box.a (1.0, 0.0, 0.0) >>> try: ... box.pivot = '' ... except Exception as err: ... print(err) pivot must be at the centre or corner """ a = Vector3(default_value=(1,0,0),help='box vector a').tag(sync=True) b = Vector3(default_value=(0,1,0),help='box vector b').tag(sync=True) c = Vector3(default_value=(0,0,1),help='box vector c').tag(sync=True) pivot = trait.CUnicode('centre',help='pivot about centre or corner').tag(sync=True) @trait.validate('pivot') class TriclinicWire(TriclinicSolid): """ a wireframe object Examples -------- >>> box = TriclinicWire() >>> box.position (0.0, 0.0, 0.0) >>> box.a (1.0, 0.0, 0.0) >>> try: ... box.linewidth = '' ... except: ... print('not valid') not valid """ linewidth = trait.CFloat(1).tag(sync=True) # TODO Gimbal: add labels at end of each vector class Gimbal(GeometricObject): """ a gimbal object pointing to basis vectors default: a red, b green, c orange, Examples -------- >>> gimbal = Gimbal() >>> gimbal.position (0.0, 0.0, 0.0) >>> gimbal.a (1.0, 0.0, 0.0) >>> gimbal.a_color 'red' >>> try: ... gimbal.linewidth = '' ... except: ... 
print('not valid') not valid """ a = Vector3(default_value=(1,0,0),help='vector a').tag(sync=True) b = Vector3(default_value=(0,1,0),help='vector b').tag(sync=True) c = Vector3(default_value=(0,0,1),help='vector c').tag(sync=True) a_color = Color('red').tag(sync=True) b_color = Color('green').tag(sync=True) c_color = Color('orange').tag(sync=True) linewidth = trait.CFloat(1,min=0.0).tag(sync=True)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 4955, 16708, 5563, 351, 281, 4686, 198, 198, 37811, 198, 6738, 17268, 1330, 21059, 540, 198, 11748, 334, 27...
2.264165
3,971
import click import mlflow import logging logging.basicConfig(level=logging.INFO, format="%(asctime)-15s %(message)s") logger = logging.getLogger() @click.command(help="This program registers a trained model .") @click.option("--mlflow_run_id", default=None, help="This is the mlflow run id") @click.option("--registered_model_name", default="dl_finetuned_model", help="This is the registered model name.") @click.option("--pipeline_run_name", default="chapter04", help="This is the mlflow run name.") if __name__ == '__main__': task()
[ 11748, 3904, 198, 11748, 285, 1652, 9319, 198, 11748, 18931, 198, 198, 6404, 2667, 13, 35487, 16934, 7, 5715, 28, 6404, 2667, 13, 10778, 11, 5794, 2625, 4, 7, 292, 310, 524, 13219, 1314, 82, 4064, 7, 20500, 8, 82, 4943, 198, 6404, ...
2.881443
194
import re import svgwrite from collections import OrderedDict from datetime import datetime, timezone from dateutil.relativedelta import relativedelta from subprocess import check_output from config import configuration from svgwrite import Drawing from svgwrite.shapes import Rect, Circle from svgwrite.text import Text from svgwrite_title import Title if __name__ == "__main__": import sys name = sys.argv[1] if len(sys.argv) > 2 else "history.svg" create_timeline(name)
[ 11748, 302, 198, 11748, 38487, 70, 13564, 198, 6738, 17268, 1330, 14230, 1068, 35, 713, 198, 6738, 4818, 8079, 1330, 4818, 8079, 11, 640, 11340, 198, 6738, 3128, 22602, 13, 2411, 265, 1572, 12514, 1330, 48993, 1572, 12514, 198, 6738, 85...
2.971264
174
import spacy nlp = spacy.load("en") text = """Natural Language Toolkit, or more commonly NLTK, is a suite of libraries and programs for symbolic and statistical natural language processing (NLP) for English written in the Python programming language. It was developed by Steven Bird and Edward Loper in the Department of Computer and Information Science at the University of Pennsylvania.""" text = nlp(text) sent_tokenize = (list(text.sents)) for sent in sent_tokenize: print (sent)
[ 11748, 599, 1590, 198, 21283, 79, 796, 599, 1590, 13, 2220, 7203, 268, 4943, 198, 5239, 796, 37227, 35364, 15417, 16984, 15813, 11, 393, 517, 8811, 22879, 51, 42, 11, 318, 257, 18389, 286, 12782, 290, 4056, 329, 18975, 290, 13905, 328...
4.024793
121
# -*- python -*- # This software was produced by NIST, an agency of the U.S. government, # and by statute is not subject to copyright in the United States. # Recipients of this software assume all responsibilities associated # with its operation, modification and maintenance. However, to # facilitate maintenance we ask that before distributing modified # versions of this software, you first contact the authors at # oof_manager@nist.gov. # Collect information to send as an error report from ooflib.SWIG.common import switchboard from ooflib.SWIG.image import oofimage3d as oofimage from ooflib.common import debug from ooflib.common import installationLog from ooflib.common.IO import filenameparam from ooflib.common.IO import mainmenu from ooflib.common.IO import oofmenu from ooflib.common.IO import parameter from ooflib.common.IO import reporter import os import os.path import platform import shutil import sys import tarfile import tempfile import time #goes through the reporter messages and collects the python log messages # Parses through python log to find files that were inputted/outputed into/out of OOF # and seperates those file names based on if they are input, output or have been deleted # tars a directory with all the error report files # end openReportError errorreportermenu = mainmenu.OOF.Help.addItem(oofmenu.OOFMenuItem( 'Report_Error', callback= openReportError, params=[ filenameparam.WriteFileNameParameter( 'filename',tip="Name of the report file."), parameter.BooleanParameter( 'pythonlog', True), parameter.BooleanParameter( 'pythonlogandoutputs', True), parameter.BooleanParameter( 'installationlog', True), parameter.ListOfStringsParameter( 'inputfiles', tip="Names of input files"), parameter.ListOfStringsParameter( 'outputfiles', tip="Names of output files"), parameter.ListOfStringsParameter( 'deletedfiles', tip="Names of input/output files no longer on the system"), parameter.StringParameter('comments')], no_log=1, help ="Choose information to send as 
an error report")) errorreportermenu.addItem(oofmenu.OOFMenuItem( 'Add_Input_File', callback=addInputFile, params=[filenameparam.FileOrDirectoryParameter( 'filename', ident="add", tip="Name of the input file or directory.")], ellipsis=1, no_log=1, help="Add an input file."))
[ 2, 532, 9, 12, 21015, 532, 9, 12, 198, 198, 2, 770, 3788, 373, 4635, 416, 399, 8808, 11, 281, 4086, 286, 262, 471, 13, 50, 13, 1230, 11, 198, 2, 290, 416, 14195, 318, 407, 2426, 284, 6634, 287, 262, 1578, 1829, 13, 198, 2, 3...
3.021687
830
# Traveling Salesman # # Author: Maxwell Rahmani # Date: September 18, 2020 # Version: 1.0.0 # Python Version: 2.7.16 # Description: # ------------------------------------------------------------------------------ # Imports import sys from src.Model import * from src.Environment import * from src.Hyperparameters import * if __name__ == '__main__': main(sys.argv[1:])
[ 2, 13524, 278, 17329, 805, 198, 2, 198, 2, 6434, 25, 28276, 46611, 72, 198, 2, 7536, 25, 2693, 220, 1248, 11, 12131, 198, 2, 10628, 25, 352, 13, 15, 13, 15, 198, 2, 11361, 10628, 25, 362, 13, 22, 13, 1433, 198, 2, 12489, 25, ...
3.481481
108
from enum import Enum
[ 6738, 33829, 1330, 2039, 388, 628 ]
3.833333
6
# -*- coding: utf-8 -*- # Generated by Django 1.10.3 on 2016-11-15 19:00 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 940, 13, 18, 319, 1584, 12, 1157, 12, 1314, 678, 25, 405, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 19...
2.73913
69
# -*- coding: utf-8 -*- """ test temporary saving of stm files """ from stg4x.helper import tempdir from stg4x.stm import PulseFile
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 9288, 8584, 8914, 286, 336, 76, 3696, 198, 37811, 198, 6738, 336, 70, 19, 87, 13, 2978, 525, 1330, 20218, 15908, 198, 6738, 336, 70, 19, 87, 13, 301, 76,...
2.673469
49
# Lista de la compra. # # Este ejercicio consiste en la digitalización de una lista de la compra totalmente funcional. # Esta lista de la compra será una aplicación de consola basada en menus. # # En caso que el usuario de una opcion no valida, se debera informar que la opción no es valida # y volver a mostrar actual. # # Ejemplo Menu principal. # # > Bienvenid@ a la lista de la compra. Que quieres hacer: # > 0. Salir # > 1. Ver lista de la compra # > 2. Agregar producto a la lista # > 3. Modificar producto de la lista # > 4. Quitar producto de la lista # < # # El usuario podrá introducir el numero correspondiente a la acción que quiera realizar. # # Funcionalidades: ## 0. Salir ### Si el usuario elige salir la aplicación debe preguntar si esta seguro que desea salir. ### En caso que diga que si, debe cerrarse ### Ejemplo: ### > Esta seguro que desea salir? ### > 0. Si ### > 1. No ### < ## ## 1. Ver lista de la compra ### Si el usuario selecciona esta opcion se deben mostrar todos los productos que haya introducido ### o modificado previamente seguido del menu principal ### ### Ejemplo: ### < 1 ### > Tomates ### > Patatas ### > Pañales ### > ### > ...Bienvenid@ a la ... ### < ## ## 2. Agregar producto a la lista ### Cuando el usuario seleccione esta opcion, la aplicación le pedirá el producto a añadir ### Despues de que el usuario introduzca un producto le notificara que ha sido agregado y el ### numero de productos que hay en la lista ### Despues del mensaje se vuelve a mostrar el menu principal ### ### Ejemplo: ### < 2 ### > Que producto deseas añadir? ### < Sangre de unicornio ### > 'Sangre de unicornio' se agregó a la lista de la compra. hay 4 productos. ### > Bienvenid@ a la ... ### > ... ### < ## ## 3. Modificar producto de la lista ### Cuando El usuario utiliza esta opción. Ve todos los productos que hay en la lista, ### elige cual quiere modificar, y introduce un valor en su lugar. 
### Despues de la operación de modificado se muestra el menu principal ### ### Ejemplo: ### < 3 ### > Estos son los productos que hay en la lista. Cuál quieres modificar? ### > 0. Tomates ### > 1. Patatas ### > 2. Pañales ### > 3. Sangre de unicornio ### < 1 ### > Cual es su nuevo valor? ### < Mana del bueno ### > 'Patatas' Se ha modificado por 'Mana del bueno' ### > Bienvenid@ a la ... ### > ... ### < ## ## 4. Quitar producto de la lista ### Cuando El usuario utiliza esta opción. Ve todos los productos que hay en la lista, ### elige cual quiere eliminar. ### Despues de la operación de modificado se muestra el menu principal ### ### Ejemplo: ### < 4 ### > Estos son los productos que hay en la lista. Cuál quieres eliminar? ### > 0. Tomates ### > 1. Mana del bueno ### > 2. Pañales ### > 3. Sangre de unicornio ### < 0 ### > 'Tomates'ha sido eliminado de la lista. Quedan 3 productos. ### > Bienvenid@ a la ... ### > ... ### < print("unica linea") print() print("prints separados") print("linea uno") print("linea dos") print ("linea tres") print() print("triple comilla") print("""linea uno linea dos linea tres """) print() print("special char ") print("linea uno \nlinea dos\nlinea tres") print() print("playing with special chars") print("text with \" quote") print("mi headline\n\tsubhedline1\n\tsubheadline2") print("string with bars \\")
[ 2, 7343, 64, 390, 8591, 552, 430, 13, 201, 198, 2, 220, 201, 198, 2, 412, 4169, 304, 73, 2798, 46441, 3473, 68, 551, 8591, 4875, 528, 32009, 18840, 390, 555, 64, 1351, 64, 390, 8591, 552, 430, 2472, 434, 68, 25439, 1538, 13, 201...
2.527097
1,347
#!/usr/bin/env python from setuptools import setup setup(name='GoodRepoProject', version='1.0', description='Software Engineering Homework Assignment 2b', author='Aditya, Harsh, Ashritha, Abhishek, Akash', author_email='adityajadhavncsu@gmail.com', url='https://github.com/adi2768/GoodRepoProject.git', packages=['GoodRepoProject'], license='MIT' )
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 6738, 900, 37623, 10141, 1330, 9058, 198, 198, 40406, 7, 3672, 11639, 10248, 6207, 78, 16775, 3256, 198, 220, 220, 220, 220, 220, 2196, 11639, 16, 13, 15, 3256, 198, 220, 220, 2...
2.45679
162
""" Sponge Knowledge Base Using knowledge base callbacks. """ from java.util.concurrent.atomic import AtomicBoolean, AtomicInteger from org.openksavi.sponge.examples.util import TestStatus
[ 37811, 201, 198, 4561, 14220, 20414, 7308, 201, 198, 12814, 3725, 2779, 869, 10146, 13, 201, 198, 37811, 201, 198, 201, 198, 6738, 20129, 13, 22602, 13, 1102, 14421, 13, 47116, 1330, 28976, 46120, 13087, 11, 28976, 46541, 201, 198, 6738...
3.338983
59
from django.urls import path,include from . import views urlpatterns = [ path('', views.subject), path('clear', views.clear), ]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 11, 17256, 198, 6738, 764, 1330, 5009, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 3108, 10786, 3256, 5009, 13, 32796, 828, 198, 220, 220, 220, 3108, 10786, 20063, 3256, 5009, 13, ...
2.934783
46
import time from enum import IntEnum import numpy as np from modelhub import onnx as onnx_models from xlib import os as lib_os from xlib.face import FRect from xlib.image import ImageProcessor from xlib.mp import csw as lib_csw from xlib.python import all_is_not_None from .BackendBase import (BackendConnection, BackendDB, BackendHost, BackendSignal, BackendWeakHeap, BackendWorker, BackendWorkerState, BackendFaceSwapInfo) DetectorTypeNames = ['CenterFace', 'S3FD', 'YoloV5'] FaceSortByNames = ['@FaceDetector.LARGEST', '@FaceDetector.DIST_FROM_CENTER', '@FaceDetector.LEFT_RIGHT', '@FaceDetector.RIGHT_LEFT', '@FaceDetector.TOP_BOTTOM', '@FaceDetector.BOTTOM_TOP' ]
[ 11748, 640, 198, 6738, 33829, 1330, 2558, 4834, 388, 198, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 2746, 40140, 1330, 319, 77, 87, 355, 319, 77, 87, 62, 27530, 198, 6738, 2124, 8019, 1330, 28686, 355, 9195, 62, 418, 198, 6738, ...
2.298507
335
import numpy as np import theano import theano.tensor as T from .. import init from .. import nonlinearities from .base import Layer from theano.sandbox.cuda.basic_ops import gpu_contiguous from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs __all__ = [ "CCLayer", "Conv2DCCLayer", "MaxPool2DCCLayer", "ShuffleBC01ToC01BLayer", "bc01_to_c01b", "ShuffleC01BToBC01Layer", "c01b_to_bc01", "NINLayer_c01b", ] if not theano.config.device.startswith("gpu"): raise ImportError("requires a GPU to work") # TODO: make sure to document the limitations and 'best practices' # (i.e. minibatch size % 128 == 0) # TODO: see if the 'dimshuffle' logic can be put in the base class instead. # base class for all layers that use ops from pylearn2.sandbox.cuda_convnet # TODO: crossmapnorm # from pylearn2.sandbox.cuda_convnet.response_norm import CrossMapNorm # Helper classes for switching between bc01 and c01b input formats class ShuffleBC01ToC01BLayer(Layer): """ This layer dimshuffles 4D input for interoperability between c01b and bc01 ops. bc01 (theano) -> c01b (cuda-convnet) """ bc01_to_c01b = ShuffleBC01ToC01BLayer # shortcut class ShuffleC01BToBC01Layer(Layer): """ This layer dimshuffles 4D input for interoperability between c01b and bc01 ops. c01b (cuda-convnet) -> bc01 (theano) """ c01b_to_bc01 = ShuffleC01BToBC01Layer # shortcut # c01b versions of other Layer classes class NINLayer_c01b(Layer): """ This does the same as lasagne.layers.NINLayer, but operates with c01b axis arrangement instead of bc01. This reduces the number of shuffles and reshapes required and might be faster as a result. """
[ 11748, 299, 32152, 355, 45941, 198, 11748, 262, 5733, 198, 11748, 262, 5733, 13, 83, 22854, 355, 309, 198, 198, 6738, 11485, 1330, 2315, 198, 6738, 11485, 1330, 1729, 29127, 871, 198, 198, 6738, 764, 8692, 1330, 34398, 198, 198, 6738, ...
2.680556
648
import tests.missing_data.test_missing_data_air_passengers_generic as gen gen.test_air_passengers_missing_data('DiscardRow', 'Interpolate')
[ 11748, 5254, 13, 45688, 62, 7890, 13, 9288, 62, 45688, 62, 7890, 62, 958, 62, 6603, 9302, 62, 41357, 355, 2429, 198, 198, 5235, 13, 9288, 62, 958, 62, 6603, 9302, 62, 45688, 62, 7890, 10786, 15642, 446, 25166, 3256, 705, 9492, 16104...
3.065217
46
bread_data = input().split('|') current_energy = 100 current_coins = 100 is_bankrupt = False for bread in bread_data: event, value = bread.split('-') value = int(value) if event == 'rest': if current_energy + value <= 100: current_energy += value else: value = 0 print(f'You gained {value} energy.') print(f'Current energy: {current_energy}.') elif event == 'order': if current_energy >= 30: current_energy -= 30 current_coins += value print(f'You earned {value} coins.') else: current_energy += 50 print('You had to rest!') elif current_coins >= 0 and current_coins > value: current_coins -= value print(f'You bought {event}.') else: print(f'Closed! Cannot afford {event}.') is_bankrupt = True break if not is_bankrupt: print('Day completed!') print(f'Coins: {current_coins}') print(f'Energy: {current_energy}')
[ 29573, 62, 7890, 796, 5128, 22446, 35312, 10786, 91, 11537, 198, 14421, 62, 22554, 796, 1802, 198, 14421, 62, 14624, 796, 1802, 198, 271, 62, 17796, 3622, 796, 10352, 198, 198, 1640, 8509, 287, 8509, 62, 7890, 25, 198, 220, 220, 220, ...
2.245077
457
# Vim startup script written in python. import vim import sys import re # Set this option first. vim.command('set nocompatible') vim.command('let loaded_matchparen = 1') # Clear mappings and auto commands in case we are reloading the file. vim.command('mapclear') vim.command('imapclear') vim.command('autocmd!') # General options # vim.command('execute pathogen#infect()') vim.command('syntax on') # vim.command('filetype plugin indent on') vim.command('set backspace=indent,eol,start') vim.command('set mouse=a') vim.command('set nofoldenable') vim.command('set number') vim.command('set ruler') vim.command('set scrolloff=5') vim.command('set showtabline=2') vim.command('let mapleader="-"') vim.command('set spellfile="~/scripts/vim-spelling/en.utf-8.add"') # Comma separated list of regexes. vim.command(r"let g:netrw_list_hide='\.sw.$,\.pyc$'") # Rainbow Parenthesis options. # vim.command('au VimEnter * RainbowParenthesesToggle') # vim.command('au Syntax * RainbowParenthesesLoadRound') # vim.command('au Syntax * RainbowParenthesesLoadSquare') # vim.command('au Syntax * RainbowParenthesesLoadBraces') # Swap dirs swap_dirs = ['.', '~/.vim/backup'] vim.command('set directory=' + ','.join(swap_dirs)) vim.command('set backupdir=' + ','.join(swap_dirs)) # Search settings vim.command('set hlsearch') vim.command('nohlsearch') vim.command('set ignorecase') vim.command('set incsearch') vim.command('set nowrapscan') vim.command('set smartcase') # Tab settings vim.command('set autoindent') vim.command('set expandtab') vim.command('set shiftwidth=4') vim.command('set smarttab') vim.command('set softtabstop=4') vim.command('set tabstop=4') # Change line number colors in insert mode. vim.command('au InsertEnter * hi LineNr ctermfg=0 ctermbg=darkgreen') vim.command('au InsertLeave * hi LineNr ctermfg=darkgreen ctermbg=8') vim.command('hi LineNr ctermfg=darkgreen ctermbg=8') vim.command('autocmd BufRead * :call RunPy("set_file_type()")') # Set spellcheck highlight color. 
vim.command('hi clear SpellBad') vim.command('hi SpellBad ctermfg=white ctermbg=darkgreen') def replace_string_contents(): """ Delete the contents of a string literal and go into insert mode. Does not work for string literals spanning more than one line or for Python tripple quoted string literals. """ in_string = [] current_quote = None last_char = '' for char in vim.current.line: if current_quote is None: if char in ['"', "'"]: current_quote = char in_string.append(False) else: if char == current_quote and last_char != '\\': current_quote = None in_string.append(False) else: in_string.append(True) last_char = char _, pos = vim.current.window.cursor if not in_string[pos]: print('not currently in a string literal') else: pos1 = pos while in_string[pos1 - 1]: pos1 -= 1 pos2 = pos while in_string[pos2 + 1]: pos2 += 1 print(vim.current.line[pos1:pos2 + 1]) # 'asdfsd' "weqrwqerqwe" 'as"dfsd' "weqr'wqerqwe" 'as\'dfsd' "weqr\"wqerqwe" # def exec_current_buffer(): # filetype = vim.eval('&filetype') # if filetype != 'python': # sys.stderr.write('Unsupported file type: ' + filetype) # return # script = '\n'.join(vim.current.buffer[:]) # exec script in globals() # def exec_current_block(): # filetype = vim.eval('&filetype') # if filetype != 'python': # sys.stderr.write('Unsupported file type: ' + filetype) # return # lines = vim.current.buffer[:] # # # Find start of block. # L1 = vim.current.window.cursor[0] - 1 # while (lines[L1].startswith(' ') or # lines[L1].startswith('#') or # lines[L1] == ''): # L1 -= 1 # # # Find end of block. 
# L2 = vim.current.window.cursor[0] # while L2 < len(lines) and (lines[L2].startswith(' ') or # lines[L2].startswith('#') or # lines[L2] == ''): # L2 += 1 # # script = '\n'.join(lines[L1:L2]) # exec script in globals() _ov_toggle = False key_codes = { '<S-F1>': '<Esc>[1;2P', '<S-F2>': '<Esc>[1;2Q', '<S-F3>': '<Esc>[1;2Q', '<S-F4>': '<Esc>[1;2S', '<S-F5>': '<Esc>[15;2~', '<S-F6>': '<Esc>[17;2~', '<S-F7>': '<Esc>[18;2~', '<S-F8>': '<Esc>[19;2~', '<S-F9>': '<Esc>[20~', '<S-F10>': '<Esc>[21~', '<S-F11>': '<Esc>[23~', '<S-F12>': '<Esc>[24~', } nnoremap = map_func('nnoremap') vnoremap = map_func('vnoremap') inoremap = map_func('inoremap') do_keybindings()
[ 2, 36645, 13693, 4226, 3194, 287, 21015, 13, 198, 11748, 43907, 198, 11748, 25064, 198, 11748, 302, 198, 198, 2, 5345, 428, 3038, 717, 13, 198, 31124, 13, 21812, 10786, 2617, 299, 420, 3361, 16873, 11537, 198, 31124, 13, 21812, 10786, ...
2.273295
2,082
#Hand gesture recognition - deep learning project #Importing libraries import os import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn import preprocessing os.chdir("D:/_IRRI-SOUTH ASIA/personal projects/hand_gesture_recognition_project/dataset") #Data Preprocessing train = pd.read_csv("sign_mnist_train.csv") test = pd.read_csv("sign_mnist_test.csv") train_label=train["label"] test_label = test["label"] del train["label"] del test["label"] x = train.values/255 # normalized training set y = test.values/255 # normalized testing set x = x.reshape(-1,28,28,1) y = y.reshape(-1,28,28,1) #label binarizer - encoding labels into categories label_binarizer = preprocessing.LabelBinarizer() train_label = label_binarizer.fit_transform(train_label) test_label = label_binarizer.fit_transform(test_label) ##Function to see the dataset show_image(155) #Builing CNN - deep learning model from keras.models import Sequential from keras.layers import Conv2D, Dense, MaxPool2D, Dropout,BatchNormalization, Flatten from keras.callbacks import ReduceLROnPlateau model = Sequential() model.add(Conv2D(75,(3,3),strides=1,padding='same',activation='relu',input_shape=(128,128,1))) model.add(BatchNormalization()) model.add(MaxPool2D(2,2)) model.add(Conv2D(75,(3,3),strides=1,padding='same',activation='relu')) model.add(MaxPool2D(2,2)) model.add(Conv2D(75,(3,3),strides=1,padding='same',activation='relu')) model.add(Dropout(0.2)) model.add(BatchNormalization()) model.add(MaxPool2D(2,2)) model.add(Conv2D(75,(3,3),strides=1,padding='same',activation='relu')) model.add(MaxPool2D(2,2)) model.add(Flatten()) model.add(Dense(units=512,activation='relu')) model.add(Dense(units=1,activation='relu')) model.compile(optimizer='adam', loss='mse',metrics=['accuracy']) learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',patience=2,verbose=1,factor=0.5,min_lr=0.00001) #From keras.callback import ReduceLROnPlateau #setting learning rate #Training model 
model.fit(x,trainy,batch_size=128,epochs=10,validation_data=(y,testy),callbacks=[learning_rate_reduction]) #Evaluating model - checking final accuracy model.evaluate(y,test_label) #Predicting prediction = model.predict_classes(x) for i in range(len(prediction)): if (prediction[i] >= 9 or prediction[i] >=25): prediction[i]+=1 prediction[:10] #Saving model model.save('hand_gesture.h5') #Implementing model on web import streamlit as st st.write('''M''')
[ 2, 12885, 18342, 9465, 532, 2769, 4673, 1628, 628, 198, 2, 20939, 278, 12782, 198, 11748, 28686, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 1...
2.676724
928
import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil
[ 11748, 32660, 9945, 198, 6738, 32660, 67, 1443, 84, 578, 13, 9288, 13, 12501, 273, 2024, 1330, 1635, 198, 6738, 32660, 67, 1443, 84, 578, 13, 9288, 13, 297, 9945, 9288, 1330, 1635, 198, 6738, 32660, 67, 1443, 84, 578, 13, 9288, 1330...
2.625
48
# -*- python -*- from nevow import guard, inevow, appserver from twisted.application import strports, service from codebay.common import logger from codebay.l2tpserver import constants from codebay.l2tpserver.webui import master, website application = service.Application('l2tpserver') webuimaster = master.LiveCdMaster() webuimaster.pre_start() webuiservice = master.LiveCdService(webuimaster) webuiservice.setServiceParent(application) # XXX: limit binding to localhost (no network at the moment -> does not matter) webuisite = website.LiveCdSite(webuimaster) mainsite = webuisite.createMainSite() strports.service(constants.WEBUI_STRPORT_HTTP, mainsite).setServiceParent(application) ###strports.service(constants.WEBUI_STRPORT_HTTPS, mainsite).setServiceParent(application)
[ 2, 532, 9, 12, 21015, 532, 9, 12, 198, 6738, 497, 85, 322, 1330, 4860, 11, 9026, 322, 11, 598, 15388, 198, 6738, 19074, 13, 31438, 1330, 965, 3742, 11, 2139, 198, 198, 6738, 2438, 24406, 13, 11321, 1330, 49706, 198, 198, 6738, 243...
3.123506
251
""" dbarray creators for simple, commonly-used contents. """ import npdb # db = npdb.dbarray((3,3,3), float) # bounds = db[0:3] # print "bounds", bounds # db = npdb.dbarray((10,), float) # bounds = db[2:5] # print "bounds", bounds # bounds = db[:-7] # print "bounds", bounds # bounds = db[1:7:2] # print "bounds", bounds # db = dbarray((5,7), float) # bounds = db[1:5:2,::3] # print "bounds", bounds # bounds = db[...,1] # print "bounds", bounds # a = dbview(np.zeros(shape=(3,3)), db, dims=(0,1), offset=(0,0,0)) # b = dbview(np.ones(shape=(2,2)), db, dims=(0,1), offset=(1,0,0)) # c = dbview([[3,1],[1,5],[2,3]], offset=(1,2)) # d = dbview([[3,1],[1,5]], offset=(6,9)) # print a # print b # print dbview.merge([a,b], fill=2) # c = a.asndarray(copy=True) # c[0,0] = 100 # print c # print a # print type(c) # print type(a)
[ 37811, 198, 67, 5657, 2433, 16294, 329, 2829, 11, 8811, 12, 1484, 10154, 13, 220, 198, 37811, 198, 198, 11748, 45941, 9945, 198, 198, 2, 20613, 796, 45941, 9945, 13, 67, 5657, 2433, 19510, 18, 11, 18, 11, 18, 828, 12178, 8, 198, 2...
2.167959
387
from flask_appbuilder import ModelRestApi from flask_appbuilder.models.sqla.interface import SQLAInterface from . import db, appbuilder from .models import ContactGroup, Gender, Contact db.create_all() fill_gender() appbuilder.add_api(ContactModelApi) appbuilder.add_api(GroupModelApi)
[ 6738, 42903, 62, 1324, 38272, 1330, 9104, 19452, 32, 14415, 198, 6738, 42903, 62, 1324, 38272, 13, 27530, 13, 31166, 5031, 13, 39994, 1330, 16363, 32, 39317, 198, 6738, 764, 1330, 20613, 11, 598, 38272, 198, 6738, 764, 27530, 1330, 1403...
3.241758
91
""" This is a packet of KWS detection, dependent on DNN training part """ import ctypes as ct import numpy as np import wave import math import matplotlib.pyplot as plt import pyaudio import os import sys from scipy.io import wavfile # must have matching versions: llvmlite==0.22 numba==0.36.1 librosa==0.5 import librosa import librosa.display import threading import time from numpy.linalg import norm from SoundSourceLocalization.kws_do_inference import KwsNNet if __name__ == "__main__": # test kws module pwd = os.path.abspath(os.path.abspath(__file__)) father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..") print(father_path) sys.path.append(father_path) # chunk, record_device_name, record_width, channels, rate, format, wav_path, model_path, label_path kws = KwsDetector(1024, "USB Camera-B4.09.24.1", 2, 4, 16000, pyaudio.paInt16, father_path + "/resource/stream_tmp", father_path + "/resource/Pretrained_models/DNN/follow.pb", father_path+"/resource/Pretrained_models/follow_labels.txt") kws.slide_win_loop()
[ 37811, 198, 220, 220, 220, 770, 318, 257, 19638, 286, 509, 19416, 13326, 11, 198, 220, 220, 220, 220, 220, 220, 220, 10795, 319, 360, 6144, 3047, 636, 198, 37811, 198, 198, 11748, 269, 19199, 355, 269, 83, 198, 11748, 299, 32152, 35...
2.635922
412
#! /usr/bin/env python """Helper to run all available tests.""" import sys import test_formatting import test_dir_recursion import test_pylint if __name__ == '__main__': # Each test() call returns True if successful, so only exit with 0 when they # all succeed. sys.exit(not all(( test_formatting.test(), test_dir_recursion.test(), test_pylint.test(), )))
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 198, 37811, 47429, 284, 1057, 477, 1695, 5254, 526, 15931, 198, 11748, 25064, 198, 198, 11748, 1332, 62, 18982, 889, 198, 11748, 1332, 62, 15908, 62, 8344, 24197, 198, 11748, 1332, 62, 79...
2.61039
154
""" drivers/__init__.py @copyright: (C) 2012-2015 by D. Brian Kimmel The following terms apply to all files associated with the software unless explicitly disclaimed in individual files. The authors hereby grant permission to use, copy, modify, distribute, and license this software and its documentation for any purpose, provided that existing copyright notices are retained in all copies and that this notice is included verbatim in any distributions. No written agreement, license, or royalty fee is required for any of the authorized uses. Modifications to this software may be copyrighted by their authors and need not follow the licensing terms described here, provided that the new terms are clearly indicated on the first page of each file where they apply. IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. ---------------------------------------------------------------------------- These are the drivers for various interfaces with the computer. Serial - going away but it still exists. Serial commands thru a USB dongle work here. USB - the new standard. HID - Human Interface Device class of USB devices. Ethernet - not too much of this is used yet. """ __version_info__ = (1, 6, 0) __version__ = '.' . join(map(str, __version_info__)) VALID_INTERFACES = ['Null', 'Serial', 'USB', 'Ethernet'] VALID_PROTOCOLS = ['TCP', 'UDP', 'Both'] # ## END DBK
[ 37811, 6643, 14, 834, 15003, 834, 13, 9078, 198, 198, 31, 22163, 4766, 25, 357, 34, 8, 2321, 12, 4626, 416, 360, 13, 8403, 46552, 198, 198, 464, 1708, 2846, 4174, 284, 477, 3696, 3917, 198, 351, 262, 3788, 4556, 11777, 28468, 276, ...
3.693578
545
sh_data = { 'imports': [ (0x4949c, 4, 9, '*.recvfrom'), (0x494a0, 4, 9, '*.div'), (0x494a4, 4, 9, '*.fflush'), (0x494a8, 4, 9, '*.lchown'), (0x494ac, 4, 9, '*.statfs'), (0x494b0, 4, 9, '*.atof'), (0x494b4, 4, 9, '*.dup2'), (0x494b8, 4, 9, '*.strcasecmp'), (0x494bc, 4, 9, '*.fgets'), (0x494c0, 4, 9, '*._IO_getc'), (0x494c4, 4, 9, '*.vsnprintf'), (0x494c8, 4, 9, '*.umount2'), (0x494cc, 4, 9, '*.uname'), (0x494d0, 4, 9, '*.setbuf'), (0x494d4, 4, 9, '*.strtoul'), (0x494d8, 4, 9, '*.__xstat'), (0x494dc, 4, 9, '*.fscanf'), (0x494e0, 4, 9, '*.getprotobyname'), (0x494e4, 4, 9, '*.syscall'), (0x494e8, 4, 9, '*.strptime'), (0x494ec, 4, 9, '*.mktime'), (0x494f0, 4, 9, '*.swapoff'), (0x494f4, 4, 9, '*.memset'), (0x494f8, 4, 9, '*.getservbyport'), (0x494fc, 4, 9, '*.__ctype_tolower_loc'), (0x49500, 4, 9, '*.closedir'), (0x49504, 4, 9, '*.putchar'), (0x49508, 4, 9, '*.isatty'), (0x4950c, 4, 9, '*.strchrnul'), (0x49510, 4, 9, '*._exit'), (0x49514, 4, 9, '*.strpbrk'), (0x49518, 4, 9, '*.strchr'), (0x4951c, 4, 9, '*.puts'), (0x49520, 4, 9, '*.getpagesize'), (0x49524, 4, 9, '*.xdr_int'), (0x49528, 4, 9, '*.bind'), (0x4952c, 4, 9, '*.getuid'), (0x49530, 4, 9, '*.abort'), (0x49534, 4, 9, '*.execvp'), (0x49538, 4, 9, '*.islower'), (0x4953c, 4, 9, '*.fseek'), (0x49540, 4, 9, '*.fchown'), (0x49544, 4, 9, '*.cfsetispeed'), (0x49548, 4, 9, '*.strncmp'), (0x4954c, 4, 9, '*.getdomainname'), (0x49550, 4, 9, '*.rand'), (0x49554, 4, 9, '*.sysinfo'), (0x49558, 4, 9, '*.mount'), (0x4955c, 4, 9, '*.strspn'), (0x49560, 4, 9, '*.fputs'), (0x49564, 4, 9, '*.chown'), (0x49568, 4, 9, '*.fcntl'), (0x4956c, 4, 9, '*.lseek'), (0x49570, 4, 9, '*.pututline'), (0x49574, 4, 9, '*.socket'), (0x49578, 4, 9, '*.strcat'), (0x4957c, 4, 9, '*.klogctl'), (0x49580, 4, 9, '*.pivot_root'), (0x49584, 4, 9, '*.fileno'), (0x49588, 4, 9, '*.cfgetospeed'), (0x4958c, 4, 9, '*.sethostname'), (0x49590, 4, 9, '*.getrlimit'), (0x49594, 4, 9, '*.addmntent'), (0x49598, 4, 9, '*.xdr_enum'), (0x4959c, 4, 9, '*.pclose'), (0x495a0, 4, 
9, '*.isgraph'), (0x495a4, 4, 9, '*.rmdir'), (0x495a8, 4, 9, '*.getopt_long'), (0x495ac, 4, 9, '*.getgroups'), (0x495b0, 4, 9, '*.unsetenv'), (0x495b4, 4, 9, '*.gethostbyname'), (0x495b8, 4, 9, '*.fnmatch'), (0x495bc, 4, 9, '*.setmntent'), (0x495c0, 4, 9, '*.__libc_start_main'), (0x495c4, 4, 9, '*.sigaction'), (0x495c8, 4, 9, '*.syslog'), (0x495cc, 4, 9, '*.ftruncate'), (0x495d0, 4, 9, '*.isalpha'), (0x495d4, 4, 9, '*.memcpy'), (0x495d8, 4, 9, '*.sigemptyset'), (0x495dc, 4, 9, '*.closelog'), (0x495e0, 4, 9, '*.srand'), (0x495e4, 4, 9, '*.execlp'), (0x495e8, 4, 9, '*.setpgid'), (0x495ec, 4, 9, '*.gai_strerror'), (0x495f0, 4, 9, '*.vsyslog'), (0x495f4, 4, 9, '*.getutent'), (0x495f8, 4, 9, '*.dprintf'), (0x495fc, 4, 9, '*.raise'), (0x49600, 4, 9, '*.unlink'), (0x49604, 4, 9, '*.utime'), (0x49608, 4, 9, '*.clearenv'), (0x4960c, 4, 9, '*.toupper'), (0x49610, 4, 9, '*.umount'), (0x49614, 4, 9, '*.clntudp_create'), (0x49618, 4, 9, '*.scandir'), (0x4961c, 4, 9, '*.clnt_spcreateerror'), (0x49620, 4, 9, '*.tcflush'), (0x49624, 4, 9, '*.kill'), (0x49628, 4, 9, '*.popen'), (0x4962c, 4, 9, '*.longjmp'), (0x49630, 4, 9, '*.ioctl'), (0x49634, 4, 9, '*.signal'), (0x49638, 4, 9, '*.snprintf'), (0x4963c, 4, 9, '*.dirname'), (0x49640, 4, 9, '*.readdir'), (0x49644, 4, 9, '*.tcsetattr'), (0x49648, 4, 9, '*.getopt'), (0x4964c, 4, 9, '*.setsid'), (0x49650, 4, 9, '*.strcmp'), (0x49654, 4, 9, '*.tcsetpgrp'), (0x49658, 4, 9, '*.sendto'), (0x4965c, 4, 9, '*.strrchr'), (0x49660, 4, 9, '*.ctime'), (0x49664, 4, 9, '*.times'), (0x49668, 4, 9, '*.isupper'), (0x4966c, 4, 9, '*.getpid'), (0x49670, 4, 9, '*.crypt'), (0x49674, 4, 9, '*.strdup'), (0x49678, 4, 9, '*.perror'), (0x4967c, 4, 9, '*.write'), (0x49680, 4, 9, '*.getnetbyname'), (0x49684, 4, 9, '*.malloc'), (0x49688, 4, 9, '*.isprint'), (0x4968c, 4, 9, '*.ftello'), (0x49690, 4, 9, '*.vsprintf'), (0x49694, 4, 9, '*.execl'), (0x49698, 4, 9, '*.gettimeofday'), (0x4969c, 4, 9, '*.fchmod'), (0x496a0, 4, 9, '*.gethostname'), (0x496a4, 4, 9, 
'*.stime'), (0x496a8, 4, 9, '*.strlen'), (0x496ac, 4, 9, '*.symlink'), (0x496b0, 4, 9, '*.regcomp'), (0x496b4, 4, 9, '*.getgid'), (0x496b8, 4, 9, '*.regerror'), (0x496bc, 4, 9, '*.__ctype_toupper_loc'), (0x496c0, 4, 9, '*.sigfillset'), (0x496c4, 4, 9, '*.regfree'), (0x496c8, 4, 9, '*.__cxa_atexit'), (0x496cc, 4, 9, '*.vasprintf'), (0x496d0, 4, 9, '*.sync'), (0x496d4, 4, 9, '*.xdr_u_int'), (0x496d8, 4, 9, '*.setpgrp'), (0x496dc, 4, 9, '*.strstr'), (0x496e0, 4, 9, '*.isalnum'), (0x496e4, 4, 9, '*.__errno_location'), (0x496e8, 4, 9, '*.execve'), (0x496ec, 4, 9, '*.inet_addr'), (0x496f0, 4, 9, '*.mkdtemp'), (0x496f4, 4, 9, '*.hstrerror'), (0x496f8, 4, 9, '*.endmntent'), (0x496fc, 4, 9, '*.getmntent'), (0x49700, 4, 9, '*.setrlimit'), (0x49704, 4, 9, '*.strncpy'), (0x49708, 4, 9, '*.qsort'), (0x4970c, 4, 9, '*.execle'), (0x49710, 4, 9, '*.ftell'), (0x49714, 4, 9, '*.alphasort'), (0x49718, 4, 9, '*.vprintf'), (0x4971c, 4, 9, '*.strtol'), (0x49720, 4, 9, '*.tcgetpgrp'), (0x49724, 4, 9, '*.cfsetospeed'), (0x49728, 4, 9, '*.chroot'), (0x4972c, 4, 9, '*.strcpy'), (0x49730, 4, 9, '*.mkfifo'), (0x49734, 4, 9, '*.isascii'), (0x49738, 4, 9, '*.getcwd'), (0x4973c, 4, 9, '*.__h_errno_location'), (0x49740, 4, 9, '*.atoi'), (0x49744, 4, 9, '*.mkstemp'), (0x49748, 4, 9, '*.clnttcp_create'), (0x4974c, 4, 9, '*.__xmknod'), (0x49750, 4, 9, '*.realpath'), (0x49754, 4, 9, '*.freeaddrinfo'), (0x49758, 4, 9, '*.memchr'), (0x4975c, 4, 9, '*.bsearch'), (0x49760, 4, 9, '*.feof'), (0x49764, 4, 9, '*.sprintf'), (0x49768, 4, 9, '*._IO_putc'), (0x4976c, 4, 9, '*.strncasecmp'), (0x49770, 4, 9, '*.regexec'), (0x49774, 4, 9, '*.getenv'), (0x49778, 4, 9, '*.strsignal'), (0x4977c, 4, 9, '*.endutent'), (0x49780, 4, 9, '*.isblank'), (0x49784, 4, 9, '*.fprintf'), (0x49788, 4, 9, '*.openlog'), (0x4978c, 4, 9, '*.open'), (0x49790, 4, 9, '*.strftime'), (0x49794, 4, 9, '*.authunix_create_default'), (0x49798, 4, 9, '*.fsync'), (0x4979c, 4, 9, '*.umask'), (0x497a0, 4, 9, '*.chmod'), (0x497a4, 4, 9, 
'*.tcgetattr'), (0x497a8, 4, 9, '*.tolower'), (0x497ac, 4, 9, '*.sscanf'), (0x497b0, 4, 9, '*.readlink'), (0x497b4, 4, 9, '*.setvbuf'), (0x497b8, 4, 9, '*.setgid'), (0x497bc, 4, 9, '*.isspace'), (0x497c0, 4, 9, '*.getlogin'), (0x497c4, 4, 9, '*.inet_aton'), (0x497c8, 4, 9, '*.getpriority'), (0x497cc, 4, 9, '*.rewind'), (0x497d0, 4, 9, '*.sleep'), (0x497d4, 4, 9, '*.realloc'), (0x497d8, 4, 9, '*.mkdir'), (0x497dc, 4, 9, '*.setgroups'), (0x497e0, 4, 9, '*.setpriority'), (0x497e4, 4, 9, '*.fgetc'), (0x497e8, 4, 9, '*.utmpname'), (0x497ec, 4, 9, '*.cfgetispeed'), (0x497f0, 4, 9, '*.xdr_string'), (0x497f4, 4, 9, '*.getmntent_r'), (0x497f8, 4, 9, '*.access'), (0x497fc, 4, 9, '*.localtime'), (0x49800, 4, 9, '*.killpg'), (0x49804, 4, 9, '*.getnetbyaddr'), (0x49808, 4, 9, '*.xdr_opaque'), (0x4980c, 4, 9, '*.difftime'), (0x49810, 4, 9, '*.ferror'), (0x49814, 4, 9, '*.memmove'), (0x49818, 4, 9, '*.setutent'), (0x4981c, 4, 9, '*.exit'), (0x49820, 4, 9, '*.setsockopt'), (0x49824, 4, 9, '*.ispunct'), (0x49828, 4, 9, '*.link'), (0x4982c, 4, 9, '*.free'), (0x49830, 4, 9, '*.fdopen'), (0x49834, 4, 9, '*.strerror'), (0x49838, 4, 9, '*.select'), (0x4983c, 4, 9, '*.getaddrinfo'), (0x49840, 4, 9, '*.execv'), (0x49844, 4, 9, '*.clearerr'), (0x49848, 4, 9, '*.strtoll'), (0x4984c, 4, 9, '*.strncat'), (0x49850, 4, 9, '*.getchar'), (0x49854, 4, 9, '*.gethostbyaddr'), (0x49858, 4, 9, '*.inet_pton'), (0x4985c, 4, 9, '*.setuid'), (0x49860, 4, 9, '*.read'), (0x49864, 4, 9, '*.bindresvport'), (0x49868, 4, 9, '*.strtok'), (0x4986c, 4, 9, '*.updwtmp'), (0x49870, 4, 9, '*.time'), (0x49874, 4, 9, '*.vfprintf'), (0x49878, 4, 9, '*.inet_ntop'), (0x4987c, 4, 9, '*.fork'), (0x49880, 4, 9, '*.inet_ntoa'), (0x49884, 4, 9, '*.close'), (0x49888, 4, 9, '*.memcmp'), (0x4988c, 4, 9, '*._setjmp'), (0x49890, 4, 9, '*.vfork'), (0x49894, 4, 9, '*.wait3'), (0x49898, 4, 9, '*.putenv'), (0x4989c, 4, 9, '*.getpgrp'), (0x498a0, 4, 9, '*.swapon'), (0x498a4, 4, 9, '*.rename'), (0x498a8, 4, 9, '*.__fxstat'), (0x498ac, 4, 
9, '*.setenv'), (0x498b0, 4, 9, '*.chdir'), (0x498b4, 4, 9, '*.getppid'), (0x498b8, 4, 9, '*.fclose'), (0x498bc, 4, 9, '*.fopen'), (0x498c0, 4, 9, '*.strsep'), (0x498c4, 4, 9, '*.geteuid'), (0x498c8, 4, 9, '*.strtod'), (0x498cc, 4, 9, '*.pmap_getmaps'), (0x498d0, 4, 9, '*.fread'), (0x498d4, 4, 9, '*.strcspn'), (0x498d8, 4, 9, '*.getegid'), (0x498dc, 4, 9, '*.sysconf'), (0x498e0, 4, 9, '*.__res_state'), (0x498e4, 4, 9, '*.xdr_array'), (0x498e8, 4, 9, '*.clnt_sperror'), (0x498ec, 4, 9, '*.__lxstat'), (0x498f0, 4, 9, '*.fputc'), (0x498f4, 4, 9, '*.pipe'), (0x498f8, 4, 9, '*._Jv_RegisterClasses'), (0x498fc, 4, 9, '*.system'), (0x49900, 4, 9, '*.strtoull'), (0x49904, 4, 9, '*.creat'), (0x49908, 4, 9, '*.__res_init'), (0x4990c, 4, 9, '*.printf'), (0x49910, 4, 9, '*.alarm'), (0x49914, 4, 9, '*.ttyname'), (0x49918, 4, 9, '*.xdr_bytes'), (0x4991c, 4, 9, '*.getnameinfo'), (0x49920, 4, 9, '*.wait'), (0x49924, 4, 9, '*.waitpid'), (0x49928, 4, 9, '*.pmap_getport'), (0x4992c, 4, 9, '*.opendir'), (0x49934, 4, 9, '*.__gmon_start__'), ], 'exports': [ (0xb5f8, 0, 'recvfrom', 'sh'), (0xb604, 0, 'div', 'sh'), (0xb610, 0, 'fflush', 'sh'), (0xb61c, 0, 'lchown', 'sh'), (0xb628, 0, 'statfs', 'sh'), (0xb634, 0, 'atof', 'sh'), (0xb640, 0, 'dup2', 'sh'), (0xb64c, 0, 'strcasecmp', 'sh'), (0xb658, 0, 'fgets', 'sh'), (0xb664, 0, '_IO_getc', 'sh'), (0xb670, 0, 'vsnprintf', 'sh'), (0xb67c, 0, 'umount2', 'sh'), (0xb688, 0, 'uname', 'sh'), (0xb694, 0, 'setbuf', 'sh'), (0xb6a0, 0, 'strtoul', 'sh'), (0xb6ac, 0, '__xstat', 'sh'), (0xb6b8, 0, 'fscanf', 'sh'), (0xb6c4, 0, 'getprotobyname', 'sh'), (0xb6d0, 0, 'syscall', 'sh'), (0xb6dc, 0, 'strptime', 'sh'), (0xb6e8, 0, 'mktime', 'sh'), (0xb6f4, 0, 'swapoff', 'sh'), (0xb700, 0, 'memset', 'sh'), (0xb70c, 0, 'getservbyport', 'sh'), (0xb718, 0, '__ctype_tolower_loc', 'sh'), (0xb724, 0, 'closedir', 'sh'), (0xb730, 0, 'putchar', 'sh'), (0xb73c, 0, 'isatty', 'sh'), (0xb748, 0, 'strchrnul', 'sh'), (0xb754, 0, '_exit', 'sh'), (0xb760, 0, 'strpbrk', 'sh'), 
(0xb76c, 0, 'strchr', 'sh'), (0xb778, 0, 'puts', 'sh'), (0xb784, 0, 'getpagesize', 'sh'), (0xb790, 0, 'xdr_int', 'sh'), (0xb79c, 0, 'bind', 'sh'), (0xb7a8, 0, 'getuid', 'sh'), (0xb7b4, 0, 'abort', 'sh'), (0xb7c0, 0, 'execvp', 'sh'), (0xb7cc, 0, 'islower', 'sh'), (0xb7d8, 0, 'fseek', 'sh'), (0xb7e4, 0, 'fchown', 'sh'), (0xb7f0, 0, 'cfsetispeed', 'sh'), (0xb7fc, 0, 'strncmp', 'sh'), (0xb808, 0, 'getdomainname', 'sh'), (0xb814, 0, 'rand', 'sh'), (0xb820, 0, 'sysinfo', 'sh'), (0xb82c, 0, 'mount', 'sh'), (0xb838, 0, 'strspn', 'sh'), (0xb844, 0, 'fputs', 'sh'), (0xb850, 0, 'chown', 'sh'), (0xb85c, 0, 'fcntl', 'sh'), (0xb868, 0, 'lseek', 'sh'), (0xb874, 0, 'pututline', 'sh'), (0xb880, 0, 'socket', 'sh'), (0xb88c, 0, 'strcat', 'sh'), (0xb898, 0, 'klogctl', 'sh'), (0xb8a4, 0, 'pivot_root', 'sh'), (0xb8b0, 0, 'fileno', 'sh'), (0xb8bc, 0, 'cfgetospeed', 'sh'), (0xb8c8, 0, 'sethostname', 'sh'), (0xb8d4, 0, 'getrlimit', 'sh'), (0xb8e0, 0, 'addmntent', 'sh'), (0xb8ec, 0, 'xdr_enum', 'sh'), (0xb8f8, 0, 'pclose', 'sh'), (0xb904, 0, 'isgraph', 'sh'), (0xb910, 0, 'rmdir', 'sh'), (0xb91c, 0, 'getopt_long', 'sh'), (0xb928, 0, 'getgroups', 'sh'), (0xb934, 0, 'unsetenv', 'sh'), (0xb940, 0, 'gethostbyname', 'sh'), (0xb94c, 0, 'fnmatch', 'sh'), (0xb958, 0, 'setmntent', 'sh'), (0xb964, 0, '__libc_start_main', 'sh'), (0xb970, 0, 'sigaction', 'sh'), (0xb97c, 0, 'syslog', 'sh'), (0xb988, 0, 'ftruncate', 'sh'), (0xb994, 0, 'isalpha', 'sh'), (0xb9a0, 0, 'memcpy', 'sh'), (0xb9ac, 0, 'sigemptyset', 'sh'), (0xb9b8, 0, 'closelog', 'sh'), (0xb9c4, 0, 'srand', 'sh'), (0xb9d0, 0, 'execlp', 'sh'), (0xb9dc, 0, 'setpgid', 'sh'), (0xb9e8, 0, 'gai_strerror', 'sh'), (0xb9f4, 0, 'vsyslog', 'sh'), (0xba00, 0, 'getutent', 'sh'), (0xba0c, 0, 'dprintf', 'sh'), (0xba18, 0, 'raise', 'sh'), (0xba24, 0, 'unlink', 'sh'), (0xba30, 0, 'utime', 'sh'), (0xba3c, 0, 'clearenv', 'sh'), (0xba48, 0, 'toupper', 'sh'), (0xba54, 0, 'umount', 'sh'), (0xba60, 0, 'clntudp_create', 'sh'), (0xba6c, 0, 'scandir', 'sh'), (0xba78, 0, 
'clnt_spcreateerror', 'sh'), (0xba84, 0, 'tcflush', 'sh'), (0xba90, 0, 'kill', 'sh'), (0xba9c, 0, 'popen', 'sh'), (0xbaa8, 0, 'longjmp', 'sh'), (0xbab4, 0, 'ioctl', 'sh'), (0xbac0, 0, 'signal', 'sh'), (0xbacc, 0, 'snprintf', 'sh'), (0xbad8, 0, 'dirname', 'sh'), (0xbae4, 0, 'readdir', 'sh'), (0xbaf0, 0, 'tcsetattr', 'sh'), (0xbafc, 0, 'getopt', 'sh'), (0xbb08, 0, 'setsid', 'sh'), (0xbb14, 0, 'strcmp', 'sh'), (0xbb20, 0, 'tcsetpgrp', 'sh'), (0xbb2c, 0, 'sendto', 'sh'), (0xbb38, 0, 'strrchr', 'sh'), (0xbb44, 0, 'ctime', 'sh'), (0xbb50, 0, 'times', 'sh'), (0xbb5c, 0, 'isupper', 'sh'), (0xbb68, 0, 'getpid', 'sh'), (0xbb74, 0, 'crypt', 'sh'), (0xbb80, 0, 'strdup', 'sh'), (0xbb8c, 0, 'perror', 'sh'), (0xbb98, 0, 'write', 'sh'), (0xbba4, 0, 'getnetbyname', 'sh'), (0xbbb0, 0, 'malloc', 'sh'), (0xbbbc, 0, 'isprint', 'sh'), (0xbbc8, 0, 'ftello', 'sh'), (0xbbd4, 0, 'vsprintf', 'sh'), (0xbbe0, 0, 'execl', 'sh'), (0xbbec, 0, 'gettimeofday', 'sh'), (0xbbf8, 0, 'fchmod', 'sh'), (0xbc04, 0, 'gethostname', 'sh'), (0xbc10, 0, 'stime', 'sh'), (0xbc1c, 0, 'strlen', 'sh'), (0xbc28, 0, 'symlink', 'sh'), (0xbc34, 0, 'regcomp', 'sh'), (0xbc40, 0, 'getgid', 'sh'), (0xbc4c, 0, 'regerror', 'sh'), (0xbc58, 0, '__ctype_toupper_loc', 'sh'), (0xbc64, 0, 'sigfillset', 'sh'), (0xbc70, 0, 'regfree', 'sh'), (0xbc7c, 0, '__cxa_atexit', 'sh'), (0xbc88, 0, 'vasprintf', 'sh'), (0xbc94, 0, 'sync', 'sh'), (0xbca0, 0, 'xdr_u_int', 'sh'), (0xbcac, 0, 'setpgrp', 'sh'), (0xbcb8, 0, 'strstr', 'sh'), (0xbcc4, 0, 'isalnum', 'sh'), (0xbcd0, 0, '__errno_location', 'sh'), (0xbcdc, 0, 'execve', 'sh'), (0xbce8, 0, 'inet_addr', 'sh'), (0xbcf4, 0, 'mkdtemp', 'sh'), (0xbd00, 0, 'hstrerror', 'sh'), (0xbd0c, 0, 'endmntent', 'sh'), (0xbd18, 0, 'getmntent', 'sh'), (0xbd24, 0, 'setrlimit', 'sh'), (0xbd30, 0, 'strncpy', 'sh'), (0xbd3c, 0, 'qsort', 'sh'), (0xbd48, 0, 'execle', 'sh'), (0xbd54, 0, 'ftell', 'sh'), (0xbd60, 0, 'alphasort', 'sh'), (0xbd6c, 0, 'vprintf', 'sh'), (0xbd78, 0, 'strtol', 'sh'), (0xbd84, 0, 'tcgetpgrp', 
'sh'), (0xbd90, 0, 'cfsetospeed', 'sh'), (0xbd9c, 0, 'chroot', 'sh'), (0xbda8, 0, 'strcpy', 'sh'), (0xbdb4, 0, 'mkfifo', 'sh'), (0xbdc0, 0, 'isascii', 'sh'), (0xbdcc, 0, 'getcwd', 'sh'), (0xbdd8, 0, '__h_errno_location', 'sh'), (0xbde4, 0, 'atoi', 'sh'), (0xbdf0, 0, 'mkstemp', 'sh'), (0xbdfc, 0, 'clnttcp_create', 'sh'), (0xbe08, 0, '__xmknod', 'sh'), (0xbe14, 0, 'realpath', 'sh'), (0xbe20, 0, 'freeaddrinfo', 'sh'), (0xbe2c, 0, 'memchr', 'sh'), (0xbe38, 0, 'bsearch', 'sh'), (0xbe44, 0, 'feof', 'sh'), (0xbe50, 0, 'sprintf', 'sh'), (0xbe5c, 0, '_IO_putc', 'sh'), (0xbe68, 0, 'strncasecmp', 'sh'), (0xbe74, 0, 'regexec', 'sh'), (0xbe80, 0, 'getenv', 'sh'), (0xbe8c, 0, 'strsignal', 'sh'), (0xbe98, 0, 'endutent', 'sh'), (0xbea4, 0, 'isblank', 'sh'), (0xbeb0, 0, 'fprintf', 'sh'), (0xbebc, 0, 'openlog', 'sh'), (0xbec8, 0, 'open', 'sh'), (0xbed4, 0, 'strftime', 'sh'), (0xbee0, 0, 'authunix_create_default', 'sh'), (0xbeec, 0, 'fsync', 'sh'), (0xbef8, 0, 'umask', 'sh'), (0xbf04, 0, 'chmod', 'sh'), (0xbf10, 0, 'tcgetattr', 'sh'), (0xbf1c, 0, 'tolower', 'sh'), (0xbf28, 0, 'sscanf', 'sh'), (0xbf34, 0, 'readlink', 'sh'), (0xbf40, 0, 'setvbuf', 'sh'), (0xbf4c, 0, 'setgid', 'sh'), (0xbf58, 0, 'isspace', 'sh'), (0xbf64, 0, 'getlogin', 'sh'), (0xbf70, 0, 'inet_aton', 'sh'), (0xbf7c, 0, 'getpriority', 'sh'), (0xbf88, 0, 'rewind', 'sh'), (0xbf94, 0, 'sleep', 'sh'), (0xbfa0, 0, 'realloc', 'sh'), (0xbfac, 0, 'mkdir', 'sh'), (0xbfb8, 0, 'setgroups', 'sh'), (0xbfc4, 0, 'setpriority', 'sh'), (0xbfd0, 0, 'fgetc', 'sh'), (0xbfdc, 0, 'utmpname', 'sh'), (0xbfe8, 0, 'cfgetispeed', 'sh'), (0xbff4, 0, 'xdr_string', 'sh'), (0xc000, 0, 'getmntent_r', 'sh'), (0xc00c, 0, 'access', 'sh'), (0xc018, 0, 'localtime', 'sh'), (0xc024, 0, 'killpg', 'sh'), (0xc030, 0, 'getnetbyaddr', 'sh'), (0xc03c, 0, 'xdr_opaque', 'sh'), (0xc048, 0, 'difftime', 'sh'), (0xc054, 0, 'ferror', 'sh'), (0xc060, 0, 'memmove', 'sh'), (0xc06c, 0, 'setutent', 'sh'), (0xc078, 0, 'exit', 'sh'), (0xc084, 0, 'setsockopt', 'sh'), (0xc090, 0, 
'ispunct', 'sh'), (0xc09c, 0, 'link', 'sh'), (0xc0a8, 0, 'free', 'sh'), (0xc0b4, 0, 'fdopen', 'sh'), (0xc0c0, 0, 'strerror', 'sh'), (0xc0cc, 0, 'select', 'sh'), (0xc0d8, 0, 'getaddrinfo', 'sh'), (0xc0e4, 0, 'execv', 'sh'), (0xc0f0, 0, 'clearerr', 'sh'), (0xc0fc, 0, 'strtoll', 'sh'), (0xc108, 0, 'strncat', 'sh'), (0xc114, 0, 'getchar', 'sh'), (0xc120, 0, 'gethostbyaddr', 'sh'), (0xc12c, 0, 'inet_pton', 'sh'), (0xc138, 0, 'setuid', 'sh'), (0xc144, 0, 'read', 'sh'), (0xc150, 0, 'bindresvport', 'sh'), (0xc15c, 0, 'strtok', 'sh'), (0xc168, 0, 'updwtmp', 'sh'), (0xc174, 0, 'time', 'sh'), (0xc180, 0, 'vfprintf', 'sh'), (0xc18c, 0, 'inet_ntop', 'sh'), (0xc198, 0, 'fork', 'sh'), (0xc1a4, 0, 'inet_ntoa', 'sh'), (0xc1b0, 0, 'close', 'sh'), (0xc1bc, 0, 'memcmp', 'sh'), (0xc1c8, 0, '_setjmp', 'sh'), (0xc1d4, 0, 'vfork', 'sh'), (0xc1e0, 0, 'wait3', 'sh'), (0xc1ec, 0, 'putenv', 'sh'), (0xc1f8, 0, 'getpgrp', 'sh'), (0xc204, 0, 'swapon', 'sh'), (0xc210, 0, 'rename', 'sh'), (0xc21c, 0, '__fxstat', 'sh'), (0xc228, 0, 'setenv', 'sh'), (0xc234, 0, 'chdir', 'sh'), (0xc240, 0, 'getppid', 'sh'), (0xc24c, 0, 'fclose', 'sh'), (0xc258, 0, 'fopen', 'sh'), (0xc264, 0, 'strsep', 'sh'), (0xc270, 0, 'geteuid', 'sh'), (0xc27c, 0, 'strtod', 'sh'), (0xc288, 0, 'pmap_getmaps', 'sh'), (0xc294, 0, 'fread', 'sh'), (0xc2a0, 0, 'strcspn', 'sh'), (0xc2ac, 0, 'getegid', 'sh'), (0xc2b8, 0, 'sysconf', 'sh'), (0xc2c4, 0, '__res_state', 'sh'), (0xc2d0, 0, 'xdr_array', 'sh'), (0xc2dc, 0, 'clnt_sperror', 'sh'), (0xc2e8, 0, '__lxstat', 'sh'), (0xc2f4, 0, 'fputc', 'sh'), (0xc300, 0, 'pipe', 'sh'), (0xc318, 0, 'system', 'sh'), (0xc324, 0, 'strtoull', 'sh'), (0xc330, 0, 'creat', 'sh'), (0xc33c, 0, '__res_init', 'sh'), (0xc348, 0, 'printf', 'sh'), (0xc354, 0, 'alarm', 'sh'), (0xc360, 0, 'ttyname', 'sh'), (0xc36c, 0, 'xdr_bytes', 'sh'), (0xc378, 0, 'getnameinfo', 'sh'), (0xc384, 0, 'wait', 'sh'), (0xc390, 0, 'waitpid', 'sh'), (0xc39c, 0, 'pmap_getport', 'sh'), (0xc3a8, 0, 'opendir', 'sh'), (0xc3b8, 0, '__entry', 'sh'), 
(0x210d5, 0, 'setpwent', 'sh'), (0x210ed, 0, 'endpwent', 'sh'), (0x21105, 0, 'setgrent', 'sh'), (0x2111d, 0, 'endgrent', 'sh'), (0x21135, 0, 'putpwent', 'sh'), (0x21185, 0, 'putgrent', 'sh'), (0x213b5, 0, 'initgroups', 'sh'), (0x2146d, 0, 'getgrent_r', 'sh'), (0x214c9, 0, 'getgrent', 'sh'), (0x214e9, 0, 'getpwent_r', 'sh'), (0x21545, 0, 'getpwent', 'sh'), (0x21565, 0, 'getgrgid_r', 'sh'), (0x215cd, 0, 'getgrgid', 'sh'), (0x215f1, 0, 'getpwuid_r', 'sh'), (0x21659, 0, 'getpw', 'sh'), (0x216b5, 0, 'getpwuid', 'sh'), (0x216d9, 0, 'getgrnam_r', 'sh'), (0x21745, 0, 'getgrnam', 'sh'), (0x21769, 0, 'getpwnam_r', 'sh'), (0x217d5, 0, 'getpwnam', 'sh'), (0x217f9, 0, 'fgetgrent_r', 'sh'), (0x21821, 0, 'fgetgrent', 'sh'), (0x21845, 0, 'fgetpwent_r', 'sh'), (0x2186d, 0, 'fgetpwent', 'sh'), (0x49da8, 1, 'stdout', 'sh'), (0x49dac, 1, '__environ', 'sh'), (0x49dac, 1, '_environ', 'sh'), (0x49dac, 1, 'environ', 'sh'), (0x49db0, 1, 'optind', 'sh'), (0x49db4, 1, 'optarg', 'sh'), (0x49db8, 1, 'stdin', 'sh'), (0x49dbc, 1, 'stderr', 'sh'), ], 'relocs': [ ], 'names': [ (0x809c, 'sh.ptr_ptr_init_function_0_00049394_0000809c'), (0x80a0, 'sh.ptr_ptr_init_function_0_00049394_000080a0'), (0xb5d0, 'sh.init_function'), (0xb5f8, 'sh.plt_recvfrom'), (0xb604, 'sh.plt_div'), (0xb610, 'sh.plt_fflush'), (0xb61c, 'sh.plt_lchown'), (0xb628, 'sh.plt_statfs'), (0xb634, 'sh.plt_atof'), (0xb640, 'sh.plt_dup2'), (0xb64c, 'sh.plt_strcasecmp'), (0xb658, 'sh.plt_fgets'), (0xb664, 'sh.plt__IO_getc'), (0xb670, 'sh.plt_vsnprintf'), (0xb67c, 'sh.plt_umount2'), (0xb688, 'sh.plt_uname'), (0xb694, 'sh.plt_setbuf'), (0xb6a0, 'sh.plt_strtoul'), (0xb6ac, 'sh.plt___xstat'), (0xb6b8, 'sh.plt_fscanf'), (0xb6c4, 'sh.plt_getprotobyname'), (0xb6d0, 'sh.plt_syscall'), (0xb6dc, 'sh.plt_strptime'), (0xb6e8, 'sh.plt_mktime'), (0xb6f4, 'sh.plt_swapoff'), (0xb700, 'sh.plt_memset'), (0xb70c, 'sh.plt_getservbyport'), (0xb718, 'sh.plt___ctype_tolower_loc'), (0xb724, 'sh.plt_closedir'), (0xb730, 'sh.plt_putchar'), (0xb73c, 
'sh.plt_isatty'), (0xb748, 'sh.plt_strchrnul'), (0xb754, 'sh.plt__exit'), (0xb760, 'sh.plt_strpbrk'), (0xb76c, 'sh.plt_strchr'), (0xb778, 'sh.plt_puts'), (0xb784, 'sh.plt_getpagesize'), (0xb790, 'sh.plt_xdr_int'), (0xb79c, 'sh.plt_bind'), (0xb7a8, 'sh.plt_getuid'), (0xb7b4, 'sh.plt_abort'), (0xb7c0, 'sh.plt_execvp'), (0xb7cc, 'sh.plt_islower'), (0xb7d8, 'sh.plt_fseek'), (0xb7e4, 'sh.plt_fchown'), (0xb7f0, 'sh.plt_cfsetispeed'), (0xb7fc, 'sh.plt_strncmp'), (0xb808, 'sh.plt_getdomainname'), (0xb814, 'sh.plt_rand'), (0xb820, 'sh.plt_sysinfo'), (0xb82c, 'sh.plt_mount'), (0xb838, 'sh.plt_strspn'), (0xb844, 'sh.plt_fputs'), (0xb850, 'sh.plt_chown'), (0xb85c, 'sh.plt_fcntl'), (0xb868, 'sh.plt_lseek'), (0xb874, 'sh.plt_pututline'), (0xb880, 'sh.plt_socket'), (0xb88c, 'sh.plt_strcat'), (0xb898, 'sh.plt_klogctl'), (0xb8a4, 'sh.plt_pivot_root'), (0xb8b0, 'sh.plt_fileno'), (0xb8bc, 'sh.plt_cfgetospeed'), (0xb8c8, 'sh.plt_sethostname'), (0xb8d4, 'sh.plt_getrlimit'), (0xb8e0, 'sh.plt_addmntent'), (0xb8ec, 'sh.plt_xdr_enum'), (0xb8f8, 'sh.plt_pclose'), (0xb904, 'sh.plt_isgraph'), (0xb910, 'sh.plt_rmdir'), (0xb91c, 'sh.plt_getopt_long'), (0xb928, 'sh.plt_getgroups'), (0xb934, 'sh.plt_unsetenv'), (0xb940, 'sh.plt_gethostbyname'), (0xb94c, 'sh.plt_fnmatch'), (0xb958, 'sh.plt_setmntent'), (0xb964, 'sh.plt___libc_start_main'), (0xb970, 'sh.plt_sigaction'), (0xb97c, 'sh.plt_syslog'), (0xb988, 'sh.plt_ftruncate'), (0xb994, 'sh.plt_isalpha'), (0xb9a0, 'sh.plt_memcpy'), (0xb9ac, 'sh.plt_sigemptyset'), (0xb9b8, 'sh.plt_closelog'), (0xb9c4, 'sh.plt_srand'), (0xb9d0, 'sh.plt_execlp'), (0xb9dc, 'sh.plt_setpgid'), (0xb9e8, 'sh.plt_gai_strerror'), (0xb9f4, 'sh.plt_vsyslog'), (0xba00, 'sh.plt_getutent'), (0xba0c, 'sh.plt_dprintf'), (0xba18, 'sh.plt_raise'), (0xba24, 'sh.plt_unlink'), (0xba30, 'sh.plt_utime'), (0xba3c, 'sh.plt_clearenv'), (0xba48, 'sh.plt_toupper'), (0xba54, 'sh.plt_umount'), (0xba60, 'sh.plt_clntudp_create'), (0xba6c, 'sh.plt_scandir'), (0xba78, 'sh.plt_clnt_spcreateerror'), 
(0xba84, 'sh.plt_tcflush'), (0xba90, 'sh.plt_kill'), (0xba9c, 'sh.plt_popen'), (0xbaa8, 'sh.plt_longjmp'), (0xbab4, 'sh.plt_ioctl'), (0xbac0, 'sh.plt_signal'), (0xbacc, 'sh.plt_snprintf'), (0xbad8, 'sh.plt_dirname'), (0xbae4, 'sh.plt_readdir'), (0xbaf0, 'sh.plt_tcsetattr'), (0xbafc, 'sh.plt_getopt'), (0xbb08, 'sh.plt_setsid'), (0xbb14, 'sh.plt_strcmp'), (0xbb20, 'sh.plt_tcsetpgrp'), (0xbb2c, 'sh.plt_sendto'), (0xbb38, 'sh.plt_strrchr'), (0xbb44, 'sh.plt_ctime'), (0xbb50, 'sh.plt_times'), (0xbb5c, 'sh.plt_isupper'), (0xbb68, 'sh.plt_getpid'), (0xbb74, 'sh.plt_crypt'), (0xbb80, 'sh.plt_strdup'), (0xbb8c, 'sh.plt_perror'), (0xbb98, 'sh.plt_write'), (0xbba4, 'sh.plt_getnetbyname'), (0xbbb0, 'sh.plt_malloc'), (0xbbbc, 'sh.plt_isprint'), (0xbbc8, 'sh.plt_ftello'), (0xbbd4, 'sh.plt_vsprintf'), (0xbbe0, 'sh.plt_execl'), (0xbbec, 'sh.plt_gettimeofday'), (0xbbf8, 'sh.plt_fchmod'), (0xbc04, 'sh.plt_gethostname'), (0xbc10, 'sh.plt_stime'), (0xbc1c, 'sh.plt_strlen'), (0xbc28, 'sh.plt_symlink'), (0xbc34, 'sh.plt_regcomp'), (0xbc40, 'sh.plt_getgid'), (0xbc4c, 'sh.plt_regerror'), (0xbc58, 'sh.plt___ctype_toupper_loc'), (0xbc64, 'sh.plt_sigfillset'), (0xbc70, 'sh.plt_regfree'), (0xbc7c, 'sh.plt___cxa_atexit'), (0xbc88, 'sh.plt_vasprintf'), (0xbc94, 'sh.plt_sync'), (0xbca0, 'sh.plt_xdr_u_int'), (0xbcac, 'sh.plt_setpgrp'), (0xbcb8, 'sh.plt_strstr'), (0xbcc4, 'sh.plt_isalnum'), (0xbcd0, 'sh.plt___errno_location'), (0xbcdc, 'sh.plt_execve'), (0xbce8, 'sh.plt_inet_addr'), (0xbcf4, 'sh.plt_mkdtemp'), (0xbd00, 'sh.plt_hstrerror'), (0xbd0c, 'sh.plt_endmntent'), (0xbd18, 'sh.plt_getmntent'), (0xbd24, 'sh.plt_setrlimit'), (0xbd30, 'sh.plt_strncpy'), (0xbd3c, 'sh.plt_qsort'), (0xbd48, 'sh.plt_execle'), (0xbd54, 'sh.plt_ftell'), (0xbd60, 'sh.plt_alphasort'), (0xbd6c, 'sh.plt_vprintf'), (0xbd78, 'sh.plt_strtol'), (0xbd84, 'sh.plt_tcgetpgrp'), (0xbd90, 'sh.plt_cfsetospeed'), (0xbd9c, 'sh.plt_chroot'), (0xbda8, 'sh.plt_strcpy'), (0xbdb4, 'sh.plt_mkfifo'), (0xbdc0, 'sh.plt_isascii'), (0xbdcc, 
'sh.plt_getcwd'), (0xbdd8, 'sh.plt___h_errno_location'), (0xbde4, 'sh.plt_atoi'), (0xbdf0, 'sh.plt_mkstemp'), (0xbdfc, 'sh.plt_clnttcp_create'), (0xbe08, 'sh.plt___xmknod'), (0xbe14, 'sh.plt_realpath'), (0xbe20, 'sh.plt_freeaddrinfo'), (0xbe2c, 'sh.plt_memchr'), (0xbe38, 'sh.plt_bsearch'), (0xbe44, 'sh.plt_feof'), (0xbe50, 'sh.plt_sprintf'), (0xbe5c, 'sh.plt__IO_putc'), (0xbe68, 'sh.plt_strncasecmp'), (0xbe74, 'sh.plt_regexec'), (0xbe80, 'sh.plt_getenv'), (0xbe8c, 'sh.plt_strsignal'), (0xbe98, 'sh.plt_endutent'), (0xbea4, 'sh.plt_isblank'), (0xbeb0, 'sh.plt_fprintf'), (0xbebc, 'sh.plt_openlog'), (0xbec8, 'sh.plt_open'), (0xbed4, 'sh.plt_strftime'), (0xbee0, 'sh.plt_authunix_create_default'), (0xbeec, 'sh.plt_fsync'), (0xbef8, 'sh.plt_umask'), (0xbf04, 'sh.plt_chmod'), (0xbf10, 'sh.plt_tcgetattr'), (0xbf1c, 'sh.plt_tolower'), (0xbf28, 'sh.plt_sscanf'), (0xbf34, 'sh.plt_readlink'), (0xbf40, 'sh.plt_setvbuf'), (0xbf4c, 'sh.plt_setgid'), (0xbf58, 'sh.plt_isspace'), (0xbf64, 'sh.plt_getlogin'), (0xbf70, 'sh.plt_inet_aton'), (0xbf7c, 'sh.plt_getpriority'), (0xbf88, 'sh.plt_rewind'), (0xbf94, 'sh.plt_sleep'), (0xbfa0, 'sh.plt_realloc'), (0xbfac, 'sh.plt_mkdir'), (0xbfb8, 'sh.plt_setgroups'), (0xbfc4, 'sh.plt_setpriority'), (0xbfd0, 'sh.plt_fgetc'), (0xbfdc, 'sh.plt_utmpname'), (0xbfe8, 'sh.plt_cfgetispeed'), (0xbff4, 'sh.plt_xdr_string'), (0xc000, 'sh.plt_getmntent_r'), (0xc00c, 'sh.plt_access'), (0xc018, 'sh.plt_localtime'), (0xc024, 'sh.plt_killpg'), (0xc030, 'sh.plt_getnetbyaddr'), (0xc03c, 'sh.plt_xdr_opaque'), (0xc048, 'sh.plt_difftime'), (0xc054, 'sh.plt_ferror'), (0xc060, 'sh.plt_memmove'), (0xc06c, 'sh.plt_setutent'), (0xc078, 'sh.plt_exit'), (0xc084, 'sh.plt_setsockopt'), (0xc090, 'sh.plt_ispunct'), (0xc09c, 'sh.plt_link'), (0xc0a8, 'sh.plt_free'), (0xc0b4, 'sh.plt_fdopen'), (0xc0c0, 'sh.plt_strerror'), (0xc0cc, 'sh.plt_select'), (0xc0d8, 'sh.plt_getaddrinfo'), (0xc0e4, 'sh.plt_execv'), (0xc0f0, 'sh.plt_clearerr'), (0xc0fc, 'sh.plt_strtoll'), (0xc108, 
'sh.plt_strncat'), (0xc114, 'sh.plt_getchar'), (0xc120, 'sh.plt_gethostbyaddr'), (0xc12c, 'sh.plt_inet_pton'), (0xc138, 'sh.plt_setuid'), (0xc144, 'sh.plt_read'), (0xc150, 'sh.plt_bindresvport'), (0xc15c, 'sh.plt_strtok'), (0xc168, 'sh.plt_updwtmp'), (0xc174, 'sh.plt_time'), (0xc180, 'sh.plt_vfprintf'), (0xc18c, 'sh.plt_inet_ntop'), (0xc198, 'sh.plt_fork'), (0xc1a4, 'sh.plt_inet_ntoa'), (0xc1b0, 'sh.plt_close'), (0xc1bc, 'sh.plt_memcmp'), (0xc1c8, 'sh.plt__setjmp'), (0xc1d4, 'sh.plt_vfork'), (0xc1e0, 'sh.plt_wait3'), (0xc1ec, 'sh.plt_putenv'), (0xc1f8, 'sh.plt_getpgrp'), (0xc204, 'sh.plt_swapon'), (0xc210, 'sh.plt_rename'), (0xc21c, 'sh.plt___fxstat'), (0xc228, 'sh.plt_setenv'), (0xc234, 'sh.plt_chdir'), (0xc240, 'sh.plt_getppid'), (0xc24c, 'sh.plt_fclose'), (0xc258, 'sh.plt_fopen'), (0xc264, 'sh.plt_strsep'), (0xc270, 'sh.plt_geteuid'), (0xc27c, 'sh.plt_strtod'), (0xc288, 'sh.plt_pmap_getmaps'), (0xc294, 'sh.plt_fread'), (0xc2a0, 'sh.plt_strcspn'), (0xc2ac, 'sh.plt_getegid'), (0xc2b8, 'sh.plt_sysconf'), (0xc2c4, 'sh.plt___res_state'), (0xc2d0, 'sh.plt_xdr_array'), (0xc2dc, 'sh.plt_clnt_sperror'), (0xc2e8, 'sh.plt___lxstat'), (0xc2f4, 'sh.plt_fputc'), (0xc300, 'sh.plt_pipe'), (0xc30c, 'sh.plt__Jv_RegisterClasses'), (0xc318, 'sh.plt_system'), (0xc324, 'sh.plt_strtoull'), (0xc330, 'sh.plt_creat'), (0xc33c, 'sh.plt___res_init'), (0xc348, 'sh.plt_printf'), (0xc354, 'sh.plt_alarm'), (0xc360, 'sh.plt_ttyname'), (0xc36c, 'sh.plt_xdr_bytes'), (0xc378, 'sh.plt_getnameinfo'), (0xc384, 'sh.plt_wait'), (0xc390, 'sh.plt_waitpid'), (0xc39c, 'sh.plt_pmap_getport'), (0xc3a8, 'sh.plt_opendir'), (0xc3b8, 'sh.__entry'), (0xc41c, 'sh.fini_function_0'), (0xc438, 'sh.init_function_0'), (0xc554, 'sh.ptr_stderr_0000c554'), (0xc8d0, 'sh.ptr_optind_0000c8d0'), (0xdf60, 'sh.ptr_optind_0000df60'), (0xe418, 'sh.ptr_stdout_0000e418'), (0xe41c, 'sh.ptr_stderr_0000e41c'), (0xe830, 'sh.ptr_optind_0000e830'), (0xe83c, 'sh.ptr_stdout_0000e83c'), (0xe844, 'sh.ptr_stdin_0000e844'), (0x107ec, 
'sh.ptr_optind_000107ec'), (0x10930, 'sh.ptr_optind_00010930'), (0x10a54, 'sh.ptr_optind_00010a54'), (0x10c10, 'sh.ptr_stdout_00010c10'), (0x10c20, 'sh.ptr_optind_00010c20'), (0x10c2c, 'sh.ptr_stderr_00010c2c'), (0x10d00, 'sh.ptr_optind_00010d00'), (0x10ec8, 'sh.ptr_stdout_00010ec8'), (0x1105c, 'sh.ptr_optind_0001105c'), (0x110a4, 'sh.ptr_stdin_000110a4'), (0x113cc, 'sh.ptr_optind_000113cc'), (0x11440, 'sh.ptr_stderr_00011440'), (0x11940, 'sh.ptr_optind_00011940'), (0x11c3c, 'sh.ptr_optind_00011c3c'), (0x11ddc, 'sh.ptr_optind_00011ddc'), (0x11de0, 'sh.ptr___environ_00011de0'), (0x12014, 'sh.ptr_stderr_00012014'), (0x12610, 'sh.ptr_optarg_00012610'), (0x12618, 'sh.ptr_optind_00012618'), (0x12624, 'sh.ptr_stdin_00012624'), (0x12750, 'sh.ptr_optind_00012750'), (0x1289c, 'sh.ptr_optind_0001289c'), (0x13268, 'sh.ptr_stdout_00013268'), (0x132a8, 'sh.ptr_optind_000132a8'), (0x13340, 'sh.ptr_optind_00013340'), (0x13384, 'sh.ptr_optind_00013384'), (0x13420, 'sh.ptr_optind_00013420'), (0x13598, 'sh.ptr_optind_00013598'), (0x1359c, 'sh.ptr_stderr_0001359c'), (0x135fc, 'sh.ptr_stderr_000135fc'), (0x13adc, 'sh.ptr_optind_00013adc'), (0x13b5c, 'sh.ptr_optind_00013b5c'), (0x13e0c, 'sh.ptr_stdout_00013e0c'), (0x13e28, 'sh.ptr_optind_00013e28'), (0x13e2c, 'sh.ptr_stdin_00013e2c'), (0x13f20, 'sh.ptr_stdout_00013f20'), (0x14e4c, 'sh.ptr_optind_00014e4c'), (0x14f40, 'sh.ptr_optind_00014f40'), (0x14f44, 'sh.ptr_stdout_00014f44'), (0x14f4c, 'sh.ptr_stdin_00014f4c'), (0x157a8, 'sh.ptr_optind_000157a8'), (0x15b10, 'sh.ptr_optind_00015b10'), (0x15b94, 'sh.ptr_optind_00015b94'), (0x15bdc, 'sh.ptr_stdout_00015bdc'), (0x15be0, 'sh.ptr_stdin_00015be0'), (0x15d24, 'sh.ptr_optarg_00015d24'), (0x15d2c, 'sh.ptr_optind_00015d2c'), (0x15ec4, 'sh.ptr_optind_00015ec4'), (0x16198, 'sh.ptr_optind_00016198'), (0x16334, 'sh.ptr_alphasort_00016334'), (0x16340, 'sh.ptr___environ_00016340'), (0x163f4, 'sh.ptr_optarg_000163f4'), (0x16404, 'sh.ptr_optind_00016404'), (0x166d8, 'sh.ptr_stdin_000166d8'), 
(0x17d44, 'sh.ptr_stdin_00017d44'), (0x18100, 'sh.ptr_stdout_00018100'), (0x184b8, 'sh.ptr_stdout_000184b8'), (0x18818, 'sh.ptr_tolower_00018818'), (0x1881c, 'sh.ptr_toupper_0001881c'), (0x193b4, 'sh.ptr_stdin_000193b4'), (0x193bc, 'sh.ptr_stdout_000193bc'), (0x193c4, 'sh.ptr_stderr_000193c4'), (0x193c8, 'sh.ptr___environ_000193c8'), (0x193d4, 'sh.ptr_optind_000193d4'), (0x1a458, 'sh.ptr_optind_0001a458'), (0x1a464, 'sh.ptr_stdout_0001a464'), (0x1a470, 'sh.ptr_stdin_0001a470'), (0x1a494, 'sh.ptr_stdout_0001a494'), (0x1aba8, 'sh.ptr_stdout_0001aba8'), (0x1ac18, 'sh.ptr_stdout_0001ac18'), (0x1ad64, 'sh.ptr_stdout_0001ad64'), (0x1ba18, 'sh.ptr_stdout_0001ba18'), (0x1c910, 'sh.ptr_optind_0001c910'), (0x1cc80, 'sh.ptr_optind_0001cc80'), (0x1d7d8, 'sh.ptr_optind_0001d7d8'), (0x1e544, 'sh.ptr_optind_0001e544'), (0x1e550, 'sh.ptr_stdin_0001e550'), (0x1e8dc, 'sh.ptr_optind_0001e8dc'), (0x1e8f4, 'sh.ptr_stderr_0001e8f4'), (0x1ebfc, 'sh.ptr_stderr_0001ebfc'), (0x1f294, 'sh.ptr_stdin_0001f294'), (0x1f2b4, 'sh.ptr_stdout_0001f2b4'), (0x1f8c4, 'sh.ptr_optarg_0001f8c4'), (0x1f8c8, 'sh.ptr_optind_0001f8c8'), (0x1f924, 'sh.ptr_optind_0001f924'), (0x20670, 'sh.ptr_stderr_00020670'), (0x207f0, 'sh.ptr_stdout_000207f0'), (0x207f4, 'sh.ptr_stderr_000207f4'), (0x208a8, 'sh.ptr_stdin_000208a8'), (0x20c20, 'sh.ptr_stdout_00020c20'), (0x20c44, 'sh.ptr_stdout_00020c44'), (0x20e98, 'sh.ptr_stdout_00020e98'), (0x210d5, 'sh.setpwent'), (0x210ed, 'sh.endpwent'), (0x21105, 'sh.setgrent'), (0x2111c, 'sh.endgrent'), (0x21135, 'sh.putpwent'), (0x21185, 'sh.putgrent'), (0x213b4, 'sh.initgroups'), (0x2146d, 'sh.getgrent_r'), (0x214c9, 'sh.getgrent'), (0x214e9, 'sh.getpwent_r'), (0x21545, 'sh.getpwent'), (0x21564, 'sh.getgrgid_r'), (0x215cc, 'sh.getgrgid'), (0x215f0, 'sh.getpwuid_r'), (0x21659, 'sh.getpw'), (0x216b4, 'sh.getpwuid'), (0x216d8, 'sh.getgrnam_r'), (0x21744, 'sh.getgrnam'), (0x21768, 'sh.getpwnam_r'), (0x217d4, 'sh.getpwnam'), (0x217f9, 'sh.fgetgrent_r'), (0x21821, 'sh.fgetgrent'), 
(0x21845, 'sh.fgetpwent_r'), (0x2186d, 'sh.fgetpwent'), (0x21cb4, 'sh.ptr_optind_00021cb4'), (0x224b4, 'sh.ptr_optind_000224b4'), (0x224e8, 'sh.ptr_stdin_000224e8'), (0x2266c, 'sh.ptr_stdout_0002266c'), (0x227e4, 'sh.ptr_optind_000227e4'), (0x22db4, 'sh.ptr_stderr_00022db4'), (0x22db8, 'sh.ptr_stdout_00022db8'), (0x22e38, 'sh.ptr_stdout_00022e38'), (0x23a74, 'sh.ptr_optind_00023a74'), (0x23b64, 'sh.ptr_optind_00023b64'), (0x23cc4, 'sh.ptr_optind_00023cc4'), (0x24a68, 'sh.ptr_optind_00024a68'), (0x24c84, 'sh.ptr_optind_00024c84'), (0x25878, 'ptr_wstr_ _00025878'), (0x268c8, 'sh.ptr_optind_000268c8'), (0x269cc, 'sh.ptr_stdout_000269cc'), (0x26eb0, 'sh.ptr_optind_00026eb0'), (0x285d4, 'sh.ptr_stdout_000285d4'), (0x292a4, 'sh.ptr_stdout_000292a4'), (0x292d0, 'sh.ptr_stdout_000292d0'), (0x292d4, 'sh.ptr_stderr_000292d4'), (0x29364, 'sh.ptr_stdout_00029364'), (0x2938c, 'sh.ptr_stderr_0002938c'), (0x293a4, 'sh.ptr_stderr_000293a4'), (0x29640, 'sh.ptr_stderr_00029640'), (0x2a860, 'sh.ptr_stderr_0002a860'), (0x2aeac, 'sh.ptr_stderr_0002aeac'), (0x2b3a8, 'sh.ptr_stdout_0002b3a8'), (0x2b574, 'sh.ptr_stdout_0002b574'), (0x2f1b4, 'sh.ptr_stderr_0002f1b4'), (0x2f534, 'sh.ptr_stderr_0002f534'), (0x2f550, 'sh.ptr___environ_0002f550'), (0x2f978, 'sh.ptr_stdout_0002f978'), (0x30a14, 'sh.ptr_stdout_00030a14'), (0x30b1c, 'sh.ptr_stdout_00030b1c'), (0x30b70, 'sh.ptr_stdout_00030b70'), (0x31de8, 'sh.ptr_stdout_00031de8'), (0x32130, 'sh.ptr_optind_00032130'), (0x3213c, 'sh.ptr_stdin_0003213c'), (0x33000, 'sh.ptr_optind_00033000'), (0x332fc, 'sh.ptr_stdin_000332fc'), (0x33328, 'sh.ptr_stdout_00033328'), (0x33670, 'sh.ptr_xdr_int_00033670'), (0x34560, 'sh.ptr_optind_00034560'), (0x34840, 'sh.ptr_optind_00034840'), (0x34d80, 'sh.ptr_stdout_00034d80'), (0x34db4, 'sh.ptr_stdout_00034db4'), (0x351a4, 'sh.ptr_stderr_000351a4'), (0x353a0, 'sh.ptr_stdout_000353a0'), (0x354b0, 'sh.ptr_stdout_000354b0'), (0x370c8, 'sh.fini_function'), (0x3d4e8, 'wstr_ _0003d4e8'), (0x49394, 
'sh.ptr_init_function_0_00049394'), (0x49398, 'sh.ptr_fini_function_0_00049398'), (0x4949c, '*.recvfrom_0004949c'), (0x494a0, '*.div_000494a0'), (0x494a4, '*.fflush_000494a4'), (0x494a8, '*.lchown_000494a8'), (0x494ac, '*.statfs_000494ac'), (0x494b0, '*.atof_000494b0'), (0x494b4, '*.dup2_000494b4'), (0x494b8, '*.strcasecmp_000494b8'), (0x494bc, '*.fgets_000494bc'), (0x494c0, '*._IO_getc_000494c0'), (0x494c4, '*.vsnprintf_000494c4'), (0x494c8, '*.umount2_000494c8'), (0x494cc, '*.uname_000494cc'), (0x494d0, '*.setbuf_000494d0'), (0x494d4, '*.strtoul_000494d4'), (0x494d8, '*.__xstat_000494d8'), (0x494dc, '*.fscanf_000494dc'), (0x494e0, '*.getprotobyname_000494e0'), (0x494e4, '*.syscall_000494e4'), (0x494e8, '*.strptime_000494e8'), (0x494ec, '*.mktime_000494ec'), (0x494f0, '*.swapoff_000494f0'), (0x494f4, '*.memset_000494f4'), (0x494f8, '*.getservbyport_000494f8'), (0x494fc, '*.__ctype_tolower_loc_000494fc'), (0x49500, '*.closedir_00049500'), (0x49504, '*.putchar_00049504'), (0x49508, '*.isatty_00049508'), (0x4950c, '*.strchrnul_0004950c'), (0x49510, '*._exit_00049510'), (0x49514, '*.strpbrk_00049514'), (0x49518, '*.strchr_00049518'), (0x4951c, '*.puts_0004951c'), (0x49520, '*.getpagesize_00049520'), (0x49524, '*.xdr_int_00049524'), (0x49528, '*.bind_00049528'), (0x4952c, '*.getuid_0004952c'), (0x49530, '*.abort_00049530'), (0x49534, '*.execvp_00049534'), (0x49538, '*.islower_00049538'), (0x4953c, '*.fseek_0004953c'), (0x49540, '*.fchown_00049540'), (0x49544, '*.cfsetispeed_00049544'), (0x49548, '*.strncmp_00049548'), (0x4954c, '*.getdomainname_0004954c'), (0x49550, '*.rand_00049550'), (0x49554, '*.sysinfo_00049554'), (0x49558, '*.mount_00049558'), (0x4955c, '*.strspn_0004955c'), (0x49560, '*.fputs_00049560'), (0x49564, '*.chown_00049564'), (0x49568, '*.fcntl_00049568'), (0x4956c, '*.lseek_0004956c'), (0x49570, '*.pututline_00049570'), (0x49574, '*.socket_00049574'), (0x49578, '*.strcat_00049578'), (0x4957c, '*.klogctl_0004957c'), (0x49580, '*.pivot_root_00049580'), 
(0x49584, '*.fileno_00049584'), (0x49588, '*.cfgetospeed_00049588'), (0x4958c, '*.sethostname_0004958c'), (0x49590, '*.getrlimit_00049590'), (0x49594, '*.addmntent_00049594'), (0x49598, '*.xdr_enum_00049598'), (0x4959c, '*.pclose_0004959c'), (0x495a0, '*.isgraph_000495a0'), (0x495a4, '*.rmdir_000495a4'), (0x495a8, '*.getopt_long_000495a8'), (0x495ac, '*.getgroups_000495ac'), (0x495b0, '*.unsetenv_000495b0'), (0x495b4, '*.gethostbyname_000495b4'), (0x495b8, '*.fnmatch_000495b8'), (0x495bc, '*.setmntent_000495bc'), (0x495c0, '*.__libc_start_main_000495c0'), (0x495c4, '*.sigaction_000495c4'), (0x495c8, '*.syslog_000495c8'), (0x495cc, '*.ftruncate_000495cc'), (0x495d0, '*.isalpha_000495d0'), (0x495d4, '*.memcpy_000495d4'), (0x495d8, '*.sigemptyset_000495d8'), (0x495dc, '*.closelog_000495dc'), (0x495e0, '*.srand_000495e0'), (0x495e4, '*.execlp_000495e4'), (0x495e8, '*.setpgid_000495e8'), (0x495ec, '*.gai_strerror_000495ec'), (0x495f0, '*.vsyslog_000495f0'), (0x495f4, '*.getutent_000495f4'), (0x495f8, '*.dprintf_000495f8'), (0x495fc, '*.raise_000495fc'), (0x49600, '*.unlink_00049600'), (0x49604, '*.utime_00049604'), (0x49608, '*.clearenv_00049608'), (0x4960c, '*.toupper_0004960c'), (0x49610, '*.umount_00049610'), (0x49614, '*.clntudp_create_00049614'), (0x49618, '*.scandir_00049618'), (0x4961c, '*.clnt_spcreateerror_0004961c'), (0x49620, '*.tcflush_00049620'), (0x49624, '*.kill_00049624'), (0x49628, '*.popen_00049628'), (0x4962c, '*.longjmp_0004962c'), (0x49630, '*.ioctl_00049630'), (0x49634, '*.signal_00049634'), (0x49638, '*.snprintf_00049638'), (0x4963c, '*.dirname_0004963c'), (0x49640, '*.readdir_00049640'), (0x49644, '*.tcsetattr_00049644'), (0x49648, '*.getopt_00049648'), (0x4964c, '*.setsid_0004964c'), (0x49650, '*.strcmp_00049650'), (0x49654, '*.tcsetpgrp_00049654'), (0x49658, '*.sendto_00049658'), (0x4965c, '*.strrchr_0004965c'), (0x49660, '*.ctime_00049660'), (0x49664, '*.times_00049664'), (0x49668, '*.isupper_00049668'), (0x4966c, '*.getpid_0004966c'), 
(0x49670, '*.crypt_00049670'), (0x49674, '*.strdup_00049674'), (0x49678, '*.perror_00049678'), (0x4967c, '*.write_0004967c'), (0x49680, '*.getnetbyname_00049680'), (0x49684, '*.malloc_00049684'), (0x49688, '*.isprint_00049688'), (0x4968c, '*.ftello_0004968c'), (0x49690, '*.vsprintf_00049690'), (0x49694, '*.execl_00049694'), (0x49698, '*.gettimeofday_00049698'), (0x4969c, '*.fchmod_0004969c'), (0x496a0, '*.gethostname_000496a0'), (0x496a4, '*.stime_000496a4'), (0x496a8, '*.strlen_000496a8'), (0x496ac, '*.symlink_000496ac'), (0x496b0, '*.regcomp_000496b0'), (0x496b4, '*.getgid_000496b4'), (0x496b8, '*.regerror_000496b8'), (0x496bc, '*.__ctype_toupper_loc_000496bc'), (0x496c0, '*.sigfillset_000496c0'), (0x496c4, '*.regfree_000496c4'), (0x496c8, '*.__cxa_atexit_000496c8'), (0x496cc, '*.vasprintf_000496cc'), (0x496d0, '*.sync_000496d0'), (0x496d4, '*.xdr_u_int_000496d4'), (0x496d8, '*.setpgrp_000496d8'), (0x496dc, '*.strstr_000496dc'), (0x496e0, '*.isalnum_000496e0'), (0x496e4, '*.__errno_location_000496e4'), (0x496e8, '*.execve_000496e8'), (0x496ec, '*.inet_addr_000496ec'), (0x496f0, '*.mkdtemp_000496f0'), (0x496f4, '*.hstrerror_000496f4'), (0x496f8, '*.endmntent_000496f8'), (0x496fc, '*.getmntent_000496fc'), (0x49700, '*.setrlimit_00049700'), (0x49704, '*.strncpy_00049704'), (0x49708, '*.qsort_00049708'), (0x4970c, '*.execle_0004970c'), (0x49710, '*.ftell_00049710'), (0x49714, '*.alphasort_00049714'), (0x49718, '*.vprintf_00049718'), (0x4971c, '*.strtol_0004971c'), (0x49720, '*.tcgetpgrp_00049720'), (0x49724, '*.cfsetospeed_00049724'), (0x49728, '*.chroot_00049728'), (0x4972c, '*.strcpy_0004972c'), (0x49730, '*.mkfifo_00049730'), (0x49734, '*.isascii_00049734'), (0x49738, '*.getcwd_00049738'), (0x4973c, '*.__h_errno_location_0004973c'), (0x49740, '*.atoi_00049740'), (0x49744, '*.mkstemp_00049744'), (0x49748, '*.clnttcp_create_00049748'), (0x4974c, '*.__xmknod_0004974c'), (0x49750, '*.realpath_00049750'), (0x49754, '*.freeaddrinfo_00049754'), (0x49758, 
'*.memchr_00049758'), (0x4975c, '*.bsearch_0004975c'), (0x49760, '*.feof_00049760'), (0x49764, '*.sprintf_00049764'), (0x49768, '*._IO_putc_00049768'), (0x4976c, '*.strncasecmp_0004976c'), (0x49770, '*.regexec_00049770'), (0x49774, '*.getenv_00049774'), (0x49778, '*.strsignal_00049778'), (0x4977c, '*.endutent_0004977c'), (0x49780, '*.isblank_00049780'), (0x49784, '*.fprintf_00049784'), (0x49788, '*.openlog_00049788'), (0x4978c, '*.open_0004978c'), (0x49790, '*.strftime_00049790'), (0x49794, '*.authunix_create_default_00049794'), (0x49798, '*.fsync_00049798'), (0x4979c, '*.umask_0004979c'), (0x497a0, '*.chmod_000497a0'), (0x497a4, '*.tcgetattr_000497a4'), (0x497a8, '*.tolower_000497a8'), (0x497ac, '*.sscanf_000497ac'), (0x497b0, '*.readlink_000497b0'), (0x497b4, '*.setvbuf_000497b4'), (0x497b8, '*.setgid_000497b8'), (0x497bc, '*.isspace_000497bc'), (0x497c0, '*.getlogin_000497c0'), (0x497c4, '*.inet_aton_000497c4'), (0x497c8, '*.getpriority_000497c8'), (0x497cc, '*.rewind_000497cc'), (0x497d0, '*.sleep_000497d0'), (0x497d4, '*.realloc_000497d4'), (0x497d8, '*.mkdir_000497d8'), (0x497dc, '*.setgroups_000497dc'), (0x497e0, '*.setpriority_000497e0'), (0x497e4, '*.fgetc_000497e4'), (0x497e8, '*.utmpname_000497e8'), (0x497ec, '*.cfgetispeed_000497ec'), (0x497f0, '*.xdr_string_000497f0'), (0x497f4, '*.getmntent_r_000497f4'), (0x497f8, '*.access_000497f8'), (0x497fc, '*.localtime_000497fc'), (0x49800, '*.killpg_00049800'), (0x49804, '*.getnetbyaddr_00049804'), (0x49808, '*.xdr_opaque_00049808'), (0x4980c, '*.difftime_0004980c'), (0x49810, '*.ferror_00049810'), (0x49814, '*.memmove_00049814'), (0x49818, '*.setutent_00049818'), (0x4981c, '*.exit_0004981c'), (0x49820, '*.setsockopt_00049820'), (0x49824, '*.ispunct_00049824'), (0x49828, '*.link_00049828'), (0x4982c, '*.free_0004982c'), (0x49830, '*.fdopen_00049830'), (0x49834, '*.strerror_00049834'), (0x49838, '*.select_00049838'), (0x4983c, '*.getaddrinfo_0004983c'), (0x49840, '*.execv_00049840'), (0x49844, 
'*.clearerr_00049844'), (0x49848, '*.strtoll_00049848'), (0x4984c, '*.strncat_0004984c'), (0x49850, '*.getchar_00049850'), (0x49854, '*.gethostbyaddr_00049854'), (0x49858, '*.inet_pton_00049858'), (0x4985c, '*.setuid_0004985c'), (0x49860, '*.read_00049860'), (0x49864, '*.bindresvport_00049864'), (0x49868, '*.strtok_00049868'), (0x4986c, '*.updwtmp_0004986c'), (0x49870, '*.time_00049870'), (0x49874, '*.vfprintf_00049874'), (0x49878, '*.inet_ntop_00049878'), (0x4987c, '*.fork_0004987c'), (0x49880, '*.inet_ntoa_00049880'), (0x49884, '*.close_00049884'), (0x49888, '*.memcmp_00049888'), (0x4988c, '*._setjmp_0004988c'), (0x49890, '*.vfork_00049890'), (0x49894, '*.wait3_00049894'), (0x49898, '*.putenv_00049898'), (0x4989c, '*.getpgrp_0004989c'), (0x498a0, '*.swapon_000498a0'), (0x498a4, '*.rename_000498a4'), (0x498a8, '*.__fxstat_000498a8'), (0x498ac, '*.setenv_000498ac'), (0x498b0, '*.chdir_000498b0'), (0x498b4, '*.getppid_000498b4'), (0x498b8, '*.fclose_000498b8'), (0x498bc, '*.fopen_000498bc'), (0x498c0, '*.strsep_000498c0'), (0x498c4, '*.geteuid_000498c4'), (0x498c8, '*.strtod_000498c8'), (0x498cc, '*.pmap_getmaps_000498cc'), (0x498d0, '*.fread_000498d0'), (0x498d4, '*.strcspn_000498d4'), (0x498d8, '*.getegid_000498d8'), (0x498dc, '*.sysconf_000498dc'), (0x498e0, '*.__res_state_000498e0'), (0x498e4, '*.xdr_array_000498e4'), (0x498e8, '*.clnt_sperror_000498e8'), (0x498ec, '*.__lxstat_000498ec'), (0x498f0, '*.fputc_000498f0'), (0x498f4, '*.pipe_000498f4'), (0x498f8, '*._Jv_RegisterClasses_000498f8'), (0x498fc, '*.system_000498fc'), (0x49900, '*.strtoull_00049900'), (0x49904, '*.creat_00049904'), (0x49908, '*.__res_init_00049908'), (0x4990c, '*.printf_0004990c'), (0x49910, '*.alarm_00049910'), (0x49914, '*.ttyname_00049914'), (0x49918, '*.xdr_bytes_00049918'), (0x4991c, '*.getnameinfo_0004991c'), (0x49920, '*.wait_00049920'), (0x49924, '*.waitpid_00049924'), (0x49928, '*.pmap_getport_00049928'), (0x4992c, '*.opendir_0004992c'), (0x49934, '*.__gmon_start___00049934'), 
(0x49da8, 'sh.stdout'), (0x49dac, 'sh.__environ'), (0x49db0, 'sh.optind'), (0x49db4, 'sh.optarg'), (0x49db8, 'sh.stdin'), (0x49dbc, 'sh.stderr'), ], 'pltgot': [ ], }
[ 1477, 62, 7890, 796, 1391, 198, 220, 220, 220, 705, 320, 3742, 10354, 685, 198, 220, 220, 220, 220, 220, 220, 220, 357, 15, 87, 2920, 2920, 66, 11, 604, 11, 860, 11, 705, 24620, 8344, 85, 6738, 33809, 198, 220, 220, 220, 220, 22...
1.605513
34,394
import abc
[ 11748, 450, 66, 628 ]
3
4
import rclpy from rclpy.node import Node from builtin_interfaces.msg import Time from rclpy.clock import Clock from ros2_msg.msg import Test
[ 11748, 374, 565, 9078, 198, 6738, 374, 565, 9078, 13, 17440, 1330, 19081, 198, 6738, 3170, 259, 62, 3849, 32186, 13, 19662, 1330, 3862, 198, 6738, 374, 565, 9078, 13, 15750, 1330, 21328, 198, 6738, 686, 82, 17, 62, 19662, 13, 19662, ...
3.085106
47
from rest_framework import serializers from ..models import ProdottoCombinato from .barcode import BarcodeInline from .costo import CostoInline from .prezzo import PrezzoInline from .prodotto import ProdottoSerializerMininal from .prodotto_immagine import ProdottoImmagineInline
[ 6738, 1334, 62, 30604, 1330, 11389, 11341, 198, 198, 6738, 11485, 27530, 1330, 1041, 67, 17631, 20575, 259, 5549, 198, 6738, 764, 65, 5605, 1098, 1330, 2409, 8189, 818, 1370, 198, 6738, 764, 15805, 78, 1330, 6446, 78, 818, 1370, 198, ...
3.426829
82
from .single import AssetModel from ..utils import mass_broadcast import numpy as np
[ 6738, 764, 29762, 1330, 31433, 17633, 198, 6738, 11485, 26791, 1330, 2347, 62, 36654, 2701, 198, 198, 11748, 299, 32152, 355, 45941, 628, 628, 198 ]
3.6
25
n=int(input()) m=int(input()) mlist=list(range(n)) result=[] for i in range(n): for j in range(m-1): mlist.append(mlist.pop(0)) result.append(mlist.pop(0)) print(result)
[ 77, 28, 600, 7, 15414, 28955, 198, 76, 28, 600, 7, 15414, 28955, 198, 198, 4029, 396, 28, 4868, 7, 9521, 7, 77, 4008, 198, 198, 20274, 28, 21737, 198, 198, 1640, 1312, 287, 2837, 7, 77, 2599, 198, 220, 220, 220, 329, 474, 287, ...
2.054348
92
""" The MIT License (MIT) Copyright (c) 2016-2017 Elastic Email, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" import requests from enum import Enum # API version 2.42.0 class ApiTypes: """ """ class AccessLevel(Enum): """ """ EENone = 0 """ """ ViewAccount = 1 """ """ ViewContacts = 2 """ """ ViewForms = 4 """ """ ViewTemplates = 8 """ """ ViewCampaigns = 16 """ """ ViewChannels = 32 """ """ ViewAutomations = 64 """ """ ViewSurveys = 128 """ """ ViewSettings = 256 """ """ ViewBilling = 512 """ """ ViewSubAccounts = 1024 """ """ ViewUsers = 2048 """ """ ViewFiles = 4096 """ """ ViewReports = 8192 """ """ ModifyAccount = 16384 """ """ ModifyContacts = 32768 """ """ ModifyForms = 65536 """ """ ModifyTemplates = 131072 """ """ ModifyCampaigns = 262144 """ """ ModifyChannels = 524288 """ """ ModifyAutomations = 1048576 """ """ ModifySurveys = 2097152 """ """ ModifyFiles = 4194304 """ """ Export = 8388608 """ """ SendSmtp = 16777216 """ """ SendSMS = 33554432 """ """ ModifySettings = 67108864 """ """ ModifyBilling = 134217728 """ """ ModifyProfile = 268435456 """ """ ModifySubAccounts = 536870912 """ """ ModifyUsers = 1073741824 """ """ Security = 2147483648 """ """ ModifyLanguage = 4294967296 """ """ ViewSupport = 8589934592 """ """ SendHttp = 17179869184 """ """ Modify2FA = 34359738368 """ """ ModifySupport = 68719476736 """ """ ViewCustomFields = 137438953472 """ """ ModifyCustomFields = 274877906944 """ """ ModifyWebNotifications = 549755813888 """ """ ExtendedLogs = 1099511627776 """ """ VerifyEmails = 2199023255552 """ """ ViewEmailVerifications = 4398046511104 """ """ class AccessToken: """ Access level or permission to be assigned to this Access Token. """ AccessLevel = None # ApiTypes.AccessLevel """ Name or email address of the token. """ Name = None # string """ """ MaskedToken = None # string """ Date this AccessToken was created. """ DateCreated = None # DateTime """ Date this AccessToken was last used. """ LastUse = None # DateTime? """ Date this AccessToken expires. """ Expires = None # DateTime? 
""" Comma separated list of CIDR notated IP ranges that this token can connect from. """ RestrictAccessToIPRange = None # string """ """ AllowUpdate = None # bool """ """ Type = None # ApiTypes.AccessTokenType """ """ class AccessTokenType(Enum): """ ApiKey that gives you access to our SMTP and HTTP API's. """ APIKey = 1 """ """ SMTPCredential = 2 """ Detailed information about your account """ class Account: """ Code used for tax purposes. """ TaxCode = None # string """ Public key for limited access to your Account such as contact/add so you can use it safely on public websites. """ PublicAccountID = None # string """ True, if Account is a Sub-Account. Otherwise, false """ IsSub = None # bool """ """ IsUser = None # bool """ The number of Sub-Accounts this Account has. """ SubAccountsCount = None # long """ Number of status: 1 - Active """ StatusNumber = None # int """ Account status: Active """ StatusFormatted = None # string """ URL form for payments. """ PaymentFormUrl = None # string """ URL to your logo image. """ LogoUrl = None # string """ HTTP address of your website. """ Website = None # string """ True: Turn on or off ability to send mails under your brand. Otherwise, false """ EnablePrivateBranding = None # bool """ """ EnablePrivateBrandingCss = None # bool """ Address to your support. """ SupportLink = None # string """ Subdomain for your rebranded service """ PrivateBrandingUrl = None # string """ """ PrivateBrandingCssUrl = None # string """ First name. """ FirstName = None # string """ Last name. """ LastName = None # string """ Company name. """ Company = None # string """ First line of address. """ Address1 = None # string """ Second line of address. """ Address2 = None # string """ City. """ City = None # string """ State or province. """ State = None # string """ Zip/postal code. """ Zip = None # string """ Numeric ID of country. 
A file with the list of countries is available <a href="http://api.elasticemail.com/public/countries"><b>here</b></a> """ CountryID = None # int? """ Phone number """ Phone = None # string """ Proper email address. """ Email = None # string """ URL for affiliating. """ AffiliateLink = None # string """ Numeric reputation """ Reputation = None # double """ Amount of emails sent from this Account """ TotalEmailsSent = None # long """ Amount of emails sent from this Account """ MonthlyEmailsSent = None # long? """ Current credit in Account for Pay as you go plans. """ Credit = None # decimal """ Amount of email credits """ EmailCredits = None # int """ Amount of emails sent from this Account """ PricePerEmail = None # decimal """ Why your clients are receiving your emails. """ DeliveryReason = None # string """ URL for making payments. """ AccountPaymentUrl = None # string """ Address of SMTP server. """ Smtp = None # string """ Address of alternative SMTP server. """ SmtpAlternative = None # string """ Status of automatic payments configuration. """ AutoCreditStatus = None # string """ When AutoCreditStatus is Enabled, the credit level that triggers the credit to be recharged. """ AutoCreditLevel = None # decimal """ When AutoCreditStatus is Enabled, the amount of credit to be recharged. """ AutoCreditAmount = None # decimal """ Amount of emails Account can send daily """ DailySendLimit = None # int """ Creation date. """ DateCreated = None # DateTime """ True, if you have enabled link tracking. Otherwise, false """ LinkTracking = None # bool """ Type of content encoding """ ContentTransferEncoding = None # string """ Enable contact delivery and optimization tools on your Account. """ EnableContactFeatures = None # bool """ """ NeedsSMSVerification = None # bool """ """ IsGoogleAccount = None # bool """ Indicates if EE logo in the footer is required (ex. 
for trial account on older plan) """ IsEELogoRequired = None # bool """ """ DisableGlobalContacts = None # bool """ """ UntrustedDeviceAlertDisabled = None # bool """ Basic overview of your account """ class AccountOverview: """ Amount of emails sent from this Account """ TotalEmailsSent = None # long """ Current credit in Account for Pay as you go plans. """ Credit = None # decimal """ Cost of 1000 emails """ CostPerThousand = None # decimal """ Number of messages in progress """ InProgressCount = None # long """ Number of contacts currently with blocked status of Unsubscribed, Complaint, Bounced or InActive """ BlockedContactsCount = None # long """ Numeric reputation """ Reputation = None # double """ Number of contacts """ ContactCount = None # long """ Number of created campaigns """ CampaignCount = None # long """ Number of available templates """ TemplateCount = None # long """ Number of created Sub-Accounts """ SubAccountCount = None # long """ Number of active referrals """ ReferralCount = None # long """ Maximum allowed Contacts limit if it's a Sub-Account. """ MaxContacts = None # int """ Lists advanced sending options of your account. """ class AdvancedOptions: """ True, if you want to track clicks. Otherwise, false """ EnableClickTracking = None # bool """ True, if you want to track by link tracking. Otherwise, false """ EnableLinkClickTracking = None # bool """ True, if you want to use template scripting in your emails {{}}. Otherwise, false """ EnableTemplateScripting = None # bool """ True, if text BODY of message should be created automatically. Otherwise, false """ AutoTextFormat = None # bool """ True, if you want bounce notifications returned. Otherwise, false """ EmailNotificationForError = None # bool """ True, if you want to receive low credit email notifications. Otherwise, false """ LowCreditNotification = None # bool """ True, if this Account is a Sub-Account. 
Otherwise, false """ IsSubAccount = None # bool """ True, if this Account resells Elastic Email. Otherwise, false. """ IsOwnedByReseller = None # bool """ True, if you want to enable list-unsubscribe header. Otherwise, false """ EnableUnsubscribeHeader = None # bool """ True, if you want to display your labels on your unsubscribe form. Otherwise, false """ ManageSubscriptions = None # bool """ True, if you want to only display labels that the contact is subscribed to on your unsubscribe form. Otherwise, false """ ManageSubscribedOnly = None # bool """ True, if you want to display an option for the contact to opt into transactional email only on your unsubscribe form. Otherwise, false """ TransactionalOnUnsubscribe = None # bool """ """ ConsentTrackingOnUnsubscribe = None # bool """ """ PreviewMessageID = None # string """ True, if you want to apply custom headers to your emails. Otherwise, false """ AllowCustomHeaders = None # bool """ Email address to send a copy of all email to. """ BccEmail = None # string """ Type of content encoding """ ContentTransferEncoding = None # string """ True, if you want to receive bounce email notifications. Otherwise, false """ EmailNotification = None # string """ Email addresses to send a copy of all notifications from our system. Separated by semicolon """ NotificationsEmails = None # string """ Emails, separated by semicolon, to which the notification about contact unsubscribing should be sent to """ UnsubscribeNotificationEmails = None # string """ True, if Account has tooltips active. Otherwise, false """ EnableUITooltips = None # bool """ True, if you want to use Contact Delivery Tools. Otherwise, false """ EnableContactFeatures = None # bool """ URL to your logo image. 
""" LogoUrl = None # string """ (0 means this functionality is NOT enabled) Score, depending on the number of times you have sent to a recipient, at which the given recipient should be moved to the Stale status """ StaleContactScore = None # int """ (0 means this functionality is NOT enabled) Number of days of inactivity for a contact after which the given recipient should be moved to the Stale status """ StaleContactInactiveDays = None # int """ Why your clients are receiving your emails. """ DeliveryReason = None # string """ True, if you want to enable Dashboard Tutotials """ TutorialsEnabled = None # bool? """ """ DisableStoreContact = None # bool """ Blocked Contact - Contact returning Hard Bounces """ class BlockedContact: """ Proper email address. """ Email = None # string """ Status of the given resource """ Status = None # string """ RFC error message """ FriendlyErrorMessage = None # string """ Last change date """ DateUpdated = None # DateTime? """ Summary of bounced categories, based on specified date range. 
""" class BouncedCategorySummary: """ Number of messages marked as SPAM """ Spam = None # long """ Number of blacklisted messages """ BlackListed = None # long """ Number of messages flagged with 'No Mailbox' """ NoMailbox = None # long """ Number of messages flagged with 'Grey Listed' """ GreyListed = None # long """ Number of messages flagged with 'Throttled' """ Throttled = None # long """ Number of messages flagged with 'Timeout' """ Timeout = None # long """ Number of messages flagged with 'Connection Problem' """ ConnectionProblem = None # long """ Number of messages flagged with 'SPF Problem' """ SpfProblem = None # long """ Number of messages flagged with 'Account Problem' """ AccountProblem = None # long """ Number of messages flagged with 'DNS Problem' """ DnsProblem = None # long """ Number of messages flagged with 'WhiteListing Problem' """ WhitelistingProblem = None # long """ Number of messages flagged with 'Code Error' """ CodeError = None # long """ Number of messages flagged with 'Not Delivered' """ NotDelivered = None # long """ Number of manually cancelled messages """ ManualCancel = None # long """ Number of messages flagged with 'Connection terminated' """ ConnectionTerminated = None # long """ Campaign """ class Campaign: """ ID number of selected Channel. """ ChannelID = None # int? """ Campaign's name """ Name = None # string """ Name of campaign's status """ Status = None # ApiTypes.CampaignStatus """ List of Segment and List IDs, preceded with 'l' for Lists and 's' for Segments, comma separated """ Targets = None # string[] """ Number of event, triggering mail sending """ TriggerType = None # ApiTypes.CampaignTriggerType """ Date of triggered send """ TriggerDate = None # DateTime? 
""" How far into the future should the campaign be sent, in minutes """ TriggerDelay = None # double """ When your next automatic mail will be sent, in minutes """ TriggerFrequency = None # double """ How many times should the campaign be sent """ TriggerCount = None # int """ Which Channel's event should trigger this Campaign """ TriggerChannelID = None # int? """ """ TriggerChannelName = None # string """ Data for filtering event campaigns such as specific link addresses. """ TriggerData = None # string """ What should be checked for choosing the winner: opens or clicks """ SplitOptimization = None # ApiTypes.SplitOptimization """ Number of minutes between sends during optimization period """ SplitOptimizationMinutes = None # int """ """ TimingOption = None # int """ Should the opens be tracked? If no value has been provided, Account's default setting will be used. """ TrackOpens = None # bool? """ Should the clicks be tracked? If no value has been provided, Account's default setting will be used. """ TrackClicks = None # bool? """ """ CampaignTemplates = None # List<ApiTypes.CampaignTemplate> """ """ SendStats = None # bool """ Channel """ class CampaignChannel: """ ID number of selected Channel. """ ChannelID = None # int """ Filename """ Name = None # string """ True, if you are sending a campaign. Otherwise, false. """ IsCampaign = None # bool """ Name of your custom IP Pool to be used in the sending process """ PoolName = None # string """ Date of creation in YYYY-MM-DDThh:ii:ss format """ DateAdded = None # DateTime """ Name of campaign's status """ Status = None # ApiTypes.CampaignStatus """ Date of last activity on Account """ LastActivity = None # DateTime? """ Datetime of last action done on campaign. """ LastProcessed = None # DateTime? 
""" Id number of parent channel """ ParentChannelID = None # int """ """ ParentChannelName = None # string """ List of Segment and List IDs, preceded with 'l' for Lists and 's' for Segments, comma separated """ Targets = None # string[] """ Number of event, triggering mail sending """ TriggerType = None # ApiTypes.CampaignTriggerType """ Date of triggered send """ TriggerDate = None # DateTime? """ How far into the future should the campaign be sent, in minutes """ TriggerDelay = None # double """ When your next automatic mail will be sent, in minutes """ TriggerFrequency = None # double """ How many times should the campaign be sent """ TriggerCount = None # int """ Which Channel's event should trigger this Campaign """ TriggerChannelID = None # int """ """ TriggerChannelName = None # string """ Data for filtering event campaigns such as specific link addresses. """ TriggerData = None # string """ What should be checked for choosing the winner: opens or clicks """ SplitOptimization = None # ApiTypes.SplitOptimization """ Number of minutes between sends during optimization period """ SplitOptimizationMinutes = None # int """ """ TimingOption = None # int """ ID number of template. """ TemplateID = None # int? """ Name of template. """ TemplateName = None # string """ Default subject of email. """ TemplateSubject = None # string """ Default From: email address. """ TemplateFromEmail = None # string """ Default From: name. """ TemplateFromName = None # string """ Default Reply: email address. """ TemplateReplyEmail = None # string """ Default Reply: name. """ TemplateReplyName = None # string """ Total emails clicked """ ClickedCount = None # int """ Total emails opened. """ OpenedCount = None # int """ Overall number of recipients """ RecipientCount = None # int """ Total emails sent. """ SentCount = None # int """ Total emails failed. 
""" FailedCount = None # int """ Total emails unsubscribed """ UnsubscribedCount = None # int """ Abuses - mails sent to user without their consent """ FailedAbuse = None # int """ List of CampaignTemplate for sending A-X split testing. """ TemplateChannels = None # List<ApiTypes.CampaignChannel> """ Should the opens be tracked? If no value has been provided, Account's default setting will be used. """ TrackOpens = None # bool? """ Should the clicks be tracked? If no value has been provided, Account's default setting will be used. """ TrackClicks = None # bool? """ The utm_source marketing parameter appended to each link in the campaign. """ UtmSource = None # string """ The utm_medium marketing parameter appended to each link in the campaign. """ UtmMedium = None # string """ The utm_campaign marketing parameter appended to each link in the campaign. """ UtmCampaign = None # string """ The utm_content marketing parameter appended to each link in the campaign. """ UtmContent = None # string """ """ SendStats = None # bool """ """ class CampaignStatus(Enum): """ Campaign is logically deleted and not returned by API or interface calls. """ Deleted = -1 """ Campaign is curently active and available. """ Active = 0 """ Campaign is currently being processed for delivery. """ Processing = 1 """ Campaign is currently sending. """ Sending = 2 """ Campaign has completed sending. """ Completed = 3 """ Campaign is currently paused and not sending. """ Paused = 4 """ Campaign has been cancelled during delivery. """ Cancelled = 5 """ Campaign is save as draft and not processing. """ Draft = 6 """ """ class CampaignTemplate: """ """ CampaignTemplateID = None # int? """ """ CampaignTemplateName = None # string """ Name of campaign's status """ Status = None # ApiTypes.CampaignStatus """ Name of your custom IP Pool to be used in the sending process """ PoolName = None # string """ ID number of template. """ TemplateID = None # int? """ Name of template. 
""" TemplateName = None # string """ Default subject of email. """ TemplateSubject = None # string """ Default From: email address. """ TemplateFromEmail = None # string """ Default From: name. """ TemplateFromName = None # string """ Default Reply: email address. """ TemplateReplyEmail = None # string """ Default Reply: name. """ TemplateReplyName = None # string """ The utm_source marketing parameter appended to each link in the campaign. """ UtmSource = None # string """ The utm_medium marketing parameter appended to each link in the campaign. """ UtmMedium = None # string """ The utm_campaign marketing parameter appended to each link in the campaign. """ UtmCampaign = None # string """ The utm_content marketing parameter appended to each link in the campaign. """ UtmContent = None # string """ """ class CampaignTriggerType(Enum): """ """ SendNow = 1 """ """ FutureScheduled = 2 """ """ OnAdd = 3 """ """ OnOpen = 4 """ """ OnClick = 5 """ """ class CertificateValidationStatus(Enum): """ """ ErrorOccured = -2 """ """ CertNotSet = 0 """ """ Valid = 1 """ """ NotValid = 2 """ SMTP and HTTP API channel for grouping email delivery """ class Channel: """ Channel identifier. """ ChannelID = None # int """ Descriptive name of the channel. """ Name = None # string """ The date the channel was added to your account. """ DateAdded = None # DateTime """ The date the channel was last sent through. """ LastActivity = None # DateTime? """ The number of email jobs this channel has been used with. """ JobCount = None # int """ The number of emails that have been clicked within this channel. """ ClickedCount = None # int """ The number of emails that have been opened within this channel. """ OpenedCount = None # int """ The number of emails attempted to be sent within this channel. """ RecipientCount = None # int """ The number of emails that have been sent within this channel. """ SentCount = None # int """ The number of emails that have been bounced within this channel. 
""" FailedCount = None # int """ The number of emails that have been marked as abuse or complaint within this channel. """ FailedAbuse = None # int """ The number of emails that have been unsubscribed within this channel. """ UnsubscribedCount = None # int """ The number of emails that have been stopped. """ SuppressedCount = None # int """ Percentage of delivered emails out of all emails """ DeliveredPercentage = None # double """ Percentage of opened emails out of delivered emails """ OpenedPercentage = None # double """ Percentage of clicked emails out of delivered emails """ ClickedPercentage = None # double """ Percentage of failed emails out of all emails """ FailedPercentage = None # double """ Percentage of emails marked as abuse out of delivered emails """ FailedAbusePercentage = None # double """ Percentage of emails marked as unsubscribed out of delivered emails """ UnsubscribedPercentage = None # double """ Percentage of suppressed (not delivered) emails out of all emails """ SuppressedPercentage = None # double """ The total cost for emails/attachments within this channel. """ Cost = None # decimal """ FileResponse compression format """ class CompressionFormat(Enum): """ No compression """ EENone = 0 """ Zip compression """ Zip = 1 """ """ class ConsentTracking(Enum): """ """ Unknown = 0 """ """ Allow = 1 """ """ Deny = 2 """ Contact """ class Contact: """ """ ContactScore = None # int """ Date of creation in YYYY-MM-DDThh:ii:ss format """ DateAdded = None # DateTime """ Proper email address. """ Email = None # string """ First name. """ FirstName = None # string """ Last name. """ LastName = None # string """ Status of the given resource """ Status = None # ApiTypes.ContactStatus """ RFC Error code """ BouncedErrorCode = None # int? """ RFC error message """ BouncedErrorMessage = None # string """ Total emails sent. """ TotalSent = None # int """ Total emails failed. """ TotalFailed = None # int """ Total emails opened. 
""" TotalOpened = None # int """ Total emails clicked """ TotalClicked = None # int """ Date of first failed message """ FirstFailedDate = None # DateTime? """ Number of fails in sending to this Contact """ LastFailedCount = None # int """ Last change date """ DateUpdated = None # DateTime """ Date of last status change. """ StatusChangeDate = None # DateTime? """ Source of URL of payment """ Source = None # ApiTypes.ContactSource """ RFC Error code """ ErrorCode = None # int? """ RFC error message """ FriendlyErrorMessage = None # string """ IP address """ CreatedFromIP = None # string """ IP address of consent to send this contact(s) your email. If not provided your current public IP address is used for consent. """ ConsentIP = None # string """ Date of consent to send this contact(s) your email. If not provided current date is used for consent. """ ConsentDate = None # DateTime? """ Does the contant consent to have their tracking data stored. """ ConsentTracking = None # ApiTypes.ConsentTracking """ Unsubscribed date in YYYY-MM-DD format """ UnsubscribedDate = None # DateTime? """ Free form field of notes """ Notes = None # string """ Website of contact """ WebsiteUrl = None # string """ Date this contact last opened an email """ LastOpened = None # DateTime? """ """ LastClicked = None # DateTime? """ """ BounceCount = None # int """ """ LastSent = None # DateTime? """ """ LastIP = None # string """ Custom contact field like companyname, customernumber, city etc. 
JSON serialized text like { "city":"london" } """ CustomFields = None # Dictionary<string, string> """ Collection of lists and segments """ class ContactCollection: """ Lists which contain the requested contact """ Lists = None # List<ApiTypes.ContactContainer> """ Segments which contain the requested contact """ Segments = None # List<ApiTypes.ContactContainer> """ List's or segment's short info """ class ContactContainer: """ ID of the list/segment """ ID = None # int """ Name of the list/segment """ Name = None # string """ """ class ContactHistEventType(Enum): """ Contact opened an e-mail """ Opened = 2 """ Contact clicked an e-mail """ Clicked = 3 """ E-mail sent to the contact bounced """ Bounced = 10 """ Contact unsubscribed """ Unsubscribed = 11 """ Contact complained to an e-mail """ Complained = 12 """ Contact clicked an activation link """ Activated = 20 """ Contact has opted to receive Transactional-only e-mails """ TransactionalUnsubscribed = 21 """ Contact's status was changed manually """ ManualStatusChange = 22 """ An Activation e-mail was sent """ ActivationSent = 24 """ Contact was deleted """ Deleted = 28 """ History of chosen Contact """ class ContactHistory: """ ID of history of selected Contact. """ ContactHistoryID = None # long """ Type of event occured on this Contact. """ EventType = None # string """ Numeric code of event occured on this Contact. """ EventTypeValue = None # ApiTypes.ContactHistEventType """ Formatted date of event. """ EventDate = None # DateTime """ Name of selected channel. """ ChannelName = None # string """ Name of template. """ TemplateName = None # string """ IP Address of the event. """ IPAddress = None # string """ Country of the event. 
""" Country = None # string """ Information about the event """ Data = None # string """ """ class ContactSort(Enum): """ """ Unknown = 0 """ Sort by date added ascending order """ DateAddedAsc = 1 """ Sort by date added descending order """ DateAddedDesc = 2 """ Sort by date updated ascending order """ DateUpdatedAsc = 3 """ Sort by date updated descending order """ DateUpdatedDesc = 4 """ """ class ContactSource(Enum): """ Source of the contact is from sending an email via our SMTP or HTTP API's """ DeliveryApi = 0 """ Contact was manually entered from the interface. """ ManualInput = 1 """ Contact was uploaded via a file such as CSV. """ FileUpload = 2 """ Contact was added from a public web form. """ WebForm = 3 """ Contact was added from the contact api. """ ContactApi = 4 """ Contact was added via the verification api. """ VerificationApi = 5 """ Contacts were added via bulk verification api. """ FileVerificationApi = 6 """ """ class ContactStatus(Enum): """ Only transactional email can be sent to contacts with this status. """ Transactional = -2 """ Contact has had an open or click in the last 6 months. """ Engaged = -1 """ Contact is eligible to be sent to. """ Active = 0 """ Contact has had a hard bounce and is no longer eligible to be sent to. """ Bounced = 1 """ Contact has unsubscribed and is no longer eligible to be sent to. """ Unsubscribed = 2 """ Contact has complained and is no longer eligible to be sent to. """ Abuse = 3 """ Contact has not been activated or has been de-activated and is not eligible to be sent to. """ Inactive = 4 """ Contact has not been opening emails for a long period of time and is not eligible to be sent to. """ Stale = 5 """ Contact has not confirmed their double opt-in activation and is not eligible to be sent to. 
""" NotConfirmed = 6 """ Number of Contacts, grouped by Status; """ class ContactStatusCounts: """ Number of engaged contacts """ Engaged = None # long """ Number of active contacts """ Active = None # long """ Number of complaint messages """ Complaint = None # long """ Number of unsubscribed messages """ Unsubscribed = None # long """ Number of bounced messages """ Bounced = None # long """ Number of inactive contacts """ Inactive = None # long """ Number of transactional contacts """ Transactional = None # long """ """ Stale = None # long """ """ NotConfirmed = None # long """ Number of Unsubscribed or Complaint Contacts, grouped by Unsubscribe Reason; """ class ContactUnsubscribeReasonCounts: """ """ Unknown = None # long """ """ NoLongerWant = None # long """ """ IrrelevantContent = None # long """ """ TooFrequent = None # long """ """ NeverConsented = None # long """ """ DeceptiveContent = None # long """ """ AbuseReported = None # long """ """ ThirdParty = None # long """ """ ListUnsubscribe = None # long """ Daily summary of log status, based on specified date range. """ class DailyLogStatusSummary: """ Date in YYYY-MM-DDThh:ii:ss format """ Date = None # DateTime """ Proper email address. 
""" Email = None # int """ Number of SMS """ Sms = None # int """ Number of delivered messages """ Delivered = None # int """ Number of opened messages """ Opened = None # int """ Number of clicked messages """ Clicked = None # int """ Number of unsubscribed messages """ Unsubscribed = None # int """ Number of complaint messages """ Complaint = None # int """ Number of bounced messages """ Bounced = None # int """ Number of inbound messages """ Inbound = None # int """ Number of manually cancelled messages """ ManualCancel = None # int """ Number of messages flagged with 'Not Delivered' """ NotDelivered = None # int """ """ Suppressed = None # long """ Percentage of delivered emails out of all emails """ DeliveredPercentage = None # double """ Percentage of opened emails out of delivered emails """ OpenedPercentage = None # double """ Percentage of clicked emails out of delivered emails """ ClickedPercentage = None # double """ Percentage of failed emails out of all emails """ BouncedPercentage = None # double """ Percentage of emails marked as abuse out of delivered emails """ ComplaintPercentage = None # double """ Percentage of emails marked as unsubscribed out of delivered emails """ UnsubscribedPercentage = None # double """ Percentage of suppressed (not delivered) emails out of all emails """ SuppressedPercentage = None # double """ Domain data, with information about domain records. """ class DomainDetail: """ Name of selected domain. """ Domain = None # string """ True, if domain is used as default. 
Otherwise, false, """ DefaultDomain = None # bool """ True, if SPF record is verified """ Spf = None # bool """ True, if DKIM record is verified """ Dkim = None # bool """ True, if MX record is verified """ MX = None # bool """ """ DMARC = None # bool """ True, if tracking CNAME record is verified """ IsRewriteDomainValid = None # bool """ True, if DKIM, SPF, or tracking are still to be verified """ Verify = None # bool """ """ Type = None # ApiTypes.TrackingType """ 0 - Validated successfully, 1 - NotValidated , 2 - Invalid, 3 - Broken (tracking was frequnetly verfied in given period and still is invalid). For statuses: 0, 1, 3 tracking will be verified in normal periods. For status 2 tracking will be verified in high frequent periods. """ TrackingStatus = None # ApiTypes.TrackingValidationStatus """ """ CertificateStatus = None # ApiTypes.CertificateValidationStatus """ """ CertificateValidationError = None # string """ """ TrackingTypeUserRequest = None # ApiTypes.TrackingType? """ """ VERP = None # bool """ """ CustomBouncesDomain = None # string """ """ IsCustomBouncesDomainDefault = None # bool """ Detailed information about email credits """ class EmailCredits: """ Date in YYYY-MM-DDThh:ii:ss format """ Date = None # DateTime """ Amount of money in transaction """ Amount = None # decimal """ Source of URL of payment """ Source = None # string """ Free form field of notes """ Notes = None # string """ """ class EmailJobFailedStatus: """ """ Address = None # string """ """ Error = None # string """ RFC Error code """ ErrorCode = None # int """ """ Category = None # string """ """ class EmailJobStatus: """ ID number of your attachment """ ID = None # string """ Name of status: submitted, complete, in_progress """ Status = None # string """ """ RecipientsCount = None # int """ """ Failed = None # List<ApiTypes.EmailJobFailedStatus> """ Total emails failed. """ FailedCount = None # int """ """ Sent = None # List<string> """ Total emails sent. 
""" SentCount = None # int """ Number of delivered messages """ Delivered = None # List<string> """ """ DeliveredCount = None # int """ """ Pending = None # List<string> """ """ PendingCount = None # int """ Number of opened messages """ Opened = None # List<string> """ Total emails opened. """ OpenedCount = None # int """ Number of clicked messages """ Clicked = None # List<string> """ Total emails clicked """ ClickedCount = None # int """ Number of unsubscribed messages """ Unsubscribed = None # List<string> """ Total emails unsubscribed """ UnsubscribedCount = None # int """ """ AbuseReports = None # List<string> """ """ AbuseReportsCount = None # int """ List of all MessageIDs for this job. """ MessageIDs = None # List<string> """ """ class EmailSend: """ ID number of transaction """ TransactionID = None # string """ Unique identifier for this email. """ MessageID = None # string """ Status information of the specified email """ class EmailStatus: """ Email address this email was sent from. """ From = None # string """ Email address this email was sent to. """ To = None # string """ Date the email was submitted. """ Date = None # DateTime """ Value of email's status """ Status = None # ApiTypes.LogJobStatus """ Name of email's status """ StatusName = None # string """ Date of last status change. """ StatusChangeDate = None # DateTime """ Date when the email was sent """ DateSent = None # DateTime """ Date when the email changed the status to 'opened' """ DateOpened = None # DateTime? """ Date when the email changed the status to 'clicked' """ DateClicked = None # DateTime? """ Detailed error or bounced message. """ ErrorMessage = None # string """ ID number of transaction """ TransactionID = None # Guid """ Envelope from address """ EnvelopeFrom = None # string """ """ class EmailValidationResult: """ """ Account = None # string """ Name of selected domain. """ Domain = None # string """ Proper email address. 
""" Email = None # string """ """ SuggestedSpelling = None # string """ """ Disposable = None # bool """ """ Role = None # bool """ Reason for blocking (1 - bounced, 2 - unsubscribed, 3 - spam). """ Reason = None # string """ """ Result = None # ApiTypes.EmailValidationStatus """ """ class EmailValidationStatus(Enum): """ """ EENone = 0 """ """ Valid = 1 """ """ Unknown = 2 """ """ Risky = 3 """ """ Invalid = 4 """ Email details formatted in json """ class EmailView: """ Body (HTML, otherwise plain text) of email """ Body = None # string """ Default subject of email. """ Subject = None # string """ From email address """ From = None # string """ Encoding type for the email headers """ class EncodingType(Enum): """ Encoding of the email is provided by the sender and not altered. """ UserProvided = -1 """ No endcoding is set for the email. """ EENone = 0 """ Encoding of the email is in Raw7bit format. """ Raw7bit = 1 """ Encoding of the email is in Raw8bit format. """ Raw8bit = 2 """ Encoding of the email is in QuotedPrintable format. """ QuotedPrintable = 3 """ Encoding of the email is in Base64 format. """ Base64 = 4 """ Encoding of the email is in Uue format. """ Uue = 5 """ Event logs for selected date range """ class EventLog: """ Starting date for search in YYYY-MM-DDThh:mm:ss format. """ From = None # DateTime? """ Ending date for search in YYYY-MM-DDThh:mm:ss format. """ To = None # DateTime? """ Number of recipients """ Recipients = None # List<ApiTypes.RecipientEvent> """ Record of exported data from the system. """ class Export: """ ID of the exported file """ PublicExportID = None # Guid """ Date the export was created. """ DateAdded = None # DateTime """ Type of export """ ExportType = None # ApiTypes.ExportType """ Status of the export """ ExportStatus = None # ApiTypes.ExportStatus """ Long description of the export. """ Info = None # string """ Name of the exported file. """ Filename = None # string """ Link to download the export. 
""" Link = None # string """ Log start date (for Type = Log only). """ LogFrom = None # DateTime? """ Log end date (for Type = Log only). """ LogTo = None # DateTime? """ Format of the exported file. """ class ExportFileFormats(Enum): """ Export in comma separated values format. """ Csv = 1 """ Export in xml format. """ Xml = 2 """ Export in json format. """ Json = 3 """ """ class ExportLink: """ Direct URL to the exported file """ Link = None # string """ ID of the exported file """ PublicExportID = None # Guid """ Current status of the export. """ class ExportStatus(Enum): """ Export had an error and can not be downloaded. """ Error = -1 """ Export is currently loading and can not be downloaded. """ Loading = 0 """ Export is currently available for downloading. """ Ready = 1 """ Export is no longer available for downloading. """ Expired = 2 """ Type of export. """ class ExportType(Enum): """ Export contains detailed email log information. """ Log = 1 """ Export contains detailed contact information. """ Contact = 2 """ Export contains detailed campaign information. """ Campaign = 3 """ Export contains detailed link tracking information. """ LinkTracking = 4 """ Export contains detailed survey information. """ Survey = 5 """ File information """ class File: """ Name of your file including extension. """ FileName = None # string """ Size of your attachment (in bytes). """ Size = None # int? """ Date of creation in YYYY-MM-DDThh:ii:ss format """ DateAdded = None # DateTime """ Date when the file will be deleted from your Account. """ ExpirationDate = None # DateTime? """ Content type of the file. """ ContentType = None # string """ Lists inbound options of your account. """ class InboundOptions: """ URL used for tracking action of inbound emails """ HubCallbackUrl = None # string """ Domain you use as your inbound domain """ InboundDomain = None # string """ True, if you want inbound email to only process contacts from your Account. 
Otherwise, false """ InboundContactsOnly = None # bool """ """ class IntervalType(Enum): """ Daily overview """ Summary = 0 """ Hourly, detailed information """ Hourly = 1 """ Object containig tracking data. """ class LinkTrackingDetails: """ Number of items. """ Count = None # int """ True, if there are more detailed data available. Otherwise, false """ MoreAvailable = None # bool """ """ TrackedLink = None # List<ApiTypes.TrackedLink> """ List of Lists, with detailed data about its contents. """ class List: """ ID number of selected list. """ ListID = None # int """ Name of your list. """ ListName = None # string """ This count is no longer supported and will always be 0. Use /contact/count instead. """ Count = None # int """ ID code of list. Please note that this is different from the listid field. """ PublicListID = None # Guid? """ Date of creation in YYYY-MM-DDThh:ii:ss format """ DateAdded = None # DateTime """ True: Allow unsubscribing from this list. Otherwise, false """ AllowUnsubscribe = None # bool """ Query used for filtering. """ Rule = None # string """ """ TrackHistory = None # bool """ Logs for selected date range """ class Log: """ Starting date for search in YYYY-MM-DDThh:mm:ss format. """ From = None # DateTime? """ Ending date for search in YYYY-MM-DDThh:mm:ss format. """ To = None # DateTime? """ Number of recipients """ Recipients = None # List<ApiTypes.Recipient> """ """ class LogEventStatus(Enum): """ Email is queued for sending. """ ReadyToSend = 1 """ Email has soft bounced and is scheduled to retry. """ WaitingToRetry = 2 """ Email is currently sending. """ Sending = 3 """ Email has errored or bounced for some reason. """ Error = 4 """ Email has been successfully delivered. """ Sent = 5 """ Email has been opened by the recipient. """ Opened = 6 """ Email has had at least one link clicked by the recipient. """ Clicked = 7 """ Email has been unsubscribed by the recipient. 
""" Unsubscribed = 8 """ Email has been complained about or marked as spam by the recipient. """ AbuseReport = 9 """ """ class LogJobStatus(Enum): """ All emails """ All = 0 """ Email has been submitted successfully and is queued for sending. """ ReadyToSend = 1 """ Email has soft bounced and is scheduled to retry. """ WaitingToRetry = 2 """ Email is currently sending. """ Sending = 3 """ Email has errored or bounced for some reason. """ Error = 4 """ Email has been successfully delivered. """ Sent = 5 """ Email has been opened by the recipient. """ Opened = 6 """ Email has had at least one link clicked by the recipient. """ Clicked = 7 """ Email has been unsubscribed by the recipient. """ Unsubscribed = 8 """ Email has been complained about or marked as spam by the recipient. """ AbuseReport = 9 """ Summary of log status, based on specified date range. """ class LogStatusSummary: """ Starting date for search in YYYY-MM-DDThh:mm:ss format. """ From = None # DateTime """ Ending date for search in YYYY-MM-DDThh:mm:ss format. 
""" To = None # DateTime """ Overall duration """ Duration = None # double """ Number of recipients """ Recipients = None # long """ Number of emails """ EmailTotal = None # long """ Number of SMS """ SmsTotal = None # long """ Number of delivered messages """ Delivered = None # long """ Number of bounced messages """ Bounced = None # long """ Number of messages in progress """ InProgress = None # long """ """ WaitingToRetry = None # long """ """ ReadyToSend = None # long """ Number of opened messages """ Opened = None # long """ Number of clicked messages """ Clicked = None # long """ Number of unsubscribed messages """ Unsubscribed = None # long """ Number of complaint messages """ Complaints = None # long """ Number of inbound messages """ Inbound = None # long """ Number of manually cancelled messages """ ManualCancel = None # long """ Number of messages flagged with 'Not Delivered' """ NotDelivered = None # long """ """ NotDeliveredCancelled = None # long """ """ Suppressed = None # long """ ID number of template used """ TemplateChannel = None # bool """ Percentage of delivered emails out of all emails """ DeliveredPercentage = None # double """ Percentage of opened emails out of delivered emails """ OpenedPercentage = None # double """ Percentage of clicked emails out of delivered emails """ ClickedPercentage = None # double """ Percentage of failed emails out of all emails """ FailedPercentage = None # double """ Percentage of emails marked as abuse out of delivered emails """ FailedAbusePercentage = None # double """ Percentage of emails marked as unsubscribed out of delivered emails """ UnsubscribedPercentage = None # double """ Percentage of suppressed (not delivered) emails out of all emails """ SuppressedPercentage = None # double """ Overall log summary information. """ class LogSummary: """ Summary of log status, based on specified date range. 
""" LogStatusSummary = None # ApiTypes.LogStatusSummary """ Summary of bounced categories, based on specified date range. """ BouncedCategorySummary = None # ApiTypes.BouncedCategorySummary """ Daily summary of log status, based on specified date range. """ DailyLogStatusSummary = None # List<ApiTypes.DailyLogStatusSummary> """ """ SubaccountSummary = None # ApiTypes.SubaccountSummary """ """ class MessageCategory(Enum): """ """ Unknown = 0 """ """ Ignore = 1 """ Number of messages marked as SPAM """ Spam = 2 """ Number of blacklisted messages """ BlackListed = 3 """ Number of messages flagged with 'No Mailbox' """ NoMailbox = 4 """ Number of messages flagged with 'Grey Listed' """ GreyListed = 5 """ Number of messages flagged with 'Throttled' """ Throttled = 6 """ Number of messages flagged with 'Timeout' """ Timeout = 7 """ Number of messages flagged with 'Connection Problem' """ ConnectionProblem = 8 """ Number of messages flagged with 'SPF Problem' """ SPFProblem = 9 """ Number of messages flagged with 'Account Problem' """ AccountProblem = 10 """ Number of messages flagged with 'DNS Problem' """ DNSProblem = 11 """ """ NotDeliveredCancelled = 12 """ Number of messages flagged with 'Code Error' """ CodeError = 13 """ Number of manually cancelled messages """ ManualCancel = 14 """ Number of messages flagged with 'Connection terminated' """ ConnectionTerminated = 15 """ Number of messages flagged with 'Not Delivered' """ NotDelivered = 16 """ """ class NotificationType(Enum): """ Both, email and web, notifications """ All = 0 """ Only email notifications """ Email = 1 """ Only web notifications """ Web = 2 """ Detailed information about existing money transfers. 
""" class Payment: """ Date in YYYY-MM-DDThh:ii:ss format """ Date = None # DateTime """ Amount of money in transaction """ Amount = None # decimal """ """ RegularAmount = None # decimal """ """ DiscountPercent = None # decimal """ Source of URL of payment """ Source = None # string """ Basic information about your profile """ class Profile: """ First name. """ FirstName = None # string """ Last name. """ LastName = None # string """ Company name. """ Company = None # string """ First line of address. """ Address1 = None # string """ Second line of address. """ Address2 = None # string """ City. """ City = None # string """ State or province. """ State = None # string """ Zip/postal code. """ Zip = None # string """ Numeric ID of country. A file with the list of countries is available <a href="http://api.elasticemail.com/public/countries"><b>here</b></a> """ CountryID = None # int? """ Two letter ISO 3166-1 code of the country """ CountryISO = None # string """ Phone number """ Phone = None # string """ Proper email address. """ Email = None # string """ Code used for tax purposes. """ TaxCode = None # string """ Why your clients are receiving your emails. """ DeliveryReason = None # string """ True if you want to receive newsletters from Elastic Email. Otherwise, false. Empty to leave the current value. """ MarketingConsent = None # bool? """ HTTP address of your website. """ Website = None # string """ URL to your logo image. """ LogoUrl = None # string """ Detailed information about message recipient """ class Recipient: """ True, if message is SMS. Otherwise, false """ IsSms = None # bool """ ID number of selected message. """ MsgID = None # string """ Ending date for search in YYYY-MM-DDThh:mm:ss format. """ To = None # string """ Name of recipient's status: Submitted, ReadyToSend, WaitingToRetry, Sending, Bounced, Sent, Opened, Clicked, Unsubscribed, AbuseReport """ Status = None # string """ Name of selected Channel. 
""" Channel = None # string """ Creation date """ Date = None # DateTime """ Date when the email was sent """ DateSent = None # DateTime? """ Date when the email changed the status to 'opened' """ DateOpened = None # DateTime? """ Date when the email changed the status to 'clicked' """ DateClicked = None # DateTime? """ Content of message, HTML encoded """ Message = None # string """ True, if message category should be shown. Otherwise, false """ ShowCategory = None # bool """ Name of message category """ MessageCategory = None # string """ ID of message category """ MessageCategoryID = None # ApiTypes.MessageCategory? """ Date of last status change. """ StatusChangeDate = None # DateTime """ Date of next try """ NextTryOn = None # DateTime? """ Default subject of email. """ Subject = None # string """ Default From: email address. """ FromEmail = None # string """ """ EnvelopeFrom = None # string """ ID of certain mail job """ JobID = None # string """ True, if message is a SMS and status is not yet confirmed. Otherwise, false """ SmsUpdateRequired = None # bool """ Content of message """ TextMessage = None # string """ Comma separated ID numbers of messages. """ MessageSid = None # string """ Recipient's last bounce error because of which this e-mail was suppressed """ ContactLastError = None # string """ """ IPAddress = None # string """ Detailed information about message recipient """ class RecipientEvent: """ ID of certain mail job """ JobID = None # string """ ID number of selected message. """ MsgID = None # string """ Default From: email address. """ FromEmail = None # string """ Ending date for search in YYYY-MM-DDThh:mm:ss format. """ To = None # string """ Default subject of email. """ Subject = None # string """ Name of recipient's status: Submitted, ReadyToSend, WaitingToRetry, Sending, Bounced, Sent, Opened, Clicked, Unsubscribed, AbuseReport """ EventType = None # string """ Creation date """ EventDate = None # DateTime """ Name of selected Channel. 
""" Channel = None # string """ ID number of selected Channel. """ ChannelID = None # int? """ Name of message category """ MessageCategory = None # string """ Date of next try """ NextTryOn = None # DateTime? """ Content of message, HTML encoded """ Message = None # string """ """ IPAddress = None # string """ """ IPPoolName = None # string """ Referral details for this account. """ class Referral: """ Current amount of dolars you have from referring. """ CurrentReferralCredit = None # decimal """ Number of active referrals. """ CurrentReferralCount = None # long """ Detailed sending reputation of your account. """ class ReputationDetail: """ Overall reputation impact, based on the most important factors. """ Impact = None # ApiTypes.ReputationImpact """ Percent of Complaining users - those, who do not want to receive email from you. """ AbusePercent = None # double """ Percent of Unknown users - users that couldn't be found """ UnknownUsersPercent = None # double """ """ OpenedPercent = None # double """ """ ClickedPercent = None # double """ Penalty from messages marked as spam. """ AverageSpamScore = None # double """ Percent of Bounced users """ FailedSpamPercent = None # double """ Reputation history of your account. """ class ReputationHistory: """ Creation date. """ DateCreated = None # DateTime """ Percent of Complaining users - those, who do not want to receive email from you. """ AbusePercent = None # double """ Percent of Unknown users - users that couldn't be found """ UnknownUsersPercent = None # double """ """ OpenedPercent = None # double """ """ ClickedPercent = None # double """ Penalty from messages marked as spam. """ AverageSpamScore = None # double """ Points from proper setup of your Account """ SetupScore = None # double """ Number of emails included in the current reputation score. """ RepEmailsSent = None # double """ Numeric reputation """ Reputation = None # double """ Overall reputation impact, based on the most important factors. 
""" class ReputationImpact: """ Abuses - mails sent to user without their consent """ Abuse = None # double """ Users, that could not be reached. """ UnknownUsers = None # double """ Number of opened messages """ Opened = None # double """ Number of clicked messages """ Clicked = None # double """ Penalty from messages marked as spam. """ AverageSpamScore = None # double """ Content analysis. """ ServerFilter = None # double """ Information about Contact Segment, selected by RULE. """ class Segment: """ ID number of your segment. """ SegmentID = None # int """ Filename """ Name = None # string """ Query used for filtering. """ Rule = None # string """ This count is no longer supported and will always be 0. Use /contact/count instead. """ LastCount = None # long """ """ TrackHistory = None # bool """ History of segment information. """ History = None # List<ApiTypes.SegmentHistory> """ Segment History """ class SegmentHistory: """ ID number of history. """ SegmentHistoryID = None # int """ ID number of your segment. """ SegmentID = None # int """ Date in YYYY-MM-DD format """ Day = None # int """ Number of items. """ Count = None # long """ Controls the Sub-Account's sending permissions. Main Account's always have All. """ class SendingPermission(Enum): """ Sending not allowed. """ EENone = 0 """ Allow sending via SMTP only. """ Smtp = 1 """ Allow sending via HTTP API only. """ HttpApi = 2 """ Allow sending via SMTP and HTTP API. """ SmtpAndHttpApi = 3 """ Allow sending via the website interface only. """ Interface = 4 """ Allow sending via SMTP and the website interface. """ SmtpAndInterface = 5 """ Allow sendnig via HTTP API and the website interface. """ HttpApiAndInterface = 6 """ Use access level sending permission. """ UseAccessLevel = 16 """ Sending allowed via SMTP, HTTP API and the website interface. """ All = 255 """ Spam check of specified message. 
""" class SpamCheck: """ Total spam score from """ TotalScore = None # string """ Date in YYYY-MM-DDThh:ii:ss format """ Date = None # DateTime? """ Default subject of email. """ Subject = None # string """ Default From: email address. """ FromEmail = None # string """ ID number of selected message. """ MsgID = None # string """ Name of selected channel. """ ChannelName = None # string """ """ Rules = None # List<ApiTypes.SpamRule> """ Single spam score """ class SpamRule: """ Spam score """ Score = None # string """ Name of rule """ Key = None # string """ Description of rule. """ Description = None # string """ """ class SplitOptimization(Enum): """ Number of opened messages """ Opened = 0 """ Number of clicked messages """ Clicked = 1 """ Detailed information about Sub-Account. """ class SubAccount: """ Public key for limited access to your Account such as contact/add so you can use it safely on public websites. """ PublicAccountID = None # string """ Proper email address. """ Email = None # string """ ID number of mailer """ MailerID = None # string """ Name of your custom IP Pool to be used in the sending process """ PoolName = None # string """ Date of last activity on Account """ LastActivity = None # DateTime """ Amount of email credits """ EmailCredits = None # string """ True, if Account needs credits to send emails. Otherwise, false """ RequiresEmailCredits = None # bool """ Amount of credits added to Account automatically """ MonthlyRefillCredits = None # double """ True, if Account can request for private IP on its own. Otherwise, false """ EnablePrivateIPRequest = None # bool """ Amount of emails sent from this Account """ TotalEmailsSent = None # long """ Percent of Unknown users - users that couldn't be found """ UnknownUsersPercent = None # double """ Percent of Complaining users - those, who do not want to receive email from you. 
""" AbusePercent = None # double """ Percent of Bounced users """ FailedSpamPercent = None # double """ Numeric reputation """ Reputation = None # double """ Amount of emails Account can send daily """ DailySendLimit = None # long """ Account's current status. """ Status = None # string """ Maximum size of email including attachments in MB's """ EmailSizeLimit = None # int """ Maximum number of contacts the Account can have """ MaxContacts = None # int """ Sending permission setting for Account """ SendingPermission = None # ApiTypes.SendingPermission """ """ HasModify2FA = None # bool """ """ ContactsCount = None # int """ Detailed settings of Sub-Account. """ class SubAccountSettings: """ Proper email address. """ Email = None # string """ True, if Account needs credits to send emails. Otherwise, false """ RequiresEmailCredits = None # bool """ Amount of credits added to Account automatically """ MonthlyRefillCredits = None # double """ Maximum size of email including attachments in MB's """ EmailSizeLimit = None # int """ Amount of emails Account can send daily """ DailySendLimit = None # int """ Maximum number of contacts the Account can have """ MaxContacts = None # int """ True, if Account can request for private IP on its own. Otherwise, false """ EnablePrivateIPRequest = None # bool """ True, if you want to use Contact Delivery Tools. Otherwise, false """ EnableContactFeatures = None # bool """ Sending permission setting for Account """ SendingPermission = None # ApiTypes.SendingPermission """ Name of your custom IP Pool to be used in the sending process """ PoolName = None # string """ Public key for limited access to your Account such as contact/add so you can use it safely on public websites. """ PublicAccountID = None # string """ True, if you want to allow two-factor authentication. Otherwise, false. """ Allow2FA = None # bool? 
""" """ class SubaccountSummary: """ """ EmailsSentToday = None # int """ """ EmailsSentThisMonth = None # int """ Add-on support options for your Account. """ class SupportPlan(Enum): """ Free support. """ Free = 0 """ In-app support option for $1/day. """ Priority = 1 """ In-app real-time chat support option for $7/day. """ Premium = 2 """ """ class TagType(Enum): """ """ Template = 0 """ """ LandingPage = 1 """ Template """ class Template: """ ID number of template. """ TemplateID = None # int """ 0 for API connections """ TemplateType = None # ApiTypes.TemplateType """ Filename """ Name = None # string """ Date of creation in YYYY-MM-DDThh:ii:ss format """ DateAdded = None # DateTime """ CSS style """ Css = None # string """ Default subject of email. """ Subject = None # string """ Default From: email address. """ FromEmail = None # string """ Default From: name. """ FromName = None # string """ HTML code of email (needs escaping). """ BodyHtml = None # string """ AMP code of email (needs escaping). """ BodyAmp = None # string """ Text body of email. """ BodyText = None # string """ ID number of original template. 
""" OriginalTemplateID = None # int """ """ OriginalTemplateName = None # string """ Enum: 0 - private, 1 - public, 2 - mockup """ TemplateScope = None # ApiTypes.TemplateScope """ Template's Tags """ Tags = None # List<string> """ List of templates (including drafts) """ class TemplateList: """ List of templates """ Templates = None # List<ApiTypes.Template> """ Total of templates """ TemplatesCount = None # int """ List of draft templates """ DraftTemplate = None # List<ApiTypes.Template> """ Total of draft templates """ DraftTemplatesCount = None # int """ """ class TemplateOrder(Enum): """ """ DateAddedAscending = 0 """ """ DateAddedDescending = 1 """ """ NameAscending = 2 """ """ NameDescending = 3 """ """ DateModifiedAscending = 4 """ """ DateModifiedDescending = 5 """ """ class TemplateScope(Enum): """ Template is available for this account only. """ Private = 0 """ Template is available for this account and it's sub-accounts. """ Public = 1 """ Template is a temporary draft, not to be used permanently. """ Draft = 2 """ Tag used for tagging multiple Templates """ class TemplateTag: """ Tag's value """ Name = None # string """ Tag type """ Type = None # ApiTypes.TagType """ A list of your personal and global Template Tags """ class TemplateTagList: """ List of personal Tags """ Tags = None # List<ApiTypes.TemplateTag> """ List of globally available Tags """ GlobalTags = None # List<ApiTypes.TemplateTag> """ """ class TemplateType(Enum): """ Template supports any valid HTML """ RawHTML = 0 """ Template is created for email and can only be modified in the drag and drop email editor """ DragDropEditor = 1 """ Template is created for landing page and can only be modified in the drag and drop langing page editor """ LandingPageEditor = 2 """ Information about tracking link and its clicks. 
""" class TrackedLink: """ URL clicked """ Link = None # string """ Number of clicks """ Clicks = None # string """ Percent of clicks """ Percent = None # string """ HTTP or HTTPS Protocal used for link tracking. """ class TrackingType(Enum): """ Tracking protocal that is not encrypted. """ Http = 0 """ Tracking protocal using an external SSL Certificate for encryption. """ ExternalHttps = 1 """ Tracking protocal using an internal SSL Certificate for encyrption. """ InternalCertHttps = 2 """ Tracking protocal using LetsEncrypt Certificate for encryption. """ LetsEncryptCert = 3 """ Status of ValidDomain to determine how often tracking validation should be performed. """ class TrackingValidationStatus(Enum): """ """ Validated = 0 """ """ NotValidated = 1 """ """ Invalid = 2 """ """ Broken = 3 """ Account usage """ class Usage: """ Proper email address. """ Email = None # string """ True, if this Account is a Sub-Account. Otherwise, false """ IsSubAccount = None # bool """ """ List = None # List<ApiTypes.UsageData> """ Detailed data about daily usage """ class UsageData: """ Date in YYYY-MM-DDThh:ii:ss format """ Date = None # DateTime """ Number of finished tasks """ JobCount = None # int """ Overall number of recipients """ RecipientCount = None # int """ Number of inbound emails """ InboundCount = None # int """ Number of attachments sent """ AttachmentCount = None # int """ Size of attachments sent """ AttachmentsSize = None # long """ Calculated cost of sending """ Cost = None # decimal """ Number of pricate IPs """ PrivateIPCount = None # int? """ """ PrivateIPCost = None # decimal """ Number of SMS """ SmsCount = None # int? """ Overall cost of SMS """ SmsCost = None # decimal """ Cost of email credits """ EmailCreditsCost = None # int? 
""" Daily cost of Contact Delivery Tools """ ContactCost = None # decimal """ Number of contacts """ ContactCount = None # long """ """ SupportCost = None # decimal """ """ EmailCost = None # decimal """ """ VerificationCost = None # decimal """ """ VerificationCount = None # int """ """ InboundEmailCost = None # decimal """ """ InboundEmailCount = None # int """ """ class ValidationError: """ """ TXTRecord = None # string """ """ Error = None # string """ """ class ValidationStatus: """ """ IsValid = None # bool """ """ Errors = None # List<ApiTypes.ValidationError> """ """ Log = None # string """ """ class ValidEmail: """ """ ValidEmailID = None # int """ Proper email address. """ Email = None # string """ """ Validated = None # bool """ Notification webhook setting """ class Webhook: """ Public webhook ID """ WebhookID = None # string """ Filename """ Name = None # string """ Creation date. """ DateCreated = None # DateTime? """ Last change date """ DateUpdated = None # DateTime? """ URL of notification. """ URL = None # string """ """ NotifyOncePerEmail = None # bool """ """ NotificationForSent = None # bool """ """ NotificationForOpened = None # bool """ """ NotificationForClicked = None # bool """ """ NotificationForUnsubscribed = None # bool """ """ NotificationForAbuseReport = None # bool """ """ NotificationForError = None # bool """ Lists web notification options of your account. """ class WebNotificationOptions: """ URL address to receive web notifications to parse and process. """ WebNotificationUrl = None # string """ True, if you want to send web notifications for sent email. Otherwise, false """ WebNotificationForSent = None # bool """ True, if you want to send web notifications for opened email. Otherwise, false """ WebNotificationForOpened = None # bool """ True, if you want to send web notifications for clicked email. 
Otherwise, false """ WebNotificationForClicked = None # bool """ True, if you want to send web notifications for unsubscribed email. Otherwise, false """ WebnotificationForUnsubscribed = None # bool """ True, if you want to send web notifications for complaint email. Otherwise, false """ WebNotificationForAbuse = None # bool """ True, if you want to send web notifications for bounced email. Otherwise, false """ WebNotificationForError = None # bool """ True, if you want to receive notifications for each type only once per email. Otherwise, false """ WebNotificationNotifyOncePerEmail = None # bool """ Manage your AccessTokens (ApiKeys) """ """ Methods for managing your account and subaccounts. """ """ Manage all aspects of your Campaigns. """ """ Manage SMTP and HTTP API Channels for grouping email delivery. """ """ Methods used to manage your Contacts. """ """ Manage sending domains and verify DNS configurations. """ """ Send your emails and see their statuses """ """ Manage all of the exported data from the system. """ """ Manage the files on your account """ """ API methods for managing your Lists """ """ Methods to check logs of your campaigns """ """ Manages your segments - dynamically created lists of contacts """ """ Send SMS text messages to your clients. """ """ Managing and editing templates of your emails """ """ Managing sender emails. """
[ 37811, 198, 464, 17168, 13789, 357, 36393, 8, 198, 198, 15269, 357, 66, 8, 1584, 12, 5539, 48567, 9570, 11, 3457, 13, 198, 198, 5990, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 4866, 198, 1659, 428,...
2.753885
26,703
from typing import Optional from typing import Tuple import numpy as np import onnx from tests.utils.common import check_onnx_model from tests.utils.common import make_model_from_nodes
[ 6738, 19720, 1330, 32233, 198, 6738, 19720, 1330, 309, 29291, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 319, 77, 87, 198, 198, 6738, 5254, 13, 26791, 13, 11321, 1330, 2198, 62, 261, 77, 87, 62, 19849, 198, 6738, 5254, 13, ...
3.392857
56
# vehicles.py # Copyright 2020, Brigham Young University - Idaho. All rights reserved. # If this file was executed like this: # > python teach_solution.py # then call the main function. However, if this file # was simply imported, then skip the call to main. if __name__ == "__main__": main()
[ 2, 5672, 13, 9078, 198, 2, 15069, 12131, 11, 37434, 6960, 2059, 532, 20071, 13, 1439, 2489, 10395, 13, 628, 198, 2, 1002, 428, 2393, 373, 10945, 588, 428, 25, 198, 2, 1875, 21015, 4545, 62, 82, 2122, 13, 9078, 198, 2, 788, 869, ...
3.559524
84
import tweepy import sys import json #Twitter API credentials consumer_key = "" consumer_secret = "" access_key = "" access_secret = "" if __name__ == '__main__': #pass in the username of the account you want to download get_all_tweets(sys.argv[1])
[ 198, 11748, 4184, 538, 88, 198, 11748, 25064, 198, 11748, 33918, 198, 198, 2, 14254, 7824, 18031, 198, 49827, 62, 2539, 796, 13538, 198, 49827, 62, 21078, 796, 13538, 198, 15526, 62, 2539, 796, 13538, 198, 15526, 62, 21078, 796, 13538, ...
3
85
from unittest import TestCase from dependency_injector import providers from pet_store.services.unit_of_work import uow_provider from tests.e2e.utils.testing_infrastructure import ( get_session, destroy_testing_db, create_testing_db, uow_factory, ) # Changing the UnitOfWorkFactory using the dependency_injector library uow_provider.override(providers.Callable(uow_factory))
[ 6738, 555, 715, 395, 1330, 6208, 20448, 198, 198, 6738, 20203, 62, 259, 752, 273, 1330, 9549, 198, 198, 6738, 4273, 62, 8095, 13, 30416, 13, 20850, 62, 1659, 62, 1818, 1330, 334, 322, 62, 15234, 1304, 198, 6738, 5254, 13, 68, 17, ...
2.969925
133
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models from django.conf import settings
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 6738, 42625, 14208, 13, 10414, 1330, ...
3.111111
45
N = int(input()) S = input() result = 0 for i in range(10): a = S.find(str(i)) if a == -1: continue for j in range(10): b = S.find(str(j), a + 1) if b == -1: continue for k in range(10): if S.find(str(k), b + 1) != -1: result += 1 print(result)
[ 45, 796, 493, 7, 15414, 28955, 198, 50, 796, 5128, 3419, 198, 198, 20274, 796, 657, 198, 1640, 1312, 287, 2837, 7, 940, 2599, 198, 220, 220, 220, 257, 796, 311, 13, 19796, 7, 2536, 7, 72, 4008, 198, 220, 220, 220, 611, 257, 6624...
1.774194
186
from recsys.datamodel.data import Data from recsys.algorithm.factorize import SVD from .base import BaseAlgorithm from recommends.converters import convert_vote_list_to_itemprefs
[ 6738, 664, 17597, 13, 19608, 321, 375, 417, 13, 7890, 1330, 6060, 198, 6738, 664, 17597, 13, 282, 42289, 13, 31412, 1096, 1330, 311, 8898, 198, 6738, 764, 8692, 1330, 7308, 2348, 42289, 198, 6738, 20829, 13, 1102, 332, 1010, 1330, 103...
3.396226
53
import click from parsec.cli import pass_context, json_loads from parsec.decorators import custom_exception, list_output @click.command('get_invocations') @pass_context @custom_exception @list_output def cli(ctx): """Get a list containing all workflow invocations. Output: A list of workflow invocations. For example:: [{'history_id': '2f94e8ae9edff68a', 'id': 'df7a1f0c02a5b08e', 'model_class': 'WorkflowInvocation', 'state': 'new', 'update_time': '2015-10-31T22:00:22', 'uuid': 'c8aa2b1c-801a-11e5-a9e5-8ca98228593c', 'workflow_id': '03501d7626bd192f'}] """ return ctx.gi.invocations.get_invocations()
[ 11748, 3904, 198, 6738, 1582, 2363, 13, 44506, 1330, 1208, 62, 22866, 11, 33918, 62, 46030, 198, 6738, 1582, 2363, 13, 12501, 273, 2024, 1330, 2183, 62, 1069, 4516, 11, 1351, 62, 22915, 628, 198, 31, 12976, 13, 21812, 10786, 1136, 62,...
2.070822
353
from django.contrib import admin from cookbook.ingredients.models import Category, Ingredient admin.site.register(Category) admin.site.register(Ingredient)
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 4255, 2070, 13, 278, 23320, 13, 27530, 1330, 21743, 11, 17589, 445, 1153, 198, 198, 28482, 13, 15654, 13, 30238, 7, 27313, 8, 198, 28482, 13, 15654, 13, 30238, 7, 27682, 445, ...
3.488889
45
if __name__ == "__main__": print('Insert your text:') text = input() print('Insert your pattern:') pattern = input() alphabet = ''.join(set(text)) transitions = compute_transition_function(pattern, alphabet) finite_automaton_matcher(text, transitions, len(pattern))
[ 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 3601, 10786, 44402, 534, 2420, 25, 11537, 198, 220, 220, 220, 2420, 796, 5128, 3419, 198, 220, 220, 220, 3601, 10786, 44402, 534, 3912, 25, 11537, 198, ...
2.920792
101
import requests import json import sys from datetime import datetime, timedelta from disney_auth import get_header from disney_parks import Park from disney_points_of_interest import PointOfInterest import re
[ 11748, 7007, 198, 11748, 33918, 198, 11748, 25064, 198, 6738, 4818, 8079, 1330, 4818, 8079, 11, 28805, 12514, 198, 6738, 595, 1681, 62, 18439, 1330, 651, 62, 25677, 198, 6738, 595, 1681, 62, 1845, 591, 1330, 3250, 198, 6738, 595, 1681, ...
3.8
55
""" Item 29: Avoid Repeated Work in Comprehensions by Using Assignment Expressions """ stock = { 'nails': 125, 'screws': 35, 'wingnuts': 8, 'washers': 24, } order = ['screws', 'wingnuts', 'clips'] result = {} for name in order: count = stock.get(name, 0) batches = get_batches(count, 8) if batches: result[name] = batches print(f'result: {result}') # Use a dictionary comprehension to shorten this code. found = {name: get_batches(stock.get(name, 0), 8) for name in order if get_batches(stock.get(name, 0), 8)} print(f'found: {found}') # To avoid the repeated code above we can use the walrus operator. # Note that the assignment is made in the condition since this is evaluated first. # If the assignment is made in the value expression it will cause an NameError. found_better = {name: batches for name in order if (batches := get_batches(stock.get(name, 0), 8))} print(f'found_better: {found}') # One other advantage of the comprehensions is that they avoid the leakage caused by looping. # This example leaks because of the assignment operator. half = [(last := count // 2) for count in stock.values()] print(f'Last item of {half} is {last}') # This example leaks. for count in stock.values(): pass print(f'Last item of {list(stock.values())} is {count}') # This example has a loop variable in a comprehension and does not leak. half = [count_comp // 2 for count_comp in stock.values()] print(f'half = {half}') try: count_comp except NameError: print('Oops! name \'count_comp\' is not defined') # An assignment expression also works with generator expressions found = ((name, batches) for name in order if (batches := get_batches(stock.get(name, 0), 8))) print(f'next(found): {next(found)}') print(f'next(found): {next(found)}')
[ 37811, 198, 7449, 2808, 25, 220, 24390, 30558, 515, 5521, 287, 3082, 7345, 507, 416, 8554, 50144, 10604, 507, 198, 198, 37811, 198, 198, 13578, 796, 1391, 198, 197, 6, 77, 1768, 10354, 220, 13151, 11, 198, 197, 338, 42276, 82, 10354, ...
3.074783
575
"""The Graph model""" __docformat__ = "numpy" import datetime import logging import pandas as pd import requests from gamestonk_terminal.cryptocurrency.dataframe_helpers import ( very_long_number_formatter, ) from gamestonk_terminal.decorators import log_start_end logger = logging.getLogger(__name__) UNI_URL = "https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v2" SWAPS_FILTERS = ["timestamp", "token0", "token1", "amountUSD"] POOLS_FILTERS = [ "volumeUSD", "token0.name", "token0.symbol", "token1.name", "token1.symbol", "volumeUSD", "txCount", ] TOKENS_FILTERS = [ "index", "symbol", "name", "tradeVolumeUSD", "totalLiquidity", "txCount", ] PAIRS_FILTERS = [ "created", "pair", "token0", "token1", "volumeUSD", "txCount", "totalSupply", ] # TODO: convert USD values to int. otherwise sort by these columns won't work @log_start_end(log=logger) def query_graph(url: str, query: str) -> dict: """Helper methods for querying graphql api. [Source: https://thegraph.com/en/] Parameters ---------- url: str Endpoint url query: str Graphql query Returns ------- dict: Dictionary with response data """ response = requests.post(url, json={"query": query}) if response.status_code == 200: return response.json()["data"] return {} @log_start_end(log=logger) def get_uni_tokens(skip: int = 0, limit: int = 100) -> pd.DataFrame: """Get list of tokens trade-able on Uniswap DEX. [Source: https://thegraph.com/en/] Parameters ---------- skip: int Skip n number of records. limit: int Show n number of records. Returns ------- pd.DataFrame Uniswap tokens with trading volume, transaction count, liquidity. """ limit = min(limit, 1000) query = f""" {{ tokens(first: {limit}, skip:{skip}) {{ symbol name tradeVolumeUSD totalLiquidity txCount }} }} """ data = query_graph(UNI_URL, query) if not data: return pd.DataFrame() return pd.DataFrame(data["tokens"]).reset_index() @log_start_end(log=logger) def get_uniswap_stats() -> pd.DataFrame: """Get base statistics about Uniswap DEX. 
[Source: https://thegraph.com/en/] uniswapFactory id: 0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f - ethereum address on which Uniswap Factory smart contract was deployed. The factory contract is deployed once from the off-chain source code, and it contains functions that make it possible to create exchange contracts for any ERC20 token that does not already have one. It also functions as a registry of ERC20 tokens that have been added to the system, and the exchange with which they are associated. More: https://docs.uniswap.org/protocol/V1/guides/connect-to-uniswap We use 0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f address to fetch all smart contracts that were created with usage of this factory. Returns ------- pd.DataFrame Uniswap DEX statistics like liquidity, volume, number of pairs, number of transactions. """ query = """ { uniswapFactory(id: "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f"){ totalVolumeUSD totalLiquidityUSD pairCount txCount totalLiquidityUSD totalLiquidityETH } } """ data = query_graph(UNI_URL, query) if not data: return pd.DataFrame() df = pd.Series(data["uniswapFactory"]).reset_index() df.columns = ["Metric", "Value"] df["Value"] = df["Value"].apply(lambda x: very_long_number_formatter(x)) return df @log_start_end(log=logger) def get_uniswap_pool_recently_added( last_days: int = 14, min_volume: int = 100, min_liquidity: int = 0, min_tx: int = 100, ) -> pd.DataFrame: """Get lastly added trade-able pairs on Uniswap with parameters like: * number of days the pair has been active, * minimum trading volume, * minimum liquidity, * number of transactions. [Source: https://thegraph.com/en/] Parameters ---------- last_days: int How many days back to look for added pairs. min_volume: int Minimum volume min_liquidity: int Minimum liquidity min_tx: int Minimum number of transactions done in given pool. Returns ------- pd.DataFrame Lastly added pairs on Uniswap DEX. 
""" days = int( (datetime.datetime.now() - datetime.timedelta(days=last_days)).timestamp() ) query = f""" {{ pairs(first: 1000, where: {{createdAtTimestamp_gt: "{days}", volumeUSD_gt: "{min_volume}", reserveUSD_gt: "{min_liquidity}", txCount_gt: "{min_tx}" }}, orderBy: createdAtTimestamp, orderDirection: desc) {{ token0 {{ symbol name }} token1 {{ symbol name }} reserveUSD volumeUSD createdAtTimestamp totalSupply txCount }} }} """ data = query_graph(UNI_URL, query) if not data: return pd.DataFrame() df = pd.json_normalize(data["pairs"]) df["createdAtTimestamp"] = df["createdAtTimestamp"].apply( lambda x: datetime.datetime.fromtimestamp(int(x)) ) df["pair"] = df["token0.symbol"] + "/" + df["token1.symbol"] df.rename( columns={ "createdAtTimestamp": "created", "token0.name": "token0", "token1.name": "token1", }, inplace=True, ) return df[ [ "created", "pair", "token0", "token1", "volumeUSD", "txCount", "totalSupply", ] ] @log_start_end(log=logger) def get_uni_pools_by_volume() -> pd.DataFrame: """Get uniswap pools by volume. [Source: https://thegraph.com/en/] Returns ------- pd.DataFrame Trade-able pairs listed on Uniswap by top volume. """ query = """ { pairs(first: 1000, where: {reserveUSD_gt: "1000", volumeUSD_gt: "10000"}, orderBy: volumeUSD, orderDirection: desc) { token0 { symbol name } token1 { symbol name } volumeUSD txCount } } """ data = query_graph(UNI_URL, query) if not data: return pd.DataFrame() df = pd.json_normalize(data["pairs"]) return df[ [ "token0.name", "token0.symbol", "token1.name", "token1.symbol", "volumeUSD", "txCount", ] ] @log_start_end(log=logger) def get_last_uni_swaps(limit: int = 100) -> pd.DataFrame: """Get the last 100 swaps done on Uniswap [Source: https://thegraph.com/en/] Parameters ------- limit: int Number of swaps to return. Maximum possible number: 1000. 
Returns ------- pd.DataFrame Last 100 swaps on Uniswap """ limit = min(limit, 1000) query = f""" {{ swaps(first: {limit}, orderBy: timestamp, orderDirection: desc) {{ timestamp pair {{ token0 {{ symbol }} token1 {{ symbol }} }} amountUSD }} }} """ data = query_graph(UNI_URL, query) if not data: return pd.DataFrame() df = pd.json_normalize(data["swaps"]) df["timestamp"] = df["timestamp"].apply( lambda x: datetime.datetime.fromtimestamp(int(x)) ) df.columns = ["amountUSD", "timestamp", "token0", "token1"] return df[["timestamp", "token0", "token1", "amountUSD"]]
[ 37811, 464, 29681, 2746, 37811, 198, 834, 15390, 18982, 834, 796, 366, 77, 32152, 1, 198, 198, 11748, 4818, 8079, 198, 11748, 18931, 198, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 7007, 198, 198, 6738, 9106, 19115, 74, 62, 237...
2.128692
3,792
"""djangoApp URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path import loginRegister.views as loginviews import staticHome.views as staticviews import courses.views as courseviews urlpatterns = [ path('admin/', admin.site.urls), # Main page path('', staticviews.index, name="index"), path('aboutus', staticviews.aboutus, name="aboutus"), path('consortium', staticviews.consortium, name="consortium"), path('academy', staticviews.academy, name="academy"), path('products', staticviews.products, name="products"), path('program', staticviews.program, name="program"), # Contact forms path('contact/contactform', staticviews.contactform, name="contactform"), path('contact/academycontact', staticviews.academycontact, name="academycontact"), path('contact/consortiumcontact', staticviews.consortiumcontact, name="consortiumcontact"), # User oriented pages path('user/login', loginviews.user_login, name='user_login'), path('user/register', loginviews.user_register, name='user_register'), path('user/forgot-password', loginviews.user_forgot_password, name='user_forgot_password'), path('user/successLogin', loginviews.user_successLogin, name='user_successLogin'), path('user/logout', loginviews.logout, name='user_logout'), path('user/successPasswordReset', loginviews.successPasswordReset, name='success_password_reset'), # Instructor oriented pages path('instructor/instructor_login', 
loginviews.instructor_login, name='instructor_login'), path('instructor/instructor_register', loginviews.instructor_register, name='instructor_register'), path('instructor/instructor_forgot-password', loginviews.instructor_forgot_password, name='instructor_forgot_password'), path('instructor/instructor_successLogin', loginviews.instructor_successLogin, name='instructor_successLogin'), path('instructor/logout', loginviews.instructor_logout, name='instructor_logout'), # College oriented pages path('college/college_login', loginviews.college_login, name='college_login'), path('college/college_register', loginviews.college_register, name='college_register'), path('college/college_forgot-password', loginviews.college_forgot_password, name='college_forgot_password'), path('college/college_successLogin', loginviews.college_successLogin, name='college_successLogin'), path('college/logout', loginviews.college_logout, name='college_logout'), # Organisation oriented pages path('organisation/organisation_login', loginviews.organisation_login, name='organisation_login'), path('organisation/organisation_register', loginviews.organisation_register, name='organisation_register'), path('organisation/organisation_forgot-password', loginviews.organisation_forgot_password, name='organisation_forgot_password'), path('organisation/organisation_successLogin', loginviews.organisation_successLogin, name='organisation_successLogin'), path('organisation/logout', loginviews.organisation_logout, name='organisation_logout'), # Admin oriented pages path('admin/admin_login', loginviews.admin_login, name='admin_login'), path('admin/admin_register', loginviews.admin_register, name='admin_register'), path('admin/admin_forgot-password', loginviews.admin_forgot_password, name='admin_forgot_password'), path('admin/admin_successLogin', loginviews.admin_successLogin, name='admin_successLogin'), path('admin/admin_manage', loginviews.admin_manage, name='admin_manage'), path('admin/admin_addstructure', 
loginviews.admin_addstructure, name='admin_addstructure'), path('admin/admin_addstructure1', loginviews.admin_addstructure1, name='admin_addstructure1'), path('admin/admin_addCourse', loginviews.admin_addCourse, name='admin_addCourse'), path('admin/admin_addCourse1', loginviews.admin_addCourse1, name='admin_addCourse1'), path('admin/admin_addCourse2', loginviews.admin_addCourse2, name='admin_addCourse2'), path('admin/logout', loginviews.admin_logout, name='admin_logout'), path('admin/user_list', loginviews.user_list, name='user_list'), path('admin/instructor_list', loginviews.instructor_list, name='instructor_list'), path('admin/college_list', loginviews.college_list, name='college_list'), path('admin/organisation_list', loginviews.organisation_list, name='organisation_list'), path('admin/list_course', courseviews.course_list, name="course_list"), path('admin/new_course', courseviews.new_course, name="new_course"), path('admin/course_resource', courseviews.course_resource, name="course_resource"), path('course/delete/<int:courseID>', courseviews.delete_course, name="delete_course"), path('course/edit/<int:courseID>', courseviews.edit_course, name="edit_course"), # Industries related pages path('industries/automotive', staticviews.automotive, name="automotive"), path('industries/communication', staticviews.communication, name="communication"), path('industries/lifescience', staticviews.lifescience, name="lifescience"), path('industries/banking', staticviews.banking, name="banking"), path('industries/consumer', staticviews.consumer, name="consumer"), path('industries/travel', staticviews.travel, name="travel"), # Insights related pages path('insights/artificial', staticviews.artificial, name="artificial"), path('insights/blockchain', staticviews.blockchain, name="blockchain"), path('insights/iot', staticviews.iot, name="iot"), path('insights/futureworkforce', staticviews.futureworkforce, name="futureworkforce"), path('insights/cloudcomputing', 
staticviews.cloudcomputing, name="cloudcomputing"), path('insights/datascience', staticviews.datascience, name="datascience"), # Business related pages path('business/strategy', staticviews.strategy, name="strategy"), path('business/consulting', staticviews.consulting, name="consulting"), path('business/digital', staticviews.digital, name="digital"), path('business/technology', staticviews.technology, name="technology"), path('business/operations', staticviews.operations, name="operations"), path('business/Application', staticviews.Application, name="Application"), # Courses path('courses/uicourses', courseviews.uicourses, name="uicourses"), path('courses/backend', courseviews.backend, name="backend"), path('courses/fullstack', courseviews.fullstack, name="fullstack"), path('courses/functionaltesting', courseviews.functionaltesting, name="functionaltesting"), path('courses/mobility', courseviews.mobility, name="mobility"), path('courses/devops', courseviews.devops, name="devops"), path('courses/datascience', courseviews.datascience, name="datascience"), path('courses/cloud', courseviews.cloud, name="cloud"), path('courses/cyber', courseviews.cyber, name="cyber"), path('courses/digital', courseviews.digital, name="digital"), path('courses/erp', courseviews.erp, name="erp"), path('courses/it', courseviews.it, name="it"), path('courses/itcertification', courseviews.itcertification, name="itcertification"), path('courses/coursepage/coreui', courseviews.coreui, name="coreui"), path('courses/coursepage/advancedui', courseviews.advancedui, name="advancedui"), path('courses/coursepage/angularjs', courseviews.angularjs, name="angularjs"), path('courses/coursepage/reactjs', courseviews.reactjs, name="reactjs"), path('courses/coursepage/vuejs', courseviews.vuejs, name="vuejs"), path('courses/coursepage/java', courseviews.java, name="java"), path('courses/coursepage/dotnet', courseviews.dotnet, name="dotnet"), path('courses/coursepage/nodejs', courseviews.nodejs, 
name="nodejs"), path('courses/coursepage/advancejava', courseviews.advancejava, name="advancejava"), path('courses/coursepage/dotnetcore', courseviews.dotnetcore, name="dotnetcore"), path('courses/coursepage/adwordexpert', courseviews.adwordexpert, name="adwordexpert"), path('courses/coursepage/adwordsfoundation', courseviews.adwordsfoundation, name="adwordsfoundation"), path('courses/coursepage/agile', courseviews.agile, name="agile"), path('courses/coursepage/aimlexpertcourse', courseviews.aimlexpertcourse, name="aimlexpertcourse"), path('courses/coursepage/aimlfoundationcourse', courseviews.aimlfoundationcourse, name="aimlfoundationcourse"), path('courses/coursepage/android', courseviews.android, name="android"), path('courses/coursepage/ansible', courseviews.ansible, name="ansible"), path('courses/coursepage/automation', courseviews.automation, name="automation"), path('courses/coursepage/awscloudpractitioner', courseviews.awscloudpractitioner, name="awscloudpractitioner"), path('courses/coursepage/awsdeveloperassociate', courseviews.awsdeveloperassociate, name="awsdeveloperassociate"), path('courses/coursepage/awssolutionarchitectassociate', courseviews.awssolutionarchitectassociate, name="awssolutionarchitectassociate"), path('courses/coursepage/awssysopsassociateadministrator', courseviews.awssysopsassociateadministrator, name="awssysopsassociateadministrator"), path('courses/coursepage/awstechnicalessentials', courseviews.awstechnicalessentials, name="awstechnicalessentials"), path('courses/coursepage/branding', courseviews.branding, name="branding"), path('courses/coursepage/ccsp', courseviews.ccsp, name="ccsp"), path('courses/coursepage/ceh', courseviews.ceh, name="ceh"), path('courses/coursepage/chef', courseviews.chef, name="chef"), path('courses/coursepage/cisa', courseviews.cisa, name="cisa"), path('courses/coursepage/cism', courseviews.cism, name="cissp"), path('courses/coursepage/cloudtesting', courseviews.cloudtesting, name="cloudtesting"), 
path('courses/coursepage/comptiasecurity', courseviews.comptiasecurity , name="comptiasecurity"), path('courses/coursepage/contentmarketing', courseviews.contentmarketing, name="contentmarketing"), path('courses/coursepage/corejava', courseviews.corejava, name="corejava"), path('courses/coursepage/dataanalytics', courseviews.dataanalytics, name="dataanalytics"), path('courses/coursepage/datascientistcertification', courseviews.datascientistcertification, name="datascientistcertification"), path('courses/coursepage/deeplearning', courseviews.deeplearning, name="deeplearning"), path('courses/coursepage/devopsexpert', courseviews.devopsexpert, name="devopsexpert"), path('courses/coursepage/devopsfoundation', courseviews.devopsfoundation, name="devopsfoundation"), path('courses/coursepage/digitalmarketingexpert', courseviews.digitalmarketingexpert, name="digitalmarketingexpert"), path('courses/coursepage/digitalmarketingfoundation', courseviews.digitalmarketingfoundation, name="digitalmarketingfoundation"), path('courses/coursepage/docker', courseviews.docker, name="docker"), path('courses/coursepage/dsbootcamp', courseviews.dsbootcamp, name="dsbootcamp"), path('courses/coursepage/flutter', courseviews.flutter, name="flutter"), path('courses/coursepage/fullstacknet', courseviews.fullstacknet, name="fullstacknet"), path('courses/coursepage/fullstacktesting', courseviews.fullstacktesting, name="fullstacktesting"), path('courses/coursepage/golang', courseviews.golang, name="golang"), path('courses/coursepage/hadoop', courseviews.hadoop, name="hadoop"), path('courses/coursepage/inforln', courseviews.inforln, name="inforln"), path('courses/coursepage/ionic', courseviews.ionic, name="ionic"), path('courses/coursepage/ios', courseviews.ios, name="ios"), path('courses/coursepage/itilfoundation', courseviews.itilfoundation, name="itilfoundation"), path('courses/coursepage/kubernets', courseviews.kubernets, name="kubernets"), path('courses/coursepage/manualtesting', 
courseviews.manualtesting, name="manualtesting"), path('courses/coursepage/mean', courseviews.mean, name="mean"), path('courses/coursepage/measn', courseviews.measn, name="measn"), path('courses/coursepage/mevn', courseviews.mevn, name="mevn"), path('courses/coursepage/microsoftazureexpertcertification', courseviews.microsoftazureexpertcertification, name="microsoftazureexpertcertification"), path('courses/coursepage/microsoftazurefundamentals', courseviews.microsoftazurefundamentals, name="microsoftazurefundamentals"), path('courses/coursepage/microsoftdynamics', courseviews.microsoftdynamics, name="microsoftdynamics"), path('courses/coursepage/mlwithpython', courseviews.mlwithpython, name="mlwithpython"), path('courses/coursepage/naturallanguageprocessing', courseviews.naturallanguageprocessing, name="naturallanguageprocessing"), path('courses/coursepage/onsenui', courseviews.onsenui, name="onsenui"), path('courses/coursepage/openstack', courseviews.openstack, name="openstack"), path('courses/coursepage/oracle', courseviews.oracle, name="oracle"), path('courses/coursepage/pmiacp', courseviews.pmiacp, name="pmiacp"), path('courses/coursepage/pmp', courseviews.pmp, name="pmp"), path('courses/coursepage/prince2', courseviews.prince2, name="prince2"), path('courses/coursepage/python', courseviews.python, name="python"), path('courses/coursepage/reactnative', courseviews.reactnative, name="reactnative"), path('courses/coursepage/remedy', courseviews.remedy, name="remedy"), path('courses/coursepage/rootstack', courseviews.rootstack, name="rootstack"), path('courses/coursepage/rprogramming', courseviews.rprogramming, name="rprogramming"), path('courses/coursepage/ruby', courseviews.ruby, name="ruby"), path('courses/coursepage/rubyfullstack', courseviews.rubyfullstack, name="rubyfullstack"), path('courses/coursepage/salesforce', courseviews.salesforce, name="salesforce"), path('courses/coursepage/sap', courseviews.sap, name="sap"), path('courses/coursepage/scm', 
courseviews.scm, name="scm"), path('courses/coursepage/seo', courseviews.seo, name="seo"), path('courses/coursepage/servicenow', courseviews.servicenow, name="servicenow"), path('courses/coursepage/smm', courseviews.smm, name="smm"), path('courses/coursepage/smo', courseviews.smo, name="smo"), path('courses/coursepage/xamarin', courseviews.xamarin, name="xamarin"), path('courses/coursepage/vuejs', courseviews.vuejs, name="vuejs"), # Read more path('courses/coursedetails', courseviews.coursedetails, name="coursedetails"), ]
[ 37811, 28241, 14208, 4677, 10289, 28373, 198, 198, 464, 4600, 6371, 33279, 82, 63, 1351, 11926, 32336, 284, 5009, 13, 1114, 517, 1321, 3387, 766, 25, 198, 220, 220, 220, 3740, 1378, 31628, 13, 28241, 648, 404, 305, 752, 13, 785, 14, ...
2.275387
8,065
import urllib.parse import urllib.request, urllib.error import secrets import hashlib, hmac, base64 from mimetypes import guess_all_extensions from datetime import datetime from copy import deepcopy import re import os, sys, time import io from collections import OrderedDict import threading from PySide2.QtWebEngineWidgets import QWebEngineView from PySide2.QtCore import Qt, QUrl import requests from requests.exceptions import * from rauth import OAuth1Service from requests_oauthlib import OAuth2Session from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor from urllib.parse import urlparse, parse_qs, unquote import webbrowser import cchardet import json if sys.version_info.major < 3: from urllib import url2pathname else: from urllib.request import url2pathname import dateutil.parser from dialogs.folder import SelectFolderDialog from dialogs.webdialog import PreLoginWebDialog, BrowserDialog, WebPageCustom from server import LoginServer from widgets.paramedit import * from utilities import * try: from credentials import * except ImportError: credentials = {} class ApiTab(QScrollArea): """ Generic API Tab Class - parse placeholders - saves and load current settings - init basic inputs - handle requests """ streamingData = Signal(list, list, list) # Called when Facepager stops def idtostr(self, val): """ Return the Node-ID as a string """ return str(val).encode("utf-8") def parseURL(self, url): """ Parse any url and return the query-strings and base bath """ url = url.split('?', 1) path = url[0] query = url[1] if len(url) > 1 else '' query = urllib.parse.parse_qsl(query) query = OrderedDict((k, v) for k, v in query) return path, query def getURL(self, urlpath, params, nodedata,options): """ Replaces the Facepager placeholders ("<",">") by the Object-ID or any other Facepager-Placeholder Example: http://www.facebook.com/<Object-ID>/friends """ urlpath, urlparams = self.parseURL(urlpath) # Filter empty params params = {name: params[name] for name in 
params if (name != '') and (name != '<None>') and (params[name] != '<None>')} # Collect template parameters (= placeholders) templateparams = {} for name in params: match = re.match(r"^<(.*)>$", str(name)) if match: # Replace placeholders in parameter value value = self.parsePlaceholders(params[name], nodedata, {}, options) templateparams[match.group(1)] = value # Replace placeholders in parameters for name in params: match = re.match(r"^<(.*)>$", str(name)) if not match: # Replace placeholders in parameter value value = self.parsePlaceholders(params[name], nodedata, templateparams, options) if isinstance(value,list): urlparams[name] = [str(x) for x in value] else: urlparams[name] = str(value) # Replace placeholders in urlpath urlpath = self.parsePlaceholders(urlpath, nodedata, templateparams) return urlpath, urlparams, templateparams # Gets data from input fields or defaults (never gets credentials from default values!) # Populates input fields from loaded options, presets and default values @Slot(str) def loadDoc(self): ''' Loads and prepares documentation ''' # Add base path self.basepathEdit.clear() urls = self.mainWindow.apiWindow.getApiBasePaths(self.name) self.basepathEdit.insertItems(0,urls) def showDoc(self): ''' Open window with documentation ''' basepath = self.basepathEdit.currentText().strip() path = self.resourceEdit.currentText().strip() self.mainWindow.apiWindow.showDoc(self.name, basepath, path) def initInputs(self): ''' Create base path edit, resource edit and param edit Set resource according to the APIdocs, if any docs are available ''' #Base path self.basepathEdit = QComboBox(self) if not self.defaults.get('basepath',None) is None: self.basepathEdit.insertItems(0, [self.defaults.get('basepath','')]) self.basepathEdit.setEditable(True) self.mainLayout.addRow("Base path", self.basepathEdit) self.basepathEdit.currentIndexChanged.connect(self.onChangedBasepath) #Resource self.resourceLayout = QHBoxLayout() self.actionApiHelp = QAction('Open 
documentation if available.',self) self.actionApiHelp.setText('?') self.actionApiHelp.triggered.connect(self.showDoc) self.buttonApiHelp =QToolButton(self) self.buttonApiHelp.setToolButtonStyle(Qt.ToolButtonTextOnly) self.buttonApiHelp.setDefaultAction(self.actionApiHelp) self.buttonApiHelp.setVisible(False) self.resourceEdit = QComboBox(self) self.resourceEdit.setEditable(True) self.resourceLayout.addWidget(self.resourceEdit) self.resourceLayout.addWidget(self.buttonApiHelp) self.mainLayout.addRow("Resource", self.resourceLayout) #Parameters self.paramEdit = QParamEdit(self) self.mainLayout.addRow("Parameters", self.paramEdit) self.resourceEdit.currentIndexChanged.connect(self.onChangedResource) # Upload folder # Download folder @Slot() def onChangedBasepath(self, index = None): ''' Load API doc ''' if index is None: index = self.basepathEdit.findText(self.basepathEdit.currentText()) if index != -1: self.basepathEdit.setCurrentIndex(index) basepath = self.basepathEdit.currentText().strip() self.apidoc = self.mainWindow.apiWindow.getApiDoc(self.name,basepath) self.resourceEdit.clear() if self.apidoc and isinstance(self.apidoc,dict): # Add endpoints in reverse order endpoints = self.apidoc.get("paths",{}) paths = endpoints.keys() for path in list(paths): operations = endpoints[path] path = path.replace("{", "<").replace("}", ">") self.resourceEdit.addItem(path) idx = self.resourceEdit.count()-1 self.resourceEdit.setItemData(idx, wraptip(getDictValue(operations,"get.summary","")), Qt.ToolTipRole) #store params for later use in onChangedResource self.resourceEdit.setItemData(idx, operations, Qt.UserRole) self.buttonApiHelp.setVisible(True) # Path extension for Twitter (deprecated) self.defaults['extension'] = getDictValue(self.apidoc, "servers.0.x-facepager-suffix") # Default extract settings self.defaults['key_objectid'] = getDictValueOrNone(self.apidoc,"x-facepager-objectid") self.defaults['key_nodedata'] = getDictValueOrNone(self.apidoc,"x-facepager-extract") # 
Default pagination setting pagination = getDictValueOrNone(self.apidoc, "x-facepager-pagination", dump=False) self.defaults['paging_type'] = getDictValueOrNone(pagination,'method') self.defaults['param_paging'] = getDictValueOrNone(pagination,'param') self.defaults['key_paging'] = getDictValueOrNone(pagination,'key') self.defaults['paging_stop'] = getDictValueOrNone(pagination,'stop') else: self.resourceEdit.insertItem(0, "/<Object ID>") # set Resource self.onChangedResource() @Slot() def onChangedResource(self, index = None): ''' Handles the automated parameter suggestion for the current selected API relation/endpoint based on the OpenAPI specification 3.0.0 ''' if index is None: index = self.resourceEdit.findText(self.resourceEdit.currentText()) if index != -1: self.resourceEdit.setCurrentIndex(index) operations = self.resourceEdit.itemData(index,Qt.UserRole) params = getDictValue(operations,"get.parameters",False) if operations else [] self.paramEdit.setOpenAPIOptions(params) @Slot() def initSession(self, no=0, renew=False): """ Return existing session or create a new session if necessary :param no: Session number :return: session """ with self.lock_session: while (len(self.sessions) <= no): self.sessions.append(None) session = self.sessions[no] if not renew else None if session is None: session = requests.Session() session.proxies.update(self.getProxies()) # Mount new adapters = don't cache connections adapter = requests.adapters.HTTPAdapter() session.mount('http://', adapter) session.mount('https://', adapter) session.mount('file://', LocalFileAdapter()) self.sessions[no] = session return session def closeSession(self, no=0): """ Close the session :param no: number of session :return: None """ with self.lock_session: if (len(self.sessions) > no) and (self.sessions[no] is not None): self.sessions[no].close() self.sessions[no] = None def request(self, session_no=0, path=None, args=None, headers=None, method="GET", payload=None,foldername=None, filename=None, 
fileext=None, format='json'): """ Start a new threadsafe session and request """ #Throttle speed if (self.speed is not None) and (self.lastrequest is not None): pause = ((60 * 1000) / float(self.speed)) - self.lastrequest.msecsTo(QDateTime.currentDateTime()) while (self.connected) and (pause > 0): time.sleep(0.1) pause = ((60 * 1000) / float(self.speed)) - self.lastrequest.msecsTo(QDateTime.currentDateTime()) self.lastrequest = QDateTime.currentDateTime() if session_no is None: session_no = 0 self.closeSession(session_no) session = self.initSession(session_no) try: response = None try: maxretries = 3 while True: try: if (not session): raise Exception("No session available.") # Use cookie jar instead of header to persist redirects cookies = headers.pop('Cookie', None) if headers is not None else None if cookies is not None: cookies = dict(item.split("=",maxsplit=1) for item in cookies.split(";")) # Send request response = session.request(method,path, params=args, headers=headers, cookies=cookies, data=payload, timeout=self.timeout,stream=True,verify=True) # verify=False except (HTTPError, ConnectionError) as e: maxretries -= 1 # Try next request with new session if (maxretries > 0) and (self.connected): time.sleep(0.1) session = self.initSession(session_no, True) self.logMessage("Automatic retry: Request Error: {0}".format(str(e))) else: raise e else: break if int(response.headers.get('content-length',0)) > (self.maxsize * 1024 * 1024): raise DataTooBigError(f"File is too big, content length is {response.headers['content-length']}.") status = 'fetched' if response.ok else 'error' status = status + ' (' + str(response.status_code) + ')' headers = dict(list(response.headers.items())) # Download data data = { 'content-type': response.headers.get("content-type",""), 'sourcepath': path,'sourcequery': args,'finalurl': response.url } fullfilename, content = download(response, foldername, filename, fileext) if fullfilename is not None: data['filename'] = 
os.path.basename(fullfilename) data['filepath'] = fullfilename # Text if format == 'text': data['text'] = content # str(response.text) # Scrape links elif format == 'links': try: links, base = extractLinks(content, response.url) data['links'] = links data['base'] = base except Exception as e: data['error'] = 'Could not extract Links.' data['message'] = str(e) data['response'] = content # JSON elif format == 'json': try: data = json.loads(content) if content != '' else [] except Exception as e: # self.logMessage("No valid JSON data, try to convert XML to JSON ("+str(e)+")") # try: # data = xmlToJson(response.text) # except: data = { 'error': 'Data could not be converted to JSON', 'response': content, 'exception':str(e) } # JSON elif format == 'xml': try: data = xmlToJson(content) except Exception as e: data = { 'error': 'Data could not be converted to JSON', 'response': content, 'exception':str(e) } except Exception as e: #except (DataTooBigError, HTTPError, ReadTimeout, ConnectionError, InvalidURL, MissingSchema) as e: status = 'request error' data = {'error':str(e)} headers = {} #raise Exception("Request Error: {0}".format(str(e))) finally: if response is not None: response.close() return data, headers, status def disconnectSocket(self): """Used to hardly disconnect the streaming client""" self.connected = False while (len(self.sessions) > 0): session = self.sessions.pop() session.close() #self.response.raw._fp.close() #self.response.close() @Slot() @Slot() class AuthTab(ApiTab): """ Module providing authorization - init input fields - login windows - open authorization support """ # see YoutubeTab for keys in the options-parameter @Slot() @Slot() def doLogin(self, session_no = 0): """ Show login window :param session_no: the number of the session used for login :return: """ self.closeSession(session_no) options = self.getOptions() if options['auth_type'] == 'OAuth2 Client Credentials': self.doTwitterAppLogin(session_no) elif options['auth_type'] == 'OAuth1': 
self.doOAuth1Login(session_no) elif options['auth_type'] == 'Cookie': self.doCookieLogin(session_no) elif options['auth_type'] == 'API key': QMessageBox.information(self, "Facepager", "Manually enter your API key into the access token field or change the authentication method in the settings.") elif options['auth_type'] == 'Disable': QMessageBox.information(self, "Login disabled","No authentication method selected. Please choose a method in the settings.", QMessageBox.StandardButton.Ok) elif options['auth_type'] == 'OAuth2 External': self.doOAuth2ExternalLogin(session_no) else: self.doOAuth2Login(session_no) @Slot() @Slot() @Slot() @Slot() @Slot() @Slot() def showLoginWindow(self, caption='', url='',width=600,height=600): """ Create a SSL-capable WebView for the login-process Uses a Custom QT-Webpage Implementation Supply a onLoginWindowChanged-Slot to fetch the API-Token """ self.loginWindow = QMainWindow(self.mainWindow) self.loginWindow.setAttribute(Qt.WA_DeleteOnClose) self.loginWindow.resize(width, height) self.loginWindow.setWindowTitle(caption) self.loginWindow.stopped = False self.loginWindow.cookie = '' #create WebView with Facebook log-Dialog, OpenSSL needed self.loginStatus = self.loginWindow.statusBar() self.login_webview = QWebEngineView(self.loginWindow) self.loginWindow.setCentralWidget(self.login_webview) # Use the custom- WebPage class webpage = WebPageCustom(self.login_webview) webpage.logMessage.connect(self.logMessage) self.login_webview.setPage(webpage) #Connect to the onLoginWindowChanged-method self.login_webview.urlChanged.connect(self.onLoginWindowChanged) webpage.urlNotFound.connect(self.onLoginWindowChanged) #catch redirects to localhost or nonexistent uris # Connect to the loadFinished-Slot for an error message self.login_webview.loadFinished.connect(self.loadFinished) self.login_webview.load(QUrl(url)) self.login_webview.show() self.loginWindow.show() @Slot() @Slot() @Slot() @Slot() @Slot() def initSession(self, no=0, renew=False): """ 
Dispatch session initialization to specialized functions :param no: session number :return: session object """ options = self.getOptions() if options.get('auth_type') == 'OAuth1': return self.initOAuth1Session(no, renew) else: return self.initOAuth2Session(no, renew) def initOAuth1Session(self,no=0, renew=False): """ Return session or create if necessary :param no: session number :return: session object """ while (len(self.sessions) <= no): self.sessions.append(None) session = self.sessions[no] if not renew else None if session is None: if (self.tokenEdit.text() == '') or (self.tokensecretEdit.text() == ''): raise Exception("No access, login please!") service = self.getOAuth1Service() session = service.get_session((self.tokenEdit.text(), self.tokensecretEdit.text())) self.sessions[no] = session return session # Get Client ID # return custom client ID if provided # otherwise login to Facepager and return preregistered ID # or return None if login fails # Retrieves a user ID from the API that # later is hashed for maintaining the # anonymized user list. REimplement in the modules. # def onSslErrors(self, reply, errors): # url = str(reply.url().toString()) # reply.ignoreSslErrors() # self.logmessage.emit("SSL certificate error ignored: %s (Warning: Your connection might be insecure!)" % url) # see YoutubeTab for keys in the options-parameter # Get authorization header # See https://docs.aws.amazon.com/de_de/general/latest/gr/sigv4-signed-request-examples.html # https://stackoverflow.com/questions/10123929/python-requests-fetch-a-file-from-a-local-url class LocalFileAdapter(requests.adapters.BaseAdapter): """Protocol Adapter to allow Requests to GET file:// URLs @todo: Properly handle non-empty hostname portions. 
""" @staticmethod def _chkpath(method, path): """Return an HTTP status for the given filesystem path.""" if method.lower() in ('put', 'delete'): return 501, "Not Implemented" # TODO elif method.lower() not in ('get', 'head'): return 405, "Method Not Allowed" elif os.path.isdir(path): return 400, "Path Not A File" elif not os.path.isfile(path): return 404, "File Not Found" elif not os.access(path, os.R_OK): return 403, "Access Denied" else: return 200, "OK" def send(self, req, **kwargs): # pylint: disable=unused-argument """Return the file specified by the given request @type req: C{PreparedRequest} @todo: Should I bother filling `response.headers` and processing If-Modified-Since and friends using `os.stat`? """ path = os.path.normcase(os.path.normpath(url2pathname(req.path_url))) response = requests.Response() response.status_code, response.reason = self._chkpath(req.method, path) if response.status_code == 200 and req.method.lower() != 'head': try: response.raw = open(path, 'rb') response.encoding = cchardet.detect(response.content)['encoding'] except (OSError, IOError) as err: response.status_code = 500 response.reason = str(err) if isinstance(req.url, bytes): response.url = req.url.decode('utf-8') else: response.url = req.url response.request = req response.connection = self return response
[ 11748, 2956, 297, 571, 13, 29572, 198, 11748, 2956, 297, 571, 13, 25927, 11, 2956, 297, 571, 13, 18224, 198, 198, 11748, 13141, 198, 11748, 12234, 8019, 11, 289, 20285, 11, 2779, 2414, 198, 6738, 17007, 2963, 12272, 1330, 4724, 62, 43...
2.216771
10,029
# # pyJanus (Python Bindings for Janus) # # Copyright (c) 2022 Alwin Wang # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # import os import pyJanus def test_gridded_table_example(): """ This checks pyJanus against the Examples/GriddedTableExample.cpp program """ xml_path = f"{os.path.dirname(__file__)}/../../Examples/GriddedTableExample.xml" janus = pyJanus.Janus(xml_path) angle_of_attack = janus.get_variabledef("angleOfAttack") reynolds_number = janus.get_variabledef("reynoldsNumber") drag_coefficient = janus.get_variabledef("dragCoefficient") angle_of_attack.set_value(10) reynolds_number.set_value(0.36e6) assert round(drag_coefficient.get_value(), 2) == 0.01 angle_of_attack.set_value(30) assert round(drag_coefficient.get_value(), 2) == 0.58 reynolds_number.set_value(0.70e6) assert round(drag_coefficient.get_value(), 3) == 0.595
[ 2, 198, 2, 12972, 12128, 385, 357, 37906, 41211, 654, 329, 2365, 385, 8, 198, 2, 198, 2, 15069, 357, 66, 8, 33160, 978, 5404, 15233, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 2...
3.100162
619
import torch pthPath='/DATACENTER4/hao.yang/project/Qin/model_mb/unload/0530/unload_0530.pth' wgtPath=pthPath pth=torch.load(pthPath) cutLayer='module.roi_heads.box.predictor' cutParas=['cls_score.weight','cls_score.bias','bbox_pred.weight','bbox_pred.bias',] for cutPara in cutParas: pth['model'].pop('{}.{}'.format(cutLayer,cutPara)) # print(pth['model'].keys()) wgt=pth['model'] torch.save(wgt,wgtPath)
[ 11748, 28034, 201, 198, 79, 400, 15235, 11639, 14, 35, 1404, 2246, 3525, 1137, 19, 14, 23778, 13, 17859, 14, 16302, 14, 48, 259, 14, 19849, 62, 2022, 14, 403, 2220, 14, 2713, 1270, 14, 403, 2220, 62, 2713, 1270, 13, 79, 400, 6, ...
2.116162
198
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: syft_proto/execution/v1/type_wrapper.proto from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='syft_proto/execution/v1/type_wrapper.proto', package='syft_proto.execution.v1', syntax='proto3', serialized_options=b'\n$org.openmined.syftproto.execution.v1', create_key=_descriptor._internal_create_key, serialized_pb=b'\n*syft_proto/execution/v1/type_wrapper.proto\x12\x17syft_proto.execution.v1\"2\n\x13InputTypeDescriptor\x12\x1b\n\ttype_name\x18\x01 \x01(\tR\x08typeName\"\xa3\x07\n\x11NestedTypeWrapper\x12[\n\x0cnested_types\x18\x01 \x01(\x0b\x32\x38.syft_proto.execution.v1.NestedTypeWrapper.TypeContainerR\x0bnestedTypes\x1aY\n\x08TypeList\x12M\n\x0cnested_types\x18\x01 \x03(\x0b\x32*.syft_proto.execution.v1.NestedTypeWrapperR\x0bnestedTypes\x1aZ\n\tTypeTuple\x12M\n\x0cnested_types\x18\x01 \x03(\x0b\x32*.syft_proto.execution.v1.NestedTypeWrapperR\x0bnestedTypes\x1a\xdd\x01\n\x07TypeMap\x12\x66\n\x0cnested_types\x18\x01 \x03(\x0b\x32\x43.syft_proto.execution.v1.NestedTypeWrapper.TypeMap.NestedTypesEntryR\x0bnestedTypes\x1aj\n\x10NestedTypesEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12@\n\x05value\x18\x02 \x01(\x0b\x32*.syft_proto.execution.v1.NestedTypeWrapperR\x05value:\x02\x38\x01\x1a\x99\x03\n\rTypeContainer\x12O\n\x0bnested_type\x18\x01 \x01(\x0b\x32,.syft_proto.execution.v1.InputTypeDescriptorH\x00R\nnestedType\x12_\n\x10nested_type_list\x18\x02 \x01(\x0b\x32\x33.syft_proto.execution.v1.NestedTypeWrapper.TypeListH\x00R\x0enestedTypeList\x12\x62\n\x11nested_type_tuple\x18\x03 
\x01(\x0b\x32\x34.syft_proto.execution.v1.NestedTypeWrapper.TypeTupleH\x00R\x0fnestedTypeTuple\x12^\n\x10nested_type_dict\x18\x04 \x01(\x0b\x32\x32.syft_proto.execution.v1.NestedTypeWrapper.TypeMapH\x00R\x0enestedTypeDictB\x12\n\x10nested_containerB&\n$org.openmined.syftproto.execution.v1b\x06proto3' ) _INPUTTYPEDESCRIPTOR = _descriptor.Descriptor( name='InputTypeDescriptor', full_name='syft_proto.execution.v1.InputTypeDescriptor', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='type_name', full_name='syft_proto.execution.v1.InputTypeDescriptor.type_name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, json_name='typeName', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=71, serialized_end=121, ) _NESTEDTYPEWRAPPER_TYPELIST = _descriptor.Descriptor( name='TypeList', full_name='syft_proto.execution.v1.NestedTypeWrapper.TypeList', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='nested_types', full_name='syft_proto.execution.v1.NestedTypeWrapper.TypeList.nested_types', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, json_name='nestedTypes', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], 
serialized_start=238, serialized_end=327, ) _NESTEDTYPEWRAPPER_TYPETUPLE = _descriptor.Descriptor( name='TypeTuple', full_name='syft_proto.execution.v1.NestedTypeWrapper.TypeTuple', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='nested_types', full_name='syft_proto.execution.v1.NestedTypeWrapper.TypeTuple.nested_types', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, json_name='nestedTypes', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=329, serialized_end=419, ) _NESTEDTYPEWRAPPER_TYPEMAP_NESTEDTYPESENTRY = _descriptor.Descriptor( name='NestedTypesEntry', full_name='syft_proto.execution.v1.NestedTypeWrapper.TypeMap.NestedTypesEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='key', full_name='syft_proto.execution.v1.NestedTypeWrapper.TypeMap.NestedTypesEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, json_name='key', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='value', full_name='syft_proto.execution.v1.NestedTypeWrapper.TypeMap.NestedTypesEntry.value', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, 
json_name='value', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=b'8\001', is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=537, serialized_end=643, ) _NESTEDTYPEWRAPPER_TYPEMAP = _descriptor.Descriptor( name='TypeMap', full_name='syft_proto.execution.v1.NestedTypeWrapper.TypeMap', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='nested_types', full_name='syft_proto.execution.v1.NestedTypeWrapper.TypeMap.nested_types', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, json_name='nestedTypes', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_NESTEDTYPEWRAPPER_TYPEMAP_NESTEDTYPESENTRY, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=422, serialized_end=643, ) _NESTEDTYPEWRAPPER_TYPECONTAINER = _descriptor.Descriptor( name='TypeContainer', full_name='syft_proto.execution.v1.NestedTypeWrapper.TypeContainer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='nested_type', full_name='syft_proto.execution.v1.NestedTypeWrapper.TypeContainer.nested_type', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, json_name='nestedType', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='nested_type_list', 
full_name='syft_proto.execution.v1.NestedTypeWrapper.TypeContainer.nested_type_list', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, json_name='nestedTypeList', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='nested_type_tuple', full_name='syft_proto.execution.v1.NestedTypeWrapper.TypeContainer.nested_type_tuple', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, json_name='nestedTypeTuple', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='nested_type_dict', full_name='syft_proto.execution.v1.NestedTypeWrapper.TypeContainer.nested_type_dict', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, json_name='nestedTypeDict', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='nested_container', full_name='syft_proto.execution.v1.NestedTypeWrapper.TypeContainer.nested_container', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=646, serialized_end=1055, ) _NESTEDTYPEWRAPPER = _descriptor.Descriptor( name='NestedTypeWrapper', full_name='syft_proto.execution.v1.NestedTypeWrapper', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( 
name='nested_types', full_name='syft_proto.execution.v1.NestedTypeWrapper.nested_types', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, json_name='nestedTypes', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_NESTEDTYPEWRAPPER_TYPELIST, _NESTEDTYPEWRAPPER_TYPETUPLE, _NESTEDTYPEWRAPPER_TYPEMAP, _NESTEDTYPEWRAPPER_TYPECONTAINER, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=124, serialized_end=1055, ) _NESTEDTYPEWRAPPER_TYPELIST.fields_by_name['nested_types'].message_type = _NESTEDTYPEWRAPPER _NESTEDTYPEWRAPPER_TYPELIST.containing_type = _NESTEDTYPEWRAPPER _NESTEDTYPEWRAPPER_TYPETUPLE.fields_by_name['nested_types'].message_type = _NESTEDTYPEWRAPPER _NESTEDTYPEWRAPPER_TYPETUPLE.containing_type = _NESTEDTYPEWRAPPER _NESTEDTYPEWRAPPER_TYPEMAP_NESTEDTYPESENTRY.fields_by_name['value'].message_type = _NESTEDTYPEWRAPPER _NESTEDTYPEWRAPPER_TYPEMAP_NESTEDTYPESENTRY.containing_type = _NESTEDTYPEWRAPPER_TYPEMAP _NESTEDTYPEWRAPPER_TYPEMAP.fields_by_name['nested_types'].message_type = _NESTEDTYPEWRAPPER_TYPEMAP_NESTEDTYPESENTRY _NESTEDTYPEWRAPPER_TYPEMAP.containing_type = _NESTEDTYPEWRAPPER _NESTEDTYPEWRAPPER_TYPECONTAINER.fields_by_name['nested_type'].message_type = _INPUTTYPEDESCRIPTOR _NESTEDTYPEWRAPPER_TYPECONTAINER.fields_by_name['nested_type_list'].message_type = _NESTEDTYPEWRAPPER_TYPELIST _NESTEDTYPEWRAPPER_TYPECONTAINER.fields_by_name['nested_type_tuple'].message_type = _NESTEDTYPEWRAPPER_TYPETUPLE _NESTEDTYPEWRAPPER_TYPECONTAINER.fields_by_name['nested_type_dict'].message_type = _NESTEDTYPEWRAPPER_TYPEMAP _NESTEDTYPEWRAPPER_TYPECONTAINER.containing_type = _NESTEDTYPEWRAPPER _NESTEDTYPEWRAPPER_TYPECONTAINER.oneofs_by_name['nested_container'].fields.append( 
_NESTEDTYPEWRAPPER_TYPECONTAINER.fields_by_name['nested_type']) _NESTEDTYPEWRAPPER_TYPECONTAINER.fields_by_name['nested_type'].containing_oneof = _NESTEDTYPEWRAPPER_TYPECONTAINER.oneofs_by_name['nested_container'] _NESTEDTYPEWRAPPER_TYPECONTAINER.oneofs_by_name['nested_container'].fields.append( _NESTEDTYPEWRAPPER_TYPECONTAINER.fields_by_name['nested_type_list']) _NESTEDTYPEWRAPPER_TYPECONTAINER.fields_by_name['nested_type_list'].containing_oneof = _NESTEDTYPEWRAPPER_TYPECONTAINER.oneofs_by_name['nested_container'] _NESTEDTYPEWRAPPER_TYPECONTAINER.oneofs_by_name['nested_container'].fields.append( _NESTEDTYPEWRAPPER_TYPECONTAINER.fields_by_name['nested_type_tuple']) _NESTEDTYPEWRAPPER_TYPECONTAINER.fields_by_name['nested_type_tuple'].containing_oneof = _NESTEDTYPEWRAPPER_TYPECONTAINER.oneofs_by_name['nested_container'] _NESTEDTYPEWRAPPER_TYPECONTAINER.oneofs_by_name['nested_container'].fields.append( _NESTEDTYPEWRAPPER_TYPECONTAINER.fields_by_name['nested_type_dict']) _NESTEDTYPEWRAPPER_TYPECONTAINER.fields_by_name['nested_type_dict'].containing_oneof = _NESTEDTYPEWRAPPER_TYPECONTAINER.oneofs_by_name['nested_container'] _NESTEDTYPEWRAPPER.fields_by_name['nested_types'].message_type = _NESTEDTYPEWRAPPER_TYPECONTAINER DESCRIPTOR.message_types_by_name['InputTypeDescriptor'] = _INPUTTYPEDESCRIPTOR DESCRIPTOR.message_types_by_name['NestedTypeWrapper'] = _NESTEDTYPEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR) InputTypeDescriptor = _reflection.GeneratedProtocolMessageType('InputTypeDescriptor', (_message.Message,), { 'DESCRIPTOR' : _INPUTTYPEDESCRIPTOR, '__module__' : 'syft_proto.execution.v1.type_wrapper_pb2' # @@protoc_insertion_point(class_scope:syft_proto.execution.v1.InputTypeDescriptor) }) _sym_db.RegisterMessage(InputTypeDescriptor) NestedTypeWrapper = _reflection.GeneratedProtocolMessageType('NestedTypeWrapper', (_message.Message,), { 'TypeList' : _reflection.GeneratedProtocolMessageType('TypeList', (_message.Message,), { 'DESCRIPTOR' : 
_NESTEDTYPEWRAPPER_TYPELIST, '__module__' : 'syft_proto.execution.v1.type_wrapper_pb2' # @@protoc_insertion_point(class_scope:syft_proto.execution.v1.NestedTypeWrapper.TypeList) }) , 'TypeTuple' : _reflection.GeneratedProtocolMessageType('TypeTuple', (_message.Message,), { 'DESCRIPTOR' : _NESTEDTYPEWRAPPER_TYPETUPLE, '__module__' : 'syft_proto.execution.v1.type_wrapper_pb2' # @@protoc_insertion_point(class_scope:syft_proto.execution.v1.NestedTypeWrapper.TypeTuple) }) , 'TypeMap' : _reflection.GeneratedProtocolMessageType('TypeMap', (_message.Message,), { 'NestedTypesEntry' : _reflection.GeneratedProtocolMessageType('NestedTypesEntry', (_message.Message,), { 'DESCRIPTOR' : _NESTEDTYPEWRAPPER_TYPEMAP_NESTEDTYPESENTRY, '__module__' : 'syft_proto.execution.v1.type_wrapper_pb2' # @@protoc_insertion_point(class_scope:syft_proto.execution.v1.NestedTypeWrapper.TypeMap.NestedTypesEntry) }) , 'DESCRIPTOR' : _NESTEDTYPEWRAPPER_TYPEMAP, '__module__' : 'syft_proto.execution.v1.type_wrapper_pb2' # @@protoc_insertion_point(class_scope:syft_proto.execution.v1.NestedTypeWrapper.TypeMap) }) , 'TypeContainer' : _reflection.GeneratedProtocolMessageType('TypeContainer', (_message.Message,), { 'DESCRIPTOR' : _NESTEDTYPEWRAPPER_TYPECONTAINER, '__module__' : 'syft_proto.execution.v1.type_wrapper_pb2' # @@protoc_insertion_point(class_scope:syft_proto.execution.v1.NestedTypeWrapper.TypeContainer) }) , 'DESCRIPTOR' : _NESTEDTYPEWRAPPER, '__module__' : 'syft_proto.execution.v1.type_wrapper_pb2' # @@protoc_insertion_point(class_scope:syft_proto.execution.v1.NestedTypeWrapper) }) _sym_db.RegisterMessage(NestedTypeWrapper) _sym_db.RegisterMessage(NestedTypeWrapper.TypeList) _sym_db.RegisterMessage(NestedTypeWrapper.TypeTuple) _sym_db.RegisterMessage(NestedTypeWrapper.TypeMap) _sym_db.RegisterMessage(NestedTypeWrapper.TypeMap.NestedTypesEntry) _sym_db.RegisterMessage(NestedTypeWrapper.TypeContainer) DESCRIPTOR._options = None _NESTEDTYPEWRAPPER_TYPEMAP_NESTEDTYPESENTRY._options = None # 
@@protoc_insertion_point(module_scope)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 262, 8435, 11876, 17050, 13, 220, 8410, 5626, 48483, 0, 198, 2, 2723, 25, 827, 701, 62, 1676, 1462, 14, 18558, 1009, 14, 85, 16, 14, 4906, 62, 4...
2.356267
7,093
import re
from collections import defaultdict

import numpy as np

# NOTE(review): nltk is a third-party dependency; the names imported here are
# not used by the helpers visible in this module, so the import is guarded to
# keep the diversity/tile helpers importable when nltk is absent — TODO
# confirm downstream code tolerates these being None.
try:
    from nltk.translate.bleu_score import sentence_bleu
    from nltk.translate.bleu_score import SmoothingFunction
except ImportError:
    sentence_bleu = None
    SmoothingFunction = None

# Token normalization map: PTB-style bracket escapes and quote pairs back to
# their literal characters.
REMAP = {"-lrb-": "(", "-rrb-": ")", "-lcb-": "{", "-rcb-": "}",
         "-lsb-": "[", "-rsb-": "]", "``": '"', "''": '"'}


def test_dist(cand):
    '''
    Distinct-n diversity metrics (dist-1 / dist-2).

    reference --- calc_diversity()
    https://github.com/microsoft/DialoGPT/blob/457835e7d8acd08acf7f6f0e980f36fd327ea37c/dstc/metrics.py#L131

    :param cand: path to a text file, one candidate sentence per line
    :return: (div1, div2) -- ratio of unique unigrams / bigrams to the total
        unigram / bigram counts.  A ratio whose denominator is zero (empty
        file, or no line with two or more words) is reported as 0.0.
    '''
    tokens = [0.0, 0.0]
    types = [defaultdict(int), defaultdict(int)]
    for line in open(cand, encoding='utf-8'):
        words = line.strip('\n').strip().split()
        for n in range(2):
            for idx in range(len(words) - n):
                ngram = ' '.join(words[idx:idx + n + 1])
                types[n][ngram] = 1
                tokens[n] += 1
    # Fix: guard BOTH denominators.  The original only guarded the bigram
    # count and raised ZeroDivisionError on an empty candidate file.
    div1 = len(types[0]) / tokens[0] if tokens[0] else 0.0
    div2 = len(types[1]) / tokens[1] if tokens[1] else 0.0
    return div1, div2


# This is a metric helper, not a unit test: stop pytest from collecting it
# (its test_* name otherwise triggers a "fixture 'cand' not found" error).
test_dist.__test__ = False


"""
def test_ppl(cand):
    '''
    score of perplexity
    接口:将insertion transformer model得到的word ins scores中每个sentence的score list保存在文本中,cand即表示该文本路径
    '''
    ppl = []
    for line in open(cand, encoding='utf-8'):
        word_probs = line.strip('\n').strip().split()
        if len(word_probs) == 0:
            continue
        sentence_prob_log_sum = 0.0
        n = len(word_probablities)
        for word_prob in word_probs:
            sentence_prob_log_sum += math.log(word_prob, 2)
        ppl.append(math.pow(2, -sentence_probability_log_sum * (1.0/n)))
    ave_score = np.mean(ppl)
    return ppl, ave_score
"""


def tile(x, count, dim=0):
    """
    Tiles x on dimension dim count times.

    Each slice along ``dim`` is repeated ``count`` times *consecutively*
    (e.g. rows a, b become a, a, b, b for count=2), matching the OpenNMT
    beam-search tiling convention.

    :param x: tensor to tile (``None`` is passed through unchanged)
    :param count: number of repetitions of each slice
    :param dim: dimension along which to tile
    :return: the tiled tensor, or ``None`` if ``x`` is ``None``
    """
    if x is None:
        return None
    perm = list(range(len(x.size())))
    if dim != 0:
        # Swap the target dim to the front, tile there, swap back at the end.
        perm[0], perm[dim] = perm[dim], perm[0]
        x = x.permute(perm).contiguous()
    out_size = list(x.size())
    out_size[0] *= count
    batch = x.size(0)
    x = x.contiguous()\
        .view(batch, -1) \
        .transpose(0, 1) \
        .repeat(count, 1) \
        .transpose(0, 1) \
        .contiguous() \
        .view(*out_size)
    if dim != 0:
        x = x.permute(perm).contiguous()
    return x


"""
def rouge_results_to_str(results_dict):
    if results_dict is None:
        return "No Results.\n"
    return ">> ROUGE-F(1/2/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-P(1/2/l): {:.2f}/{:.2f}/{:.2f}\n".format(
        results_dict["rouge_1_f_score"] * 100,
        results_dict["rouge_2_f_score"] * 100,
        results_dict["rouge_l_f_score"] * 100,
        results_dict["rouge_1_recall"] * 100,
        results_dict["rouge_2_recall"] * 100,
        results_dict["rouge_l_recall"] * 100,
        results_dict["rouge_1_precision"] * 100,
        results_dict["rouge_2_precision"] * 100,
        results_dict["rouge_l_precision"] * 100
    )
"""
[ 11748, 302, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 299, 2528, 74, 13, 7645, 17660, 13, 903, 84, 62, 26675, 1330, 6827, 62, 903, 84, 198, 6738, 299, 2528, 74, 13, 7645, 17660, 13, 903, 84, 62, 26675, 1330, 2439, 1025, 722, ...
1.96947
1,605
#!/usr/bin/env python
"""Packaging script for the pyslet distribution."""

import logging
import sys

import pyslet.info

# pyslet requires at least Python 2.6; refuse to configure on older runtimes.
_MIN_HEX_VERSION = 0x02060000

if sys.hexversion < _MIN_HEX_VERSION:
    logging.error("pyslet requires Python Version 2.6 (or greater)")
else:
    # Prefer setuptools; fall back to the stdlib distutils when unavailable.
    try:
        from setuptools import setup
    except ImportError:
        from distutils.core import setup

    with open('README.rst') as readme:
        long_description = readme.read()

    setup(
        name=pyslet.info.name,
        version=pyslet.info.version,
        description=pyslet.info.title,
        long_description=long_description,
        author="Steve Lay",
        author_email="steve.w.lay@gmail.com",
        url=pyslet.info.home,
        packages=[
            'pyslet',
            'pyslet.http',
            'pyslet.xml',
            'pyslet.qtiv1',
            'pyslet.qtiv2',
            'pyslet.odata2',
        ],
        # Bundled data files shipped inside the packages.
        package_data={
            'pyslet': [
                'imsbltiv1p0_metadata.xml',
                'wsgi_metadata.xml',
                'unicode5_blocks.pck',
                'unicode5_catogories.pck',
                'unicode5_blocks3.pck',
                'unicode5_catogories3.pck',
            ],
            'pyslet.odata2': ['streamstore.xml'],
        },
        classifiers=[
            'Development Status :: 3 - Alpha',
            'Intended Audience :: Developers',
            'Natural Language :: English',
            'License :: OSI Approved :: BSD License',
            'Operating System :: OS Independent',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 2.6',
            'Programming Language :: Python :: 2.7',
            'Topic :: Education',
            'Topic :: Education :: Computer Aided Instruction (CAI)',
            'Topic :: Education :: Testing',
            'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
            'Topic :: Software Development :: Libraries :: Python Modules',
        ],
    )
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 18931, 198, 11748, 25064, 198, 11748, 279, 893, 1616, 13, 10951, 198, 198, 361, 25064, 13, 33095, 9641, 1279, 657, 87, 15, 22136, 2388, 25, 198, 220, 220, 220, 18931, 13, ...
1.788539
1,239
import os
import sys

sys.path.append(os.getcwd())

import argparse
import string
from pathlib import Path

import numpy as np
import random

import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn

from iqra.utils import AttnLabelConverter
from iqra.data import loader
from iqra.models import OCRNet
from iqra.trainer.task import TaskOCR

import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.metrics import Accuracy


if __name__ == "__main__":
    # ---- CLI definition -------------------------------------------------
    parser = argparse.ArgumentParser(description='crnn.pytorch trainer cli apps')
    parser.add_argument('--resume', default=None, type=str,
                        help='Choose pth file to resume training')
    parser.add_argument('--summary', default="top", type=str,
                        help='there are three value that are accepter, "full", "top" and None')
    parser.add_argument('--manual_seed', type=int, default=1111,
                        help='for random seed setting')
    parser.add_argument('--max_epoch', required=True, default=None, type=int,
                        help='How many epoch to run training')
    # Fix: help text previously claimed a 0.01 default while the actual
    # default is 1 (the usual Adadelta learning rate).
    parser.add_argument('--lr', '--learning-rate', default=1, type=float,
                        help='choose learning rate for optimizer, default value is 1')
    parser.add_argument('--beta1', default=0.9, type=float,
                        help='choose beta1 for optimizer, default value is 0.9')
    # Fix: help text previously claimed a 0.999 default while the actual
    # default is 0.95.
    parser.add_argument('--beta2', default=0.95, type=float,
                        help='choose beta2 for optimizer, default value is 0.95')
    parser.add_argument('--rho', type=float, default=0.95,
                        help='decay rate rho for Adadelta. default=0.95')
    parser.add_argument('--eps', type=float, default=1e-8,
                        help='eps for Adadelta. default=1e-8')
    parser.add_argument('--grad_clip', default=5.0, type=float,
                        help='choose gradient clip value for backward prop, default value is 5.0')
    parser.add_argument('--batch_size', default=32, type=int,
                        help='choose batch size for data loader, default value is 32')
    parser.add_argument('--shuffle', default=True, type=bool,
                        help='choose to shuffle data or not, default value is True')
    parser.add_argument('--num_workers', default=8, type=int,
                        help='how many workers to load for running dataset')
    parser.add_argument('--trainset_path', required=True, type=str,
                        help='path to synthtext dataset')
    parser.add_argument('--validset_path', required=True, type=str,
                        help='path to synthtext dataset')
    parser.add_argument('--image_size', default='100x32', type=str,
                        help='width and height of the image, default value is 100x32')
    parser.add_argument('--usage_ratio', default='0.5,0.5', type=str,
                        help='training data usage ratio default is (0.5, 0.5)')
    parser.add_argument('--batch_max_length', default=25, type=int,
                        help='choose batch size for data loader, default value is 32')
    parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz',
                        help='character label')
    parser.add_argument('--sensitive', type=bool, default=True,
                        help='for sensitive character mode')
    parser.add_argument('--in_channel', type=int, default=1,
                        help='the number of input channel of Feature extractor')
    parser.add_argument('--out_channel', type=int, default=512,
                        help='the number of output channel of Feature extractor')
    parser.add_argument('--hidden_size', type=int, default=256,
                        help='the size of the LSTM hidden state')
    parser.add_argument('--num_gpus', default=1, type=int,
                        help='fill with zero to use cpu or fill with number 2 to use multigpu')
    parser.add_argument('--log_freq', default=10, type=int,
                        help='show log every value, default value is 10')
    parser.add_argument('--max_steps', default=30000, type=int,
                        help='max iteration step, default value is 30000')
    parser.add_argument('--valcheck_interval', default=2000, type=int,
                        help='validation check interval in step, default value is 2000')
    parser.add_argument('--checkpoint_dir', default='checkpoints/', type=str,
                        help='checkpoint directory for saving progress')
    parser.add_argument('--logs_dir', default='logs/', type=str,
                        help='directory logs for tensorboard callback')

    args = parser.parse_args()

    # ---- Derived configuration ------------------------------------------
    w, h = args.image_size.split('x')
    w, h = int(w), int(h)

    MANUAL_SEED = args.manual_seed
    SUMMARY = args.summary
    BENCHMARK = True
    DETERMINISTIC = True
    MAX_EPOCH = args.max_epoch
    MAX_STEPS = args.max_steps
    LRATE = args.lr
    BETA1 = args.beta1
    BETA2 = args.beta2
    RHO = args.rho
    EPS = args.eps
    GRAD_CLIP = args.grad_clip
    BATCH_SIZE = args.batch_size
    NUM_WORKERS = args.num_workers
    SHUFFLE = args.shuffle
    IMG_SIZE = (h, w)
    USAGE_RATIO = list(map(float, args.usage_ratio.split(',')))
    BATCH_MAX_LENGTH = args.batch_max_length
    SENSITIVE = args.sensitive
    if SENSITIVE:
        # Case-sensitive mode uses the full printable set minus whitespace controls.
        CHARACTER = string.printable[:-6]
    else:
        CHARACTER = args.character
    TRAINSET_PATH = args.trainset_path
    VALIDSET_PATH = args.validset_path
    IN_CHANNEL = args.in_channel
    OUT_CHANNEL = args.out_channel
    HIDDEN_SIZE = args.hidden_size
    NUM_GPUS = args.num_gpus
    SAVED_CHECKPOINT_PATH = args.checkpoint_dir
    SAVED_LOGS_PATH = args.logs_dir
    LOG_FREQ = args.log_freq
    VALCHECK_INTERVAL = args.valcheck_interval

    # ---- Resume handling -------------------------------------------------
    CHECKPOINT_RESUME = False
    CHECKPOINT_PATH = None
    WEIGHT_RESUME = False
    WEIGHT_PATH = None
    if args.resume:
        fpath = Path(args.resume)
        if fpath.is_file():
            # Fix: Path.suffix includes the leading dot, so the original
            # comparisons against 'ckpt'/'pth' could never match and every
            # resume attempt fell through to the error branch.
            if fpath.suffix == '.ckpt':
                # pytorch-lightning checkpoint
                CHECKPOINT_RESUME = True
                CHECKPOINT_PATH = str(fpath)
            elif fpath.suffix == '.pth':
                # plain pytorch state-dict
                WEIGHT_RESUME = True
                WEIGHT_PATH = str(fpath)
            else:
                # Fix: was ``raise NotImplemented(...)`` — NotImplemented is a
                # constant, not an exception, and calling it raises TypeError.
                raise NotImplementedError(
                    f'File with {fpath.suffix} is not implemented! '
                    f'make sure you load valid file with ckpt or pth extension!')
        else:
            raise IOError('Path that you specified is not valid pytorch or pytorch-lighning path!')

    # ---- Data ------------------------------------------------------------
    converter = AttnLabelConverter(CHARACTER)
    NUM_CLASS = len(converter.character)

    trainloader, trainset = loader.train_loader(TRAINSET_PATH, batch_size=BATCH_SIZE,
                                                shuffle=SHUFFLE, num_workers=NUM_WORKERS,
                                                img_size=IMG_SIZE, usage_ratio=USAGE_RATIO,
                                                is_sensitive=SENSITIVE, character=CHARACTER)
    validloader, validset = loader.valid_loader(VALIDSET_PATH, batch_size=BATCH_SIZE,
                                                shuffle=True, num_workers=NUM_WORKERS,
                                                img_size=IMG_SIZE,
                                                is_sensitive=SENSITIVE, character=CHARACTER)

    # ---- Model preparation -----------------------------------------------
    # Build the network once, then load either the resume weights or the
    # bundled pretrained weights.
    model = OCRNet(num_class=NUM_CLASS, in_feat=IN_CHANNEL,
                   hidden_size=HIDDEN_SIZE, im_size=IMG_SIZE)
    if WEIGHT_RESUME:
        # Fix: the original loaded the resume weights and then immediately
        # overwrote them with the pretrained checkpoint, making
        # ``--resume model.pth`` a no-op.
        weights = torch.load(WEIGHT_PATH, map_location=torch.device('cpu'))
        model.load_state_dict(weights)
    else:
        ocrnet_state_dict = torch.load('weights/ocrnet_pretrained.pth',
                                       map_location=torch.device('cpu'))
        model.load_state_dict(ocrnet_state_dict)

    # index 0 is the padding token produced by AttnLabelConverter — presumably;
    # TODO confirm against iqra.utils.
    criterion = nn.CrossEntropyLoss(ignore_index=0)
    optimizer = optim.Adadelta(model.parameters(), lr=LRATE, rho=RHO, eps=EPS)
    task = TaskOCR(model, optimizer, criterion, converter)

    # ---- Trainer ---------------------------------------------------------
    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        filepath=SAVED_CHECKPOINT_PATH,
        save_top_k=3,
        verbose=True,
        monitor='val_loss',
        mode='min',
        prefix='ocrnet'
    )
    tb_logger = pl_loggers.TensorBoardLogger(SAVED_LOGS_PATH)

    DISTRIBUTED_BACKEND = None
    if NUM_GPUS > 1:
        DISTRIBUTED_BACKEND = 'ddp'

    # Seed everything before building the trainer for reproducibility.
    pl.trainer.seed_everything(MANUAL_SEED)
    trainer = pl.Trainer(
        weights_summary=SUMMARY,
        max_epochs=MAX_EPOCH,
        max_steps=MAX_STEPS,
        val_check_interval=VALCHECK_INTERVAL,
        gpus=NUM_GPUS,
        distributed_backend=DISTRIBUTED_BACKEND,
        log_every_n_steps=LOG_FREQ,
        deterministic=DETERMINISTIC,
        benchmark=BENCHMARK,
        logger=tb_logger,
        checkpoint_callback=checkpoint_callback,
        resume_from_checkpoint=CHECKPOINT_PATH
    )
    trainer.fit(task, trainloader, validloader)
[ 11748, 28686, 198, 11748, 25064, 198, 17597, 13, 6978, 13, 33295, 7, 418, 13, 1136, 66, 16993, 28955, 198, 198, 6738, 3108, 8019, 1330, 10644, 628, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 4738, 198, 198, 11748, 28034, 198, 198, ...
2.178256
4,415
""" Given n non-negative integers a1, a2, ..., an, where each represents a point at coordinate (i, ai). n vertical lines are drawn such that the two endpoints of line i is at (i, ai) and (i, 0). Find two lines, which together with x-axis forms a container, such that the container contains the most water. Note: You may not slant the container and n is at least 2. """
[ 37811, 198, 15056, 299, 1729, 12, 31591, 37014, 257, 16, 11, 257, 17, 11, 2644, 11, 281, 11, 810, 1123, 6870, 257, 966, 379, 20435, 357, 72, 11, 257, 72, 737, 220, 198, 77, 11723, 3951, 389, 7428, 884, 326, 262, 734, 886, 13033, ...
3.485981
107
# -*- coding:utf-8 -*- __author__ = 'Ulric Qin' # -- app config -- DEBUG = True # -- db config -- DB_HOST = "127.0.0.1" DB_PORT = 3306 DB_USER = "root" DB_PASS = "" DB_NAME = "falcon_portal" # -- cookie config -- SECRET_KEY = "4e.5tyg8-u9ioj" SESSION_COOKIE_NAME = "falcon-portal" PERMANENT_SESSION_LIFETIME = 3600 * 24 * 30 UIC_ADDRESS = { 'internal': 'http://127.0.0.1:8080', 'external': 'http://11.11.11.11:8080', } UIC_TOKEN = '' MAINTAINERS = ['root'] CONTACT = 'ulric.qin@gmail.com' COMMUNITY = True try: from frame.local_config import * except Exception, e: print "[warning] %s" % e
[ 2, 532, 9, 12, 19617, 25, 40477, 12, 23, 532, 9, 12, 198, 834, 9800, 834, 796, 705, 47920, 1173, 31482, 6, 198, 198, 2, 1377, 598, 4566, 1377, 198, 30531, 796, 6407, 198, 198, 2, 1377, 20613, 4566, 1377, 198, 11012, 62, 39, 1089...
2.169611
283
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from model.GCN import GCN
from torch.autograd import Variable


class GNN(nn.Module):
    """GNN module layer.

    NOTE(review): stub — the class body contains no parameters or
    ``forward``; presumably the implementation lives elsewhere or is yet to
    be written. TODO confirm before use.
    """


class DGCNLayer(nn.Module):
    """DGCN module layer.

    NOTE(review): stub — same as :class:`GNN`, no layers or ``forward`` are
    defined in this file.
    """
[ 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 198, 11748, 10688, 198, 6738, 2746, 13, 15916, 45, 1330, 20145, 45, 198, 6738, 28034, 13, ...
2.60177
113
from logzero import logger as log
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base

from app_config import *

# Declarative base class that ORM model classes elsewhere inherit from.
Base = declarative_base()

# Module-level singletons for the database layer.  They start as None and are
# presumably populated by an init routine not visible in this chunk (which
# would also use the log/create_engine/sessionmaker imports above) — TODO
# confirm against the rest of the module.
global_db_obj = None
db_connection = None
db_session = None
[ 6738, 2604, 22570, 1330, 49706, 355, 2604, 198, 6738, 44161, 282, 26599, 1330, 2251, 62, 18392, 198, 6738, 44161, 282, 26599, 13, 579, 1330, 6246, 10297, 198, 6738, 44161, 282, 26599, 13, 2302, 13, 32446, 283, 876, 1330, 2377, 283, 876,...
3.341176
85
# Package version as a (major, minor, patch) tuple — presumably joined into
# the "0.22.0" string by the package's __init__; not visible here.
VERSION = (0, 22, 0)
[ 43717, 796, 357, 15, 11, 2534, 11, 657, 8 ]
2.222222
9
""" Controler for the GA4GH Task Execution Schema Server """ import json import logging from random import randint from addict import Dict from connexion import request from flask import current_app from werkzeug.exceptions import BadRequest def __get_task_info(resources, params): ''' Helper function to estimate task queueing time and costs and build the response object (tesTaskInfo object). ''' costs_compute = __get_compute_costs( resources=resources, currency=params['currency'], unit_costs_cores=params['unit_costs']['cpu_usage'], unit_costs_memory=params['unit_costs']['memory_consumption'], ) costs_storage = __get_storage_costs( resources=resources, currency=params['currency'], unit_costs_storage=params['unit_costs']['data_storage'] ) time_queue = __get_queue_time() return { 'estimated_compute_costs': costs_compute, 'estimated_queue_time_sec': time_queue, 'estimated_storage_costs': costs_storage, 'unit_costs_data_transfer': { 'amount': params['unit_costs']['data_transfer'], 'currency': params['currency'], }, } def __get_compute_costs( resources, currency='BTC', unit_costs_cores=0, unit_costs_memory=0, ): ''' Helper function to estimate task costs from tesResources object. Returns a dictionary of tesCosts objects. ''' # Get execution time t = resources['execution_time_sec'] # Calculate partial compute costs c_cores = t * resources['cpu_cores'] * unit_costs_cores c_mem = t * resources['ram_gb'] * unit_costs_memory # Calculate total compute costs c_total = c_cores + c_mem # Return dictionary of tesCosts objects return { 'amount': c_total, 'currency': currency, } def __get_storage_costs( resources, currency='BTC', unit_costs_storage=0, ): ''' Helper function to estimate task storage costs from tesResources object. Returns a tesCosts object. ''' return { 'amount': resources['disk_gb'] * unit_costs_storage, 'currency': currency, } def __get_queue_time(): ''' Helper function to estimate task queue time from tesResources object. Returns a random float. 
''' return float(randint(0, 3600)) def __update_task_info_config(config_new, config_old): ''' Helper function that updates the task info configuration given the old and new config. ''' if __hasExtraKeys(config_new, config_old): raise BadRequest else: config_old = Dict(config_old) config_old.update(config_new) return config_old def __hasExtraKeys(query, ref): ''' Helper function that returns `True` if dictionary `query` contains keys that dictionary `ref` does not contain. Works recursively. ''' for key in query: if key not in ref: return True elif isinstance(query[key], dict): return __hasExtraKeys(query[key], ref[key]) return False
[ 37811, 198, 4264, 305, 1754, 329, 262, 14545, 19, 17511, 15941, 37497, 10011, 2611, 9652, 198, 37811, 198, 11748, 33918, 198, 11748, 18931, 198, 6738, 4738, 1330, 43720, 600, 198, 198, 6738, 19678, 1330, 360, 713, 198, 6738, 369, 12413, ...
2.555371
1,201
# Copyright (c) 2017-2020 Neogeo-Technologies.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from idgo_store.views.ftp import CreateResourceStoreFtp
from idgo_store.views.ftp import EditResourceStoreFtp
from idgo_store.views.ftp import EmitResourceStoreFtp
from idgo_store.views.ftp import DeleteResourceStoreFtp
from idgo_store.views.ftp import ShowResourceStoreFtp
from idgo_store.views.ftp import UpdateResourceStoreFtp
from idgo_store.views.directory_storage import ShowDirectoryStorage
from idgo_store.views.directory_storage import ShowDirectoryStorageGlob
from idgo_store.views.upload import CreateResourceStoreUpload
from idgo_store.views.upload import EditResourceStoreUpload
from idgo_store.views.upload import EmitResourceStoreUpload
from idgo_store.views.upload import DeleteResourceStoreUpload
from idgo_store.views.upload import ShowResourceStoreUpload
from idgo_store.views.upload import UpdateResourceStoreUpload


# Fix: ``__all__`` must list *names* (strings).  The original listed the
# imported class objects themselves, which makes
# ``from idgo_store.views import *`` raise
# ``TypeError: Item in __all__ must be str``.
__all__ = [
    'CreateResourceStoreFtp',
    'CreateResourceStoreUpload',
    'EditResourceStoreFtp',
    'EditResourceStoreUpload',
    'EmitResourceStoreFtp',
    'EmitResourceStoreUpload',
    'DeleteResourceStoreFtp',
    'DeleteResourceStoreUpload',
    'ShowDirectoryStorage',
    'ShowDirectoryStorageGlob',
    'ShowResourceStoreFtp',
    'ShowResourceStoreUpload',
    'UpdateResourceStoreFtp',
    'UpdateResourceStoreUpload',
]
[ 2, 15069, 357, 66, 8, 2177, 12, 42334, 21227, 469, 78, 12, 25574, 5823, 13, 198, 2, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 40...
3.483209
536
# -*- coding: utf-8 -*- from renormalizer.mps.matrix import tensordot, multi_tensor_contract, asnumpy, asxp from renormalizer.mps.backend import xp, USE_GPU from renormalizer.mps import Mps, Mpo, MpDm from renormalizer.mps.lib import Environ, compressed_sum from renormalizer.lib import davidson import numpy as np import scipy import logging import opt_einsum as oe import primme from collections import defaultdict logger = logging.getLogger(__name__) class TDA(object): r""" Tamm–Dancoff approximation (or called CIS) to calculate the excited states based on MPS. TDA use the first order tangent space to do excitation. The implementation is similar to J. Chem. Phys. 140, 024108 (2014). Parameters ---------- model: renormalizer.model.model Model of the system hmpo: renormalizer.mps.Mpo mpo of Hamiltonian mps: renormalizer.mps.Mps ground state mps (will be overwritten) nroots: int, optional number of roots to be calculated. Default is ``1``. algo: str, optional iterative diagonalization solver. Default is ``primme``. Valid option are ``davidson`` and ``primme``. Note ---- Quantum number is not used, thus the conservation is not guaranteed. """ def kernel(self, restart=False, include_psi0=False): r"""calculate the roots Parameters ---------- restart: bool, optional if restart from the former converged root. Default is ``False``. If ``restart = True``, ``include_psi0`` must be the same as the former calculation. include_psi0: bool, optional if the basis of Hamiltonian includes the ground state :math:`\Psi_0`. Default is ``False``. Returns ------- e: np.ndarray the energy of the states, if ``include_psi0 = True``, the first element is the ground state energy, otherwise, it is the energy of the first excited state. 
""" # right canonical mps mpo = self.hmpo nroots = self.nroots algo = self.algo site_num = mpo.site_num if not restart: # make sure that M is not redundant near the edge mps = self.mps.ensure_right_canon().canonicalise().normalize().canonicalise() logger.debug(f"reference mps shape, {mps}") mps_r_cano = mps.copy() assert mps.to_right tangent_u = [] for ims in range(len(mps)): shape = list(mps[ims].shape) u, s, vt = scipy.linalg.svd(mps[ims].l_combine(), full_matrices=True) rank = len(s) if include_psi0 and ims == site_num-1: tangent_u.append(u.reshape(shape[:-1]+[-1])) else: if rank < u.shape[1]: tangent_u.append(u[:,rank:].reshape(shape[:-1]+[-1])) else: tangent_u.append(None) # the tangent space is None mps[ims] = u[:,:rank].reshape(shape[:-1]+[-1]) vt = xp.einsum("i, ij -> ij", asxp(s), asxp(vt)) if ims == site_num-1: assert vt.size == 1 and xp.allclose(vt, 1) else: mps[ims+1] = asnumpy(tensordot(vt, mps[ims+1], ([-1],[0]))) mps_l_cano = mps.copy() mps_l_cano.to_right = False mps_l_cano.qnidx = site_num-1 else: mps_l_cano, mps_r_cano, tangent_u, tda_coeff_list = self.wfn cguess = [] for iroot in range(len(tda_coeff_list)): tda_coeff = tda_coeff_list[iroot] x = [c.flatten() for c in tda_coeff if c is not None] x = np.concatenate(x,axis=None) cguess.append(x) cguess = np.stack(cguess, axis=1) xshape = [] xsize = 0 for ims in range(site_num): if tangent_u[ims] is None: xshape.append((0,0)) else: if ims == site_num-1: xshape.append((tangent_u[ims].shape[-1], 1)) else: xshape.append((tangent_u[ims].shape[-1], mps_r_cano[ims+1].shape[0])) xsize += np.prod(xshape[-1]) logger.debug(f"DMRG-TDA H dimension: {xsize}") if USE_GPU: oe_backend = "cupy" else: oe_backend = "numpy" mps_tangent = mps_r_cano.copy() environ = Environ(mps_tangent, mpo, "R") hdiag = [] for ims in range(site_num): ltensor = environ.GetLR( "L", ims-1, mps_tangent, mpo, itensor=None, method="System" ) rtensor = environ.GetLR( "R", ims+1, mps_tangent, mpo, itensor=None, method="Enviro" ) if 
tangent_u[ims] is not None: u = asxp(tangent_u[ims]) tmp = oe.contract("abc, ded, bghe, agl, chl -> ld", ltensor, rtensor, asxp(mpo[ims]), u, u, backend=oe_backend) hdiag.append(asnumpy(tmp)) mps_tangent[ims] = mps_l_cano[ims] hdiag = np.concatenate(hdiag, axis=None) count = 0 # recover the vector-like x back to the ndarray tda_coeff if algo == "davidson": if restart: cguess = [cguess[:,i] for i in range(cguess.shape[1])] else: cguess = [np.random.random(xsize) - 0.5] precond = lambda x, e, *args: x / (hdiag - e + 1e-4) e, c = davidson( hop, cguess, precond, max_cycle=100, nroots=nroots, max_memory=64000 ) if nroots == 1: c = [c] c = np.stack(c, axis=1) elif algo == "primme": if not restart: cguess = None A = scipy.sparse.linalg.LinearOperator((xsize,xsize), matvec=multi_hop, matmat=multi_hop) M = scipy.sparse.linalg.LinearOperator((xsize,xsize), matvec=precond, matmat=precond) e, c = primme.eigsh(A, k=min(nroots,xsize), which="SA", v0=cguess, OPinv=M, method="PRIMME_DYNAMIC", tol=1e-6) else: assert False logger.debug(f"H*C times: {count}") tda_coeff_list = [] for iroot in range(nroots): tda_coeff_list.append(reshape_x(c[:,iroot])) self.e = np.array(e) self.wfn = [mps_l_cano, mps_r_cano, tangent_u, tda_coeff_list] return self.e def dump_wfn(self): r""" Dump wavefunction for restart and analysis Note ---- mps_l_cano.npz: left-canonical form of initial mps mps_r_cano.npz: right-canonical form of the initial mps tangent_u: the tangent space u of the mixed-canonical mps tda_coeff_{iroot}.npz: the tda_coeff of the ith root. 
""" mps_l_cano, mps_r_cano, tangent_u, tda_coeff_list = self.wfn # store mps_l_cano mps_r_cano mps_l_cano.dump("mps_l_cano.npz") mps_r_cano.dump("mps_r_cano.npz") # store tangent_u tangent_u_dict = {f"{i}":mat for i, mat in enumerate(tangent_u) if mat is not None} np.savez(f"tangent_u.npz", **tangent_u_dict) # store tda coeff for iroot, tda_coeff in enumerate(tda_coeff_list): tda_coeff_dict = {f"{i}":mat for i, mat in enumerate(tda_coeff) if mat is not None} np.savez(f"tda_coeff_{iroot}.npz", **tda_coeff_dict) def load_wfn(self, model): r"""Load tda wavefunction """ mps_l_cano = Mps.load(model, "mps_l_cano.npz") mps_r_cano = Mps.load(model, "mps_r_cano.npz") tangent_u_dict = np.load("tangent_u.npz") tangent_u = [tangent_u_dict[str(i)] if str(i) in tangent_u_dict.keys() else None for i in range(mps_l_cano.site_num)] tda_coeff_list = [] for iroot in range(self.nroots): tda_coeff_dict = np.load(f"tda_coeff_{iroot}.npz") tda_coeff = [tda_coeff_dict[str(i)] if str(i) in tda_coeff_dict.keys() else None for i in range(mps_l_cano.site_num)] tda_coeff_list.append(tda_coeff) self.wfn = [mps_l_cano, mps_r_cano, tangent_u, tda_coeff_list] def analysis_1ordm(self): r""" calculate one-orbital reduced density matrix of each tda root """ mps_l_cano, mps_r_cano, tangent_u, tda_coeff_list = self.wfn for iroot in range(self.nroots): tda_coeff = tda_coeff_list[iroot] rdm = None for ims in range(mps_l_cano.site_num): if tangent_u[ims] is None: assert tda_coeff[ims] is None continue mps_tangent = merge(mps_l_cano, mps_r_cano, ims+1) mps_tangent[ims] = tensordot(tangent_u[ims], tda_coeff[ims],[-1,0]) rdm_increment = mps_tangent.calc_1ordm() if rdm is None: rdm = rdm_increment else: rdm = [mat1+mat2 for mat1, mat2 in zip(rdm, rdm_increment)] dominant_config = {} for isite, mat in enumerate(rdm): quanta = np.argmax(np.diag(mat)) dominant_config[isite] = (quanta, np.diag(mat)[quanta]) logger.info(f"root: {iroot}, config: {dominant_config}") def analysis_dominant_config(self, thresh=0.8, 
alias=None, tda_m_trunc=20, return_compressed_mps=False): r""" analyze the dominant configuration of each tda root. The algorithm is to compress the tda wavefunction to a rank-1 Hartree state and get the ci coefficient of the largest configuration. Then, the configuration is subtracted from the tda wavefunction and redo the first step to get the second largest configuration. The two steps continue until the thresh is achieved. Parameters ---------- thresh: float, optional the threshold to stop the analysis procedure of each root. :math:`\sum_i |c_i|^2 > thresh`. Default is 0.8. alias: dict, optional The alias of each site. For example, ``alias={0:"v_0", 1:"v_2", 2:"v_1"}``. Default is `None`. tda_m_trunc: int, optional the ``m`` to compress a tda wavefunction. Default is 20. return_compressed_mps: bool, optional If ``True``, return the tda excited state as a single compressed mps. Default is `False`. Returns ------- configs: dict The dominant configration of each root. ``configs = {0:[(config0, config_name0, ci_coeff0),(config1, config_name1, ci_coeff1),...], 1:...}`` compressed_mps: List[renormalizer.mps.Mps] see the description in ``return_compressed_mps``. Note ---- The compressed_mps is an approximation of the tda wavefunction with ``m=tda_m_trunc``. 
""" mps_l_cano, mps_r_cano, tangent_u, tda_coeff_list = self.wfn if alias is not None: assert len(alias) == mps_l_cano.site_num compressed_mps = [] for iroot in range(self.nroots): logger.info(f"iroot: {iroot}") tda_coeff = tda_coeff_list[iroot] mps_tangent_list = [] weight = [] for ims in range(mps_l_cano.site_num): if tangent_u[ims] is None: assert tda_coeff[ims] is None continue weight.append(np.sum(tda_coeff[ims]**2)) mps_tangent = merge(mps_l_cano, mps_r_cano, ims+1) mps_tangent[ims] = asnumpy(tensordot(tangent_u[ims], tda_coeff[ims],[-1,0])) mps_tangent_list.append(mps_tangent) assert np.allclose(np.sum(weight), 1) # sort the mps_tangent from large weight to small weight mps_tangent_list = [mps_tangent_list[i] for i in np.argsort(weight,axis=None)[::-1]] coeff_square_sum = 0 mps_delete = None config_visited = [] while coeff_square_sum < thresh: if mps_delete is None: # first compress it to M=tda_m_trunc mps_rank1 = compressed_sum(mps_tangent_list, batchsize=5, temp_m_trunc=tda_m_trunc) else: mps_rank1 = compressed_sum([mps_delete] + mps_tangent_list, batchsize=5, temp_m_trunc=tda_m_trunc) if coeff_square_sum == 0 and return_compressed_mps: compressed_mps.append(mps_rank1.copy()) mps_rank1 = mps_rank1.canonicalise().compress(temp_m_trunc=1) # get config with the largest coeff config = [] for ims, ms in enumerate(mps_rank1): ms = ms.array.flatten()**2 quanta = int(np.argmax(ms)) config.append(quanta) # check if the config has been visited if config in config_visited: break config_visited.append(config) ci_coeff_list = [] for mps_tangent in mps_tangent_list: sentinel = xp.ones((1,1)) for ims, ms in enumerate(mps_tangent): sentinel = sentinel.dot(asxp(ms[:,config[ims],:])) ci_coeff_list.append(float(sentinel[0,0])) ci_coeff = np.sum(ci_coeff_list) coeff_square_sum += ci_coeff**2 if alias is not None: config_name = [f"{quanta}"+f"{alias[isite]}" for isite, quanta in enumerate(config) if quanta != 0] config_name = " ".join(config_name) 
self.configs[iroot].append((config, config_name, ci_coeff)) logger.info(f"config: {config}, {config_name}") else: self.configs[iroot].append((config, ci_coeff)) logger.info(f"config: {config}") logger.info(f"ci_coeff: {ci_coeff}, weight:{ci_coeff**2}") condition = {dof:config[idof] for idof, dof in enumerate(self.model.dofs)} mps_delete_increment = Mps.hartree_product_state(self.model, condition).scale(-ci_coeff) if mps_delete is None: mps_delete = mps_delete_increment else: mps_delete = mps_delete + mps_delete_increment logger.info(f"coeff_square_sum: {coeff_square_sum}") return self.configs, compressed_mps def merge(mpsl, mpsr, idx): """ merge two mps (mpsl, mpsr) at dix idx belongs mpsr, the other attributes are the same aas mpsl """ mps = mpsl.copy() for imps in range(idx, mpsr.site_num): mps[imps] = mpsr[imps] return mps
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 8851, 6636, 7509, 13, 76, 862, 13, 6759, 8609, 1330, 11192, 585, 313, 11, 5021, 62, 83, 22854, 62, 28484, 11, 355, 77, 32152, 11, 355, 42372, 198, 6738, 8851, ...
1.798633
8,780
from scipy.sparse.linalg.isolve.utils import make_system from scipy.sparse.linalg.isolve import _iterative from scipy._lib._util import _aligned_zeros import numpy as np import scipy from functools import partial mydict = { } _type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} w1 = scipy.__version__.split('.') scipy_version = int(w1[0]) + int(w1[1])/10.0 + int(w1[2])/100.0 def gmres(A, b, verbose=False, convergence='resid', **kwargs): """ Interface function to Scipy's GMRES to allow convergence with respect to either the actual residual or the preconditioned residual A, b: see documentation to scipy.linalg.sparse.gmres verbose: whether to print residuals to screen during run kwargs: keyword args to be passed on to scipy gmres convergence: 'resid': converge when ||Ax-b|| < tol 'presid': converge when ||M^{-1}(Ax-b)|| < tol when 'presid' is specified, only the following kwargs are accepted: x0, tol, restart, maxiter, M The use of convergence 'presid' is useful in certain circumstances particularly when A is ill-conditioned """ if convergence == 'resid': if scipy_version > 1.0: gmres_func = direct_gmres else: raise Exception("Your version of scipy does not support GMRES with residual convergence. Set convergence='presid', or upgrade scipy to a version > 1.0.") elif convergence == 'presid': if scipy_version > 1.0: gmres_func = presid_gmres else: gmres_func = direct_gmres else: raise Exception("convergence must be set to 'resid' or 'presid'") return gmres_func(A, b, verbose, **kwargs) def right_gmres(A, b, verbose=False, **kwargs): """ Interface function to Scipy's GMRES to allow the use of right-preconditioning A, b: see documentation to scipy.linalg.sparse.gmres verbose: whether to print residuals to screen during run kwargs: keyword args to be passed on to scipy gmres for this function, M must be specified! 
(thanks to Floren Balboa-Usabiaga for the code) """ if 'M' not in kwargs: raise Exception('M must be a kwarg for right_gmres') M = kwargs['M'] kwargs.pop('M') A_LO = scipy.sparse.linalg.aslinearoperator(A) M_LO = scipy.sparse.linalg.aslinearoperator(M) # Define new LinearOperator A*P^{-1} APinv_partial = partial(APinv, A=A_LO, M=M_LO) APinv_partial_LO = scipy.sparse.linalg.LinearOperator(A.shape, matvec=APinv_partial, dtype=A.dtype) # Solve system A*P^{-1} * y = b y, info, resnorms = gmres(APinv_partial_LO, b, verbose, **kwargs, convergence='presid') # Solve system P*x = y x = M_LO.matvec(y) # Return solution and info return x, info, resnorms
[ 6738, 629, 541, 88, 13, 82, 29572, 13, 75, 1292, 70, 13, 271, 6442, 13, 26791, 1330, 787, 62, 10057, 198, 6738, 629, 541, 88, 13, 82, 29572, 13, 75, 1292, 70, 13, 271, 6442, 1330, 4808, 2676, 876, 198, 6738, 629, 541, 88, 13557,...
2.399829
1,168
#!/usr/bin/python ########################################################################### ## Scripts for NSF model ---------------------------------------------- # ## --------------------------------------------------------------------- # ## # ## Copyright (c) 2018 National Institute of Informatics # ## # ## THE NATIONAL INSTITUTE OF INFORMATICS AND THE CONTRIBUTORS TO THIS # ## WORK DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING # ## ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT # ## SHALL THE NATIONAL INSTITUTE OF INFORMATICS NOR THE CONTRIBUTORS # ## BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY # ## DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # ## WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ## ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE # ## OF THIS SOFTWARE. # ########################################################################### ## Author: Xin Wang # ## Date: 31 Oct. 2018 # ## Contact: wangxin at nii.ac.jp # ########################################################################### from __future__ import absolute_import from __future__ import print_function import os import sys import importlib # Load configure sys.path.append(os.getcwd()) try: cfg = importlib.import_module(sys.argv[1]) except IndexError: print("Error: missing argument. Usage: python **.py CONFIG_NAME") sys.exit(1) except ImportError: print("Error: cannot load library: ", sys.argv[1]) sys.exit(1) sys.path.append(cfg.path_pyTools) from pyTools import display import subprocess if cfg.step1: display.self_print_with_date('Step1. 
preparing data', 'h') assert len(cfg.step1s) == 5, 'len(step1s) should be 5 %s' % (sys.argv[1]) if os.path.dirname(cfg.tmp_data_dir): tmp_data_dir = cfg.tmp_data_dir else: tmp_data_dir = os.getcwd() + os.path.sep + cfg.tmp_data_dir tmp_data_scp_dir = tmp_data_dir + os.path.sep + cfg.tmp_scp_name tmp_train_lst = tmp_data_scp_dir + os.path.sep + cfg.tmp_scp_train_name tmp_val_lst = tmp_data_scp_dir + os.path.sep + cfg.tmp_scp_val_name tmp_idx_dir = tmp_data_dir + os.path.sep + cfg.tmp_idx_dir tmp_wav_mu_dir = tmp_data_dir + os.path.sep + cfg.tmp_wav_mu_law_dir tmp_wav_float_dir = tmp_data_dir + os.path.sep + cfg.tmp_wav_float_dir try: os.mkdir(tmp_data_dir) except OSError: pass if cfg.step1s[0]: if hasattr(cfg, 'trn_list') and hasattr(cfg, 'val_list'): if os.path.isfile(cfg.trn_list) and os.path.isfile(cfg.val_list): display.self_print_with_date('step1.1 copying data lists', 'm') tmp_acous_path = ','.join(cfg.path_acous_feats) tmp_feat_ext = ','.join(cfg.ext_acous_feats) tmp_feat_dim = '_'.join([str(x) for x in cfg.dim_acous_feats]) cmd = 'python %s' % (cfg.path_scripts) + os.path.sep + 'sub_01_check_list.py' cmd = cmd + ' %s,%s' % (tmp_acous_path, cfg.path_waveform) cmd = cmd + ' %s,.wav' % (tmp_feat_ext) cmd = cmd + ' %s_1' % (tmp_feat_dim) cmd = cmd + ' %s' % (tmp_data_scp_dir) cmd = cmd + ' %s' % (cfg.trn_list) cmd = cmd + ' %s' % (cfg.val_list) exe_cmd(cmd, cfg.debug) else: display.self_print('cannot find %s %s' % (cfg.trn_list, cfg.val_list), 'error') sys.exit(1) else: display.self_print_with_date('step1.1 generating data lists', 'm') tmp_acous_path = ','.join(cfg.path_acous_feats) tmp_feat_ext = ','.join(cfg.ext_acous_feats) tmp_feat_dim = '_'.join([str(x) for x in cfg.dim_acous_feats]) cmd = 'python %s' % (cfg.path_scripts) + os.path.sep + 'sub_01_prepare_list.py' cmd = cmd + ' %s,%s' % (tmp_acous_path, cfg.path_waveform) cmd = cmd + ' %s,.wav' % (tmp_feat_ext) cmd = cmd + ' %s_1' % (tmp_feat_dim) cmd = cmd + ' %s' % (tmp_data_scp_dir) cmd = cmd + ' %f' % 
(cfg.train_utts) exe_cmd(cmd, cfg.debug) if cfg.step1s[1]: display.self_print_with_date('step1.2 pre-process waveform', 'm') tmp_wav_pre_dir = tmp_data_dir + os.path.sep + cfg.tmp_wav_pre_dir # loop over train and validation sets for tmp_lst in [tmp_train_lst, tmp_val_lst]: if os.path.isfile(tmp_lst): cmd = 'sh %s' % (cfg.path_scripts) + os.path.sep + 'sub_02_waveform_process.sh' cmd = cmd + ' %s %s %s %d' % (cfg.path_waveform, tmp_wav_pre_dir, tmp_lst, cfg.wav_samp) cmd = cmd + ' %s %s %s' % (cfg.path_pyTools_scripts, cfg.path_sox, cfg.path_sv56) exe_cmd(cmd, cfg.debug) if cfg.waveform_mu_law_bits > 0: # mu-law waveform cmd = 'sh %s' % (cfg.path_scripts)+os.path.sep+'sub_03_waveform_mulaw_float.sh' cmd = cmd + ' %s %s None' % (tmp_wav_pre_dir, tmp_wav_mu_dir) cmd = cmd + ' %s %d %s' % (tmp_lst, cfg.waveform_mu_law_bits, cfg.path_pyTools_scripts) exe_cmd(cmd, cfg.debug) # create a mdn.config for mulaw network tmp_mdn_config = os.getcwd() + os.path.sep + cfg.tmp_data_dir tmp_mdn_config = tmp_mdn_config + os.path.sep + cfg.tmp_mdn_config_name cmd = 'python %s' % (cfg.path_pyTools_scripts) cmd = cmd + os.path.sep + 'networkTool' + os.path.sep + 'netCreate.py' cmd = cmd + ' %s' % (tmp_mdn_config) cmd = cmd + ' wavenet-mu-law %d' % (cfg.waveform_mu_law_bits) exe_cmd(cmd, cfg.debug) if not os.path.isfile(tmp_mdn_config): display.self_print('Error %s not generated' % (tmp_mdn_config), 'error') sys.exit(1) else: # float waveform cmd = 'sh %s' % (cfg.path_scripts)+os.path.sep+'sub_03_waveform_mulaw_float.sh' cmd = cmd + ' %s None %s' % (tmp_wav_pre_dir, tmp_wav_float_dir) cmd = cmd + ' %s %d %s' % (tmp_lst, cfg.waveform_mu_law_bits, cfg.path_pyTools_scripts) exe_cmd(cmd, cfg.debug) else: if tmp_lst == tmp_train_lst: display.self_print('Error %s not found' % (tmp_train_lst), 'error') sys.exit(1) if cfg.step1s[2]: display.self_print_with_date('step1.3 time index files', 'm') # loop over train and validation sets for tmp_lst in [tmp_train_lst, tmp_val_lst]: if 
os.path.isfile(tmp_lst): cmd = 'sh %s' % (cfg.path_scripts) + os.path.sep + 'sub_04_timeidx_get.sh' cmd = cmd + ' %s %d %s' % (cfg.path_acous_feats[0], cfg.dim_acous_feats[0], cfg.ext_acous_feats[0]) cmd = cmd + ' %d %s %s %s' % (cfg.upsampling_rate, tmp_idx_dir, tmp_lst, cfg.path_pyTools_scripts) exe_cmd(cmd, cfg.debug) if cfg.step1s[3]: display.self_print_with_date('step1.4 data.nc generating for CURRENNT', 'm') tmp_nc_dir = tmp_data_dir + os.path.sep + cfg.tmp_nc_dir try: os.mkdir(tmp_nc_dir) except OSError: pass tmp_data_nc_config = cfg.tmp_data_nc_config for tmp_lst, tmp_sub_nc_dir in zip([tmp_train_lst, tmp_val_lst], [tmp_nc_dir + os.path.sep + cfg.tmp_nc_dir_train, tmp_nc_dir + os.path.sep + cfg.tmp_nc_dir_val]): if not os.path.isfile(tmp_lst): continue cmd = 'sh %s' % (cfg.path_scripts) + os.path.sep + 'sub_05_package_datanc.sh' cmd = cmd + ' %s %s' % (tmp_sub_nc_dir, tmp_idx_dir) if cfg.waveform_mu_law_bits > 0: # mu-law waveform cmd = cmd + ' %s %s %s %s' % (tmp_wav_mu_dir, tmp_lst, tmp_data_nc_config, cfg.path_pyTools_scripts) else: # float waveform cmd = cmd + ' %s %s %s %s' % (tmp_wav_float_dir, tmp_lst, tmp_data_nc_config, cfg.path_pyTools_scripts) exe_cmd(cmd, cfg.debug) if cfg.step1s[4]: display.self_print_with_date('step1.5 get mean/std of acoustic features', 'm') tmp_mv_file = os.getcwd() + os.path.sep + cfg.tmp_name_mean_file cmd = 'python %s' % (cfg.path_scripts) + os.path.sep + 'sub_06_norm_acousticfeature.py' cmd = cmd + ' %s' % (tmp_train_lst) cmd = cmd + ' %s %s %s NONE' % (','.join(cfg.path_acous_feats), ','.join(cfg.ext_acous_feats), '_'.join([str(dim) for dim in cfg.dim_acous_feats])) cmd = cmd + ' %s %s %s %s' % (str(cfg.f0_ext), tmp_data_scp_dir, tmp_mv_file, cfg.path_pyTools_scripts) exe_cmd(cmd, cfg.debug) # Done display.self_print_with_date('Finish data preparation', 'ok') else: display.self_print_with_date('Skip step1 (preparing data)', 'h')
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 29113, 29113, 7804, 21017, 198, 2235, 220, 12327, 82, 329, 10896, 37, 2746, 20368, 26171, 220, 1303, 198, 2235, 16529, 30934, 220, 1303, 198, 2235, 220, 220, 220, 220, 220, 220, 220, 220, 220...
1.793381
5,711
import model import corrections import codecs WORDS = model.load_obj('data') ERRORS = model.load_obj('errors')
[ 11748, 2746, 198, 11748, 26251, 198, 11748, 40481, 82, 198, 45359, 5258, 796, 2746, 13, 2220, 62, 26801, 10786, 7890, 11537, 198, 24908, 50, 796, 2746, 13, 2220, 62, 26801, 10786, 48277, 11537 ]
3.333333
33
#!/usr/bin/env python3 # encoding: utf-8 # # Copyright (c) 2010 Doug Hellmann. All rights reserved. # """Default handling. """ #end_pymotw_header import configparser import sys parser = configparser.ConfigParser() parser.add_section('bug_tracker') parser.set('bug_tracker', 'url', 'http://localhost:8080/bugs') parser.set('bug_tracker', 'username', 'dhellmann') parser.set('bug_tracker', 'password', 'secret') parser.write(sys.stdout)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 21004, 25, 3384, 69, 12, 23, 198, 2, 198, 2, 15069, 357, 66, 8, 3050, 15115, 5783, 9038, 13, 220, 1439, 2489, 10395, 13, 198, 2, 198, 37811, 19463, 9041, 13, 198, 37811, 1...
2.857143
154
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from maro.cli.utils.details_validity_wrapper import check_details_validity from maro.cli.utils.operation_lock_wrapper import operation_lock @check_details_validity @operation_lock
[ 2, 15069, 357, 66, 8, 5413, 10501, 13, 198, 2, 49962, 739, 262, 17168, 5964, 13, 628, 198, 6738, 1667, 78, 13, 44506, 13, 26791, 13, 36604, 62, 12102, 414, 62, 48553, 1330, 2198, 62, 36604, 62, 12102, 414, 198, 6738, 1667, 78, 13,...
3.569444
72
#!/usr/bin/python3 import magjoint import sys import numpy as np if len(sys.argv) < 2: print("\nUSAGE: ./magnetic_collision.py ball_joint_config visualize_only, e.g. \n python3 magnetic_collision.py two_magnets.yaml 1\n") sys.exit() balljoint_config = sys.argv[1] visualize_only = sys.argv[2]=='1' ball = magjoint.BallJoint(balljoint_config) magnets = ball.gen_magnets() if visualize_only: ball.plotMagnets(magnets) sys.exit() print('\n----------------first course search\n') grid_positions = [] for i in np.arange(-80,80,10): for j in np.arange(-80,80,10): for k in np.arange(-80,80,10): grid_positions.append([i,j,k]) sensor_values,pos = ball.generateMagneticDataGrid(grid_positions) colliders,magnetic_field_differences = ball.calculateCollisions(sensor_values,pos,1.44*10) if len(colliders)>0: print('there are %d collisions'%len(colliders)) for c,dif in zip(colliders,magnetic_field_differences): pos_diff = ((c[0][0]-c[1][0])**2+(c[0][1]-c[1][1])**2+(c[0][2]-c[1][2])**2)**0.5 if pos_diff>20: print(c) print("magnetic dif %f"%dif) print("pos_dif %f"%pos_diff) magnet_A = ball.gen_magnets() ball.rotateMagnets(magnet_A,c[0]) magnet_B = ball.gen_magnets() ball.rotateMagnets(magnet_B,c[1]) ball.compareMagnets(magnet_A,magnet_B) else: print('no collisions detected in course search, congrats!') print('\n----------------second fine search\n') grid_positions = [] for i in np.arange(-50,50,5): for j in np.arange(-50,50,5): for k in np.arange(-90,90,5): grid_positions.append([i,j,k]) sensor_values,pos = ball.generateMagneticDataGrid(grid_positions) colliders,magnetic_field_differences = ball.calculateCollisions(sensor_values,pos,1.44*5) if len(colliders)>0: print('there are %d collisions'%len(colliders)) min_value = min(magnetic_field_differences) index = magnetic_field_differences.index(min_value) print('minimum: %f index %d'%(min_value,index)) print(colliders[index]) grid_positions = [] magnetic_diffs = [] for c,dif in zip(colliders,magnetic_field_differences): 
grid_positions.append(c[0]) magnetic_diffs.append(dif) grid_positions.append(c[1]) magnetic_diffs.append(dif) ball.visualizeCloud(magnetic_diffs,grid_positions) print('\n----------------second even finer search\n') grid_positions = [] for i in np.arange(-50,50,3): for j in np.arange(-50,50,3): for k in np.arange(-90,90,3): grid_positions.append([i,j,k]) sensor_values,pos = ball.generateMagneticDataGrid(grid_positions) colliders,magnetic_field_differences = ball.calculateCollisions(sensor_values,pos,1.44*3) if len(colliders)>0: print('there are %d collisions'%len(colliders)) min_value = min(magnetic_field_differences) index = magnetic_field_differences.index(min_value) print('minimum: %f index %d'%(min_value,index)) print(colliders[index]) grid_positions = [] magnetic_diffs = [] for c,dif in zip(colliders,magnetic_field_differences): grid_positions.append(c[0]) magnetic_diffs.append(dif) grid_positions.append(c[1]) magnetic_diffs.append(dif) ball.visualizeCloud(magnetic_diffs,grid_positions)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 11748, 2153, 73, 1563, 198, 11748, 25064, 198, 11748, 299, 32152, 355, 45941, 198, 198, 361, 18896, 7, 17597, 13, 853, 85, 8, 1279, 362, 25, 198, 220, 220, 220, 3601, 7203, 59, 77, 29...
2.291696
1,433
from GraphReprBase import GraphReprBase
[ 6738, 29681, 6207, 81, 14881, 1330, 29681, 6207, 81, 14881, 628 ]
3.727273
11
import os import sys from typing import List, Tuple import matplotlib.pyplot as plt import numpy as np import pandas as pd from icecream import ic from pandas import DataFrame from stage_1.utils import as_windowed_np ic.includeContext = True max_raw_vals = {"Acc1": 32768, "Acc2": 8192, "Gyro": 32768, "Mag": 8192, "Force": 4096} max_sis = {"Acc1": 2, "Acc2": 2, "Gyro": 1000, "Mag": 2.4, "Force": 5.32} BLANK_CHAR_LABEL = "0" CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" CHAR_CLASSES = [BLANK_CHAR_LABEL] + list(CHARS) # Add 'no char'/blank class def get_label_per_t(data: DataFrame, labels: DataFrame) -> DataFrame: """Add a label for each time step in data. Args: data (DataFrame): Sensor data of shape [T, F], where T time and F is feature dimension. labels (DataFrame): Labels of shape [N, 3] containing N char labels with start and end position. Returns: label_per_t (DataFrame): Label for each time step. """ label_per_t = [] label_gen = labels.iterrows() cur_label = next(label_gen)[1] # Get the first label for data_sample in data.iterrows(): cur_t = data_sample[0] cur_start = cur_label["start"] cur_end = cur_label["stop"] if cur_t >= cur_end: # Update the label. This assumes try: cur_label = next(label_gen)[1] except StopIteration: # We don't have any labels for these time steps anymore # Just use the current label with a time step in the future. This will # result in a blank label. cur_label["start"] = cur_t + 1 cur_label["end"] = cur_t + 2 # Also update start and end labels cur_start = cur_label["start"] cur_end = cur_label["stop"] if cur_start <= cur_t < cur_end: # TODO: Use char or index of CHAR_CLASSES here? label_per_t.append(cur_label["Label"]) else: # cur_t < cur_label or cur_t >= cur_label label_per_t.append(BLANK_CHAR_LABEL) return DataFrame(label_per_t, columns=["Label"]) def split_data_by_label( data: DataFrame, labels: DataFrame ) -> List[Tuple[str, DataFrame]]: """Split data into parts that correspond to a char. 
Args: data (DataFrame): Sensor data of shape [T, F], where T time and F is feature dimension. labels (DataFrame): Labels of shape [N, 3] containing N char labels with start and end position. Returns: labeled data (List[str, DataFrame]): List of tuples containing the label char and the corresponding sensor data. """ out = [] for _, label in labels.iterrows(): df = data[(data.index >= label["start"]) & (data.index < label["stop"])] out.append((label["Label"], df)) return out def extract_blanks( data: DataFrame, labels: DataFrame, min_len_ms: int = 50 ) -> List[Tuple[str, DataFrame]]: """Extract blank parts between the labeled chars. Args: data (DataFrame): Sensor data of shape [T, F], where T time and F is feature dimension. labels (DataFrame): Labels of shape [N, 3] containing N char labels with start and end position. Returns: labeled data (List[str, DataFrame]): List of tuples containing the blank label and the corresponding sensor data. """ out = [] label_gen = labels.iterrows() # Label generator _, label = next(label_gen) for _, next_label in label_gen: df = data[(data.index >= label["stop"]) & (data.index < next_label["start"])] if df.shape[0] > 1 and df.index[-1] - df.index[0] >= min_len_ms: out.append((BLANK_CHAR_LABEL, df)) label = next_label return out def get_relevant_label_segment( sample: DataFrame, force_thresh=None, n_consecutive=1 ) -> Tuple[int, int]: """Get the relevant segment, that according to the force only contains the part where the letter was actually written. Args: sample (DataFrame): A sample containing sensor data of exactly one letter. force_thresh (Optional[float]): Threshold that is used to detect if the pen is currently used to write a letter. If 'None' use 1% of max_range. n_consecutive (int): Number of consecutive samples that are above the threshold. Returns: start (int): Start position in [ms] where 'Force' is > 0, i.e. the pen is actually writing. end (int): End of force application. 
""" assert isinstance(sample, DataFrame) force = sample["Force"].to_numpy() if force_thresh is None: force_thresh = 0.01 * max_sis["Force"] mask = force > force_thresh if n_consecutive > 1: mask = as_windowed_np(mask, window_length=n_consecutive) mask = mask.min(axis=-1) # Extend mask again, so it has the same shape as force mask = np.append(mask, [mask[-1]] * n_consecutive) start = mask.argmax(axis=0) end = mask.shape[0] - mask[::-1].argmax(axis=0) - 1 end += n_consecutive - 1 return start, end def test_pipeline(folder): """Test the data loading pipeline.""" calib = read_calibration(folder) data = read_sensor_data(folder) labels = read_labels(folder) data = apply_calibration(data, calib) labeled_data = split_data_by_label(data, labels) label, sample = labeled_data[0] start, end = get_relevant_label_segment(sample, force_thresh=0, n_consecutive=3) t_offset = sample.index.min() plt.plot(idx_to_s(sample.index, "min"), sample["Force"]) plt.plot( idx_to_s(sample.index[start], t_offset), sample["Force"].iat[start], "ro", ) plt.plot(idx_to_s(sample.index[end], t_offset), sample["Force"].iat[end], "ro") plt.savefig("test_relevant_points.png") feat_names = list(sample) fig, axes = plt.subplots(len(feat_names), figsize=(10, 20), sharex=True) time = idx_to_s(sample.index, "min") for i, f_n in enumerate(feat_names): feat = sample[f_n] axes[i].plot(time, feat) axes[i].set_title(f_n) plt.savefig("test_all_feat.png") if __name__ == "__main__": plot_histograms_per_feat(sys.argv[1])
[ 11748, 28686, 198, 11748, 25064, 198, 6738, 19720, 1330, 7343, 11, 309, 29291, 198, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 4771,...
2.402062
2,619
from django_otp.admin import OTPAdminSite
[ 6738, 42625, 14208, 62, 313, 79, 13, 28482, 1330, 440, 7250, 46787, 29123, 628 ]
3.071429
14
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved import pytest from hydra.utils import get_class, instantiate from omegaconf import OmegaConf import torch.nn.modules.loss as loss from torch.tensor import Tensor from typing import Any @pytest.mark.parametrize( "modulepath, classname, cfg, passthrough_args, passthrough_kwargs, expected", [ pytest.param( "nn.modules.loss", "BCELoss", {}, [], {"weight": Tensor([1])}, loss.BCELoss(), id="BCELossConf", ), pytest.param( "nn.modules.loss", "BCEWithLogitsLoss", {}, [], {"weight": Tensor([1]), "pos_weight": Tensor([1])}, loss.BCEWithLogitsLoss(), id="BCEWithLogitsLossConf", ), pytest.param( "nn.modules.loss", "CosineEmbeddingLoss", {}, [], {}, loss.CosineEmbeddingLoss(), id="CosineEmbeddingLossConf", ), pytest.param( "nn.modules.loss", "CTCLoss", {}, [], {}, loss.CTCLoss(), id="CTCLossConf", ), pytest.param( "nn.modules.loss", "L1Loss", {}, [], {}, loss.L1Loss(), id="L1LossConf", ), pytest.param( "nn.modules.loss", "HingeEmbeddingLoss", {}, [], {}, loss.HingeEmbeddingLoss(), id="HingeEmbeddingLossConf", ), pytest.param( "nn.modules.loss", "KLDivLoss", {}, [], {}, loss.KLDivLoss(), id="KLDivLossConf", ), pytest.param( "nn.modules.loss", "MarginRankingLoss", {}, [], {}, loss.MarginRankingLoss(), id="MarginRankingLossConf", ), pytest.param( "nn.modules.loss", "MSELoss", {}, [], {}, loss.MSELoss(), id="MSELossConf", ), pytest.param( "nn.modules.loss", "MultiLabelMarginLoss", {}, [], {}, loss.MultiLabelMarginLoss(), id="MultiLabelMarginLossConf", ), pytest.param( "nn.modules.loss", "MultiLabelSoftMarginLoss", {}, [], {"weight": Tensor([1])}, loss.MultiLabelSoftMarginLoss(), id="MultiLabelSoftMarginLossConf", ), pytest.param( "nn.modules.loss", "MultiMarginLoss", {}, [], {"weight": Tensor([1])}, loss.MultiMarginLoss(), id="MultiMarginLossConf", ), pytest.param( "nn.modules.loss", "NLLLoss", {}, [], {"weight": Tensor([1])}, loss.NLLLoss(), id="NLLLossConf", ), pytest.param( "nn.modules.loss", "NLLLoss2d", {}, 
[], {"weight": Tensor([1])}, loss.NLLLoss2d(), id="NLLLoss2dConf", ), pytest.param( "nn.modules.loss", "PoissonNLLLoss", {}, [], {}, loss.PoissonNLLLoss(), id="PoissonNLLLossConf", ), pytest.param( "nn.modules.loss", "SmoothL1Loss", {}, [], {}, loss.SmoothL1Loss(), id="SmoothL1LossConf", ), pytest.param( "nn.modules.loss", "SoftMarginLoss", {}, [], {}, loss.SoftMarginLoss(), id="SoftMarginLossConf", ), pytest.param( "nn.modules.loss", "TripletMarginLoss", {}, [], {}, loss.TripletMarginLoss(), id="TripletMarginLossConf", ), ], )
[ 2, 15069, 357, 66, 8, 3203, 11, 3457, 13, 290, 663, 29116, 13, 1439, 6923, 33876, 198, 11748, 12972, 9288, 198, 6738, 25039, 13, 26791, 1330, 651, 62, 4871, 11, 9113, 9386, 198, 6738, 267, 28917, 7807, 69, 1330, 19839, 18546, 198, 1...
1.571738
2,767
result = "" table = { 'kappa_pride': '2', 'pepe': '3', 'kappa': '0', 'look_at_this_dude': '4', 'trollface': '1' } with open("meme_or_not", "r") as f: for line in f: cs = line.split() char = '' for c in cs: char += table[c] result += chr(int(char, 5)) print(result)
[ 20274, 796, 13538, 198, 11487, 796, 1391, 198, 220, 220, 220, 705, 74, 20975, 62, 1050, 485, 10354, 705, 17, 3256, 198, 220, 220, 220, 705, 431, 431, 10354, 705, 18, 3256, 198, 220, 220, 220, 705, 74, 20975, 10354, 705, 15, 3256, ...
1.839779
181
import logging import allure logger = logging.getLogger(__name__)
[ 11748, 18931, 198, 198, 11748, 477, 495, 198, 198, 6404, 1362, 796, 18931, 13, 1136, 11187, 1362, 7, 834, 3672, 834, 8, 628, 198 ]
2.916667
24
import pytest from diofant import (Derivative, E, Function, I, Integer, Integral, O, Rational, Subs, Symbol, cos, exp, log, oo, pi, sin, sqrt, symbols) from diofant.abc import h, x, y, z f = Function('f') __all__ = () @pytest.mark.xfail(reason='https://github.com/diofant/diofant/pull/158') @pytest.mark.slow @pytest.mark.slow
[ 11748, 12972, 9288, 198, 198, 6738, 288, 952, 69, 415, 1330, 357, 28532, 452, 876, 11, 412, 11, 15553, 11, 314, 11, 34142, 11, 15995, 1373, 11, 440, 11, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, ...
2.108247
194
import mathlib import pytest @pytest.mark.windows @pytest.mark.windows @pytest.mark.mac @pytest.mark.mac
[ 11748, 10688, 8019, 198, 11748, 12972, 9288, 198, 198, 31, 9078, 9288, 13, 4102, 13, 28457, 198, 198, 31, 9078, 9288, 13, 4102, 13, 28457, 198, 198, 31, 9078, 9288, 13, 4102, 13, 20285, 198, 198, 31, 9078, 9288, 13, 4102, 13, 20285 ...
2.511628
43
#!/usr/bin/env python3 # # Copyright (c) 2019 Miklos Vajna and contributors. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """The ranges module contains functionality related to the Ranges class.""" from typing import List from typing import Optional from typing import cast class Range: """A range object represents an odd or even range of integer numbers.""" def get_start(self) -> int: """The smallest integer.""" return self.__start def get_end(self) -> int: """The largest integer.""" return self.__end def is_odd(self) -> Optional[bool]: """None for all house numbers on one side, bool otherwise.""" return self.__is_odd class Ranges: """A Ranges object contains an item if any of its Range objects contains it.""" def get_items(self) -> List[Range]: """The list of contained Range objects.""" return self.__items # vim:set shiftwidth=4 softtabstop=4 expandtab:
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 198, 2, 15069, 357, 66, 8, 13130, 17722, 33280, 39838, 2616, 290, 20420, 13, 198, 2, 5765, 286, 428, 2723, 2438, 318, 21825, 416, 257, 347, 10305, 12, 7635, 5964, 326, 460, ...
3.038576
337
#!/usr/bin/env python3 import os import sys if 'pypy' in sys.executable: sys.path.append("./captcha") from captcha.audio import AudioCaptcha else: from captcha.captcha.audio import AudioCaptcha import random import secrets CHARSET = "0123456789" if __name__ == "__main__": main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 28686, 198, 11748, 25064, 198, 198, 361, 705, 79, 4464, 88, 6, 287, 25064, 13, 18558, 18187, 25, 198, 220, 220, 220, 25064, 13, 6978, 13, 33295, 7, 1911, 14, 27144, ...
2.640351
114
import datetime as dt if __name__ == "__main__": today = dt.date.today() next_fm: dt.date = next_festa_major(today) print(f"Today is {today}. The next festa major will be on {next_fm}")
[ 11748, 4818, 8079, 355, 288, 83, 628, 628, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1909, 796, 288, 83, 13, 4475, 13, 40838, 3419, 198, 220, 220, 220, 1306, 62, 38353, 25, 288, 83, 13, 4475...
2.463415
82
from django.template.defaultfilters import register @register.filter(name='add') def add(d, k): '''Returns the given key from a dictionary.''' return d + k - 1
[ 6738, 42625, 14208, 13, 28243, 13, 12286, 10379, 1010, 1330, 7881, 628, 198, 31, 30238, 13, 24455, 7, 3672, 11639, 2860, 11537, 198, 4299, 751, 7, 67, 11, 479, 2599, 198, 220, 220, 220, 705, 7061, 35561, 262, 1813, 1994, 422, 257, 2...
3.072727
55
import random if __name__ == "__main__": main()
[ 201, 198, 11748, 4738, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 201, 198, 220, 220, 220, 1388, 3419, 201, 198 ]
1.780488
41
@given(u'a input markdown file') @when(u'we convert that file through pandoc') @then(u'we have a docx file as output')
[ 31, 35569, 7, 84, 6, 64, 5128, 1317, 2902, 2393, 11537, 198, 198, 31, 12518, 7, 84, 6, 732, 10385, 326, 2393, 832, 19798, 420, 11537, 198, 198, 31, 8524, 7, 84, 6, 732, 423, 257, 2205, 87, 2393, 355, 5072, 11537, 198 ]
2.813953
43
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys import os import itertools import time import datetime import threading import traceback import shutil import re import math import wx import pygame from pygame.locals import MOUSEBUTTONDOWN, MOUSEBUTTONUP, KEYDOWN, KEYUP, USEREVENT import cw from cw.util import synclock # build_exe.pyによって作られる一時モジュール # cw.versioninfoからビルド時間の情報を得る try: import versioninfo except ImportError: versioninfo = None class _Singleton(object): """継承専用クラス""" _mutex_postevent = threading.Lock() @synclock(_mutex_postevent) def post_pygameevent(event): """pygameイベントをキューへ投入する。 投入に失敗した場合は一度だけ入力イベントを クリアしてからの再投入を試みる。 """ try: pygame.event.post(event) except: # 入力イベントが輻輳している場合はクリアする cw.cwpy.clear_inputevents() pygame.event.post(event) if __name__ == "__main__": main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 201, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 201, 198, 11748, 25064, 201, 198, 11748, 28686, 201, 198, 11748, 340, 861, 10141, 201, 198, 11748, 640, 201, ...
1.777143
525
from .base import BaseHandler from prometheus_client import REGISTRY, generate_latest, CONTENT_TYPE_LATEST from tornado import web import ipaddress
[ 6738, 764, 8692, 1330, 7308, 25060, 198, 6738, 1552, 36916, 62, 16366, 1330, 23337, 1797, 40405, 11, 7716, 62, 42861, 11, 22904, 3525, 62, 25216, 62, 43, 1404, 6465, 198, 6738, 33718, 1330, 3992, 198, 11748, 20966, 21975, 628 ]
3.820513
39
import os import time import hashlib import config import math from get_pair_file_extra import generate_pair_file from get_pair_file_2_extra import generate_pair_file2 from blocking import generate_pair_by_blocking, update_result_to_intfile import blocking import numpy as np from collections import defaultdict def build_save_pairfile_3(pairfile_path, name_freq_file_path, internal_pairfile_path): """ Please see MINDFIRL design document to see the format of input pairfile The MINDFIRL system internally need 3 files: a different pairfile, file1, and file2 This function turn the input pairfile and name_freq_file into those 3 files """ table_head = 'ID,voter_reg_num,first_name,last_name,dob,sex,race,info1,info2,info3,info4,info5,type,file_id\n' fout = open(internal_pairfile_path, 'w+') fout.write(table_head) pairfile = open(pairfile_path, 'r') cnt = 0 for l in pairfile: if cnt != 0: l = l.strip().split(',') newline = [l[0], l[4], l[5], l[6], l[7], l[8], l[9], l[10], l[11], l[12], l[13], l[14]] newline.append('1') if cnt%2 == 0: newline.append('1-A') else: newline.append('1-B') newline = ','.join(newline) fout.write(newline+'\n') cnt += 1 fout.close() pairfile.close() def get_block_num(block_id, pf_file): """ get how many blocks exist in the pf_file """ pair_id = list() with open(pf_file, 'r') as fin: for line in fin: data = line.rstrip().split(',') pair_id.append(int(data[0])) pair_num = 0 for cur_block in block_id: flag = False for value in cur_block: if value in pair_id: flag = True break if flag: pair_num += 1 return pair_num def save_project(mongo, data): """ by pair file """ project_name = data['project_name'] project_des = data['project_des'] owner = data['owner'] pair_file = data['pair_file'] name_freq_file = data['name_freq_file'] pairfile_path = os.path.join(config.DATA_DIR, 'database', owner+'_'+project_name+'_pairfile.csv') name_freq_file_path = os.path.join(config.DATA_DIR, 'database', owner+'_'+project_name+'_freqfile.csv') internal_pairfile_path = 
os.path.join(config.DATA_DIR, 'internal', owner+'_'+project_name+'_pairfile.csv') pf_path = os.path.join(config.DATA_DIR, 'internal', owner+'_'+project_name+'_pf.csv') result_path = os.path.join(config.DATA_DIR, 'internal', owner+'_'+project_name+'_result.csv') final_result_path = os.path.join(config.DATA_DIR, 'internal', owner+'_'+project_name+'_finalresult.csv') # create result file f = open(result_path, 'w+') f.close() f = open(final_result_path, 'w+') f.close() pair_file.save(pairfile_path) name_freq_file.save(name_freq_file_path) build_save_pairfile_3(pairfile_path, name_freq_file_path, internal_pairfile_path) #pf_result = generate_pair_file2(internal_pairfile_path, name_freq_file_path, pf_path) pf_result = generate_pair_file(internal_pairfile_path, internal_pairfile_path, internal_pairfile_path, pf_path) total_pairs = get_total_pairs_from_pairfile(internal_pairfile_path) # get block_id #block_id = get_blockid_from_groupfile(groupfile_path) block_id = get_blockid_from_pairfile(pairfile_path) assigner = Assign_generator(internal_pairfile_path, block_id) assignee_items = data['assignee_area'].rstrip(';').split(';') assignee_list = list() assignee_stat = list() for assignee_item in assignee_items: cur_assignee, cur_kapr, cur_percentage, display_mode, isfull = assignee_item.split(',') if display_mode.lower() == 'base': isfull = 'true' assignee_list.append(cur_assignee) percentage = float(cur_percentage)/100.0 tmp_file = os.path.join(config.DATA_DIR, 'internal', owner+'_'+cur_assignee+'_'+project_name+'_pairfile.csv') #assigned_id = assigner.random_assign_pairfile(tmp_file=tmp_file, pair_num=int(total_pairs*percentage)) assigned_id = assigner.random_assign(tmp_file=tmp_file, pair_num=math.ceil(total_pairs*percentage), block_id=block_id) pf_file = os.path.join(config.DATA_DIR, 'internal', owner+'_'+project_name+'_'+cur_assignee+'_pf.csv') # TODO pf_result = generate_pair_file2(tmp_file, name_freq_file_path, pf_file) delete_file(tmp_file) total_blocks = 
get_block_num(block_id=block_id, pf_file=pf_file) # create assignee result file cur_result = os.path.join(config.DATA_DIR, 'internal', owner+'_'+project_name+'_'+cur_assignee+'_result.csv') f = open(cur_result, 'w+') f.close() assignee_stat.append({ 'assignee': cur_assignee, 'pf_path': pf_file, 'result_path': cur_result, 'assigned_id': assigned_id, 'current_page': 0, #'page_size': math.ceil(int(total_pairs*percentage)/6), 'page_size': total_blocks, 'pair_idx': 0, 'total_pairs': get_block_id_len(assigned_id), 'kapr_limit': cur_kapr, 'display_mode': display_mode, 'isfull': isfull, 'current_kapr': 0, }) project_key = owner+'-'+project_name+str(time.time()) project_key = project_key.encode('utf-8') pid = hashlib.sha224(project_key).hexdigest() project_data = { 'pid': pid, 'project_name': project_name, 'project_des': project_des, 'owner': owner, 'created_by': 'pairfile', 'pairfile_path': pairfile_path, 'internal_pairfile_path': internal_pairfile_path, 'pf_path': pf_path, 'result_path': result_path, 'final_result_path': final_result_path, 'block_id': block_id, 'assignee': assignee_list, 'assignee_stat': assignee_stat } mongo.db.projects.insert(project_data) return pid def save_project2(mongo, data): """ by blocking """ project_name = data['project_name'] project_des = data['project_des'] owner = data['owner'] # generate pair_file for record linkage file1_path = os.path.join(config.DATA_DIR, 'database', owner+'_'+project_name+'_file1.csv') file2_path = os.path.join(config.DATA_DIR, 'database', owner+'_'+project_name+'_file2.csv') intfile_path = os.path.join(config.DATA_DIR, 'internal', owner+'_'+project_name+'_intfile.csv') pairfile_path = os.path.join(config.DATA_DIR, 'internal', owner+'_'+project_name+'_pairfile.csv') pf_path = os.path.join(config.DATA_DIR, 'internal', owner+'_'+project_name+'_pf.csv') result_path = os.path.join(config.DATA_DIR, 'internal', owner+'_'+project_name+'_result.csv') file1 = data['file1'] file2 = data['file2'] file1.save(file1_path) 
file2.save(file2_path) total_pairs, block_id = generate_pair_by_blocking(blocking=data['blocking'], file1=file1_path, file2=file2_path, intfile=intfile_path, pair_file=pairfile_path) # if blocking_result is False, need to consider this pf_result = generate_pair_file(pairfile_path, file1_path, file2_path, pf_path) # create result file f = open(result_path, 'w+') f.close() # assign the pairfile to each assignee, generate pf_file for them assigner = Assign_generator(pairfile_path, block_id) assignee_items = data['assignee_area'].rstrip(';').split(';') assignee_list = list() assignee_stat = list() for assignee_item in assignee_items: cur_assignee, cur_kapr, cur_percentage, display_mode, isfull = assignee_item.split(',') assignee_list.append(cur_assignee) percentage = float(cur_percentage)/100.0 tmp_file = os.path.join(config.DATA_DIR, 'internal', owner+'_'+cur_assignee+'_'+project_name+'_pairfile.csv') assigned_id = assigner.random_assign(tmp_file=tmp_file, pair_num=math.ceil(total_pairs*percentage), block_id=block_id) pf_file = os.path.join(config.DATA_DIR, 'internal', owner+'_'+project_name+'_'+cur_assignee+'_pf.csv') pf_result = generate_pair_file(tmp_file, file1_path, file2_path, pf_file) delete_file(tmp_file) total_blocks = get_block_num(block_id=block_id, pf_file=pf_file) # create assignee result file cur_result = os.path.join(config.DATA_DIR, 'internal', owner+'_'+project_name+'_'+cur_assignee+'_result.csv') f = open(cur_result, 'w+') f.close() assignee_stat.append({ 'assignee': cur_assignee, 'pf_path': pf_file, 'result_path': cur_result, 'assigned_id': assigned_id, 'current_page': 0, 'page_size': total_blocks, 'kapr_limit': cur_kapr, 'current_kapr': 0, 'pair_idx': 0, 'total_pairs': pf_result['size'], 'display_mode': display_mode, 'isfull': isfull, }) project_key = owner+'-'+project_name+str(time.time()) project_key = project_key.encode('utf-8') pid = hashlib.sha224(project_key).hexdigest() project_data = { 'pid': pid, 'project_name': project_name, 
'project_des': project_des, 'owner': owner, 'created_by': 'blocking', 'blocking_on': data['blocking'], 'block_id': block_id, 'file1_path': file1_path, 'file2_path': file2_path, 'intfile_path': intfile_path, 'pairfile_path': pairfile_path, 'pf_path': pf_path, 'result_path': result_path, 'assignee': assignee_list, 'assignee_stat': assignee_stat } mongo.db.projects.insert(project_data) return pid def get_assignment_status(mongo, username, pid): """ assignment status = { 'current_page': 1, 'page_size': 6 } """ assignment = mongo.db.projects.find_one({'pid': pid}) assignee_stat = assignment['assignee_stat'] user_idx = 0 while user_idx < len(assignee_stat) and assignee_stat[user_idx]['assignee'] != username: user_idx += 1 if user_idx == len(assignee_stat): print("error: cannot find user as assignee in this project, pid: %s, username: %s" % (pid, username)) return assignee_stat[user_idx] current_page = assignee_stat[user_idx]['current_page'] page_size = assignee_stat[user_idx]['page_size'] kapr_limit = assignee_stat[user_idx]['kapr_limit'] current_kapr = assignee_stat[user_idx]['current_kapr'] ret = { 'current_page': int(current_page), 'page_size': int(page_size), 'kapr_limit': float(kapr_limit), 'current_kapr': float(current_kapr) } return ret def get_data_mode(assignment_id, ids, r, data_mode='masked', default_mode='M'): """ if is None, then insert 'M' into the redis """ data_modes = { 'base': ['base', 'base', 'base', 'base', 'base', 'base', 'base', 'base', 'base', 'base', 'base'], 'full': ['full', 'full', 'full', 'full', 'full', 'full', 'full', 'full', 'full', 'full', 'full'], 'masked': ['masked', 'masked', 'masked', 'masked', 'masked', 'masked', 'masked', 'masked', 'masked', 'masked', 'masked'], 'minimum': ['partial', 'partial', 'partial', 'partial', 'full', 'masked', 'masked', 'masked', 'masked', 'masked', 'masked'], 'moderate': ['partial', 'partial', 'partial', 'partial', 'full', 'masked', 'masked', 'masked', 'masked', 'masked', 'masked'], } mode_dict = {'M': 
'masked', 'P': 'partial', 'F': 'full', 'B': 'base'} mode_dict2 = {'masked': 'M', 'partial': 'P', 'full': 'F', 'base': 'B'} data_mode_list = [] for (id1, id2) in ids: cur_list = [] for attribute_id1 in id1: key = assignment_id + '-' + attribute_id1 mode = r.get(key) if mode != None: if mode in ['masked', 'partial', 'full', 'base']: cur_list.append(mode) else: cur_list.append(mode_dict[mode]) else: attribute = int(attribute_id1.split('-')[-1]) default_mode = data_modes[data_mode][attribute] r.set(key, mode_dict2[default_mode]) cur_list.append(default_mode) data_mode_list.append(cur_list) return data_mode_list def get_conflict_data_mode(pid, ids, mongo, r, manager_assignment_id, isfull=False): """ the data mode for resolve conflicts is the Union of each assignee's data mode """ if isfull: data_mode_list = len(ids)*[11*['base']] return data_mode_list data_mode_list = len(ids)*[11*['masked']] project = mongo.db.projects.find_one({'pid': pid}) assignee_stat = project['assignee_stat'] for assignee in assignee_stat: username = assignee['assignee'] assignment_id = pid + '-' + username cur_data_mode_list = get_data_mode(assignment_id, ids, r) for i in range(len(ids)): data_mode_list[i] = _union_data_mode(data_mode_list[i], cur_data_mode_list[i]) # insert data mode to redis as manager role id1, id2 = ids[i] j = 0 for attribute_id1 in id1: key = manager_assignment_id + '-' + attribute_id1 r.set(key, data_mode_list[i][j]) j += 1 return data_mode_list def save_working_answers(assignment_id, data, r): """ save answered responses to redis data: string """ answers = list() for d in data: if d['type'] == 'final_answer': answers.append(d['value']) working_answers = ','.join(answers) key = assignment_id + '-working_answers' r.delete(key) r.set(key, working_answers) return True def save_answers(mongo, pid, username, data): """ save one page answers to file """ data_to_write = list() for d in data: if d['type'] == 'final_answer': answer = d['value'] pair_num = 
int(answer.split('a')[0][1:]) choice = int(answer.split('a')[1]) decision = 1 if choice > 3 else 0 line = ','.join([str(pair_num), str(decision), str(choice)]) data_to_write.append(line) filename = get_assignee_result_path(mongo=mongo, pid=pid, assignee=username) with open(filename, 'a') as f: for item in data_to_write: f.write(item + '\n') return True def update_resolve_conflicts(mongo, pid): """ update the result file if pair_id not in the conflicts: copy it to the final result else: use the resolve conflicts result as the final result """ conflict_project = mongo.db.conflicts.find_one({'pid': pid}) conflict_result = conflict_project['result_path'] final_answer = dict() with open(conflict_result, 'r') as fin: for line in fin: answer = line.rstrip().split(',') pair_num = int(answer[0]) final_answer[pair_num] = line.rstrip() project = mongo.db.projects.find_one({'pid': pid}) result_file = project['result_path'] results = dict() with open(result_file, 'r') as fin: for line in fin: if line.strip() == '': continue pair_id, decision, choice = line.strip().split(',') if int(pair_id) in final_answer: results[int(pair_id)] = final_answer[int(pair_id)] else: results[int(pair_id)] = line.strip() with open(result_file, 'w+') as fout: for pair_id in sorted(results): fout.write(results[pair_id]+'\n') return True def save_resolve_conflicts(mongo, pid, username, data): """ save the resolve conflicts result file """ data_to_write = list() for d in data: if d['type'] == 'final_answer': answer = d['value'] pair_num = int(answer.split('a')[0][1:]) choice = int(answer.split('a')[1]) decision = 1 if choice > 3 else 0 line = ','.join([str(pair_num), str(decision), str(choice)]) data_to_write.append(line) project = mongo.db.conflicts.find_one({'pid': pid}) filename = project['result_path'] print(filename) with open(filename, 'a') as f: for item in data_to_write: f.write(item + '\n') return True def get_current_block(mongo, pid, assignee): """ get pair id for current block (one block per 
page) """ assignment = mongo.db.projects.find_one({'pid': pid}) assignee_stat = assignment['assignee_stat'] for item in assignee_stat: if item['assignee'] == assignee: cur_assignee = item break pair_idx = cur_assignee['pair_idx'] assigned_id = cur_assignee['assigned_id'] pos = 0 i = 0 while pair_idx != pos and i < len(assigned_id): pos += len(assigned_id[i]) i += 1 ret = assigned_id[i] return ret, pair_idx def combine_result(mongo, pid): """ combine assignee result file into final result file. if the answer are the same, just keep one. """ project = mongo.db.projects.find_one({'pid': pid}) result_file = project['result_path'] pairfile_path = project['pairfile_path'] results = list() answers = dict() assignee_stat = project['assignee_stat'] for assignee in assignee_stat: cur_result = assignee['result_path'] with open(cur_result, 'r') as fin: for line in fin: if line: pair_id, decision, choice = line.rstrip().split(',') pair_id = int(pair_id) decision = int(decision) if pair_id not in answers: answers[pair_id] = decision results.append(line) else: if answers[pair_id] == decision: continue else: results.append(line) # reset (cannot reset yet. 
resolve conflict need assignee's result) #with open(cur_result, 'w+') as fout: # fout.write('') with open(result_file, 'a') as fout: for item in results: fout.write(item) return True def get_users_choices(mongo, pid, indices): """ Retures: { pair_num: [[username, decision, choice], [username, decision, choice]], pair_num: [[username, decision, choice], [username, decision, choice]], } """ choices = defaultdict(list) choice_map = {1:'H', 2:'M', 3:'L', 4:'L', 5:'M', 6:'H'} project = mongo.db.projects.find_one({'pid': pid}) assignee_stat = project['assignee_stat'] for assignee in assignee_stat: result_path = assignee['result_path'] with open(result_path, 'r') as fin: data = fin.readlines() answers = [line.rstrip().split(',') for line in data if len(line.rstrip()) > 0] for answer in answers: if int(answer[0]) in indices: choices[int(answer[0])].append([assignee['assignee'], int(answer[1]), choice_map[int(answer[2])]]) choice_cnt = dict() for idx in indices: choice_cnt[idx] = list([0, 0]) for k, v in choices.items(): for item in v: decision = item[1] choice_cnt[k][decision] += 1 return choices, choice_cnt
[ 11748, 28686, 198, 11748, 640, 198, 11748, 12234, 8019, 198, 11748, 4566, 198, 11748, 10688, 198, 6738, 651, 62, 24874, 62, 7753, 62, 26086, 1330, 7716, 62, 24874, 62, 7753, 198, 6738, 651, 62, 24874, 62, 7753, 62, 17, 62, 26086, 1330...
2.162124
9,098
"""Manages a Vision Card spreadsheet.""" from wotv_bot_common import ExposableException from admin_utils import AdminUtils from worksheet_utils import WorksheetUtils from vision_card_common import VisionCard class VisionCardManager: """Manages a Vision Card spreadsheet.""" def findVisionCardRow(self, user_name: str, search_text: str): """Performs a fuzzy lookup for a unit, returning the row number and the text from within the one matched cell.""" return WorksheetUtils.fuzzyFindRow(self.spreadsheet_app, self.vision_card_spreadsheet_id, user_name, search_text, "B") def addVisionCardRow(self, user_id: str, name: str, url: str, above_or_below: str, row_1_based: str): """Add a new row for a Vision Card. The above_or_below parameter needs to be either the string 'above' or 'below'. The row should be in 1-based notation, i.e. the first row is row 1, not row 0. """ if not AdminUtils.isAdmin(self.spreadsheet_app, self.access_control_spreadsheet_id, user_id): raise ExposableException('You do not have permission to add a vision card.') spreadsheet = self.spreadsheet_app.get(spreadsheetId=self.vision_card_spreadsheet_id).execute() allRequests = WorksheetUtils.generateRequestsToAddRowToAllSheets( spreadsheet, int(row_1_based), above_or_below, True, # Set a header column... 'B', # ... On the second column (A1 notation) name, # With text content being the vision card name url) # As a hyperlink to the url requestBody = { 'requests': [allRequests] } # Execute the whole thing as a batch, atomically, so that there is no possibility of partial update. 
self.spreadsheet_app.batchUpdate(spreadsheetId=self.vision_card_spreadsheet_id, body=requestBody).execute() return @staticmethod def intOrNone(rawValueString) -> int: """Parse a raw value string and return either the integer it represents, or None if it does not represent an integer.""" try: return int(rawValueString, 10) except ValueError: return None @staticmethod def valueOrNone(rowdata: [], index: int): """Return either the nth element of the specified array, or None if the array does not have that many elements.""" if len(rowdata) > index: return rowdata[index] return None @staticmethod def valueOrEmpty(value): """Either return a string representation of the supplied value or, if the supplied value is None, return the empty string.""" if value is not None: return str(value) return '' @staticmethod def toMultiLineString(values: []): """If the supplied list is not None and is a non-empty list of items, returns a string representation of the elements of the list, joined by newlines. Otherwise, returns the empty string.""" if values is None or len(values) == 0: return '' return '\n'.join(values) @staticmethod def fromMultiLineString(value: []): """If the supplied string is not None and is non-empty, returns a list representation of the lines in the string. Otherwise, returns the empty string.""" if value is None: return '' return str(value).splitlines() def __readVisionCardFromRawRow(self, row): """Read a VisionCard object out of a raw row of strings from the spreadsheet. 
The first element of the row must be the vision card name.""" # Columns: # Name,Awakening,Level,Cost,HP,DEF,TP,SPR,AP,DEX,ATK,AGI,MAG,Luck,Party Ability,Bestowed Abilities # (B) ..........................................................................(Q) name_from_sheet = VisionCardManager.valueOrNone(row, 0) # TODO: Read awakening and level once they are available # awakening = VisionCardManager.valueOrNone(row, 1) # level = VisionCardManager.intOrNone(VisionCardManager.valueOrNone(row, 2)) cost = VisionCardManager.intOrNone(VisionCardManager.valueOrNone(row, 3)) hp_value = VisionCardManager.intOrNone(VisionCardManager.valueOrNone(row, 4)) def_value = VisionCardManager.intOrNone(VisionCardManager.valueOrNone(row, 5)) tp_value = VisionCardManager.intOrNone(VisionCardManager.valueOrNone(row, 6)) spr_value = VisionCardManager.intOrNone(VisionCardManager.valueOrNone(row, 7)) ap_value = VisionCardManager.intOrNone(VisionCardManager.valueOrNone(row, 8)) dex_value = VisionCardManager.intOrNone(VisionCardManager.valueOrNone(row, 9)) atk_value = VisionCardManager.intOrNone(VisionCardManager.valueOrNone(row, 10)) agi_value = VisionCardManager.intOrNone(VisionCardManager.valueOrNone(row, 11)) mag_value = VisionCardManager.intOrNone(VisionCardManager.valueOrNone(row, 12)) luck_value = VisionCardManager.intOrNone(VisionCardManager.valueOrNone(row, 13)) party_ability = VisionCardManager.valueOrNone(row, 14) bestowed_abilities = VisionCardManager.fromMultiLineString(VisionCardManager.valueOrNone(row, 15)) result = VisionCard( name_from_sheet, cost, hp_value, def_value, tp_value, spr_value, ap_value, dex_value, atk_value, agi_value, mag_value, luck_value, party_ability, bestowed_abilities) return result def readVisionCardByName(self, user_name: str, user_id: str, vision_card_name: str) -> VisionCard: """Read and return a VisionCard containing the stats for the specified vision card name, for the given user. Set either the user name or the user ID, but not both. 
If the ID is set, the tab name for the lookup is done as an indirection through the access control spreadsheet to map the ID of the user to the correct tab. This is best for self-lookups, so that even if a user changes their own nickname, they are still reading their own data and not the data of, e.g., another user who has their old nickname. """ if (user_name is not None) and (user_id is not None): print('internal error: both user_name and user_id specified. Specify one or the other, not both.') raise ExposableException('Internal error') if user_id is not None: user_name = AdminUtils.findAssociatedTab(self.spreadsheet_app, self.access_control_spreadsheet_id, user_id) row_number, _ = self.findVisionCardRow(user_name, vision_card_name) # We have the location. Get the value! range_name = WorksheetUtils.safeWorksheetName(user_name) + '!B' + str(row_number) + ':Q' + str(row_number) result = self.spreadsheet_app.values().get(spreadsheetId=self.vision_card_spreadsheet_id, range=range_name).execute() rows = result.get('values', []) if not rows: raise ExposableException('{0} is not tracking any data for vision card {1}'.format(user_name, vision_card_name)) return self.__readVisionCardFromRawRow(rows[0]) def setVisionCard(self, user_id: str, vision_card: VisionCard) -> None: """Copy the vision card data from the specified object into the spreadsheet.""" user_name = AdminUtils.findAssociatedTab(self.spreadsheet_app, self.access_control_spreadsheet_id, user_id) row_index_1_based, _ = self.findVisionCardRow(user_name, vision_card.Name) spreadsheet = self.spreadsheet_app.get(spreadsheetId=self.vision_card_spreadsheet_id).execute() sheet_id = None for sheet in spreadsheet['sheets']: sheetTitle = sheet['properties']['title'] if sheetTitle == user_name: sheet_id = sheet['properties']['sheetId'] break if sheet_id is None: raise ExposableException( 'Internal error: sheet not found for {0}.'.format(user_name)) # Columns: # 
Name,Awakening,Level,Cost,HP,DEF,TP,SPR,AP,DEX,ATK,AGI,MAG,Luck,Party Ability,Bestowed Abilities # (B) ..........................................................................(Q) new_values = [] # TODO: Write awakening and level once they are available new_values.append('') # Awakening new_values.append('') # Level new_values.append(VisionCardManager.valueOrEmpty(vision_card.Cost)) new_values.append(VisionCardManager.valueOrEmpty(vision_card.HP)) new_values.append(VisionCardManager.valueOrEmpty(vision_card.DEF)) new_values.append(VisionCardManager.valueOrEmpty(vision_card.TP)) new_values.append(VisionCardManager.valueOrEmpty(vision_card.SPR)) new_values.append(VisionCardManager.valueOrEmpty(vision_card.AP)) new_values.append(VisionCardManager.valueOrEmpty(vision_card.DEX)) new_values.append(VisionCardManager.valueOrEmpty(vision_card.ATK)) new_values.append(VisionCardManager.valueOrEmpty(vision_card.AGI)) new_values.append(VisionCardManager.valueOrEmpty(vision_card.MAG)) new_values.append(VisionCardManager.valueOrEmpty(vision_card.Luck)) new_values.append(VisionCardManager.valueOrEmpty(vision_card.PartyAbility)) new_values.append(VisionCardManager.toMultiLineString(vision_card.BestowedEffects)) allRequests = [WorksheetUtils.generateRequestToSetRowText(sheet_id, row_index_1_based, 'C', new_values)] requestBody = { 'requests': [allRequests] } # Execute the whole thing as a batch, atomically, so that there is no possibility of partial update. self.spreadsheet_app.batchUpdate(spreadsheetId=self.vision_card_spreadsheet_id, body=requestBody).execute() def searchVisionCardsByAbility(self, user_name: str, user_id: str, search_text: str) -> [VisionCard]: """Search for and return all VisionCards matching the specified search text, for the given user. Returns an empty list if there are no matches. Set either the user name or the user ID, but not both. 
If the ID is set, the tab name for the lookup is done as an indirection through the access control spreadsheet to map the ID of the user to the correct tab. This is best for self-lookups, so that even if a user changes their own nickname, they are still reading their own data and not the data of, e.g., another user who has their old nickname. """ if (user_name is not None) and (user_id is not None): print('internal error: both user_name and user_id specified. Specify one or the other, not both.') raise ExposableException('Internal error') if user_id is not None: user_name = AdminUtils.findAssociatedTab(self.spreadsheet_app, self.access_control_spreadsheet_id, user_id) party_ability_row_tuples = WorksheetUtils.fuzzyFindAllRows( self.spreadsheet_app, self.vision_card_spreadsheet_id, user_name, search_text, 'P', 2) bestowed_ability_row_tuples = WorksheetUtils.fuzzyFindAllRows( self.spreadsheet_app, self.vision_card_spreadsheet_id, user_name, search_text, 'Q', 2) if len(party_ability_row_tuples) == 0 and len(bestowed_ability_row_tuples) == 0: return [] # Accumulate all the matching rows all_matching_row_numbers = set() for (row_number, _) in party_ability_row_tuples: all_matching_row_numbers.add(row_number) for (row_number, _) in bestowed_ability_row_tuples: all_matching_row_numbers.add(row_number) all_matching_row_numbers = sorted(all_matching_row_numbers) range_name = WorksheetUtils.safeWorksheetName(user_name) + '!B2:Q' # Fetch everything from below the header row, starting with the name result = self.spreadsheet_app.values().get(spreadsheetId=self.vision_card_spreadsheet_id, range=range_name).execute() all_rows = result.get('values', []) all_matching_vision_cards = [] for row_number in all_matching_row_numbers: all_matching_vision_cards.append(self.__readVisionCardFromRawRow(all_rows[row_number - 1])) # -1 for the header row return all_matching_vision_cards def addUser(self, user_name: str) -> None: """Adds the user with the specified name by creating a new tab 
that duplicates the first tab in the spreadsheet. Raises an exception on failure. Otherwise, you may assume that the new sheet was successfully created. """ spreadsheet = self.spreadsheet_app.get(spreadsheetId=self.vision_card_spreadsheet_id).execute() home_sheet_id = spreadsheet['sheets'][0]['properties']['sheetId'] allRequests = [WorksheetUtils.generateRequestToDuplicateSheetInAlphabeticOrder( spreadsheet, home_sheet_id, user_name, True)] # True to skip the 'Home' tab, the first tab in the spreadsheet, for sorting purposes requestBody = { 'requests': [allRequests] } # Execute the whole thing as a batch, atomically, so that there is no possibility of partial update. self.spreadsheet_app.batchUpdate(spreadsheetId=self.vision_card_spreadsheet_id, body=requestBody).execute() return
[ 37811, 5124, 1095, 257, 19009, 5172, 30117, 526, 15931, 198, 6738, 266, 313, 85, 62, 13645, 62, 11321, 1330, 1475, 1930, 540, 16922, 198, 6738, 13169, 62, 26791, 1330, 32053, 18274, 4487, 198, 6738, 2499, 25473, 62, 26791, 1330, 10933, ...
2.694602
4,928
#!/usr/bin/env python # -*- coding: utf-8 -*- """ GPIO module Only for easily use and clean-code (and still looks crappy..) """ import helpers, time import RPi.GPIO as GPIO GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) pc_pin = 4 relay_pin1 = 26 # monitor relay_pin2 = 19 # pc relay_pin3 = 13 # sound relay_pin4 = 06 # light out = [pc_pin, relay_pin1, relay_pin2, relay_pin3, relay_pin4] for i in out: GPIO.setup(i, GPIO.OUT); GPIO.output(i, GPIO.LOW)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 198, 50143, 8265, 198, 5514, 329, 3538, 779, 290, 3424, 12, 8189, 357, 392, 991, 3073, 40805, 492, 8, 198...
2.42487
193
from collections import OrderedDict from datetime import timedelta from itertools import product from django.contrib.auth import get_user_model from django.contrib.postgres.fields import JSONField from django.core.exceptions import ValidationError from django.core.validators import RegexValidator from django.db import models from django.db.models import When, Case, Q from django.urls import reverse, NoReverseMatch from django.utils import dateformat from django.utils.functional import cached_property from django.utils.timezone import now from django_better_admin_arrayfield.models.fields import ArrayField from apps.exercises.constants import SIGNATURE_CHOICES, KEY_SIGNATURES from apps.exercises.utils.transpose import transpose import re User = get_user_model() class RawJSONField(JSONField): """ To preserve the data order. """
[ 6738, 17268, 1330, 14230, 1068, 35, 713, 198, 6738, 4818, 8079, 1330, 28805, 12514, 198, 6738, 340, 861, 10141, 1330, 1720, 198, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 1330, 651, 62, 7220, 62, 19849, 198, 6738, 42625, 14208,...
3.510288
243
# -*- coding: utf-8 -*- # vim: set fileencoding=utf-8 # Copyright (c) 2008, 2010 Janne Blomqvist # This source code file is subject to the terms of the MIT (Expat) # License. See the file LICENSE for details. """This module contains unit tests for the vasputil.dos module.""" import unittest import vasputil.dos as d class LdosTestCase(unittest.TestCase): """Testcase for vasputil.dos.LDOS class.""" if __name__ == "__main__": unittest.main()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 43907, 25, 900, 2393, 12685, 7656, 28, 40477, 12, 23, 198, 198, 2, 15069, 357, 66, 8, 3648, 11, 3050, 2365, 710, 1086, 296, 44179, 396, 198, 198, 2, 770, 2723, ...
2.815951
163
#!/usr/bin/env python # encoding: utf-8 """ genotype.py This is a class with information about genotypecalls that follows the (GATK) .vcf standard. The indata, that is the genotype call, is allways on the form x/x, so they look like 0/0, 1/2, 1/1 and so on. The first sign inidcates what we find on the first allele, the second is a separator on the form '/' or '|' and the third indicates what is seen on the second allele. The alleles are unordered. Attributes: - genotype STRING (Same as in VCF-standard) - allele_1 STRING (Base on allele 1) - allele_2 STRING (Base on allele 2) - nocall BOOL - heterozygote BOOL - homo_alt BOOL (If individual is homozygote alternative) - homo_ref BOOL (If individual is homozygote reference) - has_variant BOOL (If individual is called and not homozygote reference) - ref_depth INT - alt_depth INT - phred_likelihoods LIST with FLOAT - depth_of_coverage INT - genotype_quality FLOAT - phased BOOL If a variant is present, that is if homo_alt or heterozygote is true, then has_variant is True When dealing with phased data we will see the '|'-delimiter #TODO: Should we allow '1/2', '2/2' and so on? This type of call looses it's point when moving from vcf -> bed since bed files only have one kind of variant on each line. For now we will only allow './.', '0/0', '0/1', '1/1' Created by Måns Magnusson on 2014-06-30. Copyright (c) 2013 __MyCompanyName__. All rights reserved. """ import sys import os class Genotype(object): """Holds information about a genotype""" def __str__(self): """Specifies what will be printed when printing the object.""" return self.allele_1+'/'+self.allele_2
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 21004, 25, 3384, 69, 12, 23, 198, 37811, 198, 5235, 8690, 13, 9078, 198, 198, 1212, 318, 257, 1398, 351, 1321, 546, 2429, 8690, 66, 5691, 326, 5679, 262, 357, 38, 1404, 42, 8, ...
2.847291
609
# -*- coding: utf-8 -*- """ Created on Fri Mar 9 12:28:39 2018 @author: Frank Dip """ x = 1 print(x) x_str = str(x) print("My favorite number is", x, ".", "x=", x) print("My favorite number is " + x_str + ". x=" + x_str)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 19480, 1526, 220, 860, 1105, 25, 2078, 25, 2670, 2864, 198, 198, 31, 9800, 25, 5278, 43945, 198, 37811, 198, 198, 87, 796, 352, 198, 4798, 7, ...
2.306122
98
#!/usr/bin/python3 # -*- coding: utf8 -*- import pytest import tkinter as tk import time import tk_gui_tools.template_manager as template_manager @pytest.fixture class Event: """ Event object """ x = 20 y = 25 @pytest.fixture
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 23, 532, 9, 12, 198, 198, 11748, 12972, 9288, 198, 11748, 256, 74, 3849, 355, 256, 74, 198, 11748, 640, 198, 198, 11748, 256, 74, 62, 48317, 62, ...
2.401869
107