# Copyright 2014-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from wa import Instrument, Parameter
from wa.framework.exception import InstrumentError
from wa.framework.instrument import slow
from wa.utils.misc import ensure_file_directory_exists as _f
class DmesgInstrument(Instrument):
# pylint: disable=no-member,attribute-defined-outside-init
"""
    Collects dmesg output before and during the run.
"""
name = 'dmesg'
parameters = [
Parameter('loglevel', kind=int, allowed_values=range(8),
description='Set loglevel for console output.')
]
loglevel_file = '/proc/sys/kernel/printk'
def initialize(self, context):
self.need_root = self.target.os == 'android'
if self.need_root and not self.target.is_rooted:
raise InstrumentError('Need root to collect dmesg on Android')
def setup(self, context):
if self.loglevel:
self.old_loglevel = self.target.read_int(self.loglevel_file)
self.target.write_value(self.loglevel_file, self.loglevel, verify=False)
self.before_file = _f(os.path.join(context.output_directory, 'dmesg', 'before'))
self.after_file = _f(os.path.join(context.output_directory, 'dmesg', 'after'))
@slow
def start(self, context):
with open(self.before_file, 'w') as wfh:
wfh.write(self.target.execute('dmesg', as_root=self.need_root))
context.add_artifact('dmesg_before', self.before_file, kind='data')
if self.target.is_rooted:
self.target.execute('dmesg -c', as_root=True)
@slow
def stop(self, context):
with open(self.after_file, 'w') as wfh:
wfh.write(self.target.execute('dmesg', as_root=self.need_root))
context.add_artifact('dmesg_after', self.after_file, kind='data')
def teardown(self, context): # pylint: disable=unused-argument
if self.loglevel:
self.target.write_value(self.loglevel_file, self.old_loglevel, verify=False)
|
[
"os.path.join",
"wa.framework.exception.InstrumentError"
] |
[((1346, 1402), 'wa.framework.exception.InstrumentError', 'InstrumentError', (['"""Need root to collect dmesg on Android"""'], {}), "('Need root to collect dmesg on Android')\n", (1361, 1402), False, 'from wa.framework.exception import InstrumentError\n'), ((1648, 1705), 'os.path.join', 'os.path.join', (['context.output_directory', '"""dmesg"""', '"""before"""'], {}), "(context.output_directory, 'dmesg', 'before')\n", (1660, 1705), False, 'import os\n'), ((1736, 1792), 'os.path.join', 'os.path.join', (['context.output_directory', '"""dmesg"""', '"""after"""'], {}), "(context.output_directory, 'dmesg', 'after')\n", (1748, 1792), False, 'import os\n')]
|
from flask import Flask
from flask_restplus import Api, Resource, Namespace, reqparse
from server.instance import server
from models.models import delete_parser, create_addon_parser
from environment.logger_flask import logger
from environment.logger_aws import Logger
from environment.template import write_jinja_file, zip_function_upload, streaming_output, create_cdk_json
from lib.s3 import S3, S3Client
from lib.cloudformation import Cloudformation
from lib.iam import IAM
import os
from time import sleep
#from lib.eks import EKS
#import os
#import uuid
#import json
app, region, api = server.app, server.region, server.api
aws_logger = Logger(loglevel='info')
response_dict = {
200: 'OK',
    400: 'Invalid Argument',
500: 'Mapping Key Error'
}
###############
#### Delete ###
###############
delete_ns = Namespace(
name='Delete',
    description='My delete related routes',
path='/delete'
)
api.add_namespace(delete_ns)
@delete_ns.route('/<string:name>')
class Delete(Resource):
@delete_ns.expect(delete_parser)
@delete_ns.doc(responses=response_dict)
def post(self, name):
'''
1. Delete EKS cluster cloudformation
2. Delete CDK stack
3. Delete Items from s3 bucket
'''
aws_logger.info('/delete/{} POST'.format(name))
args = delete_parser.parse_args()
aws_logger.info(args)
s3r = S3(aws_logger, region=region)
config = s3r.download_dict(f"{name}.json", args['s3bucket'])
if config is None:
return f"already deleted stack {name}"
aws_logger.info(config)
cf = Cloudformation(aws_logger, region=region)
if config['addons']['togca']:
try:
cfoutput = cf.describe_stack(StackName=config['cloudformation_ng'])
for keys in cfoutput['Stacks'][0]['Outputs']:
if keys['OutputKey'] == 'InstanceRoleARN':
iam_role_arn = keys['OutputValue']
iam_role_arn = iam_role_arn.split("/")[-1]
if iam_role_arn:
iam = IAM(aws_logger, region=region)
aws_logger.info(f"trying to delete 'ASG-Policy-For-Worker' from role {iam_role_arn}")
iam.delete_role_policy(RoleName=iam_role_arn, PolicyName='ASG-Policy-For-Worker')
except Exception as e:
aws_logger.info(f"error removing ASG policy from worker nodes, consider manually removing policy: {e}")
cf.delete_stack(StackName=config['cloudformation_ng'])
for i in range(60):
check = cf.describe_stack(StackName=config['cloudformation_ng'])
if check:
sleep(4)
i+= 1
else:
aws_logger.info("NodeGroup Stack Deleted")
break
cf.delete_stack(StackName=config['cloudformation_cp'])
for i in range(120):
check = cf.describe_stack(StackName=config['cloudformation_cp'])
if check:
sleep(4)
i+= 1
else:
aws_logger.info("ControlPlane Stack Deleted")
break
chdir = os.getcwd()
streaming_output(["cdk", "destroy", "-f"], f"{chdir}/cdk/", aws_logger)
s3c = S3Client(aws_logger, region=region)
s3c.delete_object(bucket_name=args['s3bucket'], key=f"{config['name']}.json")
try:
return config
except KeyError as e:
print(e)
api.abort(500, e.__doc__, status = "Could not save information", statusCode = "500")
except Exception as e:
print(e)
api.abort(400, e.__doc__, status = "Could not save information", statusCode = "400")
|
[
"lib.s3.S3Client",
"os.getcwd",
"models.models.delete_parser.parse_args",
"environment.template.streaming_output",
"time.sleep",
"environment.logger_aws.Logger",
"lib.cloudformation.Cloudformation",
"lib.iam.IAM",
"flask_restplus.Namespace",
"lib.s3.S3"
] |
[((643, 666), 'environment.logger_aws.Logger', 'Logger', ([], {'loglevel': '"""info"""'}), "(loglevel='info')\n", (649, 666), False, 'from environment.logger_aws import Logger\n'), ((823, 908), 'flask_restplus.Namespace', 'Namespace', ([], {'name': '"""Delete"""', 'description': '"""My create related routes"""', 'path': '"""/delete"""'}), "(name='Delete', description='My create related routes', path='/delete'\n )\n", (832, 908), False, 'from flask_restplus import Api, Resource, Namespace, reqparse\n'), ((1314, 1340), 'models.models.delete_parser.parse_args', 'delete_parser.parse_args', ([], {}), '()\n', (1338, 1340), False, 'from models.models import delete_parser, create_addon_parser\n'), ((1385, 1414), 'lib.s3.S3', 'S3', (['aws_logger'], {'region': 'region'}), '(aws_logger, region=region)\n', (1387, 1414), False, 'from lib.s3 import S3, S3Client\n'), ((1607, 1648), 'lib.cloudformation.Cloudformation', 'Cloudformation', (['aws_logger'], {'region': 'region'}), '(aws_logger, region=region)\n', (1621, 1648), False, 'from lib.cloudformation import Cloudformation\n'), ((3194, 3205), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3203, 3205), False, 'import os\n'), ((3214, 3285), 'environment.template.streaming_output', 'streaming_output', (["['cdk', 'destroy', '-f']", 'f"""{chdir}/cdk/"""', 'aws_logger'], {}), "(['cdk', 'destroy', '-f'], f'{chdir}/cdk/', aws_logger)\n", (3230, 3285), False, 'from environment.template import write_jinja_file, zip_function_upload, streaming_output, create_cdk_json\n'), ((3300, 3335), 'lib.s3.S3Client', 'S3Client', (['aws_logger'], {'region': 'region'}), '(aws_logger, region=region)\n', (3308, 3335), False, 'from lib.s3 import S3, S3Client\n'), ((2708, 2716), 'time.sleep', 'sleep', (['(4)'], {}), '(4)\n', (2713, 2716), False, 'from time import sleep\n'), ((3045, 3053), 'time.sleep', 'sleep', (['(4)'], {}), '(4)\n', (3050, 3053), False, 'from time import sleep\n'), ((2098, 2128), 'lib.iam.IAM', 'IAM', (['aws_logger'], {'region': 'region'}), '(aws_logger, region=region)\n', (2101, 2128), False, 'from lib.iam import IAM\n')]
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Utility methods."""
import numpy as np
import pandas as pd
import scipy.sparse
import scipy.stats
import sparse as sp
import itertools
from operator import getitem
from collections import defaultdict, Counter
from sklearn import clone
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.linear_model import LassoCV, MultiTaskLassoCV, Lasso, MultiTaskLasso
from functools import reduce, wraps
from sklearn.utils import check_array, check_X_y
import warnings
from warnings import warn
from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold
from collections.abc import Iterable
from sklearn.utils.multiclass import type_of_target
import numbers
from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.summary import summary_return
from statsmodels.compat.python import lmap
import copy
from inspect import signature
MAX_RAND_SEED = np.iinfo(np.int32).max
class IdentityFeatures(TransformerMixin):
"""Featurizer that just returns the input data."""
def fit(self, X):
"""Fit method (does nothing, just returns self)."""
return self
def transform(self, X):
"""Perform the identity transform, which returns the input unmodified."""
return X
def parse_final_model_params(coef, intercept, d_y, d_t, d_t_in, bias_part_of_coef, fit_cate_intercept):
dt = d_t
if (d_t_in != d_t) and (d_t[0] == 1): # binary treatment
dt = ()
cate_intercept = None
if bias_part_of_coef:
cate_coef = coef.reshape(d_y + dt + (-1,))[..., 1:]
if fit_cate_intercept:
cate_intercept = coef.reshape(d_y + dt + (-1,))[..., 0]
else:
cate_coef = coef.reshape(d_y + dt + (-1,))
if fit_cate_intercept:
cate_intercept = np.reshape(intercept, d_y + dt)
if (cate_intercept is not None) and (np.ndim(cate_intercept) == 0):
cate_intercept = cate_intercept.item()
return cate_coef, cate_intercept
def check_high_dimensional(X, T, *, threshold, featurizer=None, discrete_treatment=False, msg=""):
# Check if model is sparse enough for this model
X, T = check_input_arrays(X, T)
if X is None:
d_x = 1
elif featurizer is None:
d_x = X.shape[1]
else:
d_x = clone(featurizer, safe=False).fit_transform(X[[0], :]).shape[1]
if discrete_treatment:
d_t = len(set(T.flatten())) - 1
else:
d_t = 1 if np.ndim(T) < 2 else T.shape[1]
if d_x * d_t < threshold:
warn(msg, UserWarning)
def inverse_onehot(T):
"""
Given a one-hot encoding of a value, return a vector reversing the encoding to get numeric treatment indices.
Note that we assume that the first column has been removed from the input.
Parameters
----------
T : array (shape (n, d_t-1))
The one-hot-encoded array
Returns
-------
A : vector of int (shape (n,))
The un-encoded 0-based category indices
"""
assert ndim(T) == 2
# note that by default OneHotEncoder returns float64s, so need to convert to int
return (T @ np.arange(1, T.shape[1] + 1)).astype(int)
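# Illustrative example (a minimal sketch; the sample values are hypothetical): with
# the first column already dropped from the encoding, the remaining columns map back
# to 1-based category indices and an all-zero row maps to category 0.
#
#   >>> inverse_onehot(np.array([[0., 0.], [1., 0.], [0., 1.]]))
#   array([0, 1, 2])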
def issparse(X):
"""Determine whether an input is sparse.
For the purposes of this function, both `scipy.sparse` matrices and `sparse.SparseArray`
types are considered sparse.
Parameters
----------
X : array-like
The input to check
Returns
-------
bool
Whether the input is sparse
"""
return scipy.sparse.issparse(X) or isinstance(X, sp.SparseArray)
def iscoo(X):
"""Determine whether an input is a `sparse.COO` array.
Parameters
----------
X : array-like
The input to check
Returns
-------
bool
Whether the input is a `COO` array
"""
return isinstance(X, sp.COO)
def tocoo(X):
"""
Convert an array to a sparse COO array.
    If the input is already a `sparse.COO` object, this returns the object directly; otherwise it is converted.
"""
if isinstance(X, sp.COO):
return X
elif isinstance(X, sp.DOK):
return sp.COO(X)
elif scipy.sparse.issparse(X):
return sp.COO.from_scipy_sparse(X)
else:
return sp.COO.from_numpy(X)
def todense(X):
"""
Convert an array to a dense numpy array.
If the input is already a numpy array, this may create a new copy.
"""
if scipy.sparse.issparse(X):
return X.toarray()
elif isinstance(X, sp.SparseArray):
return X.todense()
else:
# TODO: any way to avoid creating a copy if the array was already dense?
# the call is necessary if the input was something like a list, though
return np.array(X)
def size(X):
"""Return the number of elements in the array.
Parameters
----------
    X : array_like
Input data
Returns
-------
int
The number of elements of the array
"""
return X.size if issparse(X) else np.size(X)
def shape(X):
"""Return a tuple of array dimensions."""
return X.shape if issparse(X) else np.shape(X)
def ndim(X):
"""Return the number of array dimensions."""
return X.ndim if issparse(X) else np.ndim(X)
def reshape(X, shape):
"""Return a new array that is a reshaped version of an input array.
The output will be sparse iff the input is.
Parameters
----------
X : array_like
The array to reshape
shape : tuple of ints
The desired shape of the output array
Returns
-------
ndarray or SparseArray
The reshaped output array
"""
if scipy.sparse.issparse(X):
# scipy sparse arrays don't support reshaping (even for 2D they throw not implemented errors),
# so convert to pydata sparse first
X = sp.COO.from_scipy_sparse(X)
if len(shape) == 2:
# in the 2D case, we can convert back to scipy sparse; in other cases we can't
return X.reshape(shape).to_scipy_sparse()
return X.reshape(shape)
def _apply(op, *XS):
"""
Apply a function to a sequence of sparse or dense array arguments.
If any array is sparse then all arrays are converted to COO before the function is applied;
if all of the arrays are scipy sparse arrays, and if the result is 2D,
the returned value will be a scipy sparse array as well
"""
all_scipy_sparse = all(scipy.sparse.issparse(X) for X in XS)
if any(issparse(X) for X in XS):
XS = tuple(tocoo(X) for X in XS)
result = op(*XS)
if all_scipy_sparse and len(shape(result)) == 2:
# both inputs were scipy and we can safely convert back to scipy because it's 2D
return result.to_scipy_sparse()
return result
def tensordot(X1, X2, axes):
"""
Compute tensor dot product along specified axes for arrays >= 1-D.
Parameters
----------
X1, X2 : array_like, len(shape) >= 1
Tensors to "dot"
axes : int or (2,) array_like
integer_like
If an int N, sum over the last N axes of `X1` and the first N axes
of `X2` in order. The sizes of the corresponding axes must match
(2,) array_like
Or, a list of axes to be summed over, first sequence applying to `X1`,
second to `X2`. Both elements array_like must be of the same length.
"""
def td(X1, X2):
return sp.tensordot(X1, X2, axes) if iscoo(X1) else np.tensordot(X1, X2, axes)
return _apply(td, X1, X2)
def cross_product(*XS):
"""
Compute the cross product of features.
Parameters
----------
X1 : n x d1 matrix
First matrix of n samples of d1 features
(or an n-element vector, which will be treated as an n x 1 matrix)
X2 : n x d2 matrix
Second matrix of n samples of d2 features
(or an n-element vector, which will be treated as an n x 1 matrix)
Returns
-------
A : n x (d1*d2*...) matrix
Matrix of n samples of d1*d2*... cross product features,
        arranged in a form such that each row t of the output contains:
[X1[t,0]*X2[t,0]*..., ..., X1[t,d1-1]*X2[t,0]*..., X1[t,0]*X2[t,1]*..., ..., X1[t,d1-1]*X2[t,1]*..., ...]
"""
for X in XS:
assert 2 >= ndim(X) >= 1
n = shape(XS[0])[0]
for X in XS:
assert n == shape(X)[0]
def cross(XS):
k = len(XS)
XS = [reshape(XS[i], (n,) + (1,) * (k - i - 1) + (-1,) + (1,) * i) for i in range(k)]
return reshape(reduce(np.multiply, XS), (n, -1))
return _apply(cross, XS)
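# Illustrative example (a minimal sketch; the sample values are hypothetical): for a
# single row, the first factor's index varies fastest in the output, matching the
# ordering described in the docstring above.
#
#   >>> cross_product(np.array([[1, 2]]), np.array([[3, 4]]))
#   array([[3, 6, 4, 8]])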
def stack(XS, axis=0):
"""
Join a sequence of arrays along a new axis.
The axis parameter specifies the index of the new axis in the dimensions of the result.
For example, if axis=0 it will be the first dimension and if axis=-1 it will be the last dimension.
Parameters
----------
arrays : sequence of array_like
Each array must have the same shape
axis : int, optional
The axis in the result array along which the input arrays are stacked
Returns
-------
ndarray or SparseArray
The stacked array, which has one more dimension than the input arrays.
It will be sparse if the inputs are.
"""
def st(*XS):
return sp.stack(XS, axis=axis) if iscoo(XS[0]) else np.stack(XS, axis=axis)
return _apply(st, *XS)
def concatenate(XS, axis=0):
"""
Join a sequence of arrays along an existing axis.
Parameters
----------
X1, X2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
ndarray or SparseArray
The concatenated array. It will be sparse if the inputs are.
"""
def conc(*XS):
return sp.concatenate(XS, axis=axis) if iscoo(XS[0]) else np.concatenate(XS, axis=axis)
return _apply(conc, *XS)
# note: in contrast to np.hstack this only works with arrays of dimension at least 2
def hstack(XS):
"""
Stack arrays in sequence horizontally (column wise).
This is equivalent to concatenation along the second axis
Parameters
----------
XS : sequence of ndarrays
The arrays must have the same shape along all but the second axis.
Returns
-------
ndarray or SparseArray
The array formed by stacking the given arrays. It will be sparse if the inputs are.
"""
# Confusingly, this needs to concatenate, not stack (stack returns an array with an extra dimension)
return concatenate(XS, 1)
def vstack(XS):
"""
Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after
1-D arrays of shape (N,) have been reshaped to (1,N).
Parameters
----------
XS : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
ndarray or SparseArray
The array formed by stacking the given arrays, will be at least 2-D. It will be sparse if the inputs are.
"""
# Confusingly, this needs to concatenate, not stack (stack returns an array with an extra dimension)
return concatenate(XS, 0)
def transpose(X, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
X : array_like
Input array.
axes : list of ints, optional
By default, reverse the dimensions, otherwise permute the axes according to the values given
Returns
-------
p : ndarray or SparseArray
`X` with its axes permuted. This will be sparse if `X` is.
"""
def t(X):
if iscoo(X):
return X.transpose(axes)
else:
return np.transpose(X, axes)
return _apply(t, X)
def add_intercept(X):
"""
Adds an intercept feature to an array by prepending a column of ones.
Parameters
----------
X : array-like
Input array. Must be 2D.
Returns
-------
arr : ndarray
`X` with a column of ones prepended
"""
return hstack([np.ones((X.shape[0], 1)), X])
def reshape_Y_T(Y, T):
"""
Reshapes Y and T when Y.ndim = 2 and/or T.ndim = 1.
Parameters
----------
Y : array_like, shape (n, ) or (n, 1)
Outcome for the treatment policy. Must be a vector or single-column matrix.
T : array_like, shape (n, ) or (n, d_t)
Treatment policy.
Returns
-------
Y : array_like, shape (n, )
Flattened outcome for the treatment policy.
T : array_like, shape (n, 1) or (n, d_t)
Reshaped treatment policy.
"""
assert(len(Y) == len(T))
assert(Y.ndim <= 2)
if Y.ndim == 2:
assert(Y.shape[1] == 1)
Y = Y.flatten()
if T.ndim == 1:
T = T.reshape(-1, 1)
return Y, T
def check_inputs(Y, T, X, W=None, multi_output_T=True, multi_output_Y=True):
"""
Input validation for CATE estimators.
Checks Y, T, X, W for consistent length, enforces X, W 2d.
    Only standard input checks are applied to all inputs,
    such as checking that targets do not contain np.nan or np.inf.
Converts regular Python lists to numpy arrays.
Parameters
----------
Y : array_like, shape (n, ) or (n, d_y)
Outcome for the treatment policy.
T : array_like, shape (n, ) or (n, d_t)
Treatment policy.
X : array-like, shape (n, d_x)
Feature vector that captures heterogeneity.
W : array-like, shape (n, d_w) or None (default=None)
High-dimensional controls.
multi_output_T : bool
Whether to allow more than one treatment.
multi_output_Y: bool
Whether to allow more than one outcome.
Returns
-------
Y : array_like, shape (n, ) or (n, d_y)
Converted and validated Y.
T : array_like, shape (n, ) or (n, d_t)
Converted and validated T.
X : array-like, shape (n, d_x)
Converted and validated X.
W : array-like, shape (n, d_w) or None (default=None)
Converted and validated W.
"""
X, T = check_X_y(X, T, multi_output=multi_output_T, y_numeric=True)
_, Y = check_X_y(X, Y, multi_output=multi_output_Y, y_numeric=True)
if W is not None:
W, _ = check_X_y(W, Y, multi_output=multi_output_Y, y_numeric=True)
return Y, T, X, W
def check_input_arrays(*args, validate_len=True):
"""Cast input sequences into numpy arrays.
Only inputs that are sequence-like will be converted, all other inputs will be left as is.
When `validate_len` is True, the sequences will be checked for equal length.
Parameters
----------
args : scalar or array_like
Inputs to be checked.
validate_len : bool (default=True)
Whether to check if the input arrays have the same length.
Returns
-------
args: array-like
List of inputs where sequence-like objects have been cast to numpy arrays.
"""
args = [check_array(arg, dtype=None, ensure_2d=False, accept_sparse=True)
if np.ndim(arg) > 0 else arg for arg in args]
if validate_len:
n = None
for arg in args:
if np.ndim(arg) > 0:
m = arg.shape[0]
if n is None:
n = m
else:
assert (m == n), "Input arrays have incompatible lengths: {} and {}".format(n, m)
return args
def get_input_columns(X):
"""Extracts column names from dataframe-like input object.
Currently supports column name extraction from pandas DataFrame and Series objects.
Parameters
----------
X : array_like
Input array with column names to be extracted.
Returns
-------
cols: array-like or None
List of columns corresponding to the dataframe-like object.
None if the input array is not in the supported types.
"""
# Type to column extraction function
type_to_func = {
pd.DataFrame: lambda x: x.columns.tolist(),
pd.Series: lambda x: [x.name]
}
if type(X) in type_to_func:
return type_to_func[type(X)](X)
return None
def check_models(models, n):
"""
Input validation for metalearner models.
Check whether the input models satisfy the criteria below.
Parameters
----------
models : estimator or a list/tuple of estimators
n : int
Number of models needed
Returns
----------
models : a list/tuple of estimators
"""
if isinstance(models, (tuple, list)):
if n != len(models):
raise ValueError("The number of estimators doesn't equal to the number of treatments. "
"Please provide either a tuple/list of estimators "
"with same number of treatments or an unified estimator.")
elif hasattr(models, 'fit'):
models = [clone(models, safe=False) for i in range(n)]
else:
raise ValueError(
"models must be either a tuple/list of estimators with same number of treatments "
"or an unified estimator.")
return models
def broadcast_unit_treatments(X, d_t):
"""
Generate `d_t` unit treatments for each row of `X`.
Parameters
----------
d_t: int
Number of treatments
X : array
Features
Returns
-------
X, T : (array, array)
The updated `X` array (with each row repeated `d_t` times),
and the generated `T` array
"""
d_x = shape(X)[0]
eye = np.eye(d_t)
# tile T and repeat X along axis 0 (so that the duplicated rows of X remain consecutive)
T = np.tile(eye, (d_x, 1))
Xs = np.repeat(X, d_t, axis=0)
return Xs, T
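# Illustrative example (a minimal sketch; the sample values are hypothetical): each
# row of X is repeated d_t times and paired with the d_t unit-vector treatments.
#
#   >>> Xs, T = broadcast_unit_treatments(np.array([[1, 2], [3, 4]]), 2)
#   >>> Xs
#   array([[1, 2],
#          [1, 2],
#          [3, 4],
#          [3, 4]])
#   >>> T
#   array([[1., 0.],
#          [0., 1.],
#          [1., 0.],
#          [0., 1.]])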
def reshape_treatmentwise_effects(A, d_t, d_y):
"""
Given an effects matrix ordered first by treatment, transform it to be ordered by outcome.
Parameters
----------
A : array
The array of effects, of size n*d_y*d_t
d_t : tuple of int
Either () if T was a vector, or a 1-tuple of the number of columns of T if it was an array
d_y : tuple of int
Either () if Y was a vector, or a 1-tuple of the number of columns of Y if it was an array
Returns
-------
A : array (shape (m, d_y, d_t))
The transformed array. Note that singleton dimensions will be dropped for any inputs which
were vectors, as in the specification of `BaseCateEstimator.marginal_effect`.
"""
A = reshape(A, (-1,) + d_t + d_y)
if d_t and d_y:
return transpose(A, (0, 2, 1)) # need to return as m by d_y by d_t matrix
else:
return A
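# Illustrative note (the dimensions are hypothetical): with d_t=(2,) and d_y=(3,), an
# effects array whose entries are grouped by treatment within each sample is first
# reshaped to (n, 2, 3) and then transposed to (n, 3, 2), i.e. one d_y-by-d_t block
# per sample, ordered by outcome.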
def einsum_sparse(subscripts, *arrs):
"""
Evaluate the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional array operations can be represented
in a simple fashion. This function provides a way to compute such summations.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
        Unlike `np.einsum`, ellipses are not supported and the output must be explicitly included
arrs : list of COO arrays
These are the sparse arrays for the operation.
Returns
-------
SparseArray
The sparse array calculated based on the Einstein summation convention.
"""
inputs, outputs = subscripts.split('->')
inputs = inputs.split(',')
outputInds = set(outputs)
allInds = set.union(*[set(i) for i in inputs])
# same number of input definitions as arrays
assert len(inputs) == len(arrs)
# input definitions have same number of dimensions as each array
assert all(arr.ndim == len(input) for (arr, input) in zip(arrs, inputs))
# all result indices are unique
assert len(outputInds) == len(outputs)
# all result indices must match at least one input index
assert outputInds <= allInds
# map indices to all array, axis pairs for that index
indMap = {c: [(n, i) for n in range(len(inputs)) for (i, x) in enumerate(inputs[n]) if x == c] for c in allInds}
for c in indMap:
# each index has the same cardinality wherever it appears
assert len({arrs[n].shape[i] for (n, i) in indMap[c]}) == 1
# State: list of (set of letters, list of (corresponding indices, value))
# Algo: while list contains more than one entry
# take two entries
# sort both lists by intersection of their indices
# merge compatible entries (where intersection of indices is equal - in the resulting list,
# take the union of indices and the product of values), stepping through each list linearly
# TODO: might be faster to break into connected components first
# e.g. for "ab,d,bc->ad", the two components "ab,bc" and "d" are independent,
# so compute their content separately, then take cartesian product
# this would save a few pointless sorts by empty tuples
# TODO: Consider investigating other performance ideas for these cases
# where the dense method beat the sparse method (usually sparse is faster)
# e,facd,c->cfed
# sparse: 0.0335489
# dense: 0.011465999999999997
# gbd,da,egb->da
# sparse: 0.0791625
# dense: 0.007319099999999995
# dcc,d,faedb,c->abe
# sparse: 1.2868097
# dense: 0.44605229999999985
def merge(x1, x2):
(s1, l1), (s2, l2) = x1, x2
keys = {c for c in s1 if c in s2} # intersection of strings
outS = ''.join(set(s1 + s2)) # union of strings
outMap = [(True, s1.index(c)) if c in s1 else (False, s2.index(c)) for c in outS]
def keyGetter(s):
inds = [s.index(c) for c in keys]
return lambda p: tuple(p[0][ind] for ind in inds)
kg1 = keyGetter(s1)
kg2 = keyGetter(s2)
l1.sort(key=kg1)
l2.sort(key=kg2)
i1 = i2 = 0
outL = []
while i1 < len(l1) and i2 < len(l2):
k1, k2 = kg1(l1[i1]), kg2(l2[i2])
if k1 < k2:
i1 += 1
elif k2 < k1:
i2 += 1
else:
j1, j2 = i1, i2
while j1 < len(l1) and kg1(l1[j1]) == k1:
j1 += 1
while j2 < len(l2) and kg2(l2[j2]) == k2:
j2 += 1
for c1, d1 in l1[i1:j1]:
for c2, d2 in l2[i2:j2]:
outL.append((tuple(c1[charIdx] if inFirst else c2[charIdx] for inFirst, charIdx in outMap),
d1 * d2))
i1 = j1
i2 = j2
return outS, outL
# when indices are repeated within an array, pre-filter the coordinates and data
def filter_inds(coords, data, n):
counts = Counter(inputs[n])
repeated = [(c, counts[c]) for c in counts if counts[c] > 1]
if len(repeated) > 0:
mask = np.full(len(data), True)
for (k, v) in repeated:
inds = [i for i in range(len(inputs[n])) if inputs[n][i] == k]
for i in range(1, v):
mask &= (coords[:, inds[0]] == coords[:, inds[i]])
if not all(mask):
return coords[mask, :], data[mask]
return coords, data
xs = [(s, list(zip(c, d)))
for n, (s, arr) in enumerate(zip(inputs, arrs))
for c, d in [filter_inds(arr.coords.T, arr.data, n)]]
# TODO: would using einsum's paths to optimize the order of merging help?
while len(xs) > 1:
xs.append(merge(xs.pop(), xs.pop()))
results = defaultdict(int)
for (s, l) in xs:
coordMap = [s.index(c) for c in outputs]
for (c, d) in l:
results[tuple(c[i] for i in coordMap)] += d
return sp.COO(np.array(list(results.keys())).T if results else
np.empty((len(outputs), 0)),
np.array(list(results.values())),
[arrs[indMap[c][0][0]].shape[indMap[c][0][1]] for c in outputs])
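# Illustrative example (a minimal sketch; the operands are hypothetical): subscripts
# use the same comma/arrow syntax as np.einsum, but the operands must be sparse COO
# arrays and the output indices must be written out explicitly.
#
#   >>> a = sp.COO.from_numpy(np.arange(6).reshape(2, 3))
#   >>> b = sp.COO.from_numpy(np.arange(12).reshape(3, 4))
#   >>> todense(einsum_sparse('ij,jk->ik', a, b)).shape
#   (2, 4)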
def fit_with_groups(model, X, y, groups=None, **kwargs):
"""
Fit a model while correctly handling grouping if necessary.
This enables us to perform an inner-loop cross-validation of a model
which handles grouping correctly, which is not easy using typical sklearn models.
    For example, GridSearchCV and RandomizedSearchCV both support passing 'groups' to fit,
    but other CV-related estimators (such as those derived from LinearModelCV, including LassoCV)
    do not support passing groups to fit, which means that GroupKFold cannot be used as the cv instance
when using these types, because the required 'groups' argument will never be passed to the
GroupKFold's split method. See also https://github.com/scikit-learn/scikit-learn/issues/12052
The (hacky) workaround that is used here is to explicitly set the 'cv' attribute (if there is one) to
the exact set of rows and not to use GroupKFold even with the sklearn classes that could support it;
this should work with classes derived from BaseSearchCV, LinearModelCV, and CalibratedClassifierCV.
Parameters
----------
model : estimator
The model to fit
X : array-like
The features to fit against
y : array-like
The target to fit against
groups : array-like, optional
The set of groupings that should be kept together when splitting rows for
cross-validation
kwargs : dict
Any other named arguments to pass to the model's fit
"""
if groups is not None:
# assume that we should perform nested cross-validation if and only if
# the model has a 'cv' attribute; this is a somewhat brittle assumption...
if hasattr(model, 'cv'):
old_cv = model.cv
# logic copied from check_cv
cv = 5 if old_cv is None else old_cv
if isinstance(cv, numbers.Integral):
cv = GroupKFold(cv)
# otherwise we will assume the user already set the cv attribute to something
# compatible with splitting with a 'groups' argument
# now we have to compute the folds explicitly because some classifiers (like LassoCV)
# don't use the groups when calling split internally
splits = list(cv.split(X, y, groups=groups))
try:
model.cv = splits
return model.fit(X, y, **kwargs)
finally:
model.cv = old_cv
return model.fit(X, y, **kwargs)
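# Illustrative usage (a minimal sketch; the synthetic data below is hypothetical):
# LassoCV cannot normally honor group-aware splitting, but routing the fit through
# fit_with_groups temporarily pins its cv attribute to explicit group-respecting
# splits.
#
#   >>> X_demo = np.random.normal(size=(20, 3))
#   >>> y_demo = np.random.normal(size=20)
#   >>> g_demo = np.repeat(np.arange(5), 4)
#   >>> _ = fit_with_groups(LassoCV(cv=3), X_demo, y_demo, groups=g_demo)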
def filter_none_kwargs(**kwargs):
"""
Filters out any keyword arguments that are None.
This is useful when specific optional keyword arguments might not be universally supported,
so that stripping them out when they are not set enables more uses to succeed.
Parameters
----------
kwargs: dict
The keyword arguments to filter
Returns
-------
filtered_kwargs: dict
The input dictionary, but with all entries having value None removed
"""
return {key: value for key, value in kwargs.items() if value is not None}
class WeightedModelWrapper:
"""Helper class for assiging weights to models without this option.
Parameters
----------
model_instance : estimator
Model that requires weights.
sample_type : string, optional (default=`weighted`)
Method for adding weights to the model. `weighted` for linear regression models
where the weights can be incorporated in the matrix multiplication,
`sampled` for other models. `sampled` samples the training set according
to the normalized weights and creates a dataset larger than the original.
"""
def __init__(self, model_instance, sample_type="weighted"):
self.model_instance = model_instance
if sample_type == "weighted":
self.data_transform = self._weighted_inputs
else:
warnings.warn("The model provided does not support sample weights. "
"Manual weighted sampling may icrease the variance in the results.", UserWarning)
self.data_transform = self._sampled_inputs
def fit(self, X, y, sample_weight=None):
"""Fit underlying model instance with weighted inputs.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples, n_outcomes)
Target values.
Returns
-------
self: an instance of the underlying estimator.
"""
if sample_weight is not None:
X, y = self.data_transform(X, y, sample_weight)
return self.model_instance.fit(X, y)
def predict(self, X):
"""Predict using the linear model.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Samples.
Returns
-------
C : array, shape (n_samples, n_outcomes)
Returns predicted values.
"""
return self.model_instance.predict(X)
def _weighted_inputs(self, X, y, sample_weight):
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
normalized_weights = sample_weight * X.shape[0] / np.sum(sample_weight)
sqrt_weights = np.sqrt(normalized_weights)
weighted_X = sqrt_weights.reshape(-1, 1) * X
weighted_y = sqrt_weights.reshape(-1, 1) * y if y.ndim > 1 else sqrt_weights * y
return weighted_X, weighted_y
def _sampled_inputs(self, X, y, sample_weight):
# Normalize weights
normalized_weights = sample_weight / np.sum(sample_weight)
data_length = int(min(1 / np.min(normalized_weights[normalized_weights > 0]), 10) * X.shape[0])
data_indices = np.random.choice(X.shape[0], size=data_length, p=normalized_weights)
return X[data_indices], y[data_indices]
class MultiModelWrapper:
"""Helper class for training different models for each treatment.
Parameters
----------
model_list : array-like, shape (n_T, )
List of models to be trained separately for each treatment group.
"""
def __init__(self, model_list=[]):
self.model_list = model_list
self.n_T = len(model_list)
def fit(self, Xt, y, sample_weight=None):
"""Fit underlying list of models with weighted inputs.
Parameters
----------
X : array-like, shape (n_samples, n_features + n_treatments)
Training data. The last n_T columns should be a one-hot encoding of the treatment assignment.
y : array-like, shape (n_samples, )
Target values.
Returns
-------
self: an instance of the class
"""
X = Xt[:, :-self.n_T]
t = Xt[:, -self.n_T:]
if sample_weight is None:
for i in range(self.n_T):
mask = (t[:, i] == 1)
self.model_list[i].fit(X[mask], y[mask])
else:
for i in range(self.n_T):
mask = (t[:, i] == 1)
self.model_list[i].fit(X[mask], y[mask], sample_weight[mask])
return self
def predict(self, Xt):
"""Predict using the linear model.
Parameters
----------
X : array-like, shape (n_samples, n_features + n_treatments)
Samples. The last n_T columns should be a one-hot encoding of the treatment assignment.
Returns
-------
C : array, shape (n_samples, )
Returns predicted values.
"""
X = Xt[:, :-self.n_T]
t = Xt[:, -self.n_T:]
predictions = [self.model_list[np.nonzero(t[i])[0][0]].predict(X[[i]]) for i in range(len(X))]
return np.concatenate(predictions)
def _safe_norm_ppf(q, loc=0, scale=1):
if hasattr(loc, "__len__"):
prelim = loc.copy()
if np.any(scale > 0):
prelim[scale > 0] = scipy.stats.norm.ppf(q, loc=loc[scale > 0], scale=scale[scale > 0])
elif scale > 0:
prelim = scipy.stats.norm.ppf(q, loc=loc, scale=scale)
else:
prelim = loc
return prelim
class Summary:
# This class is mainly derived from statsmodels.iolib.summary.Summary
"""
Result summary
Construction does not take any parameters. Tables and text can be added
with the `add_` methods.
Attributes
----------
tables : list of tables
Contains the list of SimpleTable instances, horizontally concatenated
tables are not saved separately.
extra_txt : str
extra lines that are added to the text output, used for warnings
and explanations.
"""
def __init__(self):
self.tables = []
self.extra_txt = None
def __str__(self):
return self.as_text()
def __repr__(self):
return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'
def _repr_html_(self):
'''Display as HTML in IPython notebook.'''
return self.as_html()
def add_table(self, res, header, index, title):
table = SimpleTable(res, header, index, title)
self.tables.append(table)
def add_extra_txt(self, etext):
'''add additional text that will be added at the end in text format
Parameters
----------
etext : list[str]
string with lines that are added to the text output.
'''
self.extra_txt = '\n'.join(etext)
def as_text(self):
'''return tables as string
Returns
-------
txt : str
summary tables and extra text as one string
'''
txt = summary_return(self.tables, return_fmt='text')
if self.extra_txt is not None:
txt = txt + '\n\n' + self.extra_txt
return txt
def as_latex(self):
'''return tables as string
Returns
-------
latex : str
summary tables and extra text as string of Latex
Notes
-----
This currently merges tables with different number of columns.
It is recommended to use `as_latex_tabular` directly on the individual
tables.
'''
latex = summary_return(self.tables, return_fmt='latex')
if self.extra_txt is not None:
latex = latex + '\n\n' + self.extra_txt.replace('\n', ' \\newline\n ')
return latex
def as_csv(self):
'''return tables as string
Returns
-------
csv : str
concatenated summary tables in comma delimited format
'''
csv = summary_return(self.tables, return_fmt='csv')
if self.extra_txt is not None:
csv = csv + '\n\n' + self.extra_txt
return csv
def as_html(self):
'''return tables as string
Returns
-------
html : str
concatenated summary tables in HTML format
'''
html = summary_return(self.tables, return_fmt='html')
if self.extra_txt is not None:
html = html + '<br/><br/>' + self.extra_txt.replace('\n', '<br/>')
return html
class SeparateModel:
"""
Splits the data based on the last feature and trains
a separate model for each subsample. At predict time, it
uses the last feature to choose which model to use
to predict.
"""
def __init__(self, *models):
self.models = [clone(model) for model in models]
def fit(self, XZ, T):
for (i, m) in enumerate(self.models):
inds = (XZ[:, -1] == i)
m.fit(XZ[inds, :-1], T[inds])
return self
def predict(self, XZ):
t_pred = np.zeros(XZ.shape[0])
for (i, m) in enumerate(self.models):
inds = (XZ[:, -1] == i)
if np.any(inds):
t_pred[inds] = m.predict(XZ[inds, :-1])
return t_pred
@property
def coef_(self):
        return np.concatenate([model.coef_ for model in self.models])
class _EncoderWrapper:
"""
Wraps a OneHotEncoder (and optionally also a LabelEncoder).
Useful mainly so that the `encode` method can be used in a FunctionTransformer,
which would otherwise need a lambda (which can't be pickled).
"""
def __init__(self, one_hot_encoder, label_encoder=None, drop_first=False):
self._label_encoder = label_encoder
self._one_hot_encoder = one_hot_encoder
self._drop_first = drop_first
def encode(self, arr):
if self._label_encoder:
arr = self._label_encoder.transform(arr.ravel())
result = self._one_hot_encoder.transform(reshape(arr, (-1, 1)))
return result[:, 1:] if self._drop_first else result
def deprecated(message, category=FutureWarning):
"""
    Enables decorating a method or class to provide a warning when it is used.
Parameters
----------
message: string
The deprecation message to use
category: optional :class:`type`, default :class:`FutureWarning`
The warning category to use
"""
def decorator(to_wrap):
# if we're decorating a class, just update the __init__ method,
# so that the result is still a class instead of a wrapper method
if isinstance(to_wrap, type):
old_init = to_wrap.__init__
@wraps(to_wrap.__init__)
def new_init(*args, **kwargs):
warn(message, category, stacklevel=2)
old_init(*args, **kwargs)
to_wrap.__init__ = new_init
return to_wrap
else:
@wraps(to_wrap)
def m(*args, **kwargs):
warn(message, category, stacklevel=2)
return to_wrap(*args, **kwargs)
return m
return decorator
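# Illustrative usage (the class name and message below are hypothetical): decorating
# a class replaces only its __init__, so the decorated object remains a class rather
# than a wrapper function.
#
#   @deprecated("OldUtility is deprecated; use NewUtility instead.")
#   class OldUtility:
#       def __init__(self, value):
#           self.value = value
#
# Instantiating OldUtility(...) then emits a FutureWarning carrying the message.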
def _deprecate_positional(message, bad_args, category=FutureWarning):
"""
Enables decorating a method to provide a warning when certain arguments are used positionally.
Parameters
----------
message: string
The deprecation message to use
bad_args : list of string
The positional arguments that will be keyword-only in the future
category: optional :class:`type`, default :class:`FutureWarning`
The warning category to use
"""
def decorator(to_wrap):
@wraps(to_wrap)
def m(*args, **kwargs):
# want to enforce that each bad_arg was either in kwargs,
# or else it was in neither and is just taking its default value
bound = signature(m).bind(*args, **kwargs)
wrong_args = False
for arg in bad_args:
if arg not in kwargs and arg in bound.arguments:
wrong_args = True
if wrong_args:
warn(message, category, stacklevel=2)
return to_wrap(*args, **kwargs)
return m
return decorator
def transpose_dictionary(d):
"""
Transpose a dictionary of dictionaries, bringing the keys from the second level
to the top and vice versa
Parameters
----------
d: dict
The dictionary to transpose; the values of this dictionary should all themselves
be dictionaries
Returns
-------
output: dict
The output dictionary with first- and second-level keys swapped
"""
output = defaultdict(dict)
for key1, value in d.items():
for key2, val in value.items():
output[key2][key1] = val
return output
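# Illustrative example (the sample values are hypothetical): first- and second-level
# keys are swapped; inner keys missing from some entries simply do not appear there.
#
#   >>> dict(transpose_dictionary({'a': {'x': 1, 'y': 2}, 'b': {'x': 3}}))
#   {'x': {'a': 1, 'b': 3}, 'y': {'a': 2}}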
class _RegressionWrapper:
"""
A simple wrapper that makes a binary classifier behave like a regressor.
    Essentially, .fit calls the fit method of the classifier and
.predict calls the .predict_proba method of the classifier
and returns the probability of label 1.
"""
def __init__(self, clf):
"""
Parameters
----------
clf : the classifier model
"""
self._clf = clf
def fit(self, X, y, **kwargs):
"""
Parameters
----------
X : features
y : one-hot-encoding of binary label, with drop='first'
"""
if len(y.shape) > 1 and y.shape[1] > 1:
y = y @ np.arange(1, y.shape[1] + 1)
self._clf.fit(X, y, **kwargs)
return self
def predict(self, X):
"""
Parameters
----------
X : features
"""
return self._clf.predict_proba(X)[:, 1:]
@deprecated("This class will be removed from a future version of this package; "
"please use econml.sklearn_extensions.linear_model.WeightedLassoCV instead.")
class LassoCVWrapper:
"""Helper class to wrap either LassoCV or MultiTaskLassoCV depending on the shape of the target."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def fit(self, X, Y):
assert shape(X)[0] == shape(Y)[0]
assert ndim(Y) <= 2
self.needs_unravel = False
if ndim(Y) == 2 and shape(Y)[1] > 1:
self.model = MultiTaskLassoCV(*self.args, **self.kwargs)
else:
if ndim(Y) == 2 and shape(Y)[1] == 1:
Y = np.ravel(Y)
self.needs_unravel = True
self.model = LassoCV(*self.args, **self.kwargs)
self.model.fit(X, Y)
return self
def predict(self, X):
predictions = self.model.predict(X)
return reshape(predictions, (-1, 1)) if self.needs_unravel else predictions
|
[
"statsmodels.iolib.summary.summary_return",
"numpy.sum",
"numpy.ravel",
"numpy.iinfo",
"numpy.ones",
"collections.defaultdict",
"numpy.shape",
"numpy.arange",
"numpy.tile",
"sklearn.model_selection.GroupKFold",
"sparse.concatenate",
"sklearn.clone",
"sklearn.linear_model.MultiTaskLassoCV",
"sklearn.utils.check_array",
"sklearn.utils.check_X_y",
"numpy.ndim",
"statsmodels.iolib.table.SimpleTable",
"sparse.stack",
"numpy.transpose",
"inspect.signature",
"numpy.reshape",
"numpy.random.choice",
"collections.Counter",
"sparse.COO.from_scipy_sparse",
"numpy.repeat",
"numpy.stack",
"numpy.size",
"numpy.tensordot",
"sparse.tensordot",
"numpy.min",
"functools.wraps",
"sparse.COO",
"numpy.concatenate",
"sklearn.linear_model.LassoCV",
"sparse.COO.from_numpy",
"numpy.zeros",
"numpy.any",
"numpy.nonzero",
"numpy.array",
"functools.reduce",
"numpy.eye",
"warnings.warn",
"numpy.sqrt"
] |
[((959, 977), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (967, 977), True, 'import numpy as np\n'), ((14297, 14357), 'sklearn.utils.check_X_y', 'check_X_y', (['X', 'T'], {'multi_output': 'multi_output_T', 'y_numeric': '(True)'}), '(X, T, multi_output=multi_output_T, y_numeric=True)\n', (14306, 14357), False, 'from sklearn.utils import check_array, check_X_y\n'), ((14369, 14429), 'sklearn.utils.check_X_y', 'check_X_y', (['X', 'Y'], {'multi_output': 'multi_output_Y', 'y_numeric': '(True)'}), '(X, Y, multi_output=multi_output_Y, y_numeric=True)\n', (14378, 14429), False, 'from sklearn.utils import check_array, check_X_y\n'), ((17732, 17743), 'numpy.eye', 'np.eye', (['d_t'], {}), '(d_t)\n', (17738, 17743), True, 'import numpy as np\n'), ((17845, 17867), 'numpy.tile', 'np.tile', (['eye', '(d_x, 1)'], {}), '(eye, (d_x, 1))\n', (17852, 17867), True, 'import numpy as np\n'), ((17877, 17902), 'numpy.repeat', 'np.repeat', (['X', 'd_t'], {'axis': '(0)'}), '(X, d_t, axis=0)\n', (17886, 17902), True, 'import numpy as np\n'), ((23886, 23902), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (23897, 23902), False, 'from collections import defaultdict, Counter\n'), ((39588, 39605), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (39599, 39605), False, 'from collections import defaultdict, Counter\n'), ((2560, 2582), 'warnings.warn', 'warn', (['msg', 'UserWarning'], {}), '(msg, UserWarning)\n', (2564, 2582), False, 'from warnings import warn\n'), ((5035, 5045), 'numpy.size', 'np.size', (['X'], {}), '(X)\n', (5042, 5045), True, 'import numpy as np\n'), ((5147, 5158), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (5155, 5158), True, 'import numpy as np\n'), ((5261, 5271), 'numpy.ndim', 'np.ndim', (['X'], {}), '(X)\n', (5268, 5271), True, 'import numpy as np\n'), ((5856, 5883), 'sparse.COO.from_scipy_sparse', 'sp.COO.from_scipy_sparse', (['X'], {}), '(X)\n', (5880, 5883), True, 'import sparse as sp\n'), ((14467, 14527), 'sklearn.utils.check_X_y', 'check_X_y', (['W', 'Y'], {'multi_output': 'multi_output_Y', 'y_numeric': '(True)'}), '(W, Y, multi_output=multi_output_Y, y_numeric=True)\n', (14476, 14527), False, 'from sklearn.utils import check_array, check_X_y\n'), ((23075, 23093), 'collections.Counter', 'Counter', (['inputs[n]'], {}), '(inputs[n])\n', (23082, 23093), False, 'from collections import defaultdict, Counter\n'), ((29446, 29496), 'sklearn.utils.check_X_y', 'check_X_y', (['X', 'y'], {'y_numeric': '(True)', 'multi_output': '(True)'}), '(X, y, y_numeric=True, multi_output=True)\n', (29455, 29496), False, 'from sklearn.utils import check_array, check_X_y\n'), ((29600, 29627), 'numpy.sqrt', 'np.sqrt', (['normalized_weights'], {}), '(normalized_weights)\n', (29607, 29627), True, 'import numpy as np\n'), ((30083, 30151), 'numpy.random.choice', 'np.random.choice', (['X.shape[0]'], {'size': 'data_length', 'p': 'normalized_weights'}), '(X.shape[0], size=data_length, p=normalized_weights)\n', (30099, 30151), True, 'import numpy as np\n'), ((32039, 32066), 'numpy.concatenate', 'np.concatenate', (['predictions'], {}), '(predictions)\n', (32053, 32066), True, 'import numpy as np\n'), ((32179, 32196), 'numpy.any', 'np.any', (['(scale > 0)'], {}), '(scale > 0)\n', (32185, 32196), True, 'import numpy as np\n'), ((33366, 33404), 'statsmodels.iolib.table.SimpleTable', 'SimpleTable', (['res', 'header', 'index', 'title'], {}), '(res, header, index, title)\n', (33377, 33404), False, 'from statsmodels.iolib.table import SimpleTable\n'), ((33930, 33976), 
'statsmodels.iolib.summary.summary_return', 'summary_return', (['self.tables'], {'return_fmt': '"""text"""'}), "(self.tables, return_fmt='text')\n", (33944, 33976), False, 'from statsmodels.iolib.summary import summary_return\n'), ((34481, 34528), 'statsmodels.iolib.summary.summary_return', 'summary_return', (['self.tables'], {'return_fmt': '"""latex"""'}), "(self.tables, return_fmt='latex')\n", (34495, 34528), False, 'from statsmodels.iolib.summary import summary_return\n'), ((34874, 34919), 'statsmodels.iolib.summary.summary_return', 'summary_return', (['self.tables'], {'return_fmt': '"""csv"""'}), "(self.tables, return_fmt='csv')\n", (34888, 34919), False, 'from statsmodels.iolib.summary import summary_return\n'), ((35220, 35266), 'statsmodels.iolib.summary.summary_return', 'summary_return', (['self.tables'], {'return_fmt': '"""html"""'}), "(self.tables, return_fmt='html')\n", (35234, 35266), False, 'from statsmodels.iolib.summary import summary_return\n'), ((35940, 35961), 'numpy.zeros', 'np.zeros', (['XZ.shape[0]'], {}), '(XZ.shape[0])\n', (35948, 35961), True, 'import numpy as np\n'), ((36202, 36254), 'numpy.concatenate', 'np.concatenate', (['(model.coef_ for model in self.models)'], {}), '(model.coef_ for model in self.models)\n', (36216, 36254), True, 'import numpy as np\n'), ((38564, 38578), 'functools.wraps', 'wraps', (['to_wrap'], {}), '(to_wrap)\n', (38569, 38578), False, 'from functools import reduce, wraps\n'), ((1841, 1872), 'numpy.reshape', 'np.reshape', (['intercept', '(d_y + dt)'], {}), '(intercept, d_y + dt)\n', (1851, 1872), True, 'import numpy as np\n'), ((1914, 1937), 'numpy.ndim', 'np.ndim', (['cate_intercept'], {}), '(cate_intercept)\n', (1921, 1937), True, 'import numpy as np\n'), ((4162, 4171), 'sparse.COO', 'sp.COO', (['X'], {}), '(X)\n', (4168, 4171), True, 'import sparse as sp\n'), ((4765, 4776), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (4773, 4776), True, 'import numpy as np\n'), ((7441, 7467), 'sparse.tensordot', 'sp.tensordot', (['X1', 'X2', 'axes'], {}), '(X1, X2, axes)\n', (7453, 7467), True, 'import sparse as sp\n'), ((7486, 7512), 'numpy.tensordot', 'np.tensordot', (['X1', 'X2', 'axes'], {}), '(X1, X2, axes)\n', (7498, 7512), True, 'import numpy as np\n'), ((8533, 8556), 'functools.reduce', 'reduce', (['np.multiply', 'XS'], {}), '(np.multiply, XS)\n', (8539, 8556), False, 'from functools import reduce, wraps\n'), ((9305, 9328), 'sparse.stack', 'sp.stack', (['XS'], {'axis': 'axis'}), '(XS, axis=axis)\n', (9313, 9328), True, 'import sparse as sp\n'), ((9350, 9373), 'numpy.stack', 'np.stack', (['XS'], {'axis': 'axis'}), '(XS, axis=axis)\n', (9358, 9373), True, 'import numpy as np\n'), ((9952, 9981), 'sparse.concatenate', 'sp.concatenate', (['XS'], {'axis': 'axis'}), '(XS, axis=axis)\n', (9966, 9981), True, 'import sparse as sp\n'), ((10003, 10032), 'numpy.concatenate', 'np.concatenate', (['XS'], {'axis': 'axis'}), '(XS, axis=axis)\n', (10017, 10032), True, 'import numpy as np\n'), ((11940, 11961), 'numpy.transpose', 'np.transpose', (['X', 'axes'], {}), '(X, axes)\n', (11952, 11961), True, 'import numpy as np\n'), ((12290, 12314), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (12297, 12314), True, 'import numpy as np\n'), ((15176, 15241), 'sklearn.utils.check_array', 'check_array', (['arg'], {'dtype': 'None', 'ensure_2d': '(False)', 'accept_sparse': '(True)'}), '(arg, dtype=None, ensure_2d=False, accept_sparse=True)\n', (15187, 15241), False, 'from sklearn.utils import check_array, check_X_y\n'), ((28212, 28369), 
'warnings.warn', 'warnings.warn', (['"""The model provided does not support sample weights. Manual weighted sampling may icrease the variance in the results."""', 'UserWarning'], {}), "(\n 'The model provided does not support sample weights. Manual weighted sampling may icrease the variance in the results.'\n , UserWarning)\n", (28225, 28369), False, 'import warnings\n'), ((29555, 29576), 'numpy.sum', 'np.sum', (['sample_weight'], {}), '(sample_weight)\n', (29561, 29576), True, 'import numpy as np\n'), ((29934, 29955), 'numpy.sum', 'np.sum', (['sample_weight'], {}), '(sample_weight)\n', (29940, 29955), True, 'import numpy as np\n'), ((35690, 35702), 'sklearn.clone', 'clone', (['model'], {}), '(model)\n', (35695, 35702), False, 'from sklearn import clone\n'), ((36059, 36071), 'numpy.any', 'np.any', (['inds'], {}), '(inds)\n', (36065, 36071), True, 'import numpy as np\n'), ((37588, 37611), 'functools.wraps', 'wraps', (['to_wrap.__init__'], {}), '(to_wrap.__init__)\n', (37593, 37611), False, 'from functools import reduce, wraps\n'), ((37847, 37861), 'functools.wraps', 'wraps', (['to_wrap'], {}), '(to_wrap)\n', (37852, 37861), False, 'from functools import reduce, wraps\n'), ((41272, 41315), 'sklearn.linear_model.MultiTaskLassoCV', 'MultiTaskLassoCV', (['*self.args'], {}), '(*self.args, **self.kwargs)\n', (41288, 41315), False, 'from sklearn.linear_model import LassoCV, MultiTaskLassoCV, Lasso, MultiTaskLasso\n'), ((41479, 41513), 'sklearn.linear_model.LassoCV', 'LassoCV', (['*self.args'], {}), '(*self.args, **self.kwargs)\n', (41486, 41513), False, 'from sklearn.linear_model import LassoCV, MultiTaskLassoCV, Lasso, MultiTaskLasso\n'), ((2491, 2501), 'numpy.ndim', 'np.ndim', (['T'], {}), '(T)\n', (2498, 2501), True, 'import numpy as np\n'), ((3149, 3177), 'numpy.arange', 'np.arange', (['(1)', '(T.shape[1] + 1)'], {}), '(1, T.shape[1] + 1)\n', (3158, 3177), True, 'import numpy as np\n'), ((4222, 4249), 'sparse.COO.from_scipy_sparse', 'sp.COO.from_scipy_sparse', (['X'], {}), '(X)\n', (4246, 4249), True, 'import sparse as sp\n'), ((4275, 4295), 'sparse.COO.from_numpy', 'sp.COO.from_numpy', (['X'], {}), '(X)\n', (4292, 4295), True, 'import sparse as sp\n'), ((15257, 15269), 'numpy.ndim', 'np.ndim', (['arg'], {}), '(arg)\n', (15264, 15269), True, 'import numpy as np\n'), ((15378, 15390), 'numpy.ndim', 'np.ndim', (['arg'], {}), '(arg)\n', (15385, 15390), True, 'import numpy as np\n'), ((17094, 17119), 'sklearn.clone', 'clone', (['models'], {'safe': '(False)'}), '(models, safe=False)\n', (17099, 17119), False, 'from sklearn import clone\n'), ((26225, 26239), 'sklearn.model_selection.GroupKFold', 'GroupKFold', (['cv'], {}), '(cv)\n', (26235, 26239), False, 'from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold\n'), ((37671, 37708), 'warnings.warn', 'warn', (['message', 'category'], {'stacklevel': '(2)'}), '(message, category, stacklevel=2)\n', (37675, 37708), False, 'from warnings import warn\n'), ((37914, 37951), 'warnings.warn', 'warn', (['message', 'category'], {'stacklevel': '(2)'}), '(message, category, stacklevel=2)\n', (37918, 37951), False, 'from warnings import warn\n'), ((39024, 39061), 'warnings.warn', 'warn', (['message', 'category'], {'stacklevel': '(2)'}), '(message, category, stacklevel=2)\n', (39028, 39061), False, 'from warnings import warn\n'), ((40430, 40458), 'numpy.arange', 'np.arange', (['(1)', '(y.shape[1] + 1)'], {}), '(1, y.shape[1] + 1)\n', (40439, 40458), True, 'import numpy as np\n'), ((41400, 41411), 'numpy.ravel', 'np.ravel', (['Y'], {}), '(Y)\n', (41408, 
41411), True, 'import numpy as np\n'), ((38778, 38790), 'inspect.signature', 'signature', (['m'], {}), '(m)\n', (38787, 38790), False, 'from inspect import signature\n'), ((29990, 30040), 'numpy.min', 'np.min', (['normalized_weights[normalized_weights > 0]'], {}), '(normalized_weights[normalized_weights > 0])\n', (29996, 30040), True, 'import numpy as np\n'), ((2331, 2360), 'sklearn.clone', 'clone', (['featurizer'], {'safe': '(False)'}), '(featurizer, safe=False)\n', (2336, 2360), False, 'from sklearn import clone\n'), ((31960, 31976), 'numpy.nonzero', 'np.nonzero', (['t[i]'], {}), '(t[i])\n', (31970, 31976), True, 'import numpy as np\n')]
|
import os
import dialogflow
from google.api_core.exceptions import InvalidArgument
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = 'private_key.json'
DIALOGFLOW_PROJECT_ID = 'smartcar-sykc'
DIALOGFLOW_LANGUAGE_CODE = 'en'
SESSION_ID = 'me'
def doDialogFlow(text_to_be_analyzed):
session_client = dialogflow.SessionsClient()
session = session_client.session_path(DIALOGFLOW_PROJECT_ID, SESSION_ID)
text_input = dialogflow.types.TextInput(text=text_to_be_analyzed, language_code=DIALOGFLOW_LANGUAGE_CODE)
query_input = dialogflow.types.QueryInput(text=text_input)
try:
response = session_client.detect_intent(session=session, query_input=query_input)
except InvalidArgument:
raise
print("Query text:", response.query_result.query_text)
print("Detected intent:", response.query_result.intent.display_name)
print("Detected intent confidence:", response.query_result.intent_detection_confidence)
print("Fulfillment text:", response.query_result.fulfillment_text)
return response.query_result.fulfillment_text
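# Illustrative usage (the phrase below is hypothetical): a single call sends one
# detect-intent request for the configured project and session and returns the
# agent's fulfillment text.
#
#   reply = doDialogFlow("turn on the air conditioning")
#   print(reply)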
|
[
"dialogflow.SessionsClient",
"dialogflow.types.TextInput",
"dialogflow.types.QueryInput"
] |
[((303, 330), 'dialogflow.SessionsClient', 'dialogflow.SessionsClient', ([], {}), '()\n', (328, 330), False, 'import dialogflow\n'), ((425, 522), 'dialogflow.types.TextInput', 'dialogflow.types.TextInput', ([], {'text': 'text_to_be_analyzed', 'language_code': 'DIALOGFLOW_LANGUAGE_CODE'}), '(text=text_to_be_analyzed, language_code=\n DIALOGFLOW_LANGUAGE_CODE)\n', (451, 522), False, 'import dialogflow\n'), ((536, 580), 'dialogflow.types.QueryInput', 'dialogflow.types.QueryInput', ([], {'text': 'text_input'}), '(text=text_input)\n', (563, 580), False, 'import dialogflow\n')]
|
import datetime
import time
import logging
from ..db.models import TradingOrder
logger = logging.getLogger(__name__)
class Order:
"""Class that represents an order. Order executes on instantiation by a thread pool"""
def __init__(self, market, side, type, amount, price, session=None):
self.market = market
if session is not None:
self.session = session()
self.side = side
self.type = type
self.amount = amount
self.price = price
self.__order_receipt = None
logger.info("Opening " + side + " order of " + amount + " " + self.market.base_currency)
self.execute()
def execute(self):
if self.type == "limit":
if self.side == "buy":
self.__order_receipt = self.market.exchange.create_limit_buy_order(self.market.analysis_pair, self.amount, self.price)
order = TradingOrder(
exchange=self.market.exchange.id,
strategy_id=self.market.strategy.strategy_id,
run_key=self.market.strategy.run_key,
pair=self.market.analysis_pair,
position='long',
amount=self.amount,
price=self.price,
simulated="live"
)
self.session.add(order)
self.session.commit()
self.session.close()
elif self.side == "sell":
self.__order_receipt = self.market.exchange.create_limit_sell_order(self.market.analysis_pair, self.amount, self.price)
order = TradingOrder(
exchange=self.market.exchange.id,
strategy_id=self.market.strategy.strategy_id,
run_key=self.market.strategy.run_key,
pair=self.market.analysis_pair,
position='short',
amount=self.amount,
price=self.price,
simulated="live"
)
self.session.add(order)
self.session.commit()
self.session.close()
else:
logger.error("Invalid order side: " + self.side + ", specify 'buy' or 'sell' ")
elif self.type == "market":
logger.error("Market orders not available")
else:
logger.error("Invalid order type: " + self.type + ", specify 'limit' or 'market' ")
def get_id(self):
return self.__order_receipt.get().id
def cancel(self):
try:
self.market.exchange.cancel_order(self.get_id())
        except Exception:
            logger.error("Order cannot be canceled. It may already have been filled")
def is_open(self):
return self.market.exchange.fetch_order(self.get_id())['remaining'] > 0
def get_status(self):
return self.market.exchange.fetch_order(self.get_id())['status']
def get_amount_filled(self):
return self.market.exchange.fetch_order(self.get_id())['filled']
def get_amount_remaining(self):
return self.market.exchange.fetch_order(self.get_id())['remaining']
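# Usage sketch (illustrative only -- not part of the original module).
# `market` must supply a ccxt-style `exchange`, an `analysis_pair`, a
# `strategy` and a `base_currency`; `Session` must be a SQLAlchemy
# session factory:
#
#   order = Order(market, side="buy", type="limit",
#                 amount=0.1, price=9500.0, session=Session)
#   if order.is_open():
#       order.cancel()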
|
[
"logging.getLogger"
] |
[((90, 117), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (107, 117), False, 'import logging\n')]
|
from typing import Any, Tuple, Union
import numpy as np
import pandas as pd
def named_aggregate_summary(series: pd.Series, key: str):
summary = {
f"max_{key}": np.max(series),
f"mean_{key}": np.mean(series),
f"median_{key}": np.median(series),
f"min_{key}": np.min(series),
}
return summary
def mad(arr, m=None):
""" Median Absolute Deviation: a "Robust" version of standard deviation.
        Indicates variability of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation
"""
if m is None:
m = np.median(arr)
return np.median(np.abs(arr - m))
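# Worked example (illustrative): for arr = [1, 2, 3, 4, 100] the median is 3
# and the absolute deviations are [2, 1, 0, 1, 97], so mad(arr) returns 1.0 --
# the outlier 100 barely moves it, unlike the standard deviation.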
def numerical_summary(
series: pd.Series,
quantiles=(0.05, 0.25, 0.5, 0.75, 0.95),
count=None,
is_unique=None,
return_values=False,
) -> Union[dict, Tuple[dict, Any]]:
"""
Args:
series: series to summarize
Returns:
"""
if count is None:
count = series.count()
values = series.values
present_values = values[~np.isnan(values)]
finite_mask = np.isfinite(present_values)
finite_values = present_values[finite_mask]
summary = {
"mean": np.mean(present_values),
"std": np.std(present_values, ddof=1),
"min": np.min(present_values),
"max": np.max(present_values),
# Unbiased kurtosis obtained using Fisher's definition (kurtosis of normal == 0.0). Normalized by N-1.
"kurt": series.kurt(),
# Unbiased skew normalized by N-1
"skew": series.skew(),
"sum": np.sum(present_values),
"n_infinite": (~finite_mask).sum(),
"n_zeros": (count - np.count_nonzero(present_values)),
}
for percentile, value in series.quantile(quantiles).to_dict().items():
summary["quantile_{:d}".format(int(percentile * 100))] = value
summary["median"] = summary["quantile_50"]
summary["iqr"] = summary["quantile_75"] - summary["quantile_25"]
summary["mad"] = mad(present_values, summary["quantile_50"])
summary["variance"] = summary["std"] ** 2
summary["cv"] = summary["std"] / summary["mean"] if summary["mean"] else np.NaN
summary["range"] = summary["max"] - summary["min"]
summary["monotonic_increase"] = series.is_monotonic_increasing
summary["monotonic_decrease"] = series.is_monotonic_decreasing
summary["monotonic_increase_strict"] = (
summary["monotonic_increase"] and series.is_unique
)
summary["monotonic_decrease_strict"] = (
summary["monotonic_decrease"] and series.is_unique
)
if return_values:
return summary, finite_values
return summary
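# Quick usage sketch (hypothetical data, not part of the original module):
if __name__ == "__main__":
    s = pd.Series([1.0, 2.0, 2.0, 3.0, 10.0])
    stats = numerical_summary(s)
    print(stats["median"], stats["iqr"], stats["mad"], stats["n_zeros"])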
|
[
"numpy.abs",
"numpy.sum",
"numpy.count_nonzero",
"numpy.median",
"numpy.std",
"numpy.isfinite",
"numpy.isnan",
"numpy.max",
"numpy.mean",
"numpy.min"
] |
[((1053, 1080), 'numpy.isfinite', 'np.isfinite', (['present_values'], {}), '(present_values)\n', (1064, 1080), True, 'import numpy as np\n'), ((175, 189), 'numpy.max', 'np.max', (['series'], {}), '(series)\n', (181, 189), True, 'import numpy as np\n'), ((214, 229), 'numpy.mean', 'np.mean', (['series'], {}), '(series)\n', (221, 229), True, 'import numpy as np\n'), ((256, 273), 'numpy.median', 'np.median', (['series'], {}), '(series)\n', (265, 273), True, 'import numpy as np\n'), ((297, 311), 'numpy.min', 'np.min', (['series'], {}), '(series)\n', (303, 311), True, 'import numpy as np\n'), ((585, 599), 'numpy.median', 'np.median', (['arr'], {}), '(arr)\n', (594, 599), True, 'import numpy as np\n'), ((621, 636), 'numpy.abs', 'np.abs', (['(arr - m)'], {}), '(arr - m)\n', (627, 636), True, 'import numpy as np\n'), ((1162, 1185), 'numpy.mean', 'np.mean', (['present_values'], {}), '(present_values)\n', (1169, 1185), True, 'import numpy as np\n'), ((1202, 1232), 'numpy.std', 'np.std', (['present_values'], {'ddof': '(1)'}), '(present_values, ddof=1)\n', (1208, 1232), True, 'import numpy as np\n'), ((1249, 1271), 'numpy.min', 'np.min', (['present_values'], {}), '(present_values)\n', (1255, 1271), True, 'import numpy as np\n'), ((1288, 1310), 'numpy.max', 'np.max', (['present_values'], {}), '(present_values)\n', (1294, 1310), True, 'import numpy as np\n'), ((1542, 1564), 'numpy.sum', 'np.sum', (['present_values'], {}), '(present_values)\n', (1548, 1564), True, 'import numpy as np\n'), ((1017, 1033), 'numpy.isnan', 'np.isnan', (['values'], {}), '(values)\n', (1025, 1033), True, 'import numpy as np\n'), ((1638, 1670), 'numpy.count_nonzero', 'np.count_nonzero', (['present_values'], {}), '(present_values)\n', (1654, 1670), True, 'import numpy as np\n')]
|
"""Get NWP data for NWCSAF
"""
import os
import sys
import argparse
import pandas
from .. import sky
from sattools import log
def get_parser():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"date", action="store", type=pandas.Period,
help="Date to download, ISO 8601 format")
return parser
def getnwp(dt):
sky.verify_period(dt)
safnwc = os.getenv("SAFNWC")
if not safnwc:
sys.exit("Environment variable SAFNWC not set, exiting")
sky.get_and_send(safnwc, dt)
def main():
p = get_parser().parse_args()
log.setup_main_handler()
getnwp(p.date)
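# Example invocation (illustrative -- assumes this module is exposed as a
# console script, here called "get-nwp", and that SAFNWC is set):
#
#   $ get-nwp 2021-03-01
#
# The date argument is parsed by pandas.Period, so ISO 8601 strings work.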
|
[
"sys.exit",
"os.getenv",
"argparse.ArgumentParser",
"sattools.log.setup_main_handler"
] |
[((161, 266), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n', (184, 266), False, 'import argparse\n'), ((499, 518), 'os.getenv', 'os.getenv', (['"""SAFNWC"""'], {}), "('SAFNWC')\n", (508, 518), False, 'import os\n'), ((688, 712), 'sattools.log.setup_main_handler', 'log.setup_main_handler', ([], {}), '()\n', (710, 712), False, 'from sattools import log\n'), ((546, 602), 'sys.exit', 'sys.exit', (['"""Environment variable SAFNWC not set, exiting"""'], {}), "('Environment variable SAFNWC not set, exiting')\n", (554, 602), False, 'import sys\n')]
|
#!/usr/bin/env python
import io
import json
import os
import re
import setuptools
import setuptools.command.build_py
import distutils.command.build
node_dependencies = [
( 'netron', [
'node_modules/d3/dist/d3.js',
'node_modules/dagre/dist/dagre.js'
]
)
]
class build(distutils.command.build.build):
user_options = distutils.command.build.build.user_options + [ ('version', None, 'version' ) ]
def initialize_options(self):
distutils.command.build.build.initialize_options(self)
self.version = None
def finalize_options(self):
distutils.command.build.build.finalize_options(self)
def run(self):
build_py.version = bool(self.version)
return distutils.command.build.build.run(self)
class build_py(setuptools.command.build_py.build_py):
user_options = setuptools.command.build_py.build_py.user_options + [ ('version', None, 'version' ) ]
def initialize_options(self):
setuptools.command.build_py.build_py.initialize_options(self)
self.version = None
def finalize_options(self):
setuptools.command.build_py.build_py.finalize_options(self)
def run(self):
setuptools.command.build_py.build_py.run(self)
for target, files in node_dependencies:
target = os.path.join(self.build_lib, target)
if not os.path.exists(target):
os.makedirs(target)
for file in files:
self.copy_file(file, target)
if build_py.version:
for _, _, build_dir, filenames in self.data_files:
for filename in filenames:
if filename == 'index.html':
filepath = os.path.join(build_dir, filename)
with open(filepath, 'r') as reader:
content = reader.read()
content = re.sub(r'(<meta name="version" content=")\d+.\d+.\d+(">)', r'\g<1>' + package_version() + r'\g<2>', content)
with open(filepath, 'w') as writer:
writer.write(content)
def build_module(self, module, module_file, package):
setuptools.command.build_py.build_py.build_module(self, module, module_file, package)
if build_py.version and module == '__version__':
outfile = self.get_module_outfile(self.build_lib, package.split('.'), module)
with open(outfile, 'w+') as writer:
writer.write("__version__ = '" + package_version() + "'\n")
def package_version():
with open('./package.json') as reader:
manifest = json.load(reader)
return manifest['version']
setuptools.setup(
name="netron",
version=package_version(),
description="Viewer for neural network, deep learning, and machine learning models",
long_description='Netron is a viewer for neural network, deep learning, and machine learning models.\n\n' +
'Netron supports **ONNX** (`.onnx`, `.pb`, `.pbtxt`), **TensorFlow Lite** (`.tflite`), **Caffe** (`.caffemodel`, `.prototxt`), **Darknet** (`.cfg`), **Core ML** (`.mlmodel`, `.mlpackage`), **Keras** (`.h5`, `.keras`), **MNN** (`.mnn`), **MXNet** (`.model`, `.json`), **ncnn** (`.param`), **PaddlePaddle** (`.zip`, `.pdmodel`), **Caffe2** (`predict_net.pb`), **Barracuda** (`.nn`), **Tengine** (`.tmfile`), **TNN** (`.tnnproto`), **RKNN** (`.rknn`), **MindSpore Lite** (`.ms`) and **UFF** (`.uff`). Netron has experimental support for **PyTorch** (`.pt`, `.pth`), **TorchScript** (`.pt`, `.pth`), **TensorFlow.js** (`model.json`, `.pb`), **TensorFlow** (`.pb`, `.meta`, `.pbtxt`, `.ckpt`, `.index`), **Torch** (`.t7`), **OpenVINO** (`.xml`), **ArmNN** (`.armnn`), **BigDL** (`.bigdl`, `.model`), **Chainer** (`.npz`, `.h5`), **CNTK** (`.model`, `.cntk`), **Deeplearning4j** (`.zip`), **MediaPipe** (`.pbtxt`), **ML.NET** (`.zip`), and **scikit-learn** (`.pkl`)',
keywords=[
'onnx', 'keras', 'tensorflow', 'tflite', 'coreml', 'mxnet', 'caffe', 'caffe2', 'torchscript', 'pytorch', 'ncnn', 'mnn', 'openvino', 'darknet', 'paddlepaddle', 'chainer',
'artificial intelligence', 'machine learning', 'deep learning', 'neural network',
'visualizer', 'viewer'
],
license="MIT",
cmdclass={
'build': build,
'build_py': build_py
},
package_dir={
'netron': 'source'
},
packages=[
'netron'
],
package_data={ 'netron': [ '*.*' ] },
exclude_package_data={ 'netron': [ 'app.js', 'electron.*' ] },
install_requires=[],
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/lutzroeder/netron',
entry_points={
'console_scripts': [ 'netron = netron:main' ]
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Visualization'
],
options={
'build': {
'build_base': './dist',
'build_lib': './dist/lib'
},
'bdist_wheel': {
'universal': 1,
'dist_dir': './dist/dist'
},
'egg_info': {
'egg_base': './dist'
}
}
)
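# Typical invocations (illustrative). The custom --version flag defined above
# stamps the package.json version into __version__ and index.html:
#
#   python setup.py build
#   python setup.py build --version bdist_wheel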
|
[
"json.load",
"os.makedirs",
"setuptools.command.build_py.build_py.finalize_options",
"setuptools.command.build_py.build_py.build_module",
"os.path.exists",
"setuptools.command.build_py.build_py.initialize_options",
"setuptools.command.build_py.build_py.run",
"os.path.join"
] |
[((970, 1031), 'setuptools.command.build_py.build_py.initialize_options', 'setuptools.command.build_py.build_py.initialize_options', (['self'], {}), '(self)\n', (1025, 1031), False, 'import setuptools\n'), ((1100, 1159), 'setuptools.command.build_py.build_py.finalize_options', 'setuptools.command.build_py.build_py.finalize_options', (['self'], {}), '(self)\n', (1153, 1159), False, 'import setuptools\n'), ((1187, 1233), 'setuptools.command.build_py.build_py.run', 'setuptools.command.build_py.build_py.run', (['self'], {}), '(self)\n', (1227, 1233), False, 'import setuptools\n'), ((2179, 2268), 'setuptools.command.build_py.build_py.build_module', 'setuptools.command.build_py.build_py.build_module', (['self', 'module', 'module_file', 'package'], {}), '(self, module, module_file,\n package)\n', (2228, 2268), False, 'import setuptools\n'), ((2622, 2639), 'json.load', 'json.load', (['reader'], {}), '(reader)\n', (2631, 2639), False, 'import json\n'), ((1303, 1339), 'os.path.join', 'os.path.join', (['self.build_lib', 'target'], {}), '(self.build_lib, target)\n', (1315, 1339), False, 'import os\n'), ((1359, 1381), 'os.path.exists', 'os.path.exists', (['target'], {}), '(target)\n', (1373, 1381), False, 'import os\n'), ((1399, 1418), 'os.makedirs', 'os.makedirs', (['target'], {}), '(target)\n', (1410, 1418), False, 'import os\n'), ((1714, 1747), 'os.path.join', 'os.path.join', (['build_dir', 'filename'], {}), '(build_dir, filename)\n', (1726, 1747), False, 'import os\n')]
|
import boto3
import json
import sys
client = boto3.client('codecommit', region_name=sys.argv[1])
results = client.list_repositories()
for item in results['repositories']:
branch_results = client.list_branches(repositoryName=item['repositoryName'])
print(f"{item['repositoryName']}: {len(branch_results['branches'])} branch(es)")
next_token = results['nextToken'] if 'nextToken' in results else None
while next_token is not None:
results = client.list_repositories(nextToken=next_token)
for item in results['repositories']:
branch_results = client.list_branches(repositoryName=item['repositoryName'])
print(f"{item['repositoryName']}: {len(branch_results['branches'])} branch(es)")
next_token = results['nextToken'] if 'nextToken' in results else None
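# The manual nextToken loop above can also be written with boto3's built-in
# paginator, which hides the token bookkeeping. Equivalent sketch (commented
# out, not part of the original script):
#
# paginator = client.get_paginator('list_repositories')
# for page in paginator.paginate():
#     for item in page['repositories']:
#         branch_results = client.list_branches(repositoryName=item['repositoryName'])
#         print(f"{item['repositoryName']}: {len(branch_results['branches'])} branch(es)")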
|
[
"boto3.client"
] |
[((47, 98), 'boto3.client', 'boto3.client', (['"""codecommit"""'], {'region_name': 'sys.argv[1]'}), "('codecommit', region_name=sys.argv[1])\n", (59, 98), False, 'import boto3\n')]
|
import pytest
from heedy import App
# These tests cover bug fixes to heedy, making sure that things keep working
def test_appscope():
# Makes sure that removing owner's access doesn't affect the App's access
a = App("testkey")
o = a.objects.create("myobj")
assert o.access == "*"
o.key = "lol"
o.owner_scopes = "read"
assert o.read()["key"] == "lol"
o.key = "hiya"
assert o.read()["key"] == "hiya"
def test_metamod():
a = App("testkey")
o = a.objects.create("myobj", otype="timeseries")
o.meta = {"schema": {"type": "number"}}
assert o.cached_data["meta"]["schema"]["type"] == "number"
assert o.cached_data["meta"]["actor"] == False
assert o.meta["schema"]["type"] == "number"
assert o.meta["actor"] == False
|
[
"heedy.App"
] |
[((222, 236), 'heedy.App', 'App', (['"""testkey"""'], {}), "('testkey')\n", (225, 236), False, 'from heedy import App\n'), ((466, 480), 'heedy.App', 'App', (['"""testkey"""'], {}), "('testkey')\n", (469, 480), False, 'from heedy import App\n')]
|
import itertools
import random
UnoccupiedEmoji = "🌲"
class Unoccupied:
def __repr__(self):
return UnoccupiedEmoji
class Park:
""" A class to represent the wildlife park which is being simulated.
Parameters
==========
width : `int`
The park's width.
height : `int`
The park's height.
Attributes
==========
    occupants : `list`
        A 2-D (height x width) grid holding the occupant of each cell,
        initialised with `Unoccupied` instances.
    coordinates : `list`
        A list of every (i, j) cell coordinate in the park, used when
        sampling random unoccupied cells.
"""
def __init__(self, width=5, height=5):
self.occupants = [
[Unoccupied() for _ in range(width)] for _ in range(height)
]
self.width = width
self.height = height
self.coordinates = list(itertools.product(range(height), range(width)))
def get_random_unoccupied_cell(self):
"""
Returns the coordinates of a random unoccupied cell.
"""
random.shuffle(self.coordinates)
for i, j in self.coordinates:
if self.occupants[i][j].__repr__() == UnoccupiedEmoji:
return i, j
return False
def get_neighbours(self, i, j, radius=1):
neighbours = []
for offset in range(min(self.width - j, radius + 1)):
for step in range(min(self.height - 1 - i, radius - offset) + 1):
if (step, offset) != (0, 0):
neighbours.append((i + step, j + offset))
for offset in range(min(self.width - j, radius + 1)):
for step in range(1, min(i, radius - offset) + 1):
if (step, offset) != (0, 0):
neighbours.append((i - step, j + offset))
for offset in range(1, min(j, radius + 1) + 1):
for step in range(min(self.height - 1 - i, radius - offset) + 1):
if (step, offset) != (0, 0):
neighbours.append((i + step, j - offset))
for offset in range(1, min(j, radius + 1) + 1):
for step in range(1, min(i, radius - offset) + 1):
if (step, offset) != (0, 0):
neighbours.append((i - step, j - offset))
return neighbours
def __len__(self):
return self.width * self.height
def __repr__(self):
repr = ""
for row in self.occupants:
repr += f"{str(row)}\n"
return repr
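# Small demo (illustrative):
if __name__ == "__main__":
    park = Park(width=3, height=3)
    print(park)                       # 3x3 grid of UnoccupiedEmoji cells
    print(park.get_neighbours(1, 1))  # cells within Manhattan distance 1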
|
[
"random.shuffle"
] |
[((1053, 1085), 'random.shuffle', 'random.shuffle', (['self.coordinates'], {}), '(self.coordinates)\n', (1067, 1085), False, 'import random\n')]
|
from wcf import train, TrainValConfigBase, val, t, EasyTransform, models_names
from wk import PointDict
from vtgui.app import make_app, SelectDir, SelectFile, VirtualField
import sys
def make_trainval_config(
cfg, data_cfg
):
cfg=PointDict(**cfg)
data_cfg=PointDict(**data_cfg)
class Config(TrainValConfigBase):
MODEL_TYPE = cfg.MODEL_TYPE
GEN_CLASSES_FILE = cfg.GEN_CLASSES_FILE
USE_tqdm_TRAIN = cfg.USE_tqdm_TRAIN
INPUT_SIZE = (cfg.INPUT_W, cfg.INPUT_H)
BATCH_SIZE = cfg.BATCH_SIZE
NUM_EPOCHS = cfg.NUM_EPOCHS
BALANCE_CLASSES = cfg.BALANCE_CLASSES
VAL_INTERVAL = cfg.VAL_INTERVAL
WEIGHTS_SAVE_INTERVAL = cfg.WEIGHTS_SAVE_INTERVAL
WEIGHTS_INIT = cfg.WEIGHTS_INIT
TRAIN_DIR = cfg.TRAIN_DIR
VAL_DIR = cfg.VAL_DIR
INPUT_W = cfg.INPUT_W
INPUT_H = cfg.INPUT_H
val_transform = EasyTransform([
t.Resize(INPUT_SIZE[::-1]),
t.SaveToDir(cfg.VISUALIZE_RESULT_DIR),
t.ToTensor(),
])
train_transform = EasyTransform(list(filter(lambda x:x is not None,[
t.ColorJitter(brightness=data_cfg.BRIGHTNESS, contrast=data_cfg.CONTRAST, saturation=data_cfg.SATURATION, hue=data_cfg.HUE),
t.RandomHorizontalFlip() if data_cfg.RandomHorizontalFlip else None,
t.RandomVerticalFlip() if data_cfg.RandomVerticalFlip else None,
t.RandomRotate(data_cfg.RandomRotate) if data_cfg.RandomRotate else None ,
t.RandomShear(data_cfg.RandomShear,data_cfg.RandomShear) if data_cfg.RandomShear else None,
t.RandomTranslate(data_cfg.RandomTranslate) if data_cfg.RandomTranslate else None,
t.RandomBlur(p=data_cfg.RandomBlur, radius=1) if data_cfg.RandomBlur else None,
t.RandomSPNoise(p=data_cfg.RandomSPNoise) if data_cfg.RandomSPNoise else None,
*val_transform,
])))
return Config
models = [
models_names.resnet10,
models_names.resnet18,
models_names.resnet50,
models_names.shufflenet_v2_x0_5,
models_names.shufflenet_v2_x1_0,
]
def get_base_config():
base_config = dict(
MODEL_TYPE=VirtualField(title='模型', description='选择模型', default='resnet18', options=models),
TRAIN_DIR=SelectDir('/home/ars/sda5/data/projects/烟分类/data/烟分类-train'),
VAL_DIR=SelectDir('/home/ars/sda5/data/projects/烟分类/data/烟分类-val'),
GEN_CLASSES_FILE=VirtualField(default=True, title='生成类别文件'),
USE_tqdm_TRAIN=True,
BATCH_SIZE=64,
NUM_EPOCHS=200,
BALANCE_CLASSES=True,
VAL_INTERVAL=1,
WEIGHTS_SAVE_INTERVAL=1,
WEIGHTS_INIT='weights/training/model_best.pkl',
INPUT_W=224,
INPUT_H=224,
VISUALIZE_RESULT_DIR='data/visualize',
)
return base_config
def get_data_config():
data_config = dict(
BRIGHTNESS=0.1,
CONTRAST=0.05,
SATURATION=0.05,
HUE=0.05,
RandomHorizontalFlip=False,
RandomVerticalFlip=False,
RandomRotate=0,
RandomShear=0,
RandomTranslate=0,
RandomBlur=0.3,
RandomSPNoise=0.3,
)
return data_config
def training_callback(base_cfg, data_config):
Config= make_trainval_config(base_cfg,data_config)
cfg=Config()
train(cfg)
def make_training_app():
base_config=get_base_config()
data_config=get_data_config()
app = make_app(function=training_callback, args=(base_config, data_config), columns=[4, 3])
return app
|
[
"wcf.t.RandomShear",
"wcf.t.RandomSPNoise",
"wcf.t.RandomTranslate",
"wcf.t.ColorJitter",
"vtgui.app.make_app",
"wcf.t.RandomVerticalFlip",
"wcf.t.RandomHorizontalFlip",
"wcf.t.SaveToDir",
"wcf.t.RandomRotate",
"wcf.t.ToTensor",
"vtgui.app.VirtualField",
"wcf.train",
"wk.PointDict",
"vtgui.app.SelectDir",
"wcf.t.RandomBlur",
"wcf.t.Resize"
] |
[((273, 289), 'wk.PointDict', 'PointDict', ([], {}), '(**cfg)\n', (282, 289), False, 'from wk import PointDict\n'), ((303, 324), 'wk.PointDict', 'PointDict', ([], {}), '(**data_cfg)\n', (312, 324), False, 'from wk import PointDict\n'), ((3356, 3366), 'wcf.train', 'train', (['cfg'], {}), '(cfg)\n', (3361, 3366), False, 'from wcf import train, TrainValConfigBase, val, t, EasyTransform, models_names\n'), ((3471, 3560), 'vtgui.app.make_app', 'make_app', ([], {'function': 'training_callback', 'args': '(base_config, data_config)', 'columns': '[4, 3]'}), '(function=training_callback, args=(base_config, data_config),\n columns=[4, 3])\n', (3479, 3560), False, 'from vtgui.app import make_app, SelectDir, SelectFile, VirtualField\n'), ((2226, 2311), 'vtgui.app.VirtualField', 'VirtualField', ([], {'title': '"""模型"""', 'description': '"""选择模型"""', 'default': '"""resnet18"""', 'options': 'models'}), "(title='模型', description='选择模型', default='resnet18', options=models\n )\n", (2238, 2311), False, 'from vtgui.app import make_app, SelectDir, SelectFile, VirtualField\n'), ((2326, 2386), 'vtgui.app.SelectDir', 'SelectDir', (['"""/home/ars/sda5/data/projects/烟分类/data/烟分类-train"""'], {}), "('/home/ars/sda5/data/projects/烟分类/data/烟分类-train')\n", (2335, 2386), False, 'from vtgui.app import make_app, SelectDir, SelectFile, VirtualField\n'), ((2404, 2462), 'vtgui.app.SelectDir', 'SelectDir', (['"""/home/ars/sda5/data/projects/烟分类/data/烟分类-val"""'], {}), "('/home/ars/sda5/data/projects/烟分类/data/烟分类-val')\n", (2413, 2462), False, 'from vtgui.app import make_app, SelectDir, SelectFile, VirtualField\n'), ((2489, 2531), 'vtgui.app.VirtualField', 'VirtualField', ([], {'default': '(True)', 'title': '"""生成类别文件"""'}), "(default=True, title='生成类别文件')\n", (2501, 2531), False, 'from vtgui.app import make_app, SelectDir, SelectFile, VirtualField\n'), ((971, 997), 'wcf.t.Resize', 't.Resize', (['INPUT_SIZE[::-1]'], {}), '(INPUT_SIZE[::-1])\n', (979, 997), False, 'from wcf import train, TrainValConfigBase, val, t, EasyTransform, models_names\n'), ((1011, 1048), 'wcf.t.SaveToDir', 't.SaveToDir', (['cfg.VISUALIZE_RESULT_DIR'], {}), '(cfg.VISUALIZE_RESULT_DIR)\n', (1022, 1048), False, 'from wcf import train, TrainValConfigBase, val, t, EasyTransform, models_names\n'), ((1062, 1074), 'wcf.t.ToTensor', 't.ToTensor', ([], {}), '()\n', (1072, 1074), False, 'from wcf import train, TrainValConfigBase, val, t, EasyTransform, models_names\n'), ((1176, 1303), 'wcf.t.ColorJitter', 't.ColorJitter', ([], {'brightness': 'data_cfg.BRIGHTNESS', 'contrast': 'data_cfg.CONTRAST', 'saturation': 'data_cfg.SATURATION', 'hue': 'data_cfg.HUE'}), '(brightness=data_cfg.BRIGHTNESS, contrast=data_cfg.CONTRAST,\n saturation=data_cfg.SATURATION, hue=data_cfg.HUE)\n', (1189, 1303), False, 'from wcf import train, TrainValConfigBase, val, t, EasyTransform, models_names\n'), ((1313, 1337), 'wcf.t.RandomHorizontalFlip', 't.RandomHorizontalFlip', ([], {}), '()\n', (1335, 1337), False, 'from wcf import train, TrainValConfigBase, val, t, EasyTransform, models_names\n'), ((1394, 1416), 'wcf.t.RandomVerticalFlip', 't.RandomVerticalFlip', ([], {}), '()\n', (1414, 1416), False, 'from wcf import train, TrainValConfigBase, val, t, EasyTransform, models_names\n'), ((1471, 1508), 'wcf.t.RandomRotate', 't.RandomRotate', (['data_cfg.RandomRotate'], {}), '(data_cfg.RandomRotate)\n', (1485, 1508), False, 'from wcf import train, TrainValConfigBase, val, t, EasyTransform, models_names\n'), ((1558, 1615), 'wcf.t.RandomShear', 't.RandomShear', (['data_cfg.RandomShear', 
'data_cfg.RandomShear'], {}), '(data_cfg.RandomShear, data_cfg.RandomShear)\n', (1571, 1615), False, 'from wcf import train, TrainValConfigBase, val, t, EasyTransform, models_names\n'), ((1662, 1705), 'wcf.t.RandomTranslate', 't.RandomTranslate', (['data_cfg.RandomTranslate'], {}), '(data_cfg.RandomTranslate)\n', (1679, 1705), False, 'from wcf import train, TrainValConfigBase, val, t, EasyTransform, models_names\n'), ((1757, 1802), 'wcf.t.RandomBlur', 't.RandomBlur', ([], {'p': 'data_cfg.RandomBlur', 'radius': '(1)'}), '(p=data_cfg.RandomBlur, radius=1)\n', (1769, 1802), False, 'from wcf import train, TrainValConfigBase, val, t, EasyTransform, models_names\n'), ((1849, 1890), 'wcf.t.RandomSPNoise', 't.RandomSPNoise', ([], {'p': 'data_cfg.RandomSPNoise'}), '(p=data_cfg.RandomSPNoise)\n', (1864, 1890), False, 'from wcf import train, TrainValConfigBase, val, t, EasyTransform, models_names\n')]
|
import unittest
import sys
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
class SeleniumScrape:
def run(self, vidId):
options = webdriver.ChromeOptions()
options.add_argument('headless')
driver = webdriver.Chrome('chromedriver/chromedriver', chrome_options=options)
# driver = webdriver.Chrome(chrome_options=options)
driver.get("https://www.youtube.com/watch?v={}".format(vidId))
#mute video
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, "//button[@class='ytp-mute-button ytp-button']"))).click()
# clicks more options dropdown on video
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, "/html[1]/body[1]/ytd-app[1]/div[1]/ytd-page-manager[1]/ytd-watch[1]/div[2]/div[2]/div[1]/div[6]/div[2]/ytd-video-primary-info-renderer[1]/div[1]/div[1]/div[3]/div[1]/ytd-menu-renderer[1]/button[1]/yt-icon[1]"))).click()
#clicks opentranscripts and clears timestamps
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, "//yt-formatted-string[@class='style-scope ytd-menu-service-item-renderer'][contains(text(),'Open transcript')]"))).click()
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, "/html[1]/body[1]/ytd-app[1]/div[1]/ytd-page-manager[1]/ytd-watch[1]/div[2]/div[2]/div[1]/ytd-transcript-loader[1]/div[1]/ytd-transcript-renderer[1]/div[1]/ytd-transcript-header-renderer[1]/div[1]/ytd-menu-renderer[1]/button[1]/yt-icon[1]"))).click()
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, "/html[1]/body[1]/ytd-app[1]/ytd-popup-container[1]/iron-dropdown[1]/div[1]/ytd-menu-popup-renderer[1]/paper-menu[1]/div[1]/ytd-menu-service-item-renderer[1]/yt-formatted-string[1]"))).click()
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, "/html[1]/body[1]/ytd-app[1]/div[1]/ytd-page-manager[1]/ytd-watch[1]/div[2]/div[2]/div[1]/ytd-transcript-loader[1]/div[1]/ytd-transcript-renderer[1]/div[2]/ytd-transcript-body-renderer[1]"))).click()
#grabs transcript from the video
text = driver.find_element_by_xpath("/html[1]/body[1]/ytd-app[1]/div[1]/ytd-page-manager[1]/ytd-watch[1]/div[2]/div[2]/div[1]/ytd-transcript-loader[1]/div[1]/ytd-transcript-renderer[1]/div[2]/ytd-transcript-body-renderer[1]").text
        # Write the transcript out; a context manager guarantees the file is closed.
        with open('raw_transcript.txt', 'w') as transcript_raw:
            transcript_raw.write(text)
driver.close()
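# Usage sketch (the video id is a placeholder; requires a matching
# chromedriver binary at chromedriver/chromedriver):
if __name__ == "__main__":
    SeleniumScrape().run("VIDEO_ID")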
|
[
"selenium.webdriver.support.expected_conditions.presence_of_element_located",
"selenium.webdriver.ChromeOptions",
"selenium.webdriver.support.ui.WebDriverWait",
"selenium.webdriver.Chrome"
] |
[((347, 372), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (370, 372), False, 'from selenium import webdriver\n'), ((431, 500), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (['"""chromedriver/chromedriver"""'], {'chrome_options': 'options'}), "('chromedriver/chromedriver', chrome_options=options)\n", (447, 500), False, 'from selenium import webdriver\n'), ((693, 788), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (['(By.XPATH, "//button[@class=\'ytp-mute-button ytp-button\']")'], {}), '((By.XPATH,\n "//button[@class=\'ytp-mute-button ytp-button\']"))\n', (723, 788), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((883, 1145), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.XPATH,\n '/html[1]/body[1]/ytd-app[1]/div[1]/ytd-page-manager[1]/ytd-watch[1]/div[2]/div[2]/div[1]/div[6]/div[2]/ytd-video-primary-info-renderer[1]/div[1]/div[1]/div[3]/div[1]/ytd-menu-renderer[1]/button[1]/yt-icon[1]'\n )"], {}), "((By.XPATH,\n '/html[1]/body[1]/ytd-app[1]/div[1]/ytd-page-manager[1]/ytd-watch[1]/div[2]/div[2]/div[1]/div[6]/div[2]/ytd-video-primary-info-renderer[1]/div[1]/div[1]/div[3]/div[1]/ytd-menu-renderer[1]/button[1]/yt-icon[1]'\n ))\n", (913, 1145), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1241, 1406), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (['(By.XPATH,\n "//yt-formatted-string[@class=\'style-scope ytd-menu-service-item-renderer\'][contains(text(),\'Open transcript\')]"\n )'], {}), '((By.XPATH,\n "//yt-formatted-string[@class=\'style-scope ytd-menu-service-item-renderer\'][contains(text(),\'Open transcript\')]"\n ))\n', (1271, 1406), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1447, 1739), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.XPATH,\n '/html[1]/body[1]/ytd-app[1]/div[1]/ytd-page-manager[1]/ytd-watch[1]/div[2]/div[2]/div[1]/ytd-transcript-loader[1]/div[1]/ytd-transcript-renderer[1]/div[1]/ytd-transcript-header-renderer[1]/div[1]/ytd-menu-renderer[1]/button[1]/yt-icon[1]'\n )"], {}), "((By.XPATH,\n '/html[1]/body[1]/ytd-app[1]/div[1]/ytd-page-manager[1]/ytd-watch[1]/div[2]/div[2]/div[1]/ytd-transcript-loader[1]/div[1]/ytd-transcript-renderer[1]/div[1]/ytd-transcript-header-renderer[1]/div[1]/ytd-menu-renderer[1]/button[1]/yt-icon[1]'\n ))\n", (1477, 1739), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1780, 2014), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.XPATH,\n '/html[1]/body[1]/ytd-app[1]/ytd-popup-container[1]/iron-dropdown[1]/div[1]/ytd-menu-popup-renderer[1]/paper-menu[1]/div[1]/ytd-menu-service-item-renderer[1]/yt-formatted-string[1]'\n )"], {}), "((By.XPATH,\n '/html[1]/body[1]/ytd-app[1]/ytd-popup-container[1]/iron-dropdown[1]/div[1]/ytd-menu-popup-renderer[1]/paper-menu[1]/div[1]/ytd-menu-service-item-renderer[1]/yt-formatted-string[1]'\n ))\n", (1810, 2014), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((2055, 2296), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.XPATH,\n 
'/html[1]/body[1]/ytd-app[1]/div[1]/ytd-page-manager[1]/ytd-watch[1]/div[2]/div[2]/div[1]/ytd-transcript-loader[1]/div[1]/ytd-transcript-renderer[1]/div[2]/ytd-transcript-body-renderer[1]'\n )"], {}), "((By.XPATH,\n '/html[1]/body[1]/ytd-app[1]/div[1]/ytd-page-manager[1]/ytd-watch[1]/div[2]/div[2]/div[1]/ytd-transcript-loader[1]/div[1]/ytd-transcript-renderer[1]/div[2]/ytd-transcript-body-renderer[1]'\n ))\n", (2085, 2296), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((661, 686), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (674, 686), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((851, 876), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (864, 876), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((1209, 1234), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (1222, 1234), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((1415, 1440), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (1428, 1440), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((1748, 1773), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (1761, 1773), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((2023, 2048), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (2036, 2048), False, 'from selenium.webdriver.support.ui import WebDriverWait\n')]
|
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from shapely.geometry import LineString
from eclipse_gis import eclipse_gis
from BeautifulSoup import BeautifulSoup
import shapefile
from shapely.geometry import shape
def parse_html(eclipse_path_html):
doc = BeautifulSoup(eclipse_path_html, convertEntities=BeautifulSoup.HTML_ENTITIES)
data = doc.body.find('pre').text.replace("\r", "\n").split("\n")
times, points = eclipse_gis.load_data(data)
return times, points
def load_map(map_path):
sh = shapefile.Reader(map_path)
feature = sh.shapeRecords()[0]
first = feature.shape.__geo_interface__
shp_geom = shape(first)
return shp_geom
def points_to_latlong(points, color='r'):
lats = []
lons = []
colors = []
for point in points:
lat = point[0]
lon = point[1]
colors.append(color)
lats.append(lat)
lons.append(lon)
return lats, lons, colors
def us_shape_to_points(main_us):
points = []
for line in main_us.geoms:
for point in line.coords:
points.append((point[1], point[0]))
return points
def load_path(eclipse_path_data):
times, points = eclipse_gis.load_stripped_data(open(eclipse_path_data).readlines())
boundary, center = eclipse_gis.generate_polygon(points)
eg = eclipse_gis.EclipseGIS(boundary, center)
return eg
|
[
"BeautifulSoup.BeautifulSoup",
"eclipse_gis.eclipse_gis.generate_polygon",
"eclipse_gis.eclipse_gis.EclipseGIS",
"eclipse_gis.eclipse_gis.load_data",
"shapely.geometry.shape",
"shapefile.Reader"
] |
[((792, 869), 'BeautifulSoup.BeautifulSoup', 'BeautifulSoup', (['eclipse_path_html'], {'convertEntities': 'BeautifulSoup.HTML_ENTITIES'}), '(eclipse_path_html, convertEntities=BeautifulSoup.HTML_ENTITIES)\n', (805, 869), False, 'from BeautifulSoup import BeautifulSoup\n'), ((960, 987), 'eclipse_gis.eclipse_gis.load_data', 'eclipse_gis.load_data', (['data'], {}), '(data)\n', (981, 987), False, 'from eclipse_gis import eclipse_gis\n'), ((1047, 1073), 'shapefile.Reader', 'shapefile.Reader', (['map_path'], {}), '(map_path)\n', (1063, 1073), False, 'import shapefile\n'), ((1168, 1180), 'shapely.geometry.shape', 'shape', (['first'], {}), '(first)\n', (1173, 1180), False, 'from shapely.geometry import shape\n'), ((1795, 1831), 'eclipse_gis.eclipse_gis.generate_polygon', 'eclipse_gis.generate_polygon', (['points'], {}), '(points)\n', (1823, 1831), False, 'from eclipse_gis import eclipse_gis\n'), ((1841, 1881), 'eclipse_gis.eclipse_gis.EclipseGIS', 'eclipse_gis.EclipseGIS', (['boundary', 'center'], {}), '(boundary, center)\n', (1863, 1881), False, 'from eclipse_gis import eclipse_gis\n')]
|
from pyxdsm.XDSM import XDSM, OPT, SOLVER, FUNC
# Change `use_sfmath` to False to use computer modern
x = XDSM(use_sfmath=True)
x.add_system("opt", OPT, r"\text{Optimizer}")
x.add_system("solver", SOLVER, r"\text{Newton}")
x.add_system("D1", FUNC, "D_1")
x.add_system("D2", FUNC, "D_2")
x.add_system("F", FUNC, "F")
x.add_system("G", FUNC, "G")
x.connect("opt", "D1", "x, z")
x.connect("opt", "D2", "z")
x.connect("opt", "F", "x, z")
x.connect("solver", "D1", "y_2")
x.connect("solver", "D2", "y_1")
x.connect("D1", "solver", r"\mathcal{R}(y_1)")
x.connect("solver", "F", "y_1, y_2")
x.connect("D2", "solver", r"\mathcal{R}(y_2)")
x.connect("solver", "G", "y_1, y_2")
x.connect("F", "opt", "f")
x.connect("G", "opt", "g")
x.add_output("opt", "x^*, z^*", side="left")
x.add_output("D1", "y_1^*", side="left")
x.add_output("D2", "y_2^*", side="left")
x.add_output("F", "f^*", side="left")
x.add_output("G", "g^*", side="left")
x.write("mdf")
|
[
"pyxdsm.XDSM.XDSM"
] |
[((107, 128), 'pyxdsm.XDSM.XDSM', 'XDSM', ([], {'use_sfmath': '(True)'}), '(use_sfmath=True)\n', (111, 128), False, 'from pyxdsm.XDSM import XDSM, OPT, SOLVER, FUNC\n')]
|
import unittest
from os.path import join
from unittest import mock
from pythonforandroid.recipes.python3 import (
NDK_API_LOWER_THAN_SUPPORTED_MESSAGE,
)
from pythonforandroid.util import BuildInterruptingException
from tests.recipes.recipe_lib_test import RecipeCtx
class TestPython3Recipe(RecipeCtx, unittest.TestCase):
"""
TestCase for recipe :mod:`~pythonforandroid.recipes.python3`
"""
recipe_name = "python3"
def test_property__libpython(self):
self.assertEqual(
self.recipe._libpython,
f'libpython{self.recipe.link_version}.so'
)
@mock.patch('pythonforandroid.recipes.python3.Path.is_file')
def test_should_build(self, mock_is_file):
# in case that python lib exists, we shouldn't trigger the build
self.assertFalse(self.recipe.should_build(self.arch))
# in case that python lib doesn't exist, we should trigger the build
mock_is_file.return_value = False
self.assertTrue(self.recipe.should_build(self.arch))
def test_include_root(self):
expected_include_dir = join(
self.recipe.get_build_dir(self.arch.arch), 'Include',
)
self.assertEqual(
expected_include_dir, self.recipe.include_root(self.arch.arch)
)
def test_link_root(self):
expected_link_root = join(
self.recipe.get_build_dir(self.arch.arch), 'android-build',
)
self.assertEqual(
expected_link_root, self.recipe.link_root(self.arch.arch)
)
@mock.patch("pythonforandroid.recipes.python3.subprocess.call")
def test_compile_python_files(self, mock_subprocess):
fake_compile_dir = '/fake/compile/dir'
hostpy = self.recipe.ctx.hostpython = '/fake/hostpython3'
self.recipe.compile_python_files(fake_compile_dir)
mock_subprocess.assert_called_once_with(
[hostpy, '-OO', '-m', 'compileall', '-b', '-f', fake_compile_dir],
)
@mock.patch("pythonforandroid.recipe.Recipe.check_recipe_choices")
@mock.patch("pythonforandroid.archs.glob")
def test_get_recipe_env(
self,
mock_glob,
mock_check_recipe_choices,
):
"""
Test that method
:meth:`~pythonforandroid.recipes.python3.Python3Recipe.get_recipe_env`
returns the expected flags
"""
mock_glob.return_value = ["llvm"]
mock_check_recipe_choices.return_value = sorted(
self.ctx.recipe_build_order
)
env = self.recipe.get_recipe_env(self.arch)
self.assertIn(
f'-fPIC -DANDROID -D__ANDROID_API__={self.ctx.ndk_api}',
env["CFLAGS"])
self.assertEqual(env["CC"], self.arch.get_clang_exe(with_target=True))
# make sure that the mocked methods are actually called
mock_glob.assert_called()
mock_check_recipe_choices.assert_called()
def test_set_libs_flags(self):
# todo: properly check `Python3Recipe.set_lib_flags`
pass
# These decorators are to mock calls to `get_recipe_env`
# and `set_libs_flags`, since these calls are tested separately
@mock.patch("pythonforandroid.util.chdir")
@mock.patch("pythonforandroid.util.makedirs")
@mock.patch("pythonforandroid.archs.glob")
def test_build_arch(
self,
mock_glob,
mock_makedirs,
mock_chdir,):
mock_glob.return_value = ["llvm"]
# specific `build_arch` mocks
with mock.patch(
"builtins.open",
mock.mock_open(read_data="#define ZLIB_VERSION 1.1\nfoo")
) as mock_open_zlib, mock.patch(
"pythonforandroid.recipes.python3.sh.Command"
) as mock_sh_command, mock.patch(
"pythonforandroid.recipes.python3.sh.make"
) as mock_make, mock.patch(
"pythonforandroid.recipes.python3.sh.cp"
) as mock_cp:
self.recipe.build_arch(self.arch)
# make sure that the mocked methods are actually called
recipe_build_dir = self.recipe.get_build_dir(self.arch.arch)
sh_command_calls = {
f"{recipe_build_dir}/config.guess",
f"{recipe_build_dir}/configure",
}
for command in sh_command_calls:
self.assertIn(
mock.call(command),
mock_sh_command.mock_calls,
)
mock_open_zlib.assert_called()
self.assertEqual(mock_make.call_count, 1)
for make_call, kw in mock_make.call_args_list:
self.assertIn(
f'INSTSONAME={self.recipe._libpython}', make_call
)
mock_cp.assert_called_with(
"pyconfig.h", join(recipe_build_dir, 'Include'),
)
mock_makedirs.assert_called()
mock_chdir.assert_called()
def test_build_arch_wrong_ndk_api(self):
# we check ndk_api using recipe's ctx
self.recipe.ctx.ndk_api = 20
with self.assertRaises(BuildInterruptingException) as e:
self.recipe.build_arch(self.arch)
self.assertEqual(
e.exception.args[0],
NDK_API_LOWER_THAN_SUPPORTED_MESSAGE.format(
ndk_api=self.recipe.ctx.ndk_api,
min_ndk_api=self.recipe.MIN_NDK_API,
),
)
# restore recipe's ctx or we could get failures with other test,
# since we share `self.recipe with all the tests of the class
self.recipe.ctx.ndk_api = self.ctx.ndk_api
@mock.patch('shutil.copystat')
@mock.patch('shutil.copyfile')
@mock.patch("pythonforandroid.util.chdir")
@mock.patch("pythonforandroid.util.makedirs")
@mock.patch("pythonforandroid.util.walk")
@mock.patch("pythonforandroid.recipes.python3.sh.find")
@mock.patch("pythonforandroid.recipes.python3.sh.cp")
@mock.patch("pythonforandroid.recipes.python3.sh.zip")
@mock.patch("pythonforandroid.recipes.python3.subprocess.call")
def test_create_python_bundle(
self,
mock_subprocess,
mock_sh_zip,
mock_sh_cp,
mock_sh_find,
mock_walk,
mock_makedirs,
mock_chdir,
mock_copyfile,
mock_copystat,
):
fake_compile_dir = '/fake/compile/dir'
simulated_walk_result = [
["/fake_dir", ["__pycache__", "Lib"], ["README", "setup.py"]],
["/fake_dir/Lib", ["ctypes"], ["abc.pyc", "abc.py"]],
["/fake_dir/Lib/ctypes", [], ["util.pyc", "util.py"]],
]
mock_walk.return_value = simulated_walk_result
self.recipe.create_python_bundle(fake_compile_dir, self.arch)
recipe_build_dir = self.recipe.get_build_dir(self.arch.arch)
modules_build_dir = join(
recipe_build_dir,
'android-build',
'build',
'lib.linux{}-{}-{}'.format(
'2' if self.recipe.version[0] == '2' else '',
self.arch.command_prefix.split('-')[0],
self.recipe.major_minor_version_string
))
expected_sp_paths = [
modules_build_dir,
join(recipe_build_dir, 'Lib'),
self.ctx.get_python_install_dir(self.arch.arch),
]
for n, (sp_call, kw) in enumerate(mock_subprocess.call_args_list):
self.assertEqual(sp_call[0][-1], expected_sp_paths[n])
# we expect two calls to `walk_valid_filens`
self.assertEqual(len(mock_walk.call_args_list), 2)
mock_sh_zip.assert_called()
mock_sh_cp.assert_called()
mock_sh_find.assert_called()
mock_makedirs.assert_called()
mock_chdir.assert_called()
mock_copyfile.assert_called()
mock_copystat.assert_called()
|
[
"pythonforandroid.recipes.python3.NDK_API_LOWER_THAN_SUPPORTED_MESSAGE.format",
"os.path.join",
"unittest.mock.patch",
"unittest.mock.mock_open",
"unittest.mock.call"
] |
[((612, 671), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.recipes.python3.Path.is_file"""'], {}), "('pythonforandroid.recipes.python3.Path.is_file')\n", (622, 671), False, 'from unittest import mock\n'), ((1552, 1614), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.recipes.python3.subprocess.call"""'], {}), "('pythonforandroid.recipes.python3.subprocess.call')\n", (1562, 1614), False, 'from unittest import mock\n'), ((1989, 2054), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.recipe.Recipe.check_recipe_choices"""'], {}), "('pythonforandroid.recipe.Recipe.check_recipe_choices')\n", (1999, 2054), False, 'from unittest import mock\n'), ((2060, 2101), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.archs.glob"""'], {}), "('pythonforandroid.archs.glob')\n", (2070, 2101), False, 'from unittest import mock\n'), ((3164, 3205), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.util.chdir"""'], {}), "('pythonforandroid.util.chdir')\n", (3174, 3205), False, 'from unittest import mock\n'), ((3211, 3255), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.util.makedirs"""'], {}), "('pythonforandroid.util.makedirs')\n", (3221, 3255), False, 'from unittest import mock\n'), ((3261, 3302), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.archs.glob"""'], {}), "('pythonforandroid.archs.glob')\n", (3271, 3302), False, 'from unittest import mock\n'), ((5530, 5559), 'unittest.mock.patch', 'mock.patch', (['"""shutil.copystat"""'], {}), "('shutil.copystat')\n", (5540, 5559), False, 'from unittest import mock\n'), ((5565, 5594), 'unittest.mock.patch', 'mock.patch', (['"""shutil.copyfile"""'], {}), "('shutil.copyfile')\n", (5575, 5594), False, 'from unittest import mock\n'), ((5600, 5641), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.util.chdir"""'], {}), "('pythonforandroid.util.chdir')\n", (5610, 5641), False, 'from unittest import mock\n'), ((5647, 5691), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.util.makedirs"""'], {}), "('pythonforandroid.util.makedirs')\n", (5657, 5691), False, 'from unittest import mock\n'), ((5697, 5737), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.util.walk"""'], {}), "('pythonforandroid.util.walk')\n", (5707, 5737), False, 'from unittest import mock\n'), ((5743, 5797), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.recipes.python3.sh.find"""'], {}), "('pythonforandroid.recipes.python3.sh.find')\n", (5753, 5797), False, 'from unittest import mock\n'), ((5803, 5855), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.recipes.python3.sh.cp"""'], {}), "('pythonforandroid.recipes.python3.sh.cp')\n", (5813, 5855), False, 'from unittest import mock\n'), ((5861, 5914), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.recipes.python3.sh.zip"""'], {}), "('pythonforandroid.recipes.python3.sh.zip')\n", (5871, 5914), False, 'from unittest import mock\n'), ((5920, 5982), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.recipes.python3.subprocess.call"""'], {}), "('pythonforandroid.recipes.python3.subprocess.call')\n", (5930, 5982), False, 'from unittest import mock\n'), ((3664, 3721), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.recipes.python3.sh.Command"""'], {}), "('pythonforandroid.recipes.python3.sh.Command')\n", (3674, 3721), False, 'from unittest import mock\n'), ((3764, 3818), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.recipes.python3.sh.make"""'], {}), 
"('pythonforandroid.recipes.python3.sh.make')\n", (3774, 3818), False, 'from unittest import mock\n'), ((3855, 3907), 'unittest.mock.patch', 'mock.patch', (['"""pythonforandroid.recipes.python3.sh.cp"""'], {}), "('pythonforandroid.recipes.python3.sh.cp')\n", (3865, 3907), False, 'from unittest import mock\n'), ((4729, 4762), 'os.path.join', 'join', (['recipe_build_dir', '"""Include"""'], {}), "(recipe_build_dir, 'Include')\n", (4733, 4762), False, 'from os.path import join\n'), ((5158, 5275), 'pythonforandroid.recipes.python3.NDK_API_LOWER_THAN_SUPPORTED_MESSAGE.format', 'NDK_API_LOWER_THAN_SUPPORTED_MESSAGE.format', ([], {'ndk_api': 'self.recipe.ctx.ndk_api', 'min_ndk_api': 'self.recipe.MIN_NDK_API'}), '(ndk_api=self.recipe.ctx.ndk_api,\n min_ndk_api=self.recipe.MIN_NDK_API)\n', (5201, 5275), False, 'from pythonforandroid.recipes.python3 import NDK_API_LOWER_THAN_SUPPORTED_MESSAGE\n'), ((7184, 7213), 'os.path.join', 'join', (['recipe_build_dir', '"""Lib"""'], {}), "(recipe_build_dir, 'Lib')\n", (7188, 7213), False, 'from os.path import join\n'), ((3577, 3637), 'unittest.mock.mock_open', 'mock.mock_open', ([], {'read_data': '"""#define ZLIB_VERSION 1.1\nfoo"""'}), '(read_data="""#define ZLIB_VERSION 1.1\nfoo""")\n', (3591, 3637), False, 'from unittest import mock\n'), ((4338, 4356), 'unittest.mock.call', 'mock.call', (['command'], {}), '(command)\n', (4347, 4356), False, 'from unittest import mock\n')]
|
# All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look on text below...
__author__ = "VirtualV <github.com/virtualvfix>"
__date__ = "$Dec 9, 2015 1:16:20 PM$"
# Compile line
# sudo pip install -U cython or sudo apt-get install cython
# python bin.py build_ext --inplace
try:
from setuptools import setup
from setuptools import Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules = [
Extension("import.py", ["release_import.py"]),
]
setup(
name='EncImport',
cmdclass={'build_ext': build_ext},
ext_modules=ext_modules
)
|
[
"distutils.extension.Extension",
"distutils.core.setup"
] |
[((649, 737), 'distutils.core.setup', 'setup', ([], {'name': '"""EncImport"""', 'cmdclass': "{'build_ext': build_ext}", 'ext_modules': 'ext_modules'}), "(name='EncImport', cmdclass={'build_ext': build_ext}, ext_modules=\n ext_modules)\n", (654, 737), False, 'from distutils.core import setup\n'), ((599, 644), 'distutils.extension.Extension', 'Extension', (['"""import.py"""', "['release_import.py']"], {}), "('import.py', ['release_import.py'])\n", (608, 644), False, 'from distutils.extension import Extension\n')]
|
import traceback
from pycompss.api.task import task
from pycompss.api.constraint import constraint
from pycompss.api.parameter import FILE_IN, FILE_OUT
from biobb_common.tools import file_utils as fu
from biobb_model.model import mutate
@task(input_pdb_path=FILE_IN, output_pdb_path=FILE_OUT, on_failure="IGNORE")
def mutate_pc(input_pdb_path, output_pdb_path, properties, **kwargs):
try:
mutate.Mutate(input_pdb_path=input_pdb_path, output_pdb_path=output_pdb_path, properties=properties, **kwargs).launch()
except Exception:
traceback.print_exc()
fu.write_failed_output(output_pdb_path)
|
[
"biobb_common.tools.file_utils.write_failed_output",
"pycompss.api.task.task",
"biobb_model.model.mutate.Mutate",
"traceback.print_exc"
] |
[((239, 314), 'pycompss.api.task.task', 'task', ([], {'input_pdb_path': 'FILE_IN', 'output_pdb_path': 'FILE_OUT', 'on_failure': '"""IGNORE"""'}), "(input_pdb_path=FILE_IN, output_pdb_path=FILE_OUT, on_failure='IGNORE')\n", (243, 314), False, 'from pycompss.api.task import task\n'), ((552, 573), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (571, 573), False, 'import traceback\n'), ((582, 621), 'biobb_common.tools.file_utils.write_failed_output', 'fu.write_failed_output', (['output_pdb_path'], {}), '(output_pdb_path)\n', (604, 621), True, 'from biobb_common.tools import file_utils as fu\n'), ((402, 517), 'biobb_model.model.mutate.Mutate', 'mutate.Mutate', ([], {'input_pdb_path': 'input_pdb_path', 'output_pdb_path': 'output_pdb_path', 'properties': 'properties'}), '(input_pdb_path=input_pdb_path, output_pdb_path=\n output_pdb_path, properties=properties, **kwargs)\n', (415, 517), False, 'from biobb_model.model import mutate\n')]
|
from jivago.config.production_jivago_context import ProductionJivagoContext
from jivago.config.router.router_builder import RouterBuilder
from jivago.lang.annotations import Override
from jivago.wsgi.routing.routing_rule import RoutingRule
from jivago.wsgi.routing.serving.static_file_routing_table import StaticFileRoutingTable
class MyApplicationContext(ProductionJivagoContext):
@Override
def create_router_config(self) -> RouterBuilder:
return super().create_router_config() \
.add_rule(RoutingRule("/", StaticFileRoutingTable("/var/www"))) \
.add_rule(RoutingRule("/", StaticFileRoutingTable("/var/www", allowed_extensions=['.html', '.xml'])))
|
[
"jivago.wsgi.routing.serving.static_file_routing_table.StaticFileRoutingTable"
] |
[((617, 689), 'jivago.wsgi.routing.serving.static_file_routing_table.StaticFileRoutingTable', 'StaticFileRoutingTable', (['"""/var/www"""'], {'allowed_extensions': "['.html', '.xml']"}), "('/var/www', allowed_extensions=['.html', '.xml'])\n", (639, 689), False, 'from jivago.wsgi.routing.serving.static_file_routing_table import StaticFileRoutingTable\n'), ((539, 573), 'jivago.wsgi.routing.serving.static_file_routing_table.StaticFileRoutingTable', 'StaticFileRoutingTable', (['"""/var/www"""'], {}), "('/var/www')\n", (561, 573), False, 'from jivago.wsgi.routing.serving.static_file_routing_table import StaticFileRoutingTable\n')]
|
from django import template
register = template.Library()
@register.filter
def first_word(string, arg=" "):
return string.split(arg)[0]
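# Template usage (illustrative):
#   {{ "hello world"|first_word }}   ->  "hello"
#   {{ "a,b,c"|first_word:"," }}     ->  "a"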
|
[
"django.template.Library"
] |
[((40, 58), 'django.template.Library', 'template.Library', ([], {}), '()\n', (56, 58), False, 'from django import template\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import gettext_lazy as _
from django.db import models
from .osprocess import osprocess
import sys
# Create your models here.
class User(models.Model):
name = models.CharField(max_length=128)
password = models.CharField(max_length=50, default='', blank=True, null=True)
passwordrt = models.CharField(max_length=50, default='', blank=True, null=True)
osproc = osprocess()
labels = {
'password': _('Password'),
'passwordrt' : _('Repeat password'),
}
def __str__(self): # __unicode__ on Python 2
return self.name
def delete(self, *args, **kwargs):
self.osproc.deleteUser(self.name)
if (self.osproc.execResult):
super(User, self).delete(*args, **kwargs)
else:
print >> sys.stdout, "output: %s " % (self.osproc.output)
print >> sys.stdout, "erroroutput: %s " % (self.osproc.erroroutput)
return self.osproc.erroroutput
def save(self, *args, **kwargs):
if (self.pk is None):
self.name = self.name.lower()
self.osproc.addUser(self.name, self.password)
else:
pass
# modify
if (self.osproc.execResult):
self.password = "<PASSWORD>"
self.passwordrt = "<PASSWORD>"
super(User, self).save(*args, **kwargs)
else:
print >> sys.stdout, "output: %s " % (self.osproc.output)
print >> sys.stdout, "erroroutput: %s " % (self.osproc.erroroutput)
return self.osproc.erroroutput
class Group(models.Model):
name = models.CharField(max_length=128)
members = models.ManyToManyField(User,blank=True)
osproc = osprocess()
def __str__(self): # __unicode__ on Python 2
return self.name
def delete(self, *args, **kwargs):
self.osproc.deleteGroup(self.name)
if (self.osproc.execResult):
super(Group, self).delete(*args, **kwargs)
else:
print >> sys.stdout, "output: %s " % (self.osproc.output)
print >> sys.stdout, "erroroutput: %s " % (self.osproc.erroroutput)
return self.osproc.erroroutput
def save(self, *args, **kwargs):
if (self.pk is None):
self.name = self.name.lower()
self.osproc.addGroup(self.name)
# add
#self.name = self.name + "+"
else:
pass
# modify
if (self.osproc.execResult):
super(Group, self).save(*args, **kwargs)
else:
print >> sys.stdout, "output: %s " % (self.osproc.output)
print >> sys.stdout, "erroroutput: %s " % (self.osproc.erroroutput)
return self.osproc.erroroutput
|
[
"django.db.models.CharField",
"django.db.models.ManyToManyField",
"django.utils.translation.gettext_lazy"
] |
[((259, 291), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (275, 291), False, 'from django.db import models\n'), ((307, 373), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'default': '""""""', 'blank': '(True)', 'null': '(True)'}), "(max_length=50, default='', blank=True, null=True)\n", (323, 373), False, 'from django.db import models\n'), ((391, 457), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'default': '""""""', 'blank': '(True)', 'null': '(True)'}), "(max_length=50, default='', blank=True, null=True)\n", (407, 457), False, 'from django.db import models\n'), ((1736, 1768), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1752, 1768), False, 'from django.db import models\n'), ((1783, 1823), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['User'], {'blank': '(True)'}), '(User, blank=True)\n', (1805, 1823), False, 'from django.db import models\n'), ((518, 531), 'django.utils.translation.gettext_lazy', '_', (['"""Password"""'], {}), "('Password')\n", (519, 531), True, 'from django.utils.translation import gettext_lazy as _\n'), ((556, 576), 'django.utils.translation.gettext_lazy', '_', (['"""Repeat password"""'], {}), "('Repeat password')\n", (557, 576), True, 'from django.utils.translation import gettext_lazy as _\n')]
|
"""
The :mod:`ramp_frontend.forms` module defines the different forms used on the
website.
"""
from flask_wtf import FlaskForm
from wtforms import BooleanField
from wtforms import DateField
from wtforms import DateTimeField
from wtforms import FileField
from wtforms import IntegerField
from wtforms import PasswordField
from wtforms import SelectMultipleField
from wtforms import StringField
from wtforms import validators
from wtforms import ValidationError
from wtforms.widgets import CheckboxInput
from wtforms.widgets import ListWidget
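# Custom field validators shared by the forms below: _space_check rejects values
# containing spaces, _ascii_check rejects values with non-ASCII characters.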
def _space_check(form, field):
if ' ' in field.data:
raise ValidationError('Field cannot contain space.')
def _ascii_check(form, field):
try:
field.data.encode('ascii')
except Exception:
raise ValidationError('Field cannot contain non-ascii characters.')
class LoginForm(FlaskForm):
"""Login-in form.
Attributes
----------
user_name : str
The user name.
password : str
<PASSWORD>.
"""
user_name = StringField('user_name', [validators.DataRequired()])
password = PasswordField('password', [validators.DataRequired()])
class UserUpdateProfileForm(FlaskForm):
"""User profile form.
Attributes
----------
user_name : str
The user name.
firstname : str
The user's first name.
lastname : str
The user's last name.
email : str
The user's email address.
linkedin_url : str, default == ''
The user's LinkedIn URL.
    twitter_url : str, default == ''
The user's Twitter URL.
facebook_url : str, default == ''
The user's Facebook URL.
google_url : str, default == ''
The user's Google URL.
github_url : str, default == ''
The user's GitHub URL.
website_url : str, default == ''
The user's website URL.
bio : str, default == ''
The user's bio.
is_want_news : bool, default is True
        Whether the user wants some info from us.
"""
user_name = StringField('user_name', [
validators.DataRequired(), validators.Length(min=1, max=20),
_space_check])
firstname = StringField('firstname', [validators.DataRequired()])
lastname = StringField('lastname', [validators.DataRequired()])
email = StringField('email', [validators.DataRequired()])
linkedin_url = StringField('linkedin_url')
twitter_url = StringField('twitter_url')
facebook_url = StringField('facebook_url')
google_url = StringField('google_url')
github_url = StringField('github_url')
website_url = StringField('website_url')
bio = StringField('bio')
is_want_news = BooleanField()
class UserCreateProfileForm(UserUpdateProfileForm):
"""User profile form.
Attributes
----------
user_name : str
The user name.
password : str
<PASSWORD>.
firstname : str
The user's first name.
lastname : str
The user's last name.
email : str
The user's email address.
linkedin_url : str, default == ''
The user's LinkedIn URL.
    twitter_url : str, default == ''
The user's Twitter URL.
facebook_url : str, default == ''
The user's Facebook URL.
google_url : str, default == ''
The user's Google URL.
github_url : str, default == ''
The user's GitHub URL.
website_url : str, default == ''
The user's website URL.
bio : str, default == ''
The user's bio.
is_want_news : bool, default is True
        Whether the user wants some info from us.
"""
password = PasswordField('password', [validators.DataRequired()])
class CodeForm(FlaskForm):
"""Code form.
This is the form used to contain the code when submitting to RAMP.
Attributes
----------
    names_codes : list of tuple (submission_file_name, submission_code)
        The placeholder containing the names of the submission files and the
        associated code.
"""
names_codes = []
class SubmitForm(FlaskForm):
"""Submission name form.
This is the form where the name of the submission given by the user will
be stored.
Attributes
----------
submission_name : str
The name of the submission.
"""
submission_name = StringField('submission_name',
[validators.DataRequired(), _space_check])
class UploadForm(FlaskForm):
"""Submission uploading form.
This is the form used to upload a file to be loaded during a RAMP
submission.
Attributes
----------
file : file
File to be uploaded and loaded into the sandbox code form.
"""
file = FileField('file')
class EventUpdateProfileForm(FlaskForm):
"""Form to update the parameters of an event.
Attributes
----------
title : str
The event title.
is_send_trained_mails : bool
Whether or not to send an email when submissions are trained.
is_public : bool
Whether or not the event is public.
is_controled_signup : bool
Whether or not the event has controlled sign-up.
is_competitive : bool
Whether or not the event has a competitive phase.
    min_duration_between_submissions_hour : int
        The number of hours to wait between two submissions.
    min_duration_between_submissions_minute : int
        The number of minutes to wait between two submissions.
    min_duration_between_submissions_second : int
        The number of seconds to wait between two submissions.
opening_timestamp : datetime
The date and time when the event is opening.
closing_timestamp : datetime
The date and time when the event is closing.
public_opening_timestamp : datetime
The date and time when the public phase of the event is opening.
"""
title = StringField(
'event_title', [validators.DataRequired(), validators.Length(max=80)]
)
is_send_trained_mails = BooleanField()
is_send_submitted_mails = BooleanField()
is_public = BooleanField()
is_controled_signup = BooleanField()
is_competitive = BooleanField()
min_duration_between_submissions_hour = IntegerField(
'min_h', [validators.NumberRange(min=0)]
)
min_duration_between_submissions_minute = IntegerField(
'min_m', [validators.NumberRange(min=0, max=59)]
)
min_duration_between_submissions_second = IntegerField(
'min_s', [validators.NumberRange(min=0, max=59)]
)
opening_timestamp = DateTimeField(
'opening_timestamp', [], format='%Y-%m-%d %H:%M:%S'
)
closing_timestamp = DateTimeField(
'closing_timestamp', [], format='%Y-%m-%d %H:%M:%S'
)
public_opening_timestamp = DateTimeField(
'public_opening_timestamp', [], format='%Y-%m-%d %H:%M:%S'
)
class MultiCheckboxField(SelectMultipleField):
"""A form containing multiple checkboxes."""
widget = ListWidget(prefix_label=False)
option_widget = CheckboxInput()
class ImportForm(FlaskForm):
"""The form allowing to select which model to view."""
selected_f_names = MultiCheckboxField('selected_f_names')
class CreditForm(FlaskForm):
"""Credit form.
    The credit form is used to acknowledge other submissions when making a
RAMP submission after tracking the user activity.
Attributes
----------
note : str
Some notes regarding the credit.
self_credit : str
The credit given to the current submission.
name_credits : list
The name for the credits.
"""
note = StringField('submission_name')
self_credit = StringField('self credit')
name_credits = []
class AskForEventForm(FlaskForm):
"""Form to ask for a new event.
Attributes
----------
suffix : str
The suffix used for the event.
title : str
The event title.
n_students : int
The number of students that will take part in the event.
    min_duration_between_submissions_hour : int
        The number of hours to wait between two submissions.
    min_duration_between_submissions_minute : int
        The number of minutes to wait between two submissions.
    min_duration_between_submissions_second : int
        The number of seconds to wait between two submissions.
opening_timestamp : datetime
The date and time when the event is opening.
closing_timestamp : datetime
The date and time when the event is closing.
"""
suffix = StringField(
'event_suffix',
[validators.DataRequired(), validators.Length(max=20), _ascii_check,
_space_check]
)
title = StringField(
'event_title',
[validators.DataRequired(), validators.Length(max=80)]
)
n_students = IntegerField(
'n_students',
[validators.DataRequired(), validators.NumberRange(min=0)]
)
min_duration_between_submissions_hour = IntegerField(
'min_h', [validators.NumberRange(min=0)]
)
min_duration_between_submissions_minute = IntegerField(
'min_m', [validators.NumberRange(min=0, max=59)]
)
min_duration_between_submissions_second = IntegerField(
'min_s', [validators.NumberRange(min=0, max=59)]
)
opening_date = DateField(
'opening_date', [validators.DataRequired()], format='%Y-%m-%d'
)
closing_date = DateField(
'closing_date', [validators.DataRequired()], format='%Y-%m-%d'
)
class EmailForm(FlaskForm):
email = StringField(
'Email', validators=[validators.DataRequired(), validators.Email()]
)
class PasswordForm(FlaskForm):
password = PasswordField(
'Password', validators=[validators.DataRequired()]
)
|
[
"wtforms.ValidationError",
"wtforms.widgets.CheckboxInput",
"wtforms.validators.Length",
"wtforms.validators.Email",
"wtforms.BooleanField",
"wtforms.DateTimeField",
"wtforms.widgets.ListWidget",
"wtforms.FileField",
"wtforms.validators.NumberRange",
"wtforms.StringField",
"wtforms.validators.DataRequired"
] |
[((2361, 2388), 'wtforms.StringField', 'StringField', (['"""linkedin_url"""'], {}), "('linkedin_url')\n", (2372, 2388), False, 'from wtforms import StringField\n'), ((2407, 2433), 'wtforms.StringField', 'StringField', (['"""twitter_url"""'], {}), "('twitter_url')\n", (2418, 2433), False, 'from wtforms import StringField\n'), ((2453, 2480), 'wtforms.StringField', 'StringField', (['"""facebook_url"""'], {}), "('facebook_url')\n", (2464, 2480), False, 'from wtforms import StringField\n'), ((2498, 2523), 'wtforms.StringField', 'StringField', (['"""google_url"""'], {}), "('google_url')\n", (2509, 2523), False, 'from wtforms import StringField\n'), ((2541, 2566), 'wtforms.StringField', 'StringField', (['"""github_url"""'], {}), "('github_url')\n", (2552, 2566), False, 'from wtforms import StringField\n'), ((2585, 2611), 'wtforms.StringField', 'StringField', (['"""website_url"""'], {}), "('website_url')\n", (2596, 2611), False, 'from wtforms import StringField\n'), ((2622, 2640), 'wtforms.StringField', 'StringField', (['"""bio"""'], {}), "('bio')\n", (2633, 2640), False, 'from wtforms import StringField\n'), ((2660, 2674), 'wtforms.BooleanField', 'BooleanField', ([], {}), '()\n', (2672, 2674), False, 'from wtforms import BooleanField\n'), ((4673, 4690), 'wtforms.FileField', 'FileField', (['"""file"""'], {}), "('file')\n", (4682, 4690), False, 'from wtforms import FileField\n'), ((5955, 5969), 'wtforms.BooleanField', 'BooleanField', ([], {}), '()\n', (5967, 5969), False, 'from wtforms import BooleanField\n'), ((6000, 6014), 'wtforms.BooleanField', 'BooleanField', ([], {}), '()\n', (6012, 6014), False, 'from wtforms import BooleanField\n'), ((6031, 6045), 'wtforms.BooleanField', 'BooleanField', ([], {}), '()\n', (6043, 6045), False, 'from wtforms import BooleanField\n'), ((6072, 6086), 'wtforms.BooleanField', 'BooleanField', ([], {}), '()\n', (6084, 6086), False, 'from wtforms import BooleanField\n'), ((6108, 6122), 'wtforms.BooleanField', 'BooleanField', ([], {}), '()\n', (6120, 6122), False, 'from wtforms import BooleanField\n'), ((6506, 6572), 'wtforms.DateTimeField', 'DateTimeField', (['"""opening_timestamp"""', '[]'], {'format': '"""%Y-%m-%d %H:%M:%S"""'}), "('opening_timestamp', [], format='%Y-%m-%d %H:%M:%S')\n", (6519, 6572), False, 'from wtforms import DateTimeField\n'), ((6611, 6677), 'wtforms.DateTimeField', 'DateTimeField', (['"""closing_timestamp"""', '[]'], {'format': '"""%Y-%m-%d %H:%M:%S"""'}), "('closing_timestamp', [], format='%Y-%m-%d %H:%M:%S')\n", (6624, 6677), False, 'from wtforms import DateTimeField\n'), ((6723, 6796), 'wtforms.DateTimeField', 'DateTimeField', (['"""public_opening_timestamp"""', '[]'], {'format': '"""%Y-%m-%d %H:%M:%S"""'}), "('public_opening_timestamp', [], format='%Y-%m-%d %H:%M:%S')\n", (6736, 6796), False, 'from wtforms import DateTimeField\n'), ((6922, 6952), 'wtforms.widgets.ListWidget', 'ListWidget', ([], {'prefix_label': '(False)'}), '(prefix_label=False)\n', (6932, 6952), False, 'from wtforms.widgets import ListWidget\n'), ((6973, 6988), 'wtforms.widgets.CheckboxInput', 'CheckboxInput', ([], {}), '()\n', (6986, 6988), False, 'from wtforms.widgets import CheckboxInput\n'), ((7559, 7589), 'wtforms.StringField', 'StringField', (['"""submission_name"""'], {}), "('submission_name')\n", (7570, 7589), False, 'from wtforms import StringField\n'), ((7608, 7634), 'wtforms.StringField', 'StringField', (['"""self credit"""'], {}), "('self credit')\n", (7619, 7634), False, 'from wtforms import StringField\n'), ((616, 662), 'wtforms.ValidationError', 
'ValidationError', (['"""Field cannot contain space."""'], {}), "('Field cannot contain space.')\n", (631, 662), False, 'from wtforms import ValidationError\n'), ((776, 837), 'wtforms.ValidationError', 'ValidationError', (['"""Field cannot contain non-ascii characters."""'], {}), "('Field cannot contain non-ascii characters.')\n", (791, 837), False, 'from wtforms import ValidationError\n'), ((1053, 1078), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (1076, 1078), False, 'from wtforms import validators\n'), ((1123, 1148), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (1146, 1148), False, 'from wtforms import validators\n'), ((2058, 2083), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (2081, 2083), False, 'from wtforms import validators\n'), ((2085, 2117), 'wtforms.validators.Length', 'validators.Length', ([], {'min': '(1)', 'max': '(20)'}), '(min=1, max=20)\n', (2102, 2117), False, 'from wtforms import validators\n'), ((2184, 2209), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (2207, 2209), False, 'from wtforms import validators\n'), ((2252, 2277), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (2275, 2277), False, 'from wtforms import validators\n'), ((2314, 2339), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (2337, 2339), False, 'from wtforms import validators\n'), ((3624, 3649), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (3647, 3649), False, 'from wtforms import validators\n'), ((4346, 4371), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (4369, 4371), False, 'from wtforms import validators\n'), ((5867, 5892), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (5890, 5892), False, 'from wtforms import validators\n'), ((5894, 5919), 'wtforms.validators.Length', 'validators.Length', ([], {'max': '(80)'}), '(max=80)\n', (5911, 5919), False, 'from wtforms import validators\n'), ((6199, 6228), 'wtforms.validators.NumberRange', 'validators.NumberRange', ([], {'min': '(0)'}), '(min=0)\n', (6221, 6228), False, 'from wtforms import validators\n'), ((6314, 6351), 'wtforms.validators.NumberRange', 'validators.NumberRange', ([], {'min': '(0)', 'max': '(59)'}), '(min=0, max=59)\n', (6336, 6351), False, 'from wtforms import validators\n'), ((6437, 6474), 'wtforms.validators.NumberRange', 'validators.NumberRange', ([], {'min': '(0)', 'max': '(59)'}), '(min=0, max=59)\n', (6459, 6474), False, 'from wtforms import validators\n'), ((8511, 8536), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (8534, 8536), False, 'from wtforms import validators\n'), ((8538, 8563), 'wtforms.validators.Length', 'validators.Length', ([], {'max': '(20)'}), '(max=20)\n', (8555, 8563), False, 'from wtforms import validators\n'), ((8665, 8690), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (8688, 8690), False, 'from wtforms import validators\n'), ((8692, 8717), 'wtforms.validators.Length', 'validators.Length', ([], {'max': '(80)'}), '(max=80)\n', (8709, 8717), False, 'from wtforms import validators\n'), ((8787, 8812), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (8810, 8812), False, 'from wtforms import validators\n'), ((8814, 8843), 'wtforms.validators.NumberRange', 'validators.NumberRange', ([], {'min': '(0)'}), '(min=0)\n', 
(8836, 8843), False, 'from wtforms import validators\n'), ((8927, 8956), 'wtforms.validators.NumberRange', 'validators.NumberRange', ([], {'min': '(0)'}), '(min=0)\n', (8949, 8956), False, 'from wtforms import validators\n'), ((9042, 9079), 'wtforms.validators.NumberRange', 'validators.NumberRange', ([], {'min': '(0)', 'max': '(59)'}), '(min=0, max=59)\n', (9064, 9079), False, 'from wtforms import validators\n'), ((9165, 9202), 'wtforms.validators.NumberRange', 'validators.NumberRange', ([], {'min': '(0)', 'max': '(59)'}), '(min=0, max=59)\n', (9187, 9202), False, 'from wtforms import validators\n'), ((9265, 9290), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (9288, 9290), False, 'from wtforms import validators\n'), ((9372, 9397), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (9395, 9397), False, 'from wtforms import validators\n'), ((9508, 9533), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (9531, 9533), False, 'from wtforms import validators\n'), ((9535, 9553), 'wtforms.validators.Email', 'validators.Email', ([], {}), '()\n', (9551, 9553), False, 'from wtforms import validators\n'), ((9656, 9681), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (9679, 9681), False, 'from wtforms import validators\n')]
|
import os
from mock import patch, MagicMock
from restkit import RequestError
from requests import RequestException
from gevent.event import Event
from openprocurement.medicines.registry.tests.base import BaseServersTest, config
from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge
from openprocurement.medicines.registry.client import ProxyClient
from openprocurement.medicines.registry.utils import file_exists, file_is_empty
from openprocurement.medicines.registry.tests.utils import rm_dir, custom_sleep
class TestBridgeWorker(BaseServersTest):
__test__ = True
def test_init(self):
self.worker = MedicinesRegistryBridge(config)
self.assertEqual(self.worker.delay, config.get('delay'))
self.assertTrue(isinstance(self.worker.proxy_client, ProxyClient))
self.assertTrue(self.worker.services_not_available.is_set())
self.assertEqual(self.worker.db.backend, 'redis')
self.assertEqual(self.worker.db.db_name, 0)
self.assertEqual(self.worker.db.port, '6379')
self.assertEqual(self.worker.db.host, '127.0.0.1')
def test_start_jobs(self):
self.worker = MedicinesRegistryBridge(config)
registry, json_former = [MagicMock(return_value=i) for i in range(2)]
self.worker.registry = registry
self.worker.json_former = json_former
self.worker._start_jobs()
self.assertTrue(registry.called)
self.assertTrue(json_former.called)
self.assertEqual(self.worker.jobs['registry'], 0)
self.assertEqual(self.worker.jobs['json_former'], 1)
def test_files_init(self):
self.worker = MedicinesRegistryBridge(config)
self.worker.BASE_DIR = self.BASE_DIR
self.DATA_PATH = os.path.join(self.worker.BASE_DIR, 'data')
self.assertFalse(os.path.exists(self.DATA_PATH))
self.worker._files_init()
self.registry_xml = os.path.join(self.DATA_PATH, 'registry.xml')
self.inn_json = os.path.join(self.DATA_PATH, 'inn.json')
self.atc_json = os.path.join(self.DATA_PATH, 'atc.json')
self.inn2atc_json = os.path.join(self.DATA_PATH, 'inn2atc.json')
self.atc2inn_json = os.path.join(self.DATA_PATH, 'atc2inn.json')
self.DATA_PATH = os.path.join(self.worker.BASE_DIR, 'data')
self.assertTrue(os.path.exists(self.DATA_PATH))
self.assertTrue(file_exists(self.registry_xml))
self.assertTrue(file_exists(self.inn_json))
self.assertTrue(file_exists(self.atc_json))
self.assertTrue(file_exists(self.inn2atc_json))
self.assertTrue(file_exists(self.atc2inn_json))
self.assertTrue(file_is_empty(self.registry_xml))
rm_dir(self.DATA_PATH)
def test_proxy_server(self):
self.worker = MedicinesRegistryBridge(config)
self.worker.sandbox_mode = 'True'
self.proxy_server.stop()
with self.assertRaises(RequestException):
self.worker.check_proxy()
self.proxy_server.start()
self.assertTrue(self.worker.check_proxy())
def test_proxy_server_mock(self):
self.worker = MedicinesRegistryBridge(config)
self.worker.proxy_client = MagicMock(health=MagicMock(side_effect=RequestError()))
with self.assertRaises(RequestError):
self.worker.check_proxy()
self.worker.proxy_client = MagicMock(return_value=True)
self.assertTrue(self.worker.check_proxy())
def test_proxy_server_success(self):
self.worker = MedicinesRegistryBridge(config)
self.worker.sandbox_mode = 'True'
self.assertTrue(self.worker.check_proxy())
def test_proxy_sandbox_mismatch(self):
self.worker = MedicinesRegistryBridge(config)
self.worker.sandbox_mode = 'False'
with self.assertRaises(RequestException):
self.worker.check_proxy()
self.worker.sandbox_mode = 'True'
self.assertTrue(self.worker.check_proxy())
def test_check_services(self):
self.worker = MedicinesRegistryBridge(config)
self.worker.services_not_available = MagicMock(set=MagicMock(), clear=MagicMock())
self.worker.check_services()
self.assertTrue(self.worker.services_not_available.clear.called)
self.worker.check_services()
self.assertFalse(self.worker.services_not_available.set.called)
def test_check_services_mock(self):
self.worker = MedicinesRegistryBridge(config)
self.worker = MagicMock()
self.worker.set_wake_up = MagicMock()
self.worker.set_sleep = MagicMock()
self.worker.check_services()
self.assertFalse(self.worker.set_wake_up.called)
self.worker.check_services()
self.assertFalse(self.worker.set_sleep.called)
def test_available_service(self):
self.worker = MedicinesRegistryBridge(config)
self.worker.sandbox_mode = 'True'
self.proxy_server.stop()
with self.assertRaises(RequestException):
self.worker.check_proxy()
self.assertFalse(self.worker.all_available())
self.worker.check_services()
self.proxy_server.start()
self.assertTrue(self.worker.all_available())
def test_sleep_wakeup(self):
self.worker = MedicinesRegistryBridge(config)
self.assertTrue(isinstance(self.worker.services_not_available, Event))
self.assertEqual(self.worker.services_not_available.set(), None)
@patch('gevent.killall')
def test_run_exception(self, killlall):
self.worker = MedicinesRegistryBridge(config)
self.worker.delay = 1
self.worker._start_jobs = MagicMock(return_value={'a': 1})
self.worker.check_and_revive_jobs = MagicMock(side_effect=Exception('test error'))
self.worker.run()
killlall.assert_called_once_with([1], timeout=5)
self.db.flushall()
@patch('gevent.killall')
@patch('gevent.sleep')
    def test_run_exception_with_sleep(self, gevent_sleep, killlall):
self.worker = MedicinesRegistryBridge(config)
gevent_sleep.side_effect = custom_sleep
self.worker._start_jobs = MagicMock(return_value={'a': 1})
self.worker.check_and_revive_jobs = MagicMock(side_effect=Exception('test error'))
with self.assertRaises(AttributeError):
self.worker.run()
with self.assertRaises(AssertionError):
killlall.assert_called_once_with([1], timeout=5)
self.db.flushall()
@patch('gevent.sleep')
def test_launch(self, gevent_sleep):
self.worker = MedicinesRegistryBridge(config)
self.worker.run = MagicMock()
self.worker.all_available = MagicMock(return_value=True)
self.worker.launch()
self.worker.run.assert_called_once()
self.db.flushall()
def test_check_and_revive_jobs(self):
self.worker = MedicinesRegistryBridge(config)
self.worker.jobs = {'test': MagicMock(dead=MagicMock(return_value=True))}
self.worker.revive_job = MagicMock()
self.worker.check_and_revive_jobs()
self.worker.revive_job.assert_called_once_with('test')
def test_revive_job(self):
self.worker = MedicinesRegistryBridge(config)
self.worker.test = MagicMock()
self.worker.jobs = {'test': MagicMock(dead=MagicMock(return_value=True))}
self.worker.revive_job('test')
self.assertEqual(self.worker.jobs['test'].dead, False)
|
[
"openprocurement.medicines.registry.utils.file_is_empty",
"openprocurement.medicines.registry.utils.file_exists",
"os.path.exists",
"mock.patch",
"openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge",
"openprocurement.medicines.registry.tests.base.config.get",
"restkit.RequestError",
"openprocurement.medicines.registry.tests.utils.rm_dir",
"mock.MagicMock",
"os.path.join"
] |
[((5463, 5486), 'mock.patch', 'patch', (['"""gevent.killall"""'], {}), "('gevent.killall')\n", (5468, 5486), False, 'from mock import patch, MagicMock\n'), ((5890, 5913), 'mock.patch', 'patch', (['"""gevent.killall"""'], {}), "('gevent.killall')\n", (5895, 5913), False, 'from mock import patch, MagicMock\n'), ((5919, 5940), 'mock.patch', 'patch', (['"""gevent.sleep"""'], {}), "('gevent.sleep')\n", (5924, 5940), False, 'from mock import patch, MagicMock\n'), ((6481, 6502), 'mock.patch', 'patch', (['"""gevent.sleep"""'], {}), "('gevent.sleep')\n", (6486, 6502), False, 'from mock import patch, MagicMock\n'), ((656, 687), 'openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge', 'MedicinesRegistryBridge', (['config'], {}), '(config)\n', (679, 687), False, 'from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge\n'), ((1174, 1205), 'openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge', 'MedicinesRegistryBridge', (['config'], {}), '(config)\n', (1197, 1205), False, 'from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge\n'), ((1665, 1696), 'openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge', 'MedicinesRegistryBridge', (['config'], {}), '(config)\n', (1688, 1696), False, 'from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge\n'), ((1767, 1809), 'os.path.join', 'os.path.join', (['self.worker.BASE_DIR', '"""data"""'], {}), "(self.worker.BASE_DIR, 'data')\n", (1779, 1809), False, 'import os\n'), ((1931, 1975), 'os.path.join', 'os.path.join', (['self.DATA_PATH', '"""registry.xml"""'], {}), "(self.DATA_PATH, 'registry.xml')\n", (1943, 1975), False, 'import os\n'), ((2000, 2040), 'os.path.join', 'os.path.join', (['self.DATA_PATH', '"""inn.json"""'], {}), "(self.DATA_PATH, 'inn.json')\n", (2012, 2040), False, 'import os\n'), ((2065, 2105), 'os.path.join', 'os.path.join', (['self.DATA_PATH', '"""atc.json"""'], {}), "(self.DATA_PATH, 'atc.json')\n", (2077, 2105), False, 'import os\n'), ((2134, 2178), 'os.path.join', 'os.path.join', (['self.DATA_PATH', '"""inn2atc.json"""'], {}), "(self.DATA_PATH, 'inn2atc.json')\n", (2146, 2178), False, 'import os\n'), ((2207, 2251), 'os.path.join', 'os.path.join', (['self.DATA_PATH', '"""atc2inn.json"""'], {}), "(self.DATA_PATH, 'atc2inn.json')\n", (2219, 2251), False, 'import os\n'), ((2278, 2320), 'os.path.join', 'os.path.join', (['self.worker.BASE_DIR', '"""data"""'], {}), "(self.worker.BASE_DIR, 'data')\n", (2290, 2320), False, 'import os\n'), ((2716, 2738), 'openprocurement.medicines.registry.tests.utils.rm_dir', 'rm_dir', (['self.DATA_PATH'], {}), '(self.DATA_PATH)\n', (2722, 2738), False, 'from openprocurement.medicines.registry.tests.utils import rm_dir, custom_sleep\n'), ((2795, 2826), 'openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge', 'MedicinesRegistryBridge', (['config'], {}), '(config)\n', (2818, 2826), False, 'from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge\n'), ((3136, 3167), 'openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge', 'MedicinesRegistryBridge', (['config'], {}), '(config)\n', (3159, 3167), False, 'from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge\n'), ((3380, 3408), 'mock.MagicMock', 'MagicMock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (3389, 3408), False, 'from mock import patch, MagicMock\n'), ((3524, 3555), 
'openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge', 'MedicinesRegistryBridge', (['config'], {}), '(config)\n', (3547, 3555), False, 'from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge\n'), ((3715, 3746), 'openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge', 'MedicinesRegistryBridge', (['config'], {}), '(config)\n', (3738, 3746), False, 'from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge\n'), ((4031, 4062), 'openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge', 'MedicinesRegistryBridge', (['config'], {}), '(config)\n', (4054, 4062), False, 'from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge\n'), ((4436, 4467), 'openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge', 'MedicinesRegistryBridge', (['config'], {}), '(config)\n', (4459, 4467), False, 'from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge\n'), ((4490, 4501), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4499, 4501), False, 'from mock import patch, MagicMock\n'), ((4536, 4547), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4545, 4547), False, 'from mock import patch, MagicMock\n'), ((4580, 4591), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4589, 4591), False, 'from mock import patch, MagicMock\n'), ((4839, 4870), 'openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge', 'MedicinesRegistryBridge', (['config'], {}), '(config)\n', (4862, 4870), False, 'from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge\n'), ((5272, 5303), 'openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge', 'MedicinesRegistryBridge', (['config'], {}), '(config)\n', (5295, 5303), False, 'from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge\n'), ((5553, 5584), 'openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge', 'MedicinesRegistryBridge', (['config'], {}), '(config)\n', (5576, 5584), False, 'from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge\n'), ((5649, 5681), 'mock.MagicMock', 'MagicMock', ([], {'return_value': "{'a': 1}"}), "(return_value={'a': 1})\n", (5658, 5681), False, 'from mock import patch, MagicMock\n'), ((6021, 6052), 'openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge', 'MedicinesRegistryBridge', (['config'], {}), '(config)\n', (6044, 6052), False, 'from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge\n'), ((6135, 6167), 'mock.MagicMock', 'MagicMock', ([], {'return_value': "{'a': 1}"}), "(return_value={'a': 1})\n", (6144, 6167), False, 'from mock import patch, MagicMock\n'), ((6566, 6597), 'openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge', 'MedicinesRegistryBridge', (['config'], {}), '(config)\n', (6589, 6597), False, 'from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge\n'), ((6624, 6635), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (6633, 6635), False, 'from mock import patch, MagicMock\n'), ((6672, 6700), 'mock.MagicMock', 'MagicMock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (6681, 6700), False, 'from mock import patch, MagicMock\n'), ((6868, 6899), 'openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge', 'MedicinesRegistryBridge', 
(['config'], {}), '(config)\n', (6891, 6899), False, 'from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge\n'), ((7015, 7026), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (7024, 7026), False, 'from mock import patch, MagicMock\n'), ((7188, 7219), 'openprocurement.medicines.registry.databridge.bridge.MedicinesRegistryBridge', 'MedicinesRegistryBridge', (['config'], {}), '(config)\n', (7211, 7219), False, 'from openprocurement.medicines.registry.databridge.bridge import MedicinesRegistryBridge\n'), ((7247, 7258), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (7256, 7258), False, 'from mock import patch, MagicMock\n'), ((732, 751), 'openprocurement.medicines.registry.tests.base.config.get', 'config.get', (['"""delay"""'], {}), "('delay')\n", (742, 751), False, 'from openprocurement.medicines.registry.tests.base import BaseServersTest, config\n'), ((1239, 1264), 'mock.MagicMock', 'MagicMock', ([], {'return_value': 'i'}), '(return_value=i)\n', (1248, 1264), False, 'from mock import patch, MagicMock\n'), ((1835, 1865), 'os.path.exists', 'os.path.exists', (['self.DATA_PATH'], {}), '(self.DATA_PATH)\n', (1849, 1865), False, 'import os\n'), ((2345, 2375), 'os.path.exists', 'os.path.exists', (['self.DATA_PATH'], {}), '(self.DATA_PATH)\n', (2359, 2375), False, 'import os\n'), ((2401, 2431), 'openprocurement.medicines.registry.utils.file_exists', 'file_exists', (['self.registry_xml'], {}), '(self.registry_xml)\n', (2412, 2431), False, 'from openprocurement.medicines.registry.utils import file_exists, file_is_empty\n'), ((2457, 2483), 'openprocurement.medicines.registry.utils.file_exists', 'file_exists', (['self.inn_json'], {}), '(self.inn_json)\n', (2468, 2483), False, 'from openprocurement.medicines.registry.utils import file_exists, file_is_empty\n'), ((2509, 2535), 'openprocurement.medicines.registry.utils.file_exists', 'file_exists', (['self.atc_json'], {}), '(self.atc_json)\n', (2520, 2535), False, 'from openprocurement.medicines.registry.utils import file_exists, file_is_empty\n'), ((2561, 2591), 'openprocurement.medicines.registry.utils.file_exists', 'file_exists', (['self.inn2atc_json'], {}), '(self.inn2atc_json)\n', (2572, 2591), False, 'from openprocurement.medicines.registry.utils import file_exists, file_is_empty\n'), ((2617, 2647), 'openprocurement.medicines.registry.utils.file_exists', 'file_exists', (['self.atc2inn_json'], {}), '(self.atc2inn_json)\n', (2628, 2647), False, 'from openprocurement.medicines.registry.utils import file_exists, file_is_empty\n'), ((2674, 2706), 'openprocurement.medicines.registry.utils.file_is_empty', 'file_is_empty', (['self.registry_xml'], {}), '(self.registry_xml)\n', (2687, 2706), False, 'from openprocurement.medicines.registry.utils import file_exists, file_is_empty\n'), ((4122, 4133), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4131, 4133), False, 'from mock import patch, MagicMock\n'), ((4141, 4152), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4150, 4152), False, 'from mock import patch, MagicMock\n'), ((6951, 6979), 'mock.MagicMock', 'MagicMock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (6960, 6979), False, 'from mock import patch, MagicMock\n'), ((7310, 7338), 'mock.MagicMock', 'MagicMock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (7319, 7338), False, 'from mock import patch, MagicMock\n'), ((3242, 3256), 'restkit.RequestError', 'RequestError', ([], {}), '()\n', (3254, 3256), False, 'from restkit import RequestError\n')]
|
# Copyright 2018 SAP SE
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import unittest
from pycadf import cadftaxonomy as taxonomy
from webob import Request
from . import fake
from watcher.watcher import OpenStackWatcherMiddleware
class TestWatcherMiddleware(unittest.TestCase):
def setUp(self):
self.watcher = OpenStackWatcherMiddleware(fake.FakeApp(), {})
def test_get_target_project_id_from_keystone_token_info(self):
token_info = {
'token': {
'catalog': [
{
'type': 'compute',
'id': '0123456789abcdef',
'name': 'nova',
'endpoints': [
{
'url': 'https://nova.local:8774/v2.1/194dfdddb6bc43e09701035b52edb0d9',
'interface': 'public',
'region': 'region',
'id': '0123456789abcdef'
}
]
}
]
}
}
self.watcher.service_type = 'compute'
self.assertEqual(
self.watcher.get_target_project_id_from_keystone_token_info(token_info),
'194dfdddb6bc43e09701035b52edb0d9',
"should be '194dfdddb6bc43e09701035b52edb0d9' as found in the service catalog"
)
def test_fail_get_target_project_id_from_keystone_token_info(self):
token_info = {
'token': {
'catalog': [
{
'type': 'compute',
'id': '0123456789abcdef',
'name': 'nova',
'endpoints': [
{
'url': 'https://nova.local:8774/v2.1',
'interface': 'public',
'region': 'region',
'id': '<KEY>'
}
]
}
]
}
}
self.watcher.service_type = 'compute'
self.assertEqual(
self.watcher.get_target_project_id_from_keystone_token_info(token_info),
taxonomy.UNKNOWN,
"should be 'unknown' as the service catalog contains no project scoped endpoint url"
)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main"
] |
[((2992, 3007), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3005, 3007), False, 'import unittest\n')]
|
'''
Linux discovery tool.
'''
import socket
from socket import AF_INET, SOCK_STREAM, SOCK_DGRAM
import platform
import os
import psutil
import json
import csv
import datetime
import argparse
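# Each discovery helper below returns plain dicts/lists so the write helpers can
# serialise the results to CSV with csv.DictWriter.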
def getListOfProcessSortedByMemory():
'''
    Get a list of running processes sorted by memory usage
'''
listOfProcObjects = []
# Iterate over the list
for proc in psutil.process_iter():
try:
# Fetch process details as dict
pinfo = proc.as_dict(attrs=['pid', 'name', 'username'])
pinfo['vms'] = proc.memory_info().vms / (1024 * 1024)
# Append dict to list
            listOfProcObjects.append(pinfo)
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
# Sort list of dict by key vms i.e. memory usage
listOfProcObjects = sorted(listOfProcObjects, key=lambda procObj: procObj['vms'], reverse=True)
return listOfProcObjects
def getProcessInfo():
'''
    Get a list of all processes running on the server
'''
processNames = []
print('*** Create a list of all running processes ***')
listOfProcessNames = list()
# Iterate over all running processes
for proc in psutil.process_iter():
# Get process detail as dictionary
pInfoDict = proc.as_dict(attrs=['pid', 'name', 'cpu_percent'])
# Append dict of process detail in list
listOfProcessNames.append(pInfoDict)
    # Iterate over the list of dictionaries and collect each element
for elem in listOfProcessNames:
processNames.append(elem)
return processNames
def getTopProcesses():
'''
Get a list of the top 10 processes by memory usage
'''
listOfTopProcesses = []
print('*** Top 10 process with highest memory usage ***')
listOfRunningProcess = getListOfProcessSortedByMemory()
for elem in listOfRunningProcess[:10] :
listOfTopProcesses.append(elem)
return listOfTopProcesses
def getDiskInfo():
'''
Get disk information
'''
disk_info = psutil.disk_partitions()
print('*** Getting disk information ***')
disks = []
for disk in disk_info:
try:
disk = {
"name" : disk.device,
"mount_point" : disk.mountpoint,
"type" : disk.fstype,
"total_size" : psutil.disk_usage(disk.mountpoint).total,
"used_size" : psutil.disk_usage(disk.mountpoint).used,
"percent_used" : psutil.disk_usage(disk.mountpoint).percent
}
disks.append(disk)
except:
print('Error on Disk Info')
return disks
def getNetworkConnectionInfo():
'''
    Get network information; on certain operating systems this function requires sudo
'''
try:
print('*** Getting network information ***')
network_info = []
AD = "-"
AF_INET6 = getattr(socket, 'AF_INET6', object())
proto_map = {
(AF_INET, SOCK_STREAM): 'tcp',
(AF_INET6, SOCK_STREAM): 'tcp6',
(AF_INET, SOCK_DGRAM): 'udp',
(AF_INET6, SOCK_DGRAM): 'udp6',
}
templ = "%-5s %-30s %-30s %-13s %-6s %s"
proc_names = {}
for p in psutil.process_iter(['pid', 'name']):
proc_names[p.info['pid']] = p.info['name']
for c in psutil.net_connections(kind='inet'):
laddr = "%s:%s" % (c.laddr)
raddr = ""
if c.raddr:
raddr = "%s:%s" % (c.raddr)
name = proc_names.get(c.pid, '?') or ''
network = {
"protocol": proto_map[(c.family, c.type)],
"localaddr": laddr,
"raddr": raddr,
"status": c.status,
"pid": c.pid,
"program": name[:15]
}
network_info.append(network)
return network_info
except:
print('Error: You need to run network discovery as sudo.')
def getSystemInfo():
'''
Gets base system information
'''
print('*** Getting base system information ***')
host_system = platform.uname()
cpu_count = psutil.cpu_count()
memory_stats = psutil.virtual_memory()
memory_total = memory_stats.total / 1024
memory_used = memory_stats.used / 1024
memory_used_percent = memory_stats.percent
system_info = {
"system_node": host_system.node,
"system_release": host_system.release,
"system_version": host_system.version,
"system_machine": host_system.machine,
"system_processor": host_system.processor,
"physical_proc_count": psutil.cpu_count(logical=False),
"logical_proc_count": psutil.cpu_count(logical=True),
"system_total_mem": memory_total,
"system_mem_used": memory_used,
"system_mem_used_perc": memory_used_percent
}
return system_info
def fileNameGenerator(fileprefix, nodeName):
'''
Creates filename string
'''
suffix = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
extension = ".csv"
filename = "_".join([fileprefix, nodeName, suffix]) + extension
return filename
def writeObject(fileName, fieldDef, rowData):
'''
Write single object
'''
with open(fileName, 'w') as fileOutput:
writer = csv.DictWriter(fileOutput, fieldnames=fieldDef)
writer.writeheader()
writer.writerow(rowData)
def writeObjectArray(fileName, fieldDef, rowData):
'''
Writes object array
'''
with open(fileName, 'w') as fileOutput:
writer = csv.DictWriter(fileOutput, fieldnames=fieldDef)
writer.writeheader()
for row in rowData:
writer.writerow(row)
def systemInformation():
system_info = getSystemInfo()
nodeName = system_info['system_node']
fieldDef = ['system_node', 'system_release', 'system_version', 'system_machine',
'system_processor', 'physical_proc_count', 'logical_proc_count', 'system_total_mem',
'system_mem_used', 'system_mem_used_perc']
fileName = fileNameGenerator("systeminfo", system_info['system_node'])
writeObject(fileName, fieldDef, system_info)
def processInformation():
system_info = getSystemInfo()
nodeName = system_info['system_node']
process_info = getProcessInfo()
fieldDef = ['pid', 'cpu_percent', 'name']
fileName = fileNameGenerator("processinfo", nodeName)
writeObjectArray(fileName, fieldDef, process_info)
def topProcessInformation():
system_info = getSystemInfo()
nodeName = system_info['system_node']
top_process_info = getTopProcesses()
fieldDef = ['pid', 'username', 'name', 'vms']
fileName = fileNameGenerator('topinfo', nodeName)
writeObjectArray(fileName, fieldDef, top_process_info)
def diskInformation():
system_info = getSystemInfo()
nodeName = system_info['system_node']
disk_info = getDiskInfo()
fieldDef = ['name', 'mount_point', 'type', 'total_size', 'used_size', 'percent_used']
fileName = fileNameGenerator('diskinfo', nodeName)
writeObjectArray(fileName, fieldDef, disk_info)
def networkInformation():
try:
system_info = getSystemInfo()
nodeName = system_info['system_node']
connection_info = getNetworkConnectionInfo()
fieldDef = ['protocol', 'localaddr', 'raddr', 'status', 'pid', 'program']
fileName = fileNameGenerator('netinfo', nodeName)
writeObjectArray(fileName, fieldDef, connection_info)
except:
print("Error: Failed to collect network information")
def main(discoveryOption):
'''
Main function
'''
if discoveryOption == 'system':
systemInformation()
elif discoveryOption == 'process':
processInformation()
elif discoveryOption == 'top':
topProcessInformation()
elif discoveryOption == 'disk':
diskInformation()
elif discoveryOption == 'network':
networkInformation()
elif discoveryOption == 'all':
systemInformation()
processInformation()
topProcessInformation()
diskInformation()
networkInformation()
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='junkshon_linux_scan', description='Performs system level discovery based upon selected options and creates a CSV output')
parser.add_argument('-d', '--discovery', type=str, choices=['system', 'process', 'top', 'disk', 'network', 'all'], help='Options include: system : system level information, \
process : process information, \
top : top 10 processes by memory, \
disk : disk information, \
network : network connection information, \
all : discover all')
args = parser.parse_args()
main(args.discovery)
|
[
"psutil.disk_partitions",
"psutil.process_iter",
"psutil.virtual_memory",
"argparse.ArgumentParser",
"psutil.disk_usage",
"platform.uname",
"psutil.net_connections",
"datetime.datetime.now",
"psutil.cpu_count",
"csv.DictWriter"
] |
[((377, 398), 'psutil.process_iter', 'psutil.process_iter', ([], {}), '()\n', (396, 398), False, 'import psutil\n'), ((1209, 1230), 'psutil.process_iter', 'psutil.process_iter', ([], {}), '()\n', (1228, 1230), False, 'import psutil\n'), ((2041, 2065), 'psutil.disk_partitions', 'psutil.disk_partitions', ([], {}), '()\n', (2063, 2065), False, 'import psutil\n'), ((4122, 4138), 'platform.uname', 'platform.uname', ([], {}), '()\n', (4136, 4138), False, 'import platform\n'), ((4155, 4173), 'psutil.cpu_count', 'psutil.cpu_count', ([], {}), '()\n', (4171, 4173), False, 'import psutil\n'), ((4194, 4217), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (4215, 4217), False, 'import psutil\n'), ((8242, 8403), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""junkshon_linux_scan"""', 'description': '"""Performs system level discovery based upon selected options and creates a CSV output"""'}), "(prog='junkshon_linux_scan', description=\n 'Performs system level discovery based upon selected options and creates a CSV output'\n )\n", (8265, 8403), False, 'import argparse\n'), ((3231, 3267), 'psutil.process_iter', 'psutil.process_iter', (["['pid', 'name']"], {}), "(['pid', 'name'])\n", (3250, 3267), False, 'import psutil\n'), ((3341, 3376), 'psutil.net_connections', 'psutil.net_connections', ([], {'kind': '"""inet"""'}), "(kind='inet')\n", (3363, 3376), False, 'import psutil\n'), ((4648, 4679), 'psutil.cpu_count', 'psutil.cpu_count', ([], {'logical': '(False)'}), '(logical=False)\n', (4664, 4679), False, 'import psutil\n'), ((4711, 4741), 'psutil.cpu_count', 'psutil.cpu_count', ([], {'logical': '(True)'}), '(logical=True)\n', (4727, 4741), False, 'import psutil\n'), ((5333, 5380), 'csv.DictWriter', 'csv.DictWriter', (['fileOutput'], {'fieldnames': 'fieldDef'}), '(fileOutput, fieldnames=fieldDef)\n', (5347, 5380), False, 'import csv\n'), ((5605, 5652), 'csv.DictWriter', 'csv.DictWriter', (['fileOutput'], {'fieldnames': 'fieldDef'}), '(fileOutput, fieldnames=fieldDef)\n', (5619, 5652), False, 'import csv\n'), ((5022, 5045), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5043, 5045), False, 'import datetime\n'), ((2352, 2386), 'psutil.disk_usage', 'psutil.disk_usage', (['disk.mountpoint'], {}), '(disk.mountpoint)\n', (2369, 2386), False, 'import psutil\n'), ((2424, 2458), 'psutil.disk_usage', 'psutil.disk_usage', (['disk.mountpoint'], {}), '(disk.mountpoint)\n', (2441, 2458), False, 'import psutil\n'), ((2498, 2532), 'psutil.disk_usage', 'psutil.disk_usage', (['disk.mountpoint'], {}), '(disk.mountpoint)\n', (2515, 2532), False, 'import psutil\n')]
|
import pytest
from furrycorn import config
from furrycorn.model.common import resource_identifier
def test_mk(mocker):
obj = { 'type': 'foo', 'id': '1234' } # mock
cfg = config.mk('https://api', 'ABCDEF')
result = resource_identifier.mk(obj, cfg)
assert type(result) is resource_identifier.ResourceId
|
[
"furrycorn.model.common.resource_identifier.mk",
"furrycorn.config.mk"
] |
[((181, 215), 'furrycorn.config.mk', 'config.mk', (['"""https://api"""', '"""ABCDEF"""'], {}), "('https://api', 'ABCDEF')\n", (190, 215), False, 'from furrycorn import config\n'), ((230, 262), 'furrycorn.model.common.resource_identifier.mk', 'resource_identifier.mk', (['obj', 'cfg'], {}), '(obj, cfg)\n', (252, 262), False, 'from furrycorn.model.common import resource_identifier\n')]
|
import tensorflow.keras as keras
import tensorflow as tf
from keras import layers, models
class SEBlock(keras.Model):
def __init__(self, ratio=16):
super().__init__()
self.ratio = ratio
self.gap = layers.GlobalAveragePooling2D()
def build(self, input_shape):
filters = input_shape[-1]
self.reshape = layers.Reshape((1, 1, filters))
self.fc1 = layers.Dense(
filters // self.ratio, kernel_initializer='he_normal', use_bias=False, activation='relu')
self.fc2 = layers.Dense(
filters, kernel_initializer='he_normal', use_bias=False, activation='sigmoid')
def call(self, input):
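        # Squeeze: global average pooling, then excitation: two dense layers
        # produce per-channel weights used to rescale the input feature map.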
x = self.gap(input)
x = self.reshape(x)
x = self.fc1(x)
x = self.fc2(x)
return tf.multiply(x, input)
def get_config(self):
return{
'ratio': self.ratio
}
class SEResNeXtUnit(keras.Model):
def __init__(self, filters, strides, cardinality=32):
super().__init__()
self.filters = filters
self.strides = strides
self.conv1x1_1 = layers.Conv2D(filters, 1, 1)
self.bn1 = layers.BatchNormalization()
self.conv3x3 = layers.Conv2D(
filters, 3, strides, groups=cardinality, padding='same')
self.bn2 = layers.BatchNormalization()
self.conv1x1_2 = layers.Conv2D(filters*2, 1, 1)
self.bn3 = layers.BatchNormalization()
self.seBlock = SEBlock()
def build(self, input_shape):
input_filters = input_shape[-1]
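        # Use a 1x1 projection shortcut when the input channel count differs
        # from the block output (filters * 2); otherwise use an identity shortcut.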
if input_filters != self.filters*2:
self.shortcut = models.Sequential([
layers.Conv2D(self.filters*2, 1, self.strides),
layers.BatchNormalization()
])
else:
self.shortcut = models.Sequential()
def call(self, input):
x = input
x = self.conv1x1_1(x)
x = self.bn1(x)
x = tf.nn.relu(x)
x = self.conv3x3(x)
x = self.bn2(x)
x = tf.nn.relu(x)
x = self.conv1x1_2(x)
x = self.bn3(x)
x = tf.nn.relu(x)
# Add SEBlock here
x = self.seBlock(x)
shortcut = self.shortcut(input)
return tf.nn.relu(tf.add(x, shortcut))
def get_config(self):
return{
'filters': self.filters,
'strides': self.strides
}
class Encoder(keras.Model):
def __init__(self, channels, repeat, strides):
super().__init__()
self.resBlocks = keras.Sequential()
self.resBlocks.add(SEResNeXtUnit(channels, strides))
for _ in range(1, repeat):
self.resBlocks.add(SEResNeXtUnit(channels, strides=1))
def call(self, inputs):
return self.resBlocks(inputs)
def get_config(self):
return {}
class ChannelAttention(keras.Model):
def __init__(self, reduction):
super().__init__()
self.globalMaxPool = layers.GlobalMaxPooling2D(keepdims=True)
self.globalAvgPool = layers.GlobalAveragePooling2D(keepdims=True)
self.reduction = reduction
def build(self, input_shape):
self.fc = keras.Sequential([
layers.Conv2D(input_shape[-1]//self.reduction, 3, padding='same'),
layers.ReLU(),
layers.Conv2D(input_shape[-1], 1, padding='valid')
])
def call(self, inputs):
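        # Apply the shared MLP to both the max- and average-pooled descriptors,
        # then fuse them with an element-wise add and a sigmoid (CBAM channel attention).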
x1 = self.globalMaxPool(inputs)
x2 = self.globalAvgPool(inputs)
x1 = self.fc(x1)
x2 = self.fc(x2)
x = tf.nn.sigmoid(layers.add([x1, x2]))
return x
class SpatialAttention(keras.Model):
def __init__(self):
super().__init__()
self.conv3x3 = layers.Conv2D(1, 3, padding='same')
def call(self, inputs):
# https://github.com/kobiso/CBAM-tensorflow/blob/master/attention_module.py#L95
x1 = tf.math.reduce_max(inputs, axis=3, keepdims=True)
x2 = tf.math.reduce_mean(inputs, axis=3, keepdims=True)
x = tf.concat([x1, x2], 3)
x = self.conv3x3(x)
x = tf.nn.sigmoid(x)
return x
class CBAM(keras.Model):
def __init__(self, reduction):
super().__init__()
self.channelAttention = ChannelAttention(reduction)
        self.spatialAttention = SpatialAttention()
def call(self, inputs):
x = inputs * self.channelAttention(inputs)
        x = x * self.spatialAttention(x)
return x
class Decoder(keras.Model):
def __init__(self, channels, upsample=True):
super().__init__()
self.bn1 = layers.BatchNormalization()
self.bn2 = layers.BatchNormalization()
if upsample:
self.upsample = keras.Sequential([
layers.UpSampling2D(2, interpolation='nearest')
])
else:
self.upsample = keras.Sequential()
self.conv3x3_2 = layers.Conv2D(
channels, 3, padding='same', use_bias=False)
self.conv1x1 = layers.Conv2D(channels, 1, use_bias=False)
self.cbam = CBAM(reduction=16)
def build(self, input_shape):
self.conv3x3_1 = layers.Conv2D(
input_shape[-1], 3, padding='same', use_bias=False)
def call(self, inputs):
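        # Pre-activation decoder block: BN + ReLU, upsample, two 3x3 convolutions,
        # CBAM attention, plus a 1x1-projected upsampled shortcut.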
x = self.bn1(inputs)
x = tf.nn.relu(x)
x = self.upsample(x)
x = self.conv3x3_1(x)
x = self.bn2(x)
x = tf.nn.relu(x)
x = self.conv3x3_2(x)
x = self.cbam(x)
shortcut = self.conv1x1(self.upsample(inputs))
x += shortcut
return x
def get_config(self):
return {}
def SEResNeXt101UNet(input_shape, num_classes):
inputs = keras.Input(shape=input_shape)
    # Encoder stem (conv7x7 + BN + ReLU + max-pool)
x = layers.Conv2D(64, 7, strides=2, padding='same', use_bias=False)(inputs)
x = layers.BatchNormalization()(x)
x = tf.nn.relu(x)
x0 = layers.MaxPooling2D(3, strides=2, padding='same')(x)
    # SE-ResNeXt-101 encoder stages
x1 = Encoder(128, 3, strides=1)(x0)
x2 = Encoder(256, 4, strides=2)(x1)
x3 = Encoder(512, 23, strides=2)(x2)
x4 = Encoder(1024, 3, strides=2)(x3)
# Center Block
y5 = layers.Conv2D(512, 3, padding='same', use_bias=False)(x4)
# Decode
y4 = Decoder(64)(layers.Concatenate(axis=3)([x4, y5]))
y3 = Decoder(64)(layers.Concatenate(axis=3)([x3, y4]))
y2 = Decoder(64)(layers.Concatenate(axis=3)([x2, y3]))
y1 = Decoder(64)(layers.Concatenate(axis=3)([x1, y2]))
y0 = Decoder(64)(y1)
# Hypercolumn
y4 = layers.UpSampling2D(16, interpolation='bilinear')(y4)
y3 = layers.UpSampling2D(8, interpolation='bilinear')(y3)
y2 = layers.UpSampling2D(4, interpolation='bilinear')(y2)
y1 = layers.UpSampling2D(2, interpolation='bilinear')(y1)
hypercolumn = layers.Concatenate(axis=3)([y0, y1, y2, y3, y4])
# Final conv
outputs = keras.Sequential([
layers.Conv2D(64, 3, padding='same', use_bias=False),
layers.ELU(),
layers.Conv2D(num_classes, 1, use_bias=False)
])(hypercolumn)
outputs = tf.nn.softmax(outputs)
return keras.Model(inputs, outputs)
if __name__ == '__main__':
m = SEResNeXt101UNet((160, 160, 3), 4)
m.summary()
|
[
"tensorflow.multiply",
"tensorflow.keras.Sequential",
"keras.layers.Reshape",
"tensorflow.nn.softmax",
"tensorflow.nn.relu",
"tensorflow.keras.Input",
"tensorflow.concat",
"keras.layers.ELU",
"keras.layers.GlobalAveragePooling2D",
"keras.layers.GlobalMaxPooling2D",
"keras.layers.MaxPooling2D",
"tensorflow.math.reduce_max",
"tensorflow.add",
"tensorflow.keras.Model",
"keras.layers.Concatenate",
"keras.layers.ReLU",
"keras.layers.Conv2D",
"keras.layers.UpSampling2D",
"keras.layers.BatchNormalization",
"tensorflow.math.reduce_mean",
"keras.layers.add",
"keras.layers.Dense",
"keras.models.Sequential",
"tensorflow.nn.sigmoid"
] |
[((5605, 5635), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (5616, 5635), True, 'import tensorflow.keras as keras\n'), ((5788, 5801), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (5798, 5801), True, 'import tensorflow as tf\n'), ((6963, 6985), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['outputs'], {}), '(outputs)\n', (6976, 6985), True, 'import tensorflow as tf\n'), ((6997, 7025), 'tensorflow.keras.Model', 'keras.Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (7008, 7025), True, 'import tensorflow.keras as keras\n'), ((227, 258), 'keras.layers.GlobalAveragePooling2D', 'layers.GlobalAveragePooling2D', ([], {}), '()\n', (256, 258), False, 'from keras import layers, models\n'), ((351, 382), 'keras.layers.Reshape', 'layers.Reshape', (['(1, 1, filters)'], {}), '((1, 1, filters))\n', (365, 382), False, 'from keras import layers, models\n'), ((402, 508), 'keras.layers.Dense', 'layers.Dense', (['(filters // self.ratio)'], {'kernel_initializer': '"""he_normal"""', 'use_bias': '(False)', 'activation': '"""relu"""'}), "(filters // self.ratio, kernel_initializer='he_normal',\n use_bias=False, activation='relu')\n", (414, 508), False, 'from keras import layers, models\n'), ((537, 632), 'keras.layers.Dense', 'layers.Dense', (['filters'], {'kernel_initializer': '"""he_normal"""', 'use_bias': '(False)', 'activation': '"""sigmoid"""'}), "(filters, kernel_initializer='he_normal', use_bias=False,\n activation='sigmoid')\n", (549, 632), False, 'from keras import layers, models\n'), ((789, 810), 'tensorflow.multiply', 'tf.multiply', (['x', 'input'], {}), '(x, input)\n', (800, 810), True, 'import tensorflow as tf\n'), ((1105, 1133), 'keras.layers.Conv2D', 'layers.Conv2D', (['filters', '(1)', '(1)'], {}), '(filters, 1, 1)\n', (1118, 1133), False, 'from keras import layers, models\n'), ((1153, 1180), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1178, 1180), False, 'from keras import layers, models\n'), ((1205, 1275), 'keras.layers.Conv2D', 'layers.Conv2D', (['filters', '(3)', 'strides'], {'groups': 'cardinality', 'padding': '"""same"""'}), "(filters, 3, strides, groups=cardinality, padding='same')\n", (1218, 1275), False, 'from keras import layers, models\n'), ((1308, 1335), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1333, 1335), False, 'from keras import layers, models\n'), ((1362, 1394), 'keras.layers.Conv2D', 'layers.Conv2D', (['(filters * 2)', '(1)', '(1)'], {}), '(filters * 2, 1, 1)\n', (1375, 1394), False, 'from keras import layers, models\n'), ((1412, 1439), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1437, 1439), False, 'from keras import layers, models\n'), ((1938, 1951), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (1948, 1951), True, 'import tensorflow as tf\n'), ((2017, 2030), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (2027, 2030), True, 'import tensorflow as tf\n'), ((2098, 2111), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (2108, 2111), True, 'import tensorflow as tf\n'), ((2515, 2533), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (2531, 2533), True, 'import tensorflow.keras as keras\n'), ((2939, 2979), 'keras.layers.GlobalMaxPooling2D', 'layers.GlobalMaxPooling2D', ([], {'keepdims': '(True)'}), '(keepdims=True)\n', (2964, 2979), False, 'from keras import layers, models\n'), ((3009, 3053), 'keras.layers.GlobalAveragePooling2D', 
'layers.GlobalAveragePooling2D', ([], {'keepdims': '(True)'}), '(keepdims=True)\n', (3038, 3053), False, 'from keras import layers, models\n'), ((3678, 3713), 'keras.layers.Conv2D', 'layers.Conv2D', (['(1)', '(3)'], {'padding': '"""same"""'}), "(1, 3, padding='same')\n", (3691, 3713), False, 'from keras import layers, models\n'), ((3844, 3893), 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['inputs'], {'axis': '(3)', 'keepdims': '(True)'}), '(inputs, axis=3, keepdims=True)\n', (3862, 3893), True, 'import tensorflow as tf\n'), ((3907, 3957), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['inputs'], {'axis': '(3)', 'keepdims': '(True)'}), '(inputs, axis=3, keepdims=True)\n', (3926, 3957), True, 'import tensorflow as tf\n'), ((3970, 3992), 'tensorflow.concat', 'tf.concat', (['[x1, x2]', '(3)'], {}), '([x1, x2], 3)\n', (3979, 3992), True, 'import tensorflow as tf\n'), ((4033, 4049), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['x'], {}), '(x)\n', (4046, 4049), True, 'import tensorflow as tf\n'), ((4528, 4555), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (4553, 4555), False, 'from keras import layers, models\n'), ((4575, 4602), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (4600, 4602), False, 'from keras import layers, models\n'), ((4838, 4896), 'keras.layers.Conv2D', 'layers.Conv2D', (['channels', '(3)'], {'padding': '"""same"""', 'use_bias': '(False)'}), "(channels, 3, padding='same', use_bias=False)\n", (4851, 4896), False, 'from keras import layers, models\n'), ((4933, 4975), 'keras.layers.Conv2D', 'layers.Conv2D', (['channels', '(1)'], {'use_bias': '(False)'}), '(channels, 1, use_bias=False)\n', (4946, 4975), False, 'from keras import layers, models\n'), ((5075, 5140), 'keras.layers.Conv2D', 'layers.Conv2D', (['input_shape[-1]', '(3)'], {'padding': '"""same"""', 'use_bias': '(False)'}), "(input_shape[-1], 3, padding='same', use_bias=False)\n", (5088, 5140), False, 'from keras import layers, models\n'), ((5224, 5237), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (5234, 5237), True, 'import tensorflow as tf\n'), ((5333, 5346), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (5343, 5346), True, 'import tensorflow as tf\n'), ((5669, 5732), 'keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(7)'], {'strides': '(2)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(64, 7, strides=2, padding='same', use_bias=False)\n", (5682, 5732), False, 'from keras import layers, models\n'), ((5749, 5776), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (5774, 5776), False, 'from keras import layers, models\n'), ((5811, 5860), 'keras.layers.MaxPooling2D', 'layers.MaxPooling2D', (['(3)'], {'strides': '(2)', 'padding': '"""same"""'}), "(3, strides=2, padding='same')\n", (5830, 5860), False, 'from keras import layers, models\n'), ((6071, 6124), 'keras.layers.Conv2D', 'layers.Conv2D', (['(512)', '(3)'], {'padding': '"""same"""', 'use_bias': '(False)'}), "(512, 3, padding='same', use_bias=False)\n", (6084, 6124), False, 'from keras import layers, models\n'), ((6432, 6481), 'keras.layers.UpSampling2D', 'layers.UpSampling2D', (['(16)'], {'interpolation': '"""bilinear"""'}), "(16, interpolation='bilinear')\n", (6451, 6481), False, 'from keras import layers, models\n'), ((6495, 6543), 'keras.layers.UpSampling2D', 'layers.UpSampling2D', (['(8)'], {'interpolation': '"""bilinear"""'}), "(8, interpolation='bilinear')\n", (6514, 6543), False, 'from keras import layers, 
models\n'), ((6557, 6605), 'keras.layers.UpSampling2D', 'layers.UpSampling2D', (['(4)'], {'interpolation': '"""bilinear"""'}), "(4, interpolation='bilinear')\n", (6576, 6605), False, 'from keras import layers, models\n'), ((6619, 6667), 'keras.layers.UpSampling2D', 'layers.UpSampling2D', (['(2)'], {'interpolation': '"""bilinear"""'}), "(2, interpolation='bilinear')\n", (6638, 6667), False, 'from keras import layers, models\n'), ((6690, 6716), 'keras.layers.Concatenate', 'layers.Concatenate', ([], {'axis': '(3)'}), '(axis=3)\n', (6708, 6716), False, 'from keras import layers, models\n'), ((1806, 1825), 'keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (1823, 1825), False, 'from keras import layers, models\n'), ((2235, 2254), 'tensorflow.add', 'tf.add', (['x', 'shortcut'], {}), '(x, shortcut)\n', (2241, 2254), True, 'import tensorflow as tf\n'), ((3526, 3546), 'keras.layers.add', 'layers.add', (['[x1, x2]'], {}), '([x1, x2])\n', (3536, 3546), False, 'from keras import layers, models\n'), ((4793, 4811), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (4809, 4811), True, 'import tensorflow.keras as keras\n'), ((6164, 6190), 'keras.layers.Concatenate', 'layers.Concatenate', ([], {'axis': '(3)'}), '(axis=3)\n', (6182, 6190), False, 'from keras import layers, models\n'), ((6223, 6249), 'keras.layers.Concatenate', 'layers.Concatenate', ([], {'axis': '(3)'}), '(axis=3)\n', (6241, 6249), False, 'from keras import layers, models\n'), ((6282, 6308), 'keras.layers.Concatenate', 'layers.Concatenate', ([], {'axis': '(3)'}), '(axis=3)\n', (6300, 6308), False, 'from keras import layers, models\n'), ((6341, 6367), 'keras.layers.Concatenate', 'layers.Concatenate', ([], {'axis': '(3)'}), '(axis=3)\n', (6359, 6367), False, 'from keras import layers, models\n'), ((3173, 3240), 'keras.layers.Conv2D', 'layers.Conv2D', (['(input_shape[-1] // self.reduction)', '(3)'], {'padding': '"""same"""'}), "(input_shape[-1] // self.reduction, 3, padding='same')\n", (3186, 3240), False, 'from keras import layers, models\n'), ((3252, 3265), 'keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (3263, 3265), False, 'from keras import layers, models\n'), ((3279, 3329), 'keras.layers.Conv2D', 'layers.Conv2D', (['input_shape[-1]', '(1)'], {'padding': '"""valid"""'}), "(input_shape[-1], 1, padding='valid')\n", (3292, 3329), False, 'from keras import layers, models\n'), ((6798, 6850), 'keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(3)'], {'padding': '"""same"""', 'use_bias': '(False)'}), "(64, 3, padding='same', use_bias=False)\n", (6811, 6850), False, 'from keras import layers, models\n'), ((6860, 6872), 'keras.layers.ELU', 'layers.ELU', ([], {}), '()\n', (6870, 6872), False, 'from keras import layers, models\n'), ((6882, 6927), 'keras.layers.Conv2D', 'layers.Conv2D', (['num_classes', '(1)'], {'use_bias': '(False)'}), '(num_classes, 1, use_bias=False)\n', (6895, 6927), False, 'from keras import layers, models\n'), ((1657, 1705), 'keras.layers.Conv2D', 'layers.Conv2D', (['(self.filters * 2)', '(1)', 'self.strides'], {}), '(self.filters * 2, 1, self.strides)\n', (1670, 1705), False, 'from keras import layers, models\n'), ((1721, 1748), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1746, 1748), False, 'from keras import layers, models\n'), ((4688, 4735), 'keras.layers.UpSampling2D', 'layers.UpSampling2D', (['(2)'], {'interpolation': '"""nearest"""'}), "(2, interpolation='nearest')\n", (4707, 4735), False, 'from keras import layers, models\n')]
|
'''
Classes for BMI decoding using the Kalman filter.
'''
import numpy as np
from scipy.io import loadmat
from . import bmi
import pickle
import re
class KalmanFilter(bmi.GaussianStateHMM):
"""
Low-level KF, agnostic to application
Model:
x_{t+1} = Ax_t + w_t; w_t ~ N(0, W)
y_t = Cx_t + q_t; q_t ~ N(0, Q)
"""
model_attrs = ['A', 'W', 'C', 'Q', 'C_xpose_Q_inv', 'C_xpose_Q_inv_C']
attrs_to_pickle = ['A', 'W', 'C', 'Q', 'C_xpose_Q_inv', 'C_xpose_Q_inv_C', 'R', 'S', 'T', 'ESS']
def __init__(self, A=None, W=None, C=None, Q=None, is_stochastic=None):
'''
Constructor for KalmanFilter
Parameters
----------
A : np.mat, optional
Model of state transition matrix
W : np.mat, optional
Model of process noise covariance
C : np.mat, optional
Model of conditional distribution between observations and hidden state
Q : np.mat, optional
Model of observation noise covariance
is_stochastic : np.array, optional
Array of booleans specifying for each state whether it is stochastic.
If 'None' specified, all states are assumed to be stochastic
Returns
-------
KalmanFilter instance
'''
if A is None and W is None and C is None and Q is None:
## This condition should only be true in the unpickling phase
pass
else:
self.A = np.mat(A)
self.W = np.mat(W)
self.C = np.mat(C)
self.Q = np.mat(Q)
if is_stochastic is None:
n_states = self.A.shape[0]
self.is_stochastic = np.ones(n_states, dtype=bool)
else:
self.is_stochastic = is_stochastic
self.state_noise = bmi.GaussianState(0.0, self.W)
self.obs_noise = bmi.GaussianState(0.0, self.Q)
self._pickle_init()
def _pickle_init(self):
"""Code common to unpickling and initialization
"""
nS = self.A.shape[0]
offset_row = np.zeros(nS)
offset_row[-1] = 1
self.include_offset = np.array_equal(np.array(self.A)[-1, :], offset_row)
self.alt = nS < self.C.shape[0] # No. of states less than no. of observations
attrs = list(self.__dict__.keys())
if not 'C_xpose_Q_inv_C' in attrs:
C, Q = self.C, self.Q
self.C_xpose_Q_inv = C.T * np.linalg.pinv(Q)
self.C_xpose_Q_inv_C = C.T * np.linalg.pinv(Q) * C
try:
self.is_stochastic
        except AttributeError:
n_states = self.A.shape[0]
self.is_stochastic = np.ones(n_states, dtype=bool)
def _obs_prob(self, state):
'''
Predict the observations based on the model parameters:
y_est = C*x_t + Q
Parameters
----------
state : bmi.GaussianState instance
The model-predicted state
Returns
-------
bmi.GaussianState instance
the model-predicted observations
'''
return self.C * state + self.obs_noise
def _forward_infer(self, st, obs_t, Bu=None, u=None, x_target=None, F=None, obs_is_control_independent=True, **kwargs):
'''
Estimate p(x_t | ..., y_{t-1}, y_t)
Parameters
----------
st : GaussianState
Current estimate (mean and cov) of hidden state
obs_t : np.mat of shape (N, 1)
ARG_DESCR
Bu : DATA_TYPE, optional, default=None
ARG_DESCR
u : DATA_TYPE, optional, default=None
ARG_DESCR
x_target : DATA_TYPE, optional, default=None
ARG_DESCR
obs_is_control_independent : bool, optional, default=True
ARG_DESCR
kwargs : optional kwargs
ARG_DESCR
Returns
-------
GaussianState
New state estimate incorporating the most recent observation
'''
using_control_input = (Bu is not None) or (u is not None) or (x_target is not None)
pred_state = self._ssm_pred(st, target_state=x_target, Bu=Bu, u=u, F=F)
C, Q = self.C, self.Q
P = pred_state.cov
K = self._calc_kalman_gain(P)
I = np.mat(np.eye(self.C.shape[1]))
D = self.C_xpose_Q_inv_C
KC = P*(I - D*P*(I + D*P).I)*D
F = (I - KC)*self.A
post_state = pred_state
if obs_is_control_independent and using_control_input:
post_state.mean += -KC*self.A*st.mean + K*obs_t
else:
post_state.mean += -KC*pred_state.mean + K*obs_t
post_state.cov = (I - KC) * P
return post_state
def set_state_cov(self, n_steps):
C, Q = self.C, self.Q
A, W = self.A, self.W
P = self.state.cov
for k in range(n_steps):
P = A*P*A.T + W
K = self._calc_kalman_gain(P)
I = np.mat(np.eye(self.C.shape[1]))
D = self.C_xpose_Q_inv_C
KC = P*(I - D*P*(I + D*P).I)*D
P = (I - KC) * P
return P
def _calc_kalman_gain(self, P):
'''
Calculate Kalman gain using the 'alternate' definition
Parameters
----------
P : np.matrix
            Prediction covariance matrix, i.e., cov(x_{t+1} | y_1, ..., y_t)
Returns
-------
K : np.matrix
            Kalman gain matrix for the input next-state prediction covariance.
'''
nX = P.shape[0]
I = np.mat(np.eye(nX))
D = self.C_xpose_Q_inv_C
L = self.C_xpose_Q_inv
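        # This is the matrix-inversion-lemma form of the gain: algebraically
        # equal to P*C.T*(C*P*C.T + Q).I, but it only inverts an (nX x nX)
        # matrix, which is cheaper when there are many more observations
        # (e.g. neural channels) than state variables.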
K = P * (I - D*P*(I + D*P).I) * L
return K
def get_sskf(self, tol=1e-15, return_P=False, dtype=np.array, max_iter=4000,
verbose=False, return_Khist=False, alt=True):
"""Calculate the steady-state KF matrices
value of P returned is the posterior error cov, i.e. P_{t|t}
Parameters
----------
Returns
-------
"""
A, W, C, Q = np.mat(self.A), np.mat(self.W), np.mat(self.C), np.mat(self.Q)
nS = A.shape[0]
P = np.mat(np.zeros([nS, nS]))
I = np.mat(np.eye(nS))
D = self.C_xpose_Q_inv_C
last_K = np.mat(np.ones(C.T.shape))*np.inf
K = np.mat(np.ones(C.T.shape))*0
K_hist = []
iter_idx = 0
last_P = None
while np.linalg.norm(K-last_K) > tol and iter_idx < max_iter:
P = A*P*A.T + W
last_K = K
K = self._calc_kalman_gain(P)
K_hist.append(K)
KC = P*(I - D*P*(I + D*P).I)*D
last_P = P
            P -= KC*P
iter_idx += 1
if verbose:
print(("Converged in %d iterations--error: %g" % (iter_idx, np.linalg.norm(K-last_K))))
n_state_vars, n_state_vars = A.shape
F = (np.mat(np.eye(n_state_vars, n_state_vars)) - KC) * A
if return_P and return_Khist:
return dtype(F), dtype(K), dtype(last_P), K_hist
elif return_P:
return dtype(F), dtype(K), dtype(last_P)
elif return_Khist:
return dtype(F), dtype(K), K_hist
else:
return dtype(F), dtype(K)
def get_kalman_gain_seq(self, N=1000, tol=1e-10, verbose=False):
'''
Calculate K_t for times {0, 1, ..., N}
Parameters
----------
N : int, optional
Number of steps to calculate Kalman gain for, default = 1000
tol : float, optional
Tolerance on K matrix convergence, default = 1e-10
verbose : bool, optional
Print intermediate/debugging information if true, default=False
Returns
-------
list
[K_0, K_1, ..., K_{N-1}]
'''
        A, W, H, Q = np.mat(self.A), np.mat(self.W), np.mat(self.C), np.mat(self.Q)  # H is the observation matrix C
P = np.mat( np.zeros(A.shape) )
K = [None]*N
ss_idx = None # index at which K is steady-state (within tol)
for n in range(N):
            if ss_idx is not None and n > ss_idx:
                K[n] = K[ss_idx]
            else:
                P = A*P*A.T + W
                K[n] = (P*H.T)*np.linalg.pinv(H*P*H.T + Q)
                P -= K[n]*H*P
if n > 0 and np.linalg.norm(K[n] - K[n-1]) < tol:
ss_idx = n
if verbose:
print(("breaking after %d iterations" % n))
return K, ss_idx
def get_kf_system_mats(self, T):
"""
KF system matrices
x_{t+1} = F_t*x_t + K_t*y_t
Parameters
----------
T : int
Number of system iterations to calculate (F_t, K_t)
Returns
-------
tuple of lists
Each element of the tuple is (F_t, K_t) for a given 't'
"""
F = [None]*T
K, ss_idx = self.get_kalman_gain_seq(N=T, verbose=False)
        nX = self.A.shape[0]
I = np.mat(np.eye(nX))
for t in range(T):
if t > ss_idx: F[t] = F[ss_idx]
            else: F[t] = (I - K[t]*self.C)*self.A
return F, K
@classmethod
def MLE_obs_model(self, hidden_state, obs, include_offset=True, drives_obs=None,
regularizer=None):
"""
Unconstrained ML estimator of {C, Q} given observations and
the corresponding hidden states
Parameters
----------
include_offset : bool, optional, default=True
A row of all 1's is added as the last row of hidden_state if one is not already present
Returns
-------
"""
assert hidden_state.shape[1] == obs.shape[1], "different numbers of time samples: %s vs %s" % (str(hidden_state.shape), str(obs.shape))
if isinstance(hidden_state, np.ma.core.MaskedArray):
mask = ~hidden_state.mask[0,:] # NOTE THE INVERTER
inds = np.nonzero([ mask[k]*mask[k+1] for k in range(len(mask)-1)])[0]
X = np.mat(hidden_state[:,mask])
T = len(np.nonzero(mask)[0])
Y = np.mat(obs[:,mask])
if include_offset:
if not np.all(X[-1,:] == 1):
X = np.vstack([ X, np.ones([1,T]) ])
else:
num_hidden_state, T = hidden_state.shape
X = np.mat(hidden_state)
if include_offset:
if not np.all(X[-1,:] == 1):
X = np.vstack([ X, np.ones([1,T]) ])
Y = np.mat(obs)
n_states = X.shape[0]
if not drives_obs is None:
X = X[drives_obs, :]
# ML estimate of C and Q
if regularizer is None:
C = np.mat(np.linalg.lstsq(X.T, Y.T)[0].T)
else:
x = X.T
y = Y.T
XtX_lamb = x.T.dot(x) + regularizer * np.eye(x.shape[1])
XtY = x.T.dot(y)
C = np.linalg.solve(XtX_lamb, XtY).T
Q = np.cov(Y - C*X, bias=1)
if np.ndim(Q) == 0:
# if "obs" only has 1 feature, Q might get collapsed to a scalar
Q = np.mat(Q.reshape(1,1))
if not drives_obs is None:
n_obs = C.shape[0]
C_tmp = np.zeros([n_obs, n_states])
C_tmp[:,drives_obs] = C
C = C_tmp
return (C, Q)
@classmethod
def MLE_state_space_model(self, hidden_state, include_offset=True):
'''
Train state space model for KF from fully observed hidden state
Parameters
----------
hidden_state : np.ndarray of shape (N, T)
N = dimensionality of state vector, T = number of observations
include_offset : boolean, optional, default=False
if True, append a "1" to each state vector to add an offset term into the
regression
Returns
-------
A : np.ndarray of shape (N, N)
W : np.ndarray of shape (N, N)
'''
X = hidden_state
T = hidden_state.shape[1]
if include_offset:
X = np.vstack([ X, np.ones([1,T]) ])
X1 = X[:,:-1]
X2 = X[:,1:]
A = np.linalg.lstsq(X1.T, X2.T)[0].T
W = np.cov(X2 - np.dot(A, X1), bias=1)
return A, W
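    # Example: with a noiseless system X[:, t+1] = A0 * X[:, t], the
    # least-squares fit above recovers A ~= A0 (padded with an offset row
    # when include_offset=True) and W ~= 0.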
def set_steady_state_pred_cov(self):
'''
Calculate the steady-state prediction covariance and set the current state prediction covariance to the steady-state value
'''
A, W, C, Q = np.mat(self.A), np.mat(self.W), np.mat(self.C), np.mat(self.Q)
D = self.C_xpose_Q_inv_C
nS = A.shape[0]
P = np.mat(np.zeros([nS, nS]))
I = np.mat(np.eye(nS))
last_K = np.mat(np.ones(C.T.shape))*np.inf
K = np.mat(np.ones(C.T.shape))*0
iter_idx = 0
for iter_idx in range(40):
P = A*P*A.T + W
last_K = K
KC = P*(I - D*P*(I + D*P).I)*D
            P -= KC*P
# TODO fix
P[0:3, 0:3] = 0
F, K = self.get_sskf()
F = (I - KC)*A
self._init_state(init_state=self.state.mean, init_cov=P)
def get_K_null(self):
'''
        $$y_{null} = K_{null} * y_t$$ gives the "null" component of the spike inputs, i.e. $$K_t * y_{null} = 0_{N x 1}$$
Parameters
----------
Returns
-------
'''
F, K = self.get_sskf()
K = np.mat(K)
n_neurons = K.shape[1]
K_null = np.eye(n_neurons) - np.linalg.pinv(K) * K
return K_null
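
# Illustrative sketch (not part of the original API): build a KalmanFilter on
# synthetic matrices and pull out the steady-state system. All dimensions and
# noise scales below are hypothetical; this assumes bmi.GaussianState accepts
# a scalar mean and a covariance matrix, as used in KalmanFilter.__init__.
def _demo_kalman_filter():
    nS, nY = 3, 5                          # hypothetical state/obs sizes
    A = np.eye(nS)                         # random-walk state model
    W = 0.1 * np.eye(nS)
    C = np.random.randn(nY, nS)            # random observation loadings
    Q = np.eye(nY)
    kf = KalmanFilter(A=A, W=W, C=C, Q=Q)
    F, K = kf.get_sskf()                   # steady-state x_{t+1} = F*x_t + K*y_t
    assert F.shape == (nS, nS) and K.shape == (nS, nY)
    return F, K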
class KalmanFilterDriftCorrection(KalmanFilter):
attrs_to_pickle = ['A', 'W', 'C', 'Q', 'C_xpose_Q_inv',
'C_xpose_Q_inv_C', 'R', 'S', 'T', 'ESS', 'drift_corr','prev_drift_corr']
noise_threshold = 96.*3.5
def _init_state(self):
if hasattr(self, 'prev_drift_corr'):
self.drift_corr = self.prev_drift_corr.copy()
            print('prev drift corr', np.mean(self.prev_drift_corr))
else:
self.drift_corr = np.mat(np.zeros(( self.A.shape[0], 1)))
self.prev_drift_corr = np.mat(np.zeros(( self.A.shape[0], 1)))
if hasattr(self, 'noise_rej'):
if self.noise_rej:
                print('noise rej thresh:', self.noise_rej_cutoff)
else:
self.noise_rej = False
self.noise_cnt = 0
super(KalmanFilterDriftCorrection, self)._init_state()
def _forward_infer(self, st, obs_t, Bu=None, u=None, x_target=None, F=None, obs_is_control_independent=True, **kwargs):
if self.noise_rej:
if np.sum(obs_t) > self.noise_rej_cutoff:
#print np.sum(obs_t), 'rejecting noise!'
self.noise_cnt += 1
obs_t = np.mat(self.noise_rej_mFR).T
        state = super(KalmanFilterDriftCorrection, self)._forward_infer(st, obs_t, Bu=Bu, u=u, x_target=x_target, F=F,
            obs_is_control_independent=obs_is_control_independent, **kwargs)
### Apply Drift Correction ###
decoded_vel = state.mean.copy()
state.mean[self.vel_ix] = decoded_vel[self.vel_ix] - self.drift_corr[self.vel_ix]
### Update Drift Correcton ###
self.drift_corr[self.vel_ix] = self.drift_corr[self.vel_ix]*self.drift_rho + decoded_vel[self.vel_ix]*float(1. - self.drift_rho)
self.prev_drift_corr = self.drift_corr.copy()
return state
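
# Illustrative sketch: the drift correction above is an exponential moving
# average (EWMA) of the decoded velocity. With rho near 1 it tracks slow
# biases while letting fast, intentional movements through. The rho, bias and
# step count below are hypothetical.
def _demo_drift_ewma(rho=0.995, bias=2.0, n_steps=5000):
    drift = 0.0
    for _ in range(n_steps):
        decoded_vel = bias + 0.1 * np.random.randn()  # biased velocity sample
        drift = drift * rho + decoded_vel * (1. - rho)
    return drift  # converges toward `bias` as n_steps grows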
class PCAKalmanFilter(KalmanFilter):
'''
A modified KalmanFilter where the Kalman gain is confined to produce outputs in a lower-dimensional linear subspace, i.e. some principal component space
'''
def _forward_infer(self, st, obs_t, Bu=None, u=None, target_state=None, obs_is_control_independent=True, **kwargs):
'''
See KalmanFilter._forward_infer for docs
'''
using_control_input = (Bu is not None) or (u is not None) or (target_state is not None)
pred_state = self._ssm_pred(st, target_state=target_state, Bu=Bu, u=u)
C, Q = self.C, self.Q
P = pred_state.cov
try:
M = self.M
pca_offset = self.pca_offset
        except AttributeError:
print("couldn't extract PCA parameters!")
M = 1
pca_offset = 0
K = self._calc_kalman_gain(P)
I = np.mat(np.eye(self.C.shape[1]))
D = self.C_xpose_Q_inv_C
KC = K*C
F = (I - KC)*self.A
post_state = pred_state
if obs_is_control_independent and using_control_input:
post_state.mean += -KC*self.A*st.mean + M*K*obs_t + pca_offset
else:
post_state.mean += -KC*pred_state.mean + M*K*obs_t + pca_offset
post_state.cov = (I - KC) * P
return post_state
def __getstate__(self):
'''
See KalmanFilter.__getstate__ for docs
'''
data = super(PCAKalmanFilter, self).__getstate__()
data['M'] = self.M
data['pca_offset'] = self.pca_offset
return data
def __setstate__(self, state):
'''
See KalmanFilter.__setstate__ for docs
'''
super(PCAKalmanFilter, self).__setstate__(state)
self.M = state['M']
self.pca_offset = state['pca_offset']
class FAKalmanFilter(KalmanFilter):
def _forward_infer(self, st, obs_t, Bu=None, u=None, target_state=None, obs_is_control_independent=True, **kwargs):
input_dict = {}
if hasattr(self, 'FA_kwargs'):
input_type = self.FA_input + '_input'
input_dict['all_input'] = obs_t.copy()
dmn = obs_t - self.FA_kwargs['fa_mu']
shar = (self.FA_kwargs['fa_sharL'] * dmn)
priv = (dmn - shar)
main_shar = (self.FA_kwargs['fa_main_shared'] * dmn)
main_priv = (dmn - main_shar)
FA = self.FA_kwargs['FA_model']
inp = obs_t.copy()
if inp.shape[1] == 1:
inp = inp.T # want 1 x neurons
z = FA.transform(dmn.T)
z = z.T #Transform to fact x 1
z = z[:self.FA_kwargs['fa_main_shar_n_dim'], :] #only use number in main space
input_dict['private_input'] = priv + self.FA_kwargs['fa_mu']
input_dict['shared_input'] = shar + self.FA_kwargs['fa_mu']
input_dict['private_scaled_input'] = np.multiply(priv, self.FA_kwargs['fa_priv_var_sc']) + self.FA_kwargs['fa_mu']
input_dict['shared_scaled_input'] = np.multiply(shar, self.FA_kwargs['fa_shar_var_sc']) + self.FA_kwargs['fa_mu']
input_dict['all_scaled_by_shar_input'] = np.multiply(dmn, self.FA_kwargs['fa_shar_var_sc']) + self.FA_kwargs['fa_mu']
input_dict['sc_shared+unsc_priv_input'] = input_dict['shared_scaled_input'] + input_dict['private_input'] - self.FA_kwargs['fa_mu']
input_dict['sc_shared+sc_priv_input'] = input_dict['shared_scaled_input'] + input_dict['private_scaled_input']- self.FA_kwargs['fa_mu']
input_dict['main_shared_input'] = main_shar + self.FA_kwargs['fa_mu']
input_dict['main_sc_shared_input'] = np.multiply(main_shar, self.FA_kwargs['fa_main_shared_sc']) + self.FA_kwargs['fa_mu']
input_dict['main_sc_shar+unsc_priv_input'] = input_dict['main_sc_shared_input'] + input_dict['private_input'] - self.FA_kwargs['fa_mu']
input_dict['main_sc_shar+sc_priv_input'] = input_dict['main_sc_shared_input'] + input_dict['private_scaled_input'] - self.FA_kwargs['fa_mu']
input_dict['main_sc_private_input'] = np.multiply(main_priv, self.FA_kwargs['fa_main_private_sc']) + self.FA_kwargs['fa_mu']
#z = self.FA_kwargs['u_svd'].T*self.FA_kwargs['uut_psi_inv']*dmn
input_dict['split_input'] = np.vstack((z, main_priv))
#print input_dict['split_input'].shape
own_pc_trans = np.mat(self.FA_kwargs['own_pc_trans'])*np.mat(dmn)
input_dict['pca_input'] = own_pc_trans + self.FA_kwargs['fa_mu']
if input_type in list(input_dict.keys()):
#print input_type
obs_t_mod = input_dict[input_type]
else:
print(input_type)
raise Exception("Error in FA_KF input_type, none of the expected inputs")
else:
obs_t_mod = obs_t.copy()
input_dict['task_input'] = obs_t_mod.copy()
post_state = super(FAKalmanFilter, self)._forward_infer(st, obs_t_mod, Bu=Bu, u=u, target_state=target_state,
obs_is_control_independent=obs_is_control_independent, **kwargs)
self.FA_input_dict = input_dict
return post_state
class KFDecoder(bmi.BMI, bmi.Decoder):
'''
Wrapper for KalmanFilter specifically for the application of BMI decoding.
'''
def __init__(self, *args, **kwargs):
'''
Constructor for KFDecoder
Parameters
----------
*args, **kwargs : see riglib.bmi.bmi.Decoder for arguments
Returns
-------
KFDecoder instance
'''
super(KFDecoder, self).__init__(*args, **kwargs)
mFR = kwargs.pop('mFR', 0.)
sdFR = kwargs.pop('sdFR', 1.)
self.mFR = mFR
self.sdFR = sdFR
self.zeromeanunits = None
self.zscore = False
self.kf = self.filt
def _pickle_init(self):
super(KFDecoder, self)._pickle_init()
if not hasattr(self.filt, 'B'):
self.filt.B = np.mat(np.vstack([np.zeros([3,3]), np.eye(3)*1000*self.binlen, np.zeros(3)]))
if not hasattr(self.filt, 'F'):
self.filt.F = np.mat(np.zeros([3,7]))
def init_zscore(self, mFR_curr, sdFR_curr):
'''
Initialize parameters for zcoring observations, if that feature is enabled in the decoder object
Parameters
----------
mFR_curr : np.array of shape (N,)
Current mean estimates (as opposed to potentially old estimates already stored in the decoder)
sdFR_curr : np.array of shape (N,)
Current standard deviation estimates (as opposed to potentially old estimates already stored in the decoder)
Returns
-------
None
'''
        # if interfacing with the Kinarm system, the mean and sd may have shape (n, 1)
self.zeromeanunits, = np.nonzero(mFR_curr == 0) #find any units with a mean FR of zero for this session
sdFR_curr[self.zeromeanunits] = np.nan # set mean and SD of quiet units to nan to avoid divide by 0 error
mFR_curr[self.zeromeanunits] = np.nan
#self.sdFR_ratio = self.sdFR/sdFR_curr
#self.mFR_diff = mFR_curr-self.mFR
#self.mFR_curr = mFR_curr
self.mFR = mFR_curr
self.sdFR = sdFR_curr
self.zscore = True
def update_params(self, new_params, steady_state=True):
'''
Update the decoder parameters if new parameters are available (e.g., by CLDA). See Decoder.update_params
'''
super(KFDecoder, self).update_params(new_params)
# set the KF to the new steady state
if steady_state:
self.kf.set_steady_state_pred_cov()
def __setstate__(self, state):
"""
Set decoder state after un-pickling. See Decoder.__setstate__, which runs the _pickle_init function at some point during the un-pickling process
Parameters
----------
state : dict
Variables to set as attributes of the unpickled object.
Returns
-------
None
"""
if 'kf' in state and 'filt' not in state:
state['filt'] = state['kf']
super(KFDecoder, self).__setstate__(state)
def plot_K(self, **kwargs):
'''
Plot the Kalman gain weights
Parameters
----------
**kwargs : optional kwargs
These are passed to the plot function (e.g., which rows to plot)
Returns
-------
None
'''
F, K = self.kf.get_sskf()
self.plot_pds(K.T, **kwargs)
def shuffle(self, shuffle_baselines=False):
'''
Shuffle the neural model
Parameters
----------
shuffle_baselines : bool, optional, default = False
If true, shuffle the estimates of the baseline firing rates in addition to the state-dependent neural tuning parameters.
Returns
-------
None (shuffling is done on the current decoder object)
'''
# generate random permutation
import random
inds = list(range(self.filt.C.shape[0]))
random.shuffle(inds)
# shuffle rows of C, and rows+cols of Q
C_orig = self.filt.C.copy()
self.filt.C = self.filt.C[inds, :]
if not shuffle_baselines:
self.filt.C[:,-1] = C_orig[:,-1]
self.filt.Q = self.filt.Q[inds, :]
self.filt.Q = self.filt.Q[:, inds]
self.filt.C_xpose_Q_inv = self.filt.C.T * np.linalg.pinv(self.filt.Q.I)
# RML sufficient statistics (S and T, but not R and ESS)
# shuffle rows of S, and rows+cols of T
try:
self.filt.S = self.filt.S[inds, :]
self.filt.T = self.filt.T[inds, :]
self.filt.T = self.filt.T[:, inds]
except AttributeError:
# if this decoder never had the RML sufficient statistics
# (R, S, T, and ESS) as attributes of self.filt
pass
def change_binlen(self, new_binlen, screen_update_rate=60.0):
'''
Function to change the binlen of the KFDecoder analytically.
Parameters
----------
new_binlen : float
New bin length of the decoder, in seconds
screen_update_rate: float, optional, default = 60Hz
Rate at which the __call__ function will be called
'''
bin_gain = new_binlen / self.binlen
self.binlen = new_binlen
# Alter bminum, bmicount, # of subbins
screen_update_period = 1./screen_update_rate
if self.binlen < screen_update_period:
self.n_subbins = int(screen_update_period / self.binlen)
self.bmicount = 0
if hasattr(self, 'bminum'):
del self.bminum
else:
self.n_subbins = 1
self.bminum = int(self.binlen / screen_update_period)
self.bmicount = 0
# change C matrix
self.filt.C *= bin_gain
self.filt.Q *= bin_gain**2
self.filt.C_xpose_Q_inv *= 1./bin_gain
# change state space Model
# TODO generalize this beyond endpoint
from . import state_space_models
A, W = self.ssm.get_ssm_matrices(update_rate=new_binlen)
self.filt.A = A
self.filt.W = W
def conv_to_steady_state(self):
'''
Create an SSKFDecoder object based on KalmanFilter parameters in this KFDecoder object
'''
from . import sskfdecoder
self.filt = sskfdecoder.SteadyStateKalmanFilter(A=self.filt.A, W=self.filt.W, C=self.filt.C, Q=self.filt.Q)
def subselect_units(self, units):
'''
Prune units from the KFDecoder, e.g., due to loss of recordings for a particular cell
Parameters
units : string or np.ndarray of shape (N,2)
The units which should be KEPT in the decoder
Returns
-------
KFDecoder
New KFDecoder object using only a subset of the cells of the original KFDecoder
'''
# Parse units into list of indices to keep
inds_to_keep = self._proc_units(units, 'keep')
dec_new = self._return_proc_units_decoder(inds_to_keep)
return dec_new
#self._save_new_dec(dec_new, '_subset')
def project_Q(C_v, Q_hat):
"""
Deprecated! See clda.KFRML_IVC
"""
print("projecting!")
from scipy.optimize import fmin_bfgs, fmin_ncg
C_v = np.mat(C_v)
Q_hat = np.mat(Q_hat)
Q_hat_inv = Q_hat.I
c_1 = C_v[:,0]
c_2 = C_v[:,1]
A_1 = c_1*c_1.T - c_2*c_2.T
A_2 = c_2*c_1.T
A_3 = c_1*c_2.T
A = [A_1, A_2, A_3]
if 1:
U = np.hstack([c_1 - c_2, c_2, c_1])
V = np.vstack([(c_1 + c_2).T, c_1.T, c_2.T])
C_inv_fn = lambda nu: np.mat(np.diag([1./nu[0], 1./(nu[0] + nu[1]), 1./(nu[2] - nu[0]) ]))
C_fn = lambda nu: np.mat(np.diag([nu[0], (nu[0] + nu[1]), (nu[2] - nu[0]) ]))
nu_0 = np.zeros(3)
c_scalars = np.ones(3)
else:
u_1, s_1, v_1 = np.linalg.svd(A_1)
c_scalars = np.hstack([s_1[0:2], 1, 1])
U = np.hstack([u_1[:,0:2], c_2, c_1])
V = np.vstack([v_1[0:2, :], c_1.T, c_2.T])
C_fn = lambda nu: np.mat(np.diag(nu * c_scalars))
nu_0 = np.zeros(4)
def cost_fn_gen(nu, return_type='cost'):
C = C_fn(nu)
S_star_inv = Q_hat + U*C_fn(nu)*V
#if return_type == 'cost':
# print C_v.T * S_star_inv * C_v
if np.any(np.diag(C) == 0):
S_star = S_star_inv.I
else:
C_inv = C.I
            S_star = Q_hat_inv - Q_hat_inv * U * (C_inv + V*Q_hat_inv*U).I*V * Q_hat_inv
# log-determinant using LU decomposition, required if Q is large, i.e. lots of simultaneous observations
cost = -np.log(np.linalg.det(S_star_inv))
#cost = -np.prod(np.linalg.slogdet(S_star_inv))
# TODO gradient dimension needs to be the same as nu
#grad = -np.array([np.trace(S_star*U[:,0] * c_scalars[0] * V[0,:]) for k in range(len(nu))])
#grad = -1e-4*np.array([np.trace(S_star*A[0]), np.trace(S_star*A[1]), np.trace(S_star*A[2])])
#print c_2.T*S_star*c_2
grad = -1e-4*np.array(np.hstack([c_1.T*S_star*c_1 - c_2.T*S_star*c_2, c_1.T*S_star*c_2, c_2.T*S_star*c_1])).ravel()
S = S_star
hess = np.mat([[np.trace(S*A_1*S*A_1), np.trace(S*A_2*S*A_1), np.trace(S*A_3*S*A_1)],
[np.trace(S*A_1*S*A_2), np.trace(S*A_2*S*A_2), np.trace(S*A_3*S*A_2)],
[np.trace(S*A_1*S*A_3), np.trace(S*A_2*S*A_3), np.trace(S*A_3*S*A_3)]])
#grad = hess*np.mat(grad.reshape(-1,1))
#log = logging.getLogger()
#print "nu = %s, cost = %g, grad=%s" % (nu, cost, grad)
#log.warning("nu = %s, cost = %g, grad=%s" % (nu, cost, grad))
if return_type == 'cost':
return cost
elif return_type == 'grad':
return grad
elif return_type == 'hess':
return hess
elif return_type == 'opt_val':
return S_star
else:
raise ValueError("Cost function doesn't know how to return this: %s" % return_type)
cost_fn = lambda nu: cost_fn_gen(nu, return_type = 'cost')
grad = lambda nu: cost_fn_gen(nu, return_type = 'grad')
hess = lambda nu: cost_fn_gen(nu, return_type = 'hess')
arg_opt = lambda nu: cost_fn_gen(nu, return_type = 'opt_val')
# Call optimization routine
#v_star = fmin_ncg(cost_fn, nu_0, fprime=grad, fhess=hess, maxiter=10000)
#print v_star
#v_star = fmin_bfgs(cost_fn, nu_0, maxiter=10000, gtol=1e-15)
v_star = fmin_bfgs(cost_fn, nu_0, fprime=grad, maxiter=10000, gtol=1e-15)
print(v_star)
Q_inv = arg_opt(v_star)
Q = Q_inv.I
Q = Q_hat + U * C_fn(v_star) * V
# TODO print out (log) a more useful measure of success
#print C_v.T * Q_inv * C_v
#print C_v.T * Q.I * C_v
#print v_star
return Q
|
[
"numpy.trace",
"numpy.sum",
"random.shuffle",
"numpy.ones",
"numpy.linalg.svd",
"numpy.linalg.norm",
"numpy.mean",
"numpy.diag",
"numpy.linalg.pinv",
"numpy.linalg.solve",
"numpy.mat",
"numpy.multiply",
"numpy.ndim",
"numpy.linalg.det",
"numpy.cov",
"numpy.hstack",
"scipy.optimize.fmin_bfgs",
"numpy.dot",
"numpy.vstack",
"numpy.all",
"numpy.linalg.lstsq",
"numpy.zeros",
"numpy.nonzero",
"numpy.array",
"numpy.eye"
] |
[((28104, 28115), 'numpy.mat', 'np.mat', (['C_v'], {}), '(C_v)\n', (28110, 28115), True, 'import numpy as np\n'), ((28128, 28141), 'numpy.mat', 'np.mat', (['Q_hat'], {}), '(Q_hat)\n', (28134, 28141), True, 'import numpy as np\n'), ((31330, 31394), 'scipy.optimize.fmin_bfgs', 'fmin_bfgs', (['cost_fn', 'nu_0'], {'fprime': 'grad', 'maxiter': '(10000)', 'gtol': '(1e-15)'}), '(cost_fn, nu_0, fprime=grad, maxiter=10000, gtol=1e-15)\n', (31339, 31394), False, 'from scipy.optimize import fmin_bfgs, fmin_ncg\n'), ((2140, 2152), 'numpy.zeros', 'np.zeros', (['nS'], {}), '(nS)\n', (2148, 2152), True, 'import numpy as np\n'), ((11134, 11159), 'numpy.cov', 'np.cov', (['(Y - C * X)'], {'bias': '(1)'}), '(Y - C * X, bias=1)\n', (11140, 11159), True, 'import numpy as np\n'), ((13575, 13584), 'numpy.mat', 'np.mat', (['K'], {}), '(K)\n', (13581, 13584), True, 'import numpy as np\n'), ((22470, 22495), 'numpy.nonzero', 'np.nonzero', (['(mFR_curr == 0)'], {}), '(mFR_curr == 0)\n', (22480, 22495), True, 'import numpy as np\n'), ((24795, 24815), 'random.shuffle', 'random.shuffle', (['inds'], {}), '(inds)\n', (24809, 24815), False, 'import random\n'), ((28323, 28355), 'numpy.hstack', 'np.hstack', (['[c_1 - c_2, c_2, c_1]'], {}), '([c_1 - c_2, c_2, c_1])\n', (28332, 28355), True, 'import numpy as np\n'), ((28368, 28408), 'numpy.vstack', 'np.vstack', (['[(c_1 + c_2).T, c_1.T, c_2.T]'], {}), '([(c_1 + c_2).T, c_1.T, c_2.T])\n', (28377, 28408), True, 'import numpy as np\n'), ((28609, 28620), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (28617, 28620), True, 'import numpy as np\n'), ((28641, 28651), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (28648, 28651), True, 'import numpy as np\n'), ((28686, 28704), 'numpy.linalg.svd', 'np.linalg.svd', (['A_1'], {}), '(A_1)\n', (28699, 28704), True, 'import numpy as np\n'), ((28725, 28752), 'numpy.hstack', 'np.hstack', (['[s_1[0:2], 1, 1]'], {}), '([s_1[0:2], 1, 1])\n', (28734, 28752), True, 'import numpy as np\n'), ((28765, 28799), 'numpy.hstack', 'np.hstack', (['[u_1[:, 0:2], c_2, c_1]'], {}), '([u_1[:, 0:2], c_2, c_1])\n', (28774, 28799), True, 'import numpy as np\n'), ((28811, 28849), 'numpy.vstack', 'np.vstack', (['[v_1[0:2, :], c_1.T, c_2.T]'], {}), '([v_1[0:2, :], c_1.T, c_2.T])\n', (28820, 28849), True, 'import numpy as np\n'), ((28923, 28934), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (28931, 28934), True, 'import numpy as np\n'), ((1505, 1514), 'numpy.mat', 'np.mat', (['A'], {}), '(A)\n', (1511, 1514), True, 'import numpy as np\n'), ((1536, 1545), 'numpy.mat', 'np.mat', (['W'], {}), '(W)\n', (1542, 1545), True, 'import numpy as np\n'), ((1567, 1576), 'numpy.mat', 'np.mat', (['C'], {}), '(C)\n', (1573, 1576), True, 'import numpy as np\n'), ((1598, 1607), 'numpy.mat', 'np.mat', (['Q'], {}), '(Q)\n', (1604, 1607), True, 'import numpy as np\n'), ((4341, 4364), 'numpy.eye', 'np.eye', (['self.C.shape[1]'], {}), '(self.C.shape[1])\n', (4347, 4364), True, 'import numpy as np\n'), ((5634, 5644), 'numpy.eye', 'np.eye', (['nX'], {}), '(nX)\n', (5640, 5644), True, 'import numpy as np\n'), ((6139, 6153), 'numpy.mat', 'np.mat', (['self.A'], {}), '(self.A)\n', (6145, 6153), True, 'import numpy as np\n'), ((6155, 6169), 'numpy.mat', 'np.mat', (['self.W'], {}), '(self.W)\n', (6161, 6169), True, 'import numpy as np\n'), ((6171, 6185), 'numpy.mat', 'np.mat', (['self.C'], {}), '(self.C)\n', (6177, 6185), True, 'import numpy as np\n'), ((6187, 6201), 'numpy.mat', 'np.mat', (['self.Q'], {}), '(self.Q)\n', (6193, 6201), True, 'import numpy as np\n'), ((6246, 6264), 
'numpy.zeros', 'np.zeros', (['[nS, nS]'], {}), '([nS, nS])\n', (6254, 6264), True, 'import numpy as np\n'), ((6285, 6295), 'numpy.eye', 'np.eye', (['nS'], {}), '(nS)\n', (6291, 6295), True, 'import numpy as np\n'), ((7927, 7944), 'numpy.mat', 'np.mat', (['self.kf.A'], {}), '(self.kf.A)\n', (7933, 7944), True, 'import numpy as np\n'), ((7946, 7963), 'numpy.mat', 'np.mat', (['self.kf.W'], {}), '(self.kf.W)\n', (7952, 7963), True, 'import numpy as np\n'), ((7965, 7982), 'numpy.mat', 'np.mat', (['self.kf.H'], {}), '(self.kf.H)\n', (7971, 7982), True, 'import numpy as np\n'), ((7984, 8001), 'numpy.mat', 'np.mat', (['self.kf.Q'], {}), '(self.kf.Q)\n', (7990, 8001), True, 'import numpy as np\n'), ((8022, 8039), 'numpy.zeros', 'np.zeros', (['A.shape'], {}), '(A.shape)\n', (8030, 8039), True, 'import numpy as np\n'), ((9125, 9135), 'numpy.eye', 'np.eye', (['nX'], {}), '(nX)\n', (9131, 9135), True, 'import numpy as np\n'), ((10176, 10205), 'numpy.mat', 'np.mat', (['hidden_state[:, mask]'], {}), '(hidden_state[:, mask])\n', (10182, 10205), True, 'import numpy as np\n'), ((10267, 10287), 'numpy.mat', 'np.mat', (['obs[:, mask]'], {}), '(obs[:, mask])\n', (10273, 10287), True, 'import numpy as np\n'), ((10503, 10523), 'numpy.mat', 'np.mat', (['hidden_state'], {}), '(hidden_state)\n', (10509, 10523), True, 'import numpy as np\n'), ((10673, 10684), 'numpy.mat', 'np.mat', (['obs'], {}), '(obs)\n', (10679, 10684), True, 'import numpy as np\n'), ((11170, 11180), 'numpy.ndim', 'np.ndim', (['Q'], {}), '(Q)\n', (11177, 11180), True, 'import numpy as np\n'), ((11390, 11417), 'numpy.zeros', 'np.zeros', (['[n_obs, n_states]'], {}), '([n_obs, n_states])\n', (11398, 11417), True, 'import numpy as np\n'), ((12657, 12671), 'numpy.mat', 'np.mat', (['self.A'], {}), '(self.A)\n', (12663, 12671), True, 'import numpy as np\n'), ((12673, 12687), 'numpy.mat', 'np.mat', (['self.W'], {}), '(self.W)\n', (12679, 12687), True, 'import numpy as np\n'), ((12689, 12703), 'numpy.mat', 'np.mat', (['self.C'], {}), '(self.C)\n', (12695, 12703), True, 'import numpy as np\n'), ((12705, 12719), 'numpy.mat', 'np.mat', (['self.Q'], {}), '(self.Q)\n', (12711, 12719), True, 'import numpy as np\n'), ((12797, 12815), 'numpy.zeros', 'np.zeros', (['[nS, nS]'], {}), '([nS, nS])\n', (12805, 12815), True, 'import numpy as np\n'), ((12836, 12846), 'numpy.eye', 'np.eye', (['nS'], {}), '(nS)\n', (12842, 12846), True, 'import numpy as np\n'), ((13633, 13650), 'numpy.eye', 'np.eye', (['n_neurons'], {}), '(n_neurons)\n', (13639, 13650), True, 'import numpy as np\n'), ((16436, 16459), 'numpy.eye', 'np.eye', (['self.C.shape[1]'], {}), '(self.C.shape[1])\n', (16442, 16459), True, 'import numpy as np\n'), ((19868, 19893), 'numpy.vstack', 'np.vstack', (['(z, main_priv)'], {}), '((z, main_priv))\n', (19877, 19893), True, 'import numpy as np\n'), ((25160, 25189), 'numpy.linalg.pinv', 'np.linalg.pinv', (['self.filt.Q.I'], {}), '(self.filt.Q.I)\n', (25174, 25189), True, 'import numpy as np\n'), ((1727, 1756), 'numpy.ones', 'np.ones', (['n_states'], {'dtype': 'bool'}), '(n_states, dtype=bool)\n', (1734, 1756), True, 'import numpy as np\n'), ((2225, 2241), 'numpy.array', 'np.array', (['self.A'], {}), '(self.A)\n', (2233, 2241), True, 'import numpy as np\n'), ((2509, 2526), 'numpy.linalg.pinv', 'np.linalg.pinv', (['Q'], {}), '(Q)\n', (2523, 2526), True, 'import numpy as np\n'), ((2723, 2752), 'numpy.ones', 'np.ones', (['n_states'], {'dtype': 'bool'}), '(n_states, dtype=bool)\n', (2730, 2752), True, 'import numpy as np\n'), ((5031, 5054), 'numpy.eye', 'np.eye', 
(['self.C.shape[1]'], {}), '(self.C.shape[1])\n', (5037, 5054), True, 'import numpy as np\n'), ((6357, 6375), 'numpy.ones', 'np.ones', (['C.T.shape'], {}), '(C.T.shape)\n', (6364, 6375), True, 'import numpy as np\n'), ((6403, 6421), 'numpy.ones', 'np.ones', (['C.T.shape'], {}), '(C.T.shape)\n', (6410, 6421), True, 'import numpy as np\n'), ((6504, 6530), 'numpy.linalg.norm', 'np.linalg.norm', (['(K - last_K)'], {}), '(K - last_K)\n', (6518, 6530), True, 'import numpy as np\n'), ((11089, 11119), 'numpy.linalg.solve', 'np.linalg.solve', (['XtX_lamb', 'XtY'], {}), '(XtX_lamb, XtY)\n', (11104, 11119), True, 'import numpy as np\n'), ((12338, 12365), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['X1.T', 'X2.T'], {}), '(X1.T, X2.T)\n', (12353, 12365), True, 'import numpy as np\n'), ((12395, 12408), 'numpy.dot', 'np.dot', (['A', 'X1'], {}), '(A, X1)\n', (12401, 12408), True, 'import numpy as np\n'), ((12873, 12891), 'numpy.ones', 'np.ones', (['C.T.shape'], {}), '(C.T.shape)\n', (12880, 12891), True, 'import numpy as np\n'), ((12919, 12937), 'numpy.ones', 'np.ones', (['C.T.shape'], {}), '(C.T.shape)\n', (12926, 12937), True, 'import numpy as np\n'), ((13653, 13670), 'numpy.linalg.pinv', 'np.linalg.pinv', (['K'], {}), '(K)\n', (13667, 13670), True, 'import numpy as np\n'), ((14170, 14200), 'numpy.zeros', 'np.zeros', (['(self.A.shape[0], 1)'], {}), '((self.A.shape[0], 1))\n', (14178, 14200), True, 'import numpy as np\n'), ((14245, 14275), 'numpy.zeros', 'np.zeros', (['(self.A.shape[0], 1)'], {}), '((self.A.shape[0], 1))\n', (14253, 14275), True, 'import numpy as np\n'), ((14742, 14755), 'numpy.sum', 'np.sum', (['obs_t'], {}), '(obs_t)\n', (14748, 14755), True, 'import numpy as np\n'), ((18465, 18516), 'numpy.multiply', 'np.multiply', (['priv', "self.FA_kwargs['fa_priv_var_sc']"], {}), "(priv, self.FA_kwargs['fa_priv_var_sc'])\n", (18476, 18516), True, 'import numpy as np\n'), ((18591, 18642), 'numpy.multiply', 'np.multiply', (['shar', "self.FA_kwargs['fa_shar_var_sc']"], {}), "(shar, self.FA_kwargs['fa_shar_var_sc'])\n", (18602, 18642), True, 'import numpy as np\n'), ((18723, 18773), 'numpy.multiply', 'np.multiply', (['dmn', "self.FA_kwargs['fa_shar_var_sc']"], {}), "(dmn, self.FA_kwargs['fa_shar_var_sc'])\n", (18734, 18773), True, 'import numpy as np\n'), ((19225, 19284), 'numpy.multiply', 'np.multiply', (['main_shar', "self.FA_kwargs['fa_main_shared_sc']"], {}), "(main_shar, self.FA_kwargs['fa_main_shared_sc'])\n", (19236, 19284), True, 'import numpy as np\n'), ((19663, 19723), 'numpy.multiply', 'np.multiply', (['main_priv', "self.FA_kwargs['fa_main_private_sc']"], {}), "(main_priv, self.FA_kwargs['fa_main_private_sc'])\n", (19674, 19723), True, 'import numpy as np\n'), ((19985, 20023), 'numpy.mat', 'np.mat', (["self.FA_kwargs['own_pc_trans']"], {}), "(self.FA_kwargs['own_pc_trans'])\n", (19991, 20023), True, 'import numpy as np\n'), ((20024, 20035), 'numpy.mat', 'np.mat', (['dmn'], {}), '(dmn)\n', (20030, 20035), True, 'import numpy as np\n'), ((21748, 21764), 'numpy.zeros', 'np.zeros', (['[3, 7]'], {}), '([3, 7])\n', (21756, 21764), True, 'import numpy as np\n'), ((28446, 28514), 'numpy.diag', 'np.diag', (['[1.0 / nu[0], 1.0 / (nu[0] + nu[1]), 1.0 / (nu[2] - nu[0])]'], {}), '([1.0 / nu[0], 1.0 / (nu[0] + nu[1]), 1.0 / (nu[2] - nu[0])])\n', (28453, 28514), True, 'import numpy as np\n'), ((28541, 28587), 'numpy.diag', 'np.diag', (['[nu[0], nu[0] + nu[1], nu[2] - nu[0]]'], {}), '([nu[0], nu[0] + nu[1], nu[2] - nu[0]])\n', (28548, 28587), True, 'import numpy as np\n'), ((28883, 28906), 'numpy.diag', 
'np.diag', (['(nu * c_scalars)'], {}), '(nu * c_scalars)\n', (28890, 28906), True, 'import numpy as np\n'), ((29146, 29156), 'numpy.diag', 'np.diag', (['C'], {}), '(C)\n', (29153, 29156), True, 'import numpy as np\n'), ((29471, 29496), 'numpy.linalg.det', 'np.linalg.det', (['S_star_inv'], {}), '(S_star_inv)\n', (29484, 29496), True, 'import numpy as np\n'), ((2568, 2585), 'numpy.linalg.pinv', 'np.linalg.pinv', (['Q'], {}), '(Q)\n', (2582, 2585), True, 'import numpy as np\n'), ((6990, 7024), 'numpy.eye', 'np.eye', (['n_state_vars', 'n_state_vars'], {}), '(n_state_vars, n_state_vars)\n', (6996, 7024), True, 'import numpy as np\n'), ((10225, 10241), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (10235, 10241), True, 'import numpy as np\n'), ((10341, 10362), 'numpy.all', 'np.all', (['(X[-1, :] == 1)'], {}), '(X[-1, :] == 1)\n', (10347, 10362), True, 'import numpy as np\n'), ((10578, 10599), 'numpy.all', 'np.all', (['(X[-1, :] == 1)'], {}), '(X[-1, :] == 1)\n', (10584, 10599), True, 'import numpy as np\n'), ((11025, 11043), 'numpy.eye', 'np.eye', (['x.shape[1]'], {}), '(x.shape[1])\n', (11031, 11043), True, 'import numpy as np\n'), ((12257, 12272), 'numpy.ones', 'np.ones', (['[1, T]'], {}), '([1, T])\n', (12264, 12272), True, 'import numpy as np\n'), ((14087, 14116), 'numpy.mean', 'np.mean', (['self.prev_drift_corr'], {}), '(self.prev_drift_corr)\n', (14094, 14116), True, 'import numpy as np\n'), ((14899, 14925), 'numpy.mat', 'np.mat', (['self.noise_rej_mFR'], {}), '(self.noise_rej_mFR)\n', (14905, 14925), True, 'import numpy as np\n'), ((30026, 30053), 'numpy.trace', 'np.trace', (['(S * A_1 * S * A_1)'], {}), '(S * A_1 * S * A_1)\n', (30034, 30053), True, 'import numpy as np\n'), ((30049, 30076), 'numpy.trace', 'np.trace', (['(S * A_2 * S * A_1)'], {}), '(S * A_2 * S * A_1)\n', (30057, 30076), True, 'import numpy as np\n'), ((30072, 30099), 'numpy.trace', 'np.trace', (['(S * A_3 * S * A_1)'], {}), '(S * A_3 * S * A_1)\n', (30080, 30099), True, 'import numpy as np\n'), ((30120, 30147), 'numpy.trace', 'np.trace', (['(S * A_1 * S * A_2)'], {}), '(S * A_1 * S * A_2)\n', (30128, 30147), True, 'import numpy as np\n'), ((30143, 30170), 'numpy.trace', 'np.trace', (['(S * A_2 * S * A_2)'], {}), '(S * A_2 * S * A_2)\n', (30151, 30170), True, 'import numpy as np\n'), ((30166, 30193), 'numpy.trace', 'np.trace', (['(S * A_3 * S * A_2)'], {}), '(S * A_3 * S * A_2)\n', (30174, 30193), True, 'import numpy as np\n'), ((30214, 30241), 'numpy.trace', 'np.trace', (['(S * A_1 * S * A_3)'], {}), '(S * A_1 * S * A_3)\n', (30222, 30241), True, 'import numpy as np\n'), ((30237, 30264), 'numpy.trace', 'np.trace', (['(S * A_2 * S * A_3)'], {}), '(S * A_2 * S * A_3)\n', (30245, 30264), True, 'import numpy as np\n'), ((30260, 30287), 'numpy.trace', 'np.trace', (['(S * A_3 * S * A_3)'], {}), '(S * A_3 * S * A_3)\n', (30268, 30287), True, 'import numpy as np\n'), ((6891, 6917), 'numpy.linalg.norm', 'np.linalg.norm', (['(K - last_K)'], {}), '(K - last_K)\n', (6905, 6917), True, 'import numpy as np\n'), ((8420, 8451), 'numpy.linalg.norm', 'np.linalg.norm', (['(K[n] - K[n - 1])'], {}), '(K[n] - K[n - 1])\n', (8434, 8451), True, 'import numpy as np\n'), ((10889, 10914), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['X.T', 'Y.T'], {}), '(X.T, Y.T)\n', (10904, 10914), True, 'import numpy as np\n'), ((21614, 21630), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (21622, 21630), True, 'import numpy as np\n'), ((21659, 21670), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (21667, 21670), True, 'import 
numpy as np\n'), ((29889, 29993), 'numpy.hstack', 'np.hstack', (['[c_1.T * S_star * c_1 - c_2.T * S_star * c_2, c_1.T * S_star * c_2, c_2.T *\n S_star * c_1]'], {}), '([c_1.T * S_star * c_1 - c_2.T * S_star * c_2, c_1.T * S_star *\n c_2, c_2.T * S_star * c_1])\n', (29898, 29993), True, 'import numpy as np\n'), ((10402, 10417), 'numpy.ones', 'np.ones', (['[1, T]'], {}), '([1, T])\n', (10409, 10417), True, 'import numpy as np\n'), ((10639, 10654), 'numpy.ones', 'np.ones', (['[1, T]'], {}), '([1, T])\n', (10646, 10654), True, 'import numpy as np\n'), ((21631, 21640), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (21637, 21640), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# This file is part of fintie.
# Copyright (C) 2018-present qytz <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""提供内部交易信息查询
信息获取通道包括:
* http://xueqiu.com/hq/insider/SZ002353
加载已保存的数据::
import pandas as pd
from pathlib import Path
df = pd.read_json(Path('xxx.json')
"""
import os
import time
import json
import asyncio
import logging
from pathlib import Path
from datetime import date
import click
import pandas as pd
from .cli import stock_cli_group, MODULE_DATA_DIR
from ..env import _init_in_session
from ..utils import fetch_http_data, add_doc
logger = logging.getLogger(__file__)
__all__ = ["async_get_inside_trade", "get_inside_trade"]
async def _init(session, force=False):
if force or not _init_in_session.get("xueqiu"):
_init_in_session["xueqiu"] = True
await session.get("https://xueqiu.com")
return True
async def async_get_inside_trade(session, symbol, data_path=None, return_df=True):
"""
    Fetch insider trading data from Xueqiu.
    :param session: `aiohttp.ClientSession` object; not needed for the synchronous interface
    :param symbol: stock symbol
    :param data_path: path under which the data is saved
    :param return_df: whether to return a `pandas.DataFrame`; if False, the raw data is returned
    :returns: the raw data or a `pandas.DataFrame` (see the return_df parameter);
        `None` on failure
"""
await _init(session)
page_size = 10000
curr_page = 1
params = {"_": 0, "symbol": symbol, "page": curr_page, "size": page_size}
logger.info("start download inside_trade from xueqiu for %s...", symbol)
url = "https://xueqiu.com/stock/f10/skholderchg.json"
params["_"] = int(time.time() * 1000)
date_str = str(date.today())
async with session.get(url, params=params) as resp:
if resp.status != 200:
logger.warning("get inside_trade from %s failed: %s", url, resp.status)
return None
data = await resp.json()
if "list" not in data:
logger.warn("no inside_trade data downloaded for %s from %s: % ", symbol, url, data)
inside_trade_data = data["list"]
if not inside_trade_data:
logger.warn("no inside_trade data downloaded for %s from %s, return None", symbol, url)
return None
logger.info("download inside_trade for %s from %s finish", symbol, url)
if data_path:
data_path = Path(data_path) / MODULE_DATA_DIR / "inside_trade"
os.makedirs(data_path, exist_ok=True)
data_fname = "-".join((symbol, date_str)) + ".json"
data_file = data_path / data_fname
with data_file.open("w", encoding="utf-8") as dataf:
json.dump(inside_trade_data, dataf, indent=4, ensure_ascii=False)
if not return_df:
return inside_trade_data
df = pd.DataFrame(inside_trade_data)
# set index
# df.set_index("bonusimpdate", inplace=True)
return df
@add_doc(async_get_inside_trade.__doc__)
def get_inside_trade(*args, **kwargs):
ret = fetch_http_data(async_get_inside_trade, *args, **kwargs)
if isinstance(ret, Exception):
raise ret
return ret
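
# Illustrative usage sketch: fetch one symbol via the synchronous wrapper,
# mirroring the CLI below. "SZ002353" is the example symbol from the module
# docstring; the save path is hypothetical.
def _demo_inside_trade():
    df = get_inside_trade("SZ002353", "/tmp/fintie-data")
    if df is not None:
        print(df.head())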
@click.option("-s", "--symbol", required=True)
@click.option(
"-f",
"--save-path",
type=click.Path(exists=False)
)
@click.option("-p/-np", "--print/--no-print", "show", default=True)
@stock_cli_group.command("inside_trade")
@click.pass_context
def inside_trade_cli(ctx, symbol, save_path, show):
"""从雪球获取内部交易数据"""
if not save_path:
save_path = ctx.obj["data_path"]
data = get_inside_trade(symbol, save_path)
if show:
click.echo(data)
if __name__ == "__main__":
inside_trade_cli()
|
[
"pandas.DataFrame",
"json.dump",
"os.makedirs",
"click.option",
"datetime.date.today",
"click.echo",
"time.time",
"pathlib.Path",
"click.Path",
"logging.getLogger"
] |
[((1117, 1144), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (1134, 1144), False, 'import logging\n'), ((3527, 3572), 'click.option', 'click.option', (['"""-s"""', '"""--symbol"""'], {'required': '(True)'}), "('-s', '--symbol', required=True)\n", (3539, 3572), False, 'import click\n'), ((3654, 3720), 'click.option', 'click.option', (['"""-p/-np"""', '"""--print/--no-print"""', '"""show"""'], {'default': '(True)'}), "('-p/-np', '--print/--no-print', 'show', default=True)\n", (3666, 3720), False, 'import click\n'), ((3196, 3227), 'pandas.DataFrame', 'pd.DataFrame', (['inside_trade_data'], {}), '(inside_trade_data)\n', (3208, 3227), True, 'import pandas as pd\n'), ((2118, 2130), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2128, 2130), False, 'from datetime import date\n'), ((2850, 2887), 'os.makedirs', 'os.makedirs', (['data_path'], {'exist_ok': '(True)'}), '(data_path, exist_ok=True)\n', (2861, 2887), False, 'import os\n'), ((3987, 4003), 'click.echo', 'click.echo', (['data'], {}), '(data)\n', (3997, 4003), False, 'import click\n'), ((3626, 3650), 'click.Path', 'click.Path', ([], {'exists': '(False)'}), '(exists=False)\n', (3636, 3650), False, 'import click\n'), ((2078, 2089), 'time.time', 'time.time', ([], {}), '()\n', (2087, 2089), False, 'import time\n'), ((3064, 3129), 'json.dump', 'json.dump', (['inside_trade_data', 'dataf'], {'indent': '(4)', 'ensure_ascii': '(False)'}), '(inside_trade_data, dataf, indent=4, ensure_ascii=False)\n', (3073, 3129), False, 'import json\n'), ((2791, 2806), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (2795, 2806), False, 'from pathlib import Path\n')]
|
import logging
from datetime import datetime
log = logging.getLogger(__name__)
class Timer(object):
def __init__(self, name, in_millis=True, init_message='',
log_warning_step_threshold=-1, log_warning_total_threshold=-1):
super(Timer, self).__init__()
self.name = name
self.init_time = datetime.utcnow()
self.prev_time = self.init_time
self.in_millis = in_millis
self.log_warning_step_threshold = log_warning_step_threshold
self.log_warning_total_threshold = log_warning_total_threshold
self.unit_symbol = 'ms' if in_millis else 's'
log.info('Started timer "{}" - {}'.format(self.name, init_message))
def next(self, msg):
current = datetime.utcnow()
delta_init = current - self.init_time
delta_prev = current - self.prev_time
log_level = logging.INFO
step_delta = self.__get_time_period(delta_prev)
if 0 < self.log_warning_step_threshold < step_delta:
log_level = logging.WARNING
total_delta = self.__get_time_period(delta_init)
if 0 < self.log_warning_total_threshold < total_delta:
log_level = logging.WARNING
log.log(log_level, 'Timer "{}" , step "{}" - total: {}{} | step: {}{}'.format(self.name, msg,
total_delta, self.unit_symbol,
step_delta, self.unit_symbol))
self.prev_time = current
def __get_time_period(self, delta):
if self.in_millis:
return self.__compute_millis(delta)
else:
            return int(delta.total_seconds())
@staticmethod
def __compute_millis(delta):
'''
:param delta:
:type delta: timedelta
:return: milliseconds
:rtype: int
'''
        return int(delta.total_seconds() * 1000)
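
# Illustrative usage sketch: timing two steps of a job. The timer name,
# messages and threshold are arbitrary; logging must be configured for the
# output to appear.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    timer = Timer('demo', init_message='starting work',
                  log_warning_step_threshold=500)
    sum(range(10 ** 6))                  # step 1: some work
    timer.next('summed first million')
    sum(range(2 * 10 ** 6))              # step 2: more work
    timer.next('summed second batch')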
|
[
"datetime.datetime.utcnow",
"logging.getLogger"
] |
[((53, 80), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (70, 80), False, 'import logging\n'), ((335, 352), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (350, 352), False, 'from datetime import datetime\n'), ((743, 760), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (758, 760), False, 'from datetime import datetime\n')]
|
import pytest
PREDICTION_TEXT = "This is a test prediction. If a finetune model predicts on this we may get an empty list as result."
@pytest.fixture(scope="module")
def model_group(indico):
results = indico.model_groups()
try:
return next(
result
for result in results
if not result["retrainRequired"]
and result["status"] == "COMPLETE"
and result.get_selected_model().get("id")
)
except StopIteration:
raise AssertionError(
"The authenticated user does not have a successfully trained model"
)
def test_model_group_predict(model_group):
result = model_group.predict([PREDICTION_TEXT])
# TODO: Break this test by task_type and have saved model groups for these tests. this will require a test user api token.
assert isinstance(result, list)
assert len(result) == 1
def test_model_group_info(model_group):
result = model_group.info()
assert isinstance(result, dict)
assert "class_counts" in result
assert "class_names" in result
assert "metrics" in result
def test_model_group_load(model_group):
"""
TODO: Ensure this test passes with Finetune model
"""
result = model_group.load()
assert result == "ready"
def test_model_group_predict_with_model_id(model_group):
model_id = model_group.get_selected_model().get("id")
result = model_group.predict([PREDICTION_TEXT], model_id=model_id)
assert isinstance(result, list)
assert len(result) == 1
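
# Illustrative invocation: these tests assume an `indico` fixture that yields
# an authenticated client (typically defined in a conftest.py). A hypothetical
# run: pytest test_model_groups.py -k model_group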
|
[
"pytest.fixture"
] |
[((138, 168), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (152, 168), False, 'import pytest\n')]
|
import numpy as np
from .real_ffts import _RealFFTBasis
class LegendreBasis(_RealFFTBasis):
r"""
Discretize a continuous field into `deg` local states using a
Legendre polynomial basis such that,
.. math::
\frac{1}{\Delta x} \int_s m(h, x) dx =
\sum_0^{L-1} m[l, s] P_l(h)
where the :math:`P_l` are Legendre polynomials and the local state space
:math:`H` is mapped into the orthogonal domain of the Legendre polynomials
.. math::
-1 \le H \le 1
The mapping of :math:`H` into the domain is done automatically in PyMKS by
using the `domain` key work argument.
>>> n_states = 3
>>> X = np.array([[0.25, 0.1],
... [0.5, 0.25]])
>>> def P(x):
... x = 4 * x - 1
... polys = np.array((np.ones_like(x), x, (3.*x**2 - 1.) / 2.))
... tmp = (2. * np.arange(3)[:, None, None] + 1.) / 2. * polys
... return np.rollaxis(tmp, 0, 3)
>>> basis = LegendreBasis(n_states, [0., 0.5])
>>> assert(np.allclose(basis.discretize(X), P(X)))
If the microstructure local state values fall outside of the specified
domain they will no longer be mapped into the orthogonal domain of the
legendre polynomials.
>>> n_states = 2
>>> X = np.array([-1, 1])
>>> leg_basis = LegendreBasis(n_states, domain=[0, 1])
>>> leg_basis.discretize(X)
Traceback (most recent call last):
...
RuntimeError: X must be within the specified domain
"""
def discretize(self, X):
"""
Discretize `X`.
Args:
X (ND array): The microstructure, an `(n_samples, n_x, ...)`
shaped array where `n_samples` is the number of samples and
`n_x` is the spatial discretization.
Returns:
            Float valued field of Legendre polynomial coefficients.
>>> X = np.array([[-1, 1],
... [0, -1]])
>>> leg_basis = LegendreBasis(3, [-1, 1])
>>> def p(x):
... polys = np.array((np.ones_like(x), x, (3.*x**2 - 1.) / 2.))
... tmp = (2. * np.arange(3)[:, None, None] + 1.) / 2. * polys
... return np.rollaxis(tmp, 0, 3)
>>> assert(np.allclose(leg_basis.discretize(X), p(X)))
"""
self.check(X)
self._select_axes(X)
leg = np.polynomial.legendre
X_scaled = (2. * X - self.domain[0] - self.domain[1]) /\
(self.domain[1] - self.domain[0])
norm = (2. * np.array(self.n_states) + 1) / 2.
X_Legendre = (leg.legval(X_scaled, np.eye(len(self.n_states)) * norm))
return np.rollaxis(X_Legendre, 0, len(X_Legendre.shape))
|
[
"numpy.array"
] |
[((2494, 2517), 'numpy.array', 'np.array', (['self.n_states'], {}), '(self.n_states)\n', (2502, 2517), True, 'import numpy as np\n')]
|
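discretize hinges on the affine map X_scaled = (2X - a - b) / (b - a), which sends the user's domain [a, b] onto [-1, 1], where the Legendre polynomials are orthogonal. A small numeric check using the docstring's domain [0, 0.5]:

import numpy as np

a, b = 0.0, 0.5
X = np.array([0.0, 0.25, 0.5])
X_scaled = (2.0 * X - a - b) / (b - a)
print(X_scaled)  # [-1.  0.  1.]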
"""
The point of this module is to run a series of tests on the bricklayer program passed,
so that the end user can be alerted if there are some issues happening
"""
import ast
import subprocess
import py_compile
import importlib
from bricklayer.utils.logger import Logger
from bricklayer.doctor.constants import HelpMessages
from bricklayer.doctor.ast_visitors import *
from difflib import get_close_matches
class Checker(object):
def check_program(self, program_name):
can_be_compiled = self.check_program_can_be_compiled(program_name)
if can_be_compiled:
self.check_program_is_using_appropriate_constructs(program_name)
self.check_program_isnt_mispelling_functions(program_name)
def check_program_can_be_compiled(self, program_name):
try:
py_compile.compile(program_name, doraise=True)
return True
except py_compile.PyCompileError as e:
if "IndentationError" in str(e):
Logger.debug(HelpMessages.BAD_INDENTATION)
else:
Logger.debug(e)
return False
def check_program_isnt_mispelling_functions(self, program_name):
user_defined_functions = self.collect_user_defined_functions(program_name)
available_functions = self.collect_available_functions(program_name)
called_functions = self.collect_called_functions(program_name)
for func in called_functions:
if func in user_defined_functions + available_functions:
continue
closest_matches = get_close_matches(func, user_defined_functions + available_functions)
Logger.debug(HelpMessages.INCORRECT_FUNCTION_NAME.format(func, closest_matches))
def check_program_is_using_appropriate_constructs(self, program_name):
pass
@staticmethod
def collect_user_defined_functions(program_name):
v = UserDefinedFunctionVisitor()
program_file = open(program_name, 'r')
code = program_file.read()
v.visit(ast.parse(code))
return v.functions
@staticmethod
def collect_available_functions(program_name):
v = ImportedModuleVisitor()
program_file = open(program_name, 'r')
code = program_file.read()
v.visit(ast.parse(code))
available_functions = dir(__builtins__)
for _import, _from in v.imports:
try:
if _from:
# It might be a function, constant, etc being imported, so we need
# to check
try:
available_functions += dir(importlib.import_module('.' + _import, package=_from))
except ImportError:
                        # Might be a function, constant, etc.
m = importlib.import_module(_from)
f = getattr(m, _import)
if f is not None and hasattr(f, '__call__'):
available_functions.append(_import)
else:
available_functions += dir(importlib.import_module(_import))
except ImportError:
pass
return available_functions
@staticmethod
def collect_called_functions(program_name):
v = CalledFunctionVisitor()
program_file = open(program_name, 'r')
code = program_file.read()
v.visit(ast.parse(code))
return v.functions
|
[
"difflib.get_close_matches",
"importlib.import_module",
"py_compile.compile",
"bricklayer.utils.logger.Logger.debug",
"ast.parse",
"bricklayer.doctor.constants.HelpMessages.INCORRECT_FUNCTION_NAME.format"
] |
[((817, 863), 'py_compile.compile', 'py_compile.compile', (['program_name'], {'doraise': '(True)'}), '(program_name, doraise=True)\n', (835, 863), False, 'import py_compile\n'), ((1577, 1646), 'difflib.get_close_matches', 'get_close_matches', (['func', '(user_defined_functions + available_functions)'], {}), '(func, user_defined_functions + available_functions)\n', (1594, 1646), False, 'from difflib import get_close_matches\n'), ((2049, 2064), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (2058, 2064), False, 'import ast\n'), ((2297, 2312), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (2306, 2312), False, 'import ast\n'), ((3429, 3444), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (3438, 3444), False, 'import ast\n'), ((1672, 1738), 'bricklayer.doctor.constants.HelpMessages.INCORRECT_FUNCTION_NAME.format', 'HelpMessages.INCORRECT_FUNCTION_NAME.format', (['func', 'closest_matches'], {}), '(func, closest_matches)\n', (1715, 1738), False, 'from bricklayer.doctor.constants import HelpMessages\n'), ((996, 1038), 'bricklayer.utils.logger.Logger.debug', 'Logger.debug', (['HelpMessages.BAD_INDENTATION'], {}), '(HelpMessages.BAD_INDENTATION)\n', (1008, 1038), False, 'from bricklayer.utils.logger import Logger\n'), ((1073, 1088), 'bricklayer.utils.logger.Logger.debug', 'Logger.debug', (['e'], {}), '(e)\n', (1085, 1088), False, 'from bricklayer.utils.logger import Logger\n'), ((3106, 3138), 'importlib.import_module', 'importlib.import_module', (['_import'], {}), '(_import)\n', (3129, 3138), False, 'import importlib\n'), ((2640, 2693), 'importlib.import_module', 'importlib.import_module', (["('.' + _import)"], {'package': '_from'}), "('.' + _import, package=_from)\n", (2663, 2693), False, 'import importlib\n'), ((2825, 2855), 'importlib.import_module', 'importlib.import_module', (['_from'], {}), '(_from)\n', (2848, 2855), False, 'import importlib\n')]
|
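The misspelling check above leans entirely on difflib.get_close_matches, which ranks candidates by similarity ratio (default cutoff 0.6). A standalone illustration with made-up function names:

from difflib import get_close_matches

available = ["move_forward", "turn_left", "turn_right"]
# A typo in a called function maps back to plausible candidates:
print(get_close_matches("move_forwrd", available))  # ['move_forward']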
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 2 10:58:30 2020
@author: <NAME>
"""
#%%importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#%% read csv
dataset = pd.read_csv("svm_dataset.csv")
#%%
dataset.drop(["id","Unnamed: 32"],axis = 1, inplace = True)
# Malignant = 'M'
# Benign = 'B'
#%%
M = dataset[dataset.diagnosis == "M"]
B = dataset[dataset.diagnosis == "B"]
# Scatter Plot
plt.scatter(M.radius_mean,M.texture_mean,color = 'red', label = "Malignant", alpha = 0.3)
plt.scatter(B.radius_mean,B.texture_mean,color = 'green', label = "Benign", alpha = 0.3)
plt.xlabel("radius_mean")
plt.ylabel("texture_mean")
plt.legend()
plt.show()
#%%
dataset.diagnosis = [1 if each == "M" else 0 for each in dataset.diagnosis]
y = dataset.diagnosis.values
x_data = dataset.drop(["diagnosis"],axis = 1)
#%%
#Normalization
x = ((x_data - np.min(x_data)) / ((np.max(x_data)) - np.min(x_data)))
#%%
#Train-Test Split
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size = 0.3, random_state =1)
#%% SVM
from sklearn.svm import SVC
svm = SVC(random_state = 1)
svm.fit(x_train,y_train)
#%% Accuracy
print("Accuracy of SVM Algorithm :",svm.score(x_test,y_test))
|
[
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"sklearn.model_selection.train_test_split",
"numpy.min",
"numpy.max",
"sklearn.svm.SVC",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((204, 234), 'pandas.read_csv', 'pd.read_csv', (['"""svm_dataset.csv"""'], {}), "('svm_dataset.csv')\n", (215, 234), True, 'import pandas as pd\n'), ((433, 522), 'matplotlib.pyplot.scatter', 'plt.scatter', (['M.radius_mean', 'M.texture_mean'], {'color': '"""red"""', 'label': '"""Malignant"""', 'alpha': '(0.3)'}), "(M.radius_mean, M.texture_mean, color='red', label='Malignant',\n alpha=0.3)\n", (444, 522), True, 'import matplotlib.pyplot as plt\n'), ((523, 611), 'matplotlib.pyplot.scatter', 'plt.scatter', (['B.radius_mean', 'B.texture_mean'], {'color': '"""green"""', 'label': '"""Benign"""', 'alpha': '(0.3)'}), "(B.radius_mean, B.texture_mean, color='green', label='Benign',\n alpha=0.3)\n", (534, 611), True, 'import matplotlib.pyplot as plt\n'), ((612, 637), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""radius_mean"""'], {}), "('radius_mean')\n", (622, 637), True, 'import matplotlib.pyplot as plt\n'), ((638, 664), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""texture_mean"""'], {}), "('texture_mean')\n", (648, 664), True, 'import matplotlib.pyplot as plt\n'), ((665, 677), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (675, 677), True, 'import matplotlib.pyplot as plt\n'), ((678, 688), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (686, 688), True, 'import matplotlib.pyplot as plt\n'), ((1043, 1096), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.3)', 'random_state': '(1)'}), '(x, y, test_size=0.3, random_state=1)\n', (1059, 1096), False, 'from sklearn.model_selection import train_test_split\n'), ((1142, 1161), 'sklearn.svm.SVC', 'SVC', ([], {'random_state': '(1)'}), '(random_state=1)\n', (1145, 1161), False, 'from sklearn.svm import SVC\n'), ((880, 894), 'numpy.min', 'np.min', (['x_data'], {}), '(x_data)\n', (886, 894), True, 'import numpy as np\n'), ((900, 914), 'numpy.max', 'np.max', (['x_data'], {}), '(x_data)\n', (906, 914), True, 'import numpy as np\n'), ((918, 932), 'numpy.min', 'np.min', (['x_data'], {}), '(x_data)\n', (924, 932), True, 'import numpy as np\n')]
|
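The normalization step above is a plain min-max rescale into [0, 1]. Factored out as a reusable helper (a sketch; in practice the scaler should be fit on the training split only, to avoid leaking test statistics):

import numpy as np

def min_max_scale(x):
    # Column-wise (x - min) / (max - min), matching the expression in the script.
    return (x - np.min(x, axis=0)) / (np.max(x, axis=0) - np.min(x, axis=0))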
from Store import Store
import math
def main(function_name, request_id, runtime, input, output, to, keys):
store = Store(function_name, request_id, input, output, to, keys)
param = store.fetch(['x'])
x = int(param['x'])
res = {'sqrt_result': round(math.sqrt(x), 3)}
store.put(res, {})
|
[
"Store.Store",
"math.sqrt"
] |
[((124, 181), 'Store.Store', 'Store', (['function_name', 'request_id', 'input', 'output', 'to', 'keys'], {}), '(function_name, request_id, input, output, to, keys)\n', (129, 181), False, 'from Store import Store\n'), ((273, 285), 'math.sqrt', 'math.sqrt', (['x'], {}), '(x)\n', (282, 285), False, 'import math\n')]
|
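The numeric core of this handler is just a rounded square root; a quick standalone check of that step:

import math

print(round(math.sqrt(16), 3))  # 4.0
print(round(math.sqrt(2), 3))   # 1.414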
import pytest
import numpy as np
from numpy.testing import assert_allclose
from sunpy.image.util import to_norm, un_norm
def test_to_norm():
array_simple = np.array([10., 20., 30., 100.])
assert_allclose(to_norm(array_simple), np.array([0.1, 0.2, 0.3, 1.]))
array_simple_neg = np.array([-10., 0., 10., 90.])
assert_allclose(to_norm(array_simple_neg), np.array([0, 0.1, 0.2, 1.]))
def test_un_norm():
array_simple = np.array([10, 20, 30, 100.])
assert_allclose(un_norm(np.array([0.1, 0.2, 0.3, 1.]), array_simple), array_simple)
array_simple_neg = np.array([-10, 0, 10, 90])
assert_allclose(un_norm(np.array([0, 0.1, 0.2, 1.]), array_simple_neg), array_simple_neg)
|
[
"sunpy.image.util.to_norm",
"numpy.array"
] |
[((164, 199), 'numpy.array', 'np.array', (['[10.0, 20.0, 30.0, 100.0]'], {}), '([10.0, 20.0, 30.0, 100.0])\n', (172, 199), True, 'import numpy as np\n'), ((293, 327), 'numpy.array', 'np.array', (['[-10.0, 0.0, 10.0, 90.0]'], {}), '([-10.0, 0.0, 10.0, 90.0])\n', (301, 327), True, 'import numpy as np\n'), ((441, 470), 'numpy.array', 'np.array', (['[10, 20, 30, 100.0]'], {}), '([10, 20, 30, 100.0])\n', (449, 470), True, 'import numpy as np\n'), ((581, 607), 'numpy.array', 'np.array', (['[-10, 0, 10, 90]'], {}), '([-10, 0, 10, 90])\n', (589, 607), True, 'import numpy as np\n'), ((216, 237), 'sunpy.image.util.to_norm', 'to_norm', (['array_simple'], {}), '(array_simple)\n', (223, 237), False, 'from sunpy.image.util import to_norm, un_norm\n'), ((239, 269), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3, 1.0]'], {}), '([0.1, 0.2, 0.3, 1.0])\n', (247, 269), True, 'import numpy as np\n'), ((344, 369), 'sunpy.image.util.to_norm', 'to_norm', (['array_simple_neg'], {}), '(array_simple_neg)\n', (351, 369), False, 'from sunpy.image.util import to_norm, un_norm\n'), ((371, 399), 'numpy.array', 'np.array', (['[0, 0.1, 0.2, 1.0]'], {}), '([0, 0.1, 0.2, 1.0])\n', (379, 399), True, 'import numpy as np\n'), ((498, 528), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3, 1.0]'], {}), '([0.1, 0.2, 0.3, 1.0])\n', (506, 528), True, 'import numpy as np\n'), ((636, 664), 'numpy.array', 'np.array', (['[0, 0.1, 0.2, 1.0]'], {}), '([0, 0.1, 0.2, 1.0])\n', (644, 664), True, 'import numpy as np\n')]
|
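A simplified stand-in consistent with the assertions above (not necessarily sunpy's actual implementation): shift a negative minimum up to zero, then scale so the maximum becomes 1.

import numpy as np

def to_norm_sketch(arr):
    arr = arr - min(np.min(arr), 0.0)
    return arr / np.max(arr)

print(to_norm_sketch(np.array([-10., 0., 10., 90.])))   # [0.  0.1 0.2 1. ]
print(to_norm_sketch(np.array([10., 20., 30., 100.])))  # [0.1 0.2 0.3 1. ]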
import torch
from torch import Tensor
from torch import nn
from torch import functional as F
from typing import Union, Tuple, List, Iterable, Dict
import os,sys
import json
from .utils import fullname,import_from_string
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from log import logging
logger=logging.getLogger(__name__)
class MLP(nn.Module):
def __init__(self, in_features,
out_features,
bias = True,
activation_function=nn.Tanh(),
init_weight: Tensor = None,
init_bias: Tensor = None):
super(MLP,self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features):
'''
The output shape is like features.shape except last dim
'''
return self.linear(features)
def get_config_dict(self):
'''
        Must return a plain dict so that the model can be re-instantiated from its saved config.
'''
return {'in_features': self.in_features, 'out_features': self.out_features, 'bias': self.bias, 'activation_function': fullname(self.activation_function)}
def save(self,output_path):
'''
        Also persist the config dict alongside the weights.
'''
os.makedirs(output_path,exist_ok=True)
with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut)
torch.save(self.state_dict(),os.path.join(output_path, 'pytorch_model.bin'))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, 'config.json')) as fIn:
config = json.load(fIn)
config['activation_function'] = import_from_string(config['activation_function'])()
            # The line above is needed because the activation function is stored as an import path
            # (it is a callable, not a plain value), so it must be re-created here.
model = MLP(**config)
model.load_state_dict(torch.load(os.path.join(input_path, 'pytorch_model.bin'), map_location=torch.device('cpu')))
return model
|
[
"torch.nn.Parameter",
"json.load",
"os.makedirs",
"log.logging.getLogger",
"torch.nn.Tanh",
"os.path.realpath",
"torch.nn.Linear",
"torch.device",
"os.path.join"
] |
[((331, 358), 'log.logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (348, 358), False, 'from log import logging\n'), ((540, 549), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (547, 549), False, 'from torch import nn\n'), ((873, 920), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'out_features'], {'bias': 'bias'}), '(in_features, out_features, bias=bias)\n', (882, 920), False, 'from torch import nn\n'), ((1596, 1635), 'os.makedirs', 'os.makedirs', (['output_path'], {'exist_ok': '(True)'}), '(output_path, exist_ok=True)\n', (1607, 1635), False, 'import os, sys\n'), ((269, 295), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (285, 295), False, 'import os, sys\n'), ((991, 1016), 'torch.nn.Parameter', 'nn.Parameter', (['init_weight'], {}), '(init_weight)\n', (1003, 1016), False, 'from torch import nn\n'), ((1083, 1106), 'torch.nn.Parameter', 'nn.Parameter', (['init_bias'], {}), '(init_bias)\n', (1095, 1106), False, 'from torch import nn\n'), ((1799, 1845), 'os.path.join', 'os.path.join', (['output_path', '"""pytorch_model.bin"""'], {}), "(output_path, 'pytorch_model.bin')\n", (1811, 1845), False, 'import os, sys\n'), ((1984, 1998), 'json.load', 'json.load', (['fIn'], {}), '(fIn)\n', (1993, 1998), False, 'import json\n'), ((1653, 1693), 'os.path.join', 'os.path.join', (['output_path', '"""config.json"""'], {}), "(output_path, 'config.json')\n", (1665, 1693), False, 'import os, sys\n'), ((1914, 1953), 'os.path.join', 'os.path.join', (['input_path', '"""config.json"""'], {}), "(input_path, 'config.json')\n", (1926, 1953), False, 'import os, sys\n'), ((2199, 2244), 'os.path.join', 'os.path.join', (['input_path', '"""pytorch_model.bin"""'], {}), "(input_path, 'pytorch_model.bin')\n", (2211, 2244), False, 'import os, sys\n'), ((2259, 2278), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2271, 2278), False, 'import torch\n')]
|
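Assuming the MLP class above is importable and its fullname/import_from_string helpers behave as expected, save/load round-trips both config.json and the weights. A hypothetical usage (the path is illustrative):

import torch

mlp = MLP(in_features=768, out_features=128)
mlp.save("/tmp/mlp_head")
restored = MLP.load("/tmp/mlp_head")
assert torch.allclose(mlp.linear.weight, restored.linear.weight)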
#!/usr/bin/env python
"""
MetaWIBELE: summary_function_annotation module
Summary function annotations
Copyright (c) 2019 Harvard School of Public Health
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import os
import os.path
import re
import argparse
try:
from metawibele import config
from metawibele import utilities
except ImportError:
sys.exit("CRITICAL ERROR: Unable to find the MetaWIBELE python package." +
" Please check your install.")
# ---------------------------------------------------------------
# Description and arguments
# ---------------------------------------------------------------
description = """
Summary functional annotation
"""
def get_args ():
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-l', "--list",
help='input the list file of annotation files',
required=True)
parser.add_argument('-a', "--uniref-annotation",
help='input uniref90 annotation file',
required=True)
parser.add_argument('-b', "--basename",
help='specify the basename for output file',
default=None)
parser.add_argument('-c', "--cluster",
help='input the cluster file for protein families',
default=None)
parser.add_argument('-s', "--study",
help='specify the study name',
default=None)
parser.add_argument('-o', "--output",
help='output functional annotation file',
required=True)
values = parser.parse_args()
return values
# get_args
#==============================================================
# collect cluster info
#==============================================================
def collect_cluster_info (clust_file): # discovery_cohort.peptides.clust
cluster = {}
open_file = open(clust_file, "r")
myclust = ""
myclust_id = ""
for line in open_file.readlines():
line = line.strip()
if not len(line):
continue
if re.search("^>", line):
mym = re.search(">([^;]+)", line)
myclust = mym.group(1)
mym = re.search("cluster=([\d]+)", line)
myclust_id = "Cluster_" + mym.group(1)
if not myclust in cluster:
cluster[myclust] = {}
continue
mym = re.search("^([\S]+)", line)
myid = mym.group(1)
cluster[myclust][myid] = myclust_id
# foreach line
open_file.close()
return cluster
# function collect_cluster_info
#==============================================================
# collect UniRef annotation info
#==============================================================
def collect_uniref_info (clust_file): # summary_peptide_family_annotation.uniref90_annotation.tsv
note = {}
titles = {}
open_file = open(clust_file, "r")
line = open_file.readline()
line = line.strip()
info = line.split("\t")
id_flag = info[0]
for item in info:
titles[item] = info.index(item)
for line in open_file.readlines():
line = line.strip()
if not len(line):
continue
info = line.split("\t")
myid = info[0]
mynote = info[titles["note"]]
note[info[0]] = mynote
# foreach line
open_file.close()
return note, id_flag
# function collect_uniref_info
#==============================================================
# assign annotation to peptide families info
#==============================================================
def collect_annotation (list_file, id_flag, mybase):
annotation = {}
anns = {}
note = {}
open_list = open(list_file, "r")
for myfile in open_list.readlines():
myfile = myfile.strip()
if not len(myfile):
continue
if re.search("^#", myfile):
continue
if not os.path.isfile(myfile):
config.logger.info ("ERROR! File not exist!\t" + myfile)
continue
open_file = open(myfile, "r")
titles = {}
if re.search(mybase + "_([\S]+)_proteinfamilies", os.path.basename(myfile)):
mym = re.search(mybase + "_([\S]+)_proteinfamilies", os.path.basename(myfile))
method = mym.group(1)
else:
method = "metawibele"
for line in open_file.readlines():
line = line.strip()
if not len(line):
continue
info = line.split("\t")
if re.search("^" + id_flag, line):
for item in info:
titles[item] = info.index(item)
continue
myid = info[titles[id_flag]]
if "note" in titles:
note[myid] = info[titles["note"]]
if re.search("UniRef", method):
desc = info[titles["Protein_names"]]
tax = info[titles["Tax"]]
taxID = info[titles["TaxID"]]
reptax = info[titles["Rep_Tax"]]
reptaxID = info[titles["Rep_TaxID"]]
uniprot = info[titles["UniProtKB"]]
uniref = info[titles["unirefID"]]
if len(info) < 8:
config.logger.info ("WARNING!" + line)
mytype = info[1] + "\t" + info[2] + "\t" + desc + "\t" + tax + "\t" + taxID + "\t" + reptax + "\t" + reptaxID + "\t" + uniprot + "\t" + uniref
else:
mytype = info[1] + "\t" + info[2] + "\tNA\tNA\tNA\tNA\tNA\tNA\tNA"
if not myid in annotation:
annotation[myid] = {}
if not method in annotation[myid]:
annotation[myid][method] = []
annotation[myid][method].append(mytype)
if not myid in anns:
anns[myid] = {}
if not method in anns[myid]:
anns[myid][method] = {}
anns[myid][method][info[1]] = ""
# foreach line
open_file.close()
# foreach annotation file
open_list.close()
# collect clusters which have on decent homologies in UniRef90 or uncharacterized in UniRef90
annotation_non_uniref = {}
anns_uniref = {}
for myid in annotation.keys():
if "UniRef90" in anns[myid]:
flag = 0
if "UniRef90_unknown" in anns[myid]["UniRef90"] or "UniRef90_uncharacterized" in anns[myid]["UniRef90"]: # UniRef90 unannotated ones
flag = 1
if not myid in annotation_non_uniref:
annotation_non_uniref[myid] = {}
for method in annotation[myid]:
if not method in annotation_non_uniref[myid]:
annotation_non_uniref[myid][method] = []
for item in annotation[myid][method]:
annotation_non_uniref[myid][method].append(item)
# foreach method
# if unannotated in UniRef90
if flag == 0: # characterized by UniRef90
if not myid in anns_uniref:
anns_uniref[myid] = {}
for method in annotation[myid]:
if not method in anns_uniref[myid]:
anns_uniref[myid][method] = []
for item in annotation[myid][method]:
anns_uniref[myid][method].append(item)
# foreach method
# if have UniRef method
# foreach cluster
return annotation, anns_uniref, annotation_non_uniref, note
# collect_annotation
# assign_annotation
def assign_annotation (id_flag, pep_cluster, annotation, study, note1, note2, outfile):
outs = {}
anns = {}
number = {}
for pepid in sorted(pep_cluster.keys()): # foreach peptide family
flag = 0
for member in pep_cluster[pepid].keys():
if id_flag == utilities.PROTEIN_FAMILY_ID:
if pepid != member:
continue
myclust_id = pep_cluster[pepid][member]
if id_flag == utilities.PROTEIN_ID:
myclust_id = member
if not myclust_id in annotation: # no corresponding annotation
continue
if not myclust_id in outs:
outs[myclust_id] = {}
for method in sorted(annotation[myclust_id].keys()):
if not method in outs[myclust_id]:
outs[myclust_id][method] = {}
for myid in annotation[myclust_id][method]:
flag = 1
myinfo = myid.split("\t")
mytype = myinfo[0]
outs[myclust_id][method][myid] = ""
if not myclust_id in anns:
anns[myclust_id] = {}
anns[myclust_id][mytype] = ""
# foreach type
# foreach member
# foreach peptide cluster
interest = ["cellWall", "outerMembrane", "extracellular", "signaling", "transmembrane", "interaction", "PfamDomain", "COG", "KEGG-KOs", "GO"]
other = ["cytoplasmic", "cytoplasmicMembrane", "periplasmic",
"cellSurface", "cytoplasm", "membrane", "periplasm", "nucleus", "fimbrium", "virion", "sporeCore", "mitochondrion", "cellEnvelop", "cellMembrane", "cellInnerMembrane", "bacterialFlagellum",
"others", "Others"]
unknown = ["unknown", "hypothetical"]
uncharacterized = ["uncharacterized"]
interpro = ["SUPERFAMILY", "ProSitePatterns", "ProSiteProfiles", "Gene3D", "PANTHER", "TIGRFAM", "SFLD", "ProDom", "Hamap", "SMART", "CDD", "PRINTS", "PIRSF", "MobiDBLite", "Coils"]
total_num = 0
for myclust in anns.keys():
total_num = total_num + 1
interest_flag = 0
other_flag = 0
unchar_flag = 0
pfam_flag = 0
interpro_flag = 0
for mytype in anns[myclust].keys():
if not re.search("_", mytype):
# debug
config.logger.info (mytype)
continue
mym, category = mytype.split("_")
category = re.sub("GO\(BP\)", "GO", category)
category = re.sub("GO\(CC\)", "GO", category)
category = re.sub("GO\(MF\)", "GO", category)
if category in interest:
interest_flag = 1
if not category in number:
number[category] = {}
number[category][myclust] = ""
if category in other or category in interpro:
other_flag = 1
if category in uncharacterized:
unchar_flag = 1
#if category in interpro:
# interpro_flag = 1
# foreach type of annotation
if interest_flag == 0 and other_flag == 1: # other type annotation
category = "Others"
if not category in number:
number[category] = {}
number[category][myclust] = ""
if interest_flag == 0 and other_flag == 0 and unchar_flag == 1: # uncharacterized type annotation
category = "uncharacterized"
if not category in number:
number[category] = {}
number[category][myclust] = ""
if interest_flag == 0 and other_flag == 0 and unchar_flag == 0: # unknown type annotation
category = "Unknown"
if not category in number:
number[category] = {}
number[category][myclust] = ""
# foreach cluster
open_out = open(outfile, "w")
open_out.write(id_flag + "\tstudy\tmethod\tcategory\ttype\tdetail\tProtein_names\tTax\tTaxID\tRep_Tax\tRep_TaxID\tUniProtKB\tunirefID\tnote\n")
for myclust in sorted(outs.keys()):
note = {}
if myclust in note1:
note[note1[myclust]] = ""
if myclust in note2:
note[note2[myclust]] = ""
mynote = "good"
if len(note.keys()) > 0:
mynote = ";".join(sorted(note.keys()))
#if len(note.keys()) > 1:
# if "good" in note:
# note.pop("good")
# mynote = ";".join(sorted(note.keys()))
for method in sorted(outs[myclust].keys()):
for myid in sorted(outs[myclust][method].keys()):
myinfo = myid.split("\t")
mytype = myinfo[0]
if not re.search("_", mytype):
# debug
config.logger.info (mytype)
continue
mym, category = mytype.split("_")
if category in unknown:
category = "Unknown"
if category in other:
category = "Others"
open_out.write(myclust + "\t" + study + "\t" + method + "\t" + category + "\t" + myid + "\t" + mynote + "\n")
# foreach type
# foreach method
# foreach peptide family
open_out.close()
# assign_annotation
#==============================================================
########### Main processing ############
#==============================================================
def main():
### get arguments ###
values = get_args ()
myfamily = config.protein_family
mybase = config.basename
mystudy = config.study
if values.cluster:
myfamily = values.cluster
if values.basename:
mybase = values.basename
if values.study:
mystudy = values.study
config.logger.info ("### Start summary_function_annotation step ####")
### collect cluster info ###
config.logger.info ("Get cluster info ......starting")
pep_cluster = collect_cluster_info (myfamily)
config.logger.info ("Get cluster info ......done")
### collect annotation info ###
config.logger.info ("Get annotation info ......starting")
note1, id_flag = collect_uniref_info (values.uniref_annotation)
annotation, anns_uniref, anns_non_uniref, note2 = collect_annotation (values.list, id_flag, mybase)
config.logger.info ("Get annotation info ......done")
### assign annotation to peptide families ###
config.logger.info ("Assign annotation......starting")
assign_annotation (id_flag, pep_cluster, annotation, mystudy, note1, note2, values.output)
uniref_out = re.sub(".tsv", ".uniref.tsv", values.output)
assign_annotation (id_flag, pep_cluster, anns_uniref, mystudy, note1, note2, uniref_out)
uniref_non = re.sub(".tsv", ".non_uniref.tsv", values.output)
assign_annotation (id_flag, pep_cluster, anns_non_uniref, mystudy, note1, note2, uniref_non)
config.logger.info ("Assign annotation......done")
config.logger.info ("### Finish summary_function_annotation step ####")
# end: main
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"os.path.basename",
"metawibele.config.logger.info",
"os.path.isfile",
"re.sub",
"re.search",
"sys.exit"
] |
[((1696, 1744), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description'}), '(description=description)\n', (1719, 1744), False, 'import argparse\n'), ((12309, 12378), 'metawibele.config.logger.info', 'config.logger.info', (['"""### Start summary_function_annotation step ####"""'], {}), "('### Start summary_function_annotation step ####')\n", (12327, 12378), False, 'from metawibele import config\n'), ((12414, 12467), 'metawibele.config.logger.info', 'config.logger.info', (['"""Get cluster info ......starting"""'], {}), "('Get cluster info ......starting')\n", (12432, 12467), False, 'from metawibele import config\n'), ((12517, 12566), 'metawibele.config.logger.info', 'config.logger.info', (['"""Get cluster info ......done"""'], {}), "('Get cluster info ......done')\n", (12535, 12566), False, 'from metawibele import config\n'), ((12604, 12660), 'metawibele.config.logger.info', 'config.logger.info', (['"""Get annotation info ......starting"""'], {}), "('Get annotation info ......starting')\n", (12622, 12660), False, 'from metawibele import config\n'), ((12829, 12881), 'metawibele.config.logger.info', 'config.logger.info', (['"""Get annotation info ......done"""'], {}), "('Get annotation info ......done')\n", (12847, 12881), False, 'from metawibele import config\n'), ((12932, 12985), 'metawibele.config.logger.info', 'config.logger.info', (['"""Assign annotation......starting"""'], {}), "('Assign annotation......starting')\n", (12950, 12985), False, 'from metawibele import config\n'), ((13093, 13137), 're.sub', 're.sub', (['""".tsv"""', '""".uniref.tsv"""', 'values.output'], {}), "('.tsv', '.uniref.tsv', values.output)\n", (13099, 13137), False, 'import re\n'), ((13242, 13290), 're.sub', 're.sub', (['""".tsv"""', '""".non_uniref.tsv"""', 'values.output'], {}), "('.tsv', '.non_uniref.tsv', values.output)\n", (13248, 13290), False, 'import re\n'), ((13386, 13435), 'metawibele.config.logger.info', 'config.logger.info', (['"""Assign annotation......done"""'], {}), "('Assign annotation......done')\n", (13404, 13435), False, 'from metawibele import config\n'), ((13439, 13509), 'metawibele.config.logger.info', 'config.logger.info', (['"""### Finish summary_function_annotation step ####"""'], {}), "('### Finish summary_function_annotation step ####')\n", (13457, 13509), False, 'from metawibele import config\n'), ((1338, 1447), 'sys.exit', 'sys.exit', (["('CRITICAL ERROR: Unable to find the MetaWIBELE python package.' +\n ' Please check your install.')"], {}), "('CRITICAL ERROR: Unable to find the MetaWIBELE python package.' +\n ' Please check your install.')\n", (1346, 1447), False, 'import sys\n'), ((3043, 3064), 're.search', 're.search', (['"""^>"""', 'line'], {}), "('^>', line)\n", (3052, 3064), False, 'import re\n'), ((3291, 3319), 're.search', 're.search', (['"""^([\\\\S]+)"""', 'line'], {}), "('^([\\\\S]+)', line)\n", (3300, 3319), False, 'import re\n'), ((4613, 4636), 're.search', 're.search', (['"""^#"""', 'myfile'], {}), "('^#', myfile)\n", (4622, 4636), False, 'import re\n'), ((3075, 3102), 're.search', 're.search', (['""">([^;]+)"""', 'line'], {}), "('>([^;]+)', line)\n", (3084, 3102), False, 'import re\n'), ((3138, 3173), 're.search', 're.search', (['"""cluster=([\\\\d]+)"""', 'line'], {}), "('cluster=([\\\\d]+)', line)\n", (3147, 3173), False, 'import re\n'), ((4659, 4681), 'os.path.isfile', 'os.path.isfile', (['myfile'], {}), '(myfile)\n', (4673, 4681), False, 'import os\n'), ((4686, 4741), 'metawibele.config.logger.info', 'config.logger.info', (["('ERROR! File not exist!\\t' + myfile)"], {}), "('ERROR! File not exist!\\t' + myfile)\n", (4704, 4741), False, 'from metawibele import config\n'), ((4853, 4877), 'os.path.basename', 'os.path.basename', (['myfile'], {}), '(myfile)\n', (4869, 4877), False, 'import os\n'), ((5148, 5178), 're.search', 're.search', (["('^' + id_flag)", 'line'], {}), "('^' + id_flag, line)\n", (5157, 5178), False, 'import re\n'), ((5352, 5379), 're.search', 're.search', (['"""UniRef"""', 'method'], {}), "('UniRef', method)\n", (5361, 5379), False, 'import re\n'), ((9596, 9632), 're.sub', 're.sub', (['"""GO\\\\(BP\\\\)"""', '"""GO"""', 'category'], {}), "('GO\\\\(BP\\\\)', 'GO', category)\n", (9602, 9632), False, 'import re\n'), ((9645, 9681), 're.sub', 're.sub', (['"""GO\\\\(CC\\\\)"""', '"""GO"""', 'category'], {}), "('GO\\\\(CC\\\\)', 'GO', category)\n", (9651, 9681), False, 'import re\n'), ((9694, 9730), 're.sub', 're.sub', (['"""GO\\\\(MF\\\\)"""', '"""GO"""', 'category'], {}), "('GO\\\\(MF\\\\)', 'GO', category)\n", (9700, 9730), False, 'import re\n'), ((4937, 4961), 'os.path.basename', 'os.path.basename', (['myfile'], {}), '(myfile)\n', (4953, 4961), False, 'import os\n'), ((9464, 9486), 're.search', 're.search', (['"""_"""', 'mytype'], {}), "('_', mytype)\n", (9473, 9486), False, 'import re\n'), ((9504, 9530), 'metawibele.config.logger.info', 'config.logger.info', (['mytype'], {}), '(mytype)\n', (9522, 9530), False, 'from metawibele import config\n'), ((5669, 5706), 'metawibele.config.logger.info', 'config.logger.info', (["('WARNING!' + line)"], {}), "('WARNING!' + line)\n", (5687, 5706), False, 'from metawibele import config\n'), ((11411, 11433), 're.search', 're.search', (['"""_"""', 'mytype'], {}), "('_', mytype)\n", (11420, 11433), False, 'import re\n'), ((11453, 11479), 'metawibele.config.logger.info', 'config.logger.info', (['mytype'], {}), '(mytype)\n', (11471, 11479), False, 'from metawibele import config\n')]
|
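collect_cluster_info keys its parsing on two regexes per FASTA-style cluster header. A minimal illustration with a made-up header line:

import re

line = ">pep_00001;cluster=42;size=3"  # hypothetical header
myclust = re.search(">([^;]+)", line).group(1)                    # 'pep_00001'
myclust_id = "Cluster_" + re.search(r"cluster=([\d]+)", line).group(1)  # 'Cluster_42'
print(myclust, myclust_id)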
"""
The ``risk_models`` module provides functions for estimating the covariance matrix given
historical returns. Because of the complexity of estimating covariance matrices
(and the importance of efficient computations), this module mostly provides a convenient
wrapper around the underrated `sklearn.covariance` module.
The format of the data input is the same as that in :ref:`expected-returns`.
**Currently implemented:**
- sample covariance
- semicovariance
- exponentially weighted covariance
- mininum covariance determinant
- shrunk covariance matrices:
- manual shrinkage
- Ledoit Wolf shrinkage
- Oracle Approximating shrinkage
- covariance to correlation matrix
"""
import warnings
import numpy as np
import pandas as pd
import sklearn.covariance
from .expected_returns import returns_from_prices
def sample_cov(prices, frequency=252):
"""
Calculate the annualised sample covariance matrix of (daily) asset returns.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:return: annualised sample covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
daily_returns = returns_from_prices(prices)
return daily_returns.cov() * frequency
def semicovariance(prices, benchmark=0.000079, frequency=252):
"""
Estimate the semicovariance matrix, i.e the covariance given that
the returns are less than the benchmark.
.. semicov = E([min(r_i - B, 0)] . [min(r_j - B, 0)])
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param benchmark: the benchmark return, defaults to the daily risk-free rate, i.e
:math:`1.02^{(1/252)} -1`.
:type benchmark: float
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year). Ensure that you use the appropriate
benchmark, e.g if ``frequency=12`` use the monthly risk-free rate.
:type frequency: int, optional
:return: semicovariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
daily_returns = returns_from_prices(prices)
drops = np.fmin(daily_returns - benchmark, 0)
return drops.cov() * frequency
def _pair_exp_cov(X, Y, span=180):
"""
Calculate the exponential covariance between two timeseries of returns.
:param X: first time series of returns
:type X: pd.Series
:param Y: second time series of returns
:type Y: pd.Series
:param span: the span of the exponential weighting function, defaults to 180
:type span: int, optional
:return: the exponential covariance between X and Y
:rtype: float
"""
covariation = (X - X.mean()) * (Y - Y.mean())
# Exponentially weight the covariation and take the mean
if span < 10:
warnings.warn("it is recommended to use a higher span, e.g 30 days")
return covariation.ewm(span=span).mean()[-1]
def exp_cov(prices, span=180, frequency=252):
"""
Estimate the exponentially-weighted covariance matrix, which gives
greater weight to more recent data.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param span: the span of the exponential weighting function, defaults to 180
:type span: int, optional
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:return: annualised estimate of exponential covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
assets = prices.columns
daily_returns = returns_from_prices(prices)
N = len(assets)
# Loop over matrix, filling entries with the pairwise exp cov
S = np.zeros((N, N))
for i in range(N):
for j in range(i, N):
S[i, j] = S[j, i] = _pair_exp_cov(
daily_returns.iloc[:, i], daily_returns.iloc[:, j], span
)
return pd.DataFrame(S * frequency, columns=assets, index=assets)
def min_cov_determinant(prices, frequency=252, random_state=None):
"""
Calculate the minimum covariance determinant, an estimator of the covariance matrix
that is more robust to noise.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:param random_state: random seed to make results reproducible, defaults to None
:type random_state: int, optional
:return: annualised estimate of covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
assets = prices.columns
X = prices.pct_change().dropna(how="all")
X = np.nan_to_num(X.values)
raw_cov_array = sklearn.covariance.fast_mcd(X, random_state=random_state)[1]
return pd.DataFrame(raw_cov_array, index=assets, columns=assets) * frequency
def cov_to_corr(cov_matrix):
"""
Convert a covariance matrix to a correlation matrix.
:param cov_matrix: covariance matrix
:type cov_matrix: pd.DataFrame
:return: correlation matrix
:rtype: pd.DataFrame
"""
if not isinstance(cov_matrix, pd.DataFrame):
warnings.warn("cov_matrix is not a dataframe", RuntimeWarning)
cov_matrix = pd.DataFrame(cov_matrix)
Dinv = np.diag(1 / np.sqrt(np.diag(cov_matrix)))
corr = np.dot(Dinv, np.dot(cov_matrix, Dinv))
return pd.DataFrame(corr, index=cov_matrix.index, columns=cov_matrix.index)
class CovarianceShrinkage:
"""
Provide methods for computing shrinkage estimates of the covariance matrix, using the
sample covariance matrix and choosing the structured estimator to be an identity matrix
multiplied by the average sample variance. The shrinkage constant can be input manually,
though there exist methods (notably Ledoit Wolf) to estimate the optimal value.
Instance variables:
- ``X`` (returns)
- ``S`` (sample covariance matrix)
- ``delta`` (shrinkage constant)
"""
def __init__(self, prices, frequency=252):
"""
:param prices: adjusted closing prices of the asset, each row is a date and each column is a ticker/id.
:type prices: pd.DataFrame
:param frequency: number of time periods in a year, defaults to 252 (the number of trading days in a year)
:type frequency: int, optional
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
self.frequency = frequency
self.X = prices.pct_change().dropna(how="all")
self.S = self.X.cov().values
self.delta = None # shrinkage constant
def format_and_annualise(self, raw_cov_array):
"""
Helper method which annualises the output of shrinkage calculations,
and formats the result into a dataframe
:param raw_cov_array: raw covariance matrix of daily returns
:type raw_cov_array: np.ndarray
:return: annualised covariance matrix
:rtype: pd.DataFrame
"""
assets = self.X.columns
return (
pd.DataFrame(raw_cov_array, index=assets, columns=assets) * self.frequency
)
def shrunk_covariance(self, delta=0.2):
"""
Shrink a sample covariance matrix to the identity matrix (scaled by the average
sample variance). This method does not estimate an optimal shrinkage parameter,
it requires manual input.
:param delta: shrinkage parameter, defaults to 0.2.
:type delta: float, optional
:return: shrunk sample covariance matrix
:rtype: np.ndarray
"""
self.delta = delta
N = self.S.shape[1]
# Shrinkage target
mu = np.trace(self.S) / N
F = np.identity(N) * mu
# Shrinkage
shrunk_cov = delta * F + (1 - delta) * self.S
return self.format_and_annualise(shrunk_cov)
def ledoit_wolf(self, shrinkage_target="constant_variance"):
"""
Calculate the Ledoit-Wolf shrinkage estimate for a particular
shrinkage target.
:param shrinkage_target: choice of shrinkage target, either ``constant_variance``,
``single_factor`` or ``constant_correlation``. Defaults to
``constant_variance``.
:type shrinkage_target: str, optional
:raises NotImplementedError: if the shrinkage_target is unrecognised
:return: shrunk sample covariance matrix
:rtype: np.ndarray
"""
if shrinkage_target == "constant_variance":
X = np.nan_to_num(self.X.values)
shrunk_cov, self.delta = sklearn.covariance.ledoit_wolf(X)
elif shrinkage_target == "single_factor":
shrunk_cov, self.delta = self._ledoit_wolf_single_factor()
elif shrinkage_target == "constant_correlation":
shrunk_cov, self.delta = self._ledoit_wolf_constant_correlation()
else:
raise NotImplementedError
return self.format_and_annualise(shrunk_cov)
def _ledoit_wolf_single_factor(self):
"""
Helper method to calculate the Ledoit-Wolf shrinkage estimate
with the Sharpe single-factor matrix as the shrinkage target.
See Ledoit and Wolf (2001).
:return: shrunk sample covariance matrix, shrinkage constant
:rtype: np.ndarray, float
"""
X = np.nan_to_num(self.X.values)
# De-mean returns
t, n = np.shape(X)
Xm = X - X.mean(axis=0)
xmkt = X.mean(axis=1).reshape(t, 1)
# compute sample covariance matrix
sample = np.cov(np.append(Xm, xmkt, axis=1), rowvar=False) * (t - 1) / t
betas = sample[0:n, n].reshape(n, 1)
varmkt = sample[n, n]
sample = sample[:n, :n]
F = np.dot(betas, betas.T) / varmkt
F[np.eye(n) == 1] = np.diag(sample)
# compute shrinkage parameters
c = np.linalg.norm(sample - F, "fro") ** 2
y = Xm ** 2
p = 1 / t * np.sum(np.dot(y.T, y)) - np.sum(sample ** 2)
# r is divided into diagonal
# and off-diagonal terms, and the off-diagonal term
# is itself divided into smaller terms
rdiag = 1 / t * np.sum(y ** 2) - sum(np.diag(sample) ** 2)
z = Xm * np.tile(xmkt, (n,))
v1 = 1 / t * np.dot(y.T, z) - np.tile(betas, (n,)) * sample
roff1 = (
np.sum(v1 * np.tile(betas, (n,)).T) / varmkt
- np.sum(np.diag(v1) * betas.T) / varmkt
)
v3 = 1 / t * np.dot(z.T, z) - varmkt * sample
roff3 = (
np.sum(v3 * np.dot(betas, betas.T)) / varmkt ** 2
- np.sum(np.diag(v3).reshape(-1, 1) * betas ** 2) / varmkt ** 2
)
roff = 2 * roff1 - roff3
r = rdiag + roff
# compute shrinkage constant
k = (p - r) / c
delta = max(0, min(1, k / t))
# compute the estimator
shrunk_cov = delta * F + (1 - delta) * sample
return shrunk_cov, delta
def _ledoit_wolf_constant_correlation(self):
"""
Helper method to calculate the Ledoit-Wolf shrinkage estimate
with the constant correlation matrix as the shrinkage target.
See Ledoit and Wolf (2003)
:return: shrunk sample covariance matrix, shrinkage constant
:rtype: np.ndarray, float
"""
X = np.nan_to_num(self.X.values)
t, n = np.shape(X)
S = self.S # sample cov matrix
# Constant correlation target
var = np.diag(S).reshape(-1, 1)
std = np.sqrt(var)
_var = np.tile(var, (n,))
_std = np.tile(std, (n,))
r_bar = (np.sum(S / (_std * _std.T)) - n) / (n * (n - 1))
F = r_bar * (_std * _std.T)
F[np.eye(n) == 1] = var.reshape(-1)
# Estimate pi
Xm = X - X.mean(axis=0)
y = Xm ** 2
pi_mat = np.dot(y.T, y) / t - 2 * np.dot(Xm.T, Xm) * S / t + S ** 2
pi_hat = np.sum(pi_mat)
# Theta matrix, expanded term by term
term1 = np.dot((X ** 3).T, X) / t
help_ = np.dot(X.T, X) / t
help_diag = np.diag(help_)
term2 = np.tile(help_diag, (n, 1)).T * S
term3 = help_ * _var
term4 = _var * S
theta_mat = term1 - term2 - term3 + term4
theta_mat[np.eye(n) == 1] = np.zeros(n)
rho_hat = sum(np.diag(pi_mat)) + r_bar * np.sum(
np.dot((1 / std), std.T) * theta_mat
)
# Estimate gamma
gamma_hat = np.linalg.norm(S - F, "fro") ** 2
# Compute shrinkage constant
kappa_hat = (pi_hat - rho_hat) / gamma_hat
delta = max(0.0, min(1.0, kappa_hat / t))
# Compute shrunk covariance matrix
shrunk_cov = delta * F + (1 - delta) * S
return shrunk_cov, delta
def oracle_approximating(self):
"""
Calculate the Oracle Approximating Shrinkage estimate
:return: shrunk sample covariance matrix
:rtype: np.ndarray
"""
X = np.nan_to_num(self.X.values)
shrunk_cov, self.delta = sklearn.covariance.oas(X)
return self.format_and_annualise(shrunk_cov)
|
[
"pandas.DataFrame",
"numpy.fmin",
"numpy.trace",
"numpy.sum",
"numpy.nan_to_num",
"numpy.eye",
"numpy.zeros",
"numpy.identity",
"numpy.shape",
"numpy.append",
"numpy.linalg.norm",
"numpy.tile",
"numpy.dot",
"warnings.warn",
"numpy.diag",
"numpy.sqrt"
] |
[((2751, 2788), 'numpy.fmin', 'np.fmin', (['(daily_returns - benchmark)', '(0)'], {}), '(daily_returns - benchmark, 0)\n', (2758, 2765), True, 'import numpy as np\n'), ((4556, 4572), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (4564, 4572), True, 'import numpy as np\n'), ((4771, 4828), 'pandas.DataFrame', 'pd.DataFrame', (['(S * frequency)'], {'columns': 'assets', 'index': 'assets'}), '(S * frequency, columns=assets, index=assets)\n', (4783, 4828), True, 'import pandas as pd\n'), ((5800, 5823), 'numpy.nan_to_num', 'np.nan_to_num', (['X.values'], {}), '(X.values)\n', (5813, 5823), True, 'import numpy as np\n'), ((6505, 6573), 'pandas.DataFrame', 'pd.DataFrame', (['corr'], {'index': 'cov_matrix.index', 'columns': 'cov_matrix.index'}), '(corr, index=cov_matrix.index, columns=cov_matrix.index)\n', (6517, 6573), True, 'import pandas as pd\n'), ((1415, 1477), 'warnings.warn', 'warnings.warn', (['"""prices are not in a dataframe"""', 'RuntimeWarning'], {}), "('prices are not in a dataframe', RuntimeWarning)\n", (1428, 1477), False, 'import warnings\n'), ((1495, 1515), 'pandas.DataFrame', 'pd.DataFrame', (['prices'], {}), '(prices)\n', (1507, 1515), True, 'import pandas as pd\n'), ((2590, 2652), 'warnings.warn', 'warnings.warn', (['"""prices are not in a dataframe"""', 'RuntimeWarning'], {}), "('prices are not in a dataframe', RuntimeWarning)\n", (2603, 2652), False, 'import warnings\n'), ((2670, 2690), 'pandas.DataFrame', 'pd.DataFrame', (['prices'], {}), '(prices)\n', (2682, 2690), True, 'import pandas as pd\n'), ((3409, 3477), 'warnings.warn', 'warnings.warn', (['"""it is recommended to use a higher span, e.g 30 days"""'], {}), "('it is recommended to use a higher span, e.g 30 days')\n", (3422, 3477), False, 'import warnings\n'), ((4284, 4346), 'warnings.warn', 'warnings.warn', (['"""prices are not in a dataframe"""', 'RuntimeWarning'], {}), "('prices are not in a dataframe', RuntimeWarning)\n", (4297, 4346), False, 'import warnings\n'), ((4364, 4384), 'pandas.DataFrame', 'pd.DataFrame', (['prices'], {}), '(prices)\n', (4376, 4384), True, 'import pandas as pd\n'), ((5617, 5679), 'warnings.warn', 'warnings.warn', (['"""prices are not in a dataframe"""', 'RuntimeWarning'], {}), "('prices are not in a dataframe', RuntimeWarning)\n", (5630, 5679), False, 'import warnings\n'), ((5697, 5717), 'pandas.DataFrame', 'pd.DataFrame', (['prices'], {}), '(prices)\n', (5709, 5717), True, 'import pandas as pd\n'), ((5916, 5973), 'pandas.DataFrame', 'pd.DataFrame', (['raw_cov_array'], {'index': 'assets', 'columns': 'assets'}), '(raw_cov_array, index=assets, columns=assets)\n', (5928, 5973), True, 'import pandas as pd\n'), ((6281, 6343), 'warnings.warn', 'warnings.warn', (['"""cov_matrix is not a dataframe"""', 'RuntimeWarning'], {}), "('cov_matrix is not a dataframe', RuntimeWarning)\n", (6294, 6343), False, 'import warnings\n'), ((6365, 6389), 'pandas.DataFrame', 'pd.DataFrame', (['cov_matrix'], {}), '(cov_matrix)\n', (6377, 6389), True, 'import pandas as pd\n'), ((6468, 6492), 'numpy.dot', 'np.dot', (['cov_matrix', 'Dinv'], {}), '(cov_matrix, Dinv)\n', (6474, 6492), True, 'import numpy as np\n'), ((10590, 10618), 'numpy.nan_to_num', 'np.nan_to_num', (['self.X.values'], {}), '(self.X.values)\n', (10603, 10618), True, 'import numpy as np\n'), ((10661, 10672), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (10669, 10672), True, 'import numpy as np\n'), ((11053, 11068), 'numpy.diag', 'np.diag', (['sample'], {}), '(sample)\n', (11060, 11068), True, 'import numpy as np\n'), ((12563, 12591), 'numpy.nan_to_num', 'np.nan_to_num', (['self.X.values'], {}), '(self.X.values)\n', (12576, 12591), True, 'import numpy as np\n'), ((12607, 12618), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (12615, 12618), True, 'import numpy as np\n'), ((12753, 12765), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (12760, 12765), True, 'import numpy as np\n'), ((12781, 12799), 'numpy.tile', 'np.tile', (['var', '(n,)'], {}), '(var, (n,))\n', (12788, 12799), True, 'import numpy as np\n'), ((12815, 12833), 'numpy.tile', 'np.tile', (['std', '(n,)'], {}), '(std, (n,))\n', (12822, 12833), True, 'import numpy as np\n'), ((13148, 13162), 'numpy.sum', 'np.sum', (['pi_mat'], {}), '(pi_mat)\n', (13154, 13162), True, 'import numpy as np\n'), ((13307, 13321), 'numpy.diag', 'np.diag', (['help_'], {}), '(help_)\n', (13314, 13321), True, 'import numpy as np\n'), ((13511, 13522), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (13519, 13522), True, 'import numpy as np\n'), ((14196, 14224), 'numpy.nan_to_num', 'np.nan_to_num', (['self.X.values'], {}), '(self.X.values)\n', (14209, 14224), True, 'import numpy as np\n'), ((7536, 7598), 'warnings.warn', 'warnings.warn', (['"""prices are not in a dataframe"""', 'RuntimeWarning'], {}), "('prices are not in a dataframe', RuntimeWarning)\n", (7549, 7598), False, 'import warnings\n'), ((7620, 7640), 'pandas.DataFrame', 'pd.DataFrame', (['prices'], {}), '(prices)\n', (7632, 7640), True, 'import pandas as pd\n'), ((8263, 8320), 'pandas.DataFrame', 'pd.DataFrame', (['raw_cov_array'], {'index': 'assets', 'columns': 'assets'}), '(raw_cov_array, index=assets, columns=assets)\n', (8275, 8320), True, 'import pandas as pd\n'), ((8896, 8912), 'numpy.trace', 'np.trace', (['self.S'], {}), '(self.S)\n', (8904, 8912), True, 'import numpy as np\n'), ((8929, 8943), 'numpy.identity', 'np.identity', (['N'], {}), '(N)\n', (8940, 8943), True, 'import numpy as np\n'), ((9769, 9797), 'numpy.nan_to_num', 'np.nan_to_num', (['self.X.values'], {}), '(self.X.values)\n', (9782, 9797), True, 'import numpy as np\n'), ((10993, 11015), 'numpy.dot', 'np.dot', (['betas', 'betas.T'], {}), '(betas, betas.T)\n', (10999, 11015), True, 'import numpy as np\n'), ((11121, 11154), 'numpy.linalg.norm', 'np.linalg.norm', (['(sample - F)', '"""fro"""'], {}), "(sample - F, 'fro')\n", (11135, 11154), True, 'import numpy as np\n'), ((11225, 11244), 'numpy.sum', 'np.sum', (['(sample ** 2)'], {}), '(sample ** 2)\n', (11231, 11244), True, 'import numpy as np\n'), ((11474, 11493), 'numpy.tile', 'np.tile', (['xmkt', '(n,)'], {}), '(xmkt, (n,))\n', (11481, 11493), True, 'import numpy as np\n'), ((13226, 13247), 'numpy.dot', 'np.dot', (['(X ** 3).T', 'X'], {}), '((X ** 3).T, X)\n', (13232, 13247), True, 'import numpy as np\n'), ((13268, 13282), 'numpy.dot', 'np.dot', (['X.T', 'X'], {}), '(X.T, X)\n', (13274, 13282), True, 'import numpy as np\n'), ((13685, 13713), 'numpy.linalg.norm', 'np.linalg.norm', (['(S - F)', '"""fro"""'], {}), "(S - F, 'fro')\n", (13699, 13713), True, 'import numpy as np\n'), ((6422, 6441), 'numpy.diag', 'np.diag', (['cov_matrix'], {}), '(cov_matrix)\n', (6429, 6441), True, 'import numpy as np\n'), ((11035, 11044), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (11041, 11044), True, 'import numpy as np\n'), ((11414, 11428), 'numpy.sum', 'np.sum', (['(y ** 2)'], {}), '(y ** 2)\n', (11420, 11428), True, 'import numpy as np\n'), ((11515, 11529), 'numpy.dot', 'np.dot', (['y.T', 'z'], {}), '(y.T, z)\n', (11521, 11529), True, 'import numpy as np\n'), ((11532, 11552), 'numpy.tile', 'np.tile', (['betas', '(n,)'], {}), '(betas, (n,))\n', (11539, 11552), True, 'import numpy as np\n'), ((11721, 11735), 'numpy.dot', 'np.dot', (['z.T', 'z'], {}), '(z.T, z)\n', (11727, 11735), True, 'import numpy as np\n'), ((12713, 12723), 'numpy.diag', 'np.diag', (['S'], {}), '(S)\n', (12720, 12723), True, 'import numpy as np\n'), ((12851, 12878), 'numpy.sum', 'np.sum', (['(S / (_std * _std.T))'], {}), '(S / (_std * _std.T))\n', (12857, 12878), True, 'import numpy as np\n'), ((12946, 12955), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (12952, 12955), True, 'import numpy as np\n'), ((13338, 13364), 'numpy.tile', 'np.tile', (['help_diag', '(n, 1)'], {}), '(help_diag, (n, 1))\n', (13345, 13364), True, 'import numpy as np\n'), ((13493, 13502), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (13499, 13502), True, 'import numpy as np\n'), ((13545, 13560), 'numpy.diag', 'np.diag', (['pi_mat'], {}), '(pi_mat)\n', (13552, 13560), True, 'import numpy as np\n'), ((10817, 10844), 'numpy.append', 'np.append', (['Xm', 'xmkt'], {'axis': '(1)'}), '(Xm, xmkt, axis=1)\n', (10826, 10844), True, 'import numpy as np\n'), ((11207, 11221), 'numpy.dot', 'np.dot', (['y.T', 'y'], {}), '(y.T, y)\n', (11213, 11221), True, 'import numpy as np\n'), ((11435, 11450), 'numpy.diag', 'np.diag', (['sample'], {}), '(sample)\n', (11442, 11450), True, 'import numpy as np\n'), ((13072, 13086), 'numpy.dot', 'np.dot', (['y.T', 'y'], {}), '(y.T, y)\n', (13078, 13086), True, 'import numpy as np\n'), ((11658, 11669), 'numpy.diag', 'np.diag', (['v1'], {}), '(v1)\n', (11665, 11669), True, 'import numpy as np\n'), ((11796, 11818), 'numpy.dot', 'np.dot', (['betas', 'betas.T'], {}), '(betas, betas.T)\n', (11802, 11818), True, 'import numpy as np\n'), ((13592, 13614), 'numpy.dot', 'np.dot', (['(1 / std)', 'std.T'], {}), '(1 / std, std.T)\n', (13598, 13614), True, 'import numpy as np\n'), ((11604, 11624), 'numpy.tile', 'np.tile', (['betas', '(n,)'], {}), '(betas, (n,))\n', (11611, 11624), True, 'import numpy as np\n'), ((13097, 13113), 'numpy.dot', 'np.dot', (['Xm.T', 'Xm'], {}), '(Xm.T, Xm)\n', (13103, 13113), True, 'import numpy as np\n'), ((11855, 11866), 'numpy.diag', 'np.diag', (['v3'], {}), '(v3)\n', (11862, 11866), True, 'import numpy as np\n')]
|
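The manual shrinkage in shrunk_covariance is a convex blend between the sample covariance and a scaled identity target. As a standalone numeric sketch (toy matrix, not real data):

import numpy as np

S = np.array([[0.04, 0.01], [0.01, 0.09]])  # sample covariance
N = S.shape[1]
mu = np.trace(S) / N    # average sample variance
F = np.identity(N) * mu  # shrinkage target
delta = 0.2
shrunk_cov = delta * F + (1 - delta) * S
print(shrunk_cov)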
import pygame, os, sys
from Game import *
from pygame.locals import *
pygame.init()
class MainMenu:
def __init__(self):
self.main()
def main(self):
class Button:
def create_button(self, surface, color, x, y, length, height, width, text, text_color):
surface = self.draw_button(surface, color, length, height, x, y, width)
surface = self.write_text(surface, text, text_color, length, height, x, y)
self.rect = pygame.Rect(x, y, length, height)
return surface
def write_text(self, surface, text, text_color, length, height, x, y):
font_size = int(length // len(text))
myFont = pygame.font.Font('Data/Allods.ttf', font_size)
myText = myFont.render(text, 1, text_color)
surface.blit(myText,
((x + length / 2) - myText.get_width() / 2, (y + height / 2) - myText.get_height() / 2))
return surface
def draw_button(self, surface, color, length, height, x, y, width):
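# Fake a soft glow: layer progressively larger, more transparent rectangles behind the button before drawing the solid face and border.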
for i in range(1, 10):
s = pygame.Surface((length + (i * 2), height + (i * 2)))
s.fill(color)
alpha = (255 / (i + 2))
if alpha <= 0:
alpha = 1
s.set_alpha(alpha)
pygame.draw.rect(s, color, (x - i, y - i, length + i, height + i), width)
surface.blit(s, (x - i, y - i))
pygame.draw.rect(surface, color, (x, y, length, height), 0)
pygame.draw.rect(surface, (190, 190, 190), (x, y, length, height), 1)
return surface
def pressed(self, mouse):
if mouse[0] > self.rect.topleft[0]:
if mouse[1] > self.rect.topleft[1]:
if mouse[0] < self.rect.bottomright[0]:
if mouse[1] < self.rect.bottomright[1]:
return True
else:
return False
else:
return False
else:
return False
else:
return False
class Button_Game:
def __init__(self):
self.main()
# Create the screen
def display(self):
self.screen = pygame.display.set_mode((600, 530))
pygame.display.set_caption('Выбор пути')
# Background image + menu options
def update_display(self):
image = pygame.image.load('Data/mainmenu.jpg')
self.screen.blit(image, (0,0))
self.screen.blit(pygame.image.load('Data/row.png'), (30, 5))
BigFont = pygame.font.Font('Data/Allods.ttf', 43)
c = (255,255,255)
self.screen.blit(BigFont.render('А В ПОМОЩЬ ТЕБЕ', 0, (252, 124, 71)), (125, 37))
SmallFront = pygame.font.Font('Data/Allods.ttf', 28)
self.screen.blit(SmallFront.render('Советы мои да пожелания', 0, c), (115, 107))
self.screen.blit(SmallFront.render('дракона сразить 10 раз', 0, c),(145, 132))
self.screen.blit(SmallFront.render('да самому не пропасть.', 0, c), (135, 157))
self.screen.blit(SmallFront.render('Да смотри принцессу свою', 0, c), (95, 202))
self.screen.blit(SmallFront.render('возлюбленную береги от меча своего.', 0, c), (35, 227))
self.screen.blit(SmallFront.render('Не дай дракону огнедышащему', 0, c), (90, 272))
self.screen.blit(SmallFront.render('на глазах у возлюбленной', 0, c), (120, 297))
self.screen.blit(SmallFront.render('сжечь 5 раз тебя', 0, c), (170, 322))
self.screen.blit(SmallFront.render('потомкам на посмешище!', 0, c), (105, 347))
# Parameters: surface, color, x, y, length, height, width, text, text_color
self.Button1.create_button(self.screen, (107, 142, 255), 140, 420, 300, 50, 0, 'Спасти принцессу!',
(255, 255, 255))
pygame.display.flip()
def main(self):
self.Button1 = Button()
self.display()
while True:
self.update_display()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
elif event.type == MOUSEBUTTONDOWN:
if self.Button1.pressed(pygame.mouse.get_pos()):
game = StartGame()
sys.exit()
class Button_Next:
def __init__(self):
self.main()
# Create the screen
def display(self):
self.screen = pygame.display.set_mode((600, 530))
pygame.display.set_caption('Выбор пути')
# Background image + menu options
def update_display(self):
image = pygame.image.load('Data/mainmenu.jpg')
self.screen.blit(image, (0,0))
self.screen.blit(pygame.image.load('Data/prediction.png'), (30,5))
BigFont = pygame.font.Font('Data/Allods.ttf', 53)
c = (255,255,255)
self.screen.blit(BigFont.render('ПРЕДИСЛОВИЕ', 0, (252, 124, 71)), (145, 37))
SmallFront = pygame.font.Font('Data/Allods.ttf', 28)
self.screen.blit(SmallFront.render('Тебе предстоит играть за влюбленного', 0, c), (25, 107))
self.screen.blit(SmallFront.render('рыцаря, которому отец принцессы', 0, c),(55, 132))
self.screen.blit(SmallFront.render('дал от ворот поворот.', 0, c), (155, 157))
self.screen.blit(SmallFront.render('Он запер её в каменном замке', 0, c), (65, 202))
self.screen.blit(SmallFront.render('и посадил на стражу дракона.', 0, c), (65, 227))
self.screen.blit(SmallFront.render('Лишь ты можешь помочь', 0, c), (100, 272))
self.screen.blit(SmallFront.render('романтичному рыцарю', 0, c), (120, 297))
self.screen.blit(SmallFront.render('разрушить все преграды на пути', 0, c), (50, 322))
self.screen.blit(SmallFront.render('настоящей любви к прекрасной даме', 0, c), (25, 347))
# Parameters: surface, color, x, y, length, height, width, text, text_color
self.Button1.create_button(self.screen, (107, 142, 255), 205, 420, 200, 50, 0, 'Продолжай!',
(255, 255, 255))
pygame.display.flip()
# Run the loop
def main(self):
self.Button1 = Button()
self.display()
while True:
self.update_display()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
elif event.type == MOUSEBUTTONDOWN:
if self.Button1.pressed(pygame.mouse.get_pos()):
next = Button_Game()
w = pygame.display.set_mode((10, 10))
pygame.mixer.music.load('Data/sound.mp3')
pygame.mixer.music.play()
obj = Button_Next()
while pygame.mixer.music.get_busy():
pass
|
[
"pygame.quit",
"pygame.font.Font",
"pygame.Surface",
"pygame.draw.rect",
"pygame.display.set_mode",
"pygame.event.get",
"pygame.Rect",
"pygame.mixer.music.play",
"pygame.init",
"pygame.display.flip",
"pygame.mixer.music.get_busy",
"pygame.mixer.music.load",
"pygame.image.load",
"pygame.mouse.get_pos",
"pygame.display.set_caption",
"sys.exit"
] |
[((75, 88), 'pygame.init', 'pygame.init', ([], {}), '()\n', (86, 88), False, 'import pygame, os, sys\n'), ((7671, 7704), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(10, 10)'], {}), '((10, 10))\n', (7694, 7704), False, 'import pygame, os, sys\n'), ((7714, 7755), 'pygame.mixer.music.load', 'pygame.mixer.music.load', (['"""Data/sound.mp3"""'], {}), "('Data/sound.mp3')\n", (7737, 7755), False, 'import pygame, os, sys\n'), ((7765, 7790), 'pygame.mixer.music.play', 'pygame.mixer.music.play', ([], {}), '()\n', (7788, 7790), False, 'import pygame, os, sys\n'), ((7835, 7864), 'pygame.mixer.music.get_busy', 'pygame.mixer.music.get_busy', ([], {}), '()\n', (7862, 7864), False, 'import pygame, os, sys\n'), ((511, 544), 'pygame.Rect', 'pygame.Rect', (['x', 'y', 'length', 'height'], {}), '(x, y, length, height)\n', (522, 544), False, 'import pygame, os, sys\n'), ((743, 789), 'pygame.font.Font', 'pygame.font.Font', (['"""Data/Allods.ttf"""', 'font_size'], {}), "('Data/Allods.ttf', font_size)\n", (759, 789), False, 'import pygame, os, sys\n'), ((1597, 1656), 'pygame.draw.rect', 'pygame.draw.rect', (['surface', 'color', '(x, y, length, height)', '(0)'], {}), '(surface, color, (x, y, length, height), 0)\n', (1613, 1656), False, 'import pygame, os, sys\n'), ((1674, 1743), 'pygame.draw.rect', 'pygame.draw.rect', (['surface', '(190, 190, 190)', '(x, y, length, height)', '(1)'], {}), '(surface, (190, 190, 190), (x, y, length, height), 1)\n', (1690, 1743), False, 'import pygame, os, sys\n'), ((2572, 2607), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(600, 530)'], {}), '((600, 530))\n', (2595, 2607), False, 'import pygame, os, sys\n'), ((2625, 2665), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Выбор пути"""'], {}), "('Выбор пути')\n", (2651, 2665), False, 'import pygame, os, sys\n'), ((2783, 2821), 'pygame.image.load', 'pygame.image.load', (['"""Data/mainmenu.jpg"""'], {}), "('Data/mainmenu.jpg')\n", (2800, 2821), False, 'import pygame, os, sys\n'), ((2975, 3014), 'pygame.font.Font', 'pygame.font.Font', (['"""Data/Allods.ttf"""', '(43)'], {}), "('Data/Allods.ttf', 43)\n", (2991, 3014), False, 'import pygame, os, sys\n'), ((3179, 3218), 'pygame.font.Font', 'pygame.font.Font', (['"""Data/Allods.ttf"""', '(28)'], {}), "('Data/Allods.ttf', 28)\n", (3195, 3218), False, 'import pygame, os, sys\n'), ((4432, 4453), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (4451, 4453), False, 'import pygame, os, sys\n'), ((5203, 5238), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(600, 530)'], {}), '((600, 530))\n', (5226, 5238), False, 'import pygame, os, sys\n'), ((5256, 5296), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Выбор пути"""'], {}), "('Выбор пути')\n", (5282, 5296), False, 'import pygame, os, sys\n'), ((5414, 5452), 'pygame.image.load', 'pygame.image.load', (['"""Data/mainmenu.jpg"""'], {}), "('Data/mainmenu.jpg')\n", (5431, 5452), False, 'import pygame, os, sys\n'), ((5612, 5651), 'pygame.font.Font', 'pygame.font.Font', (['"""Data/Allods.ttf"""', '(53)'], {}), "('Data/Allods.ttf', 53)\n", (5628, 5651), False, 'import pygame, os, sys\n'), ((5812, 5851), 'pygame.font.Font', 'pygame.font.Font', (['"""Data/Allods.ttf"""', '(28)'], {}), "('Data/Allods.ttf', 28)\n", (5828, 5851), False, 'import pygame, os, sys\n'), ((7087, 7108), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (7106, 7108), False, 'import pygame, os, sys\n'), ((1188, 1236), 'pygame.Surface', 'pygame.Surface', (['(length + i * 2, height + i * 
2)'], {}), '((length + i * 2, height + i * 2))\n', (1202, 1236), False, 'import pygame, os, sys\n'), ((1453, 1526), 'pygame.draw.rect', 'pygame.draw.rect', (['s', 'color', '(x - i, y - i, length + i, height + i)', 'width'], {}), '(s, color, (x - i, y - i, length + i, height + i), width)\n', (1469, 1526), False, 'import pygame, os, sys\n'), ((2904, 2937), 'pygame.image.load', 'pygame.image.load', (['"""Data/row.png"""'], {}), "('Data/row.png')\n", (2921, 2937), False, 'import pygame, os, sys\n'), ((4664, 4682), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (4680, 4682), False, 'import pygame, os, sys\n'), ((5535, 5575), 'pygame.image.load', 'pygame.image.load', (['"""Data/prediction.png"""'], {}), "('Data/prediction.png')\n", (5552, 5575), False, 'import pygame, os, sys\n'), ((7347, 7365), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (7363, 7365), False, 'import pygame, os, sys\n'), ((4768, 4781), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (4779, 4781), False, 'import pygame, os, sys\n'), ((7451, 7464), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (7462, 7464), False, 'import pygame, os, sys\n'), ((4896, 4918), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (4916, 4918), False, 'import pygame, os, sys\n'), ((5006, 5016), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5014, 5016), False, 'import pygame, os, sys\n'), ((7579, 7601), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (7599, 7601), False, 'import pygame, os, sys\n')]
|
# Copyright (c) 2016 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Create Status and Fault model
Revision ID: accfe645090a
Revises:
Create Date: 2016-03-15 16:29:57.408348
"""
# revision identifiers, used by Alembic.
revision = 'faade1155a0a'
down_revision = '7<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql.expression import func
def upgrade():
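# aim_statuses keeps one sync/health row per (resource_type, resource_id); aim_faults rows reference a status and are dropped with it (ON DELETE CASCADE).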
op.create_table(
'aim_statuses',
sa.Column('id', sa.String(255), primary_key=True),
sa.Column('resource_type', sa.String(255), nullable=False),
sa.Column('resource_id', sa.Integer, nullable=False),
sa.Column('sync_status', sa.String(50), nullable=True),
sa.Column('sync_message', sa.TEXT, default=''),
sa.Column('health_score', sa.Integer),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('resource_type', 'resource_id',
name='uniq_aim_statuses_identity'),
sa.Index('idx_aim_statuses_identity', 'resource_type', 'resource_id'))
op.create_table(
'aim_faults',
sa.Column('status_id', sa.String(length=255), nullable=False),
sa.Column('fault_code', sa.String(25), nullable=False),
sa.Column('severity', sa.String(25), nullable=False),
sa.Column('description', sa.String(255), default=''),
sa.Column('cause', sa.String(255), default=''),
sa.Column('last_update_timestamp', sa.TIMESTAMP,
server_default=func.now(), onupdate=func.now()),
sa.Column('external_identifier', sa.String(255), primary_key=True),
sa.PrimaryKeyConstraint('external_identifier'),
sa.ForeignKeyConstraint(['status_id'],
['aim_statuses.id'],
ondelete='CASCADE'),
sa.Index('idx_aim_faults_status_id', 'status_id'))
def downgrade():
pass
|
[
"sqlalchemy.Index",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.sql.expression.func.now",
"sqlalchemy.UniqueConstraint",
"sqlalchemy.Column",
"sqlalchemy.ForeignKeyConstraint",
"sqlalchemy.String"
] |
[((1173, 1225), 'sqlalchemy.Column', 'sa.Column', (['"""resource_id"""', 'sa.Integer'], {'nullable': '(False)'}), "('resource_id', sa.Integer, nullable=False)\n", (1182, 1225), True, 'import sqlalchemy as sa\n'), ((1299, 1345), 'sqlalchemy.Column', 'sa.Column', (['"""sync_message"""', 'sa.TEXT'], {'default': '""""""'}), "('sync_message', sa.TEXT, default='')\n", (1308, 1345), True, 'import sqlalchemy as sa\n'), ((1355, 1392), 'sqlalchemy.Column', 'sa.Column', (['"""health_score"""', 'sa.Integer'], {}), "('health_score', sa.Integer)\n", (1364, 1392), True, 'import sqlalchemy as sa\n'), ((1402, 1431), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1425, 1431), True, 'import sqlalchemy as sa\n'), ((1441, 1532), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""resource_type"""', '"""resource_id"""'], {'name': '"""uniq_aim_statuses_identity"""'}), "('resource_type', 'resource_id', name=\n 'uniq_aim_statuses_identity')\n", (1460, 1532), True, 'import sqlalchemy as sa\n'), ((1565, 1634), 'sqlalchemy.Index', 'sa.Index', (['"""idx_aim_statuses_identity"""', '"""resource_type"""', '"""resource_id"""'], {}), "('idx_aim_statuses_identity', 'resource_type', 'resource_id')\n", (1573, 1634), True, 'import sqlalchemy as sa\n'), ((2203, 2249), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""external_identifier"""'], {}), "('external_identifier')\n", (2226, 2249), True, 'import sqlalchemy as sa\n'), ((2259, 2338), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['status_id']", "['aim_statuses.id']"], {'ondelete': '"""CASCADE"""'}), "(['status_id'], ['aim_statuses.id'], ondelete='CASCADE')\n", (2282, 2338), True, 'import sqlalchemy as sa\n'), ((2412, 2461), 'sqlalchemy.Index', 'sa.Index', (['"""idx_aim_faults_status_id"""', '"""status_id"""'], {}), "('idx_aim_faults_status_id', 'status_id')\n", (2420, 2461), True, 'import sqlalchemy as sa\n'), ((1062, 1076), 'sqlalchemy.String', 'sa.String', (['(255)'], {}), '(255)\n', (1071, 1076), True, 'import sqlalchemy as sa\n'), ((1132, 1146), 'sqlalchemy.String', 'sa.String', (['(255)'], {}), '(255)\n', (1141, 1146), True, 'import sqlalchemy as sa\n'), ((1260, 1273), 'sqlalchemy.String', 'sa.String', (['(50)'], {}), '(50)\n', (1269, 1273), True, 'import sqlalchemy as sa\n'), ((1711, 1732), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (1720, 1732), True, 'import sqlalchemy as sa\n'), ((1783, 1796), 'sqlalchemy.String', 'sa.String', (['(25)'], {}), '(25)\n', (1792, 1796), True, 'import sqlalchemy as sa\n'), ((1845, 1858), 'sqlalchemy.String', 'sa.String', (['(25)'], {}), '(25)\n', (1854, 1858), True, 'import sqlalchemy as sa\n'), ((1910, 1924), 'sqlalchemy.String', 'sa.String', (['(255)'], {}), '(255)\n', (1919, 1924), True, 'import sqlalchemy as sa\n'), ((1966, 1980), 'sqlalchemy.String', 'sa.String', (['(255)'], {}), '(255)\n', (1975, 1980), True, 'import sqlalchemy as sa\n'), ((2160, 2174), 'sqlalchemy.String', 'sa.String', (['(255)'], {}), '(255)\n', (2169, 2174), True, 'import sqlalchemy as sa\n'), ((2085, 2095), 'sqlalchemy.sql.expression.func.now', 'func.now', ([], {}), '()\n', (2093, 2095), False, 'from sqlalchemy.sql.expression import func\n'), ((2106, 2116), 'sqlalchemy.sql.expression.func.now', 'func.now', ([], {}), '()\n', (2114, 2116), False, 'from sqlalchemy.sql.expression import func\n')]
|
# -*- coding: utf-8 -*-
from openregistry.lots.core.utils import (
update_file_content_type,
json_view,
context_unpack,
APIResource,
)
from openregistry.lots.core.utils import (
save_lot, oplotsresource, apply_patch,
)
from openregistry.lots.core.validation import (
validate_decision_post,
validate_decision_patch_data,
validate_decision_update_in_not_allowed_status
)
from openregistry.lots.bargain.validation import (
validate_decision_by_decisionOf
)
post_validators = (
validate_decision_post,
validate_decision_update_in_not_allowed_status
)
patch_validators = (
validate_decision_patch_data,
validate_decision_update_in_not_allowed_status,
validate_decision_by_decisionOf,
)
@oplotsresource(name='bargain:Lot Decisions',
collection_path='/lots/{lot_id}/decisions',
path='/lots/{lot_id}/decisions/{decision_id}',
_internal_type='bargain',
description="Lot related decisions")
class LotDecisionResource(APIResource):
@json_view(permission='view_lot')
def collection_get(self):
"""Lot Decision List"""
collection_data = [i.serialize("view") for i in self.context.decisions]
return {'data': collection_data}
@json_view(content_type="application/json", permission='upload_lot_decisions', validators=post_validators)
def collection_post(self):
"""Lot Decision Upload"""
decision = self.request.validated['decision']
self.context.decisions.append(decision)
if save_lot(self.request):
self.LOGGER.info(
'Created lot decision {}'.format(decision.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'lot_decision_create'}, {'decision_id': decision.id})
)
self.request.response.status = 201
decision_route = self.request.matched_route.name.replace("collection_", "")
self.request.response.headers['Location'] = self.request.current_route_url(
_route_name=decision_route,
decision_id=decision.id,
_query={}
)
return {'data': decision.serialize("view")}
@json_view(permission='view_lot')
def get(self):
"""Lot Decision Read"""
decision = self.request.validated['decision']
return {'data': decision.serialize("view")}
@json_view(content_type="application/json", permission='upload_lot_decisions', validators=patch_validators)
def patch(self):
"""Lot Decision Update"""
if apply_patch(self.request, src=self.request.context.serialize()):
update_file_content_type(self.request)
self.LOGGER.info(
'Updated lot decision {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'lot_decision_patch'})
)
return {'data': self.request.context.serialize("view")}
|
[
"openregistry.lots.core.utils.save_lot",
"openregistry.lots.core.utils.oplotsresource",
"openregistry.lots.core.utils.json_view",
"openregistry.lots.core.utils.update_file_content_type",
"openregistry.lots.core.utils.context_unpack"
] |
[((742, 954), 'openregistry.lots.core.utils.oplotsresource', 'oplotsresource', ([], {'name': '"""bargain:Lot Decisions"""', 'collection_path': '"""/lots/{lot_id}/decisions"""', 'path': '"""/lots/{lot_id}/decisions/{decision_id}"""', '_internal_type': '"""bargain"""', 'description': '"""Lot related decisions"""'}), "(name='bargain:Lot Decisions', collection_path=\n '/lots/{lot_id}/decisions', path=\n '/lots/{lot_id}/decisions/{decision_id}', _internal_type='bargain',\n description='Lot related decisions')\n", (756, 954), False, 'from openregistry.lots.core.utils import save_lot, oplotsresource, apply_patch\n'), ((1051, 1083), 'openregistry.lots.core.utils.json_view', 'json_view', ([], {'permission': '"""view_lot"""'}), "(permission='view_lot')\n", (1060, 1083), False, 'from openregistry.lots.core.utils import update_file_content_type, json_view, context_unpack, APIResource\n'), ((1273, 1383), 'openregistry.lots.core.utils.json_view', 'json_view', ([], {'content_type': '"""application/json"""', 'permission': '"""upload_lot_decisions"""', 'validators': 'post_validators'}), "(content_type='application/json', permission=\n 'upload_lot_decisions', validators=post_validators)\n", (1282, 1383), False, 'from openregistry.lots.core.utils import update_file_content_type, json_view, context_unpack, APIResource\n'), ((2218, 2250), 'openregistry.lots.core.utils.json_view', 'json_view', ([], {'permission': '"""view_lot"""'}), "(permission='view_lot')\n", (2227, 2250), False, 'from openregistry.lots.core.utils import update_file_content_type, json_view, context_unpack, APIResource\n'), ((2414, 2525), 'openregistry.lots.core.utils.json_view', 'json_view', ([], {'content_type': '"""application/json"""', 'permission': '"""upload_lot_decisions"""', 'validators': 'patch_validators'}), "(content_type='application/json', permission=\n 'upload_lot_decisions', validators=patch_validators)\n", (2423, 2525), False, 'from openregistry.lots.core.utils import update_file_content_type, json_view, context_unpack, APIResource\n'), ((1557, 1579), 'openregistry.lots.core.utils.save_lot', 'save_lot', (['self.request'], {}), '(self.request)\n', (1565, 1579), False, 'from openregistry.lots.core.utils import save_lot, oplotsresource, apply_patch\n'), ((2664, 2702), 'openregistry.lots.core.utils.update_file_content_type', 'update_file_content_type', (['self.request'], {}), '(self.request)\n', (2688, 2702), False, 'from openregistry.lots.core.utils import update_file_content_type, json_view, context_unpack, APIResource\n'), ((1696, 1798), 'openregistry.lots.core.utils.context_unpack', 'context_unpack', (['self.request', "{'MESSAGE_ID': 'lot_decision_create'}", "{'decision_id': decision.id}"], {}), "(self.request, {'MESSAGE_ID': 'lot_decision_create'}, {\n 'decision_id': decision.id})\n", (1710, 1798), False, 'from openregistry.lots.core.utils import update_file_content_type, json_view, context_unpack, APIResource\n'), ((2830, 2896), 'openregistry.lots.core.utils.context_unpack', 'context_unpack', (['self.request', "{'MESSAGE_ID': 'lot_decision_patch'}"], {}), "(self.request, {'MESSAGE_ID': 'lot_decision_patch'})\n", (2844, 2896), False, 'from openregistry.lots.core.utils import update_file_content_type, json_view, context_unpack, APIResource\n')]
|
# coding=utf-8
# Copyright (c) DIRECT Contributors
import argparse
import pathlib
from direct.cli.utils import file_or_url
from direct.environment import Args
from direct.train import train_from_argparse
def register_parser(parser: argparse._SubParsersAction):
"""Register train commands to a root parser."""
epilog = """
Examples:
---------
Run on single machine:
$ direct train experiment_dir --num-gpus 8 --cfg cfg.yaml [--training-root training_set --validation-root validation_set]
Run on multiple machines:
(machine0)$ direct train experiment_dir --machine-rank 0 --num-machines 2 --dist-url <URL> [--training-root training_set --validation-root validation_set] [--other-flags]
(machine1)$ direct train experiment_dir --machine-rank 1 --num-machines 2 --dist-url <URL> [--training-root training_set --validation-root validation_set] [--other-flags]
"""
common_parser = Args(add_help=False)
train_parser = parser.add_parser(
"train",
help="Train models using direct.",
parents=[common_parser],
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
train_parser.add_argument(
"experiment_dir",
type=pathlib.Path,
help="Path to the experiment directory.",
)
train_parser.add_argument("--training-root", type=pathlib.Path, help="Path to the training data.", required=False)
train_parser.add_argument(
"--validation-root", type=pathlib.Path, help="Path to the validation data.", required=False
)
train_parser.add_argument(
"--cfg",
dest="cfg_file",
help="Config file for training. Can be either a local file or a remote URL.",
required=True,
type=file_or_url,
)
train_parser.add_argument(
"--initialization-checkpoint",
type=file_or_url,
help="If this value is set to a proper checkpoint when training starts, "
"the model will be initialized with the weights given. "
"No other keys in the checkpoint will be loaded. "
"When another checkpoint would be available and the --resume flag is used, "
"this flag is ignored. This can be a path to a file or an URL. "
"If a URL is given the checkpoint will first be downloaded to the environmental variable "
"`DIRECT_MODEL_DOWNLOAD_DIR` (default=current directory). Be aware that if `model_checkpoint` is "
"set in the configuration that this flag will overwrite the configuration value, also in the dumped config.",
)
train_parser.add_argument("--resume", help="Resume training if possible.", action="store_true")
train_parser.add_argument(
"--force-validation",
help="Start with a validation round, when recovering from a crash. "
"If you use this option, be aware that when combined with --resume, "
"each new run will start with a validation round.",
action="store_true",
)
train_parser.add_argument("--name", help="Run name.", required=False, type=str)
train_parser.set_defaults(subcommand=train_from_argparse)
|
[
"direct.environment.Args"
] |
[((965, 985), 'direct.environment.Args', 'Args', ([], {'add_help': '(False)'}), '(add_help=False)\n', (969, 985), False, 'from direct.environment import Args\n')]
|
#!/bin/python
# -*- coding: utf-8 -*-
import time
import numpy as np
import scipy.linalg as sl
import pandas as pd
from econsieve import KalmanFilter, TEnKF
from grgrlib.core import timeprint
from grgrlib.multiprocessing import serializer
from econsieve.stats import logpdf
def create_obs_cov(self, scale_obs=0.1):
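# Diagonal observation covariance: each observed series' variance scaled by scale_obs**2.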
self.Z = np.array(self.data)
sig_obs = np.var(self.Z, axis=0)*scale_obs**2
obs_cov = np.diagflat(sig_obs)
return obs_cov
def get_p_init_lyapunov(self, Q):
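# Pre-sample state covariance: solve the discrete Lyapunov equation for the linearized transition, then propagate one step (F @ p4 @ F.T + Q).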
pmat = self.precalc_mat[0]
qmat = self.precalc_mat[1]
F = np.vstack((pmat[1, 0][:, :-self.neps],
qmat[1, 0][:-self.neps, :-self.neps]))
E = np.vstack((pmat[1, 0][:, -self.neps:],
qmat[1, 0][:-self.neps, -self.neps:]))
Q = E @ Q @ E.T
p4 = sl.solve_discrete_lyapunov(F[self.dimp:,:], Q[self.dimp:,self.dimp:])
return F @ p4 @ F.T + Q
def get_eps_lin(self, x, xp, rcond=1e-14):
"""Get filter-implied (smoothed) shocks for linear model
"""
qmat = self.precalc_mat[1]
if self.filter.name == 'KalmanFilter':
pmat = self.precalc_mat[0]
F = self.filter.F
E = np.vstack((pmat[1, 0][:, -self.neps:],
qmat[1, 0][:-self.neps, -self.neps:]))
else:
F = qmat[1, 0][:, :-self.neps]
E = qmat[1, 0][:, -self.neps:]
return np.linalg.pinv(E, rcond) @ (x - F@xp)
def create_filter(self, R=None, N=None, ftype=None, seed=None, incl_obs=False, reduced_form=False, **fargs):
self.Z = np.array(self.data)
if ftype == 'KalmanFilter':
ftype = 'KF'
elif ftype == 'ParticleFilter':
ftype = 'PF'
elif ftype == 'AuxiliaryParticleFilter':
ftype = 'APF'
if ftype == 'KF':
f = KalmanFilter(dim_x=self.dimx, dim_z=self.nobs)
elif ftype in ('PF', 'APF'):
print(
'Warning: Particle filter is experimental and currently not under development.')
from .pfilter import ParticleFilter
if N is None:
N = 10000
aux_bs = ftype == 'APF'
f = ParticleFilter(N=N, dim_x=self.dimx,
dim_z=self.nobs, auxiliary_bootstrap=aux_bs)
else:
ftype = 'TEnKF'
if N is None:
N = int((self.dimq-self.dimeps)**2/2) + 1
dimx = self.dimq-self.dimeps if reduced_form else self.dimx
f = TEnKF(N=N, dim_x=dimx, dim_z=self.nobs, seed=seed, **fargs)
f.reduced_form = reduced_form
if R is not None:
f.R = R
# use lyapunov equation as default. Otherwise to be defined manually via `*.filter.p`
f.init_cov = None
try:
f.Q = self.QQ(self.ppar) @ self.QQ(self.ppar)
except AttributeError:
f.Q = self.fdict['QQ'] @ self.fdict['QQ']
self.filter = f
return f
def get_ll(self, **args):
return run_filter(self, smoother=False, get_ll=True, **args)
def run_filter(self, smoother=True, get_ll=False, init_cov=None, dispatch=None, rcond=1e-14, seed=None, verbose=False):
if verbose:
st = time.time()
self.Z = np.array(self.data)
dimp = self.dimp
if init_cov is not None:
self.filter.init_cov = init_cov
# assign current transition & observation functions (of parameters)
if self.filter.name == 'KalmanFilter':
pmat = self.precalc_mat[0]
qmat = self.precalc_mat[1]
F = np.vstack((pmat[1, 0][:, :-self.neps],
qmat[1, 0][:-self.neps, :-self.neps]))
F = np.pad(F, ((0, 0), (dimp, 0)))
self.filter.F = F
self.filter.H = np.hstack((self.hx[0], self.hx[1])), self.hx[2]
if self.filter.Q.shape[0] == self.neps:
E = np.vstack((pmat[1, 0][:, -self.neps:],
qmat[1, 0][:-self.neps, -self.neps:]))
self.filter.Q = E @ self.filter.Q @ E.T
if self.filter.init_cov is None:
p4 = sl.solve_discrete_lyapunov(F[dimp:,dimp:], self.filter.Q[dimp:,dimp:])
self.filter.P = F[:,dimp:] @ p4 @ F.T[dimp:] + self.filter.Q
else:
self.filter.P = self.filter.init_cov
elif dispatch or self.filter.name == 'ParticleFilter':
from .engine import func_dispatch
t_func_jit, o_func_jit, get_eps_jit = func_dispatch(self, full=True)
self.filter.t_func = t_func_jit
self.filter.o_func = o_func_jit
self.filter.get_eps = get_eps_jit
elif self.filter.reduced_form:
self.filter.t_func = lambda *x: self.t_func(*x, get_obs=True)
self.filter.o_func = None
if self.filter.init_cov is None:
qmat = self.precalc_mat[1]
F = qmat[1, 0][:-self.neps, :-self.neps]
E = qmat[1, 0][:-self.neps, -self.neps:]
Q = E @ self.filter.Q @ E.T
self.filter.P = sl.solve_discrete_lyapunov(F, Q)
else:
self.filter.P = self.filter.init_cov
else:
self.filter.t_func = self.t_func
self.filter.o_func = self.o_func
if self.filter.init_cov is None:
self.filter.P = get_p_init_lyapunov(self, self.filter.Q)
else:
self.filter.P = self.filter.init_cov
self.filter.get_eps = self.get_eps_lin
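# Forward filtering pass; optionally followed by the smoother and/or reduced to the log-likelihood.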
if self.filter.name == 'KalmanFilter':
means, covs, ll = self.filter.batch_filter(self.Z)
if smoother:
means, covs, _, _ = self.filter.rts_smoother(
means, covs, inv=lambda x: np.linalg.pinv(x, rcond=rcond))
if get_ll:
res = ll
else:
res = (means, covs)
elif self.filter.name == 'ParticleFilter':
res = self.filter.batch_filter(self.Z)
if smoother:
if verbose > 0:
print('[run_filter:]'.ljust(
15, ' ')+' Filtering done after %s seconds, starting smoothing...' % np.round(time.time()-st, 3))
if isinstance(smoother, bool):
smoother = 10
res = self.filter.smoother(smoother)
else:
res = self.filter.batch_filter(
self.Z, calc_ll=get_ll, store=smoother, seed=seed, verbose=verbose > 0)
if smoother:
res = self.filter.rts_smoother(res, rcond=rcond)
if get_ll:
if np.isnan(res):
res = -np.inf
self.ll = res
else:
self.X = res
if verbose > 0:
mess = '[run_filter:]'.ljust(
15, ' ')+' Filtering done in %s.' % timeprint(time.time()-st, 3)
if get_ll:
mess += 'Likelihood is %s.' % res
print(mess)
return res
def extract(self, sample=None, nsamples=1, init_cov=None, precalc=True, seed=0, nattemps=4, accept_failure=False, verbose=True, debug=False, l_max=None, k_max=None, **npasargs):
"""Extract the timeseries of (smoothed) shocks.
Parameters
----------
sample : array, optional
Provide one or several parameter vectors for which the smoothed shocks are calculated (default is the current `self.par`)
nsamples : int, optional
Number of `npas`-draws for each element in `sample`. Defaults to 1
nattemps : int, optional
Number of attempts per sample to crunch the sample with a different seed. Defaults to 4
Returns
-------
tuple
The result(s)
"""
import tqdm
import os
from grgrlib.core import map2arr
if sample is None:
if type(self).__name__ == "DSGE_DUMMY":
sample = None
else:
sample = self.par
if np.ndim(sample) <= 1:
sample = [sample]
np.random.seed(seed)
fname = self.filter.name
verbose = max(verbose, debug)
if hasattr(self, 'pool'):
from .estimation import create_pool
create_pool(self)
if fname == 'ParticleFilter':
raise NotImplementedError
elif fname == 'KalmanFilter':
if nsamples > 1:
print('[extract:]'.ljust(
15, ' ')+' Setting `nsamples` to 1 as the linear filter does not rely on sampling.')
nsamples = 1
debug = not hasattr(self, 'debug') or self.debug
self.debug = True
else:
if self.filter.reduced_form:
self.create_filter(
R=self.filter.R, N=self.filter.N, reduced_form=False)
print('[extract:]'.ljust(
15, ' ')+' Extraction requires filter in non-reduced form. Recreating filter instance.')
npas = serializer(self.filter.npas)
self.debug |= debug
if sample[0] is not None:
set_par = serializer(self.set_par)
run_filter = serializer(self.run_filter)
t_func = serializer(self.t_func)
edim = len(self.shocks)
xdim = len(self.vv)
odim = len(self.observables)
obs_func = serializer(self.obs)
filter_get_eps = serializer(self.get_eps_lin)
dimeps = self.dimeps
dimp = self.dimp
seeds = np.random.randint(2**31, size=nsamples) # win explodes with 2**32
sample = [(x, y) for x in sample for y in seeds]
def runner(arg):
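# Filter/smooth one (parameter vector, seed) draw and back out the implied shock series.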
par, seed_loc = arg
if par is not None:
set_par(par, l_max=l_max, k_max=k_max)
res = run_filter(verbose=verbose > 2, seed=seed_loc, init_cov=init_cov)
if fname == 'KalmanFilter':
means, covs = res
res = means.copy()
resid = np.empty((means.shape[0]-1, dimeps))
for t, x in enumerate(means[1:]):
resid[t] = filter_get_eps(x, res[t])
res[t+1] = t_func(res[t], resid[t], linear=True)[0]
return par, res[0], resid, 0
np.random.shuffle(res)
sample = np.dstack((obs_func(res), res[..., dimp:]))
inits = res[:, 0, :]
def t_func_loc(states, eps):
(q, pobs), flag = t_func(states, eps, get_obs=True)
return np.hstack((pobs, q)), flag
for natt in range(nattemps):
try:
init, resid, flags = npas(func=t_func_loc, X=sample, init_states=inits, verbose=max(
len(sample) == 1, verbose-1), seed=seed_loc, nsamples=1, **npasargs)
return par, init, resid[0], flags
except Exception as e:
raised_error = e
if accept_failure:
print('[extract:]'.ljust(
15, ' ') + "got an error: '%s' (after %s unsuccessful attemps)." % (raised_error, natt+1))
return None
else:
import sys
raise type(raised_error)(str(raised_error) + ' (after %s unsuccessful attemps).' %
(natt+1)).with_traceback(sys.exc_info()[2])
wrap = tqdm.tqdm if (verbose and len(sample) >
1) else (lambda x, **kwarg: x)
res = wrap(self.mapper(runner, sample), unit=' sample(s)',
total=len(sample), dynamic_ncols=True)
pars, init, resid, flags = map2arr(res)
if hasattr(self, 'pool') and self.pool:
self.pool.close()
if fname == 'KalmanFilter':
self.debug = debug
if resid.shape[0] == 1:
resid[0] = pd.DataFrame(
resid[0], index=self.data.index[:-1], columns=self.shocks)
edict = {'pars': pars,
'init': init,
'resid': resid,
'flags': flags}
return edict
|
[
"econsieve.KalmanFilter",
"numpy.random.seed",
"numpy.diagflat",
"numpy.empty",
"numpy.isnan",
"numpy.random.randint",
"sys.exc_info",
"numpy.linalg.pinv",
"econsieve.TEnKF",
"numpy.pad",
"pandas.DataFrame",
"numpy.ndim",
"grgrlib.multiprocessing.serializer",
"numpy.var",
"numpy.random.shuffle",
"numpy.hstack",
"scipy.linalg.solve_discrete_lyapunov",
"numpy.vstack",
"time.time",
"numpy.array",
"grgrlib.core.map2arr"
] |
[((332, 351), 'numpy.array', 'np.array', (['self.data'], {}), '(self.data)\n', (340, 351), True, 'import numpy as np\n'), ((416, 436), 'numpy.diagflat', 'np.diagflat', (['sig_obs'], {}), '(sig_obs)\n', (427, 436), True, 'import numpy as np\n'), ((565, 642), 'numpy.vstack', 'np.vstack', (['(pmat[1, 0][:, :-self.neps], qmat[1, 0][:-self.neps, :-self.neps])'], {}), '((pmat[1, 0][:, :-self.neps], qmat[1, 0][:-self.neps, :-self.neps]))\n', (574, 642), True, 'import numpy as np\n'), ((671, 748), 'numpy.vstack', 'np.vstack', (['(pmat[1, 0][:, -self.neps:], qmat[1, 0][:-self.neps, -self.neps:])'], {}), '((pmat[1, 0][:, -self.neps:], qmat[1, 0][:-self.neps, -self.neps:]))\n', (680, 748), True, 'import numpy as np\n'), ((797, 868), 'scipy.linalg.solve_discrete_lyapunov', 'sl.solve_discrete_lyapunov', (['F[self.dimp:, :]', 'Q[self.dimp:, self.dimp:]'], {}), '(F[self.dimp:, :], Q[self.dimp:, self.dimp:])\n', (823, 868), True, 'import scipy.linalg as sl\n'), ((1524, 1543), 'numpy.array', 'np.array', (['self.data'], {}), '(self.data)\n', (1532, 1543), True, 'import numpy as np\n'), ((3080, 3099), 'numpy.array', 'np.array', (['self.data'], {}), '(self.data)\n', (3088, 3099), True, 'import numpy as np\n'), ((7601, 7621), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7615, 7621), True, 'import numpy as np\n'), ((8615, 8642), 'grgrlib.multiprocessing.serializer', 'serializer', (['self.run_filter'], {}), '(self.run_filter)\n', (8625, 8642), False, 'from grgrlib.multiprocessing import serializer\n'), ((8656, 8679), 'grgrlib.multiprocessing.serializer', 'serializer', (['self.t_func'], {}), '(self.t_func)\n', (8666, 8679), False, 'from grgrlib.multiprocessing import serializer\n'), ((8781, 8801), 'grgrlib.multiprocessing.serializer', 'serializer', (['self.obs'], {}), '(self.obs)\n', (8791, 8801), False, 'from grgrlib.multiprocessing import serializer\n'), ((8823, 8851), 'grgrlib.multiprocessing.serializer', 'serializer', (['self.get_eps_lin'], {}), '(self.get_eps_lin)\n', (8833, 8851), False, 'from grgrlib.multiprocessing import serializer\n'), ((8912, 8953), 'numpy.random.randint', 'np.random.randint', (['(2 ** 31)'], {'size': 'nsamples'}), '(2 ** 31, size=nsamples)\n', (8929, 8953), True, 'import numpy as np\n'), ((10912, 10924), 'grgrlib.core.map2arr', 'map2arr', (['res'], {}), '(res)\n', (10919, 10924), False, 'from grgrlib.core import map2arr\n'), ((366, 388), 'numpy.var', 'np.var', (['self.Z'], {'axis': '(0)'}), '(self.Z, axis=0)\n', (372, 388), True, 'import numpy as np\n'), ((1159, 1236), 'numpy.vstack', 'np.vstack', (['(pmat[1, 0][:, -self.neps:], qmat[1, 0][:-self.neps, -self.neps:])'], {}), '((pmat[1, 0][:, -self.neps:], qmat[1, 0][:-self.neps, -self.neps:]))\n', (1168, 1236), True, 'import numpy as np\n'), ((1361, 1385), 'numpy.linalg.pinv', 'np.linalg.pinv', (['E', 'rcond'], {}), '(E, rcond)\n', (1375, 1385), True, 'import numpy as np\n'), ((1758, 1804), 'econsieve.KalmanFilter', 'KalmanFilter', ([], {'dim_x': 'self.dimx', 'dim_z': 'self.nobs'}), '(dim_x=self.dimx, dim_z=self.nobs)\n', (1770, 1804), False, 'from econsieve import KalmanFilter, TEnKF\n'), ((3054, 3065), 'time.time', 'time.time', ([], {}), '()\n', (3063, 3065), False, 'import time\n'), ((3392, 3469), 'numpy.vstack', 'np.vstack', (['(pmat[1, 0][:, :-self.neps], qmat[1, 0][:-self.neps, :-self.neps])'], {}), '((pmat[1, 0][:, :-self.neps], qmat[1, 0][:-self.neps, :-self.neps]))\n', (3401, 3469), True, 'import numpy as np\n'), ((3505, 3535), 'numpy.pad', 'np.pad', (['F', '((0, 0), (dimp, 0))'], {}), '(F, ((0, 0), 
(dimp, 0)))\n', (3511, 3535), True, 'import numpy as np\n'), ((6279, 6292), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (6287, 6292), True, 'import numpy as np\n'), ((7548, 7563), 'numpy.ndim', 'np.ndim', (['sample'], {}), '(sample)\n', (7555, 7563), True, 'import numpy as np\n'), ((8572, 8596), 'grgrlib.multiprocessing.serializer', 'serializer', (['self.set_par'], {}), '(self.set_par)\n', (8582, 8596), False, 'from grgrlib.multiprocessing import serializer\n'), ((9618, 9640), 'numpy.random.shuffle', 'np.random.shuffle', (['res'], {}), '(res)\n', (9635, 9640), True, 'import numpy as np\n'), ((11104, 11175), 'pandas.DataFrame', 'pd.DataFrame', (['resid[0]'], {'index': 'self.data.index[:-1]', 'columns': 'self.shocks'}), '(resid[0], index=self.data.index[:-1], columns=self.shocks)\n', (11116, 11175), True, 'import pandas as pd\n'), ((2384, 2443), 'econsieve.TEnKF', 'TEnKF', ([], {'N': 'N', 'dim_x': 'dimx', 'dim_z': 'self.nobs', 'seed': 'seed'}), '(N=N, dim_x=dimx, dim_z=self.nobs, seed=seed, **fargs)\n', (2389, 2443), False, 'from econsieve import KalmanFilter, TEnKF\n'), ((3587, 3622), 'numpy.hstack', 'np.hstack', (['(self.hx[0], self.hx[1])'], {}), '((self.hx[0], self.hx[1]))\n', (3596, 3622), True, 'import numpy as np\n'), ((3700, 3777), 'numpy.vstack', 'np.vstack', (['(pmat[1, 0][:, -self.neps:], qmat[1, 0][:-self.neps, -self.neps:])'], {}), '((pmat[1, 0][:, -self.neps:], qmat[1, 0][:-self.neps, -self.neps:]))\n', (3709, 3777), True, 'import numpy as np\n'), ((3915, 3987), 'scipy.linalg.solve_discrete_lyapunov', 'sl.solve_discrete_lyapunov', (['F[dimp:, dimp:]', 'self.filter.Q[dimp:, dimp:]'], {}), '(F[dimp:, dimp:], self.filter.Q[dimp:, dimp:])\n', (3941, 3987), True, 'import scipy.linalg as sl\n'), ((8469, 8497), 'grgrlib.multiprocessing.serializer', 'serializer', (['self.filter.npas'], {}), '(self.filter.npas)\n', (8479, 8497), False, 'from grgrlib.multiprocessing import serializer\n'), ((9362, 9400), 'numpy.empty', 'np.empty', (['(means.shape[0] - 1, dimeps)'], {}), '((means.shape[0] - 1, dimeps))\n', (9370, 9400), True, 'import numpy as np\n'), ((9854, 9874), 'numpy.hstack', 'np.hstack', (['(pobs, q)'], {}), '((pobs, q))\n', (9863, 9874), True, 'import numpy as np\n'), ((4821, 4853), 'scipy.linalg.solve_discrete_lyapunov', 'sl.solve_discrete_lyapunov', (['F', 'Q'], {}), '(F, Q)\n', (4847, 4853), True, 'import scipy.linalg as sl\n'), ((10637, 10651), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (10649, 10651), False, 'import sys\n'), ((5456, 5486), 'numpy.linalg.pinv', 'np.linalg.pinv', (['x'], {'rcond': 'rcond'}), '(x, rcond=rcond)\n', (5470, 5486), True, 'import numpy as np\n'), ((6490, 6501), 'time.time', 'time.time', ([], {}), '()\n', (6499, 6501), False, 'import time\n'), ((5891, 5902), 'time.time', 'time.time', ([], {}), '()\n', (5900, 5902), False, 'import time\n')]
|
#!/usr/bin/env python
import pyttsx3
import rospy
from std_msgs.msg import String
import time
from Speech2Text import Speech2Text
class Speaker(object):
def __init__(self, speed=130, volume=1.0, voice_id=10):
self.engine = pyttsx3.init()
self.set_speed(speed)
self.set_volume(volume)
self.set_voice(voice_id)
self.publisher = rospy.Publisher(
"/home_edu/facial",
String,
queue_size=1,
latch=True
)
def set_speed(self, speed):
self.engine.setProperty("rate", speed)
def set_volume(self, volume):
self.engine.setProperty("volume", volume)
def set_voice(self, voice_id):
voices = self.engine.getProperty("voices")
# print(len(voices))
if voice_id >= 0 and voice_id < len(voices):
voice = voices[voice_id]
self.engine.setProperty("voice", voice.id)
else:
print("No voice id: ", voice_id)
def say(self, msg, f1="happy-2", f2="happy", keep_message=False):
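# Show expression f1 while speaking, then switch back to f2 (optionally keeping the spoken text on the display).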
print(msg)
cmd = f1 + ":" + msg
self.publisher.publish(cmd)
self.engine.say(msg)
self.engine.runAndWait()
cmd = f2 + ":"
if keep_message:
cmd = cmd + msg
self.publisher.publish(cmd)
if __name__ == "__main__":
rospy.init_node("home_edu_speaker", anonymous=True)
P = Speaker(140, 1.0, 16)
s = Speech2Text()
s.ambient_noise()
P.say("Hello, I am ready.")
while True:
# s.ambient_noise()
print("ready")
t = s.listen()
print(t)
if t == "goodbye":
break
if t.find("introduce") >= 0:
P.say("Hi, nice to meet you.")
time.sleep(1)
P.say("I am PCMS home service robot.", "happy-1")
time.sleep(1)
P.say("Service robots assist people by doing household chores.", "smart")
time.sleep(1)
P.say("And I am your home assistant.")
if t.find("thank") >= 0:
P.say("this is my pleasure", "happy-2")
print("bye-bye")
P.say("bye-bye")
# while not rospy.is_shutdown():
# P.say("Hi, nice to meet you.")
# time.sleep(1)
# P.say("I am PCMS home service robot.", "happy-1")
# time.sleep(1)
# P.say("Service robots assist human beings, typically by performing a job that is dirty, dull, distant, dangerous or repetitive, including household chores.", "smart")
# time.sleep(1)
# P.say("And I am your home assistant.")
# time.sleep(10)
|
[
"pyttsx3.init",
"Speech2Text.Speech2Text",
"rospy.Publisher",
"time.sleep",
"rospy.init_node"
] |
[((1395, 1446), 'rospy.init_node', 'rospy.init_node', (['"""home_edu_speaker"""'], {'anonymous': '(True)'}), "('home_edu_speaker', anonymous=True)\n", (1410, 1446), False, 'import rospy\n'), ((1490, 1503), 'Speech2Text.Speech2Text', 'Speech2Text', ([], {}), '()\n', (1501, 1503), False, 'from Speech2Text import Speech2Text\n'), ((242, 256), 'pyttsx3.init', 'pyttsx3.init', ([], {}), '()\n', (254, 256), False, 'import pyttsx3\n'), ((386, 455), 'rospy.Publisher', 'rospy.Publisher', (['"""/home_edu/facial"""', 'String'], {'queue_size': '(1)', 'latch': '(True)'}), "('/home_edu/facial', String, queue_size=1, latch=True)\n", (401, 455), False, 'import rospy\n'), ((1802, 1815), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1812, 1815), False, 'import time\n'), ((1890, 1903), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1900, 1903), False, 'import time\n'), ((2002, 2015), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2012, 2015), False, 'import time\n')]
|
import pandas as pd
data = pd.read_csv('data/T_UWWTPS.csv')
df_toclean = pd.DataFrame(data = {'Member State': data['rptMStateKey'], 'Latitude': data['uwwLatitude'],
'Longitude': data['uwwLongitude'], 'LoadEntering': data['uwwLoadEnteringUWWTP'], 'Capacity':
data['uwwCapacity'], 'NRemoval': data['uwwNRemoval'], 'PRemoval': data['uwwPRemoval']})
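# Keep only complete records: drop any plant with a missing coordinate, load, capacity or removal value.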
df_no_missing = df_toclean.dropna()
|
[
"pandas.read_csv",
"pandas.DataFrame"
] |
[((27, 59), 'pandas.read_csv', 'pd.read_csv', (['"""data/T_UWWTPS.csv"""'], {}), "('data/T_UWWTPS.csv')\n", (38, 59), True, 'import pandas as pd\n'), ((73, 357), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'Member State': data['rptMStateKey'], 'Latitude': data['uwwLatitude'],\n 'Longitude': data['uwwLongitude'], 'LoadEntering': data[\n 'uwwLoadEnteringUWWTP'], 'Capacity': data['uwwCapacity'], 'NRemoval':\n data['uwwNRemoval'], 'PRemoval': data['uwwPRemoval']}"}), "(data={'Member State': data['rptMStateKey'], 'Latitude': data[\n 'uwwLatitude'], 'Longitude': data['uwwLongitude'], 'LoadEntering': data\n ['uwwLoadEnteringUWWTP'], 'Capacity': data['uwwCapacity'], 'NRemoval':\n data['uwwNRemoval'], 'PRemoval': data['uwwPRemoval']})\n", (85, 357), True, 'import pandas as pd\n')]
|
import re
from tracardi_dot_notation.dot_accessor import DotAccessor
from tracardi_dot_notation.utils.singleton import Singleton
class DotTemplate(metaclass=Singleton):
def __init__(self):
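# Matches {{ source@dot.path }} placeholders, where source is one of payload/profile/event/session/flow/memory.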
self._regex = re.compile(r"\{{2}\s*((?:payload|profile|event|session|flow|memory)"
r"@[\[\]0-9a-zA-a_\-\.]+(?<![\.\[]))\s*\}{2}")
def render(self, template, dot: DotAccessor):
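# Replace every matched placeholder with the value the DotAccessor resolves for its dot-path.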
return re.sub(self._regex, lambda x: str(dot[x.group(1)]), template)
|
[
"re.compile"
] |
[((219, 349), 're.compile', 're.compile', (['"""\\\\{{2}\\\\s*((?:payload|profile|event|session|flow|memory)@[\\\\[\\\\]0-9a-zA-a_\\\\-\\\\.]+(?<![\\\\.\\\\[]))\\\\s*\\\\}{2}"""'], {}), "(\n '\\\\{{2}\\\\s*((?:payload|profile|event|session|flow|memory)@[\\\\[\\\\]0-9a-zA-a_\\\\-\\\\.]+(?<![\\\\.\\\\[]))\\\\s*\\\\}{2}'\n )\n", (229, 349), False, 'import re\n')]
|
import pprint
from .constants import HostPrograms, Ports, ServerCommands
import requests
try:
import websockets
import asyncio
except:
print("failed to import websockets/asyncio")
class Client(object):
"""
Base client from which all other Clients will inherit
"""
def __init__(self):
super(Client, self).__init__()
self.host_address = "127.0.0.1"
self.port = 65500
self.__echo_execution = True
self.__echo_payload = True
def set_echo_execution(self, value):
"""
If set to true, the client will print out the response coming from the server
:param value: *bool*
:return: None
"""
self.__echo_execution = value
def echo_execution(self):
"""
Return True or False whether or not responses from the server are printed in the client
:return: *bool*
"""
return self.__echo_execution
def set_echo_payload(self, value):
"""
If set to true, the client will print the JSON payload it's sending to the server
:param value: *bool*
:return:
"""
self.__echo_payload = value
def echo_payload(self):
"""
Return True or False whether or not the JSON payloads sent to the server are printed in the client
:return:
"""
return self.__echo_payload
def is_host_online(self):
"""
Convenience function to call on the client. "is_online" comes from the core module
:return: *bool*
"""
response = self.execute("is_online", {})
return response.get("Success")
def execute(self, command, parameters={}):
"""
Executes a given command for this client. The server will look for this command in the modules it has loaded
:param command: *string* or *function* The command name or the actual function that you can import from the
modules module
:param parameters: *dict* of the parameters (arguments) for the the command. These have to match the argument
names on the function in the module exactly
:return: *dict* of the response coming from the server
From a Skyhook server it looks like:
{
'ReturnValue': ['Camera', 'Cube', 'Cube.001', 'Light'],
'Success': True,
'Time': '09:43:18'
}
From Unreal it looks like this:
{
'ReturnValue': ['/Game/Developers/cooldeveloper/Maps/ScatterDemoLevel/ScatterDemoLevelMaster.ScatterDemoLevelMaster',
'/Game/Apple/Core/UI/Widgets/WBP_CriticalHealthLevelVignette.WBP_CriticalHealthLevelVignette',
'/Game/Apple/Lighting/LUTs/RGBTable16x1_Level_01.RGBTable16x1_Level_01']
}
"""
if callable(command):
command = command.__name__
url = "http://%s:%s" % (self.host_address, self.port)
payload = self.__create_payload(command, parameters)
response = requests.post(url, json=payload).json()
if self.echo_payload():
pprint.pprint(payload)
if self.echo_execution():
pprint.pprint(response)
return response
def __create_payload(self, command, parameters):
"""
Constructs the dictionary for the JSON payload that will be sent to the server
:param command: *string* name of the command
:param parameters: *dictionary*
:return: *dictionary*
"""
payload = {
"FunctionName": command,
"Parameters": parameters
}
return payload
class BlenderClient(Client):
"""
Custom client for Blender
"""
def __init__(self):
super(BlenderClient, self).__init__()
self.host_program = HostPrograms.blender
self.port = Ports.blender
class MayaClient(Client):
"""
Custom client for Maya
"""
def __init__(self):
super(MayaClient, self).__init__()
self.host_program = HostPrograms.maya
self.port = Ports.maya
class HoudiniClient(Client):
"""
Custom client for Houdini
"""
def __init__(self):
super(HoudiniClient, self).__init__()
self.host_program = HostPrograms.houdini
self.port = Ports.houdini
class UnrealClient(Client):
"""
Custom client for Unreal. This overwrites most of the basic functionality because we can't run a Skyhook server
in Unreal, but have to rely on Web Remote Control.
There is a file called skyhook in /Game/Python/ that holds the SkyHook classes to be used with this client.
This file has to be loaded by running "import skyhook" in Unreal's Python editor, or imported when the project
is loaded.
"""
def __init__(self):
super(UnrealClient, self).__init__()
self.host_program = HostPrograms.unreal
self.__command_object_path = "/Engine/PythonTypes.Default__SkyHookCommands"
self.__server_command_object_path = "/Engine/PythonTypes.Default__SkyHookServerCommands"
self.__headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
self.port = Ports.unreal
def execute(self, command, parameters={}, function=True, property=False):
"""
Will execute the command so Web Remote Control understands it.
:param command: *string* command name
:param parameters: *dict* of the parameters (arguments) for the the command. These have to match the argument
names on the function in the module exactly
:param function: *bool* ignore, not used
:param property: *bool* ignore, not used
:return: *dict* of the response coming from Web Remote Control
"""
url = "http://%s:%s/remote/object/call" % (self.host_address, self.port)
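# Server-level commands (ServerCommands) go to the SkyHook server object; everything else goes to the user command object.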
if command in dir(ServerCommands):
payload = self.__create_payload(command, parameters, self.__server_command_object_path)
used_object_path = self.__server_command_object_path
else:
payload = self.__create_payload(command, parameters, self.__command_object_path)
used_object_path = self.__command_object_path
requests.put(url, json=payload, headers=self.__headers).json()
response = self.__get_response(used_object_path)
if self.echo_payload():
pprint.pprint(payload)
if self.echo_execution():
pprint.pprint(response)
return response
def set_command_object_path(self, path="/Engine/PythonTypes.Default__PythonClassName"):
"""
Set the object path for commands
:param path: *string* path. For Python functions, this has to be something like /Engine/PythonTypes.Default__<PythonClassName>
You need to add the leading 'Default__', this is what Unreal Engine expects
:return: None
"""
self.__command_object_path = path
def command_object_path(self):
"""
Gets the command object path
:return: *string* Object path
"""
return self.__command_object_path
def __get_response(self, object_path):
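# Web Remote Control returns the reply as a string: ask the command object for get_reply and eval it back into a Python object (fall back to the raw response if that fails).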
url = "http://%s:%s/remote/object/call" % (self.host_address, self.port)
payload = self.__create_payload("get_reply", {}, object_path)
response = requests.put(url, json=payload, headers=self.__headers).json()
try:
return_value = eval(response.get("ReturnValue"))
response = {'ReturnValue': return_value}
except:
pass
return response
def __create_payload(self, command, parameters, object_path, echo_payload=True):
payload = {
"ObjectPath": object_path,
"FunctionName": command,
"Parameters": parameters,
"GenerateTransaction": True
}
return payload
# class WebsocketClient(Client):
# def __init__(self):
# super(WebsocketClient, self).__init__()
# self.uri = "ws://%s:%s" % (self.host_address, self.port)
# print(self.uri)
#
# def execute(self, command, parameters={}):
# payload = self.__create_payload(command, parameters)
# print(payload)
#
# async def send():
# async with websockets.connect(self.uri) as websocket:
# await websocket.send(payload)
# response = await websocket.recv()
# return response
#
# response = asyncio.get_event_loop().run_until_complete(send())
# return response
#
# def __create_payload(self, command, parameters):
# """
# Constructs the dictionary for the JSON payload that will be sent to the server
#
# :param command: *string* name of the command
# :param parameters: *dictionary*
# :return: *dictionary*
# """
# payload = {
# "FunctionName": command,
# "Parameters": parameters
# }
#
# return payload
|
[
"requests.put",
"requests.post",
"pprint.pprint"
] |
[((3131, 3153), 'pprint.pprint', 'pprint.pprint', (['payload'], {}), '(payload)\n', (3144, 3153), False, 'import pprint\n'), ((3201, 3224), 'pprint.pprint', 'pprint.pprint', (['response'], {}), '(response)\n', (3214, 3224), False, 'import pprint\n'), ((6414, 6436), 'pprint.pprint', 'pprint.pprint', (['payload'], {}), '(payload)\n', (6427, 6436), False, 'import pprint\n'), ((6484, 6507), 'pprint.pprint', 'pprint.pprint', (['response'], {}), '(response)\n', (6497, 6507), False, 'import pprint\n'), ((3046, 3078), 'requests.post', 'requests.post', (['url'], {'json': 'payload'}), '(url, json=payload)\n', (3059, 3078), False, 'import requests\n'), ((6249, 6304), 'requests.put', 'requests.put', (['url'], {'json': 'payload', 'headers': 'self.__headers'}), '(url, json=payload, headers=self.__headers)\n', (6261, 6304), False, 'import requests\n'), ((7368, 7423), 'requests.put', 'requests.put', (['url'], {'json': 'payload', 'headers': 'self.__headers'}), '(url, json=payload, headers=self.__headers)\n', (7380, 7423), False, 'import requests\n')]
|
"""Construction of 2D data tables from ASCII files obtained from models/simulations
or other sources.
Currently supported are the files generated for the papers:
| Nuclear Physics Meets the Sources of the Ultra-High Energy Cosmic Rays
| <NAME>, <NAME>, <NAME>
| Sci.Rep. 7 (2017) 1, 4882
| e-Print: 1607.07989 [astro-ph.HE]
| DOI: 10.1038/s41598-017-05120-7
| A new view on Auger data and cosmogenic neutrinos in light of different nuclear disintegration and air-shower models
| <NAME>, <NAME>, <NAME>, <NAME>
| Astrophys.J. 873 (2019) 1, 88
| e-Print: 1901.03338 [astro-ph.HE]
| DOI: 10.3847/1538-4357/ab05ce
"""
from os import listdir
from os.path import join
import numpy as np
from six import with_metaclass
from prince_data_utils import resource_path
from prince.util import info
class CrossSectionsFromAscii(object):
"""Each class derived from this one is expected to load the
data from some form of source independently and provide at the
end definitions for the parameters:
self.energy_grid, self.mothers_daughters, self.fragment_yields,
self.inel_mothers, self.inelastic_cross_sctions.
Args:
f_root (str): The root name of the tabulated files, e.g. CRP2_TALYS_.
"""
def __init__(self, f_root= 'CRP2_TALYS_'):
self.energy_grid = None
self.inel_mothers = None
self.mothers_daughters = None
self.inelastic_cross_sctions = None
self.fragment_yields = None
self._load(f_root)
assert self.energy_grid is not None
assert self.inel_mothers is not None
assert self.mothers_daughters is not None
assert self.inelastic_cross_sctions is not None
assert self.fragment_yields is not None
self._check_consistency()
def _load(self, f_root):
"""Load cross section tables from files into memory.
Needs to be defined in derived classes."""
if not f_root.endswith('_'):
f_root += '_'
f_root = join(resource_path, 'photo-nuclear',f_root)
info(0, 'Loading files', f_root + '*')
self.energy_grid = np.loadtxt(f_root + 'egrid.dat.bz2')*1e-3 # to GeV
self._inel_cs_tables = np.loadtxt(f_root + 'nonel.dat.bz2')
self._inel_fragment_yields = np.loadtxt(f_root + 'incl_i_j.dat.bz2')
assert self.energy_grid.shape[0] == \
self._inel_cs_tables.shape[1] - 1 == \
self._inel_fragment_yields.shape[1] - 2, \
'Tables e-grids inconsistent {0} != {1} != {2}'.format(
self.energy_grid.shape[0], self._inel_cs_tables.shape[1] - 1,
self._inel_fragment_yields.shape[1] - 2)
# Chunk the tables into their contents
        self.inel_mothers = self._inel_cs_tables[:, 0].astype('int')
        self.inelastic_cross_sections = self._inel_cs_tables[:, 1:] * 1e-27  # mbarn -> cm2
        self.mothers_daughters = self._inel_fragment_yields[:, 0:2].astype('int')
        self.fragment_yields = self._inel_fragment_yields[:, 2:] * 1e-27  # mbarn -> cm2
def _check_consistency(self):
"""Some cross checks for dimenstions and consistency between
inelastic cross sections and yields are performed."""
        assert self.inel_mothers.shape[0] == self.inelastic_cross_sections.shape[0]
        assert self.energy_grid.shape[0] == self.inelastic_cross_sections.shape[1]
assert self.mothers_daughters.shape[0] == self.fragment_yields.shape[0]
assert self.energy_grid.shape[0] == self.fragment_yields.shape[1]
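# A minimal, hypothetical sketch of how a derived loader can satisfy the base-class
# contract described in the CrossSectionsFromAscii docstring: _load() must fill all
# five attributes so that the assertions and _check_consistency() in __init__ pass.
# The class name, shapes, and values below are toy assumptions only.
class ToyCrossSections(CrossSectionsFromAscii):
    def _load(self, f_root):
        self.energy_grid = np.array([0.1, 1.0, 10.0])  # three energy bins in GeV
        self.inel_mothers = np.array([101, 100])  # proton and neutron codes
        self.inelastic_cross_sections = np.zeros((2, 3))  # cm2, one row per mother
        self.mothers_daughters = np.array([[101, 100]])  # a single (mother, daughter) channel
        self.fragment_yields = np.zeros((1, 3))  # cm2, one row per channel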
class PhotoMesonCSFromPickle(CrossSectionsFromAscii):
def _load(self, f_root_):
"""Load from pickled dictionaries"""
f_root = join(resource_path, 'photo-meson',f_root_)
info(0, 'Loading files', f_root + '*')
        raw_csec = np.load(f_root + 'crosssec.npy', allow_pickle=True)
        energy, csec_proton, csec_neutron = raw_csec
        csec = np.load(f_root + 'redistribution_logbins.npy', allow_pickle=True)
energy_redist, xbins, redist_proton, redist_neutron = csec
        # SOPHIA cross sections are in microbarn; convert here to cm^2
csec_proton, csec_neutron = csec_proton * 1e-30, csec_neutron * 1e-30
daughters_proton = np.array(list(redist_proton.keys()))
fragments_proton = np.array(list(redist_proton.values()))
daughters_neutron = np.array(list(redist_neutron.keys()))
fragments_neutron = np.array(list(redist_neutron.values()))
assert np.all(energy == energy_redist)
assert xbins.shape[0]-1 == fragments_proton.shape[-1] == fragments_neutron.shape[-1]
self.energy_grid = energy
self.xbins = xbins
self.inel_mothers = np.array([101, 100])
        self.inelastic_cross_sections = np.stack([csec_proton, csec_neutron])
        channels_proton = np.stack(
            [np.full(daughters_proton.shape, 101), daughters_proton], axis=1)
        channels_neutron = np.stack(
            [np.full(daughters_neutron.shape, 100), daughters_neutron], axis=1)
self.mothers_daughters = np.concatenate(
[channels_proton, channels_neutron])
# Fragments in raw data are in dn/dx, but we need dsigma/dx = dn/dx * sigma
self.fragment_yields = np.concatenate([
fragments_proton * csec_proton[None,:,None],
fragments_neutron * csec_neutron[None,:,None]
])
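        # Shape bookkeeping for the broadcast above (assumed shapes, for illustration):
        # fragments_proton is (n_daughters, n_energies, n_xbins), while
        # csec_proton[None, :, None] lifts sigma(E) from (n_energies,) to
        # (1, n_energies, 1), so each daughter's dn/dx table is scaled per
        # energy bin into dsigma/dx.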
|
[
"numpy.stack",
"numpy.full",
"numpy.load",
"numpy.concatenate",
"prince.util.info",
"numpy.array",
"numpy.loadtxt",
"os.path.join",
"numpy.all"
] |
[((2030, 2074), 'os.path.join', 'join', (['resource_path', '"""photo-nuclear"""', 'f_root'], {}), "(resource_path, 'photo-nuclear', f_root)\n", (2034, 2074), False, 'from os.path import join\n'), ((2082, 2120), 'prince.util.info', 'info', (['(0)', '"""Loading files"""', "(f_root + '*')"], {}), "(0, 'Loading files', f_root + '*')\n", (2086, 2120), False, 'from prince.util import info\n'), ((2230, 2266), 'numpy.loadtxt', 'np.loadtxt', (["(f_root + 'nonel.dat.bz2')"], {}), "(f_root + 'nonel.dat.bz2')\n", (2240, 2266), True, 'import numpy as np\n'), ((2304, 2343), 'numpy.loadtxt', 'np.loadtxt', (["(f_root + 'incl_i_j.dat.bz2')"], {}), "(f_root + 'incl_i_j.dat.bz2')\n", (2314, 2343), True, 'import numpy as np\n'), ((3720, 3763), 'os.path.join', 'join', (['resource_path', '"""photo-meson"""', 'f_root_'], {}), "(resource_path, 'photo-meson', f_root_)\n", (3724, 3763), False, 'from os.path import join\n'), ((3771, 3809), 'prince.util.info', 'info', (['(0)', '"""Loading files"""', "(f_root + '*')"], {}), "(0, 'Loading files', f_root + '*')\n", (3775, 3809), False, 'from prince.util import info\n'), ((3829, 3880), 'numpy.load', 'np.load', (["(f_root + 'crosssec.npy')"], {'allow_pickle': '(True)'}), "(f_root + 'crosssec.npy', allow_pickle=True)\n", (3836, 3880), True, 'import numpy as np\n'), ((3948, 4013), 'numpy.load', 'np.load', (["(f_root + 'redistribution_logbins.npy')"], {'allow_pickle': '(True)'}), "(f_root + 'redistribution_logbins.npy', allow_pickle=True)\n", (3955, 4013), True, 'import numpy as np\n'), ((4506, 4537), 'numpy.all', 'np.all', (['(energy == energy_redist)'], {}), '(energy == energy_redist)\n', (4512, 4537), True, 'import numpy as np\n'), ((4721, 4741), 'numpy.array', 'np.array', (['[101, 100]'], {}), '([101, 100])\n', (4729, 4741), True, 'import numpy as np\n'), ((4781, 4818), 'numpy.stack', 'np.stack', (['[csec_proton, csec_neutron]'], {}), '([csec_proton, csec_neutron])\n', (4789, 4818), True, 'import numpy as np\n'), ((5081, 5132), 'numpy.concatenate', 'np.concatenate', (['[channels_proton, channels_neutron]'], {}), '([channels_proton, channels_neutron])\n', (5095, 5132), True, 'import numpy as np\n'), ((5261, 5378), 'numpy.concatenate', 'np.concatenate', (['[fragments_proton * csec_proton[None, :, None], fragments_neutron *\n csec_neutron[None, :, None]]'], {}), '([fragments_proton * csec_proton[None, :, None], \n fragments_neutron * csec_neutron[None, :, None]])\n', (5275, 5378), True, 'import numpy as np\n'), ((2148, 2184), 'numpy.loadtxt', 'np.loadtxt', (["(f_root + 'egrid.dat.bz2')"], {}), "(f_root + 'egrid.dat.bz2')\n", (2158, 2184), True, 'import numpy as np\n'), ((4868, 4904), 'numpy.full', 'np.full', (['daughters_proton.shape', '(101)'], {}), '(daughters_proton.shape, 101)\n', (4875, 4904), True, 'import numpy as np\n'), ((4982, 5019), 'numpy.full', 'np.full', (['daughters_neutron.shape', '(100)'], {}), '(daughters_neutron.shape, 100)\n', (4989, 5019), True, 'import numpy as np\n')]
|
import os
from mushroom_rl.utils.preprocessors import MinMaxPreprocessor
from mushroom_rl.utils.callbacks import PlotDataset
import numpy as np
from mushroom_rl.algorithms.policy_search import REINFORCE
from mushroom_rl.approximators.parametric import LinearApproximator
from mushroom_rl.approximators.regressor import Regressor
from mushroom_rl.core import Core, Logger
from mushroom_rl.environments import LQR
from mushroom_rl.policy import StateStdGaussianPolicy
from mushroom_rl.utils.dataset import compute_J
from mushroom_rl.utils.optimizers import AdaptiveOptimizer
from tqdm import tqdm
"""
This script shows how to use preprocessors and plot callback.
"""
tqdm.monitor_interval = 0
def experiment(n_epochs, n_iterations, ep_per_run, save_states_to_disk):
np.random.seed()
logger = Logger('plot_and_norm_example', results_dir=None)
logger.strong_line()
logger.info('Plotting and normalization example')
# MDP
mdp = LQR.generate(dimensions=2, max_pos=10., max_action=5., episodic=True)
approximator = Regressor(LinearApproximator,
input_shape=mdp.info.observation_space.shape,
output_shape=mdp.info.action_space.shape)
sigma = Regressor(LinearApproximator,
input_shape=mdp.info.observation_space.shape,
output_shape=mdp.info.action_space.shape)
sigma_weights = 2 * np.ones(sigma.weights_size)
sigma.set_weights(sigma_weights)
policy = StateStdGaussianPolicy(approximator, sigma)
# Agent
optimizer = AdaptiveOptimizer(eps=.01)
algorithm_params = dict(optimizer=optimizer)
agent = REINFORCE(mdp.info, policy, **algorithm_params)
# normalization callback
prepro = MinMaxPreprocessor(mdp_info=mdp.info)
# plotting callback
plotter = PlotDataset(mdp.info, obs_normalized=True)
# Train
core = Core(agent, mdp, callback_step=plotter, preprocessors=[prepro])
# training loop
for n in range(n_epochs):
core.learn(n_episodes=n_iterations * ep_per_run,
n_episodes_per_fit=ep_per_run)
dataset = core.evaluate(n_episodes=ep_per_run, render=False)
        J = np.mean(compute_J(dataset, mdp.info.gamma))
logger.epoch_info(n+1, J=J)
if save_states_to_disk:
# save normalization / plot states to disk path
logger.info('Saving plotting and normalization data')
os.makedirs("./logs/plot_and_norm", exist_ok=True)
prepro.save("./logs/plot_and_norm/preprocessor.msh")
plotter.save_state("./logs/plot_and_norm/plotting_state")
# load states from disk path
logger.info('Loading preprocessor and plotter')
    prepro = MinMaxPreprocessor.load("./logs/plot_and_norm/preprocessor.msh")
plotter.load_state("./logs/plot_and_norm/plotting_state")
if __name__ == '__main__':
experiment(n_epochs=10, n_iterations=10, ep_per_run=100,
save_states_to_disk=False)
|
[
"mushroom_rl.utils.dataset.compute_J",
"mushroom_rl.core.Logger",
"numpy.random.seed",
"mushroom_rl.environments.LQR.generate",
"mushroom_rl.algorithms.policy_search.REINFORCE",
"os.makedirs",
"mushroom_rl.utils.preprocessors.MinMaxPreprocessor.load",
"numpy.ones",
"mushroom_rl.utils.preprocessors.MinMaxPreprocessor",
"mushroom_rl.core.Core",
"mushroom_rl.utils.callbacks.PlotDataset",
"mushroom_rl.policy.StateStdGaussianPolicy",
"mushroom_rl.approximators.regressor.Regressor",
"mushroom_rl.utils.optimizers.AdaptiveOptimizer"
] |
[((778, 794), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (792, 794), True, 'import numpy as np\n'), ((809, 858), 'mushroom_rl.core.Logger', 'Logger', (['"""plot_and_norm_example"""'], {'results_dir': 'None'}), "('plot_and_norm_example', results_dir=None)\n", (815, 858), False, 'from mushroom_rl.core import Core, Logger\n'), ((959, 1030), 'mushroom_rl.environments.LQR.generate', 'LQR.generate', ([], {'dimensions': '(2)', 'max_pos': '(10.0)', 'max_action': '(5.0)', 'episodic': '(True)'}), '(dimensions=2, max_pos=10.0, max_action=5.0, episodic=True)\n', (971, 1030), False, 'from mushroom_rl.environments import LQR\n'), ((1049, 1170), 'mushroom_rl.approximators.regressor.Regressor', 'Regressor', (['LinearApproximator'], {'input_shape': 'mdp.info.observation_space.shape', 'output_shape': 'mdp.info.action_space.shape'}), '(LinearApproximator, input_shape=mdp.info.observation_space.shape,\n output_shape=mdp.info.action_space.shape)\n', (1058, 1170), False, 'from mushroom_rl.approximators.regressor import Regressor\n'), ((1238, 1359), 'mushroom_rl.approximators.regressor.Regressor', 'Regressor', (['LinearApproximator'], {'input_shape': 'mdp.info.observation_space.shape', 'output_shape': 'mdp.info.action_space.shape'}), '(LinearApproximator, input_shape=mdp.info.observation_space.shape,\n output_shape=mdp.info.action_space.shape)\n', (1247, 1359), False, 'from mushroom_rl.approximators.regressor import Regressor\n'), ((1504, 1547), 'mushroom_rl.policy.StateStdGaussianPolicy', 'StateStdGaussianPolicy', (['approximator', 'sigma'], {}), '(approximator, sigma)\n', (1526, 1547), False, 'from mushroom_rl.policy import StateStdGaussianPolicy\n'), ((1577, 1604), 'mushroom_rl.utils.optimizers.AdaptiveOptimizer', 'AdaptiveOptimizer', ([], {'eps': '(0.01)'}), '(eps=0.01)\n', (1594, 1604), False, 'from mushroom_rl.utils.optimizers import AdaptiveOptimizer\n'), ((1665, 1712), 'mushroom_rl.algorithms.policy_search.REINFORCE', 'REINFORCE', (['mdp.info', 'policy'], {}), '(mdp.info, policy, **algorithm_params)\n', (1674, 1712), False, 'from mushroom_rl.algorithms.policy_search import REINFORCE\n'), ((1756, 1793), 'mushroom_rl.utils.preprocessors.MinMaxPreprocessor', 'MinMaxPreprocessor', ([], {'mdp_info': 'mdp.info'}), '(mdp_info=mdp.info)\n', (1774, 1793), False, 'from mushroom_rl.utils.preprocessors import MinMaxPreprocessor\n'), ((1833, 1875), 'mushroom_rl.utils.callbacks.PlotDataset', 'PlotDataset', (['mdp.info'], {'obs_normalized': '(True)'}), '(mdp.info, obs_normalized=True)\n', (1844, 1875), False, 'from mushroom_rl.utils.callbacks import PlotDataset\n'), ((1900, 1963), 'mushroom_rl.core.Core', 'Core', (['agent', 'mdp'], {'callback_step': 'plotter', 'preprocessors': '[prepro]'}), '(agent, mdp, callback_step=plotter, preprocessors=[prepro])\n', (1904, 1963), False, 'from mushroom_rl.core import Core, Logger\n'), ((1425, 1452), 'numpy.ones', 'np.ones', (['sigma.weights_size'], {}), '(sigma.weights_size)\n', (1432, 1452), True, 'import numpy as np\n'), ((2437, 2487), 'os.makedirs', 'os.makedirs', (['"""./logs/plot_and_norm"""'], {'exist_ok': '(True)'}), "('./logs/plot_and_norm', exist_ok=True)\n", (2448, 2487), False, 'import os\n'), ((2726, 2790), 'mushroom_rl.utils.preprocessors.MinMaxPreprocessor.load', 'MinMaxPreprocessor.load', (['"""./logs/plot_and_norm/preprocessor.msh"""'], {}), "('./logs/plot_and_norm/preprocessor.msh')\n", (2749, 2790), False, 'from mushroom_rl.utils.preprocessors import MinMaxPreprocessor\n'), ((2211, 2245), 'mushroom_rl.utils.dataset.compute_J', 'compute_J', (['dataset', 'mdp.info.gamma'], {}), '(dataset, mdp.info.gamma)\n', (2220, 2245), False, 'from mushroom_rl.utils.dataset import compute_J\n')]
|
import dictionary
# HW 7
def main_menu():
print('Inventory Menu')
print('[1]: Add/Update Inventory')
print('[2]: Delete Inventory')
print('[3]: Exit')
user_input = input('Select an option: ')
if user_input == '1':
option1()
elif user_input == '2':
option2()
elif user_input == '3':
        print('Thank you...Goodbye!')
quit()
else:
print('You have entered an invalid entry')
print()
main_menu()
def option1():
type_dictionary = {}
print('Add / Update Widget Inventory')
widget_name = input('Enter Widget Name: ')
quantity = int(input('Enter the quantity of Widget: '))
widgets_dictionary = dictionary.add_inventory(type_dictionary, widget_name, quantity)
print()
print(widgets_dictionary)
print('\tNOTE* To update Widget Quantity, CONTINUE & use the same Widget Name.')
user_choice = input('Would you like to continue?: ')
while user_choice.lower() == 'y':
widget_name = input('Enter Widget Name: ')
quantity = int(input('Enter the quantity of Widget: '))
widgets_dictionary = dictionary.add_inventory(type_dictionary, widget_name, quantity)
print()
print(widgets_dictionary)
print()
user_choice = input('Would you like to continue?: ')
if user_choice.lower() == 'n':
main_menu()
if user_choice.lower() == 'n':
main_menu()
def option2():
type_dictionary = {'Widget1' : 0, 'Widget2': 10, 'Widget3': 20}
print('Remove Widget from Inventory')
print('\tCurrent Inventory: Widget1, Widget2, Widget3')
widget_name = input('Enter the name of Widget to Delete: ')
widgets_dictionary = dictionary.remove_inventory_widget(type_dictionary, widget_name)
print()
print(widgets_dictionary)
print()
user_choice = input('Would you like to remove another?: ')
while user_choice.lower() == 'y':
widget_name = input('Enter the name of Widget to Delete: ')
widgets_dictionary = dictionary.remove_inventory_widget(type_dictionary, widget_name)
print()
print(widgets_dictionary)
print()
user_choice = input('Would you like to continue?: ')
if user_choice.lower() == 'n':
main_menu()
if user_choice.lower() == 'n':
main_menu()
def option3():
print('Thank you.....Goodbye!')
quit()
main_menu()
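# The imported `dictionary` module is not shown in this file. A hypothetical sketch of
# what it is assumed to provide, inferred from the calls above (both helpers return the
# mutated dict so the menu code can print it):
#
#     def add_inventory(inventory, widget_name, quantity):
#         inventory[widget_name] = quantity  # add a widget or overwrite its quantity
#         return inventory
#
#     def remove_inventory_widget(inventory, widget_name):
#         inventory.pop(widget_name, None)  # drop the widget if it exists
#         return inventory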
# HW 6
# def main_menu():
# print('Main Menu')
# print('[1]: Get Distance Matrix')
# print('[2]: Exit')
# user_input = input('Select an option: ')
# if user_input == '1':
# option1()
# elif user_input == '2':
# option2()
# else:
# print('You have entered an invalid entry')
# print()
# main_menu()
# def option1():
# print()
# print('Calculating Distance Matrix\n' 'Enter at least 2 sequences of DNA, maximum of 10 characters each.')
# dataset = []
# add_sequence = 'Y'
# while add_sequence == 'y' or add_sequence == 'Y':
# dna_sequence = input('Enter the sequence of DNA: ')
# dna_list = list(dna_sequence)
# dataset.append(dna_list)
# print('Continue adding data?\n' 'Only continue if at least 2 sequences have been added.')
# print()
# add_sequence = input('Enter y to continue or n to calculate: ')
# print()
# print('Here is the data entered.')
# for data in dataset:
# print('\t', data)
# print()
# print('The Distance Matrix of the data provided is: ')
# p_distance_matrix = dictionary.get_p_distance_matrix(dataset)
# for row in p_distance_matrix:
# for e in row:
# print(format(e, '12.1f'), end=' ')
# print('')
# print()
# user_input = input('Would you like to continue again? y/n: ')
# user_result = user_input
# if user_result == 'n' or user_result == 'N':
# main_menu()
# if user_result == 'y' or user_result == 'Y':
# option1()
# def option2():
# print('Thank you....Goodbye')
# quit()
# main_menu()
|
[
"dictionary.remove_inventory_widget",
"dictionary.add_inventory"
] |
[((697, 761), 'dictionary.add_inventory', 'dictionary.add_inventory', (['type_dictionary', 'widget_name', 'quantity'], {}), '(type_dictionary, widget_name, quantity)\n', (721, 761), False, 'import dictionary\n'), ((1713, 1777), 'dictionary.remove_inventory_widget', 'dictionary.remove_inventory_widget', (['type_dictionary', 'widget_name'], {}), '(type_dictionary, widget_name)\n', (1747, 1777), False, 'import dictionary\n'), ((1128, 1192), 'dictionary.add_inventory', 'dictionary.add_inventory', (['type_dictionary', 'widget_name', 'quantity'], {}), '(type_dictionary, widget_name, quantity)\n', (1152, 1192), False, 'import dictionary\n'), ((2030, 2094), 'dictionary.remove_inventory_widget', 'dictionary.remove_inventory_widget', (['type_dictionary', 'widget_name'], {}), '(type_dictionary, widget_name)\n', (2064, 2094), False, 'import dictionary\n')]
|
import pygame
from src.const import *
from src.game_objects.foes.foe import Foe
from src.game_objects.projectiles.projectile import Projectile
class Bullet(Projectile):
def __init__(self, *args,
dim=(10,10),
depth=BULLET_DEPTH,
**kwargs):
self.images = {"init": pygame.Surface(dim)}
super().__init__(*args, depth=depth, init_image_key="init", **kwargs)
self.image.fill(YELLOW)
# kinematics
self.dx = 8
self.damage = 35
def draw(self):
super().draw()
def update(self):
super().update()
collidee = self.collide_with_class(Foe)
if collidee:
collidee.hp -= self.damage
collidee.damaged_flash()
self.kill()
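    # `collide_with_class` is inherited from the (unshown) Projectile base class.
    # Judging by its use in update(), it is assumed to return a colliding sprite of
    # the given class, or a falsy value. A hypothetical pygame-style sketch:
    #
    #     def collide_with_class(self, cls):
    #         for sprite in self.groups()[0]:  # assumed sprite registry
    #             if isinstance(sprite, cls) and self.rect.colliderect(sprite.rect):
    #                 return sprite
    #         return None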
|
[
"pygame.Surface"
] |
[((332, 351), 'pygame.Surface', 'pygame.Surface', (['dim'], {}), '(dim)\n', (346, 351), False, 'import pygame\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2018-04-08 12:16
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('teams', '0036_auto_20180403_0201'),
]
operations = [
migrations.AddField(
model_name='teammember',
name='irc_channel_acl_ok',
field=models.BooleanField(default=False, help_text='Maintained by the IRC bot, do not edit manually. True if the teammembers NickServ username has been added to the Team IRC channels ACL.'),
),
migrations.AlterField(
model_name='teammember',
name='approved',
field=models.BooleanField(default=False, help_text='True if this membership is approved. False if not.'),
),
migrations.AlterField(
model_name='teammember',
name='responsible',
field=models.BooleanField(default=False, help_text='True if this teammember is responsible for this Team. False if not.'),
),
migrations.AlterField(
model_name='teammember',
name='team',
field=models.ForeignKey(help_text='The Team this membership relates to', on_delete=django.db.models.deletion.PROTECT, to='teams.Team'),
),
migrations.AlterField(
model_name='teammember',
name='user',
field=models.ForeignKey(help_text='The User object this team membership relates to', on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
]
|
[
"django.db.models.ForeignKey",
"django.db.models.BooleanField"
] |
[((479, 672), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Maintained by the IRC bot, do not edit manually. True if the teammembers NickServ username has been added to the Team IRC channels ACL."""'}), "(default=False, help_text=\n 'Maintained by the IRC bot, do not edit manually. True if the teammembers NickServ username has been added to the Team IRC channels ACL.'\n )\n", (498, 672), False, 'from django.db import migrations, models\n'), ((790, 893), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""True if this membership is approved. False if not."""'}), "(default=False, help_text=\n 'True if this membership is approved. False if not.')\n", (809, 893), False, 'from django.db import migrations, models\n'), ((1019, 1139), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""True if this teammember is responsible for this Team. False if not."""'}), "(default=False, help_text=\n 'True if this teammember is responsible for this Team. False if not.')\n", (1038, 1139), False, 'from django.db import migrations, models\n'), ((1258, 1390), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""The Team this membership relates to"""', 'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""teams.Team"""'}), "(help_text='The Team this membership relates to',\n on_delete=django.db.models.deletion.PROTECT, to='teams.Team')\n", (1275, 1390), False, 'from django.db import migrations, models\n'), ((1510, 1672), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""The User object this team membership relates to"""', 'on_delete': 'django.db.models.deletion.PROTECT', 'to': 'settings.AUTH_USER_MODEL'}), "(help_text=\n 'The User object this team membership relates to', on_delete=django.db.\n models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)\n", (1527, 1672), False, 'from django.db import migrations, models\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2015 www.suishouguan.com
#
# Licensed under the Private License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/samuelbaizg/ssguan/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta
import unittest
from tornado.ioloop import IOLoop
from ssguan.ignitor.orm import dbpool, config as orm_config, update
from ssguan.ignitor.sched import scheduler
from ssguan.ignitor.sched.cronjob import CronJob, CJRunner
from ssguan.ignitor.utility import kind
class CJRunner1(CJRunner):
def __init__(self, cronjob):
super(CJRunner1, self).__init__(cronjob)
def run(self, run_params, cjrunlog, caller):
print("runonce")
class CJRunnerRALL1(CJRunner):
def __init__(self, cronjob):
super(CJRunnerRALL1, self).__init__(cronjob)
def run(self, cjrunlog, caller):
'_logger.info("CJRunnerRALL1l===%s===%s", caller, self.cronjob.run_params)'
class CJRunnerRALL2(CJRunner):
def __init__(self, cronjob):
super(CJRunnerRALL2, self).__init__(cronjob)
def run(self, cjrunlog, caller):
'_logger.info("CJRunnerRALL22===%s===%s", caller)'
class CJRunnerROnce1(CJRunner):
def __init__(self, cronjob):
super(CJRunnerROnce1, self).__init__(cronjob)
def run(self, cjrunlog, caller):
'_logger.info("CJRunnerROnce1===%s===%s", caller, self.cronjob.run_params)'
class SchedulerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
dbpool.create_db(orm_config.get_default_dbinfo(), dropped=True)
update.install()
update.upgrade('ignitor.audit')
update.upgrade('ignitor.sched')
def test_create_cronjob(self):
cronjob = scheduler.create_cronjob("jobcreate", "jobcredeeate", "tests.ignitor.sched.scheduler_test.CJRunner1", "cjnode1cc",
fire_year="2009/2",
fire_month="1",
fire_day="5",
fire_hour="2-3",
fire_minute="*")
self.assertEqual(cronjob.fire_year, "2009/2")
self.assertEqual(cronjob.fire_hour, "2-3")
self.assertEqual(cronjob.fire_minute, "*")
self.assertEqual(cronjob.fire_second, 0)
self.assertEqual(cronjob.job_desc, "jobcredeeate")
self.assertEqual(cronjob.job_group, None)
self.assertEqual(cronjob.job_node, "cjnode1cc")
cronjob = scheduler.create_cronjob("jobcreate2", "jobcredeeate2", "tests.ignitor.sched.scheduler_test.CJRunner1", "cjnode1cc",
job_group="deadadf", logged=False)
self.assertIsNotNone(cronjob.next_run_time)
self.assertEqual(cronjob.job_group, "deadadf")
self.assertFalse(cronjob.logged)
self.assertEqual(cronjob.job_node, "cjnode1cc")
def test_delete_cronjob(self):
cronjob = scheduler.create_cronjob("jobde", "jobde", "tests.ignitor.sched.scheduler_test.CJRunner1", "cjnode1cc",
fire_year="2009/2",
fire_month="1",
fire_day="5",
fire_hour="2-3",
fire_minute="*")
scheduler.delete_cronjob(cronjob.key())
job = scheduler.get_cronjob(job_id=cronjob.key())
self.assertIsNone(job)
def test_get_cronjob(self):
cronjob = scheduler.create_cronjob("jobdeccc", "jobdeeeee", "tests.ignitor.sched.scheduler_test.CJRunner1", "cjnode1cc",
fire_year="2009/2",
fire_month="1",
fire_day="5",
fire_hour="2-3",
fire_minute="*")
job2 = scheduler.get_cronjob(job_id=cronjob.key())
self.assertEqual(job2.job_name, cronjob.job_name)
job2 = scheduler.get_cronjob(job_name="jobdeccc")
self.assertEqual(job2.job_name, cronjob.job_name)
self.assertEqual(job2.key(), cronjob.key())
def test_break_cronjob(self):
cronjob = scheduler.create_cronjob("cjnode1bre", "cjnode1bre", "tests.ignitor.sched.scheduler_test.CJRunner1", "cjnode1bre",
fire_year="2009/2",
fire_month="1",
fire_day="5",
fire_hour="2-3",
fire_minute="*")
job = scheduler.break_cronjob(cronjob.key(), True)
self.assertTrue(job.broken)
job = scheduler.break_cronjob(cronjob.key(), False)
self.assertFalse(job.broken)
def test_fetch_cronjobs(self):
query = CronJob.all()
query.delete(None)
scheduler.create_cronjob("cjnodefetchcjs", "cjnodefetchcjs", "tests.ignitor.sched.scheduler_test.CJRunner1", "cjnodefetchcjs",
fire_year="2009/2",
fire_month="1",
fire_day="5",
fire_hour="2-3",
fire_minute="*")
scheduler.create_cronjob("2fffasdf22", "cjnodefetc22hcjs2", "tests.ignitor.sched.scheduler_test.CJRunner1", "cjnodefetchcjs",
fire_year="2009/2",
fire_month="1",
fire_day="5",
fire_hour="2-3",
fire_minute="*", broken=True)
scheduler.create_cronjob("fa2e32323", "22323", "tests.ignitor.sched.scheduler_test.CJRunner1", "cjnodefetchcjs",
fire_year="2009/2",
fire_month="1",
fire_day="5",
fire_hour="2-3",
fire_minute="*")
scheduler.create_cronjob("fa2e3aad2323", "2ddd2323", "tests.ignitor.sched.scheduler_test.CJRunner1", "cjnodefetchcjs222",
fire_year="2009/2",
fire_month="1",
fire_day="5",
fire_hour="2-3",
fire_minute="*")
cj = scheduler.create_cronjob("3222323fff", "asdfdf", "tests.ignitor.sched.scheduler_test.CJRunner1", "cjnodefetchcjs222",
fire_year="2009/2",
fire_month="1",
fire_day="5",
fire_hour="2-3",
fire_minute="*")
cjs = scheduler.fetch_cronjobs()
self.assertEqual(len(cjs), 5)
cjs = scheduler.fetch_cronjobs(job_name='cjnodefetchc')
self.assertEqual(len(cjs), 1)
cjs = scheduler.fetch_cronjobs(job_node="cjnodefetchcjs222")
self.assertEqual(len(cjs), 2)
cjs = scheduler.fetch_cronjobs(job_node="cjnodefetchcjs222", broken=True)
self.assertEqual(len(cjs), 0)
cj.broken = True
cj.update(None)
cjs = scheduler.fetch_cronjobs(job_node="cjnodefetchcjs222", broken=True)
self.assertEqual(len(cjs), 1)
def test_scheduler_new(self):
cjnode1 = scheduler.Scheduler("cjnodenew1")
cjnode12 = scheduler.Scheduler("cjnodenew1")
self.assertEqual(cjnode1, cjnode12)
self.assertEqual(cjnode12._node, "cjnodenew1")
cjnode2 = scheduler.Scheduler("cjnodenew2")
self.assertNotEqual(cjnode1, cjnode2)
self.assertEqual(cjnode2._node, "cjnodenew2")
def test_scheduler_run_all(self):
cj1 = scheduler.create_cronjob("runall1", "runall1", "tests.ignitor.sched.scheduler_test.CJRunnerRALL1", "runallnode1",
fire_year="*",
fire_month="*",
fire_day="*",
fire_hour="*",
fire_minute="*",
fire_second="0/5")
cj2 = scheduler.create_cronjob("runall2", "runall2", "tests.ignitor.sched.scheduler_test.CJRunnerRALL2", "runallnode1",
fire_year="*",
fire_month="*",
fire_day="*",
fire_hour="*",
fire_minute="*",
fire_second="0/3",
broken=True)
scheduler.create_cronjob("runall3", "runall3", "tests.ignitor.sched.scheduler_test.CJRunnerRALL2", "runallnode2",
fire_year="*",
fire_month="*",
fire_day="*",
fire_hour="*",
fire_minute="*",
fire_second="0/2")
def update_next_time():
q1 = cj1.all()
q1.filter("_id =", cj1.key())
q1.set("next_run_time", kind.utcnow() - timedelta(seconds=10))
q1.update(None)
q2 = cj1.all()
q2.filter("_id =", cj2.key())
q2.set("next_run_time", kind.utcnow() - timedelta(seconds=12))
q2.update(None)
update_next_time()
scher = scheduler.Scheduler("runallnode1")
scher.run_all(None, broken=False)
scheduler.break_cronjob(cj2.key(), False)
update_next_time()
scher.run_all(None)
def test_scheduler_run_once(self):
cj1 = scheduler.create_cronjob("runonce1", "runonce1", "tests.ignitor.sched.scheduler_test.CJRunnerROnce1", "runallnode222",
fire_year="*",
fire_month="*",
fire_day="*",
fire_hour="*",
fire_minute="*",
fire_second="0/5")
scher = scheduler.Scheduler("runallnode222")
scher.run_once(cj1.key(), None)
scher.run_once(cj1.key(), None)
scher.run_once(cj1.key(), None)
def test_scheduler_running(self):
cjnode1 = scheduler.Scheduler("cjnodenew1")
self.assertFalse(cjnode1.is_running())
cjnode12 = scheduler.Scheduler("cjnodenew1")
self.assertFalse(cjnode12.is_running())
io_loop = IOLoop.current()
def assert_running():
self.assertTrue(cjnode1.is_running())
self.assertTrue(cjnode12.is_running())
io_loop.call_later(0.1, assert_running)
io_loop.call_later(0.2, io_loop.stop)
cjnode1.start(1, io_loop=io_loop)
self.assertTrue(cjnode1.is_running())
self.assertTrue(cjnode12.is_running())
cjnode1.stop()
self.assertFalse(cjnode1.is_running())
self.assertFalse(cjnode12.is_running())
@classmethod
def tearDownClass(cls):
dbpool.drop_db(orm_config.get_default_dbinfo())
|
[
"tornado.ioloop.IOLoop.current",
"ssguan.ignitor.sched.scheduler.get_cronjob",
"ssguan.ignitor.utility.kind.utcnow",
"ssguan.ignitor.orm.config.get_default_dbinfo",
"ssguan.ignitor.sched.scheduler.create_cronjob",
"ssguan.ignitor.orm.update.install",
"ssguan.ignitor.orm.update.upgrade",
"ssguan.ignitor.sched.scheduler.fetch_cronjobs",
"datetime.timedelta",
"ssguan.ignitor.sched.scheduler.Scheduler",
"ssguan.ignitor.sched.cronjob.CronJob.all"
] |
[((2127, 2143), 'ssguan.ignitor.orm.update.install', 'update.install', ([], {}), '()\n', (2141, 2143), False, 'from ssguan.ignitor.orm import dbpool, config as orm_config, update\n'), ((2153, 2184), 'ssguan.ignitor.orm.update.upgrade', 'update.upgrade', (['"""ignitor.audit"""'], {}), "('ignitor.audit')\n", (2167, 2184), False, 'from ssguan.ignitor.orm import dbpool, config as orm_config, update\n'), ((2194, 2225), 'ssguan.ignitor.orm.update.upgrade', 'update.upgrade', (['"""ignitor.sched"""'], {}), "('ignitor.sched')\n", (2208, 2225), False, 'from ssguan.ignitor.orm import dbpool, config as orm_config, update\n'), ((2291, 2498), 'ssguan.ignitor.sched.scheduler.create_cronjob', 'scheduler.create_cronjob', (['"""jobcreate"""', '"""jobcredeeate"""', '"""tests.ignitor.sched.scheduler_test.CJRunner1"""', '"""cjnode1cc"""'], {'fire_year': '"""2009/2"""', 'fire_month': '"""1"""', 'fire_day': '"""5"""', 'fire_hour': '"""2-3"""', 'fire_minute': '"""*"""'}), "('jobcreate', 'jobcredeeate',\n 'tests.ignitor.sched.scheduler_test.CJRunner1', 'cjnode1cc', fire_year=\n '2009/2', fire_month='1', fire_day='5', fire_hour='2-3', fire_minute='*')\n", (2315, 2498), False, 'from ssguan.ignitor.sched import scheduler\n'), ((3101, 3261), 'ssguan.ignitor.sched.scheduler.create_cronjob', 'scheduler.create_cronjob', (['"""jobcreate2"""', '"""jobcredeeate2"""', '"""tests.ignitor.sched.scheduler_test.CJRunner1"""', '"""cjnode1cc"""'], {'job_group': '"""deadadf"""', 'logged': '(False)'}), "('jobcreate2', 'jobcredeeate2',\n 'tests.ignitor.sched.scheduler_test.CJRunner1', 'cjnode1cc', job_group=\n 'deadadf', logged=False)\n", (3125, 3261), False, 'from ssguan.ignitor.sched import scheduler\n'), ((3583, 3779), 'ssguan.ignitor.sched.scheduler.create_cronjob', 'scheduler.create_cronjob', (['"""jobde"""', '"""jobde"""', '"""tests.ignitor.sched.scheduler_test.CJRunner1"""', '"""cjnode1cc"""'], {'fire_year': '"""2009/2"""', 'fire_month': '"""1"""', 'fire_day': '"""5"""', 'fire_hour': '"""2-3"""', 'fire_minute': '"""*"""'}), "('jobde', 'jobde',\n 'tests.ignitor.sched.scheduler_test.CJRunner1', 'cjnode1cc', fire_year=\n '2009/2', fire_month='1', fire_day='5', fire_hour='2-3', fire_minute='*')\n", (3607, 3779), False, 'from ssguan.ignitor.sched import scheduler\n'), ((4188, 4391), 'ssguan.ignitor.sched.scheduler.create_cronjob', 'scheduler.create_cronjob', (['"""jobdeccc"""', '"""jobdeeeee"""', '"""tests.ignitor.sched.scheduler_test.CJRunner1"""', '"""cjnode1cc"""'], {'fire_year': '"""2009/2"""', 'fire_month': '"""1"""', 'fire_day': '"""5"""', 'fire_hour': '"""2-3"""', 'fire_minute': '"""*"""'}), "('jobdeccc', 'jobdeeeee',\n 'tests.ignitor.sched.scheduler_test.CJRunner1', 'cjnode1cc', fire_year=\n '2009/2', fire_month='1', fire_day='5', fire_hour='2-3', fire_minute='*')\n", (4212, 4391), False, 'from ssguan.ignitor.sched import scheduler\n'), ((4733, 4775), 'ssguan.ignitor.sched.scheduler.get_cronjob', 'scheduler.get_cronjob', ([], {'job_name': '"""jobdeccc"""'}), "(job_name='jobdeccc')\n", (4754, 4775), False, 'from ssguan.ignitor.sched import scheduler\n'), ((4948, 5155), 'ssguan.ignitor.sched.scheduler.create_cronjob', 'scheduler.create_cronjob', (['"""cjnode1bre"""', '"""cjnode1bre"""', '"""tests.ignitor.sched.scheduler_test.CJRunner1"""', '"""cjnode1bre"""'], {'fire_year': '"""2009/2"""', 'fire_month': '"""1"""', 'fire_day': '"""5"""', 'fire_hour': '"""2-3"""', 'fire_minute': '"""*"""'}), "('cjnode1bre', 'cjnode1bre',\n 'tests.ignitor.sched.scheduler_test.CJRunner1', 'cjnode1bre', fire_year\n ='2009/2', fire_month='1', fire_day='5', fire_hour='2-3', fire_minute='*')\n", (4972, 5155), False, 'from ssguan.ignitor.sched import scheduler\n'), ((5617, 5630), 'ssguan.ignitor.sched.cronjob.CronJob.all', 'CronJob.all', ([], {}), '()\n', (5628, 5630), False, 'from ssguan.ignitor.sched.cronjob import CronJob, CJRunner\n'), ((5668, 5890), 'ssguan.ignitor.sched.scheduler.create_cronjob', 'scheduler.create_cronjob', (['"""cjnodefetchcjs"""', '"""cjnodefetchcjs"""', '"""tests.ignitor.sched.scheduler_test.CJRunner1"""', '"""cjnodefetchcjs"""'], {'fire_year': '"""2009/2"""', 'fire_month': '"""1"""', 'fire_day': '"""5"""', 'fire_hour': '"""2-3"""', 'fire_minute': '"""*"""'}), "('cjnodefetchcjs', 'cjnodefetchcjs',\n 'tests.ignitor.sched.scheduler_test.CJRunner1', 'cjnodefetchcjs',\n fire_year='2009/2', fire_month='1', fire_day='5', fire_hour='2-3',\n fire_minute='*')\n", (5692, 5890), False, 'from ssguan.ignitor.sched import scheduler\n'), ((6103, 6337), 'ssguan.ignitor.sched.scheduler.create_cronjob', 'scheduler.create_cronjob', (['"""2fffasdf22"""', '"""cjnodefetc22hcjs2"""', '"""tests.ignitor.sched.scheduler_test.CJRunner1"""', '"""cjnodefetchcjs"""'], {'fire_year': '"""2009/2"""', 'fire_month': '"""1"""', 'fire_day': '"""5"""', 'fire_hour': '"""2-3"""', 'fire_minute': '"""*"""', 'broken': '(True)'}), "('2fffasdf22', 'cjnodefetc22hcjs2',\n 'tests.ignitor.sched.scheduler_test.CJRunner1', 'cjnodefetchcjs',\n fire_year='2009/2', fire_month='1', fire_day='5', fire_hour='2-3',\n fire_minute='*', broken=True)\n", (6127, 6337), False, 'from ssguan.ignitor.sched import scheduler\n'), ((6550, 6758), 'ssguan.ignitor.sched.scheduler.create_cronjob', 'scheduler.create_cronjob', (['"""fa2e32323"""', '"""22323"""', '"""tests.ignitor.sched.scheduler_test.CJRunner1"""', '"""cjnodefetchcjs"""'], {'fire_year': '"""2009/2"""', 'fire_month': '"""1"""', 'fire_day': '"""5"""', 'fire_hour': '"""2-3"""', 'fire_minute': '"""*"""'}), "('fa2e32323', '22323',\n 'tests.ignitor.sched.scheduler_test.CJRunner1', 'cjnodefetchcjs',\n fire_year='2009/2', fire_month='1', fire_day='5', fire_hour='2-3',\n fire_minute='*')\n", (6574, 6758), False, 'from ssguan.ignitor.sched import scheduler\n'), ((6971, 7188), 'ssguan.ignitor.sched.scheduler.create_cronjob', 'scheduler.create_cronjob', (['"""fa2e3aad2323"""', '"""2ddd2323"""', '"""tests.ignitor.sched.scheduler_test.CJRunner1"""', '"""cjnodefetchcjs222"""'], {'fire_year': '"""2009/2"""', 'fire_month': '"""1"""', 'fire_day': '"""5"""', 'fire_hour': '"""2-3"""', 'fire_minute': '"""*"""'}), "('fa2e3aad2323', '2ddd2323',\n 'tests.ignitor.sched.scheduler_test.CJRunner1', 'cjnodefetchcjs222',\n fire_year='2009/2', fire_month='1', fire_day='5', fire_hour='2-3',\n fire_minute='*')\n", (6995, 7188), False, 'from ssguan.ignitor.sched import scheduler\n'), ((7406, 7619), 'ssguan.ignitor.sched.scheduler.create_cronjob', 'scheduler.create_cronjob', (['"""3222323fff"""', '"""asdfdf"""', '"""tests.ignitor.sched.scheduler_test.CJRunner1"""', '"""cjnodefetchcjs222"""'], {'fire_year': '"""2009/2"""', 'fire_month': '"""1"""', 'fire_day': '"""5"""', 'fire_hour': '"""2-3"""', 'fire_minute': '"""*"""'}), "('3222323fff', 'asdfdf',\n 'tests.ignitor.sched.scheduler_test.CJRunner1', 'cjnodefetchcjs222',\n fire_year='2009/2', fire_month='1', fire_day='5', fire_hour='2-3',\n fire_minute='*')\n", (7430, 7619), False, 'from ssguan.ignitor.sched import scheduler\n'), ((7838, 7864), 'ssguan.ignitor.sched.scheduler.fetch_cronjobs', 'scheduler.fetch_cronjobs', ([], {}), '()\n', (7862, 7864), False, 'from ssguan.ignitor.sched import scheduler\n'), ((7919, 7968), 'ssguan.ignitor.sched.scheduler.fetch_cronjobs', 'scheduler.fetch_cronjobs', ([], {'job_name': '"""cjnodefetchc"""'}), "(job_name='cjnodefetchc')\n", (7943, 7968), False, 'from ssguan.ignitor.sched import scheduler\n'), ((8023, 8077), 'ssguan.ignitor.sched.scheduler.fetch_cronjobs', 'scheduler.fetch_cronjobs', ([], {'job_node': '"""cjnodefetchcjs222"""'}), "(job_node='cjnodefetchcjs222')\n", (8047, 8077), False, 'from ssguan.ignitor.sched import scheduler\n'), ((8132, 8199), 'ssguan.ignitor.sched.scheduler.fetch_cronjobs', 'scheduler.fetch_cronjobs', ([], {'job_node': '"""cjnodefetchcjs222"""', 'broken': '(True)'}), "(job_node='cjnodefetchcjs222', broken=True)\n", (8156, 8199), False, 'from ssguan.ignitor.sched import scheduler\n'), ((8305, 8372), 'ssguan.ignitor.sched.scheduler.fetch_cronjobs', 'scheduler.fetch_cronjobs', ([], {'job_node': '"""cjnodefetchcjs222"""', 'broken': '(True)'}), "(job_node='cjnodefetchcjs222', broken=True)\n", (8329, 8372), False, 'from ssguan.ignitor.sched import scheduler\n'), ((8476, 8509), 'ssguan.ignitor.sched.scheduler.Scheduler', 'scheduler.Scheduler', (['"""cjnodenew1"""'], {}), "('cjnodenew1')\n", (8495, 8509), False, 'from ssguan.ignitor.sched import scheduler\n'), ((8530, 8563), 'ssguan.ignitor.sched.scheduler.Scheduler', 'scheduler.Scheduler', (['"""cjnodenew1"""'], {}), "('cjnodenew1')\n", (8549, 8563), False, 'from ssguan.ignitor.sched import scheduler\n'), ((8692, 8725), 'ssguan.ignitor.sched.scheduler.Scheduler', 'scheduler.Scheduler', (['"""cjnodenew2"""'], {}), "('cjnodenew2')\n", (8711, 8725), False, 'from ssguan.ignitor.sched import scheduler\n'), ((8892, 9114), 'ssguan.ignitor.sched.scheduler.create_cronjob', 'scheduler.create_cronjob', (['"""runall1"""', '"""runall1"""', '"""tests.ignitor.sched.scheduler_test.CJRunnerRALL1"""', '"""runallnode1"""'], {'fire_year': '"""*"""', 'fire_month': '"""*"""', 'fire_day': '"""*"""', 'fire_hour': '"""*"""', 'fire_minute': '"""*"""', 'fire_second': '"""0/5"""'}), "('runall1', 'runall1',\n 'tests.ignitor.sched.scheduler_test.CJRunnerRALL1', 'runallnode1',\n fire_year='*', fire_month='*', fire_day='*', fire_hour='*', fire_minute\n ='*', fire_second='0/5')\n", (8916, 9114), False, 'from ssguan.ignitor.sched import scheduler\n'), ((9375, 9610), 'ssguan.ignitor.sched.scheduler.create_cronjob', 'scheduler.create_cronjob', (['"""runall2"""', '"""runall2"""', '"""tests.ignitor.sched.scheduler_test.CJRunnerRALL2"""', '"""runallnode1"""'], {'fire_year': '"""*"""', 'fire_month': '"""*"""', 'fire_day': '"""*"""', 'fire_hour': '"""*"""', 'fire_minute': '"""*"""', 'fire_second': '"""0/3"""', 'broken': '(True)'}), "('runall2', 'runall2',\n 'tests.ignitor.sched.scheduler_test.CJRunnerRALL2', 'runallnode1',\n fire_year='*', fire_month='*', fire_day='*', fire_hour='*', fire_minute\n ='*', fire_second='0/3', broken=True)\n", (9399, 9610), False, 'from ssguan.ignitor.sched import scheduler\n'), ((9908, 10130), 'ssguan.ignitor.sched.scheduler.create_cronjob', 'scheduler.create_cronjob', (['"""runall3"""', '"""runall3"""', '"""tests.ignitor.sched.scheduler_test.CJRunnerRALL2"""', '"""runallnode2"""'], {'fire_year': '"""*"""', 'fire_month': '"""*"""', 'fire_day': '"""*"""', 'fire_hour': '"""*"""', 'fire_minute': '"""*"""', 'fire_second': '"""0/2"""'}), "('runall3', 'runall3',\n 'tests.ignitor.sched.scheduler_test.CJRunnerRALL2', 'runallnode2',\n fire_year='*', fire_month='*', fire_day='*', fire_hour='*', fire_minute\n ='*', fire_second='0/2')\n", (9932, 10130), False, 'from ssguan.ignitor.sched import scheduler\n'), ((10822, 10856), 'ssguan.ignitor.sched.scheduler.Scheduler', 'scheduler.Scheduler', (['"""runallnode1"""'], {}), "('runallnode1')\n", (10841, 10856), False, 'from ssguan.ignitor.sched import scheduler\n'), ((11073, 11300), 'ssguan.ignitor.sched.scheduler.create_cronjob', 'scheduler.create_cronjob', (['"""runonce1"""', '"""runonce1"""', '"""tests.ignitor.sched.scheduler_test.CJRunnerROnce1"""', '"""runallnode222"""'], {'fire_year': '"""*"""', 'fire_month': '"""*"""', 'fire_day': '"""*"""', 'fire_hour': '"""*"""', 'fire_minute': '"""*"""', 'fire_second': '"""0/5"""'}), "('runonce1', 'runonce1',\n 'tests.ignitor.sched.scheduler_test.CJRunnerROnce1', 'runallnode222',\n fire_year='*', fire_month='*', fire_day='*', fire_hour='*', fire_minute\n ='*', fire_second='0/5')\n", (11097, 11300), False, 'from ssguan.ignitor.sched import scheduler\n'), ((11563, 11599), 'ssguan.ignitor.sched.scheduler.Scheduler', 'scheduler.Scheduler', (['"""runallnode222"""'], {}), "('runallnode222')\n", (11582, 11599), False, 'from ssguan.ignitor.sched import scheduler\n'), ((11787, 11820), 'ssguan.ignitor.sched.scheduler.Scheduler', 'scheduler.Scheduler', (['"""cjnodenew1"""'], {}), "('cjnodenew1')\n", (11806, 11820), False, 'from ssguan.ignitor.sched import scheduler\n'), ((11889, 11922), 'ssguan.ignitor.sched.scheduler.Scheduler', 'scheduler.Scheduler', (['"""cjnodenew1"""'], {}), "('cjnodenew1')\n", (11908, 11922), False, 'from ssguan.ignitor.sched import scheduler\n'), ((11991, 12007), 'tornado.ioloop.IOLoop.current', 'IOLoop.current', ([], {}), '()\n', (12005, 12007), False, 'from tornado.ioloop import IOLoop\n'), ((2071, 2102), 'ssguan.ignitor.orm.config.get_default_dbinfo', 'orm_config.get_default_dbinfo', ([], {}), '()\n', (2100, 2102), True, 'from ssguan.ignitor.orm import dbpool, config as orm_config, update\n'), ((12578, 12609), 'ssguan.ignitor.orm.config.get_default_dbinfo', 'orm_config.get_default_dbinfo', ([], {}), '()\n', (12607, 12609), True, 'from ssguan.ignitor.orm import dbpool, config as orm_config, update\n'), ((10517, 10530), 'ssguan.ignitor.utility.kind.utcnow', 'kind.utcnow', ([], {}), '()\n', (10528, 10530), False, 'from ssguan.ignitor.utility import kind\n'), ((10533, 10554), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (10542, 10554), False, 'from datetime import timedelta\n'), ((10701, 10714), 'ssguan.ignitor.utility.kind.utcnow', 'kind.utcnow', ([], {}), '()\n', (10712, 10714), False, 'from ssguan.ignitor.utility import kind\n'), ((10717, 10738), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(12)'}), '(seconds=12)\n', (10726, 10738), False, 'from datetime import timedelta\n')]
|
# Taken from: https://github.com/UKPLab/sentence-transformers/blob/master/sentence_transformers/util.py
import os
import sys
import requests
from tqdm.autonotebook import tqdm
import tarfile
def http_get(url, path):
"""
Downloads a URL to a given path on disc
"""
if os.path.dirname(path) != '':
os.makedirs(os.path.dirname(path), exist_ok=True)
req = requests.get(url, stream=True)
if req.status_code != 200:
print("Exception when trying to download {}. Response {}".format(url, req.status_code), file=sys.stderr)
req.raise_for_status()
return
download_filepath = path+"_part"
with open(download_filepath, "wb") as file_binary:
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total, unit_scale=True)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
file_binary.write(chunk)
os.rename(download_filepath, path)
progress.close()
def extract_archive(archive, destination):
with tarfile.open(archive, "r:gz") as tar:
tar.extractall(path=destination)
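# A hedged usage sketch; the URL and local paths below are illustrative assumptions,
# not part of the original utility module.
if __name__ == "__main__":
    example_url = "https://example.org/archive.tar.gz"  # hypothetical URL
    http_get(example_url, "downloads/archive.tar.gz")
    extract_archive("downloads/archive.tar.gz", "downloads/")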
|
[
"tqdm.autonotebook.tqdm",
"os.rename",
"os.path.dirname",
"requests.get",
"tarfile.open"
] |
[((372, 402), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (384, 402), False, 'import requests\n'), ((1088, 1122), 'os.rename', 'os.rename', (['download_filepath', 'path'], {}), '(download_filepath, path)\n', (1097, 1122), False, 'import os\n'), ((274, 295), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (289, 295), False, 'import os\n'), ((840, 884), 'tqdm.autonotebook.tqdm', 'tqdm', ([], {'unit': '"""B"""', 'total': 'total', 'unit_scale': '(True)'}), "(unit='B', total=total, unit_scale=True)\n", (844, 884), False, 'from tqdm.autonotebook import tqdm\n'), ((1198, 1227), 'tarfile.open', 'tarfile.open', (['archive', '"""r:gz"""'], {}), "(archive, 'r:gz')\n", (1210, 1227), False, 'import tarfile\n'), ((323, 344), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (338, 344), False, 'import os\n')]
|
# Generated by Django 2.0.7 on 2018-08-02 13:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0006_usertask_is_file_answer'),
]
operations = [
migrations.CreateModel(
name='UserValidateCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.IntegerField(blank=True, null=True, verbose_name='用户id')),
('email', models.CharField(blank=True, max_length=255, null=True, verbose_name='发送邮箱')),
('code', models.CharField(max_length=255)),
('created', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
],
),
]
|
[
"django.db.models.CharField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField",
"django.db.models.AutoField"
] |
[((342, 435), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (358, 435), False, 'from django.db import migrations, models\n'), ((462, 525), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""用户id"""'}), "(blank=True, null=True, verbose_name='用户id')\n", (481, 525), False, 'from django.db import migrations, models\n'), ((554, 630), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'null': '(True)', 'verbose_name': '"""发送邮箱"""'}), "(blank=True, max_length=255, null=True, verbose_name='发送邮箱')\n", (570, 630), False, 'from django.db import migrations, models\n'), ((658, 690), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (674, 690), False, 'from django.db import migrations, models\n'), ((721, 781), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""创建时间"""'}), "(auto_now_add=True, verbose_name='创建时间')\n", (741, 781), False, 'from django.db import migrations, models\n')]
|
"""
Tests for EmbargoMiddleware
"""
from contextlib import contextmanager
from unittest import mock
from unittest.mock import patch, MagicMock
import geoip2.database
import maxminddb
import ddt
import pytest
from django.conf import settings
from django.test.utils import override_settings
from django.core.cache import cache
from django.db import connection
from openedx.core.djangolib.testing.utils import skip_unless_lms
from common.djangoapps.student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase, mixed_store_config
)
from common.djangoapps.student.roles import (
GlobalStaff, CourseRole, OrgRole,
CourseStaffRole, CourseInstructorRole,
OrgStaffRole, OrgInstructorRole
)
from common.djangoapps.util.testing import UrlResetMixin
from ..models import (
RestrictedCourse, Country, CountryAccessRule,
)
from .. import api as embargo_api
from ..exceptions import InvalidAccessPoint
MODULESTORE_CONFIG = mixed_store_config(settings.COMMON_TEST_DATA_ROOT, {})
@ddt.ddt
@override_settings(MODULESTORE=MODULESTORE_CONFIG)
@skip_unless_lms
@mock.patch.dict(settings.FEATURES, {'EMBARGO': True})
class EmbargoCheckAccessApiTests(ModuleStoreTestCase):
"""Test the embargo API calls to determine whether a user has access. """
ENABLED_CACHES = ['default', 'mongo_metadata_inheritance', 'loc_cache']
def setUp(self):
super().setUp()
self.course = CourseFactory.create()
self.user = UserFactory.create()
self.restricted_course = RestrictedCourse.objects.create(course_key=self.course.id)
Country.objects.create(country='US')
Country.objects.create(country='IR')
Country.objects.create(country='CU')
# Clear the cache to prevent interference between tests
cache.clear()
@ddt.data(
# IP country, profile_country, blacklist, whitelist, allow_access
('US', None, [], [], True),
('IR', None, ['IR', 'CU'], [], False),
('US', 'IR', ['IR', 'CU'], [], False),
('IR', 'IR', ['IR', 'CU'], [], False),
('US', None, [], ['US'], True),
('IR', None, [], ['US'], False),
('US', 'IR', [], ['US'], False),
)
@ddt.unpack
def test_country_access_rules(self, ip_country, profile_country, blacklist, whitelist, allow_access):
# Configure the access rules
for whitelist_country in whitelist:
CountryAccessRule.objects.create(
rule_type=CountryAccessRule.WHITELIST_RULE,
restricted_course=self.restricted_course,
country=Country.objects.get(country=whitelist_country)
)
for blacklist_country in blacklist:
CountryAccessRule.objects.create(
rule_type=CountryAccessRule.BLACKLIST_RULE,
restricted_course=self.restricted_course,
country=Country.objects.get(country=blacklist_country)
)
# Configure the user's profile country
if profile_country is not None:
self.user.profile.country = profile_country
self.user.profile.save()
# Appear to make a request from an IP in a particular country
with self._mock_geoip(ip_country):
# Call the API. Note that the IP address we pass in doesn't
# matter, since we're injecting a mock for geo-location
result = embargo_api.check_course_access(self.course.id, user=self.user, ip_address='0.0.0.0')
# Verify that the access rules were applied correctly
assert result == allow_access
def test_no_user_has_access(self):
CountryAccessRule.objects.create(
rule_type=CountryAccessRule.BLACKLIST_RULE,
restricted_course=self.restricted_course,
country=Country.objects.get(country='US')
)
# The user is set to None, because the user has not been authenticated.
with self._mock_geoip(""):
result = embargo_api.check_course_access(self.course.id, ip_address='0.0.0.0')
assert result
def test_no_user_blocked(self):
CountryAccessRule.objects.create(
rule_type=CountryAccessRule.BLACKLIST_RULE,
restricted_course=self.restricted_course,
country=Country.objects.get(country='US')
)
with self._mock_geoip('US'):
# The user is set to None, because the user has not been authenticated.
result = embargo_api.check_course_access(self.course.id, ip_address='0.0.0.0')
assert not result
def test_course_not_restricted(self):
# No restricted course model for this course key,
# so all access checks should be skipped.
unrestricted_course = CourseFactory.create()
with self.assertNumQueries(1):
embargo_api.check_course_access(unrestricted_course.id, user=self.user, ip_address='0.0.0.0')
# The second check should require no database queries
with self.assertNumQueries(0):
embargo_api.check_course_access(unrestricted_course.id, user=self.user, ip_address='0.0.0.0')
def test_ip_v6(self):
# Test the scenario that will go through every check
# (restricted course, but pass all the checks)
with self._mock_geoip('US'):
result = embargo_api.check_course_access(self.course.id, user=self.user,
ip_address='FE80::0202:B3FF:FE1E:8329')
assert result
def test_country_access_fallback_to_continent_code(self):
# Simulate Geolite2 falling back to a continent code
# instead of a country code. In this case, we should
# allow the user access.
with self._mock_geoip('EU'):
result = embargo_api.check_course_access(self.course.id, user=self.user, ip_address='0.0.0.0')
assert result
@mock.patch.dict(settings.FEATURES, {'EMBARGO': True})
def test_profile_country_db_null(self):
# Django country fields treat NULL values inconsistently.
# When saving a profile with country set to None, Django saves an empty string to the database.
# However, when the country field loads a NULL value from the database, it sets
# `country.code` to `None`. This caused a bug in which country values created by
# the original South schema migration -- which defaulted to NULL -- caused a runtime
# exception when the embargo middleware treated the value as a string.
# In order to simulate this behavior, we can't simply set `profile.country = None`.
# (because when we save it, it will set the database field to an empty string instead of NULL)
query = "UPDATE auth_userprofile SET country = NULL WHERE id = %s"
connection.cursor().execute(query, [str(self.user.profile.id)])
# Verify that we can check the user's access without error
with self._mock_geoip('US'):
result = embargo_api.check_course_access(self.course.id, user=self.user, ip_address='0.0.0.0')
assert result
def test_caching(self):
with self._mock_geoip('US'):
# Test the scenario that will go through every check
# (restricted course, but pass all the checks)
# This is the worst case, so it will hit all of the
# caching code.
with self.assertNumQueries(3):
embargo_api.check_course_access(self.course.id, user=self.user, ip_address='0.0.0.0')
with self.assertNumQueries(0):
embargo_api.check_course_access(self.course.id, user=self.user, ip_address='0.0.0.0')
def test_caching_no_restricted_courses(self):
RestrictedCourse.objects.all().delete()
cache.clear()
with self.assertNumQueries(1):
embargo_api.check_course_access(self.course.id, user=self.user, ip_address='0.0.0.0')
with self.assertNumQueries(0):
embargo_api.check_course_access(self.course.id, user=self.user, ip_address='0.0.0.0')
@ddt.data(
GlobalStaff,
CourseStaffRole,
CourseInstructorRole,
OrgStaffRole,
OrgInstructorRole,
)
def test_staff_access_country_block(self, staff_role_cls):
# Add a country to the blacklist
CountryAccessRule.objects.create(
rule_type=CountryAccessRule.BLACKLIST_RULE,
restricted_course=self.restricted_course,
country=Country.objects.get(country='US')
)
# Appear to make a request from an IP in the blocked country
with self._mock_geoip('US'):
result = embargo_api.check_course_access(self.course.id, user=self.user, ip_address='0.0.0.0')
# Expect that the user is blocked, because the user isn't staff
assert not result, "User should not have access because the user isn't staff."
# Instantiate the role, configuring it for this course or org
if issubclass(staff_role_cls, CourseRole):
staff_role = staff_role_cls(self.course.id)
elif issubclass(staff_role_cls, OrgRole):
staff_role = staff_role_cls(self.course.id.org)
else:
staff_role = staff_role_cls()
# Add the user to the role
staff_role.add_users(self.user)
# Now the user should have access
with self._mock_geoip('US'):
result = embargo_api.check_course_access(self.course.id, user=self.user, ip_address='0.0.0.0')
assert result, 'User should have access because the user is staff.'
@contextmanager
def _mock_geoip(self, country_code):
"""
Mock for the GeoIP module.
"""
# pylint: disable=unused-argument
def mock_country(reader, country):
"""
:param reader:
:param country:
:return:
"""
magic_mock = MagicMock()
magic_mock.country = MagicMock()
type(magic_mock.country).iso_code = country_code
return magic_mock
patcher = patch.object(maxminddb, 'open_database')
patcher.start()
country_patcher = patch.object(geoip2.database.Reader, 'country', new=mock_country)
country_patcher.start()
self.addCleanup(patcher.stop)
self.addCleanup(country_patcher.stop)
yield
@ddt.ddt
@override_settings(MODULESTORE=MODULESTORE_CONFIG)
@skip_unless_lms
class EmbargoMessageUrlApiTests(UrlResetMixin, ModuleStoreTestCase):
"""Test the embargo API calls for retrieving the blocking message URLs. """
URLCONF_MODULES = ['openedx.core.djangoapps.embargo']
ENABLED_CACHES = ['default', 'mongo_metadata_inheritance', 'loc_cache']
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def setUp(self):
super().setUp()
self.course = CourseFactory.create()
@ddt.data(
('enrollment', '/embargo/blocked-message/enrollment/embargo/'),
('courseware', '/embargo/blocked-message/courseware/embargo/')
)
@ddt.unpack
def test_message_url_path(self, access_point, expected_url_path):
self._restrict_course(self.course.id)
# Retrieve the URL to the blocked message page
url_path = embargo_api.message_url_path(self.course.id, access_point)
assert url_path == expected_url_path
def test_message_url_path_caching(self):
self._restrict_course(self.course.id)
# The first time we retrieve the message, we'll need
# to hit the database.
with self.assertNumQueries(2):
embargo_api.message_url_path(self.course.id, "enrollment")
# The second time, we should be using cached values
with self.assertNumQueries(0):
embargo_api.message_url_path(self.course.id, "enrollment")
@ddt.data('enrollment', 'courseware')
def test_message_url_path_no_restrictions_for_course(self, access_point):
# No restrictions for the course
url_path = embargo_api.message_url_path(self.course.id, access_point)
# Use a default path
assert url_path == '/embargo/blocked-message/courseware/default/'
def test_invalid_access_point(self):
with pytest.raises(InvalidAccessPoint):
embargo_api.message_url_path(self.course.id, "invalid")
def test_message_url_stale_cache(self):
# Retrieve the URL once, populating the cache with the list
# of restricted courses.
self._restrict_course(self.course.id)
embargo_api.message_url_path(self.course.id, 'courseware')
# Delete the restricted course entry
RestrictedCourse.objects.get(course_key=self.course.id).delete()
# Clear the message URL cache
message_cache_key = (
'embargo.message_url_path.courseware.{course_key}'
).format(course_key=self.course.id)
cache.delete(message_cache_key)
# Try again. Even though the cache results are stale,
# we should still get a valid URL.
url_path = embargo_api.message_url_path(self.course.id, 'courseware')
assert url_path == '/embargo/blocked-message/courseware/default/'
def _restrict_course(self, course_key):
"""Restrict the user from accessing the course. """
country = Country.objects.create(country='us')
restricted_course = RestrictedCourse.objects.create(
course_key=course_key,
enroll_msg_key='embargo',
access_msg_key='embargo'
)
CountryAccessRule.objects.create(
restricted_course=restricted_course,
rule_type=CountryAccessRule.BLACKLIST_RULE,
country=country
)
|
[
"unittest.mock.patch.object",
"ddt.data",
"unittest.mock.MagicMock",
"common.djangoapps.student.tests.factories.UserFactory.create",
"django.core.cache.cache.clear",
"xmodule.modulestore.tests.django_utils.mixed_store_config",
"unittest.mock.patch.dict",
"django.db.connection.cursor",
"pytest.raises",
"django.core.cache.cache.delete",
"django.test.utils.override_settings",
"xmodule.modulestore.tests.factories.CourseFactory.create"
] |
[((1054, 1108), 'xmodule.modulestore.tests.django_utils.mixed_store_config', 'mixed_store_config', (['settings.COMMON_TEST_DATA_ROOT', '{}'], {}), '(settings.COMMON_TEST_DATA_ROOT, {})\n', (1072, 1108), False, 'from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, mixed_store_config\n'), ((1121, 1170), 'django.test.utils.override_settings', 'override_settings', ([], {'MODULESTORE': 'MODULESTORE_CONFIG'}), '(MODULESTORE=MODULESTORE_CONFIG)\n', (1138, 1170), False, 'from django.test.utils import override_settings\n'), ((1189, 1242), 'unittest.mock.patch.dict', 'mock.patch.dict', (['settings.FEATURES', "{'EMBARGO': True}"], {}), "(settings.FEATURES, {'EMBARGO': True})\n", (1204, 1242), False, 'from unittest import mock\n'), ((10492, 10541), 'django.test.utils.override_settings', 'override_settings', ([], {'MODULESTORE': 'MODULESTORE_CONFIG'}), '(MODULESTORE=MODULESTORE_CONFIG)\n', (10509, 10541), False, 'from django.test.utils import override_settings\n'), ((1904, 2168), 'ddt.data', 'ddt.data', (["('US', None, [], [], True)", "('IR', None, ['IR', 'CU'], [], False)", "('US', 'IR', ['IR', 'CU'], [], False)", "('IR', 'IR', ['IR', 'CU'], [], False)", "('US', None, [], ['US'], True)", "('IR', None, [], ['US'], False)", "('US', 'IR', [], ['US'], False)"], {}), "(('US', None, [], [], True), ('IR', None, ['IR', 'CU'], [], False),\n ('US', 'IR', ['IR', 'CU'], [], False), ('IR', 'IR', ['IR', 'CU'], [], \n False), ('US', None, [], ['US'], True), ('IR', None, [], ['US'], False),\n ('US', 'IR', [], ['US'], False))\n", (1912, 2168), False, 'import ddt\n'), ((5996, 6049), 'unittest.mock.patch.dict', 'mock.patch.dict', (['settings.FEATURES', "{'EMBARGO': True}"], {}), "(settings.FEATURES, {'EMBARGO': True})\n", (6011, 6049), False, 'from unittest import mock\n'), ((8166, 8263), 'ddt.data', 'ddt.data', (['GlobalStaff', 'CourseStaffRole', 'CourseInstructorRole', 'OrgStaffRole', 'OrgInstructorRole'], {}), '(GlobalStaff, CourseStaffRole, CourseInstructorRole, OrgStaffRole,\n OrgInstructorRole)\n', (8174, 8263), False, 'import ddt\n'), ((10849, 10897), 'unittest.mock.patch.dict', 'patch.dict', (['settings.FEATURES', "{'EMBARGO': True}"], {}), "(settings.FEATURES, {'EMBARGO': True})\n", (10859, 10897), False, 'from unittest.mock import patch, MagicMock\n'), ((10994, 11135), 'ddt.data', 'ddt.data', (["('enrollment', '/embargo/blocked-message/enrollment/embargo/')", "('courseware', '/embargo/blocked-message/courseware/embargo/')"], {}), "(('enrollment', '/embargo/blocked-message/enrollment/embargo/'), (\n 'courseware', '/embargo/blocked-message/courseware/embargo/'))\n", (11002, 11135), False, 'import ddt\n'), ((11936, 11972), 'ddt.data', 'ddt.data', (['"""enrollment"""', '"""courseware"""'], {}), "('enrollment', 'courseware')\n", (11944, 11972), False, 'import ddt\n'), ((1520, 1542), 'xmodule.modulestore.tests.factories.CourseFactory.create', 'CourseFactory.create', ([], {}), '()\n', (1540, 1542), False, 'from xmodule.modulestore.tests.factories import CourseFactory\n'), ((1563, 1583), 'common.djangoapps.student.tests.factories.UserFactory.create', 'UserFactory.create', ([], {}), '()\n', (1581, 1583), False, 'from common.djangoapps.student.tests.factories import UserFactory\n'), ((1884, 1897), 'django.core.cache.cache.clear', 'cache.clear', ([], {}), '()\n', (1895, 1897), False, 'from django.core.cache import cache\n'), ((4845, 4867), 'xmodule.modulestore.tests.factories.CourseFactory.create', 'CourseFactory.create', ([], {}), '()\n', (4865, 4867), False, 'from xmodule.modulestore.tests.factories 
import CourseFactory\n'), ((7870, 7883), 'django.core.cache.cache.clear', 'cache.clear', ([], {}), '()\n', (7881, 7883), False, 'from django.core.cache import cache\n'), ((10193, 10233), 'unittest.mock.patch.object', 'patch.object', (['maxminddb', '"""open_database"""'], {}), "(maxminddb, 'open_database')\n", (10205, 10233), False, 'from unittest.mock import patch, MagicMock\n'), ((10284, 10349), 'unittest.mock.patch.object', 'patch.object', (['geoip2.database.Reader', '"""country"""'], {'new': 'mock_country'}), "(geoip2.database.Reader, 'country', new=mock_country)\n", (10296, 10349), False, 'from unittest.mock import patch, MagicMock\n'), ((10965, 10987), 'xmodule.modulestore.tests.factories.CourseFactory.create', 'CourseFactory.create', ([], {}), '()\n', (10985, 10987), False, 'from xmodule.modulestore.tests.factories import CourseFactory\n'), ((12994, 13025), 'django.core.cache.cache.delete', 'cache.delete', (['message_cache_key'], {}), '(message_cache_key)\n', (13006, 13025), False, 'from django.core.cache import cache\n'), ((10025, 10036), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (10034, 10036), False, 'from unittest.mock import patch, MagicMock\n'), ((10070, 10081), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (10079, 10081), False, 'from unittest.mock import patch, MagicMock\n'), ((12329, 12362), 'pytest.raises', 'pytest.raises', (['InvalidAccessPoint'], {}), '(InvalidAccessPoint)\n', (12342, 12362), False, 'import pytest\n'), ((6892, 6911), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (6909, 6911), False, 'from django.db import connection\n')]
|
from prody import *
from numpy import *
from random import random
import os.path
import sys
import time
time.sleep(10)
ar = []
for arg in sys.argv:
ar.append(arg)
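# Positional arguments (see the assignments below):
#   1: initial pdb   2: final pdb   3: original initial pdb   4: original final pdb
#   5: coMD cycle number   6: step deviation (devi)   7: RMSD step cutoff
#   8: target acceptance ratio   9: ANM cutoff   10: max MC steps (N)
#   11: output dcd name   12: use-pseudoatoms flag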
initial_pdbn=ar[1]
final_pdbn=ar[2]
initial_pdb_id = initial_pdbn[:initial_pdbn.rfind('.')]
final_pdb_id = final_pdbn[:final_pdbn.rfind('.')]
original_initial_pdb = ar[3]
original_final_pdb = ar[4]
comd_cycle_number = ar[5]
if len(ar) > 6 and ar[6].strip() != '0':
devi = float(ar[6])
else:
devi = 0.5
if len(ar) > 7 and ar[7].strip() != '0':
stepcutoff=float(ar[7])
else:
stepcutoff=2.
if len(ar) > 8 and ar[8].strip() != '0':
acceptance_ratio = float(ar[8])
else:
acceptance_ratio = 0.9
accept_para = 0.1
if len(ar) > 9 and ar[9].strip() != '0':
anm_cut=float(ar[9])
else:
anm_cut=15
if len(ar) > 10 and ar[10].strip() != '0':
N=int(ar[10])
else:
N=10000
if len(ar) > 11 and ar[11].strip() != '0':
final_structure_dcd_name = ar[11]
else:
final_structure_dcd_name = 'cycle_{0}_'.format(int(comd_cycle_number)) + \
initial_pdb_id + '_' + final_pdb_id + '_final_structure.dcd'
if len(ar) > 12 and ar[12].strip() != '0':
usePseudoatoms = int(ar[12])
else:
usePseudoatoms = 0
initial_pdb = parsePDB(initial_pdbn)
final_pdb = parsePDB(final_pdbn)
if usePseudoatoms:
initial_pdb_ca = initial_pdb
final_pdb_ca = final_pdb
else:
initial_pdb_ca = initial_pdb.select('name CA or name BB')
final_pdb_ca = final_pdb.select('name CA or name BB')
# ANM calculation based on current
pdb_anm = ANM('pdb ca')
pdb_anm.buildHessian(initial_pdb_ca, cutoff=anm_cut)
pdb_anm.calcModes()
# Cumulative sum vector preparation for metropolis sampling
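# Modes are later picked with probability proportional to 1/sqrt(eigenvalue),
# so softer (low-frequency) modes are sampled more often; eigs also serves as
# the per-mode step amplitude.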
eigs = 1/sqrt(pdb_anm.getEigvals())
eigs_n = zeros(eigs.shape)
eigs_n = eigs / sum(eigs)
eigscumsum = eigs_n.cumsum()
U = pdb_anm.getEigvecs()
# Take a step along mode 1 (ID 0) to calculate the scale factor
pdb_ca = initial_pdb_ca
pdb_ca_temp = pdb_ca.copy()
ID = 0
direction = 1.
coords_temp = pdb_ca_temp.getCoords()
coords_temp[0:,0] = coords_temp[0:,0] + direction * U[range(0,len(U),3),ID] * eigs[ID]
coords_temp[0:,1] = coords_temp[0:,1] + direction * U[range(1,len(U),3),ID] * eigs[ID]
coords_temp[0:,2] = coords_temp[0:,2] + direction * U[range(2,len(U),3),ID] * eigs[ID]
pdb_ca_temp.setCoords(coords_temp)
pdb_ca = pdb_ca_temp.copy()
biggest_rmsd = calcRMSD(pdb_ca.getCoords(), initial_pdb_ca.getCoords())
scale_factor = devi/biggest_rmsd # This means that devi is the maximum deviation in RMSD for any step
# counts for metropolis sampling
count1 = 0 # Up-hill moves
count2 = 0 # Accepted up-hill moves
count3 = 0 # Down-hill moves
# read MC parameter from file
if os.path.isfile(initial_pdb_id + '_ratio.dat') and os.stat(initial_pdb_id + '_ratio.dat').st_size != 0:
MCpara = loadtxt(initial_pdb_id + '_ratio.dat')
accept_para = MCpara[4]
if MCpara[1] > acceptance_ratio + 0.05:
accept_para *= 1.5
elif MCpara[1] < acceptance_ratio - 0.05:
accept_para /= 1.5
else:
savetxt(initial_pdb_id + '_status.dat',[1])
#else:
# accept_para = 0.1
# MC parameter 1 is the acceptance ratio, which should converge on
# the selected value with a tolerance of 0.05 either side
# and accept_para is adjusted to help bring it within these limits.
# This also happens every 5 steps during the run (see the adjustment inside the MC loop below).
if original_initial_pdb != original_final_pdb:
# difference from the target structure is defined as the energy and the minimum is zero.
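    # i.e. E = sum over residue pairs (i, j) of (d_ij(target) - d_ij(current))**2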
native_dist = buildDistMatrix(final_pdb_ca)
dist = buildDistMatrix(initial_pdb_ca)
Ep = sum((native_dist - dist)**2)
# Reset pdb_ca (the current structure whole the steps back to the original)
pdb_ca = initial_pdb_ca
step_count = 0
check_step_counts = [0]
sys.stdout.write(' '*2 + 'rmsd' + ' '*2 + 'rand' + ' '*2 + 'ID' + ' '*3 + 'step' \
+ ' '*2 + 'accept_para' + ' '*5 + 'f' + '\n')
# MC Loop
for k in range(N):
pdb_ca_temp = pdb_ca.copy()
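    # pick a mode according to the 1/sqrt(eigenvalue) weights and a random +/- direction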
rand = random()
ID = argmax(rand<eigscumsum)
direction = 2*(random()>0.5)-1
coords_temp = pdb_ca_temp.getCoords()
coords_temp[0:,0] = coords_temp[0:,0] + direction * U[range(0,len(U),3),ID] * eigs[ID] * scale_factor
coords_temp[0:,1] = coords_temp[0:,1] + direction * U[range(1,len(U),3),ID] * eigs[ID] * scale_factor
coords_temp[0:,2] = coords_temp[0:,2] + direction * U[range(2,len(U),3),ID] * eigs[ID] * scale_factor
pdb_ca_temp.setCoords(coords_temp)
if original_initial_pdb != original_final_pdb:
dist = buildDistMatrix(pdb_ca_temp)
En = sum((native_dist - dist)**2)
# Check whether you are heading the right way and accept uphill moves
# depending on the Metropolis criterion. Classically this depends on RT
# but this is subsumed by the unknown units from having a uniform
# spring constant that is set to 1.
if Ep > En:
count3 += 1
pdb_ca = pdb_ca_temp.copy()
Ep = En
accepted = 1
elif exp(-(En-Ep) * accept_para) > random():
pdb_ca = pdb_ca_temp.copy()
count1 += 1
count2 += 1
Ep = En
accepted = 1
else:
count1 += 1
accepted = 0
if count1 == 0:
f = 1.
else:
f = float(count2)/float(count1)
if (mod(k,5)==0 and not(k==0)):
# Update of the accept_para to keep the MC para reasonable
        # See the comment above where accept_para is first read and adjusted.
if f > acceptance_ratio + 0.05:
accept_para /= 1.5;
elif f < acceptance_ratio - 0.05:
accept_para *= 1.5
if accept_para < 0.001: accept_para = 0.001
else:
# for exploration based on one structure
# all moves are uphill but will be accepted anyway
pdb_ca = pdb_ca_temp.copy()
count3 += 1
accepted = 1
f = 1.
rmsd = calcRMSD(pdb_ca.getCoords(), initial_pdb_ca.getCoords())
sys.stdout.write('{:6.2f}'.format(rmsd) + ' ' + '{:5.2f}'.format(rand) + \
'{:4d}'.format(ID) + '{:7d}'.format(k) + ' '*2 + str(accepted) + ' '*2 + \
'{:5.4f}'.format(accept_para) + ' '*2 + '{:5.4f}'.format(f) + '\n')
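    # stop sampling once the structure has drifted more than stepcutoff (RMSD)
    # away from the starting coordinates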
if rmsd > stepcutoff:
break
# Build an ensemble for writing the final structure to a dcd file
ensemble_final = Ensemble()
ensemble_final.setAtoms(initial_pdb_ca)
ensemble_final.setCoords(initial_pdb_ca)
ensemble_final.addCoordset(pdb_ca.getCoords())
writeDCD(final_structure_dcd_name, ensemble_final)
ratios = [count2/N, count2/count1 if count1 != 0 else 0, count2, k, accept_para ]
savetxt(initial_pdb_id + '_ratio.dat', ratios, fmt='%.2e')
|
[
"sys.stdout.write",
"random.random",
"time.sleep"
] |
[((105, 119), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (115, 119), False, 'import time\n'), ((3813, 3955), 'sys.stdout.write', 'sys.stdout.write', (["(' ' * 2 + 'rmsd' + ' ' * 2 + 'rand' + ' ' * 2 + 'ID' + ' ' * 3 + 'step' + \n ' ' * 2 + 'accept_para' + ' ' * 5 + 'f' + '\\n')"], {}), "(' ' * 2 + 'rmsd' + ' ' * 2 + 'rand' + ' ' * 2 + 'ID' + ' ' *\n 3 + 'step' + ' ' * 2 + 'accept_para' + ' ' * 5 + 'f' + '\\n')\n", (3829, 3955), False, 'import sys\n'), ((4034, 4042), 'random.random', 'random', ([], {}), '()\n', (4040, 4042), False, 'from random import random\n'), ((4095, 4103), 'random.random', 'random', ([], {}), '()\n', (4101, 4103), False, 'from random import random\n'), ((5105, 5113), 'random.random', 'random', ([], {}), '()\n', (5111, 5113), False, 'from random import random\n')]
|
"""
__author__: HashTagML
license: MIT
Created: Monday, 29th March 2021
"""
import os
import warnings
from pathlib import Path
from typing import Union
import imagesize
import yaml
import numpy as np
import pandas as pd
from .base import FormatSpec
from .utils import exists, get_image_dir, get_annotation_dir
class Yolo(FormatSpec):
"""Represents a YOLO annotation object.
Args:
root (Union[str, os.PathLike]): path to root directory. Expects the ``root`` directory to have either
of the following layouts:
.. code-block:: bash
root
├── images
│ ├── train
│ │ ├── 1.jpg
│ │ ├── 2.jpg
│ │ │ ...
│ │ └── n.jpg
│ ├── valid (...)
│ └── test (...)
│
└── annotations
├── train
│ ├── 1.txt
│ ├── 2.txt
│ │ ...
│ └── n.txt
├── valid (...)
├── test (...)
└── dataset.yaml [Optional]
or,
.. code-block:: bash
root
├── images
│ ├── 1.jpg
│ ├── 2.jpg
│ │ ...
│ └── n.jpg
│
└── annotations
├── 1.txt
├── 2.txt
│ ...
├── n.txt
└── dataset.yaml [Optional]
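
    Example:
        A minimal usage sketch (the root path is hypothetical; the parsed
        annotations are exposed as the ``master_df`` dataframe built by
        ``_resolve_dataframe``)::

            yolo = Yolo("/data/my_dataset")
            yolo.master_df.head()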
"""
def __init__(self, root: Union[str, os.PathLike]):
# self.root = root
super().__init__(root)
self.class_file = [y for y in Path(self.root).glob("*.yaml")]
self._image_dir = get_image_dir(root)
self._annotation_dir = get_annotation_dir(root)
self._has_image_split = False
assert exists(self._image_dir), "root is missing 'images' directory."
assert exists(self._annotation_dir), "root is missing 'annotations' directory."
self._find_splits()
self._resolve_dataframe()
def _resolve_dataframe(self):
master_df = pd.DataFrame(
columns=[
"split",
"image_id",
"image_width",
"image_height",
"x_min",
"y_min",
"width",
"height",
"category",
"image_path",
],
)
for split in self._splits:
image_ids = []
image_paths = []
class_ids = []
x_mins = []
y_mins = []
bbox_widths = []
bbox_heights = []
image_heights = []
image_widths = []
split = split if self._has_image_split else ""
annotations = Path(self._annotation_dir).joinpath(split).glob("*.txt")
for txt in annotations:
stem = txt.stem
try:
img_file = list(Path(self._image_dir).joinpath(split).glob(f"{stem}*"))[0]
im_width, im_height = imagesize.get(img_file)
with open(txt, "r") as f:
instances = f.read().strip().split("\n")
for ins in instances:
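                    # each line is "class cx cy w h" with the box normalised to the
                    # image size; convert to absolute pixel x_min/y_min/width/height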
class_id, x, y, w, h = list(map(float, ins.split()))
image_ids.append(img_file.name)
image_paths.append(img_file)
class_ids.append(int(class_id))
x_mins.append(max(float((float(x) - w / 2) * im_width), 0))
y_mins.append(max(float((y - h / 2) * im_height), 0))
bbox_widths.append(float(w * im_width))
bbox_heights.append(float(h * im_height))
image_widths.append(im_width)
image_heights.append(im_height)
except IndexError: # if the image file does not exist
pass
annots_df = pd.DataFrame(
list(
zip(
image_ids,
image_paths,
image_widths,
image_heights,
class_ids,
x_mins,
y_mins,
bbox_widths,
bbox_heights,
)
),
columns=[
"image_id",
"image_path",
"image_width",
"image_height",
"class_id",
"x_min",
"y_min",
"width",
"height",
],
)
annots_df["split"] = split if split else "main"
master_df = pd.concat([master_df, annots_df], ignore_index=True)
# get category names from `dataset.yaml`
try:
with open(Path(self._annotation_dir).joinpath("dataset.yaml")) as f:
label_desc = yaml.load(f, Loader=yaml.FullLoader)
categories = label_desc["names"]
label_map = dict(zip(range(len(categories)), categories))
except FileNotFoundError:
label_map = dict()
warnings.warn(f"No `dataset.yaml` file found in {self._annotation_dir}")
master_df["class_id"] = master_df["class_id"].astype(np.int32)
if label_map:
master_df["category"] = master_df["class_id"].map(label_map)
else:
master_df["category"] = master_df["class_id"].astype(str)
self.master_df = master_df
|
[
"pandas.DataFrame",
"yaml.load",
"imagesize.get",
"pathlib.Path",
"warnings.warn",
"pandas.concat"
] |
[((2240, 2381), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['split', 'image_id', 'image_width', 'image_height', 'x_min', 'y_min',\n 'width', 'height', 'category', 'image_path']"}), "(columns=['split', 'image_id', 'image_width', 'image_height',\n 'x_min', 'y_min', 'width', 'height', 'category', 'image_path'])\n", (2252, 2381), True, 'import pandas as pd\n'), ((5074, 5126), 'pandas.concat', 'pd.concat', (['[master_df, annots_df]'], {'ignore_index': '(True)'}), '([master_df, annots_df], ignore_index=True)\n', (5083, 5126), True, 'import pandas as pd\n'), ((5300, 5336), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (5309, 5336), False, 'import yaml\n'), ((5530, 5602), 'warnings.warn', 'warnings.warn', (['f"""No `dataset.yaml` file found in {self._annotation_dir}"""'], {}), "(f'No `dataset.yaml` file found in {self._annotation_dir}')\n", (5543, 5602), False, 'import warnings\n'), ((3233, 3256), 'imagesize.get', 'imagesize.get', (['img_file'], {}), '(img_file)\n', (3246, 3256), False, 'import imagesize\n'), ((1784, 1799), 'pathlib.Path', 'Path', (['self.root'], {}), '(self.root)\n', (1788, 1799), False, 'from pathlib import Path\n'), ((2949, 2975), 'pathlib.Path', 'Path', (['self._annotation_dir'], {}), '(self._annotation_dir)\n', (2953, 2975), False, 'from pathlib import Path\n'), ((5212, 5238), 'pathlib.Path', 'Path', (['self._annotation_dir'], {}), '(self._annotation_dir)\n', (5216, 5238), False, 'from pathlib import Path\n'), ((3132, 3153), 'pathlib.Path', 'Path', (['self._image_dir'], {}), '(self._image_dir)\n', (3136, 3153), False, 'from pathlib import Path\n')]
|
"""
Authors: <NAME>.
Copyright:
Copyright (c) 2021 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import tensorflow as tf
import numpy as np
import pytest
import sys
import os
# Athos DIR
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
from tests.utils import TFConfig, Compiler, assert_almost_equal
@pytest.mark.parametrize(
"a_shape, out_shape",
[
([2, 3], [6]),
([6], [2, 3]),
([2, 3], [3, 2]),
([2, 3], [-1]), # Flatten 1-D,
([1], []), # convert to scalar,
([3, 2, 3], [2, -1]), # infer -1 as 9,
([3, 2, 3], [-1, 9]), # infer -1 as 2
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_reshape(test_dir, backend, a_shape, out_shape, dtype):
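    # Build the TF graph, run it natively to get the expected output, then
    # compile the same graph with the Compiler helper and check that the MPC
    # backend's output matches to 2 decimal places.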
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.reshape(a, out_shape, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
assert expected_output is not None
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape, perm",
[([2, 3], [1, 0]), ([2, 4, 3], [0, 2, 1])], # normal transpose, with perm
)
@pytest.mark.parametrize("dtype", [np.single])
def test_transpose(test_dir, backend, a_shape, perm, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.transpose(a, perm, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape, num_or_size_splits, axis",
[
([2, 10], 5, 1),
pytest.param(
[5, 7],
[1, 4, 2],
1,
marks=pytest.mark.skip(
reason="[split] don't support split into specific sizes (SplitV)"
),
),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_split(test_dir, backend, a_shape, num_or_size_splits, axis, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.split(a, num_or_size_splits, axis, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
if type(output) == list:
tf_output = output[-1]
tf_expected_output = expected_output[-1]
else:
tf_output = output
tf_expected_output = expected_output
config = TFConfig(backend).add_input(a).add_output(tf_output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=tf_expected_output, mpc_tensor=mpc_output, precision=2
)
return
# Squeeze
@pytest.mark.parametrize(
"a_shape, axis",
[
pytest.param(
[1, 2, 1, 3, 1, 1],
None,
marks=pytest.mark.skip(reason="[squeeze] Parametric squeeze not supported"),
),
pytest.param(
[1, 2, 1, 3, 1, 1],
[2, 4],
marks=pytest.mark.skip(reason="[squeeze] Parametric squeeze not supported"),
),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_squeeze(test_dir, backend, a_shape, axis, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.squeeze(a, axis=axis, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape, begin, size",
[
([3, 2, 3], [1, 0, 0], [1, 1, 3]),
([3, 2, 3], [1, 0, 0], [1, 2, 3]),
([3, 2, 3], [1, 0, 0], [2, 1, 3]),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_slice(test_dir, backend, a_shape, begin, size, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.slice(a, begin, size, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape, b_shape, axis",
[
([2, 3], [3, 3], 0),
([2, 3, 2, 1], [2, 6, 2, 1], 1),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_concat(test_dir, backend, a_shape, b_shape, axis, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
b_inp = dtype(np.random.randn(*b_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
b = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=b_inp.shape, name="b")
output = tf.concat([a, b], axis, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp, b: b_inp})
config = TFConfig(backend).add_input(a).add_input(b).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp, b_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
# ExpandDims
@pytest.mark.parametrize(
"a_shape, axis",
[
pytest.param(
[3, 2, 3], 1, marks=pytest.mark.skip(reason="[expand_dims] not supported")
),
pytest.param(
[2, 5], 0, marks=pytest.mark.skip(reason="[expand_dims] not supported")
),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_expand_dims(test_dir, backend, a_shape, axis, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.expand_dims(a, axis, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
# Pad
@pytest.mark.parametrize(
"a_shape, paddings, mode, constant_values",
[
([1, 2, 2, 1], [[1, 1], [1, 2], [1, 1], [1, 3]], "CONSTANT", 0),
pytest.param(
[1, 2, 2, 1],
[[1, 1], [1, 2], [1, 1], [1, 3]],
"REFLECT",
0,
marks=pytest.mark.skip(reason="[pad] REFLECT not supported"),
),
pytest.param(
[1, 2, 2, 1],
[[1, 1], [1, 2], [1, 1], [1, 3]],
"SYMMETRIC",
0,
marks=pytest.mark.skip(reason="[pad] SYMMETRIC not supported"),
),
pytest.param(
[2, 3],
[
[1, 1],
[2, 2],
],
"CONSTANT",
0,
marks=pytest.mark.skip(reason="[pad] Generic pad not supported"),
),
pytest.param(
[1, 2, 2, 1],
[[1, 1], [1, 2], [1, 1], [1, 3]],
"CONSTANT",
1.2,
marks=pytest.mark.skip(reason="[pad] non-zero padding not supported"),
),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_pad(test_dir, backend, a_shape, paddings, mode, constant_values, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
pad = tf.constant(paddings, name="paddings")
output = tf.pad(
a, pad, mode=mode, constant_values=constant_values, name="output"
)
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
# Tile
@pytest.mark.parametrize(
"a_shape, multiples", [([2, 3], [1, 2]), ([2, 3], [2, 1]), ([2, 3], [2, 2])]
)
@pytest.mark.parametrize("dtype", [np.single])
@pytest.mark.skip(reason="[tile] Not supported")
def test_tile(test_dir, backend, a_shape, multiples, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
mults = tf.constant(multiples, name="multiples")
output = tf.tile(a, mults, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
|
[
"tensorflow.reshape",
"pytest.mark.parametrize",
"tests.utils.Compiler",
"tensorflow.split",
"pytest.mark.skip",
"numpy.random.randn",
"os.path.dirname",
"tensorflow.pad",
"tensorflow.concat",
"tests.utils.assert_almost_equal",
"tensorflow.compat.v1.Session",
"tensorflow.squeeze",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.tile",
"tensorflow.Graph",
"tensorflow.expand_dims",
"tensorflow.as_dtype",
"tensorflow.slice"
] |
[((1331, 1505), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""a_shape, out_shape"""', '[([2, 3], [6]), ([6], [2, 3]), ([2, 3], [3, 2]), ([2, 3], [-1]), ([1], []),\n ([3, 2, 3], [2, -1]), ([3, 2, 3], [-1, 9])]'], {}), "('a_shape, out_shape', [([2, 3], [6]), ([6], [2, 3]),\n ([2, 3], [3, 2]), ([2, 3], [-1]), ([1], []), ([3, 2, 3], [2, -1]), ([3,\n 2, 3], [-1, 9])])\n", (1354, 1505), False, 'import pytest\n'), ((1646, 1691), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (1669, 1691), False, 'import pytest\n'), ((2433, 2521), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""a_shape, perm"""', '[([2, 3], [1, 0]), ([2, 4, 3], [0, 2, 1])]'], {}), "('a_shape, perm', [([2, 3], [1, 0]), ([2, 4, 3], [0,\n 2, 1])])\n", (2456, 2521), False, 'import pytest\n'), ((2561, 2606), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (2584, 2606), False, 'import pytest\n'), ((3635, 3680), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (3658, 3680), False, 'import pytest\n'), ((5025, 5070), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (5048, 5070), False, 'import pytest\n'), ((5769, 5931), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""a_shape, begin, size"""', '[([3, 2, 3], [1, 0, 0], [1, 1, 3]), ([3, 2, 3], [1, 0, 0], [1, 2, 3]), ([3,\n 2, 3], [1, 0, 0], [2, 1, 3])]'], {}), "('a_shape, begin, size', [([3, 2, 3], [1, 0, 0], [1,\n 1, 3]), ([3, 2, 3], [1, 0, 0], [1, 2, 3]), ([3, 2, 3], [1, 0, 0], [2, 1,\n 3])])\n", (5792, 5931), False, 'import pytest\n'), ((5967, 6012), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (5990, 6012), False, 'import pytest\n'), ((6716, 6825), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""a_shape, b_shape, axis"""', '[([2, 3], [3, 3], 0), ([2, 3, 2, 1], [2, 6, 2, 1], 1)]'], {}), "('a_shape, b_shape, axis', [([2, 3], [3, 3], 0), ([2,\n 3, 2, 1], [2, 6, 2, 1], 1)])\n", (6739, 6825), False, 'import pytest\n'), ((6857, 6902), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (6880, 6902), False, 'import pytest\n'), ((8081, 8126), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (8104, 8126), False, 'import pytest\n'), ((9910, 9955), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (9933, 9955), False, 'import pytest\n'), ((10793, 10899), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""a_shape, multiples"""', '[([2, 3], [1, 2]), ([2, 3], [2, 1]), ([2, 3], [2, 2])]'], {}), "('a_shape, multiples', [([2, 3], [1, 2]), ([2, 3], [\n 2, 1]), ([2, 3], [2, 2])])\n", (10816, 10899), False, 'import pytest\n'), ((10902, 10947), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (10925, 10947), False, 'import pytest\n'), ((10949, 10996), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[tile] Not supported"""'}), "(reason='[tile] Not supported')\n", (10965, 10996), False, 'import pytest\n'), ((1768, 1778), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1776, 1778), True, 'import tensorflow as 
tf\n'), ((2230, 2263), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (2238, 2263), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((2319, 2408), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (2338, 2408), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((2680, 2690), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2688, 2690), True, 'import tensorflow as tf\n'), ((3101, 3134), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (3109, 3134), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((3190, 3279), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (3209, 3279), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((3770, 3780), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3778, 3780), True, 'import tensorflow as tf\n'), ((4401, 4434), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (4409, 4434), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((4490, 4582), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'tf_expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=tf_expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (4509, 4582), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((5142, 5152), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5150, 5152), True, 'import tensorflow as tf\n'), ((5566, 5599), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (5574, 5599), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((5655, 5744), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (5674, 5744), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((6089, 6099), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6097, 6099), True, 'import tensorflow as tf\n'), ((6513, 6546), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (6521, 6546), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((6602, 6691), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (6621, 6691), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((6982, 6992), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6990, 6992), True, 'import tensorflow as tf\n'), ((7559, 7592), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (7567, 7592), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((7655, 7744), 'tests.utils.assert_almost_equal', 
'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (7674, 7744), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((8202, 8212), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (8210, 8212), True, 'import tensorflow as tf\n'), ((8625, 8658), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (8633, 8658), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((8714, 8803), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (8733, 8803), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((10050, 10060), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (10058, 10060), True, 'import tensorflow as tf\n'), ((10583, 10616), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (10591, 10616), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((10672, 10761), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (10691, 10761), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((11070, 11080), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (11078, 11080), True, 'import tensorflow as tf\n'), ((11544, 11577), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (11552, 11577), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((11633, 11722), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (11652, 11722), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((1220, 1245), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1235, 1245), False, 'import os\n'), ((1797, 1822), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (1812, 1822), True, 'import numpy as np\n'), ((1956, 1995), 'tensorflow.reshape', 'tf.reshape', (['a', 'out_shape'], {'name': '"""output"""'}), "(a, out_shape, name='output')\n", (1966, 1995), True, 'import tensorflow as tf\n'), ((2005, 2038), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (2025, 2038), True, 'import tensorflow as tf\n'), ((2709, 2734), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (2724, 2734), True, 'import numpy as np\n'), ((2868, 2904), 'tensorflow.transpose', 'tf.transpose', (['a', 'perm'], {'name': '"""output"""'}), "(a, perm, name='output')\n", (2880, 2904), True, 'import tensorflow as tf\n'), ((2914, 2947), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (2934, 2947), True, 'import tensorflow as tf\n'), ((3799, 3824), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (3814, 3824), True, 'import numpy as np\n'), ((3958, 4010), 'tensorflow.split', 'tf.split', (['a', 
'num_or_size_splits', 'axis'], {'name': '"""output"""'}), "(a, num_or_size_splits, axis, name='output')\n", (3966, 4010), True, 'import tensorflow as tf\n'), ((4020, 4053), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (4040, 4053), True, 'import tensorflow as tf\n'), ((5171, 5196), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (5186, 5196), True, 'import numpy as np\n'), ((5330, 5369), 'tensorflow.squeeze', 'tf.squeeze', (['a'], {'axis': 'axis', 'name': '"""output"""'}), "(a, axis=axis, name='output')\n", (5340, 5369), True, 'import tensorflow as tf\n'), ((5379, 5412), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (5399, 5412), True, 'import tensorflow as tf\n'), ((6118, 6143), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (6133, 6143), True, 'import numpy as np\n'), ((6277, 6316), 'tensorflow.slice', 'tf.slice', (['a', 'begin', 'size'], {'name': '"""output"""'}), "(a, begin, size, name='output')\n", (6285, 6316), True, 'import tensorflow as tf\n'), ((6326, 6359), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (6346, 6359), True, 'import tensorflow as tf\n'), ((7011, 7036), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (7026, 7036), True, 'import numpy as np\n'), ((7056, 7081), 'numpy.random.randn', 'np.random.randn', (['*b_shape'], {}), '(*b_shape)\n', (7071, 7081), True, 'import numpy as np\n'), ((7301, 7339), 'tensorflow.concat', 'tf.concat', (['[a, b]', 'axis'], {'name': '"""output"""'}), "([a, b], axis, name='output')\n", (7310, 7339), True, 'import tensorflow as tf\n'), ((7349, 7382), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (7369, 7382), True, 'import tensorflow as tf\n'), ((8231, 8256), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (8246, 8256), True, 'import numpy as np\n'), ((8390, 8428), 'tensorflow.expand_dims', 'tf.expand_dims', (['a', 'axis'], {'name': '"""output"""'}), "(a, axis, name='output')\n", (8404, 8428), True, 'import tensorflow as tf\n'), ((8438, 8471), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (8458, 8471), True, 'import tensorflow as tf\n'), ((10079, 10104), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (10094, 10104), True, 'import numpy as np\n'), ((10235, 10273), 'tensorflow.constant', 'tf.constant', (['paddings'], {'name': '"""paddings"""'}), "(paddings, name='paddings')\n", (10246, 10273), True, 'import tensorflow as tf\n'), ((10291, 10364), 'tensorflow.pad', 'tf.pad', (['a', 'pad'], {'mode': 'mode', 'constant_values': 'constant_values', 'name': '"""output"""'}), "(a, pad, mode=mode, constant_values=constant_values, name='output')\n", (10297, 10364), True, 'import tensorflow as tf\n'), ((10396, 10429), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (10416, 10429), True, 'import tensorflow as tf\n'), ((11099, 11124), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (11114, 11124), True, 'import numpy as np\n'), ((11257, 11297), 'tensorflow.constant', 'tf.constant', (['multiples'], {'name': '"""multiples"""'}), "(multiples, name='multiples')\n", (11268, 11297), True, 'import tensorflow as tf\n'), ((11315, 11347), 'tensorflow.tile', 'tf.tile', (['a', 
'mults'], {'name': '"""output"""'}), "(a, mults, name='output')\n", (11322, 11347), True, 'import tensorflow as tf\n'), ((11357, 11390), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (11377, 11390), True, 'import tensorflow as tf\n'), ((1890, 1908), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (1901, 1908), True, 'import tensorflow as tf\n'), ((2802, 2820), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (2813, 2820), True, 'import tensorflow as tf\n'), ((3892, 3910), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (3903, 3910), True, 'import tensorflow as tf\n'), ((5264, 5282), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (5275, 5282), True, 'import tensorflow as tf\n'), ((6211, 6229), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (6222, 6229), True, 'import tensorflow as tf\n'), ((7149, 7167), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (7160, 7167), True, 'import tensorflow as tf\n'), ((7235, 7253), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (7246, 7253), True, 'import tensorflow as tf\n'), ((8324, 8342), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (8335, 8342), True, 'import tensorflow as tf\n'), ((10172, 10190), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (10183, 10190), True, 'import tensorflow as tf\n'), ((11192, 11210), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (11203, 11210), True, 'import tensorflow as tf\n'), ((3499, 3587), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[split] don\'t support split into specific sizes (SplitV)"""'}), '(reason=\n "[split] don\'t support split into specific sizes (SplitV)")\n', (3515, 3587), False, 'import pytest\n'), ((4759, 4828), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[squeeze] Parametric squeeze not supported"""'}), "(reason='[squeeze] Parametric squeeze not supported')\n", (4775, 4828), False, 'import pytest\n'), ((4933, 5002), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[squeeze] Parametric squeeze not supported"""'}), "(reason='[squeeze] Parametric squeeze not supported')\n", (4949, 5002), False, 'import pytest\n'), ((7888, 7942), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[expand_dims] not supported"""'}), "(reason='[expand_dims] not supported')\n", (7904, 7942), False, 'import pytest\n'), ((8005, 8059), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[expand_dims] not supported"""'}), "(reason='[expand_dims] not supported')\n", (8021, 8059), False, 'import pytest\n'), ((9136, 9190), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[pad] REFLECT not supported"""'}), "(reason='[pad] REFLECT not supported')\n", (9152, 9190), False, 'import pytest\n'), ((9355, 9411), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[pad] SYMMETRIC not supported"""'}), "(reason='[pad] SYMMETRIC not supported')\n", (9371, 9411), False, 'import pytest\n'), ((9600, 9658), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[pad] Generic pad not supported"""'}), "(reason='[pad] Generic pad not supported')\n", (9616, 9658), False, 'import pytest\n'), ((9824, 9887), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[pad] non-zero padding not supported"""'}), "(reason='[pad] non-zero padding not supported')\n", (9840, 9887), False, 'import pytest\n')]
|
#!/usr/bin/python
# Author: <NAME> <<EMAIL>>
# Copyright (c) 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_ozw as sensorObj
def main():
# This function lets you run code on exit
def exitHandler():
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
defaultDev = "/dev/ttyACM0"
if (len(sys.argv) > 1):
defaultDev = sys.argv[1]
print("Using device", defaultDev)
# Instantiate an Aeotec DSB09104 instance, on device node 12. You
# will almost certainly need to change this to reflect your own
# network. Use the ozwdump example to see what nodes are available.
sensor = sensorObj.AeotecDSB09104(12)
# The first thing to do is create options, then lock them when done.
sensor.optionsCreate()
sensor.optionsLock()
# Next, initialize it.
print("Initializing, this may take awhile depending on your ZWave network")
sensor.init(defaultDev)
print("Initialization complete")
print("Querying data...")
while (True):
sensor.update()
print("Watts, Channel 1: %0.03f W" % sensor.getWattsC1())
print("Watts, Channel 2: %0.03f W" % sensor.getWattsC2())
print("Watts, Channel 3: %0.03f W" % sensor.getWattsC3())
print("Energy, Channel 1: %0.03f kWh" % sensor.getEnergyC1())
print("Energy, Channel 2: %0.03f kWh" % sensor.getEnergyC2())
print("Energy, Channel 3: %0.03f kWh" % sensor.getEnergyC3())
print("Battery Level: %d\n" % sensor.getBatteryLevel())
time.sleep(3)
if __name__ == '__main__':
main()
|
[
"atexit.register",
"time.sleep",
"sys.exit",
"upm.pyupm_ozw.AeotecDSB09104"
] |
[((1416, 1444), 'atexit.register', 'atexit.register', (['exitHandler'], {}), '(exitHandler)\n', (1431, 1444), False, 'import time, sys, signal, atexit\n'), ((1803, 1831), 'upm.pyupm_ozw.AeotecDSB09104', 'sensorObj.AeotecDSB09104', (['(12)'], {}), '(12)\n', (1827, 1831), True, 'from upm import pyupm_ozw as sensorObj\n'), ((1370, 1381), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1378, 1381), False, 'import time, sys, signal, atexit\n'), ((2689, 2702), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2699, 2702), False, 'import time, sys, signal, atexit\n')]
|
#!/usr/bin/env python
"""
_Conditions_t_
Condition job splitting test
"""
import unittest
import threading
import logging
import time
from WMCore.WMBS.File import File
from WMCore.WMBS.Fileset import Fileset
from WMCore.WMBS.Subscription import Subscription
from WMCore.WMBS.Workflow import Workflow
from WMCore.DataStructs.Run import Run
from WMCore.DAOFactory import DAOFactory
from WMCore.JobSplitting.SplitterFactory import SplitterFactory
from WMQuality.TestInit import TestInit
class ConditionTest(unittest.TestCase):
"""
    _ConditionTest_
    Test for Condition job splitter
"""
def setUp(self):
"""
_setUp_
"""
import WMQuality.TestInit
WMQuality.TestInit.deleteDatabaseAfterEveryTest("I'm Serious")
self.testInit = TestInit(__file__)
self.testInit.setLogging()
self.testInit.setDatabaseConnection()
self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer", "T0.WMBS"])
self.splitterFactory = SplitterFactory(package = "T0.JobSplitting")
myThread = threading.currentThread()
daoFactory = DAOFactory(package = "T0.WMBS",
logger = logging,
dbinterface = myThread.dbi)
wmbsDaoFactory = DAOFactory(package = "WMCore.WMBS",
logger = logging,
dbinterface = myThread.dbi)
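        # Register a site / PNN pair so that streamer files can be given a location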
myThread.dbi.processData("""INSERT INTO wmbs_location
(id, site_name, state, state_time)
VALUES (1, 'SomeSite', 1, 1)
""", transaction = False)
myThread.dbi.processData("""INSERT INTO wmbs_pnns
(id, pnn)
VALUES (1, 'SomePNN')
""", transaction = False)
myThread.dbi.processData("""INSERT INTO wmbs_location_pnns
(location, pnn)
VALUES (1, 1)
""", transaction = False)
insertRunDAO = daoFactory(classname = "RunConfig.InsertRun")
insertRunDAO.execute(binds = { 'RUN' : 1,
'HLTKEY' : "someHLTKey" },
transaction = False)
insertLumiDAO = daoFactory(classname = "RunConfig.InsertLumiSection")
insertLumiDAO.execute(binds = { 'RUN' : 1,
'LUMI' : 1 },
transaction = False)
insertStreamDAO = daoFactory(classname = "RunConfig.InsertStream")
insertStreamDAO.execute(binds = { 'STREAM' : "Express" },
transaction = False)
insertStreamFilesetDAO = daoFactory(classname = "RunConfig.InsertStreamFileset")
insertStreamFilesetDAO.execute(1, "Express", "TestFileset1")
insertStreamerDAO = daoFactory(classname = "RunConfig.InsertStreamer")
insertStreamerDAO.execute(streamerPNN = "SomePNN",
binds = { 'RUN' : 1,
'P5_ID' : 1,
'LUMI' : 1,
'STREAM' : "Express",
'TIME' : int(time.time()),
'LFN' : "/streamer",
'FILESIZE' : 0,
'EVENTS' : 0 },
transaction = False)
insertPromptCalibrationDAO = daoFactory(classname = "RunConfig.InsertPromptCalibration")
insertPromptCalibrationDAO.execute( { 'RUN' : 1,
'STREAM' : "Express",
'NUM_PRODUCER' : 1},
transaction = False)
self.markPromptCalibrationFinishedDAO = daoFactory(classname = "ConditionUpload.MarkPromptCalibrationFinished")
self.fileset1 = Fileset(name = "TestFileset1")
self.fileset1.create()
workflow1 = Workflow(spec = "spec.xml", owner = "hufnagel", name = "TestWorkflow1", task="Test")
workflow1.create()
self.subscription1 = Subscription(fileset = self.fileset1,
workflow = workflow1,
split_algo = "Condition",
type = "Condition")
self.subscription1.create()
# set parentage chain and sqlite fileset
alcaRecoFile = File("/alcareco", size = 0, events = 0)
alcaRecoFile.addRun(Run(1, *[1]))
alcaRecoFile.setLocation("SomePNN", immediateSave = False)
alcaRecoFile.create()
alcaPromptFile = File("/alcaprompt", size = 0, events = 0)
alcaPromptFile.addRun(Run(1, *[1]))
alcaPromptFile.setLocation("SomePNN", immediateSave = False)
alcaPromptFile.create()
sqliteFile = File("/sqlite", size = 0, events = 0)
sqliteFile.create()
self.fileset1.addFile(sqliteFile)
self.fileset1.commit()
results = myThread.dbi.processData("""SELECT lfn FROM wmbs_file_details
""",
transaction = False)[0].fetchall()
setParentageDAO = wmbsDaoFactory(classname = "Files.SetParentage")
setParentageDAO.execute(binds = [ { 'parent' : "/streamer",
'child' : "/alcareco" },
{ 'parent' : "/alcareco",
'child' : "/alcaprompt" },
{ 'parent' : "/alcaprompt",
'child' : "/sqlite" } ],
transaction = False)
# default split parameters
self.splitArgs = {}
self.splitArgs['runNumber'] = 1
self.splitArgs['streamName'] = "Express"
return
def tearDown(self):
"""
_tearDown_
"""
self.testInit.clearDatabase()
return
def isPromptCalibFinished(self):
"""
_isPromptCalibFinished_
"""
myThread = threading.currentThread()
result = myThread.dbi.processData("""SELECT finished
FROM prompt_calib
""",
transaction = False)[0].fetchall()[0][0]
return result
def countPromptCalibFiles(self):
"""
        _countPromptCalibFiles_
"""
myThread = threading.currentThread()
result = myThread.dbi.processData("""SELECT COUNT(*)
FROM prompt_calib_file
""",
transaction = False)[0].fetchall()[0][0]
return result
def test00(self):
"""
_test00_
        Make sure the job splitter behaves correctly:
        it should do nothing while the fileset is open and
        populate the t0ast data structures once it is closed.
        In the latter case all input files should be marked
        as acquired without creating a job as well.
"""
mySplitArgs = self.splitArgs.copy()
jobFactory = self.splitterFactory(package = "WMCore.WMBS",
subscription = self.subscription1)
self.assertEqual(self.isPromptCalibFinished(), 0,
"ERROR: prompt_calib should not be finished")
self.assertEqual(self.countPromptCalibFiles(), 0,
"ERROR: there should be no prompt_calib_file")
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(self.isPromptCalibFinished(), 0,
"ERROR: prompt_calib should not be finished")
self.assertEqual(self.countPromptCalibFiles(), 1,
"ERROR: there should be one prompt_calib_file")
self.markPromptCalibrationFinishedDAO.execute(1, 1, transaction = False)
self.fileset1.markOpen(False)
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 0,
"ERROR: JobFactory should have returned no JobGroup")
self.assertEqual(self.isPromptCalibFinished(), 1,
"ERROR: prompt_calib should be finished")
self.assertEqual(self.countPromptCalibFiles(), 1,
"ERROR: there should be one prompt_calib_file")
return
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"WMCore.WMBS.Workflow.Workflow",
"WMCore.WMBS.File.File",
"WMCore.DAOFactory.DAOFactory",
"WMCore.WMBS.Subscription.Subscription",
"time.time",
"WMQuality.TestInit.TestInit",
"WMCore.DataStructs.Run.Run",
"WMCore.WMBS.Fileset.Fileset",
"WMCore.JobSplitting.SplitterFactory.SplitterFactory",
"threading.currentThread"
] |
[((8955, 8970), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8968, 8970), False, 'import unittest\n'), ((793, 811), 'WMQuality.TestInit.TestInit', 'TestInit', (['__file__'], {}), '(__file__)\n', (801, 811), False, 'from WMQuality.TestInit import TestInit\n'), ((1013, 1055), 'WMCore.JobSplitting.SplitterFactory.SplitterFactory', 'SplitterFactory', ([], {'package': '"""T0.JobSplitting"""'}), "(package='T0.JobSplitting')\n", (1028, 1055), False, 'from WMCore.JobSplitting.SplitterFactory import SplitterFactory\n'), ((1078, 1103), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (1101, 1103), False, 'import threading\n'), ((1125, 1196), 'WMCore.DAOFactory.DAOFactory', 'DAOFactory', ([], {'package': '"""T0.WMBS"""', 'logger': 'logging', 'dbinterface': 'myThread.dbi'}), "(package='T0.WMBS', logger=logging, dbinterface=myThread.dbi)\n", (1135, 1196), False, 'from WMCore.DAOFactory import DAOFactory\n'), ((1293, 1368), 'WMCore.DAOFactory.DAOFactory', 'DAOFactory', ([], {'package': '"""WMCore.WMBS"""', 'logger': 'logging', 'dbinterface': 'myThread.dbi'}), "(package='WMCore.WMBS', logger=logging, dbinterface=myThread.dbi)\n", (1303, 1368), False, 'from WMCore.DAOFactory import DAOFactory\n'), ((4192, 4220), 'WMCore.WMBS.Fileset.Fileset', 'Fileset', ([], {'name': '"""TestFileset1"""'}), "(name='TestFileset1')\n", (4199, 4220), False, 'from WMCore.WMBS.Fileset import Fileset\n'), ((4275, 4353), 'WMCore.WMBS.Workflow.Workflow', 'Workflow', ([], {'spec': '"""spec.xml"""', 'owner': '"""hufnagel"""', 'name': '"""TestWorkflow1"""', 'task': '"""Test"""'}), "(spec='spec.xml', owner='hufnagel', name='TestWorkflow1', task='Test')\n", (4283, 4353), False, 'from WMCore.WMBS.Workflow import Workflow\n'), ((4418, 4520), 'WMCore.WMBS.Subscription.Subscription', 'Subscription', ([], {'fileset': 'self.fileset1', 'workflow': 'workflow1', 'split_algo': '"""Condition"""', 'type': '"""Condition"""'}), "(fileset=self.fileset1, workflow=workflow1, split_algo=\n 'Condition', type='Condition')\n", (4430, 4520), False, 'from WMCore.WMBS.Subscription import Subscription\n'), ((4762, 4797), 'WMCore.WMBS.File.File', 'File', (['"""/alcareco"""'], {'size': '(0)', 'events': '(0)'}), "('/alcareco', size=0, events=0)\n", (4766, 4797), False, 'from WMCore.WMBS.File import File\n'), ((4966, 5003), 'WMCore.WMBS.File.File', 'File', (['"""/alcaprompt"""'], {'size': '(0)', 'events': '(0)'}), "('/alcaprompt', size=0, events=0)\n", (4970, 5003), False, 'from WMCore.WMBS.File import File\n'), ((5174, 5207), 'WMCore.WMBS.File.File', 'File', (['"""/sqlite"""'], {'size': '(0)', 'events': '(0)'}), "('/sqlite', size=0, events=0)\n", (5178, 5207), False, 'from WMCore.WMBS.File import File\n'), ((6473, 6498), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (6496, 6498), False, 'import threading\n'), ((6889, 6914), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (6912, 6914), False, 'import threading\n'), ((4830, 4842), 'WMCore.DataStructs.Run.Run', 'Run', (['(1)', '*[1]'], {}), '(1, *[1])\n', (4833, 4842), False, 'from WMCore.DataStructs.Run import Run\n'), ((5038, 5050), 'WMCore.DataStructs.Run.Run', 'Run', (['(1)', '*[1]'], {}), '(1, *[1])\n', (5041, 5050), False, 'from WMCore.DataStructs.Run import Run\n'), ((3429, 3440), 'time.time', 'time.time', ([], {}), '()\n', (3438, 3440), False, 'import time\n')]
|
import json
import os
ROOT = "/scratch/cluster/ishann/data/detectron2/output"
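# per-class AP dumps from two detection runs; their class-wise differences are computed below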
ret0006_json = os.path.join(ROOT, "inference_ret0006/model_0089999/lvis_v0.5_val/class_aps_ret0006_model_0089999.json")
ret0011_json = os.path.join(ROOT, "inference_ret0011/model_0089999/inference/lvis_v0.5_val/class_aps_ret0011_model_0089999.json")
with open(ret0006_json, "r") as file_:
data_06 = json.load(file_)
with open(ret0011_json, "r") as file_:
data_11 = json.load(file_)
diff_aps = {}
for k in data_06.keys():
diff_aps[k] = abs(data_06[k] - data_11[k])
import math
vals = [val for val in diff_aps.values() if (not math.isnan(val))]
|
[
"math.isnan",
"json.load",
"os.path.join"
] |
[((95, 208), 'os.path.join', 'os.path.join', (['ROOT', '"""inference_ret0006/model_0089999/lvis_v0.5_val/class_aps_ret0006_model_0089999.json"""'], {}), "(ROOT,\n 'inference_ret0006/model_0089999/lvis_v0.5_val/class_aps_ret0006_model_0089999.json'\n )\n", (107, 208), False, 'import os\n'), ((215, 338), 'os.path.join', 'os.path.join', (['ROOT', '"""inference_ret0011/model_0089999/inference/lvis_v0.5_val/class_aps_ret0011_model_0089999.json"""'], {}), "(ROOT,\n 'inference_ret0011/model_0089999/inference/lvis_v0.5_val/class_aps_ret0011_model_0089999.json'\n )\n", (227, 338), False, 'import os\n'), ((384, 400), 'json.load', 'json.load', (['file_'], {}), '(file_)\n', (393, 400), False, 'import json\n'), ((455, 471), 'json.load', 'json.load', (['file_'], {}), '(file_)\n', (464, 471), False, 'import json\n'), ((622, 637), 'math.isnan', 'math.isnan', (['val'], {}), '(val)\n', (632, 637), False, 'import math\n')]
|
import unittest
import SentimentAnalysis
class SentimentAnalysis_Test(unittest.TestCase):
def setUp(self):
self.sa = SentimentAnalysis.SentimentAnalysis()
def test(self):
params = [
'All too much of the man-made is ugly, inefficient, depressing chaos.'
]
response = self.sa.predict(params)
self.assertIsNotNone(response)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"SentimentAnalysis.SentimentAnalysis"
] |
[((417, 432), 'unittest.main', 'unittest.main', ([], {}), '()\n', (430, 432), False, 'import unittest\n'), ((131, 168), 'SentimentAnalysis.SentimentAnalysis', 'SentimentAnalysis.SentimentAnalysis', ([], {}), '()\n', (166, 168), False, 'import SentimentAnalysis\n')]
|
#!/usr/bin/python2
import time
import sys
sys.path.append("./")
from irc.bot import IRCBot
channels = ["#pricktest"]
bot = IRCBot('localhost',
6667,
channels=channels,
bot_nick='prickbot'
)
def main():
global bot
bot.start()
while bot.isAlive():
time.sleep(3)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
bot.stop()
exit(0)
|
[
"sys.path.append",
"irc.bot.IRCBot",
"time.sleep"
] |
[((42, 63), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (57, 63), False, 'import sys\n'), ((125, 190), 'irc.bot.IRCBot', 'IRCBot', (['"""localhost"""', '(6667)'], {'channels': 'channels', 'bot_nick': '"""prickbot"""'}), "('localhost', 6667, channels=channels, bot_nick='prickbot')\n", (131, 190), False, 'from irc.bot import IRCBot\n'), ((334, 347), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (344, 347), False, 'import time\n')]
|
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.base.datetools import dates_from_str
import datetime
import time
from sklearn import svm
import json
feature_file_cases = '/Users/edwardgent/Downloads/NYT_US_COVID19.csv'
feature_file_tests = '/Users/edwardgent/Downloads/CT_US_COVID_TESTS.csv'
results_file = '/Users/edwardgent/Downloads/COVID-19_Hospital_Capacity_Metrics.csv'
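# fraction of the time series used for fitting; the remaining days are held out and predicted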
test_threshold = 0.7
dataf = pd.DataFrame(columns=['New Cases', 'New Deaths', 'New Tests', 'Ventilators', 'ICU'])
chicago_cases = []
chicago_deaths = []
chicago_dates = []
chicago_ventilators = []
chicago_icu = []
chicago_tests = []
with open(feature_file_cases, newline='') as fh:
spamreader = csv.reader(fh, delimiter=',', quotechar='|')
for item in spamreader:
if item[2] == 'Illinois':
chicago_cases.append(item[8])
chicago_deaths.append(item[9])
chicago_dates.append(item[0])
fh.close()
chicago_dates = dates_from_str(chicago_dates)
dataf['New Cases'] = chicago_cases
dataf['New Deaths'] = chicago_deaths
dataf.index = pd.DatetimeIndex(chicago_dates)
temp_dates = []
with open(feature_file_tests, newline='') as fh:
spamreader = csv.reader(fh, delimiter=',', quotechar='|')
for item in spamreader:
if item[1] == 'Illinois':
chicago_tests.append(item[14])
temp_dates.append(item[2].split(' ')[0])
fh.close()
temp_dates = dates_from_str(temp_dates)
temp_df = pd.DataFrame(columns=['New Tests'])
temp_df['New Tests'] = chicago_tests
temp_df.index =pd.DatetimeIndex(temp_dates)
dataf = dataf.join(temp_df, lsuffix='_caller', rsuffix='_other')
dataf = dataf[~dataf.index.duplicated(keep='first')]
temp_dates = []
with open(results_file, newline='') as fh:
spamreader = csv.reader(fh, delimiter=',', quotechar='|')
for item in spamreader:
if item[0] == 'Date':
pass
else:
chicago_icu.append(item[14])
chicago_ventilators.append(item[1])
temp_dates.append(item[0].replace('/','-'))
fh.close()
temp_dates = dates_from_str(temp_dates)
temp_df = pd.DataFrame(columns=['Ventilators', 'ICU'])
temp_df['Ventilators'] = chicago_ventilators
temp_df['ICU'] = chicago_icu
temp_df.index = pd.DatetimeIndex(temp_dates)
dataf = dataf.join(temp_df, lsuffix='_caller', rsuffix='_other')
dataf = dataf[~dataf.index.duplicated(keep='first')]
def generate_model(dataf, train_col, train_features):
model = svm.SVC(probability=False, decision_function_shape='ovr', kernel='linear')
target_length = len(dataf['New Cases'])
target_length = int(test_threshold * target_length)
'''
cols_cases = dataf['New Cases']
cols_cases = cols_cases.tolist()[0:target_length]
cols_deaths = dataf['New Deaths']
cols_deaths = cols_deaths.tolist()[0:target_length]
cols_tests = dataf['New Tests_other']
cols_tests = cols_tests.tolist()[0:target_length]
feature_set = zip(cols_cases,cols_deaths,cols_tests)
feature_set = np.array(list(feature_set))
feature_set.reshape(133,3)
train_res = train_res.reshape(133,211)
train_res = train_res.ravel()
train_res_new = np.array_split(train_res, 133)
'''
#train_features = train_data[:,[3,4]].toarray()
#train_res = train_data[:,[1,2]].toarray()
#train_res = np.array(np.split(train_res,133))
'''
train_res_list = []
for item in train_res:
s = item[0]
train_res_list.append(''.join(str(s)))
print(train_res_list)
train_res = np.array(train_res_list)
'''
#train_res = train_res.reshape(133,211,1)
print(train_col)
#train_features = train_features.reshape(-1,1)
#train_res = train_res.reshape(-1,1)
print(train_features)
model.fit(train_features, np.array(train_col))
return model
def encode_results(dataf):
target_length = len(dataf['Ventilators_other'])
target_length = int(test_threshold*target_length)
cols_vent = dataf['Ventilators_other']
cols_vent = cols_vent.tolist()[0:target_length]
cols_icu = dataf['ICU_other']
cols_icu = cols_icu.tolist()[0:target_length]
backup_cols_vent = dataf['Ventilators_other'][target_length+1:-1]
backup_cols_icu = dataf['ICU_other'][target_length+1:-1]
dataf.drop(columns=['Ventilators_other', 'ICU_other'], inplace=True)
cols_cases = dataf['New Cases'][0:target_length]
cols_tests = dataf['New Tests_other'][0:target_length]
cols_deaths = dataf['New Deaths'][0:target_length]
backup_cols_cases = dataf['New Cases'][target_length+1:-1]
backup_cols_deaths = dataf['New Deaths'][target_length+1:-1]
backup_features = zip(backup_cols_deaths, backup_cols_cases)
backup_features = np.array(list(backup_features))
backup_features = backup_features.astype('int32')
train_icu = cols_icu
train_vent = cols_vent
train_features = zip(cols_deaths, cols_cases)
data_backup = zip(backup_cols_deaths,backup_cols_cases)
data_backup = np.array(list(data_backup))
data_backup = data_backup.astype('int32')
train_features = np.array(list(train_features)).astype('int32')
n_data=[]
for i in train_features:
n_data.append(list([int(j) for j in i]))
train_features = n_data #np.array(n_data)
for item in train_features:
for val in item:
item[item.index(val)] = int(val)
for item in train_vent:
train_vent[train_vent.index(item)] = int(item)
for item in train_icu:
train_icu[train_icu.index(item)] = int(item)
#my_list = []
#for ex in train_res:
# my_list.append(ex.tolist())
#print(train_res).
#train_res = np.array(dates_from_str())
return train_icu, train_vent, data_backup, backup_features, train_features
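# Fit one linear SVM per target (ICU occupancy and ventilator use) on the first
# 70% of days, predict the held-out remainder and print the predictions as JSON.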
if __name__ == '__main__':
train_icu, train_vent, data_backup, backup_features, train_features = encode_results(dataf)
trained_model_icu = generate_model(dataf, train_icu, train_features)
trained_model_vent = generate_model(dataf, train_vent, train_features)
predictions_icu = trained_model_icu.predict(backup_features)
predictions_vent = trained_model_vent.predict(backup_features)
new_df = pd.DataFrame(columns=['Ventilators', 'ICU', 'New Deaths', 'New Cases'])
new_df['Ventilators'] = predictions_vent
new_df['ICU'] = predictions_icu
new_df['New Deaths'] = backup_features[:,0]
new_df['New Cases'] = backup_features[:,1]
new_df.reset_index(drop=True, inplace=True)
new_df = new_df.to_json(orient='records')
new_df = json.loads(new_df)
print(json.dumps(new_df, indent=4))
|
[
"pandas.DataFrame",
"csv.reader",
"json.loads",
"statsmodels.tsa.base.datetools.dates_from_str",
"json.dumps",
"pandas.DatetimeIndex",
"numpy.array",
"sklearn.svm.SVC"
] |
[((464, 552), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['New Cases', 'New Deaths', 'New Tests', 'Ventilators', 'ICU']"}), "(columns=['New Cases', 'New Deaths', 'New Tests', 'Ventilators',\n 'ICU'])\n", (476, 552), True, 'import pandas as pd\n'), ((997, 1026), 'statsmodels.tsa.base.datetools.dates_from_str', 'dates_from_str', (['chicago_dates'], {}), '(chicago_dates)\n', (1011, 1026), False, 'from statsmodels.tsa.base.datetools import dates_from_str\n'), ((1113, 1144), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['chicago_dates'], {}), '(chicago_dates)\n', (1129, 1144), True, 'import pandas as pd\n'), ((1456, 1482), 'statsmodels.tsa.base.datetools.dates_from_str', 'dates_from_str', (['temp_dates'], {}), '(temp_dates)\n', (1470, 1482), False, 'from statsmodels.tsa.base.datetools import dates_from_str\n'), ((1494, 1529), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['New Tests']"}), "(columns=['New Tests'])\n", (1506, 1529), True, 'import pandas as pd\n'), ((1584, 1612), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['temp_dates'], {}), '(temp_dates)\n', (1600, 1612), True, 'import pandas as pd\n'), ((2114, 2140), 'statsmodels.tsa.base.datetools.dates_from_str', 'dates_from_str', (['temp_dates'], {}), '(temp_dates)\n', (2128, 2140), False, 'from statsmodels.tsa.base.datetools import dates_from_str\n'), ((2152, 2196), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Ventilators', 'ICU']"}), "(columns=['Ventilators', 'ICU'])\n", (2164, 2196), True, 'import pandas as pd\n'), ((2288, 2316), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['temp_dates'], {}), '(temp_dates)\n', (2304, 2316), True, 'import pandas as pd\n'), ((736, 780), 'csv.reader', 'csv.reader', (['fh'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(fh, delimiter=',', quotechar='|')\n", (746, 780), False, 'import csv\n'), ((1228, 1272), 'csv.reader', 'csv.reader', (['fh'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(fh, delimiter=',', quotechar='|')\n", (1238, 1272), False, 'import csv\n'), ((1809, 1853), 'csv.reader', 'csv.reader', (['fh'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(fh, delimiter=',', quotechar='|')\n", (1819, 1853), False, 'import csv\n'), ((2503, 2577), 'sklearn.svm.SVC', 'svm.SVC', ([], {'probability': '(False)', 'decision_function_shape': '"""ovr"""', 'kernel': '"""linear"""'}), "(probability=False, decision_function_shape='ovr', kernel='linear')\n", (2510, 2577), False, 'from sklearn import svm\n'), ((6199, 6270), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Ventilators', 'ICU', 'New Deaths', 'New Cases']"}), "(columns=['Ventilators', 'ICU', 'New Deaths', 'New Cases'])\n", (6211, 6270), True, 'import pandas as pd\n'), ((6555, 6573), 'json.loads', 'json.loads', (['new_df'], {}), '(new_df)\n', (6565, 6573), False, 'import json\n'), ((3802, 3821), 'numpy.array', 'np.array', (['train_col'], {}), '(train_col)\n', (3810, 3821), True, 'import numpy as np\n'), ((6584, 6612), 'json.dumps', 'json.dumps', (['new_df'], {'indent': '(4)'}), '(new_df, indent=4)\n', (6594, 6612), False, 'import json\n')]
|
#
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 <NAME> r.c.i.m<EMAIL>enzie at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package dat_file
# Load and dump a dat file into a dat class
#
import os
import shutil
import re
import hashlib
import glob
import sys
from util_zip import zip_get_data_file
from inp import inp_load_file
from str2bool import str2bool
from triangle import triangle
from quiver import quiver
from util_text import is_number
from triangle import vec
#search first 40 lines for dims
def dat_file_load_info(output,lines):
#print(lines[0])
if len(lines)>1:
if lines[0]=="#gpvdm":
max_lines=len(lines)
if max_lines>40:
max_lines=40
found_xyz=False
for i in range(0, max_lines):
if (len(lines[i])>0):
if (lines[i][0]!="#"):
break
else:
command=lines[i].split(" ",1)
if len(command)<2:
command.append("")
if (command[0]=="#x_mul"):
output.x_mul=float(command[1])
if (command[0]=="#y_mul"):
output.y_mul=float(command[1])
if (command[0]=="#z_mul"):
output.z_mul=float(command[1])
if (command[0]=="#x_offset"):
output.x_offset=float(command[1])
if (command[0]=="#y_offset"):
output.y_offset=float(command[1])
if (command[0]=="#z_offset"):
output.z_offset=float(command[1])
if (command[0]=="#data_mul"):
output.data_mul=float(command[1])
if (command[0]=="#x_label"):
output.x_label=command[1]
if (command[0]=="#y_label"):
output.y_label=command[1]
if (command[0]=="#z_label"):
output.z_label=command[1]
if (command[0]=="#data_label"):
output.data_label=command[1]
if (command[0]=="#x_units"):
output.x_units=command[1]
if (command[0]=="#y_units"):
output.y_units=command[1]
if (command[0]=="#z_units"):
output.z_units=command[1]
if (command[0]=="#data_units"):
output.data_units=command[1]
if (command[0]=="#logscale_x"):
output.logx=str2bool(command[1])
if (command[0]=="#logscale_y"):
output.logy=str2bool(command[1])
if (command[0]=="#logscale_z"):
output.logz=str2bool(command[1])
if (command[0]=="#logscale_data"):
output.logdata=str2bool(command[1])
if (command[0]=="#type"):
output.type=command[1]
if (command[0]=="#title"):
output.title=command[1]
if (command[0]=="#section_one"):
output.section_one=command[1]
if (command[0]=="#section_two"):
output.section_two=command[1]
if (command[0]=="#time"):
output.time=float(command[1])
if (command[0]=="#Vexternal"):
output.Vexternal=float(command[1])
if (command[0]=="#data_max"):
output.data_max=float(command[1])
if (command[0]=="#data_min"):
output.data_min=float(command[1])
if (command[0]=="#x"):
output.x_len=int(command[1])
found_xyz=True
if (command[0]=="#y"):
output.y_len=int(command[1])
found_xyz=True
if (command[0]=="#z"):
output.z_len=int(command[1])
found_xyz=True
if (command[0]=="#rgb"):
output.decode_rgb("#rgb "+command[1])
if found_xyz==True and output.x_len != -1 and output.y_len != -1 and output.z_len != -1:
return True
else:
return False
return False
def guess_dim(lines):
x=0
y=0
z=0
data_started=False
for i in range(0, len(lines)):
temp=lines[i]
temp=re.sub(' +',' ',temp)
temp=re.sub("\t"," ",temp)
temp=re.sub("\r","",temp)
if len(temp)>0:
s=temp.split()
l=len(s)
if l>0:
if len(s[l-1])>0:
if s[l-1][0]=="#":
l=l-1
if l==1:
if is_number(s[0])==True:
print("I can't do this file type yet",s,l)
return False,False,False
if l==2:
if is_number(s[0])==True and is_number(s[1])==True:
y=y+1
if l==3:
if is_number(s[0])==True and is_number(s[1])==True and is_number(s[2])==True:
print("I can't do this file type yet",s,l)
return False,False,False
return 1,y,1
def col_name_to_pos(lines,col,known_col_sep):
if known_col_sep==None:
return col
if type(col)==float:
return col
for i in range(0, len(lines)):
s,label=decode_line(lines[i],known_col_sep=known_col_sep)
if col in s:
return s.index(col)
return False
def decode_line(line,known_col_sep=None):
label=False
line=re.sub(' +',' ',line)
if known_col_sep!=None:
s=line.split(known_col_sep)
return s,False
line=re.sub('\t',' ',line)
#check for labels at the end of the line
if len(line)>0:
if line[0]!="#":
if line.count("#")>0:
label=line.split("#")[1]
line=line.split("#")[0]
labels=True
	line=line.replace(', ', ' ') #remove commas in csv files
line=line.replace(',', '.') #Remove European commas
s=line.split()
return s,label
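#read a whitespace separated x y z table into x_scale, y_scale and a 2D z grid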
def read_data_2d(x_scale,y_scale,z,file_name):
if file_name==None:
return False
found,lines=zip_get_data_file(file_name)
if found==True:
x_max=0
y_max=0
y_pos=0
z_store=[]
for i in range(0, len(lines)):
if len(lines[i])>0:
if lines[i][0]!="#" and lines[i]!="\n":
temp=lines[i]
temp=re.sub(' +',' ',temp)
temp=re.sub('\t',' ',temp)
temp=temp.rstrip()
sline=temp.split(" ")
if len(sline)==3:
if x_max==0:
y_scale.append(float(lines[i].split(" ")[1]))
if y_pos==0:
x_scale.append(float(lines[i].split(" ")[0]))
z_store.append(float(lines[i].split(" ")[2]))
y_pos=y_pos+1
if x_max==0:
y_max=y_max+1
if lines[i]=="":
x_max=x_max+1
y_pos=0
if lines[len(lines)-1]!="\n":
x_max=x_max+1
x_max=len(x_scale)
y_max=len(y_scale)
pos=0
for x in range(0, x_max):
z.append([])
for y in range(0, y_max):
z[x].append(z_store[pos])
pos=pos+1
return True
else:
return False
def dat_file_print(dat):
print("valid_data",dat.valid_data)
print("grid",dat.grid)
print("show_pointer",dat.show_pointer)
print("logy",dat.logy)
print("logx",dat.logx)
print("logz",dat.logz)
print("logdata",dat.logdata)
print("label_data",dat.label_data)
print("invert_y",dat.invert_y)
print("normalize",dat.normalize)
print("norm_to_peak_of_all_data",dat.norm_to_peak_of_all_data)
print("subtract_first_point",dat.subtract_first_point)
print("add_min",dat.add_min)
print("legend_pos",dat.legend_pos)
print("ymax",dat.ymax)
print("ymin",dat.ymin)
print("x_label",dat.x_label)
print("y_label",dat.y_label)
print("z_label",dat.z_label)
print("data_label",dat.data_label)
print("x_units",dat.x_units)
print("y_units",dat.y_units)
print("z_units",dat.z_units)
print("rgb",dat.rgb)
print("data_units",dat.data_units)
print("x_mul",dat.x_mul)
print("y_mul",dat.y_mul)
print("z_mul",dat.z_mul)
print("data_mul",dat.data_mul)
print("key_units",dat.key_units)
print("file0",dat.file0)
print("tag0",dat.tag0)
print("file1",dat.file1)
print("tag1",dat.tag1)
print("file2",dat.file2)
print("tag2",dat.tag2)
print("example_file0",dat.example_file0)
print("example_file1",dat.example_file1)
print("example_file2",dat.example_file2)
print("time",dat.time)
print("Vexternal",dat.Vexternal)
print("file_name",dat.file_name)
print("other_file",dat.other_file)
print("title",dat.title)
print("type",dat.type)
print("section_one",dat.section_one)
print("section_two",dat.section_two)
print("x_start",dat.x_start)
print("x_stop",dat.x_stop)
print("x_points",dat.x_points)
print("y_start",dat.y_start)
print("y_stop",dat.y_stop)
print("y_points",dat.y_points)
print("x_len",dat.x_len)
print("y_len",dat.y_len)
print("z_len",dat.z_len)
print("x_scale",dat.x_scale)
print("y_scale",dat.y_scale)
print("z_scale",dat.z_scale)
print("data",dat.data)
print("labels",dat.labels)
class dat_file():
def __init__(self):
self.valid_data=False
self.grid=False
self.show_pointer=False
self.logy=False
self.logx=False
self.logz=False
self.logdata=False
self.label_data=False
self.invert_y=False
self.normalize=False
self.norm_to_peak_of_all_data=False
self.subtract_first_point=False
self.add_min=False
self.legend_pos="lower right"
self.ymax=-1
self.ymin=-1
self.xmax=-1
self.xmin=-1
self.zmax=-1
self.zmin=-1
self.x_label=""
self.y_label=""
self.z_label=""
self.data_label=""
self.x_units=""
self.y_units=""
self.z_units=""
self.data_units=""
self.plot_type="" #wireframe/heat etc...
self.plotted=False
self.r=None
self.g=None
self.b=None
self.x_mul=1.0
self.y_mul=1.0
self.z_mul=1.0
self.x_offset=0.0
self.y_offset=0.0
self.z_offset=0.0
self.data_mul=1.0
self.key_units=""
self.key_text=""
self.file0=""
self.tag0=""
self.file1=""
self.tag1=""
self.file2=""
self.tag2=""
self.example_file0=""
self.example_file1=""
self.example_file2=""
self.time=0.0
self.Vexternal=0.0
self.file_name=""
self.other_file=""
self.title=""
self.type="xy"
self.section_one=""
self.section_two=""
self.x_start=0
self.x_stop=1
self.x_points=25
self.y_start=0
self.y_stop=1
self.y_points=25
self.x_len=0
self.y_len=0
self.z_len=0
self.data_max=None
self.data_min=None
self.x_scale=[]
self.y_scale=[]
self.z_scale=[]
self.data=[]
self.labels=[]
self.file_age=0
self.new_read=True
self.file_name=None
def import_data(self,file_name,x_col=0,y_col=1,skip_lines=0,known_col_sep=None):
"""This is an import filter for xy data"""
if self.have_i_loaded_this(file_name)==True:
return True
lines=[]
#print("1")
lines=inp_load_file(file_name)
if lines==False:
return False
if len(lines)<skip_lines:
return False
x_col=col_name_to_pos(lines,x_col,known_col_sep)
y_col=col_name_to_pos(lines,y_col,known_col_sep)
lines=lines[skip_lines:]
self.x_scale=[]
self.y_scale=[]
self.z_scale=[]
self.data=[]
data_started=False
self.data=[[[0.0 for k in range(0)] for j in range(1)] for i in range(1)]
for i in range(0, len(lines)):
s,label=decode_line(lines[i],known_col_sep=known_col_sep)
#print(s)
l=len(s)
if l>0:
if data_started==False:
if is_number(s[0])==True:
data_started=True
if s[0]=="#end":
break
if data_started==True:
number_ok=False
try:
float(s[x_col])
float(s[y_col])
number_ok=True
except:
pass
if number_ok==True:
if max(x_col,y_col)<l:
duplicate=False
if float(s[x_col]) in self.y_scale:
duplicate=True
if duplicate==False:
self.y_scale.append(float(s[x_col]))
self.data[0][0].append(float(s[y_col]))
self.x_len=1
self.y_len=len(self.data[0][0])
self.z_len=1
#print("3")
return True
def rgb(self):
if self.r==None:
return None
return format(int(self.r*255), '02x')+format(int(self.g*255), '02x')+format(int(self.b*255), '02x')
def pow(self,val):
a=dat_file()
a.copy(self)
for z in range(0,len(self.z_scale)):
for x in range(0,len(self.x_scale)):
for y in range(0,len(self.y_scale)):
a.data[z][x][y]=pow(self.data[z][x][y],val)
return a
def intergrate(self):
sum=0.0
for y in range(0,len(self.y_scale)-1):
dy=self.y_scale[y+1]-self.y_scale[y]
sum=sum+self.data[0][0][y]*dy
return sum
def set_neg_to_zero(self):
for y in range(0,len(self.y_scale)):
if self.data[0][0][y]<0.0:
self.data[0][0][y]=0.0
def set_neg_to_last(self):
last=0.0
for y in range(0,len(self.y_scale)):
if self.data[0][0][y]<0.0:
self.data[0][0][y]=last
else:
last=self.data[0][0][y]
def __sub__(self,val):
a=dat_file()
a.copy(self)
for z in range(0,len(self.z_scale)):
for x in range(0,len(self.x_scale)):
for y in range(0,len(self.y_scale)):
a.data[z][x][y]=self.data[z][x][y]-val
return a
def __add__(self,val):
a=dat_file()
a.copy(self)
if type(val)==float:
for z in range(0,len(self.z_scale)):
for x in range(0,len(self.x_scale)):
for y in range(0,len(self.y_scale)):
a.data[z][x][y]=self.data[z][x][y]+val
else:
for z in range(0,len(self.z_scale)):
for x in range(0,len(self.x_scale)):
for y in range(0,len(self.y_scale)):
a.data[z][x][y]=self.data[z][x][y]+val.data[z][x][y]
return a
def __truediv__(self,in_data):
a=dat_file()
a.copy(self)
for z in range(0,len(self.z_scale)):
for x in range(0,len(self.x_scale)):
for y in range(0,len(self.y_scale)):
a.data[z][x][y]=self.data[z][x][y]/in_data.data[z][x][y]
return a
def __rsub__(self,val):
a=dat_file()
a.copy(self)
for z in range(0,len(self.z_scale)):
for x in range(0,len(self.x_scale)):
for y in range(0,len(self.y_scale)):
a.data[z][x][y]=val-self.data[z][x][y]
return a
def set_float(self,val):
for z in range(0,len(self.z_scale)):
for x in range(0,len(self.x_scale)):
for y in range(0,len(self.y_scale)):
self.data[z][x][y]=val
def chop_y(self,y0,y1):
if y0==0 and y1==0:
return
self.y_scale=self.y_scale[y0:y1]
self.y_len=len(self.y_scale)
for z in range(0,len(self.z_scale)):
for x in range(0,len(self.x_scale)):
self.data[z][x]=self.data[z][x][y0:y1]
#for y in range(0,len(self.y_scale)):
# self.data[z][x][y]=val
def __mul__(self,in_data):
a=dat_file()
a.copy(self)
if type(in_data)==float:
for z in range(0,len(self.z_scale)):
for x in range(0,len(self.x_scale)):
for y in range(0,len(self.y_scale)):
a.data[z][x][y]=in_data*self.data[z][x][y]
else:
for z in range(0,len(self.z_scale)):
for x in range(0,len(self.x_scale)):
for y in range(0,len(self.y_scale)):
a.data[z][x][y]=in_data.data[z][x][y]*self.data[z][x][y]
return a
def __rmul__(self, in_data):
return self.__mul__(in_data)
def copy(self,in_data):
self.x_len=in_data.x_len
self.y_len=in_data.y_len
self.z_len=in_data.z_len
self.init_mem()
for i in range(0,len(self.x_scale)):
self.x_scale[i]=in_data.x_scale[i]
for i in range(0,len(self.y_scale)):
self.y_scale[i]=in_data.y_scale[i]
for i in range(0,len(self.z_scale)):
self.z_scale[i]=in_data.z_scale[i]
self.y_mul=in_data.y_mul
self.y_units=in_data.y_units
self.data_mul=in_data.data_mul
self.data_units=in_data.data_units
def init_mem(self):
self.data=[[[0.0 for k in range(self.y_len)] for j in range(self.x_len)] for i in range(self.z_len)]
self.x_scale= [0.0]*self.x_len
self.y_scale= [0.0]*self.y_len
self.z_scale= [0.0]*self.z_len
self.valid_data=True
def decode_circuit_lines(self,lines):
build=[]
self.data=[]
for line in lines:
s,label=decode_line(line)
l=len(s)
if l>0:
if s[0].startswith("#")==False:
c=component()
c.z0=float(s[0])
c.x0=float(s[1])
c.y0=float(s[2])
c.z1=float(s[3])
c.x1=float(s[4])
c.y1=float(s[5])
c.name=s[6]
self.data.append(c)
return True
def decode_poly_lines(self,lines):
build=[]
self.data=[]
for line in lines:
s,label=decode_line(line)
l=len(s)
if l>0:
if s[0].startswith("#")==False:
s=list(map(float, s))
build.append(s)
if len(build)!=0 and len(s)==0:
				#This is to account for the fourth xyz vector required by gnuplot and to remove it if it exists.
if len(build)==4:
if build[len(build)-1]==build[0]:
build=build[:-1]
if len(build)==3 and len(build[0])==3:
t=triangle()
t.xyz0.z=build[0][0]
t.xyz0.x=build[0][1]
t.xyz0.y=build[0][2]
t.xyz1.z=build[1][0]
t.xyz1.x=build[1][1]
t.xyz1.y=build[1][2]
t.xyz2.z=build[2][0]
t.xyz2.x=build[2][1]
t.xyz2.y=build[2][2]
t.points=3
self.data.append(t)
elif len(build)==2 and len(build[0])==3:
t=triangle()
t.xyz0.z=build[0][0]
t.xyz0.x=build[0][1]
t.xyz0.y=build[0][2]
t.xyz1.z=build[1][0]
t.xyz1.x=build[1][1]
t.xyz1.y=build[1][2]
self.data.append(t)
t.points=2
else:
self.data.append(build)
build=[]
self.valid_data=True
return True
def cal_min_max(self):
self.ymax=0.0
self.ymin=0.0
self.xmax=0.0
self.xmin=0.0
self.zmax=0.0
self.zmin=0.0
if self.type=="quiver":
for d in self.data:
if d.x>self.xmax:
self.xmax=d.x
if d.y>self.ymax:
self.ymax=d.y
if d.z>self.zmax:
self.zmax=d.z
def decode_quiver_lines(self,lines):
build=[]
self.data=[]
for line in lines:
s,label=decode_line(line)
l=len(s)
if l>0:
if s[0].startswith("#")==False:
s=list(map(float, s))
q=quiver()
q.x=s[0]
q.y=s[1]
q.z=s[2]
q.dx=s[3]
q.dy=s[4]
q.dz=s[5]
q.mag=s[6]
self.data.append(q)
self.cal_min_max()
self.valid_data=True
return True
def have_i_loaded_this(self,file_name):
if os.path.isfile(file_name)==True:
age=os.path.getmtime(file_name)
if age==self.file_age:
self.new_read=False
return True
else:
self.new_read=True
self.file_age=age
return False
def load(self,file_name,guess=True):
self.file_name=file_name
if file_name==None:
self.valid_data=False
return False
if self.have_i_loaded_this(file_name)==True:
return True
found,lines=zip_get_data_file(file_name)
if found==False:
return False
self.x_scale=[]
self.y_scale=[]
self.z_scale=[]
self.data=[]
if dat_file_load_info(self,lines)==False:
#print("no dims")
if guess==True:
self.x_len, self.y_len, self.z_len = guess_dim(lines)
else:
return False
if self.x_len==False:
self.valid_data=False
print("No idea what to do with this file!",file_name)
return False
if self.type=="poly":
return self.decode_poly_lines(lines)
if self.type=="circuit":
return self.decode_circuit_lines(lines)
if self.type=="quiver":
return self.decode_quiver_lines(lines)
return self.decode_zxy_lines(lines)
def decode_zxy_lines(self,lines):
self.init_mem()
self.labels=[]
data_started=False
x=0
y=0
z=0
dim=0
label=""
labels=False
#print(file_name)
for line in lines:
s,label=decode_line(line)
l=len(s)
if l>0:
if data_started==False:
if is_number(s[0])==True:
data_started=True
if s[0]=="#end":
break
if data_started==True:
if line.count("nan")>0:
#print("Warning nan found in data file",file_name)
return False
line_found=False
if l==4:
line_found=True
a0=s[0]
a1=s[1]
a2=s[2]
self.data[z][x][y]=float(s[3])
elif l==3:
line_found=True
if self.type=="rgb":
r=float(int(s[2][0:2], 16)/255)
g=float(int(s[2][2:4], 16)/255)
b=float(int(s[2][4:6], 16)/255)
self.data[z][x][y]=[r,g,b]
else:
self.data[z][x][y]=float(s[2])
a0=s[0]
a1=s[1]
a2=0.0
elif l==2:
#print(s,self.z_len,self.x_len,self.y_len)
line_found=True
if self.type=="rgb":
r=float(int(s[1][0:2], 16)/255)
g=float(int(s[1][2:4], 16)/255)
b=float(int(s[1][4:6], 16)/255)
self.data[z][x][y]=[r,g,b]
else:
self.data[z][x][y]=float(s[1])
a0=s[0]
a1=0.0
a2=0.0
if line_found==True:
if l==2:
if x==0 and z==0:
self.y_scale[y]=float(a0)+self.y_offset
elif l==3:
if x==0 and z==0:
self.y_scale[y]=float(a1)+self.y_offset
if z==0 and y==0:
self.x_scale[x]=float(a0)
elif l==4:
if x==0 and y==0:
self.z_scale[z]=float(a0)
if z==0 and y==0:
self.x_scale[x]=float(a1)
if x==0 and z==0:
self.y_scale[y]=float(a2)+self.y_offset
#if z==y:
# self.z_scale[y]=float(a0)
if label!=False:
self.labels.append(label)
y=y+1
if y==self.y_len:
y=0
x=x+1
if x==self.x_len:
x=0
z=z+1
if s[0]=="#data":
data_started=True
if data_started==False:
return False
#print(self.data)
return True
def save_as_csv(self,file_name):
if file_name.endswith(".csv")==False:
file_name=file_name+".csv"
lines=[]
lines.append(self.y_label+","+self.data_label)
for i in range(0,self.y_len):
y_text=str('{:.8e}'.format(float(self.y_scale[i])))
data_text=str('{:.8e}'.format(float(self.data[0][0][i])))
lines.append(y_text+","+data_text)
dump=""
for item in lines:
dump=dump+item+"\n"
f=open(file_name, mode='w')
lines = f.write(dump)
f.close()
def save_as_txt(self,file_name):
if file_name.endswith(".txt")==False:
file_name=file_name+".txt"
lines=[]
for i in range(0,self.y_len):
y_text=str('{:.8e}'.format(float(self.y_scale[i])))
data_text=str('{:.8e}'.format(float(self.data[0][0][i])))
lines.append(y_text+" "+data_text)
dump=""
for item in lines:
dump=dump+item+"\n"
f=open(file_name, mode='w')
lines = f.write(dump)
f.close()
def save(self,file_name):
a = open(file_name, "w")
a.write("\n".join(self.gen_output_data()))
a.close()
def decode_rgb(self,line):
if line.startswith("#rgb"):
#print(line)
line=line.split()
if len(line)==2:
rgb=line[1]
self.r=float(int(rgb[0:2], 16)/255)
self.g=float(int(rgb[2:4], 16)/255)
self.b=float(int(rgb[4:6], 16)/255)
def __str__(self):
return "\n".join(self.gen_output_data())
def gen_output_data(self):
lines=[]
lines.append("#gpvdm")
lines.append("#title "+str(self.title))
lines.append("#type "+str(self.type))
lines.append("#x_mul "+str(self.x_mul))
lines.append("#y_mul "+str(self.y_mul))
lines.append("#z_mul "+str(self.z_mul))
lines.append("#data_mul "+str(self.data_mul))
if self.x_label!="":
lines.append("#x_label "+str(self.x_label))
if self.y_label!="":
lines.append("#y_label "+str(self.y_label))
if self.z_label!="":
lines.append("#z_label "+str(self.z_label))
if self.data_label!="":
lines.append("#data_label "+str(self.data_label))
if self.x_units!="":
lines.append("#x_units "+str(self.x_units))
if self.y_units!="":
lines.append("#y_units "+str(self.y_units))
if self.z_units!="":
lines.append("#y_units "+str(self.z_units))
if self.rgb()!=None:
lines.append("#rgb "+str(self.rgb()))
if self.data_units!="":
lines.append("#data_units "+str(self.data_units))
if self.logy!=False:
lines.append("#logscale_y "+str(self.logy))
if self.logx!=False:
lines.append("#logscale_x "+str(self.logx))
if self.logz!=False:
lines.append("#logscale_z "+str(self.logz))
if self.logdata!=False:
lines.append("#logscale_data "+str(self.logdata))
lines.append("#time "+str(self.time))
lines.append("#Vexternal "+str(self.Vexternal))
lines.append("#x "+str(self.x_len))
lines.append("#y "+str(self.y_len))
lines.append("#z "+str(self.z_len))
lines.append("#begin")
if self.type=="poly":
for d in self.data:
lines.append(str(d.xyz0.z)+" "+str(d.xyz0.x)+" "+str(d.xyz0.y))
lines.append(str(d.xyz1.z)+" "+str(d.xyz1.x)+" "+str(d.xyz1.y))
lines.append(str(d.xyz2.z)+" "+str(d.xyz2.x)+" "+str(d.xyz2.y))
lines.append(str(d.xyz0.z)+" "+str(d.xyz0.x)+" "+str(d.xyz0.y))
lines.append("")
else:
for i in range(0,self.y_len):
y_text=str('{:.8e}'.format(float(self.y_scale[i])))
data_text=str('{:.8e}'.format(float(self.data[0][0][i])))
lines.append(y_text+" "+data_text)
lines.append("#end")
return lines
|
[
"inp.inp_load_file",
"quiver.quiver",
"str2bool.str2bool",
"util_zip.zip_get_data_file",
"triangle.triangle",
"os.path.isfile",
"os.path.getmtime",
"util_text.is_number",
"re.sub"
] |
[((5200, 5223), 're.sub', 're.sub', (['""" +"""', '""" """', 'line'], {}), "(' +', ' ', line)\n", (5206, 5223), False, 'import re\n'), ((5302, 5325), 're.sub', 're.sub', (['"""\t"""', '""" """', 'line'], {}), "('\\t', ' ', line)\n", (5308, 5325), False, 'import re\n'), ((5753, 5781), 'util_zip.zip_get_data_file', 'zip_get_data_file', (['file_name'], {}), '(file_name)\n', (5770, 5781), False, 'from util_zip import zip_get_data_file\n'), ((4268, 4291), 're.sub', 're.sub', (['""" +"""', '""" """', 'temp'], {}), "(' +', ' ', temp)\n", (4274, 4291), False, 'import re\n'), ((4297, 4320), 're.sub', 're.sub', (['"""\t"""', '""" """', 'temp'], {}), "('\\t', ' ', temp)\n", (4303, 4320), False, 'import re\n'), ((4326, 4348), 're.sub', 're.sub', (["'\\r'", '""""""', 'temp'], {}), "('\\r', '', temp)\n", (4332, 4348), False, 'import re\n'), ((10364, 10388), 'inp.inp_load_file', 'inp_load_file', (['file_name'], {}), '(file_name)\n', (10377, 10388), False, 'from inp import inp_load_file\n'), ((17983, 18011), 'util_zip.zip_get_data_file', 'zip_get_data_file', (['file_name'], {}), '(file_name)\n', (18000, 18011), False, 'from util_zip import zip_get_data_file\n'), ((17570, 17595), 'os.path.isfile', 'os.path.isfile', (['file_name'], {}), '(file_name)\n', (17584, 17595), False, 'import os\n'), ((17610, 17637), 'os.path.getmtime', 'os.path.getmtime', (['file_name'], {}), '(file_name)\n', (17626, 17637), False, 'import os\n'), ((4493, 4508), 'util_text.is_number', 'is_number', (['s[0]'], {}), '(s[0])\n', (4502, 4508), False, 'from util_text import is_number\n'), ((5971, 5994), 're.sub', 're.sub', (['""" +"""', '""" """', 'temp'], {}), "(' +', ' ', temp)\n", (5977, 5994), False, 'import re\n'), ((6003, 6026), 're.sub', 're.sub', (['"""\t"""', '""" """', 'temp'], {}), "('\\t', ' ', temp)\n", (6009, 6026), False, 'import re\n'), ((16179, 16189), 'triangle.triangle', 'triangle', ([], {}), '()\n', (16187, 16189), False, 'from triangle import triangle\n'), ((17325, 17333), 'quiver.quiver', 'quiver', ([], {}), '()\n', (17331, 17333), False, 'from quiver import quiver\n'), ((4617, 4632), 'util_text.is_number', 'is_number', (['s[0]'], {}), '(s[0])\n', (4626, 4632), False, 'from util_text import is_number\n'), ((4643, 4658), 'util_text.is_number', 'is_number', (['s[1]'], {}), '(s[1])\n', (4652, 4658), False, 'from util_text import is_number\n'), ((4699, 4714), 'util_text.is_number', 'is_number', (['s[0]'], {}), '(s[0])\n', (4708, 4714), False, 'from util_text import is_number\n'), ((4725, 4740), 'util_text.is_number', 'is_number', (['s[1]'], {}), '(s[1])\n', (4734, 4740), False, 'from util_text import is_number\n'), ((4751, 4766), 'util_text.is_number', 'is_number', (['s[2]'], {}), '(s[2])\n', (4760, 4766), False, 'from util_text import is_number\n'), ((10935, 10950), 'util_text.is_number', 'is_number', (['s[0]'], {}), '(s[0])\n', (10944, 10950), False, 'from util_text import is_number\n'), ((16520, 16530), 'triangle.triangle', 'triangle', ([], {}), '()\n', (16528, 16530), False, 'from triangle import triangle\n'), ((18945, 18960), 'util_text.is_number', 'is_number', (['s[0]'], {}), '(s[0])\n', (18954, 18960), False, 'from util_text import is_number\n'), ((2823, 2843), 'str2bool.str2bool', 'str2bool', (['command[1]'], {}), '(command[1])\n', (2831, 2843), False, 'from str2bool import str2bool\n'), ((2901, 2921), 'str2bool.str2bool', 'str2bool', (['command[1]'], {}), '(command[1])\n', (2909, 2921), False, 'from str2bool import str2bool\n'), ((2979, 2999), 'str2bool.str2bool', 'str2bool', (['command[1]'], {}), '(command[1])\n', (2987, 2999), False, 'from str2bool import str2bool\n'), ((3063, 3083), 'str2bool.str2bool', 'str2bool', (['command[1]'], {}), '(command[1])\n', (3071, 3083), False, 'from str2bool import str2bool\n')]
|
"""
WebDjangular URL Configuration
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib.sitemaps import views as sitemaps_views
# settings.ANGULAR_CLIENT_APP_DIR, settings.ANGULAR_ADMIN_APP_DIR
from django.contrib.staticfiles.views import serve
from django.urls import path
from rest_framework.routers import DefaultRouter
from .sitemaps import PageSitemap, PostSitemap, index as sitemap_index, sitemap as sitemap_section
from .views.AddressViewSet import AddressViewSet
from .views.CoreConfigViewSet import CoreConfigGroupViewSet, \
CoreConfigInputViewSet
from .views.CoreViewSet import AuthorViewSet, CoreConfigViewSet, \
PluginViewSet, ThemeViewSet, WebsiteViewSet
from .views.EmailViewSet import EmailViewSet
from .views.InitViewSet import InitViewSet
from django.views.decorators.cache import cache_page
'''
schema_view = get_schema_view(
openapi.Info(
title="DjAngular API",
default_version='v1',
description="Test description",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="<EMAIL>"),
license=openapi.License(name="MIT License"),
),
validators=['flex', 'ssv'],
public=True,
)
'''
router = DefaultRouter()
router.register(r'core_email', EmailViewSet)
router.register(r'core_author', AuthorViewSet)
router.register(r'core_plugin', PluginViewSet)
router.register(r'core_theme', ThemeViewSet)
router.register(r'core_config', CoreConfigViewSet)
router.register(r'core_website', WebsiteViewSet)
router.register(r'core_init', InitViewSet)
router.register(r'core_config_input', CoreConfigInputViewSet,
base_name='core_config_input')
router.register(r'core_config_group', CoreConfigGroupViewSet,
base_name='core_config_group')
router.register(r'address', AddressViewSet, base_name='address')
sitemaps = {
'page': PageSitemap(),
'post': PostSitemap()
}
urlpatterns = [
url(r'^api/', include(router.urls)),
url(r'^api/', include('libs.core.users.api.urls')),
url(r'^api/', include('libs.core.cms.api.urls'), name='cms'),
url(r'^api/', include('libs.core.media.api.urls')),
url(r'^api/', include('libs.plugins.provider.api.urls')),
url(r'^api/', include('libs.plugins.store.api.urls')),
# url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps}, name='django.contrib.sitemaps.views.sitemap'),
path('sitemap.xml',
sitemap_index,
{'sitemaps': sitemaps, 'sitemap_url_name': 'sitemaps'}),
path('sitemap-<section>.xml',
sitemap_section,
{'sitemaps': sitemaps}, name='sitemaps'),
# url(r'^admin', TemplateView.as_view(template_name="/static/admin/index.html")),
url(r'^admin', serve, kwargs={'path': 'admin/index.html'}),
url(r'^', serve, kwargs={'path': 'client/index.html'}),
]
urlpatterns = static('/admin/', document_root='static/admin/') + static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT) + static(
'/client_app/', document_root='static/client') + urlpatterns
|
[
"django.conf.urls.include",
"django.urls.path",
"django.conf.urls.url",
"django.conf.urls.static.static",
"rest_framework.routers.DefaultRouter"
] |
[((1341, 1356), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (1354, 1356), False, 'from rest_framework.routers import DefaultRouter\n'), ((2539, 2633), 'django.urls.path', 'path', (['"""sitemap.xml"""', 'sitemap_index', "{'sitemaps': sitemaps, 'sitemap_url_name': 'sitemaps'}"], {}), "('sitemap.xml', sitemap_index, {'sitemaps': sitemaps,\n 'sitemap_url_name': 'sitemaps'})\n", (2543, 2633), False, 'from django.urls import path\n'), ((2656, 2748), 'django.urls.path', 'path', (['"""sitemap-<section>.xml"""', 'sitemap_section', "{'sitemaps': sitemaps}"], {'name': '"""sitemaps"""'}), "('sitemap-<section>.xml', sitemap_section, {'sitemaps': sitemaps}, name\n ='sitemaps')\n", (2660, 2748), False, 'from django.urls import path\n'), ((2859, 2916), 'django.conf.urls.url', 'url', (['"""^admin"""', 'serve'], {'kwargs': "{'path': 'admin/index.html'}"}), "('^admin', serve, kwargs={'path': 'admin/index.html'})\n", (2862, 2916), False, 'from django.conf.urls import include, url\n'), ((2924, 2977), 'django.conf.urls.url', 'url', (['"""^"""', 'serve'], {'kwargs': "{'path': 'client/index.html'}"}), "('^', serve, kwargs={'path': 'client/index.html'})\n", (2927, 2977), False, 'from django.conf.urls import include, url\n'), ((2093, 2113), 'django.conf.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (2100, 2113), False, 'from django.conf.urls import include, url\n'), ((2137, 2172), 'django.conf.urls.include', 'include', (['"""libs.core.users.api.urls"""'], {}), "('libs.core.users.api.urls')\n", (2144, 2172), False, 'from django.conf.urls import include, url\n'), ((2194, 2227), 'django.conf.urls.include', 'include', (['"""libs.core.cms.api.urls"""'], {}), "('libs.core.cms.api.urls')\n", (2201, 2227), False, 'from django.conf.urls import include, url\n'), ((2261, 2296), 'django.conf.urls.include', 'include', (['"""libs.core.media.api.urls"""'], {}), "('libs.core.media.api.urls')\n", (2268, 2296), False, 'from django.conf.urls import include, url\n'), ((2318, 2359), 'django.conf.urls.include', 'include', (['"""libs.plugins.provider.api.urls"""'], {}), "('libs.plugins.provider.api.urls')\n", (2325, 2359), False, 'from django.conf.urls import include, url\n'), ((2381, 2419), 'django.conf.urls.include', 'include', (['"""libs.plugins.store.api.urls"""'], {}), "('libs.plugins.store.api.urls')\n", (2388, 2419), False, 'from django.conf.urls import include, url\n'), ((3186, 3239), 'django.conf.urls.static.static', 'static', (['"""/client_app/"""'], {'document_root': '"""static/client"""'}), "('/client_app/', document_root='static/client')\n", (3192, 3239), False, 'from django.conf.urls.static import static\n'), ((2998, 3046), 'django.conf.urls.static.static', 'static', (['"""/admin/"""'], {'document_root': '"""static/admin/"""'}), "('/admin/', document_root='static/admin/')\n", (3004, 3046), False, 'from django.conf.urls.static import static\n'), ((3049, 3110), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (3055, 3110), False, 'from django.conf.urls.static import static\n')]
|
import json, codecs
import numpy as np
def load_data(root, path):
'''Load data from path'''
data = codecs.open(root + path, encoding = 'utf8')
relations = [json.loads(x) for x in data]
data.close()
return relations
def convert2id(root):
'''Convert training data into indices'''
wdict = {'<pad>':0, '<unk>':1}
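    # wdict/cdict are bidirectional: they map words/senses to ids and ids back to words/senses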
cdict = {}
with codecs.open(root + 'train/relations.json', encoding = 'utf8') as pdtb:
relations = [json.loads(x) for x in pdtb]
wid = 2
cid = 0
for rel in relations:
sen_cat = rel['Arg1']['RawText'].split() + rel['Arg2']['RawText'].split()
sense = rel['Sense'][0]
if sense not in cdict:
cdict[sense] = cid
cdict[cid] = sense
cid += 1
for word in sen_cat:
if word not in wdict:
wdict[word] = wid
wdict[wid] = word
wid += 1
return wdict, cdict
def generateBatches(relations, sen_len, num_class, wdict, cdict, batch_size):
'''Generate train/dev/test batches for the model'''
per_len = int(sen_len / 2)
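    # each argument (Arg1/Arg2) gets half of the total sequence length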
emb_list = []
class_list = []
for i in range(0, len(relations), batch_size):
batch = relations[i:i+batch_size]
emb = np.ndarray((len(batch), sen_len))
cla = np.zeros((len(batch), num_class))
for j in range(len(batch)):
rel = batch[j]
sense = rel['Sense'][0]
sen1 = rel['Arg1']['RawText'].split()
sen2 = rel['Arg2']['RawText'].split()
sen = word2id(sen1, per_len, wdict) + word2id(sen2, per_len, wdict)
if len(sen) < sen_len:
sen = sen + [wdict['<pad>']] * (sen_len - len(sen))
emb[j] = sen
cla[j][cdict[sense]] = 1
emb_list.append(emb)
class_list.append(cla)
return emb_list, class_list
def word2id(sen, per_len, wdict):
'''Convert a sequence of word into a sequence of indices'''
if len(sen) > per_len:
sen = sen[:per_len]
w2id_sen = []
for w in sen:
if w not in wdict:
w2id_sen.append(wdict['<unk>'])
else:
w2id_sen.append(wdict[w])
return w2id_sen
|
[
"codecs.open",
"json.loads"
] |
[((108, 149), 'codecs.open', 'codecs.open', (['(root + path)'], {'encoding': '"""utf8"""'}), "(root + path, encoding='utf8')\n", (119, 149), False, 'import json, codecs\n'), ((169, 182), 'json.loads', 'json.loads', (['x'], {}), '(x)\n', (179, 182), False, 'import json, codecs\n'), ((363, 422), 'codecs.open', 'codecs.open', (["(root + 'train/relations.json')"], {'encoding': '"""utf8"""'}), "(root + 'train/relations.json', encoding='utf8')\n", (374, 422), False, 'import json, codecs\n'), ((455, 468), 'json.loads', 'json.loads', (['x'], {}), '(x)\n', (465, 468), False, 'import json, codecs\n')]
|
from dataclasses import dataclass, field
from uuid import UUID
from dataclasses_json import config, dataclass_json
@dataclass_json
@dataclass
class Owner:
id: UUID = field(metadata=config(field_name="uuid"))
username: str
name: str
lastname: str
logo: str
kyc: bool
bio: str
|
[
"dataclasses_json.config"
] |
[((188, 213), 'dataclasses_json.config', 'config', ([], {'field_name': '"""uuid"""'}), "(field_name='uuid')\n", (194, 213), False, 'from dataclasses_json import config, dataclass_json\n')]
|
import icalendar
import morepath
import sedate
from collections import OrderedDict, namedtuple
from datetime import datetime, timedelta
from isodate import parse_date, ISO8601Error
from itertools import groupby
from morepath.request import Response
from onegov.core.security import Public, Private
from onegov.core.utils import module_path
from onegov.core.orm import as_selectable_from_path
from onegov.form import FormSubmission
from onegov.org.cli import close_ticket
from onegov.reservation import ResourceCollection, Resource, Reservation
from onegov.org import _, OrgApp, utils
from onegov.org.elements import Link
from onegov.org.forms import (
ResourceForm, ResourceCleanupForm, ResourceExportForm
)
from onegov.org.layout import ResourcesLayout, ResourceLayout
from onegov.org.models.resource import DaypassResource, RoomResource, \
ItemResource
from onegov.org.utils import group_by_column, keywords_first
from onegov.ticket import Ticket, TicketCollection
from purl import URL
from sedate import utcnow, standardize_date
from sqlalchemy import and_, select
from sqlalchemy.orm import object_session
from webob import exc
RESOURCE_TYPES = {
'daypass': {
'success': _("Added a new daypass"),
'title': _("New daypass"),
'class': DaypassResource
},
'room': {
'success': _("Added a new room"),
'title': _("New room"),
'class': RoomResource
},
'daily-item': {
'success': _("Added a new item"),
'title': _("New Item"),
'class': ItemResource
}
}
def get_daypass_form(self, request):
return get_resource_form(self, request, 'daypass')
def get_room_form(self, request):
return get_resource_form(self, request, 'room')
def get_item_form(self, request):
return get_resource_form(self, request, 'daily-item')
def get_resource_form(self, request, type=None):
if isinstance(self, ResourceCollection):
assert type is not None
model = RESOURCE_TYPES[type]['class']()
else:
model = self
return model.with_content_extensions(ResourceForm, request)
@OrgApp.html(model=ResourceCollection, template='resources.pt',
permission=Public)
def view_resources(self, request, layout=None):
return {
'title': _("Reservations"),
'resources': group_by_column(
request=request,
query=self.query(),
group_column=Resource.group,
sort_column=Resource.title
),
'layout': layout or ResourcesLayout(self, request)
}
@OrgApp.json(model=ResourceCollection, permission=Public, name='json')
def view_resources_json(self, request):
def transform(resource):
return {
'name': resource.name,
'title': resource.title,
'url': request.link(resource),
}
@request.after
def cache(response):
# only update once every minute
response.cache_control.max_age = 60
return group_by_column(
request=request,
query=self.query(),
group_column=Resource.group,
sort_column=Resource.title,
transform=transform,
default_group=request.translate(_("Reservations"))
)
@OrgApp.form(model=ResourceCollection, name='new-room',
template='form.pt', permission=Private, form=get_room_form)
def handle_new_room(self, request, form, layout=None):
return handle_new_resource(self, request, form, 'room', layout)
@OrgApp.form(model=ResourceCollection, name='new-daypass',
template='form.pt', permission=Private, form=get_daypass_form)
def handle_new_daypass(self, request, form, layout=None):
return handle_new_resource(self, request, form, 'daypass', layout)
@OrgApp.form(model=ResourceCollection, name='new-daily-item',
template='form.pt', permission=Private, form=get_item_form)
def handle_new_resource_item(self, request, form, layout=None):
return handle_new_resource(self, request, form, 'daily-item', layout)
def handle_new_resource(self, request, form, type, layout=None):
if form.submitted(request):
resource = self.add(
title=form.title.data, type=type, timezone='Europe/Zurich'
)
form.populate_obj(resource)
request.success(RESOURCE_TYPES[type]['success'])
return morepath.redirect(request.link(resource))
layout = layout or ResourcesLayout(self, request)
layout.include_editor()
layout.include_code_editor()
layout.breadcrumbs.append(Link(RESOURCE_TYPES[type]['title'], '#'))
return {
'layout': layout,
'title': _(RESOURCE_TYPES[type]['title']),
'form': form,
'form_width': 'large'
}
@OrgApp.form(model=Resource, name='edit', template='form.pt',
permission=Private, form=get_resource_form)
def handle_edit_resource(self, request, form, layout=None):
if form.submitted(request):
form.populate_obj(self)
request.success(_("Your changes were saved"))
return morepath.redirect(request.link(self))
elif not request.POST:
form.process(obj=self)
layout = layout or ResourceLayout(self, request)
layout.include_editor()
layout.include_code_editor()
layout.breadcrumbs.append(Link(_("Edit"), '#'))
return {
'layout': layout,
'title': self.title,
'form': form,
'form_width': 'large'
}
@OrgApp.html(model=Resource, template='resource.pt', permission=Public)
def view_resource(self, request, layout=None):
return {
'title': self.title,
'resource': self,
'layout': layout or ResourceLayout(self, request),
'feed': request.link(self, name='slots'),
'resources_url': request.class_link(ResourceCollection, name='json')
}
@OrgApp.view(model=Resource, request_method='DELETE', permission=Private)
def handle_delete_resource(self, request):
request.assert_valid_csrf_token()
if not self.deletable:
raise exc.HTTPMethodNotAllowed()
tickets = TicketCollection(request.session)
def handle_reservation_tickets(reservation):
ticket = tickets.by_handler_id(reservation.token.hex)
if ticket:
close_ticket(ticket, request.current_user, request)
ticket.create_snapshot(request)
collection = ResourceCollection(request.app.libres_context)
collection.delete(
self,
including_reservations=True,
handle_reservation=handle_reservation_tickets
)
@OrgApp.form(model=Resource, permission=Private, name='cleanup',
form=ResourceCleanupForm, template='resource_cleanup.pt')
def handle_cleanup_allocations(self, request, form, layout=None):
""" Removes all unused allocations between the given dates. """
if form.submitted(request):
start, end = form.data['start'], form.data['end']
count = self.scheduler.remove_unused_allocations(start, end)
request.success(
_("Successfully removed ${count} unused allocations", mapping={
'count': count
})
)
return morepath.redirect(request.link(self))
if request.method == 'GET':
form.start.data, form.end.data = get_date_range(self, request.params)
layout = layout or ResourceLayout(self, request)
layout.breadcrumbs.append(Link(_("Clean up"), '#'))
layout.editbar_links = None
return {
'layout': layout,
'title': _("Clean up"),
'form': form
}
def predict_next_reservation(resource, request, reservations):
prediction = utils.predict_next_daterange(
tuple((r.display_start(), r.display_end()) for r in reservations)
)
if not prediction:
return None
allocation = resource.scheduler.allocations_in_range(*prediction).first()
if not allocation:
return None
whole_day = sedate.is_whole_day(*prediction, timezone=resource.timezone)
quota = utils.predict_next_value(tuple(r.quota for r in reservations)) or 1
if whole_day:
time = request.translate(_("Whole day"))
else:
time = utils.render_time_range(*prediction)
return {
'url': request.link(allocation, name='reserve'),
'start': prediction[0].isoformat(),
'end': prediction[1].isoformat(),
'quota': quota,
'wholeDay': whole_day,
'time': time
}
@OrgApp.json(model=Resource, name='reservations', permission=Public)
def get_reservations(self, request):
reservations = tuple(self.bound_reservations(request))
prediction = predict_next_reservation(self, request, reservations)
return {
'reservations': [
utils.ReservationInfo(self, reservation, request).as_dict()
for reservation in reservations
],
'prediction': prediction
}
def get_date(text, default):
try:
date = parse_date(text)
return datetime(date.year, date.month, date.day, tzinfo=default.tzinfo)
except (ISO8601Error, TypeError):
return default
def get_date_range(resource, params):
default_start, default_end = resource.calendar_date_range
start = get_date(params.get('start'), default_start)
end = get_date(params.get('end'), default_end)
start = sedate.replace_timezone(
datetime(start.year, start.month, start.day), resource.timezone)
end = sedate.replace_timezone(
datetime(end.year, end.month, end.day), resource.timezone)
if end < start:
start = end
return sedate.align_range_to_day(start, end, resource.timezone)
@OrgApp.html(model=Resource, permission=Private, name='occupancy',
template='resource_occupancy.pt')
def view_occupancy(self, request, layout=None):
# infer the default start/end date from the calendar view parameters
start, end = get_date_range(self, request.params)
query = self.reservations_with_tickets_query(start, end)
query = query.with_entities(
Reservation.start, Reservation.end, Reservation.quota,
Ticket.subtitle, Ticket.id
)
def group_key(record):
return sedate.to_timezone(record[0], self.timezone).date()
occupancy = OrderedDict()
grouped = groupby(query.all(), group_key)
Entry = namedtuple('Entry', ('start', 'end', 'title', 'quota', 'url'))
count = 0
for date, records in grouped:
occupancy[date] = tuple(
Entry(
start=sedate.to_timezone(r[0], self.timezone),
end=sedate.to_timezone(
r[1] + timedelta(microseconds=1), self.timezone),
quota=r[2],
title=r[3],
url=request.class_link(Ticket, {
'handler_code': 'RSV',
'id': r[4]
})
) for r in records
)
count += len(occupancy[date])
layout = layout or ResourceLayout(self, request)
layout.breadcrumbs.append(Link(_("Occupancy"), '#'))
layout.editbar_links = None
utilisation = 100 - self.scheduler.queries.availability_by_range(
start, end, (self.id, )
)
return {
'layout': layout,
'title': _("Occupancy"),
'occupancy': occupancy,
'resource': self,
'start': sedate.to_timezone(start, self.timezone).date(),
'end': sedate.to_timezone(end, self.timezone).date(),
'count': count,
'utilisation': utilisation
}
@OrgApp.html(model=Resource, template='resource-subscribe.pt',
permission=Private, name='subscribe')
def view_resource_subscribe(self, request, layout=None):
url = URL(request.link(self, 'ical'))
url = url.scheme('webcal')
if url.has_query_param('view'):
url = url.remove_query_param('view')
url = url.query_param('access-token', self.access_token)
url = url.as_string()
layout = layout or ResourceLayout(self, request)
layout.breadcrumbs.append(Link(_("Subscribe"), '#'))
return {
'title': self.title,
'resource': self,
'layout': layout,
'url': url
}
@OrgApp.view(model=Resource, permission=Public, name='ical')
def view_ical(self, request):
assert self.access_token is not None
if request.params.get('access-token') != self.access_token:
raise exc.HTTPForbidden()
s = utcnow() - timedelta(days=30)
e = utcnow() + timedelta(days=30 * 12)
cal = icalendar.Calendar()
cal.add('prodid', '-//OneGov//onegov.org//')
cal.add('version', '2.0')
cal.add('method', 'PUBLISH')
cal.add('x-wr-calname', self.title)
cal.add('x-wr-relcalid', self.id.hex)
# refresh every 120 minutes by default (Outlook and maybe others)
cal.add('x-published-ttl', 'PT120M')
# add allocations/reservations
date = utcnow()
path = module_path('onegov.org', 'queries/resource-ical.sql')
stmt = as_selectable_from_path(path)
records = object_session(self).execute(select(stmt.c).where(and_(
stmt.c.resource == self.id, s <= stmt.c.start, stmt.c.start <= e
)))
for r in records:
start = r.start
end = r.end + timedelta(microseconds=1)
evt = icalendar.Event()
evt.add('uid', r.token)
evt.add('summary', r.title)
evt.add('location', self.title)
evt.add('description', r.description)
evt.add('dtstart', standardize_date(start, 'UTC'))
evt.add('dtend', standardize_date(end, 'UTC'))
evt.add('dtstamp', date)
evt.add('url', request.class_link(Ticket, {
'handler_code': r.handler_code,
'id': r.ticket_id
}))
cal.add_component(evt)
return Response(
cal.to_ical(),
content_type='text/calendar',
content_disposition=f'inline; filename={self.name}.ics'
)
@OrgApp.form(model=Resource, permission=Private, name='export',
template='export.pt', form=ResourceExportForm)
def view_export(self, request, form, layout=None):
layout = layout or ResourceLayout(self, request)
layout.breadcrumbs.append(Link(_("Occupancy"), '#'))
layout.editbar_links = None
    # XXX this could be turned into a redirect to a GET view, which would
    # make it easier for scripts to get this data, but since we don't have
    # a good API story anyway, we shouldn't spend too much energy on it here
    # - instead we should do this in a comprehensive fashion
if form.submitted(request):
field_order, results = run_export(
resource=self,
start=form.data['start'],
end=form.data['end'],
nested=form.format == 'json',
formatter=layout.export_formatter(form.format)
)
return form.as_export_response(results, self.title, key=field_order)
if request.method == 'GET':
form.start.data, form.end.data = get_date_range(self, request.params)
return {
'layout': layout,
'title': _("Export"),
'form': form,
'explanation': _("Exports the reservations of the given date range.")
}
def run_export(resource, start, end, nested, formatter):
start = sedate.replace_timezone(
datetime(start.year, start.month, start.day),
resource.timezone
)
end = sedate.replace_timezone(
datetime(end.year, end.month, end.day),
resource.timezone
)
start, end = sedate.align_range_to_day(start, end, resource.timezone)
query = resource.reservations_with_tickets_query(start, end)
query = query.join(FormSubmission, Reservation.token == FormSubmission.id)
query = query.with_entities(
Reservation.start,
Reservation.end,
Reservation.quota,
Reservation.email,
Ticket.number,
Ticket.subtitle,
FormSubmission.data,
)
results = []
keywords = ('start', 'end', 'quota', 'email', 'ticket', 'title')
for record in query:
result = OrderedDict()
start = sedate.to_timezone(record[0], resource.timezone)
end = sedate.to_timezone(record[1], resource.timezone)
end += timedelta(microseconds=1)
result['start'] = formatter(start)
result['end'] = formatter(end)
result['quota'] = formatter(record[2])
result['email'] = formatter(record[3])
result['ticket'] = formatter(record[4])
result['title'] = formatter(record[5])
if nested:
result['form'] = {
k: formatter(v)
for k, v in record[6].items()
}
else:
for key, value in record[6].items():
result['form_' + key] = formatter(value)
results.append(result)
return keywords_first(keywords), results
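# Hedged usage sketch (not part of the original module): how `run_export`
# above might be called outside a view, assuming `resource` is a Resource
# instance with reservations in range; the dates and the plain `str`
# formatter are illustrative assumptions only.
#
#     from datetime import date
#     field_order, rows = run_export(
#         resource=resource,
#         start=date(2020, 1, 1),
#         end=date(2020, 1, 31),
#         nested=False,
#         formatter=str)
#     # each row is an OrderedDict keyed by start/end/quota/email/ticket/title,
#     # plus flattened 'form_*' keys because nested=False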
|
[
"onegov.org._",
"icalendar.Event",
"webob.exc.HTTPMethodNotAllowed",
"onegov.org.OrgApp.form",
"sqlalchemy.orm.object_session",
"onegov.ticket.TicketCollection",
"onegov.reservation.ResourceCollection",
"sqlalchemy.select",
"datetime.timedelta",
"onegov.org.utils.render_time_range",
"onegov.org.OrgApp.json",
"onegov.org.OrgApp.view",
"onegov.org.utils.keywords_first",
"sqlalchemy.and_",
"onegov.org.layout.ResourcesLayout",
"onegov.org.cli.close_ticket",
"onegov.core.utils.module_path",
"datetime.datetime",
"sedate.utcnow",
"onegov.org.layout.ResourceLayout",
"sedate.align_range_to_day",
"onegov.core.orm.as_selectable_from_path",
"icalendar.Calendar",
"onegov.org.elements.Link",
"webob.exc.HTTPForbidden",
"sedate.to_timezone",
"isodate.parse_date",
"sedate.is_whole_day",
"collections.namedtuple",
"sedate.standardize_date",
"collections.OrderedDict",
"onegov.org.utils.ReservationInfo",
"onegov.org.OrgApp.html"
] |
[((2107, 2193), 'onegov.org.OrgApp.html', 'OrgApp.html', ([], {'model': 'ResourceCollection', 'template': '"""resources.pt"""', 'permission': 'Public'}), "(model=ResourceCollection, template='resources.pt', permission=\n    Public)\n", (2118, 2193), False, 'from onegov.org import _, OrgApp, utils\n'), ((2557, 2626), 'onegov.org.OrgApp.json', 'OrgApp.json', ([], {'model': 'ResourceCollection', 'permission': 'Public', 'name': '"""json"""'}), "(model=ResourceCollection, permission=Public, name='json')\n", (2568, 2626), False, 'from onegov.org import _, OrgApp, utils\n'), ((3220, 3338), 'onegov.org.OrgApp.form', 'OrgApp.form', ([], {'model': 'ResourceCollection', 'name': '"""new-room"""', 'template': '"""form.pt"""', 'permission': 'Private', 'form': 'get_room_form'}), "(model=ResourceCollection, name='new-room', template='form.pt',\n    permission=Private, form=get_room_form)\n", (3231, 3338), False, 'from onegov.org import _, OrgApp, utils\n'), ((3474, 3599), 'onegov.org.OrgApp.form', 'OrgApp.form', ([], {'model': 'ResourceCollection', 'name': '"""new-daypass"""', 'template': '"""form.pt"""', 'permission': 'Private', 'form': 'get_daypass_form'}), "(model=ResourceCollection, name='new-daypass', template=\n    'form.pt', permission=Private, form=get_daypass_form)\n", (3485, 3599), False, 'from onegov.org import _, OrgApp, utils\n'), ((3740, 3865), 'onegov.org.OrgApp.form', 'OrgApp.form', ([], {'model': 'ResourceCollection', 'name': '"""new-daily-item"""', 'template': '"""form.pt"""', 'permission': 'Private', 'form': 'get_item_form'}), "(model=ResourceCollection, name='new-daily-item', template=\n    'form.pt', permission=Private, form=get_item_form)\n", (3751, 3865), False, 'from onegov.org import _, OrgApp, utils\n'), ((4713, 4822), 'onegov.org.OrgApp.form', 'OrgApp.form', ([], {'model': 'Resource', 'name': '"""edit"""', 'template': '"""form.pt"""', 'permission': 'Private', 'form': 'get_resource_form'}), "(model=Resource, name='edit', template='form.pt', permission=\n    Private, form=get_resource_form)\n", (4724, 4822), False, 'from onegov.org import _, OrgApp, utils\n'), ((5419, 5489), 'onegov.org.OrgApp.html', 'OrgApp.html', ([], {'model': 'Resource', 'template': '"""resource.pt"""', 'permission': 'Public'}), "(model=Resource, template='resource.pt', permission=Public)\n", (5430, 5489), False, 'from onegov.org import _, OrgApp, utils\n'), ((5800, 5872), 'onegov.org.OrgApp.view', 'OrgApp.view', ([], {'model': 'Resource', 'request_method': '"""DELETE"""', 'permission': 'Private'}), "(model=Resource, request_method='DELETE', permission=Private)\n", (5811, 5872), False, 'from onegov.org import _, OrgApp, utils\n'), ((6514, 6640), 'onegov.org.OrgApp.form', 'OrgApp.form', ([], {'model': 'Resource', 'permission': 'Private', 'name': '"""cleanup"""', 'form': 'ResourceCleanupForm', 'template': '"""resource_cleanup.pt"""'}), "(model=Resource, permission=Private, name='cleanup', form=\n    ResourceCleanupForm, template='resource_cleanup.pt')\n", (6525, 6640), False, 'from onegov.org import _, OrgApp, utils\n'), ((8397, 8464), 'onegov.org.OrgApp.json', 'OrgApp.json', ([], {'model': 'Resource', 'name': '"""reservations"""', 'permission': 'Public'}), "(model=Resource, name='reservations', permission=Public)\n", (8408, 8464), False, 'from onegov.org import _, OrgApp, utils\n'), ((9590, 9694), 'onegov.org.OrgApp.html', 'OrgApp.html', ([], {'model': 'Resource', 'permission': 'Private', 'name': '"""occupancy"""', 'template': '"""resource_occupancy.pt"""'}), "(model=Resource, permission=Private, name='occupancy', template='resource_occupancy.pt')\n", (9601, 9694), False, 'from onegov.org import _, OrgApp, utils\n'), ((11455, 11559), 'onegov.org.OrgApp.html', 'OrgApp.html', ([], {'model': 'Resource', 'template': '"""resource-subscribe.pt"""', 'permission': 'Private', 'name': '"""subscribe"""'}), "(model=Resource, template='resource-subscribe.pt', permission=\n    Private, name='subscribe')\n", (11466, 11559), False, 'from onegov.org import _, OrgApp, utils\n'), ((12102, 12161), 'onegov.org.OrgApp.view', 'OrgApp.view', ([], {'model': 'Resource', 'permission': 'Public', 'name': '"""ical"""'}), "(model=Resource, permission=Public, name='ical')\n", (12113, 12161), False, 'from onegov.org import _, OrgApp, utils\n'), ((13823, 13937), 'onegov.org.OrgApp.form', 'OrgApp.form', ([], {'model': 'Resource', 'permission': 'Private', 'name': '"""export"""', 'template': '"""export.pt"""', 'form': 'ResourceExportForm'}), "(model=Resource, permission=Private, name='export', template=\n    'export.pt', form=ResourceExportForm)\n", (13834, 13937), False, 'from onegov.org import _, OrgApp, utils\n'), ((6039, 6072), 'onegov.ticket.TicketCollection', 'TicketCollection', (['request.session'], {}), '(request.session)\n', (6055, 6072), False, 'from onegov.ticket import Ticket, TicketCollection\n'), ((6330, 6376), 'onegov.reservation.ResourceCollection', 'ResourceCollection', (['request.app.libres_context'], {}), '(request.app.libres_context)\n', (6348, 6376), False, 'from onegov.reservation import ResourceCollection, Resource, Reservation\n'), ((7884, 7944), 'sedate.is_whole_day', 'sedate.is_whole_day', (['*prediction'], {'timezone': 'resource.timezone'}), '(*prediction, timezone=resource.timezone)\n', (7903, 7944), False, 'import sedate\n'), ((9530, 9586), 'sedate.align_range_to_day', 'sedate.align_range_to_day', (['start', 'end', 'resource.timezone'], {}), '(start, end, resource.timezone)\n', (9555, 9586), False, 'import sedate\n'), ((10190, 10203), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10201, 10203), False, 'from collections import OrderedDict, namedtuple\n'), ((10262, 10324), 'collections.namedtuple', 'namedtuple', (['"""Entry"""', "('start', 'end', 'title', 'quota', 'url')"], {}), "('Entry', ('start', 'end', 'title', 'quota', 'url'))\n", (10272, 10324), False, 'from collections import OrderedDict, namedtuple\n'), ((12425, 12445), 'icalendar.Calendar', 'icalendar.Calendar', ([], {}), '()\n', (12443, 12445), False, 'import icalendar\n'), ((12800, 12808), 'sedate.utcnow', 'utcnow', ([], {}), '()\n', (12806, 12808), False, 'from sedate import utcnow, standardize_date\n'), ((12820, 12874), 'onegov.core.utils.module_path', 'module_path', (['"""onegov.org"""', '"""queries/resource-ical.sql"""'], {}), "('onegov.org', 'queries/resource-ical.sql')\n", (12831, 12874), False, 'from onegov.core.utils import module_path\n'), ((12886, 12915), 'onegov.core.orm.as_selectable_from_path', 'as_selectable_from_path', (['path'], {}), '(path)\n', (12909, 12915), False, 'from onegov.core.orm import as_selectable_from_path\n'), ((15392, 15448), 'sedate.align_range_to_day', 'sedate.align_range_to_day', (['start', 'end', 'resource.timezone'], {}), '(start, end, resource.timezone)\n', (15417, 15448), False, 'import sedate\n'), ((1198, 1222), 'onegov.org._', '_', (['"""Added a new daypass"""'], {}), "('Added a new daypass')\n", (1199, 1222), False, 'from onegov.org import _, OrgApp, utils\n'), ((1241, 1257), 'onegov.org._', '_', (['"""New daypass"""'], {}), "('New daypass')\n", (1242, 1257), False, 'from onegov.org import _, OrgApp, utils\n'), ((1332, 1353), 'onegov.org._', '_', (['"""Added a new room"""'], {}), "('Added a new room')\n", (1333, 1353), False, 'from onegov.org import _, OrgApp, utils\n'), ((1372, 1385), 'onegov.org._', '_', (['"""New room"""'], {}), "('New room')\n", (1373, 1385), False, 'from onegov.org import _, OrgApp, utils\n'), ((1463, 1484), 'onegov.org._', '_', (['"""Added a new item"""'], {}), "('Added a new item')\n", (1464, 1484), False, 'from onegov.org import _, OrgApp, utils\n'), ((1503, 1516), 'onegov.org._', '_', (['"""New Item"""'], {}), "('New Item')\n", (1504, 1516), False, 'from onegov.org import _, OrgApp, utils\n'), ((2280, 2297), 'onegov.org._', '_', (['"""Reservations"""'], {}), "('Reservations')\n", (2281, 2297), False, 'from onegov.org import _, OrgApp, utils\n'), ((4397, 4427), 'onegov.org.layout.ResourcesLayout', 'ResourcesLayout', (['self', 'request'], {}), '(self, request)\n', (4412, 4427), False, 'from onegov.org.layout import ResourcesLayout, ResourceLayout\n'), ((4519, 4559), 'onegov.org.elements.Link', 'Link', (["RESOURCE_TYPES[type]['title']", '"""#"""'], {}), "(RESOURCE_TYPES[type]['title'], '#')\n", (4523, 4559), False, 'from onegov.org.elements import Link\n'), ((4618, 4650), 'onegov.org._', '_', (["RESOURCE_TYPES[type]['title']"], {}), "(RESOURCE_TYPES[type]['title'])\n", (4619, 4650), False, 'from onegov.org import _, OrgApp, utils\n'), ((5146, 5175), 'onegov.org.layout.ResourceLayout', 'ResourceLayout', (['self', 'request'], {}), '(self, request)\n', (5160, 5175), False, 'from onegov.org.layout import ResourcesLayout, ResourceLayout\n'), ((5997, 6023), 'webob.exc.HTTPMethodNotAllowed', 'exc.HTTPMethodNotAllowed', ([], {}), '()\n', (6021, 6023), False, 'from webob import exc\n'), ((7290, 7319), 'onegov.org.layout.ResourceLayout', 'ResourceLayout', (['self', 'request'], {}), '(self, request)\n', (7304, 7319), False, 'from onegov.org.layout import ResourcesLayout, ResourceLayout\n'), ((7465, 7478), 'onegov.org._', '_', (['"""Clean up"""'], {}), "('Clean up')\n", (7466, 7478), False, 'from onegov.org import _, OrgApp, utils\n'), ((8118, 8154), 'onegov.org.utils.render_time_range', 'utils.render_time_range', (['*prediction'], {}), '(*prediction)\n', (8141, 8154), False, 'from onegov.org import _, OrgApp, utils\n'), ((8894, 8910), 'isodate.parse_date', 'parse_date', (['text'], {}), '(text)\n', (8904, 8910), False, 'from isodate import parse_date, ISO8601Error\n'), ((8926, 8990), 'datetime.datetime', 'datetime', (['date.year', 'date.month', 'date.day'], {'tzinfo': 'default.tzinfo'}), '(date.year, date.month, date.day, tzinfo=default.tzinfo)\n', (8934, 8990), False, 'from datetime import datetime, timedelta\n'), ((9309, 9353), 'datetime.datetime', 'datetime', (['start.year', 'start.month', 'start.day'], {}), '(start.year, start.month, start.day)\n', (9317, 9353), False, 'from datetime import datetime, timedelta\n'), ((9418, 9456), 'datetime.datetime', 'datetime', (['end.year', 'end.month', 'end.day'], {}), '(end.year, end.month, end.day)\n', (9426, 9456), False, 'from datetime import datetime, timedelta\n'), ((10900, 10929), 'onegov.org.layout.ResourceLayout', 'ResourceLayout', (['self', 'request'], {}), '(self, request)\n', (10914, 10929), False, 'from onegov.org.layout import ResourcesLayout, ResourceLayout\n'), ((11185, 11199), 'onegov.org._', '_', (['"""Occupancy"""'], {}), "('Occupancy')\n", (11186, 11199), False, 'from onegov.org import _, OrgApp, utils\n'), ((11892, 11921), 'onegov.org.layout.ResourceLayout', 'ResourceLayout', (['self', 'request'], {}), '(self, request)\n', (11906, 11921), False, 'from onegov.org.layout import ResourcesLayout, ResourceLayout\n'), ((12312, 12331), 'webob.exc.HTTPForbidden', 'exc.HTTPForbidden', ([], {}), '()\n', (12329, 12331), False, 'from webob import exc\n'), ((12341, 12349), 'sedate.utcnow', 'utcnow', ([], {}), '()\n', (12347, 12349), False, 'from sedate import utcnow, standardize_date\n'), ((12352, 12370), 'datetime.timedelta', 'timedelta', ([], {'days': '(30)'}), '(days=30)\n', (12361, 12370), False, 'from datetime import datetime, timedelta\n'), ((12379, 12387), 'sedate.utcnow', 'utcnow', ([], {}), '()\n', (12385, 12387), False, 'from sedate import utcnow, standardize_date\n'), ((12390, 12413), 'datetime.timedelta', 'timedelta', ([], {'days': '(30 * 12)'}), '(days=30 * 12)\n', (12399, 12413), False, 'from datetime import datetime, timedelta\n'), ((13178, 13195), 'icalendar.Event', 'icalendar.Event', ([], {}), '()\n', (13193, 13195), False, 'import icalendar\n'), ((14021, 14050), 'onegov.org.layout.ResourceLayout', 'ResourceLayout', (['self', 'request'], {}), '(self, request)\n', (14035, 14050), False, 'from onegov.org.layout import ResourcesLayout, ResourceLayout\n'), ((14958, 14969), 'onegov.org._', '_', (['"""Export"""'], {}), "('Export')\n", (14959, 14969), False, 'from onegov.org import _, OrgApp, utils\n'), ((15016, 15070), 'onegov.org._', '_', (['"""Exports the reservations of the given date range."""'], {}), "('Exports the reservations of the given date range.')\n", (15017, 15070), False, 'from onegov.org import _, OrgApp, utils\n'), ((15181, 15225), 'datetime.datetime', 'datetime', (['start.year', 'start.month', 'start.day'], {}), '(start.year, start.month, start.day)\n', (15189, 15225), False, 'from datetime import datetime, timedelta\n'), ((15302, 15340), 'datetime.datetime', 'datetime', (['end.year', 'end.month', 'end.day'], {}), '(end.year, end.month, end.day)\n', (15310, 15340), False, 'from datetime import datetime, timedelta\n'), ((15946, 15959), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (15957, 15959), False, 'from collections import OrderedDict, namedtuple\n'), ((15977, 16025), 'sedate.to_timezone', 'sedate.to_timezone', (['record[0]', 'resource.timezone'], {}), '(record[0], resource.timezone)\n', (15995, 16025), False, 'import sedate\n'), ((16040, 16088), 'sedate.to_timezone', 'sedate.to_timezone', (['record[1]', 'resource.timezone'], {}), '(record[1], resource.timezone)\n', (16058, 16088), False, 'import sedate\n'), ((16104, 16129), 'datetime.timedelta', 'timedelta', ([], {'microseconds': '(1)'}), '(microseconds=1)\n', (16113, 16129), False, 'from datetime import datetime, timedelta\n'), ((16709, 16733), 'onegov.org.utils.keywords_first', 'keywords_first', (['keywords'], {}), '(keywords)\n', (16723, 16733), False, 'from onegov.org.utils import group_by_column, keywords_first\n'), ((2517, 2547), 'onegov.org.layout.ResourcesLayout', 'ResourcesLayout', (['self', 'request'], {}), '(self, request)\n', (2532, 2547), False, 'from onegov.org.layout import ResourcesLayout, ResourceLayout\n'), ((4980, 5008), 'onegov.org._', '_', (['"""Your changes were saved"""'], {}), "('Your changes were saved')\n", (4981, 5008), False, 'from onegov.org import _, OrgApp, utils\n'), ((5272, 5281), 'onegov.org._', '_', (['"""Edit"""'], {}), "('Edit')\n", (5273, 5281), False, 'from onegov.org import _, OrgApp, utils\n'), ((5633, 5662), 'onegov.org.layout.ResourceLayout', 'ResourceLayout', (['self', 'request'], {}), '(self, request)\n', (5647, 5662), False, 'from onegov.org.layout import ResourcesLayout, ResourceLayout\n'), ((6216, 6267), 'onegov.org.cli.close_ticket', 'close_ticket', (['ticket', 'request.current_user', 'request'], {}), '(ticket, request.current_user, request)\n', (6228, 6267), False, 'from onegov.org.cli import close_ticket\n'), ((6981, 7060), 'onegov.org._', '_', (['"""Successfully removed ${count} unused allocations"""'], {'mapping': "{'count': count}"}), "('Successfully removed ${count} unused allocations', mapping={'count': count})\n", (6982, 7060), False, 'from onegov.org import _, OrgApp, utils\n'), ((7355, 7368), 'onegov.org._', '_', (['"""Clean up"""'], {}), "('Clean up')\n", (7356, 7368), False, 'from onegov.org import _, OrgApp, utils\n'), ((8077, 8091), 'onegov.org._', '_', (['"""Whole day"""'], {}), "('Whole day')\n", (8078, 8091), False, 'from onegov.org import _, OrgApp, utils\n'), ((10965, 10979), 'onegov.org._', '_', (['"""Occupancy"""'], {}), "('Occupancy')\n", (10966, 10979), False, 'from onegov.org import _, OrgApp, utils\n'), ((11957, 11971), 'onegov.org._', '_', (['"""Subscribe"""'], {}), "('Subscribe')\n", (11958, 11971), False, 'from onegov.org import _, OrgApp, utils\n'), ((12931, 12951), 'sqlalchemy.orm.object_session', 'object_session', (['self'], {}), '(self)\n', (12945, 12951), False, 'from sqlalchemy.orm import object_session\n'), ((12981, 13051), 'sqlalchemy.and_', 'and_', (['(stmt.c.resource == self.id)', '(s <= stmt.c.start)', '(stmt.c.start <= e)'], {}), '(stmt.c.resource == self.id, s <= stmt.c.start, stmt.c.start <= e)\n', (12985, 13051), False, 'from sqlalchemy import and_, select\n'), ((13137, 13162), 'datetime.timedelta', 'timedelta', ([], {'microseconds': '(1)'}), '(microseconds=1)\n', (13146, 13162), False, 'from datetime import datetime, timedelta\n'), ((13377, 13407), 'sedate.standardize_date', 'standardize_date', (['start', '"""UTC"""'], {}), "(start, 'UTC')\n", (13393, 13407), False, 'from sedate import utcnow, standardize_date\n'), ((13434, 13462), 'sedate.standardize_date', 'standardize_date', (['end', '"""UTC"""'], {}), "(end, 'UTC')\n", (13450, 13462), False, 'from sedate import utcnow, standardize_date\n'), ((14086, 14100), 'onegov.org._', '_', (['"""Occupancy"""'], {}), "('Occupancy')\n", (14087, 14100), False, 'from onegov.org import _, OrgApp, utils\n'), ((3192, 3209), 'onegov.org._', '_', (['"""Reservations"""'], {}), "('Reservations')\n", (3193, 3209), False, 'from onegov.org import _, OrgApp, utils\n'), ((10121, 10165), 'sedate.to_timezone', 'sedate.to_timezone', (['record[0]', 'self.timezone'], {}), '(record[0], self.timezone)\n', (10139, 10165), False, 'import sedate\n'), ((11276, 11316), 'sedate.to_timezone', 'sedate.to_timezone', (['start', 'self.timezone'], {}), '(start, self.timezone)\n', (11294, 11316), False, 'import sedate\n'), ((11340, 11378), 'sedate.to_timezone', 'sedate.to_timezone', (['end', 'self.timezone'], {}), '(end, self.timezone)\n', (11358, 11378), False, 'import sedate\n'), ((12960, 12974), 'sqlalchemy.select', 'select', (['stmt.c'], {}), '(stmt.c)\n', (12966, 12974), False, 'from sqlalchemy import and_, select\n'), ((8685, 8734), 'onegov.org.utils.ReservationInfo', 'utils.ReservationInfo', (['self', 'reservation', 'request'], {}), '(self, reservation, request)\n', (8706, 8734), False, 'from onegov.org import _, OrgApp, utils\n'), ((10448, 10487), 'sedate.to_timezone', 'sedate.to_timezone', (['r[0]', 'self.timezone'], {}), '(r[0], self.timezone)\n', (10466, 10487), False, 'import sedate\n'), ((10556, 10581), 'datetime.timedelta', 'timedelta', ([], {'microseconds': '(1)'}), '(microseconds=1)\n', (10565, 10581), False, 'from datetime import datetime, timedelta\n')]
|
# -*- coding: utf-8 -*-
'''
Preprocess the training material and save each detected face as its own image.
Only do this if your training material contains many multi-person photos.
Put the training material in the Prescreen folder.
Required structure:
-+-PersonA-+-1.jpg
| +-2.jpg
| +-...
|
+-PersonB-+-1.jpg
| +-2.jpg
| +-...
|
+-PersonC-+-1.jpg
+-2.jpg
+-...
'''
from PIL import Image
import psutil
import face_recognition
import os
from datetime import datetime
import threading
import time
SEE_ALL_FACES=False
WINDOWS=os.sep=="\\"
SS=os.sep
ERROR_INFO=""
def TIS(n):
time.sleep(n)
# get the file extension
def __fex(path):
ex=os.path.splitext(path)[1]
return ex[1:]
# rename files
def __renameFile():
dir = ".{0}Prescreen".format(SS)#./Prescreen目录
folder = (os.listdir(dir))#显示预筛选文件夹下的人物文件夹#./Prescreen/*
for person in folder:#./Prescreen/person
personDir = ".{0}Prescreen{1}".format(SS,SS)+person
pic = (os.listdir(personDir))#./Prescreen/person/*
for file in pic:#./Prescreen/person/xxx.jpg
time=datetime.now()
srcFile = dir + SS + person + SS + file#./Prescreen/person/xxx.jpg
            #dst=./Prescreen/{person}/{seconds part of the timestamp}.{ext}
TI=str(time)[17:]
dstFile = dir + SS + "{0}{1}{2}.{3}".format(person,SS,TI.replace(" ",""),__fex(srcFile))
try:
                os.rename(srcFile,dstFile)# rename
except Exception as e:
print(e)
else:
pass
pic = (os.listdir(personDir))#./Prescreen/person/*
n = 1
for file in pic:#./Prescreen/person/file
srcFile = personDir + SS + file#./Prescreen/person/xxx.jpg
#dst=./Prescreen/person/{n}.{ext}
dstFile = personDir + SS + "{0}.{1}".format(n,__fex(srcFile))
try:
os.rename(srcFile,dstFile)
except Exception as e:
print(e)
else:
pass
n += 1
#Find faces in pictures
def __checkFaces(file,person):
global ERROR_INFO
try:
# Load the jpg file into a numpy array
inputPic = file
image = face_recognition.load_image_file(inputPic)
# Find all the faces in the image using the default HOG-based model.
# This method is fairly accurate, but not as accurate as the CNN model and not GPU accelerated.
# See also: find_faces_in_picture_cnn.py
#CNN:
#face_locations = face_recognition.face_locations(image, number_of_times_to_upsample=0, model="cnn")
face_locations = face_recognition.face_locations(image)
faceNum = len(face_locations)
print("Found \033[1;33;40m{0}\033[0m: face(s) in \033[1;35;40m{1}\033[0m: photograph.".format(faceNum ,file), end = " ==> ")
for face_location in face_locations:
# Print the location of each face in this image
top, right, bottom, left = face_location
print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))
            # expand the crop upward by 200/150/100 px, whichever keeps it inside the image
            for margin in (200, 150, 100):
                if top - margin >= 0:
                    T = top - margin
                    break
            else:
                T = top
            B = bottom + 100
            # expand the crop to the left by 100 px when possible
            L = left - 100 if left >= 100 else left
            R = right + 100
TIS(0.2)
print(T,B,L,R)
face_image = image[T:B, L:R]
pil_image = Image.fromarray(face_image)
if(SEE_ALL_FACES):
pil_image.show()
time=str(datetime.now())[17:]
#.{/}Prescreen{/}{person}{/}FRS{time}
pil_image.save(".{0}Prescreen{1}{2}{3}FRS{4}.{5}".format(SS,SS,person,SS,time,__fex(file)))
except Exception as e:
ERROR_INFO="{0}\n{1}".format(ERROR_INFO,e)
print("\033[1;32;41m{0}\033[0m".format(e))
raise e
return faceNum
#MainX
def filePrescreen():
print("Prescreen Start......\n")
__renameFile()
dir = ".{0}Prescreen".format(SS)
folder = (os.listdir(dir))#./Prescreen/*
for person in folder:#./Prescreen/person/
personDir = ".{0}Prescreen{1}".format(SS,SS)+person#./Prescreen/person/
pic = (os.listdir(personDir))#./Prescreen/person/*
        if len(pic) >= 20 and not WINDOWS:# 20 or more images under ./Prescreen/person/
taskNum = int(len(pic)/4)
taskLef = len(pic)%4
            # create new worker threads
thread1 = __TaskSubmit("1", pic[0:taskNum],person)
thread2 = __TaskSubmit("2", pic[taskNum:taskNum*2],person)
thread3 = __TaskSubmit("3", pic[taskNum*2:taskNum*3],person)
thread4 = __TaskSubmit("4", pic[taskNum*3:taskNum*4],person)
if taskLef==0:
thread1.start()
thread2.start()
thread3.start()
thread4.start()
thread1.join()
thread2.join()
thread3.join()
thread4.join()
else:
thread5 = __TaskSubmit("5", pic[taskNum*4:taskNum*4+taskLef],person)
thread1.start()
thread2.start()
thread3.start()
thread4.start()
thread5.start()
thread1.join()
thread2.join()
thread3.join()
thread4.join()
thread5.join()
else:
for file in pic:#./Prescreen/person/xxx.jpg
                time=datetime.now()# get the current time
srcFile = personDir + SS + file #"./Prescreen/"+person+ "/" +file
if __checkFaces(srcFile,person)==0:
#./Prescreen/person/rm{time}
dstFile = personDir + SS +"rm{0}".format(str(time)[17:].replace(" ",""))
else:
                    #./Prescreen/person/1F{time}.{ext}
dstFile = personDir + SS +"1F{0}.{1}".format(str(time)[17:].replace(" ",""),__fex(srcFile))
try:
os.rename(srcFile,dstFile)
except Exception as e:
print(e)
else:
pass
__rmFiles()
def doTask(who,person):
for f in who:#f=(xxx.jpg)
        time=datetime.now()# get the current time
srcFile = "./Prescreen/{0}/{1}".format(person,f)#./Prescreen/{person}/{xxx.jpg}
if __checkFaces(srcFile,person)==0:
#./Prescreen/person/rm{}{}
dstFile = "./Prescreen/{0}/rm{1}.{2}".format(person,str(time)[17:].replace(" ",""),__fex(srcFile))
else:
dstFile = "./Prescreen/{0}/1F{1}.{2}".format(person,str(time)[17:].replace(" ",""),__fex(srcFile))
try:
os.rename(srcFile,dstFile)
except Exception as e:
print(e)
else:
pass
class __TaskSubmit (threading.Thread):
def __init__(self,id ,who,person):
threading.Thread.__init__(self)
self.id = id
self.who = who
self.person = person
self.result="1"
def run(self):
print ("开始线程:" + self.id + " on stat " + self.result)
doTask(self.who,self.person)
self.result="0"
print ("退出线程:" + self.id + " on stat " + self.result)
def __rmFiles():
print("\nDelete files...")
dir = ".{0}Prescreen".format(SS)
    folder = (os.listdir(dir))# list the person folders under the prescreen folder #./Prescreen/*
for person in folder:#./Prescreen/person
if WINDOWS:
try:
#del .\Prescreen\{person}\rm*
commandInput = 'del /S /Q .\\Prescreen\\' + person + "\\rm*"
commandImplementation = os.popen(commandInput)
print("Del...")
except Exception as e:
print(e)
try:
#del .\Prescreen\{person}\rm*
commandInput = 'del /S /Q .\\Prescreen\\' + person + "\\1F*"
commandImplementation = os.popen(commandInput)
print("Del...")
except Exception as e:
print(e)
else:
try:
#rm ./Prescreen/{person}/rm*
commandInput = 'rm ./Prescreen/' + person + "/rm*"
commandImplementation = os.popen(commandInput)
except Exception as e:
print("REMOVE FILE ERROR.")
try:
#rm ./Prescreen/{person}/rm*
commandInput = 'rm ./Prescreen/' + person + "/1F*"
commandImplementation = os.popen(commandInput)
except Exception as e:
print("REMOVE FILE ERROR.")
def __killPro(second,pro):
time.sleep(second)
print("展示时间:"+str(second)+"秒")
for proc in psutil.process_iter(): # 遍历当前process
if proc.name() == pro: # 如果process的name是display
proc.kill() # 关闭该process
if __name__ == "__main__":
#True是显示识别出的图像
#SEE_ALL_FACES=True
filePrescreen()
print("\033[1;32;41m{0}\033[0m".format(ERROR_INFO))
print("\n\033[5;31;40m训练材料预处理结束,请进行人工复审。下面如果有报错,请忽略。\033[0m\n")
if SEE_ALL_FACES:
#延时5秒
__killPro(5,"display")
SystemExit()
print("\nFinish.")
|
[
"psutil.process_iter",
"threading.Thread.__init__",
"os.rename",
"os.popen",
"time.sleep",
"os.path.splitext",
"PIL.Image.fromarray",
"face_recognition.face_locations",
"face_recognition.load_image_file",
"datetime.datetime.now",
"os.listdir"
] |
[((526, 539), 'time.sleep', 'time.sleep', (['n'], {}), '(n)\n', (536, 539), False, 'import time\n'), ((696, 711), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (706, 711), False, 'import os\n'), ((3462, 3477), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (3472, 3477), False, 'import os\n'), ((6040, 6055), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (6050, 6055), False, 'import os\n'), ((7024, 7042), 'time.sleep', 'time.sleep', (['second'], {}), '(second)\n', (7034, 7042), False, 'import time\n'), ((7088, 7109), 'psutil.process_iter', 'psutil.process_iter', ([], {}), '()\n', (7107, 7109), False, 'import psutil\n'), ((570, 592), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (586, 592), False, 'import os\n'), ((848, 869), 'os.listdir', 'os.listdir', (['personDir'], {}), '(personDir)\n', (858, 869), False, 'import os\n'), ((1294, 1315), 'os.listdir', 'os.listdir', (['personDir'], {}), '(personDir)\n', (1304, 1315), False, 'import os\n'), ((1809, 1851), 'face_recognition.load_image_file', 'face_recognition.load_image_file', (['inputPic'], {}), '(inputPic)\n', (1841, 1851), False, 'import face_recognition\n'), ((2194, 2232), 'face_recognition.face_locations', 'face_recognition.face_locations', (['image'], {}), '(image)\n', (2225, 2232), False, 'import face_recognition\n'), ((3619, 3640), 'os.listdir', 'os.listdir', (['personDir'], {}), '(personDir)\n', (3629, 3640), False, 'import os\n'), ((5115, 5129), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5127, 5129), False, 'from datetime import datetime\n'), ((5667, 5698), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (5692, 5698), False, 'import threading\n'), ((946, 960), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (958, 960), False, 'from datetime import datetime\n'), ((2958, 2985), 'PIL.Image.fromarray', 'Image.fromarray', (['face_image'], {}), '(face_image)\n', (2973, 2985), False, 'from PIL import Image\n'), ((5509, 5536), 'os.rename', 'os.rename', (['srcFile', 'dstFile'], {}), '(srcFile, dstFile)\n', (5518, 5536), False, 'import os\n'), ((1197, 1224), 'os.rename', 'os.rename', (['srcFile', 'dstFile'], {}), '(srcFile, dstFile)\n', (1206, 1224), False, 'import os\n'), ((1565, 1592), 'os.rename', 'os.rename', (['srcFile', 'dstFile'], {}), '(srcFile, dstFile)\n', (1574, 1592), False, 'import os\n'), ((4552, 4566), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4564, 4566), False, 'from datetime import datetime\n'), ((6278, 6300), 'os.popen', 'os.popen', (['commandInput'], {}), '(commandInput)\n', (6286, 6300), False, 'import os\n'), ((6495, 6517), 'os.popen', 'os.popen', (['commandInput'], {}), '(commandInput)\n', (6503, 6517), False, 'import os\n'), ((6709, 6731), 'os.popen', 'os.popen', (['commandInput'], {}), '(commandInput)\n', (6717, 6731), False, 'import os\n'), ((6914, 6936), 'os.popen', 'os.popen', (['commandInput'], {}), '(commandInput)\n', (6922, 6936), False, 'import os\n'), ((3041, 3055), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3053, 3055), False, 'from datetime import datetime\n'), ((4955, 4982), 'os.rename', 'os.rename', (['srcFile', 'dstFile'], {}), '(srcFile, dstFile)\n', (4964, 4982), False, 'import os\n')]
|
import os.path
import pygame
from pygame.sprite import Sprite
from game_stats import GameStats
from settings import Settings
class Ship(Sprite):
    def __init__(self, ai_settings: Settings, screen: pygame.SurfaceType,
                 size=(0, 0), image_name="alien-invasion/images/ship1.png"):
"""Initialize the ship and set its starting position."""
super().__init__()
self.screen = screen
self.ai_settings = ai_settings
# Load the ship image and get its rect.
        # fullname = os.path.join(os.getcwd(), image_name)
try:
self.image = pygame.image.load(image_name)
except pygame.error as e:
print('Cannot load image: ', image_name)
print(e)
raise SystemExit
if size == (0, 0):
size = ai_settings.ship_size
self.image = pygame.transform.scale(self.image, size)
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
# Start each new ship at the bottom center of the screen.
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
# Store a float value for the ship's center.
self.center = float(self.rect.centerx)
# Movement Flags.
self.moving_right = False
self.moving_left = False
def blitme(self):
"""Draw the ship at its current location."""
self.screen.blit(self.image, self.rect)
def update(self, stats: GameStats):
"""Update the ship's position based on the movement flag."""
if self.moving_right and self.rect.right < self.screen_rect.right:
self.center += self.ai_settings.ship_speed_factor * stats.time_passed
if self.moving_left and self.rect.left > 0:
self.center -= self.ai_settings.ship_speed_factor * stats.time_passed
# Update rect object from self.center
self.rect.centerx = self.center
def center_ship(self):
"""Position the ship at center on screen."""
self.center = float(self.screen_rect.centerx)
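# Hedged usage sketch (not part of the original module): a minimal smoke test,
# assuming Settings() is constructible without arguments and exposes
# `ship_size`; all names below are illustrative only.
if __name__ == "__main__":
    pygame.init()
    settings = Settings()
    screen = pygame.display.set_mode((800, 600))
    ship = Ship(settings, screen)
    ship.blitme()           # draw the ship at the bottom center of the screen
    pygame.display.flip()   # push the rendered frame to the window
    pygame.quit()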
|
[
"pygame.image.load",
"pygame.transform.scale"
] |
[((887, 927), 'pygame.transform.scale', 'pygame.transform.scale', (['self.image', 'size'], {}), '(self.image, size)\n', (909, 927), False, 'import pygame\n'), ((624, 653), 'pygame.image.load', 'pygame.image.load', (['image_name'], {}), '(image_name)\n', (641, 653), False, 'import pygame\n')]
|
# coding: utf-8
import datetime
import pytest
import numpy as np
from ...models.transition.linear import ConstantVelocity
from ...predictor.information import InformationKalmanPredictor
from ...predictor.kalman import KalmanPredictor
from ...types.state import InformationState, GaussianState
from ...types.array import StateVector, CovarianceMatrix
@pytest.mark.parametrize(
"PredictorClass, transition_model, prior_mean, prior_covar",
[
( # Standard Kalman
InformationKalmanPredictor,
ConstantVelocity(noise_diff_coeff=0.1),
StateVector([-6.45, 0.7]),
CovarianceMatrix([[4.1123, 0.0013],
[0.0013, 0.0365]])
)
],
ids=["standard"]
)
def test_information(PredictorClass, transition_model,
prior_mean, prior_covar):
# Define time related variables
timestamp = datetime.datetime.now()
timediff = 2 # 2sec
new_timestamp = timestamp + datetime.timedelta(seconds=timediff)
# First do prediction in standard way
test_state = GaussianState(prior_mean, prior_covar, timestamp=timestamp)
test_predictor = KalmanPredictor(transition_model)
test_prediction = test_predictor.predict(test_state, timestamp=new_timestamp)
# define the precision matrix and information state
precision_matrix = np.linalg.inv(prior_covar)
info_state_mean = precision_matrix @ prior_mean
# Define prior information state
prior = InformationState(info_state_mean, precision_matrix, timestamp=timestamp)
# Initialise a Information filter predictor
predictor = PredictorClass(transition_model=transition_model)
# Perform and assert state prediction
prediction = predictor.predict(prior=prior,
timestamp=new_timestamp)
# reconstruct the state vector and covariance matrix
pred_covar = np.linalg.inv(prediction.precision)
pred_mean = pred_covar @ prediction.state_vector
# And do the tests
assert(np.allclose(predictor._transition_function(prior,
time_interval=new_timestamp-timestamp),
test_prediction.state_vector, 0, atol=1e-14))
assert(np.allclose(pred_mean,
test_prediction.state_vector, 0, atol=1.e-14))
assert(np.allclose(pred_covar,
test_prediction.covar, 0, atol=1.e-14))
assert(prediction.timestamp == new_timestamp)
# test that we can get to the inverse matrix
class ConstantVelocitywithInverse(ConstantVelocity):
def inverse_matrix(self, **kwargs):
return np.linalg.inv(self.matrix(**kwargs))
transition_model_winv = ConstantVelocitywithInverse(noise_diff_coeff=0.1)
predictor_winv = PredictorClass(transition_model_winv)
# Test this still works
prediction_from_inv = predictor_winv.predict(prior=prior, timestamp=new_timestamp)
assert (np.allclose(prediction.state_vector, prediction_from_inv.state_vector, 0, atol=1.e-14))
# TODO: Test with Control Model
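# Hedged note (not part of the original test): the information form exercised
# above stores the precision Y = P^-1 and the information vector y = Y @ x,
# so a GaussianState (x, P) converts exactly as in the setup code, e.g.:
#
#     Y = np.linalg.inv(prior_covar)
#     y = Y @ prior_mean
#     info_prior = InformationState(y, Y, timestamp=timestamp)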
|
[
"numpy.allclose",
"numpy.linalg.inv",
"datetime.datetime.now",
"datetime.timedelta"
] |
[((903, 926), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (924, 926), False, 'import datetime\n'), ((1358, 1384), 'numpy.linalg.inv', 'np.linalg.inv', (['prior_covar'], {}), '(prior_covar)\n', (1371, 1384), True, 'import numpy as np\n'), ((1901, 1936), 'numpy.linalg.inv', 'np.linalg.inv', (['prediction.precision'], {}), '(prediction.precision)\n', (1914, 1936), True, 'import numpy as np\n'), ((2249, 2316), 'numpy.allclose', 'np.allclose', (['pred_mean', 'test_prediction.state_vector', '(0)'], {'atol': '(1e-14)'}), '(pred_mean, test_prediction.state_vector, 0, atol=1e-14)\n', (2260, 2316), True, 'import numpy as np\n'), ((2353, 2414), 'numpy.allclose', 'np.allclose', (['pred_covar', 'test_prediction.covar', '(0)'], {'atol': '(1e-14)'}), '(pred_covar, test_prediction.covar, 0, atol=1e-14)\n', (2364, 2414), True, 'import numpy as np\n'), ((2965, 3054), 'numpy.allclose', 'np.allclose', (['prediction.state_vector', 'prediction_from_inv.state_vector', '(0)'], {'atol': '(1e-14)'}), '(prediction.state_vector, prediction_from_inv.state_vector, 0,\n atol=1e-14)\n', (2976, 3054), True, 'import numpy as np\n'), ((984, 1020), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'timediff'}), '(seconds=timediff)\n', (1002, 1020), False, 'import datetime\n')]
|
'''Set of functions to construct a graph as a combination of
smaller subgraphs (of a particular shape, defined in the shapes.py file)
'''
import graphwave
import math
import networkx as nx
import numpy as np
from shapes import *
from utils.utils import *
from shapes.shapes import *
def build_structure(width_basis, basis_type, list_shapes, start=0,
rdm_basis_plugins =False, add_random_edges=0,
plot=False, savefig=False):
'''This function creates a basis (torus, string, or cycle)
and attaches elements of the type in the list randomly along the basis.
Possibility to add random edges afterwards.
INPUT:
--------------------------------------------------------------------------------------
width_basis : width (in terms of number of nodes) of the basis
basis_type : (torus, string, or cycle)
shapes : list of shape list (1st arg: type of shape,
next args:args for building the shape,
except for the start)
start : initial nb for the first node
rdm_basis_plugins: boolean. Should the shapes be randomly placed
along the basis (True) or regularly (False)?
add_random_edges : nb of edges to randomly add on the structure
plot,savefig : plotting and saving parameters
OUTPUT:
--------------------------------------------------------------------------------------
basis : a nx graph with the particular shape
colors : labels for each role
'''
basis, role_id = eval(basis_type)(start, width_basis)
attrs = {}
for node in basis.nodes:
attrs[node] = {"attr": np.array([10, 10, 10, 10, 10, basis.degree[node]])}
nx.set_node_attributes(basis, attrs)
n_basis, n_shapes = nx.number_of_nodes(basis), len(list_shapes)
start += n_basis # indicator of the id of the next node
    # Sample (without replacement) where to attach the new motifs
if rdm_basis_plugins is True:
plugins = np.random.choice(n_basis, n_shapes, replace=False)
else:
spacing = math.floor(width_basis / n_shapes)
plugins = [int(k * spacing) for k in range(n_shapes)]
communities = [0] * n_basis
seen_shapes = {'basis': [0, n_basis]}
for p in plugins:
role_id[p] += 1
for shape_id, shape in enumerate(list_shapes):
shape_type = shape[0]
args = [start]
if len(shape)>1:
args += shape[1:]
args += [0]
args += [shape_id * 5]
print("args", *args)
print(shape_type)
graph_s, roles_graph_s = eval(shape_type)(*args)
n_s = nx.number_of_nodes(graph_s)
try:
col_start = seen_shapes[shape_type][0]
        except KeyError:
col_start = np.max(role_id) + 1
seen_shapes[shape_type] = [col_start, n_s]
# Attach the shape to the basis
basis.add_nodes_from(graph_s.nodes(data=True))
basis.add_edges_from(graph_s.edges())
basis.add_edges_from([(start, plugins[shape_id])])
role_id[plugins[shape_id]] += (-2 - 10 * seen_shapes[shape_type][0])
communities += [shape_id] * n_s
temp_labels = [r + col_start for r in roles_graph_s]
temp_labels[0] += 100 * seen_shapes[shape_type][0]
role_id += temp_labels
start += n_s
if add_random_edges > 0:
# add random edges between nodes:
for p in range(add_random_edges):
src, dest = np.random.choice(nx.number_of_nodes(basis),
2, replace=False)
print (src, dest)
basis.add_edges_from([(src, dest)])
if plot is True: plot_networkx(basis, role_id)
return basis, communities, plugins, role_id
def build_lego_structure(list_shapes, start=0, plot=False, savefig=False,
bkbone_graph_type='nx.connected_watts_strogatz_graph',
bkbone_graph_args=[4, 0.4], save2text='', add_node=10):
'''This function creates a graph from a list of building blocks on top
of a backbone graph
INPUT:
---------------------------------------------------------------------------------
list_shapes : list of shape list (1st arg: type of shape,
next args: args for building the shape, except
for the start)
bkbone_graph_type : which type of backbone graph
(default= 'nx.connected_watts_strogatz_graph')
    add_node              : number of "empty nodes" to add to the graph structures, i.e.,
nodes in the graph that do not belong to a
particular clique
bkbone_graph_args : arguments for generating the backbone graph
(except from nb of nodes, which
                            is automatically computed)
start : initial node nb
plot, savefig,save2txt: plotting and saving parameters
OUTPUT:
---------------------------------------------------------------------------------
graph : a nx graph (association of cliques/motifs
planted along a backbone structure)
communities : motif Id
role_labels : role id
label_shape : label/class of the motif. This induces
different levels of similarities among motifs
'''
graph = nx.Graph()
shape_id = [] # labels for the different shapes
role_labels = [] # labels for the different shapes
communities = [] # roles in the network
seen_shapes = {}
start = graph.number_of_nodes()
for nb_shape, shape in enumerate(list_shapes):
shape_type = shape[0]
try:
role_start, shape_id_start = seen_shapes[shape_type]
        except KeyError:
if len(role_labels) > 0:
seen_shapes[shape_type] = [np.max(role_labels) + 1, np.max(shape_id) + 1]
role_start, shape_id_start = seen_shapes[shape_type]
else:
seen_shapes[shape_type] = [0, 0]
role_start, shape_id_start = 0, 0
args = [start]
args += shape[1:]
args += [role_start]
graph_s, roles = eval(shape_type)(*args)
# Attach the shape to the basis
graph.add_nodes_from(graph_s.nodes())
graph.add_edges_from(graph_s.edges())
communities += [nb_shape] * nx.number_of_nodes(graph_s)
role_labels += roles
shape_id += [shape_id_start] * nx.number_of_nodes(graph_s)
start += graph_s.number_of_nodes()
    # Now we link the different shapes by attaching them to the underlying
# graph structure:
n_nodes, n_shapes = graph.number_of_nodes(), len(list_shapes)
graph.add_nodes_from(range(n_nodes, n_nodes + add_node))
role_labels += [n_shapes + 1] * add_node
communities += range(n_shapes, n_shapes + add_node)
shape_id += [-1] * add_node
# generate back_bone Graph
bkbone_graph_args.insert(0, n_shapes + add_node)
bkbone_graph = eval(bkbone_graph_type)(*bkbone_graph_args)
for e in bkbone_graph.edges():
ii = np.random.choice(np.where(np.array(communities) == e[0])[0], 1)[0]
jj = np.random.choice(np.where(np.array(communities) == e[1])[0], 1)[0]
graph.add_edges_from([(ii, jj)])
if plot is True: plot_networkx(graph, role_labels)
if len(save2text) > 0: saveNet2txt(graph, colors=role_labels, name='net', path=save2text)
return graph, communities, role_labels, shape_id
def create_bigger_network(nb_cells, width_cell, list_cell_shapes,
rdm_basis_plugins=True, cell_type="cycle"):
''' Automatically creates a big network by linking several instances of a
graph created by build_structure(width_basis, basis_type, list_shapes,..)
'''
width_basis, basis_type = width_cell[0]
list_shapes = list_cell_shapes[0]
    # build_structure returns four values; unpack them all
    graph, communities, plugins, roles = build_structure(width_basis, basis_type,
                                                          list_shapes, start=0,
                                                          rdm_basis_plugins=rdm_basis_plugins,
                                                          add_random_edges=0, plot=False)
    plugins = list(plugins)  # plugins may be an ndarray when placed randomly
start = graph.number_of_nodes()
for i in range(1, nb_cells):
width_basis, basis_type = width_cell[i]
list_shapes = list_cell_shapes[i]
        graph_i, communities_i, plugins_i, roles_i = build_structure(width_basis,
                                                                     basis_type,
                                                                     list_shapes,
                                                                     start=start,
                                                                     add_random_edges=0,
                                                                     plot=False)
graph.add_nodes_from(graph_i.nodes())
graph.add_edges_from(graph_i.edges())
graph.add_edges_from([(start, start + 1)])
start += graph_i.number_of_nodes()
roles += roles_i
        plugins += list(plugins_i)
return graph, roles, plugins
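# Hedged usage sketch (not part of the original module), assuming shapes.py
# defines `cycle` and `house` generators compatible with the eval() calls
# above; the basis width and shape list below are illustrative only.
if __name__ == '__main__':
    demo_shapes = [['house'], ['house'], ['house']]
    g, communities, plugins, role_id = build_structure(
        20, 'cycle', demo_shapes, start=0, plot=False)
    print(nx.number_of_nodes(g), 'nodes,', len(set(communities)), 'motifs')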
|
[
"networkx.set_node_attributes",
"math.floor",
"numpy.max",
"networkx.Graph",
"numpy.array",
"numpy.random.choice",
"networkx.number_of_nodes"
] |
[((1827, 1863), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['basis', 'attrs'], {}), '(basis, attrs)\n', (1849, 1863), True, 'import networkx as nx\n'), ((5620, 5630), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (5628, 5630), True, 'import networkx as nx\n'), ((1888, 1913), 'networkx.number_of_nodes', 'nx.number_of_nodes', (['basis'], {}), '(basis)\n', (1906, 1913), True, 'import networkx as nx\n'), ((2116, 2166), 'numpy.random.choice', 'np.random.choice', (['n_basis', 'n_shapes'], {'replace': '(False)'}), '(n_basis, n_shapes, replace=False)\n', (2132, 2166), True, 'import numpy as np\n'), ((2195, 2229), 'math.floor', 'math.floor', (['(width_basis / n_shapes)'], {}), '(width_basis / n_shapes)\n', (2205, 2229), False, 'import math\n'), ((2749, 2776), 'networkx.number_of_nodes', 'nx.number_of_nodes', (['graph_s'], {}), '(graph_s)\n', (2767, 2776), True, 'import networkx as nx\n'), ((1771, 1821), 'numpy.array', 'np.array', (['[10, 10, 10, 10, 10, basis.degree[node]]'], {}), '([10, 10, 10, 10, 10, basis.degree[node]])\n', (1779, 1821), True, 'import numpy as np\n'), ((6651, 6678), 'networkx.number_of_nodes', 'nx.number_of_nodes', (['graph_s'], {}), '(graph_s)\n', (6669, 6678), True, 'import networkx as nx\n'), ((6747, 6774), 'networkx.number_of_nodes', 'nx.number_of_nodes', (['graph_s'], {}), '(graph_s)\n', (6765, 6774), True, 'import networkx as nx\n'), ((3600, 3625), 'networkx.number_of_nodes', 'nx.number_of_nodes', (['basis'], {}), '(basis)\n', (3618, 3625), True, 'import networkx as nx\n'), ((2881, 2896), 'numpy.max', 'np.max', (['role_id'], {}), '(role_id)\n', (2887, 2896), True, 'import numpy as np\n'), ((6122, 6141), 'numpy.max', 'np.max', (['role_labels'], {}), '(role_labels)\n', (6128, 6141), True, 'import numpy as np\n'), ((6147, 6163), 'numpy.max', 'np.max', (['shape_id'], {}), '(shape_id)\n', (6153, 6163), True, 'import numpy as np\n'), ((7397, 7418), 'numpy.array', 'np.array', (['communities'], {}), '(communities)\n', (7405, 7418), True, 'import numpy as np\n'), ((7477, 7498), 'numpy.array', 'np.array', (['communities'], {}), '(communities)\n', (7485, 7498), True, 'import numpy as np\n')]
|
# coding: utf-8
from __future__ import division, print_function, unicode_literals
import unittest
from disjoint_set import DisjointSet
class TestDisjointSet(unittest.TestCase):
def test_no_merge(self):
s = DisjointSet(3)
self.assertEqual(s._parent, [-1, -1, -1])
self.assertEqual(s._root(0), 0)
self.assertEqual(s._root(1), 1)
self.assertEqual(s._root(2), 2)
self.assertEqual(s.size(0), 1)
self.assertEqual(s.size(1), 1)
self.assertEqual(s.size(2), 1)
self.assertTrue(s.same(0, 0))
self.assertTrue(s.same(1, 1))
self.assertTrue(s.same(2, 2))
self.assertFalse(s.same(0, 1))
self.assertFalse(s.same(0, 2))
self.assertFalse(s.same(1, 0))
self.assertFalse(s.same(1, 2))
self.assertFalse(s.same(2, 0))
self.assertFalse(s.same(2, 1))
def test_merge(self):
s = DisjointSet(3)
s.merge(0, 2)
self.assertEqual(s.size(0), 2)
self.assertEqual(s.size(2), 2)
self.assertEqual(s.size(1), 1)
self.assertTrue(s.same(0, 2))
self.assertTrue(s.same(2, 0))
self.assertFalse(s.same(0, 1))
self.assertFalse(s.same(1, 0))
self.assertFalse(s.same(1, 2))
self.assertFalse(s.same(2, 1))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"disjoint_set.DisjointSet"
] |
[((1327, 1342), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1340, 1342), False, 'import unittest\n'), ((220, 234), 'disjoint_set.DisjointSet', 'DisjointSet', (['(3)'], {}), '(3)\n', (231, 234), False, 'from disjoint_set import DisjointSet\n'), ((909, 923), 'disjoint_set.DisjointSet', 'DisjointSet', (['(3)'], {}), '(3)\n', (920, 923), False, 'from disjoint_set import DisjointSet\n')]
|
import hues.dpda as DPDA
def test_zero_negation():
func = DPDA.zero_break
assert func((1, 2, 3, 4, 0, 10, 1)) == (10, 1)
assert func((1, 2, 3, 4, 5, 0)) == tuple()
def test_order_annihilation():
func = DPDA.annihilate
assert func(range(0, 10), (1, 2, 3, 4, 4, 3)) == (3,)
assert func(range(5, 12), (1, 2, 10, 11, 11, 2)) == (1, 2, 2, 11)
def test_built_order_annihilation():
f1 = DPDA.annihilator(range(5, 12))
assert f1((1, 2, 10, 11, 11, 2)) == (1, 2, 2, 11)
def test_dedup():
func = DPDA.dedup
assert func((1, 2, 3, 3, 4, 2, 1, 3, 5)) == (1, 2, 3, 4, 5)
def test_chaining():
funcs = (
DPDA.zero_break, # Take the last non-reset subset
DPDA.annihilator(range(5)), # Between 0 and 5, keep the last one
DPDA.annihilator(range(10, 15)), # Between 10 and 15, keep the last one
DPDA.dedup, # Finally remove duplicates
)
stack = (1, 2, 3, 2, 2, 0, 1, 2, 3, 2, 5, 5, 11, 3, 15, 14)
expected = (5, 15, 3, 14)
assert DPDA.apply(funcs, stack) == expected
assert DPDA.apply(funcs, (1, 1, 0)) == tuple()
|
[
"hues.dpda.apply"
] |
[((1008, 1032), 'hues.dpda.apply', 'DPDA.apply', (['funcs', 'stack'], {}), '(funcs, stack)\n', (1018, 1032), True, 'import hues.dpda as DPDA\n'), ((1054, 1082), 'hues.dpda.apply', 'DPDA.apply', (['funcs', '(1, 1, 0)'], {}), '(funcs, (1, 1, 0))\n', (1064, 1082), True, 'import hues.dpda as DPDA\n')]
|
"""Install Foremast Lathe."""
from setuptools import find_packages, setup
with open('README.rst') as readme_handle:
LONG_DESCRIPTION = readme_handle.read()
setup(
    name='foremast-ui',
description='Foremast configuration generator web UI.',
long_description=LONG_DESCRIPTION,
author='',
author_email='',
install_requires=[
'click',
'connexion',
'foremast',
],
packages=find_packages(where='src'),
package_dir={'': 'src'},
include_package_data=True,
setup_requires=['setuptools_scm'],
use_scm_version={'local_scheme': 'dirty-tag'},
entry_points={
'console_scripts': [
'foremast_ui=foremast_ui.__main__:main',
],
}, )
|
[
"setuptools.find_packages"
] |
[((429, 455), 'setuptools.find_packages', 'find_packages', ([], {'where': '"""src"""'}), "(where='src')\n", (442, 455), False, 'from setuptools import find_packages, setup\n')]
|
# -*- coding: utf-8 -*-
from django.db import models
class TimeModelMixin(models.Model):
"""Model内时间维护信息插件
注意!!:因为此mixin继承自models.Model,所以子类在继承时要确保此mixin在models.Model`出现`之前使用,
否则会出现method resolution order (MRO) 报错"""
created_time = models.DateTimeField("创建时间", auto_now_add=True, editable=False)
last_update = models.DateTimeField("最后修改时间", auto_now=True, editable=False)
class Meta:
abstract = True
class EditorModelMixin(models.Model):
"""Model内时间维护信息插件
注意!!:因为此mixin继承自models.Model,所以子类在继承时要确保此mixin在models.Model`出现`之前使用,
否则会出现method resolution order (MRO) 报错"""
creator_id = models.IntegerField("创建者")
last_editor_id = models.IntegerField("最后修改者")
class Meta:
abstract = True
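# Hedged usage sketch (not part of the original module): applying both mixins;
# per the MRO note above they must be listed before models.Model in the bases.
# `Article` and its field are illustrative names only.
#
#     class Article(TimeModelMixin, EditorModelMixin, models.Model):
#         title = models.CharField("title", max_length=100)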
|
[
"django.db.models.DateTimeField",
"django.db.models.IntegerField"
] |
[((251, 314), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""创建时间"""'], {'auto_now_add': '(True)', 'editable': '(False)'}), "('创建时间', auto_now_add=True, editable=False)\n", (271, 314), False, 'from django.db import models\n'), ((333, 394), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""最后修改时间"""'], {'auto_now': '(True)', 'editable': '(False)'}), "('最后修改时间', auto_now=True, editable=False)\n", (353, 394), False, 'from django.db import models\n'), ((634, 660), 'django.db.models.IntegerField', 'models.IntegerField', (['"""创建者"""'], {}), "('创建者')\n", (653, 660), False, 'from django.db import models\n'), ((682, 710), 'django.db.models.IntegerField', 'models.IntegerField', (['"""最后修改者"""'], {}), "('最后修改者')\n", (701, 710), False, 'from django.db import models\n')]
|
"""This module defines metaclasses used to trace the parameters passed through operation-critical classes that are members of
other libraries. These are only used in cases where it is impractical or impossible to effectively retrieve the arguments
explicitly provided by a user, as well as the default arguments for the classes being traced. Generally, tracer metaclasses will
aim to add some attributes to the class, that will collect default values, and provided arguments on the class's creation, and an
instance's call
Related
-------
:mod:`hyperparameter_hunter.importer`
This module handles the interception of certain imports in order to inject the tracer metaclasses defined in
:mod:`hyperparameter_hunter.tracers` into the inheritance structure of objects that need to be traced"""
##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter.settings import G
from hyperparameter_hunter.space import Real, Integer, Categorical
##################################################
# Import Miscellaneous Assets
##################################################
# noinspection PyProtectedMember
from inspect import signature, _empty
class KerasTracer(type):
"""This metaclass traces the default arguments and explicitly provided arguments of descendants of
`keras.engine.base_layer.Layer`. It also has special provisions for instantiating dummy Keras models if directed to"""
@classmethod
def __prepare__(mcs, name, bases, **kwargs):
namespace = dict(
__hh_default_args=[],
__hh_default_kwargs={},
__hh_used_args=[],
__hh_used_kwargs={},
)
return namespace
def __new__(mcs, name, bases, namespace, **kwargs):
class_obj = super().__new__(mcs, name, bases, dict(namespace))
all_args, all_kwargs = [], {}
signature_parameters = signature(class_obj.__init__).parameters
for k, v in signature_parameters.items():
if k not in ['self', 'args', 'kwargs']: # FLAG: Might want to remove kwargs - Could be necessary to ok "input_dim"
if ((v.kind in [v.KEYWORD_ONLY, v.POSITIONAL_OR_KEYWORD]) and v.default != _empty):
all_kwargs[k] = v.default
else:
all_args.append(k)
setattr(class_obj, '__hh_default_args', all_args)
setattr(class_obj, '__hh_default_kwargs', all_kwargs)
return class_obj
def __call__(cls, *args, **kwargs):
if getattr(G, 'use_dummy_keras_tracer', False) is True:
_args = [_ if not isinstance(_, (Real, Integer, Categorical)) else _.bounds[0] for _ in args]
_kwargs = {_k: _v if not isinstance(_v, (Real, Integer, Categorical)) else _v.bounds[0] for _k, _v in kwargs.items()}
instance = super().__call__(*_args, **_kwargs)
else:
instance = super().__call__(*args, **kwargs)
setattr(instance, '__hh_used_args', args)
setattr(instance, '__hh_used_kwargs', kwargs)
return instance
|
[
"inspect.signature"
] |
[((1950, 1979), 'inspect.signature', 'signature', (['class_obj.__init__'], {}), '(class_obj.__init__)\n', (1959, 1979), False, 'from inspect import signature, _empty\n')]
|
import unittest
from auction_system.auction import Auction
from auction_system.auction_house import AuctionHouse
from auction_system.item import Item
class TestAuctionHouse(unittest.TestCase):
def setUp(self):
self.auction_house = AuctionHouse()
def tearDown(self):
self.auction_house = None
def testAddAuctionSuccess(self):
auction = Auction(Item("Van Gogh's painting", 1000))
self.auction_house.add_auction(auction)
self.assertListEqual(self.auction_house.auctions, [auction])
def testAddAuctionFailure(self):
auction = Auction(Item("Van Gogh's painting", 1000))
self.auction_house.add_auction(auction)
self.auction_house.add_auction(auction)
self.assertListEqual(self.auction_house.auctions, [auction])
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"auction_system.auction_house.AuctionHouse",
"auction_system.item.Item"
] |
[((834, 849), 'unittest.main', 'unittest.main', ([], {}), '()\n', (847, 849), False, 'import unittest\n'), ((247, 261), 'auction_system.auction_house.AuctionHouse', 'AuctionHouse', ([], {}), '()\n', (259, 261), False, 'from auction_system.auction_house import AuctionHouse\n'), ((385, 418), 'auction_system.item.Item', 'Item', (['"""Van Gogh\'s painting"""', '(1000)'], {}), '("Van Gogh\'s painting", 1000)\n', (389, 418), False, 'from auction_system.item import Item\n'), ((601, 634), 'auction_system.item.Item', 'Item', (['"""Van Gogh\'s painting"""', '(1000)'], {}), '("Van Gogh\'s painting", 1000)\n', (605, 634), False, 'from auction_system.item import Item\n')]
|
#
# Copyright (c) 2015-2020 <NAME> <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_form.browser.multi module
This module provides multi-widgets implementation.
"""
from operator import attrgetter
from zope.interface import implementer
from zope.schema.interfaces import IDict, IField, IList, ITuple
from pyams_form.browser.widget import HTMLFormElement
from pyams_form.button import Buttons, button_and_handler
from pyams_form.interfaces.button import IActions
from pyams_form.interfaces.form import IButtonForm, IHandlerForm
from pyams_form.interfaces.widget import IFieldWidget, IMultiWidget
from pyams_form.widget import FieldWidget, MultiWidget as MultiWidgetBase
from pyams_layer.interfaces import IFormLayer
from pyams_utils.adapter import adapter_config
__docformat__ = 'restructuredtext'
from pyams_form import _ # pylint: disable=ungrouped-imports
@implementer(IButtonForm, IHandlerForm)
class FormMixin:
"""Form mixin class"""
@implementer(IMultiWidget)
class MultiWidget(HTMLFormElement, MultiWidgetBase, FormMixin):
# pylint: disable=function-redefined
"""Multi widget implementation."""
buttons = Buttons()
prefix = 'widget'
klass = 'multi-widget'
css = 'multi'
items = ()
actions = None
show_label = True # show labels for item subwidgets or not
# Internal attributes
_adapter_value_attributes = MultiWidgetBase._adapter_value_attributes + ('show_label',)
def update(self):
"""See pyams_form.interfaces.widget.IWidget."""
super().update()
self.update_actions()
self.actions.execute()
self.update_actions() # Update again, as conditions may change
def update_actions(self):
"""Update widget actions"""
self.update_allow_add_remove()
if self.name is not None:
self.prefix = self.name
registry = self.request.registry
self.actions = registry.getMultiAdapter((self, self.request, self), IActions)
self.actions.update()
@button_and_handler(_('Add'), name='add',
condition=attrgetter('allow_adding'))
def handle_add(self, action): # pylint: disable=unused-argument
"""Add button handler"""
self.append_adding_widget()
@button_and_handler(_('Remove selected'), name='remove',
condition=attrgetter('allow_removing'))
def handle_remove(self, action): # pylint: disable=unused-argument
"""Remove button handler"""
self.remove_widgets([widget.name for widget in self.widgets
if '{}.remove'.format(widget.name) in self.request.params])
@adapter_config(required=(IDict, IFormLayer),
provided=IFieldWidget)
def MultiFieldWidgetFactory(field, request): # pylint: disable=invalid-name
"""IFieldWidget factory for MultiWidget."""
return FieldWidget(field, MultiWidget(request))
@adapter_config(required=(IDict, IField, IFormLayer),
provided=IFieldWidget)
@adapter_config(required=(IList, IField, IFormLayer),
provided=IFieldWidget)
@adapter_config(required=(ITuple, IField, IFormLayer),
provided=IFieldWidget)
def MultiFieldWidget(field, value_type, request): # pylint: disable=invalid-name,unused-argument
"""IFieldWidget factory for MultiWidget."""
return MultiFieldWidgetFactory(field, request)
|
[
"pyams_form._",
"zope.interface.implementer",
"pyams_utils.adapter.adapter_config",
"operator.attrgetter",
"pyams_form.button.Buttons"
] |
[((1275, 1313), 'zope.interface.implementer', 'implementer', (['IButtonForm', 'IHandlerForm'], {}), '(IButtonForm, IHandlerForm)\n', (1286, 1313), False, 'from zope.interface import implementer\n'), ((1361, 1386), 'zope.interface.implementer', 'implementer', (['IMultiWidget'], {}), '(IMultiWidget)\n', (1372, 1386), False, 'from zope.interface import implementer\n'), ((3053, 3120), 'pyams_utils.adapter.adapter_config', 'adapter_config', ([], {'required': '(IDict, IFormLayer)', 'provided': 'IFieldWidget'}), '(required=(IDict, IFormLayer), provided=IFieldWidget)\n', (3067, 3120), False, 'from pyams_utils.adapter import adapter_config\n'), ((3317, 3392), 'pyams_utils.adapter.adapter_config', 'adapter_config', ([], {'required': '(IDict, IField, IFormLayer)', 'provided': 'IFieldWidget'}), '(required=(IDict, IField, IFormLayer), provided=IFieldWidget)\n', (3331, 3392), False, 'from pyams_utils.adapter import adapter_config\n'), ((3410, 3485), 'pyams_utils.adapter.adapter_config', 'adapter_config', ([], {'required': '(IList, IField, IFormLayer)', 'provided': 'IFieldWidget'}), '(required=(IList, IField, IFormLayer), provided=IFieldWidget)\n', (3424, 3485), False, 'from pyams_utils.adapter import adapter_config\n'), ((3503, 3579), 'pyams_utils.adapter.adapter_config', 'adapter_config', ([], {'required': '(ITuple, IField, IFormLayer)', 'provided': 'IFieldWidget'}), '(required=(ITuple, IField, IFormLayer), provided=IFieldWidget)\n', (3517, 3579), False, 'from pyams_utils.adapter import adapter_config\n'), ((1546, 1555), 'pyams_form.button.Buttons', 'Buttons', ([], {}), '()\n', (1553, 1555), False, 'from pyams_form.button import Buttons, button_and_handler\n'), ((2437, 2445), 'pyams_form._', '_', (['"""Add"""'], {}), "('Add')\n", (2438, 2445), False, 'from pyams_form import _\n'), ((2684, 2704), 'pyams_form._', '_', (['"""Remove selected"""'], {}), "('Remove selected')\n", (2685, 2704), False, 'from pyams_form import _\n'), ((2493, 2519), 'operator.attrgetter', 'attrgetter', (['"""allow_adding"""'], {}), "('allow_adding')\n", (2503, 2519), False, 'from operator import attrgetter\n'), ((2755, 2783), 'operator.attrgetter', 'attrgetter', (['"""allow_removing"""'], {}), "('allow_removing')\n", (2765, 2783), False, 'from operator import attrgetter\n')]
|
"""mcpython - a minecraft clone written in python licenced under MIT-licence
authors: uuk, xkcdjerry
original game by forgleman licenced under MIT-licence
minecraft by Mojang
blocks based on 1.14.4.jar of minecraft, downloaded on 20th of July, 2019"""
import state.StatePart
import globals as G
class UIPart(state.StatePart.StatePart):
def __init__(self, position, bboxsize, anchor_element="WS", anchor_window="WS"):
super().__init__()
self.position = position
self.bboxsize = bboxsize
self.anchor_element = anchor_element
self.anchor_window = anchor_window
def get_real_position(self):
x, y = self.position
wx, wy = G.window.get_size()
bx, by = self.bboxsize
if self.anchor_element[0] == "M":
x -= bx // 2
elif self.anchor_element[0] == "E":
x = bx - abs(x)
if self.anchor_element[1] == "M":
y -= by // 2
elif self.anchor_element[1] == "N":
y = by - abs(y)
if self.anchor_window[0] == "M":
x += wx // 2
elif self.anchor_window[0] == "E":
x = wx - abs(x)
if self.anchor_window[1] == "M":
y += wy // 2
elif self.anchor_window[1] == "N":
y = wy - abs(y)
return x, y
|
[
"globals.window.get_size"
] |
[((686, 705), 'globals.window.get_size', 'G.window.get_size', ([], {}), '()\n', (703, 705), True, 'import globals as G\n')]
|
# Generated by Django 4.0.1 on 2022-02-03 07:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Hub', '0014_alter_appointment_id_alter_customer_id_and_more'),
]
operations = [
migrations.AlterField(
model_name='photographer',
name='category',
field=models.CharField(blank=True, choices=[('Event', 'Event'), ('Fashion', 'Fashion'), ('Sports', 'Sports'), ('Food', 'Food'), ('Art_and_Portrait', 'Art_and_Portrait'), ('Architecture', 'Architecture'), ('Documentary', 'Documentary'), ('Travel', 'Travel'), ('Modelling_and_Lifestyle', 'Modelling_and_Lifestyle'), ('Nature_and_Wildlife', 'Natue_and_Wildlife'), ('Product', 'Product'), ('Photo_Journalism', 'Photo_journalism')], max_length=100, null=True),
),
]
|
[
"django.db.models.CharField"
] |
[((369, 841), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'choices': "[('Event', 'Event'), ('Fashion', 'Fashion'), ('Sports', 'Sports'), ('Food',\n 'Food'), ('Art_and_Portrait', 'Art_and_Portrait'), ('Architecture',\n 'Architecture'), ('Documentary', 'Documentary'), ('Travel', 'Travel'),\n ('Modelling_and_Lifestyle', 'Modelling_and_Lifestyle'), (\n 'Nature_and_Wildlife', 'Natue_and_Wildlife'), ('Product', 'Product'), (\n 'Photo_Journalism', 'Photo_journalism')]", 'max_length': '(100)', 'null': '(True)'}), "(blank=True, choices=[('Event', 'Event'), ('Fashion',\n 'Fashion'), ('Sports', 'Sports'), ('Food', 'Food'), ('Art_and_Portrait',\n 'Art_and_Portrait'), ('Architecture', 'Architecture'), ('Documentary',\n 'Documentary'), ('Travel', 'Travel'), ('Modelling_and_Lifestyle',\n 'Modelling_and_Lifestyle'), ('Nature_and_Wildlife',\n 'Natue_and_Wildlife'), ('Product', 'Product'), ('Photo_Journalism',\n 'Photo_journalism')], max_length=100, null=True)\n", (385, 841), False, 'from django.db import migrations, models\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""
MultiDatasetLoader class is used by DatasetLoader class to load multiple datasets
and more granular
"""
import logging
import warnings
import numpy as np
from mmf.common.registry import registry
from mmf.utils.build import build_dataloader_and_sampler, build_dataset
from mmf.utils.distributed import broadcast_scalar, is_dist_initialized, is_master
from mmf.utils.general import get_batch_size
logger = logging.getLogger(__name__)
class MultiDatasetLoader:
"""
MultiDatasetLoader class that is used for training on multiple datasets together.
"""
def __init__(self, dataset_type="train"):
self._dataset_type = dataset_type
self._is_master = is_master()
self._datasets = []
self._loaders = []
self._samplers = []
self._iterators = []
self._total_length = 0
self._per_dataset_lengths = []
self._num_datasets = 0
self._finished_iterators = {}
self._used_once = {}
@property
def dataset_type(self):
return self._dataset_type
@property
def current_dataset_name(self):
return self.current_dataset.name
@property
def num_datasets(self):
return self._num_datasets
@property
def datasets(self):
return self._datasets
@property
def loaders(self):
return self._loaders
@property
def samplers(self):
return self._samplers
@property
def iterators(self):
return self._iterators
@iterators.setter
def iterators(self, iterators):
self._iterators = iterators
@property
def current_dataset(self):
return self._chosen_dataset
# Setter only for functions which users should also be able to set
@current_dataset.setter
def current_dataset(self, dataset):
self._chosen_dataset = dataset
@property
def current_loader(self):
return self._chosen_loader
@current_loader.setter
def current_loader(self, loader):
self._chosen_loader = loader
@property
def current_index(self):
return self._loader_index
@current_index.setter
def current_index(self, index: int):
self._loader_index = index
def get_datasets(self):
return self.datasets
@property
def first_loader(self):
return self.loaders[0]
def _process_datasets(self):
if "datasets" not in self.config:
logger.warning("No datasets attribute present. Setting default to vqa2.")
datasets = "vqa2"
else:
datasets = self.config.datasets
if type(datasets) == str:
datasets = list(map(lambda x: x.strip(), datasets.split(",")))
self._given_datasets = datasets
def load(self, config):
self.build_datasets(config)
self.build_dataloaders()
def build_datasets(self, config):
self.config = config
self._process_datasets()
for dataset in self._given_datasets:
if dataset in self.config.dataset_config:
dataset_config = self.config.dataset_config[dataset]
else:
raise RuntimeError(
f"Dataset {dataset} is missing from " "dataset_config in config."
)
dataset_instance = build_dataset(dataset, dataset_config, self.dataset_type)
if dataset_instance is None:
continue
self.datasets.append(dataset_instance)
if hasattr(dataset_instance, "__len__"):
self._per_dataset_lengths.append(len(dataset_instance))
self._total_length += len(dataset_instance)
self._num_datasets = len(self.datasets)
self.current_index = 0
self.current_dataset = self.datasets[self.current_index]
self._infer_dataset_probabilities()
def build_dataloaders(self):
assert len(self._datasets) > 0, "Call build_datasets first"
for dataset_instance in self.datasets:
loader_instance, sampler_instance = build_dataloader_and_sampler(
dataset_instance, self.config.training
)
self.loaders.append(loader_instance)
self.samplers.append(sampler_instance)
self.current_loader = self.loaders[self.current_index]
def _infer_dataset_probabilities(self):
self._dataset_probabilities = [
1 / self._num_datasets for _ in range(self.num_datasets)
]
training = self.config.get("training", {})
self._proportional_sampling = training.get(
"dataset_size_proportional_sampling", True
)
if self._dataset_type != "train":
# If it is val or test, it needs to be all datasets need to be
# fully iterated as metrics will be calculated in eval mode
# over complete datasets
self._proportional_sampling = True
if self._proportional_sampling is True and len(self._per_dataset_lengths) > 0:
self._dataset_probabilities = self._per_dataset_lengths[:]
self._dataset_probabilities = [
prob / self._total_length for prob in self._dataset_probabilities
]
def __len__(self):
# Since, this is iterator, we need to return total length == number of batches
return self._total_length // get_batch_size()
def __iter__(self):
if self._num_datasets == 1:
return iter(self.loaders[0])
# Clear off old iterators
self.iterators = []
for loader in self.loaders:
self.iterators.append(iter(loader))
self._chosen_iterator = self.iterators[self.current_index]
return self
def __next__(self):
try:
next_batch = next(self._chosen_iterator)
except StopIteration:
if (
self._proportional_sampling is True
or len(self._used_once) != self.num_datasets
):
self._finished_iterators[self.current_index] = 1
if len(self._finished_iterators) == self.num_datasets:
raise
else:
self.change_dataloader()
next_batch = next(self._chosen_iterator)
else:
raise
self._used_once[self.current_index] = 1
return next_batch
def change_dataloader(self):
if self.num_datasets <= 1:
return
choice = 0
if self._is_master:
choice = np.random.choice(
self.num_datasets, 1, p=self._dataset_probabilities
)[0]
while choice in self._finished_iterators:
choice = np.random.choice(
self.num_datasets, 1, p=self._dataset_probabilities
)[0]
choice = broadcast_scalar(choice, 0, device=registry.get("current_device"))
self.current_index = choice
self.current_dataset = self.datasets[self.current_index]
self.current_loader = self.loaders[self.current_index]
self._chosen_iterator = self.iterators[self.current_index]
def verbose_dump(self, *args, **kwargs):
self._chosen_dataset.verbose_dump(*args, **kwargs)
def prepare_batch(self, batch):
if not hasattr(self._chosen_dataset, "prepare_batch"):
warnings.warn(
f"{self._chosen_dataset.dataset_name} doesn't define 'prepare_batch' "
+ "method. You are expected to prepare and move your batch to "
+ "CUDA device yourself."
)
else:
batch = self._chosen_dataset.prepare_batch(batch)
self.change_dataloader()
return batch
def seed_sampler(self, epoch):
if is_dist_initialized():
for sampler in self._samplers:
if sampler is not None and hasattr(sampler, "set_epoch"):
sampler.set_epoch(epoch)
|
[
"numpy.random.choice",
"mmf.utils.distributed.is_master",
"logging.getLogger",
"mmf.utils.general.get_batch_size",
"mmf.common.registry.registry.get",
"mmf.utils.build.build_dataset",
"mmf.utils.build.build_dataloader_and_sampler",
"warnings.warn",
"mmf.utils.distributed.is_dist_initialized"
] |
[((462, 489), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (479, 489), False, 'import logging\n'), ((735, 746), 'mmf.utils.distributed.is_master', 'is_master', ([], {}), '()\n', (744, 746), False, 'from mmf.utils.distributed import broadcast_scalar, is_dist_initialized, is_master\n'), ((7852, 7873), 'mmf.utils.distributed.is_dist_initialized', 'is_dist_initialized', ([], {}), '()\n', (7871, 7873), False, 'from mmf.utils.distributed import broadcast_scalar, is_dist_initialized, is_master\n'), ((3362, 3419), 'mmf.utils.build.build_dataset', 'build_dataset', (['dataset', 'dataset_config', 'self.dataset_type'], {}), '(dataset, dataset_config, self.dataset_type)\n', (3375, 3419), False, 'from mmf.utils.build import build_dataloader_and_sampler, build_dataset\n'), ((4111, 4179), 'mmf.utils.build.build_dataloader_and_sampler', 'build_dataloader_and_sampler', (['dataset_instance', 'self.config.training'], {}), '(dataset_instance, self.config.training)\n', (4139, 4179), False, 'from mmf.utils.build import build_dataloader_and_sampler, build_dataset\n'), ((5429, 5445), 'mmf.utils.general.get_batch_size', 'get_batch_size', ([], {}), '()\n', (5443, 5445), False, 'from mmf.utils.general import get_batch_size\n'), ((7436, 7624), 'warnings.warn', 'warnings.warn', (['(f"{self._chosen_dataset.dataset_name} doesn\'t define \'prepare_batch\' " +\n \'method. You are expected to prepare and move your batch to \' +\n \'CUDA device yourself.\')'], {}), '(\n f"{self._chosen_dataset.dataset_name} doesn\'t define \'prepare_batch\' " +\n \'method. You are expected to prepare and move your batch to \' +\n \'CUDA device yourself.\')\n', (7449, 7624), False, 'import warnings\n'), ((6609, 6678), 'numpy.random.choice', 'np.random.choice', (['self.num_datasets', '(1)'], {'p': 'self._dataset_probabilities'}), '(self.num_datasets, 1, p=self._dataset_probabilities)\n', (6625, 6678), True, 'import numpy as np\n'), ((6956, 6986), 'mmf.common.registry.registry.get', 'registry.get', (['"""current_device"""'], {}), "('current_device')\n", (6968, 6986), False, 'from mmf.common.registry import registry\n'), ((6792, 6861), 'numpy.random.choice', 'np.random.choice', (['self.num_datasets', '(1)'], {'p': 'self._dataset_probabilities'}), '(self.num_datasets, 1, p=self._dataset_probabilities)\n', (6808, 6861), True, 'import numpy as np\n')]
|
from setuptools import setup
setup(
name='linkedin',
version='0.1',
description='A package to assist in collecting data from linkedin pages.',
author='kohlert',
license='MIT',
keywords='webscraper job search linkedin selenium',
url='https://github.com/kohlert/data_science/tree/master/Web_Scrapers/linkedin',
packages=['linkedin', 'linkedin.local_drivers'],
install_requires=['selenium', 'pandas'],
)
|
[
"setuptools.setup"
] |
[((30, 428), 'setuptools.setup', 'setup', ([], {'name': '"""linkedin"""', 'version': '"""0.1"""', 'description': '"""A package to assist in collecting data from linkedin pages."""', 'author': '"""kohlert"""', 'license': '"""MIT"""', 'keywords': '"""webscraper job search linkedin selenium"""', 'url': '"""https://github.com/kohlert/data_science/tree/master/Web_Scrapers/linkedin"""', 'packages': "['linkedin', 'linkedin.local_drivers']", 'install_requires': "['selenium', 'pandas']"}), "(name='linkedin', version='0.1', description=\n 'A package to assist in collecting data from linkedin pages.', author=\n 'kohlert', license='MIT', keywords=\n 'webscraper job search linkedin selenium', url=\n 'https://github.com/kohlert/data_science/tree/master/Web_Scrapers/linkedin'\n , packages=['linkedin', 'linkedin.local_drivers'], install_requires=[\n 'selenium', 'pandas'])\n", (35, 428), False, 'from setuptools import setup\n')]
|
import os
import pandas as pd
from sklearn import ensemble
from sklearn import preprocessing
from sklearn import metrics
import joblib
import numpy as np
from . import dispatcher
def predict(test_data_path, model_type, model_path):
df = pd.read_csv(test_data_path)
test_idx = df["id"].values
predictions = None
for FOLD in range(5):
df = pd.read_csv(test_data_path)
encoders = joblib.load(os.path.join(model_path, f"{model_type}_{FOLD}_label_encoder.pkl"))
cols = joblib.load(os.path.join(model_path, f"{model_type}_{FOLD}_columns.pkl"))
for c in encoders:
lbl = encoders[c]
df.loc[:, c] = df.loc[:, c].astype(str).fillna("NONE")
df.loc[:, c] = lbl.transform(df[c].values.tolist())
clf = joblib.load(os.path.join(model_path, f"{model_type}_{FOLD}.pkl"))
df = df[cols]
preds = clf.predict_proba(df)[:, 1]
if FOLD == 0:
predictions = preds
else:
predictions += preds
predictions /= 5
sub = pd.DataFrame(np.column_stack((test_idx, predictions)), columns=["id", "target"])
return sub
if __name__ == "__main__":
submission = predict(test_data_path="input/test_cat.csv",
model_type="randomforest",
model_path="models/")
submission.loc[:, "id"] = submission.loc[:, "id"].astype(int)
submission.to_csv(f"models/rf_submission.csv", index=False)
|
[
"pandas.read_csv",
"os.path.join",
"numpy.column_stack"
] |
[((244, 271), 'pandas.read_csv', 'pd.read_csv', (['test_data_path'], {}), '(test_data_path)\n', (255, 271), True, 'import pandas as pd\n'), ((366, 393), 'pandas.read_csv', 'pd.read_csv', (['test_data_path'], {}), '(test_data_path)\n', (377, 393), True, 'import pandas as pd\n'), ((1086, 1126), 'numpy.column_stack', 'np.column_stack', (['(test_idx, predictions)'], {}), '((test_idx, predictions))\n', (1101, 1126), True, 'import numpy as np\n'), ((425, 491), 'os.path.join', 'os.path.join', (['model_path', 'f"""{model_type}_{FOLD}_label_encoder.pkl"""'], {}), "(model_path, f'{model_type}_{FOLD}_label_encoder.pkl')\n", (437, 491), False, 'import os\n'), ((520, 580), 'os.path.join', 'os.path.join', (['model_path', 'f"""{model_type}_{FOLD}_columns.pkl"""'], {}), "(model_path, f'{model_type}_{FOLD}_columns.pkl')\n", (532, 580), False, 'import os\n'), ((805, 857), 'os.path.join', 'os.path.join', (['model_path', 'f"""{model_type}_{FOLD}.pkl"""'], {}), "(model_path, f'{model_type}_{FOLD}.pkl')\n", (817, 857), False, 'import os\n')]
|
from django import template
register = template.Library()
@register.filter
def leading_zeros(value, desired_digits):
"""
Given an integer, returns a string representation, padded with [desired_digits] zeros.
"""
num_zeros = int(desired_digits) - len(str(value))
padded_value = []
while num_zeros >= 1:
padded_value.append("0")
num_zeros = num_zeros - 1
padded_value.append(str(value))
return "".join(padded_value)
|
[
"django.template.Library"
] |
[((41, 59), 'django.template.Library', 'template.Library', ([], {}), '()\n', (57, 59), False, 'from django import template\n')]
|
import numpy as np
import tensorflow as tf
from utils.layers import PrimaryCaps, FCCaps, Length, Mask
from keras import regularizers
def efficient_capsnet_graph(input_shape):
"""
Efficient-CapsNet graph architecture.
Parameters
----------
input_shape: list
network input shape
"""
inputs = tf.keras.Input(input_shape)
#x = tf.keras.layers.Conv2D(32,5,2,activation="relu", padding='valid', kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01), activity_regularizer=regularizers.l2(0.01),kernel_initializer='he_normal')(inputs)
#x = tf.keras.layers.BatchNormalization()(x)
#x = tf.keras.layers.Conv2D(64,4,2, activation='relu', padding='valid', kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01), kernel_initializer='he_normal')(x)
#x = tf.keras.layers.BatchNormalization()(x)
#x = tf.keras.layers.Conv2D(64,3,2, activation='relu', padding='valid', kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01), kernel_initializer='he_normal')(x)
#x = tf.keras.layers.BatchNormalization()(x)
#x = tf.keras.layers.Conv2D(128,2,2,activation='relu', padding='valid', kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01),kernel_initializer='he_normal')(x)
#x = tf.keras.layers.BatchNormalization()(x)
#x = PrimaryCaps(128, (18,11), 16, 8)(x)
x = tf.keras.layers.Conv2D(32,(4,3),2,activation="relu", padding='valid', kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01), activity_regularizer=regularizers.l2(0.01),kernel_initializer='he_normal')(inputs)
x = tf.keras.layers.BatchNormalization()(x)
print (x.shape)
x = tf.keras.layers.Conv2D(32,3,2, activation='relu', padding='valid', kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01), kernel_initializer='he_normal')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Conv2D(64,3,2, activation='relu', padding='valid', kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01), kernel_initializer='he_normal')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Conv2D(64,(3,2),activation='relu', padding='valid', kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01), kernel_initializer='he_normal')(x)
x = tf.keras.layers.BatchNormalization()(x)
print (x.shape)
x = tf.keras.layers.Conv2D(128,(2,1),activation='relu', padding='valid', kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01),kernel_initializer='he_normal')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = PrimaryCaps(128,(14,10), 16, 8)(x)
digit_caps = FCCaps(2,16)(x)
digit_caps_len = Length(name='length_capsnet_output')(digit_caps)
return tf.keras.Model(inputs=inputs,outputs=[digit_caps, digit_caps_len], name='Efficient_CapsNet')
def generator_graph(input_shape):
"""
Generator graph architecture.
Parameters
----------
input_shape: list
network input shape
"""
inputs = tf.keras.Input(16*2)
#x = tf.keras.layers.Dense(198)(inputs)
#x = tf.keras.layers.Reshape(target_shape=(18,11,1))(x)
#x = tf.keras.layers.UpSampling2D(size=(3,2), interpolation='bilinear')(x) #54,22
#x = tf.keras.layers.Conv2D(16, (2,4), (2,1), kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01),padding="valid", activation=tf.nn.leaky_relu)(x) #27,19
#x = tf.keras.layers.UpSampling2D(size=(3,3), interpolation='bilinear')(x) #81,57
#x = tf.keras.layers.Conv2D(16, (3,4), kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01),padding="valid", activation=tf.nn.leaky_relu)(x) #79,55
#x = tf.keras.layers.UpSampling2D(size=(2,2), interpolation='bilinear')(x) #158,110
#x = tf.keras.layers.Conv2D(32, (3,4), kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01),padding="valid", activation=tf.nn.leaky_relu)(x) #156 105
#x = tf.keras.layers.Conv2D(32, (4,3), kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01),padding="valid", activation=tf.nn.leaky_relu)(x) #153 103
#x = tf.keras.layers.UpSampling2D(size=(2,2), interpolation='bilinear')(x) #306, 206
#x = tf.keras.layers.Conv2D(16, (4,4), kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01),padding="valid", activation=tf.nn.leaky_relu)(x)
#x = tf.keras.layers.Conv2D(1, (4,4), kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01),padding="valid", activation=tf.nn.sigmoid)(x) #300,200
x = tf.keras.layers.Dense(140)(inputs)
x = tf.keras.layers.Reshape(target_shape=(14,10,1))(x)
x = tf.keras.layers.UpSampling2D(size=(2,2), interpolation='bilinear')(x) #28,20
x = tf.keras.layers.Conv2D(16, 3, kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01),padding="valid", activation=tf.nn.leaky_relu)(x) #26,18
x = tf.keras.layers.UpSampling2D(size=(3,3), interpolation='bilinear')(x) #78,54
x = tf.keras.layers.Conv2D(16, 3, kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01),padding="valid", activation=tf.nn.leaky_relu)(x) #76,52
x = tf.keras.layers.UpSampling2D(size=(2,2), interpolation='bilinear')(x) #152,104
x = tf.keras.layers.Conv2D(32, (3,5), kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01),padding="valid", activation=tf.nn.leaky_relu)(x) #150 100
#x = tf.keras.layers.Conv2D(32, (4,3), kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01),padding="valid", activation=tf.nn.leaky_relu)(x) #153 103
#x = tf.keras.layers.UpSampling2D(size=(2,2), interpolation='bilinear')(x) #306, 206
#x = tf.keras.layers.Conv2D(16, (4,4), kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01),padding="valid", activation=tf.nn.leaky_relu)(x)
x = tf.keras.layers.Conv2D(1, 1, kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l2(0.01),padding="valid", activation=tf.nn.sigmoid)(x) #150,100
return tf.keras.Model(inputs=inputs, outputs=x, name='Generator')
def build_graph(input_shape, mode, verbose):
"""
Efficient-CapsNet graph architecture with reconstruction regularizer. The network can be initialize with different modalities.
Parameters
----------
input_shape: list
network input shape
mode: str
working mode ('train', 'test' & 'play')
verbose: bool
"""
inputs = tf.keras.Input(input_shape)
y_true = tf.keras.layers.Input(shape=(2,))
noise = tf.keras.layers.Input(shape=(2, 16))
efficient_capsnet = efficient_capsnet_graph(input_shape)
if verbose:
efficient_capsnet.summary()
print("\n\n")
digit_caps, digit_caps_len = efficient_capsnet(inputs)
noised_digitcaps = tf.keras.layers.Add()([digit_caps, noise]) # only if mode is play
masked_by_y = Mask()([digit_caps, y_true])
masked = Mask()(digit_caps)
masked_noised_y = Mask()([noised_digitcaps, y_true])
generator = generator_graph(input_shape)
if verbose:
generator.summary()
print("\n\n")
x_gen_train = generator(masked_by_y)
x_gen_eval = generator(masked)
x_gen_play = generator(masked_noised_y)
if mode == 'train':
return tf.keras.models.Model([inputs, y_true], [digit_caps_len, x_gen_train], name='Efficinet_CapsNet_Generator')
elif mode == 'test':
return tf.keras.models.Model(inputs, [digit_caps_len, x_gen_eval], name='Efficinet_CapsNet_Generator')
elif mode == 'play':
return tf.keras.models.Model([inputs, y_true, noise], [digit_caps_len, x_gen_play], name='Efficinet_CapsNet_Generator')
else:
raise RuntimeError('mode not recognized')
|
[
"utils.layers.Mask",
"keras.regularizers.l2",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Input",
"utils.layers.Length",
"utils.layers.FCCaps",
"tensorflow.keras.Model",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.layers.Add",
"utils.layers.PrimaryCaps"
] |
[((332, 359), 'tensorflow.keras.Input', 'tf.keras.Input', (['input_shape'], {}), '(input_shape)\n', (346, 359), True, 'import tensorflow as tf\n'), ((3204, 3302), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': '[digit_caps, digit_caps_len]', 'name': '"""Efficient_CapsNet"""'}), "(inputs=inputs, outputs=[digit_caps, digit_caps_len], name=\n    'Efficient_CapsNet')\n", (3218, 3302), True, 'import tensorflow as tf\n'), ((3480, 3502), 'tensorflow.keras.Input', 'tf.keras.Input', (['(16 * 2)'], {}), '(16 * 2)\n', (3494, 3502), True, 'import tensorflow as tf\n'), ((7161, 7219), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'x', 'name': '"""Generator"""'}), "(inputs=inputs, outputs=x, name='Generator')\n", (7175, 7219), True, 'import tensorflow as tf\n'), ((7591, 7618), 'tensorflow.keras.Input', 'tf.keras.Input', (['input_shape'], {}), '(input_shape)\n', (7605, 7618), True, 'import tensorflow as tf\n'), ((7632, 7665), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (7653, 7665), True, 'import tensorflow as tf\n'), ((7678, 7714), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(2, 16)'}), '(shape=(2, 16))\n', (7699, 7714), True, 'import tensorflow as tf\n'), ((1820, 1856), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (1854, 1856), True, 'import tensorflow as tf\n'), ((2122, 2158), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (2156, 2158), True, 'import tensorflow as tf\n'), ((2404, 2440), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (2438, 2440), True, 'import tensorflow as tf\n'), ((2687, 2723), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (2721, 2723), True, 'import tensorflow as tf\n'), ((2991, 3027), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (3025, 3027), True, 'import tensorflow as tf\n'), ((3044, 3077), 'utils.layers.PrimaryCaps', 'PrimaryCaps', (['(128)', '(14, 10)', '(16)', '(8)'], {}), '(128, (14, 10), 16, 8)\n', (3055, 3077), False, 'from utils.layers import PrimaryCaps, FCCaps, Length, Mask\n'), ((3101, 3114), 'utils.layers.FCCaps', 'FCCaps', (['(2)', '(16)'], {}), '(2, 16)\n', (3107, 3114), False, 'from utils.layers import PrimaryCaps, FCCaps, Length, Mask\n'), ((3143, 3179), 'utils.layers.Length', 'Length', ([], {'name': '"""length_capsnet_output"""'}), "(name='length_capsnet_output')\n", (3149, 3179), False, 'from utils.layers import PrimaryCaps, FCCaps, Length, Mask\n'), ((5350, 5376), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(140)'], {}), '(140)\n', (5371, 5376), True, 'import tensorflow as tf\n'), ((5393, 5442), 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', ([], {'target_shape': '(14, 10, 1)'}), '(target_shape=(14, 10, 1))\n', (5416, 5442), True, 'import tensorflow as tf\n'), ((5452, 5519), 'tensorflow.keras.layers.UpSampling2D', 'tf.keras.layers.UpSampling2D', ([], {'size': '(2, 2)', 'interpolation': '"""bilinear"""'}), "(size=(2, 2), interpolation='bilinear')\n", (5480, 5519), True, 'import tensorflow as tf\n'), ((5756, 5823), 'tensorflow.keras.layers.UpSampling2D', 'tf.keras.layers.UpSampling2D', ([], {'size': '(3, 3)', 'interpolation': '"""bilinear"""'}), "(size=(3, 3), interpolation='bilinear')\n", (5784, 5823), True, 'import tensorflow as tf\n'), ((6070, 6137), 'tensorflow.keras.layers.UpSampling2D', 'tf.keras.layers.UpSampling2D', ([], {'size': '(2, 2)', 'interpolation': '"""bilinear"""'}), "(size=(2, 2), interpolation='bilinear')\n", (6098, 6137), True, 'import tensorflow as tf\n'), ((7939, 7960), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (7958, 7960), True, 'import tensorflow as tf\n'), ((8028, 8034), 'utils.layers.Mask', 'Mask', ([], {}), '()\n', (8032, 8034), False, 'from utils.layers import PrimaryCaps, FCCaps, Length, Mask\n'), ((8072, 8078), 'utils.layers.Mask', 'Mask', ([], {}), '()\n', (8076, 8078), False, 'from utils.layers import PrimaryCaps, FCCaps, Length, Mask\n'), ((8113, 8119), 'utils.layers.Mask', 'Mask', ([], {}), '()\n', (8117, 8119), False, 'from utils.layers import PrimaryCaps, FCCaps, Length, Mask\n'), ((8429, 8540), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', (['[inputs, y_true]', '[digit_caps_len, x_gen_train]'], {'name': '"""Efficinet_CapsNet_Generator"""'}), "([inputs, y_true], [digit_caps_len, x_gen_train], name\n    ='Efficinet_CapsNet_Generator')\n", (8450, 8540), True, 'import tensorflow as tf\n'), ((8576, 8676), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', (['inputs', '[digit_caps_len, x_gen_eval]'], {'name': '"""Efficinet_CapsNet_Generator"""'}), "(inputs, [digit_caps_len, x_gen_eval], name=\n    'Efficinet_CapsNet_Generator')\n", (8597, 8676), True, 'import tensorflow as tf\n'), ((1667, 1688), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (1682, 1688), False, 'from keras import regularizers\n'), ((1706, 1727), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (1721, 1727), False, 'from keras import regularizers\n'), ((1750, 1771), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (1765, 1771), False, 'from keras import regularizers\n'), ((1974, 1995), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (1989, 1995), False, 'from keras import regularizers\n'), ((2013, 2034), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (2028, 2034), False, 'from keras import regularizers\n'), ((2056, 2077), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (2071, 2077), False, 'from keras import regularizers\n'), ((2256, 2277), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (2271, 2277), False, 'from keras import regularizers\n'), ((2295, 2316), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (2310, 2316), False, 'from keras import regularizers\n'), ((2338, 2359), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (2353, 2359), False, 'from keras import regularizers\n'), ((2539, 2560), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (2554, 2560), False, 'from keras import regularizers\n'), ((2578, 2599), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (2593, 2599), False, 'from keras import regularizers\n'), ((2621, 2642), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (2636, 2642), False, 'from keras import regularizers\n'), ((2843, 2864), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (2858, 2864), False, 'from keras import regularizers\n'), ((2882, 2903), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (2897, 2903), False, 'from keras import regularizers\n'), ((2925, 2946), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (2940, 2946), False, 'from keras import regularizers\n'), ((5588, 5609), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (5603, 5609), False, 'from keras import regularizers\n'), ((5627, 5648), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (5642, 5648), False, 'from keras import regularizers\n'), ((5670, 5691), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (5685, 5691), False, 'from keras import regularizers\n'), ((5897, 5918), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (5912, 5918), False, 'from keras import regularizers\n'), ((5936, 5957), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (5951, 5957), False, 'from keras import regularizers\n'), ((5979, 6000), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (5994, 6000), False, 'from keras import regularizers\n'), ((6217, 6238), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (6232, 6238), False, 'from keras import regularizers\n'), ((6256, 6277), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (6271, 6277), False, 'from keras import regularizers\n'), ((6299, 6320), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (6314, 6320), False, 'from keras import regularizers\n'), ((6986, 7007), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (7001, 7007), False, 'from keras import regularizers\n'), ((7025, 7046), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (7040, 7046), False, 'from keras import regularizers\n'), ((7068, 7089), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (7083, 7089), False, 'from keras import regularizers\n'), ((8712, 8828), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', (['[inputs, y_true, noise]', '[digit_caps_len, x_gen_play]'], {'name': '"""Efficinet_CapsNet_Generator"""'}), "([inputs, y_true, noise], [digit_caps_len, x_gen_play],\n    name='Efficinet_CapsNet_Generator')\n", (8733, 8828), True, 'import tensorflow as tf\n')]
|
import numpy as np
import servo2
import math
import time
savedir="Camera_Data/"
map_A = np.load(savedir+'servoA.npy')
map_B = np.load(savedir+'servoB.npy')
steps = len(map_A)
wait = 0.3
def vect_to_deg(x, y):
servo_A, servo_B = 0, 0
try:
A = list(map(lambda k: k >= y, map_A)).index(True)-1
if map_A[A] == y:
servo_A = A*180/(steps-1)
else:
servo_A = A*180/(steps-1) + (y-map_A[A])*(180/(steps-1))/(map_A[A+1]-map_A[A])
except:
servo_A = 180
y = map_A[steps-1]
try:
B = list(map(lambda k: k >= x, map_B)).index(True)-1
if map_B[B] == x:
servo_B = B*180/(steps-1)
else:
servo_B = B*180/(steps-1) + (x-map_B[B])*(180/(steps-1))/(map_B[B+1]-map_B[B])
except:
servo_B = 180
x = map_B[steps-1]
return servo_A, servo_B, x, y
servo2.home()
cX, cY = 0.0, 0.0
print("maps : ", map_A, map_B, "\n")
# Main
while(True):
y, x = [float(j) for j in input("Location y x :").split()]
a, b, x, y = vect_to_deg(x, y)
print("angle A: ", round(a,1), " angle B: ", round(b,1))
_ = servo2.A(a, wait)
_ = servo2.B(b, wait)
time.sleep(wait)
# Calculate Displacement
s = math.sqrt((cX-x)**2+(cY-y)**2)
cX, cY = x, y
print("Servo Set. Displacement : ", s)
|
[
"servo2.A",
"numpy.load",
"math.sqrt",
"time.sleep",
"servo2.B",
"servo2.home"
] |
[((90, 121), 'numpy.load', 'np.load', (["(savedir + 'servoA.npy')"], {}), "(savedir + 'servoA.npy')\n", (97, 121), True, 'import numpy as np\n'), ((128, 159), 'numpy.load', 'np.load', (["(savedir + 'servoB.npy')"], {}), "(savedir + 'servoB.npy')\n", (135, 159), True, 'import numpy as np\n'), ((772, 785), 'servo2.home', 'servo2.home', ([], {}), '()\n', (783, 785), False, 'import servo2\n'), ((1019, 1036), 'servo2.A', 'servo2.A', (['a', 'wait'], {}), '(a, wait)\n', (1027, 1036), False, 'import servo2\n'), ((1042, 1059), 'servo2.B', 'servo2.B', (['b', 'wait'], {}), '(b, wait)\n', (1050, 1059), False, 'import servo2\n'), ((1061, 1077), 'time.sleep', 'time.sleep', (['wait'], {}), '(wait)\n', (1071, 1077), False, 'import time\n'), ((1110, 1150), 'math.sqrt', 'math.sqrt', (['((cX - x) ** 2 + (cY - y) ** 2)'], {}), '((cX - x) ** 2 + (cY - y) ** 2)\n', (1119, 1150), False, 'import math\n')]
|
"""
Tests for the Woopra template tags and filters.
"""
from django.contrib.auth.models import User, AnonymousUser
from django.http import HttpRequest
from django.template import Context
from analytical.templatetags.woopra import WoopraNode
from analytical.tests.utils import TagTestCase, override_settings, \
SETTING_DELETED
from analytical.utils import AnalyticalException
@override_settings(WOOPRA_DOMAIN='example.com')
class WoopraTagTestCase(TagTestCase):
"""
Tests for the ``woopra`` template tag.
"""
def test_tag(self):
r = self.render_tag('woopra', 'woopra')
self.assertTrue('var woo_settings = {"domain": "example.com"};' in r, r)
def test_node(self):
r = WoopraNode().render(Context({}))
self.assertTrue('var woo_settings = {"domain": "example.com"};' in r, r)
@override_settings(WOOPRA_DOMAIN=SETTING_DELETED)
def test_no_domain(self):
self.assertRaises(AnalyticalException, WoopraNode)
@override_settings(WOOPRA_DOMAIN='this is not a domain')
def test_wrong_domain(self):
self.assertRaises(AnalyticalException, WoopraNode)
@override_settings(WOOPRA_IDLE_TIMEOUT=1234)
def test_idle_timeout(self):
r = WoopraNode().render(Context({}))
self.assertTrue('var woo_settings = {"domain": "example.com", '
'"idle_timeout": "1234"};' in r, r)
def test_custom(self):
r = WoopraNode().render(Context({'woopra_var1': 'val1',
'woopra_var2': 'val2'}))
self.assertTrue('var woo_visitor = {"var1": "val1", "var2": "val2"};'
in r, r)
@override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
def test_identify_name_and_email(self):
r = WoopraNode().render(Context({'user': User(username='test',
first_name='Firstname', last_name='Lastname',
email="<EMAIL>")}))
self.assertTrue('var woo_visitor = {"name": "Firstname Lastname", '
'"email": "<EMAIL>"};' in r, r)
@override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
def test_identify_username_no_email(self):
r = WoopraNode().render(Context({'user': User(username='test')}))
self.assertTrue('var woo_visitor = {"name": "test"};' in r, r)
@override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
def test_no_identify_when_explicit_name(self):
r = WoopraNode().render(Context({'woopra_name': 'explicit',
'user': User(username='implicit')}))
self.assertTrue('var woo_visitor = {"name": "explicit"};' in r, r)
@override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
def test_no_identify_when_explicit_email(self):
r = WoopraNode().render(Context({'woopra_email': 'explicit',
'user': User(username='implicit')}))
self.assertTrue('var woo_visitor = {"email": "explicit"};' in r, r)
@override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
def test_identify_anonymous_user(self):
r = WoopraNode().render(Context({'user': AnonymousUser()}))
self.assertTrue('var woo_visitor = {};' in r, r)
@override_settings(ANALYTICAL_INTERNAL_IPS=['1.1.1.1'])
def test_render_internal_ip(self):
req = HttpRequest()
req.META['REMOTE_ADDR'] = '1.1.1.1'
context = Context({'request': req})
r = WoopraNode().render(context)
self.assertTrue(r.startswith(
'<!-- Woopra disabled on internal IP address'), r)
self.assertTrue(r.endswith('-->'), r)
|
[
"django.contrib.auth.models.AnonymousUser",
"analytical.templatetags.woopra.WoopraNode",
"django.contrib.auth.models.User",
"django.template.Context",
"django.http.HttpRequest",
"analytical.tests.utils.override_settings"
] |
[((388, 434), 'analytical.tests.utils.override_settings', 'override_settings', ([], {'WOOPRA_DOMAIN': '"""example.com"""'}), "(WOOPRA_DOMAIN='example.com')\n", (405, 434), False, 'from analytical.tests.utils import TagTestCase, override_settings, SETTING_DELETED\n'), ((844, 892), 'analytical.tests.utils.override_settings', 'override_settings', ([], {'WOOPRA_DOMAIN': 'SETTING_DELETED'}), '(WOOPRA_DOMAIN=SETTING_DELETED)\n', (861, 892), False, 'from analytical.tests.utils import TagTestCase, override_settings, SETTING_DELETED\n'), ((988, 1043), 'analytical.tests.utils.override_settings', 'override_settings', ([], {'WOOPRA_DOMAIN': '"""this is not a domain"""'}), "(WOOPRA_DOMAIN='this is not a domain')\n", (1005, 1043), False, 'from analytical.tests.utils import TagTestCase, override_settings, SETTING_DELETED\n'), ((1142, 1185), 'analytical.tests.utils.override_settings', 'override_settings', ([], {'WOOPRA_IDLE_TIMEOUT': '(1234)'}), '(WOOPRA_IDLE_TIMEOUT=1234)\n', (1159, 1185), False, 'from analytical.tests.utils import TagTestCase, override_settings, SETTING_DELETED\n'), ((1630, 1678), 'analytical.tests.utils.override_settings', 'override_settings', ([], {'ANALYTICAL_AUTO_IDENTIFY': '(True)'}), '(ANALYTICAL_AUTO_IDENTIFY=True)\n', (1647, 1678), False, 'from analytical.tests.utils import TagTestCase, override_settings, SETTING_DELETED\n'), ((2022, 2070), 'analytical.tests.utils.override_settings', 'override_settings', ([], {'ANALYTICAL_AUTO_IDENTIFY': '(True)'}), '(ANALYTICAL_AUTO_IDENTIFY=True)\n', (2039, 2070), False, 'from analytical.tests.utils import TagTestCase, override_settings, SETTING_DELETED\n'), ((2269, 2317), 'analytical.tests.utils.override_settings', 'override_settings', ([], {'ANALYTICAL_AUTO_IDENTIFY': '(True)'}), '(ANALYTICAL_AUTO_IDENTIFY=True)\n', (2286, 2317), False, 'from analytical.tests.utils import TagTestCase, override_settings, SETTING_DELETED\n'), ((2571, 2619), 'analytical.tests.utils.override_settings', 'override_settings', ([], {'ANALYTICAL_AUTO_IDENTIFY': '(True)'}), '(ANALYTICAL_AUTO_IDENTIFY=True)\n', (2588, 2619), False, 'from analytical.tests.utils import TagTestCase, override_settings, SETTING_DELETED\n'), ((2876, 2924), 'analytical.tests.utils.override_settings', 'override_settings', ([], {'ANALYTICAL_AUTO_IDENTIFY': '(True)'}), '(ANALYTICAL_AUTO_IDENTIFY=True)\n', (2893, 2924), False, 'from analytical.tests.utils import TagTestCase, override_settings, SETTING_DELETED\n'), ((3100, 3154), 'analytical.tests.utils.override_settings', 'override_settings', ([], {'ANALYTICAL_INTERNAL_IPS': "['1.1.1.1']"}), "(ANALYTICAL_INTERNAL_IPS=['1.1.1.1'])\n", (3117, 3154), False, 'from analytical.tests.utils import TagTestCase, override_settings, SETTING_DELETED\n'), ((3208, 3221), 'django.http.HttpRequest', 'HttpRequest', ([], {}), '()\n', (3219, 3221), False, 'from django.http import HttpRequest\n'), ((3284, 3309), 'django.template.Context', 'Context', (["{'request': req}"], {}), "({'request': req})\n", (3291, 3309), False, 'from django.template import Context\n'), ((744, 755), 'django.template.Context', 'Context', (['{}'], {}), '({})\n', (751, 755), False, 'from django.template import Context\n'), ((1251, 1262), 'django.template.Context', 'Context', (['{}'], {}), '({})\n', (1258, 1262), False, 'from django.template import Context\n'), ((1448, 1503), 'django.template.Context', 'Context', (["{'woopra_var1': 'val1', 'woopra_var2': 'val2'}"], {}), "({'woopra_var1': 'val1', 'woopra_var2': 'val2'})\n", (1455, 1503), False, 'from django.template import Context\n'), ((724, 736), 'analytical.templatetags.woopra.WoopraNode', 'WoopraNode', ([], {}), '()\n', (734, 736), False, 'from analytical.templatetags.woopra import WoopraNode\n'), ((1231, 1243), 'analytical.templatetags.woopra.WoopraNode', 'WoopraNode', ([], {}), '()\n', (1241, 1243), False, 'from analytical.templatetags.woopra import WoopraNode\n'), ((1428, 1440), 'analytical.templatetags.woopra.WoopraNode', 'WoopraNode', ([], {}), '()\n', (1438, 1440), False, 'from analytical.templatetags.woopra import WoopraNode\n'), ((1735, 1747), 'analytical.templatetags.woopra.WoopraNode', 'WoopraNode', ([], {}), '()\n', (1745, 1747), False, 'from analytical.templatetags.woopra import WoopraNode\n'), ((2130, 2142), 'analytical.templatetags.woopra.WoopraNode', 'WoopraNode', ([], {}), '()\n', (2140, 2142), False, 'from analytical.templatetags.woopra import WoopraNode\n'), ((2381, 2393), 'analytical.templatetags.woopra.WoopraNode', 'WoopraNode', ([], {}), '()\n', (2391, 2393), False, 'from analytical.templatetags.woopra import WoopraNode\n'), ((2684, 2696), 'analytical.templatetags.woopra.WoopraNode', 'WoopraNode', ([], {}), '()\n', (2694, 2696), False, 'from analytical.templatetags.woopra import WoopraNode\n'), ((2981, 2993), 'analytical.templatetags.woopra.WoopraNode', 'WoopraNode', ([], {}), '()\n', (2991, 2993), False, 'from analytical.templatetags.woopra import WoopraNode\n'), ((3322, 3334), 'analytical.templatetags.woopra.WoopraNode', 'WoopraNode', ([], {}), '()\n', (3332, 3334), False, 'from analytical.templatetags.woopra import WoopraNode\n'), ((1772, 1861), 'django.contrib.auth.models.User', 'User', ([], {'username': '"""test"""', 'first_name': '"""Firstname"""', 'last_name': '"""Lastname"""', 'email': '"""<EMAIL>"""'}), "(username='test', first_name='Firstname', last_name='Lastname', email=\n    '<EMAIL>')\n", (1776, 1861), False, 'from django.contrib.auth.models import User, AnonymousUser\n'), ((2167, 2188), 'django.contrib.auth.models.User', 'User', ([], {'username': '"""test"""'}), "(username='test')\n", (2171, 2188), False, 'from django.contrib.auth.models import User, AnonymousUser\n'), ((2461, 2486), 'django.contrib.auth.models.User', 'User', ([], {'username': '"""implicit"""'}), "(username='implicit')\n", (2465, 2486), False, 'from django.contrib.auth.models import User, AnonymousUser\n'), ((2765, 2790), 'django.contrib.auth.models.User', 'User', ([], {'username': '"""implicit"""'}), "(username='implicit')\n", (2769, 2790), False, 'from django.contrib.auth.models import User, AnonymousUser\n'), ((3018, 3033), 'django.contrib.auth.models.AnonymousUser', 'AnonymousUser', ([], {}), '()\n', (3031, 3033), False, 'from django.contrib.auth.models import User, AnonymousUser\n')]
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""resize_nearest"""
import akg
import akg.tvm
from akg.tvm.hybrid import script
from akg.utils import validation_check as vc_util
from akg.utils.format_transform import get_shape
def downsampling(inputs, output_shape):
"""downsampling"""
scale_h, scale_w = [int(akg.tvm.truncdiv(inputs.shape[i], output_shape[i])) for i in range(1, 3)]
res = akg.tvm.compute(output_shape, lambda b, h, w, c: inputs[b, h * scale_h, w * scale_w, c], name="downsampling")
return res
def process_integer_scale(inputs, output_shape):
"""high performance version for integer scale"""
inputs_shape = [x.value for x in inputs.shape]
inputs_h, inputs_w = inputs_shape[1:3]
output_h, output_w = output_shape[1:3]
if inputs_h >= output_h and inputs_w >= output_w:
if inputs_h % output_h != 0 or inputs_w % output_w != 0:
return None
return downsampling(inputs, output_shape)
elif inputs_h <= output_h and inputs_w <= output_w:
if output_h % inputs_h != 0 or output_w % inputs_w != 0:
return None
from .upsampling import upsampling
return upsampling(inputs, output_shape)
else:
return None
def process_non_integer_scale(inputs, out_shape):
"""non integer scale"""
in_shape = [x.value for x in inputs.shape]
batch, height, width, channel = inputs.shape
scale_h = akg.tvm.const(1.0 * in_shape[1] / out_shape[1], "float16")
scale_w = akg.tvm.const(1.0 * in_shape[2] / out_shape[2], "float16")
index_h_fp = akg.tvm.compute([out_shape[1]], lambda i: i * scale_h, name="index_h_fp")
index_w_fp = akg.tvm.compute([out_shape[2]], lambda i: i * scale_w, name="index_w_fp")
index_h = akg.lang.cce.floor(index_h_fp)
index_w = akg.lang.cce.floor(index_w_fp)
@script
def resize(inputs, index_h, index_w, newH, newW):
out = output_tensor((batch, newH, newW, channel), inputs.dtype)
for i in range(batch):
for j in range(height):
for k in range(width):
for l in range(channel):
for m in range(newH):
for n in range(newW):
if index_h[m] == j:
if index_w[n] == k:
out[i, m, n, l] = inputs[i, j, k, l]
return out
newH = akg.tvm.const(out_shape[1], "int32")
newW = akg.tvm.const(out_shape[2], "int32")
res = resize(inputs, index_h, index_w, newH, newW)
return res
@vc_util.check_input_type(akg.tvm.tensor.Tensor, (list, tuple))
def resize_nearest(input, output_shape):
"""
Resize images using Nearest-neighbor interpolation.
Args:
input (tvm.tensor.Tensor): 4-D tensor of type float16 or float32 `("NHWC")`.
        output_shape (Union[tuple, list]): New size of the image, 4 integers `("NHWC")`.
Note:
        The batch_num("N") of input and output must be equal, and so must the channel_num("C").
Returns:
tvm.tensor.Tensor, has the same type as `input`.
"""
input_shape = get_shape(input)
vc_util.check_shape(input, 4, "input")
vc_util.check_shape(output_shape, 4, "output_shape")
vc_util.ops_dtype_check(input.dtype, vc_util.DtypeForDavinci.ALL_FLOAT)
vc_util.check_equal("input batchsize", "output batchsize", input_shape[0], output_shape[0])
vc_util.check_equal("input channel num", "output channel num", input_shape[3], output_shape[3])
res = process_integer_scale(input, output_shape)
    if res is None:
res = process_non_integer_scale(input, output_shape)
return res
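# --- Editor's addition: hedged usage sketch, not part of the original module ---
# A minimal illustration of how resize_nearest might be called, assuming an akg/TVM
# build environment; the placeholder name, dtype and shapes are illustrative assumptions.
#
#   data = akg.tvm.placeholder((1, 16, 16, 3), name="data", dtype="float16")
#   up = resize_nearest(data, (1, 32, 32, 3))    # integer scale -> upsampling path
#   odd = resize_nearest(data, (1, 24, 24, 3))   # non-integer scale -> hybrid script path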
|
[
"akg.utils.validation_check.check_input_type",
"akg.utils.validation_check.ops_dtype_check",
"akg.tvm.truncdiv",
"akg.tvm.const",
"akg.tvm.compute",
"akg.utils.format_transform.get_shape",
"akg.utils.validation_check.check_shape",
"akg.lang.cce.floor",
"akg.utils.validation_check.check_equal"
] |
[((3124, 3186), 'akg.utils.validation_check.check_input_type', 'vc_util.check_input_type', (['akg.tvm.tensor.Tensor', '(list, tuple)'], {}), '(akg.tvm.tensor.Tensor, (list, tuple))\n', (3148, 3186), True, 'from akg.utils import validation_check as vc_util\n'), ((945, 1058), 'akg.tvm.compute', 'akg.tvm.compute', (['output_shape', '(lambda b, h, w, c: inputs[b, h * scale_h, w * scale_w, c])'], {'name': '"""downsampling"""'}), "(output_shape, lambda b, h, w, c: inputs[b, h * scale_h, w *\n scale_w, c], name='downsampling')\n", (960, 1058), False, 'import akg\n'), ((1961, 2019), 'akg.tvm.const', 'akg.tvm.const', (['(1.0 * in_shape[1] / out_shape[1])', '"""float16"""'], {}), "(1.0 * in_shape[1] / out_shape[1], 'float16')\n", (1974, 2019), False, 'import akg\n'), ((2034, 2092), 'akg.tvm.const', 'akg.tvm.const', (['(1.0 * in_shape[2] / out_shape[2])', '"""float16"""'], {}), "(1.0 * in_shape[2] / out_shape[2], 'float16')\n", (2047, 2092), False, 'import akg\n'), ((2110, 2183), 'akg.tvm.compute', 'akg.tvm.compute', (['[out_shape[1]]', '(lambda i: i * scale_h)'], {'name': '"""index_h_fp"""'}), "([out_shape[1]], lambda i: i * scale_h, name='index_h_fp')\n", (2125, 2183), False, 'import akg\n'), ((2201, 2274), 'akg.tvm.compute', 'akg.tvm.compute', (['[out_shape[2]]', '(lambda i: i * scale_w)'], {'name': '"""index_w_fp"""'}), "([out_shape[2]], lambda i: i * scale_w, name='index_w_fp')\n", (2216, 2274), False, 'import akg\n'), ((2289, 2319), 'akg.lang.cce.floor', 'akg.lang.cce.floor', (['index_h_fp'], {}), '(index_h_fp)\n', (2307, 2319), False, 'import akg\n'), ((2334, 2364), 'akg.lang.cce.floor', 'akg.lang.cce.floor', (['index_w_fp'], {}), '(index_w_fp)\n', (2352, 2364), False, 'import akg\n'), ((2967, 3003), 'akg.tvm.const', 'akg.tvm.const', (['out_shape[1]', '"""int32"""'], {}), "(out_shape[1], 'int32')\n", (2980, 3003), False, 'import akg\n'), ((3015, 3051), 'akg.tvm.const', 'akg.tvm.const', (['out_shape[2]', '"""int32"""'], {}), "(out_shape[2], 'int32')\n", (3028, 3051), False, 'import akg\n'), ((3680, 3696), 'akg.utils.format_transform.get_shape', 'get_shape', (['input'], {}), '(input)\n', (3689, 3696), False, 'from akg.utils.format_transform import get_shape\n'), ((3701, 3739), 'akg.utils.validation_check.check_shape', 'vc_util.check_shape', (['input', '(4)', '"""input"""'], {}), "(input, 4, 'input')\n", (3720, 3739), True, 'from akg.utils import validation_check as vc_util\n'), ((3744, 3796), 'akg.utils.validation_check.check_shape', 'vc_util.check_shape', (['output_shape', '(4)', '"""output_shape"""'], {}), "(output_shape, 4, 'output_shape')\n", (3763, 3796), True, 'from akg.utils import validation_check as vc_util\n'), ((3801, 3872), 'akg.utils.validation_check.ops_dtype_check', 'vc_util.ops_dtype_check', (['input.dtype', 'vc_util.DtypeForDavinci.ALL_FLOAT'], {}), '(input.dtype, vc_util.DtypeForDavinci.ALL_FLOAT)\n', (3824, 3872), True, 'from akg.utils import validation_check as vc_util\n'), ((3877, 3972), 'akg.utils.validation_check.check_equal', 'vc_util.check_equal', (['"""input batchsize"""', '"""output batchsize"""', 'input_shape[0]', 'output_shape[0]'], {}), "('input batchsize', 'output batchsize', input_shape[0],\n output_shape[0])\n", (3896, 3972), True, 'from akg.utils import validation_check as vc_util\n'), ((3973, 4073), 'akg.utils.validation_check.check_equal', 'vc_util.check_equal', (['"""input channel num"""', '"""output channel num"""', 'input_shape[3]', 'output_shape[3]'], {}), "('input channel num', 'output channel num', input_shape[\n 3], output_shape[3])\n", (3992, 4073), 
True, 'from akg.utils import validation_check as vc_util\n'), ((861, 911), 'akg.tvm.truncdiv', 'akg.tvm.truncdiv', (['inputs.shape[i]', 'output_shape[i]'], {}), '(inputs.shape[i], output_shape[i])\n', (877, 911), False, 'import akg\n')]
|
from django.conf import settings
from django.db import models
from django.db.models import Max
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.urls import reverse
from factory.django import mute_signals
from bibliography.models import Source
from brit.models import NamedUserObjectModel
from distributions.models import Timestep, TemporalDistribution
from users.models import get_default_owner
class MaterialCategory(NamedUserObjectModel):
pass
class BaseMaterial(NamedUserObjectModel):
"""
Base for all specialized models of material
"""
type = models.CharField(max_length=127, default='material')
categories = models.ManyToManyField(MaterialCategory, blank=True)
class Meta:
verbose_name = 'Material'
unique_together = [['name', 'owner']]
class MaterialManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(type='material')
class Material(BaseMaterial):
"""
Generic material class for many purposes. E.g. this is used as top level definition to link semantic definition of
materials with analysis data.
"""
class Meta:
proxy = True
class MaterialComponentManager(models.Manager):
def default(self):
return self.get_queryset().get(name='Fresh Matter (FM)', owner=get_default_owner())
def other(self):
return self.get_queryset().get(name='Other', owner=get_default_owner())
class MaterialComponent(BaseMaterial):
"""
Component class of a material for which a weight fraction can be assigned but which cannot itself be defined as a
material (e.g. total solids, volatile solids, etc.)
"""
objects = MaterialComponentManager()
class Meta:
proxy = True
verbose_name = 'component'
@receiver(post_save, sender=MaterialComponent)
def add_type_component(sender, instance, created, **kwargs):
if created:
instance.type = 'component'
instance.save()
def get_default_component():
return MaterialComponent.objects.get_or_create(
owner=get_default_owner(),
name=getattr(settings, 'DEFAULT_MATERIALCOMPONENT_NAME', 'Fresh Matter (FM)')
)[0]
def get_default_component_pk():
return MaterialComponent.objects.get_or_create(
owner=get_default_owner(),
name=getattr(settings, 'DEFAULT_MATERIALCOMPONENT_NAME', 'Fresh Matter (FM)')
)[0].pk
class MaterialComponentGroupManager(models.Manager):
def default(self):
return self.get_queryset().get(name='Total Material', owner=get_default_owner())
class MaterialComponentGroup(NamedUserObjectModel):
"""
Definition of a group of components that belong together to form a composition which can be described with
weight fractions. E.g. Macro component, chemical elements, etc. The actual composition is described in its own
model: Composition. This is a container that allows to identify comparable compositions.
"""
objects = MaterialComponentGroupManager()
class Meta:
unique_together = [['name', 'owner']]
def get_default_group():
return MaterialComponentGroup.objects.get_or_create(
owner=get_default_owner(),
name=getattr(settings, 'DEFAULT_MATERIALCOMPONENTGROUP_NAME', 'Total Material')
)[0]
class SampleSeries(NamedUserObjectModel):
"""
Sample series are used to add concrete experimental data to the abstract semantic definition of materials. A sample
series consists of several samples that are taken from a comparable source at different times. That way a temporal
distribution of material properties and compositions over time can be described.
"""
material = models.ForeignKey(Material, on_delete=models.PROTECT)
preview = models.ImageField(default='materials/img/generic_material.jpg', null=False)
publish = models.BooleanField(default=False)
standard = models.BooleanField(default=True)
temporal_distributions = models.ManyToManyField(TemporalDistribution)
def add_component_group(self, group, fractions_of=None):
"""Adds compositions of a component group to all samples of this sample series."""
if not fractions_of:
fractions_of = MaterialComponent.objects.default()
for sample in self.samples.all():
Composition.objects.create(
owner=self.owner,
group=group,
sample=sample,
fractions_of=fractions_of
)
def remove_component_group(self, group):
"""Removes all compositions of a component group from all samples of this sample series."""
for sample in self.samples.all():
Composition.objects.filter(sample=sample, group=group).delete()
def add_component(self, component, group):
"""Creates WeightShare objects for a new component for all samples of a SampleSeries at once."""
for sample in self.samples.all():
for composition in sample.compositions.filter(group=group):
composition.add_component(component)
def remove_component(self, component, group):
"""Removes all WeightShare objects of a given component and component group"""
for sample in self.samples.all():
for composition in sample.compositions.filter(group=group):
composition.remove_component(component)
def add_temporal_distribution(self, distribution):
"""
        Adds the temporal distribution to the m2m field and also creates a sample for each timestep of the
        distribution.
"""
# In case this method is called manually and not by m2m_changed
if distribution not in self.temporal_distributions.all():
self.temporal_distributions.add(distribution)
        # Create one sample per timestep; a default composition is added via the Sample post_save receiver
for timestep in distribution.timestep_set.all():
Sample.objects.create(owner=self.owner, series=self, timestep=timestep)
def remove_temporal_distribution(self, distribution):
"""
Removes the temporal distribution from the m2m field and also cleans up all related composition sets and shares.
"""
if distribution in self.temporal_distributions.all():
for timestep in distribution.timestep_set.all():
self.samples.filter(timestep=timestep).delete()
self.temporal_distributions.remove(distribution)
@property
def components(self):
"""
Queryset of all components that have been assigned to this group.
"""
return MaterialComponent.objects.filter(id__in=[share['component'] for share in
WeightShare.objects.filter(
composition__sample__series=self).values(
'component').distinct()])
@property
def component_groups(self):
return MaterialComponentGroup.objects.filter(
id__in=[composition['group'] for composition in
Composition.objects.filter(
sample__series=self
).exclude(
id=MaterialComponentGroup.objects.default().id
).values('group').distinct()]
)
@property
def group_ids(self):
"""
Ids of component groups that have been assigned to this material.
"""
return [setting['group'] for setting in
Composition.objects.filter(sample__series=self).values('group').distinct()]
@property
def blocked_ids(self):
"""
Returns a list of group ids that cannot be added to the material because they are already assigned.
"""
return self.group_ids
@property
def shares(self):
return WeightShare.objects.filter(composition__sample__series=self)
def duplicate(self, creator, **kwargs):
with mute_signals(post_save):
duplicate = SampleSeries.objects.create(
owner=creator,
name=kwargs.get('name', self.name),
material=kwargs.get('material', self.material)
)
for sample in self.samples.all():
sample_duplicate = sample.duplicate(creator)
sample_duplicate.series = duplicate
sample_duplicate.save()
duplicate.temporal_distributions.set(self.temporal_distributions.all())
return duplicate
@property
def full_name(self):
return f'{self.material.name} {self.name}'
@property
def group_settings(self):
return Composition.objects.filter(
sample__series=self
).exclude(
group=MaterialComponentGroup.objects.default()
)
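# Editor's addition: hedged usage sketch of the SampleSeries composition API above.
# The owner/material/distribution variables are illustrative assumptions only.
#
#   series = SampleSeries.objects.create(owner=owner, name='Series A', material=material)
#   group = MaterialComponentGroup.objects.create(owner=owner, name='Macro components')
#   series.add_component_group(group)                        # one Composition per sample
#   series.add_component(MaterialComponent.objects.other(), group)
#   series.add_temporal_distribution(distribution)           # one Sample per timestep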
@receiver(post_save, sender=SampleSeries)
def add_default_temporal_distribution(sender, instance, created, **kwargs):
if created:
instance.add_temporal_distribution(TemporalDistribution.objects.default())
class MaterialProperty(NamedUserObjectModel):
unit = models.CharField(max_length=63)
def __str__(self):
return f'{self.name} [{self.unit}]'
class MaterialPropertyValue(NamedUserObjectModel):
property = models.ForeignKey(MaterialProperty, on_delete=models.PROTECT)
average = models.FloatField()
standard_deviation = models.FloatField()
def duplicate(self, creator):
with mute_signals(post_save):
duplicate = MaterialPropertyValue.objects.create(
owner=creator,
property=self.property,
average=self.average,
standard_deviation=self.standard_deviation,
)
return duplicate
class Sample(NamedUserObjectModel):
"""
Representation of a single sample that was taken at a specific location and time. Equivalent samples are associated
    with a SampleSeries to describe the temporal distribution of properties and composition.
"""
series = models.ForeignKey(SampleSeries, related_name='samples', on_delete=models.CASCADE)
timestep = models.ForeignKey(Timestep, related_name='samples', on_delete=models.PROTECT, null=True)
taken_at = models.DateTimeField(blank=True, null=True)
preview = models.ImageField(blank=True, null=True)
properties = models.ManyToManyField(MaterialPropertyValue)
sources = models.ManyToManyField(Source)
def duplicate(self, creator, **kwargs):
with mute_signals(post_save):
duplicate = Sample.objects.create(
owner=creator,
series=kwargs.get('series', self.series),
timestep=kwargs.get('timestep', self.timestep),
taken_at=kwargs.get('taken_at', self.taken_at),
)
for composition in self.compositions.all():
duplicate_composition = composition.duplicate(creator)
duplicate_composition.sample = duplicate
duplicate_composition.save()
for prop in self.properties.all():
duplicate.properties.add(prop.duplicate(creator))
return duplicate
@receiver(post_save, sender=Sample)
def add_default_composition(sender, instance, created, **kwargs):
if created:
composition = Composition.objects.create(
owner=instance.owner,
group=get_default_group(),
sample=instance,
fractions_of=get_default_component(),
)
composition.add_component(MaterialComponent.objects.default())
class Composition(NamedUserObjectModel):
"""
Utility model to store the settings for component groups for each material in each customization. This model is not
    supposed to be edited directly by a user. It depends on user objects and must be deleted when any of the user
objects it depends on is deleted.
"""
group = models.ForeignKey(MaterialComponentGroup, related_name='compositions', on_delete=models.PROTECT)
sample = models.ForeignKey(Sample, related_name='compositions', on_delete=models.CASCADE)
fractions_of = models.ForeignKey(MaterialComponent, on_delete=models.PROTECT, default=get_default_component_pk)
order = models.IntegerField(default=90)
class Meta:
ordering = ['order']
@property
def material(self):
return self.sample.series.material
@property
def timestep(self):
return self.sample.timestep
@property
def component_ids(self):
"""
Ids of all material components that have been assigned to this group.
"""
return [share['component'] for share in self.shares.values('component').distinct()]
def components(self):
"""
Queryset of all components that have been assigned to this group.
"""
return MaterialComponent.objects.filter(id__in=self.component_ids)
@property
def blocked_component_ids(self):
"""
Returns a list of ids that cannot be added to the group because they are either already assigned to the group
or would create a circular reference.
"""
ids = self.component_ids
ids.append(self.fractions_of.id)
ids.append(self.material.id)
return ids
@property
def blocked_distribution_ids(self):
return [dist.id for dist in self.sample.series.temporal_distributions.all()]
def add_component(self, component, **kwargs):
"""
        Convenience method to create a correctly configured WeightShare object for this model.
"""
return WeightShare.objects.create(
owner=self.owner,
component=component,
composition=self,
average=kwargs.setdefault('average', 0.0),
standard_deviation=kwargs.setdefault('standard_deviation', 0.0),
)
def remove_component(self, component):
"""
Removes the component from all compositions in which it appears.
"""
self.shares.filter(component=component).delete()
def add_temporal_distribution(self, distribution):
"""
        Adds the temporal distribution to the series' m2m field and also creates a sample for each timestep of the
        distribution.
"""
self.sample.series.add_temporal_distribution(distribution)
def remove_temporal_distribution(self, distribution):
"""
Removes the temporal distribution from the m2m field and also cleans up all related composition sets and shares.
"""
self.sample.series.remove_temporal_distribution(distribution)
def order_up(self):
current_order = self.order
next_composition = self.sample.compositions.filter(order__gt=self.order).order_by('order').first()
if next_composition:
self.order = next_composition.order
next_composition.order = current_order
next_composition.save()
self.save()
def order_down(self):
current_order = self.order
previous_composition = self.sample.compositions.filter(order__lt=self.order).order_by('-order').first()
if previous_composition:
self.order = previous_composition.order
previous_composition.order = current_order
previous_composition.save()
self.save()
def duplicate(self, creator):
with mute_signals(post_save):
duplicate = Composition.objects.create(
owner=creator,
group=self.group,
sample=self.sample,
fractions_of=self.fractions_of,
order=self.order
)
for share in self.shares.all():
duplicate_share = share.duplicate(creator)
duplicate_share.composition = duplicate
duplicate_share.save()
return duplicate
def get_absolute_url(self):
return self.sample.get_absolute_url()
def __str__(self):
return f'Composition of {self.group.name} of sample {self.sample.name}'
@receiver(post_save, sender=Composition)
def add_next_order_value(sender, instance, created, **kwargs):
if created:
compositions = Composition.objects.filter(sample=instance.sample)
instance.order = compositions.aggregate(Max('order'))['order__max'] + 10
instance.save()
class WeightShare(NamedUserObjectModel):
"""
Holds the actual values of weight fractions that are part of any material composition. This model is not edited
    directly to maintain consistency within compositions. Use the API of Composition instead.
"""
component = models.ForeignKey(MaterialComponent, related_name='shares', on_delete=models.CASCADE)
composition = models.ForeignKey(Composition, related_name='shares', on_delete=models.CASCADE)
average = models.FloatField(default=0.0)
standard_deviation = models.FloatField(default=0.0)
class Meta:
ordering = ['-average']
@property
def as_percentage(self):
return f'{round(self.average * 100, 1)} ± {round(self.standard_deviation * 100, 1)}%'
@property
def material(self):
return self.composition.sample.series.material
@property
def material_settings(self):
return self.composition.sample.series
@property
def group(self):
return self.composition.group
@property
def group_settings(self):
return self.composition
@property
def timestep(self):
return self.composition.sample.timestep
def get_absolute_url(self):
return reverse('sampleseries-detail', kwargs={'pk': self.composition.sample.series.id})
def duplicate(self, creator):
duplicate = WeightShare.objects.create(
owner=creator,
component=self.component,
composition=self.composition,
average=self.average,
standard_deviation=self.standard_deviation)
return duplicate
def __str__(self):
return f'Component share of material: {self.material.name}, component: {self.component.name}'
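# Editor's addition: hedged usage sketch of the Composition/WeightShare API above.
# The sample/group/component variables are illustrative assumptions only.
#
#   composition = sample.compositions.get(group=group)
#   share = composition.add_component(component, average=0.35, standard_deviation=0.05)
#   composition.order_up()    # swap display order with the next composition of the sample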
|
[
"django.db.models.Max",
"distributions.models.TemporalDistribution.objects.default",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.dispatch.receiver",
"factory.django.mute_signals",
"django.db.models.FloatField",
"django.db.models.BooleanField",
"users.models.get_default_owner",
"django.db.models.ImageField",
"django.db.models.IntegerField",
"django.urls.reverse",
"django.db.models.DateTimeField"
] |
[((1825, 1870), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'MaterialComponent'}), '(post_save, sender=MaterialComponent)\n', (1833, 1870), False, 'from django.dispatch import receiver\n'), ((8958, 8998), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'SampleSeries'}), '(post_save, sender=SampleSeries)\n', (8966, 8998), False, 'from django.dispatch import receiver\n'), ((11269, 11303), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'Sample'}), '(post_save, sender=Sample)\n', (11277, 11303), False, 'from django.dispatch import receiver\n'), ((16190, 16229), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'Composition'}), '(post_save, sender=Composition)\n', (16198, 16229), False, 'from django.dispatch import receiver\n'), ((618, 670), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(127)', 'default': '"""material"""'}), "(max_length=127, default='material')\n", (634, 670), False, 'from django.db import models\n'), ((688, 740), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['MaterialCategory'], {'blank': '(True)'}), '(MaterialCategory, blank=True)\n', (710, 740), False, 'from django.db import models\n'), ((3721, 3774), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Material'], {'on_delete': 'models.PROTECT'}), '(Material, on_delete=models.PROTECT)\n', (3738, 3774), False, 'from django.db import models\n'), ((3789, 3864), 'django.db.models.ImageField', 'models.ImageField', ([], {'default': '"""materials/img/generic_material.jpg"""', 'null': '(False)'}), "(default='materials/img/generic_material.jpg', null=False)\n", (3806, 3864), False, 'from django.db import models\n'), ((3879, 3913), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3898, 3913), False, 'from django.db import models\n'), ((3929, 3962), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (3948, 3962), False, 'from django.db import models\n'), ((3992, 4036), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['TemporalDistribution'], {}), '(TemporalDistribution)\n', (4014, 4036), False, 'from django.db import models\n'), ((9233, 9264), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(63)'}), '(max_length=63)\n', (9249, 9264), False, 'from django.db import models\n'), ((9401, 9462), 'django.db.models.ForeignKey', 'models.ForeignKey', (['MaterialProperty'], {'on_delete': 'models.PROTECT'}), '(MaterialProperty, on_delete=models.PROTECT)\n', (9418, 9462), False, 'from django.db import models\n'), ((9477, 9496), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (9494, 9496), False, 'from django.db import models\n'), ((9522, 9541), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (9539, 9541), False, 'from django.db import models\n'), ((10152, 10238), 'django.db.models.ForeignKey', 'models.ForeignKey', (['SampleSeries'], {'related_name': '"""samples"""', 'on_delete': 'models.CASCADE'}), "(SampleSeries, related_name='samples', on_delete=models.\n CASCADE)\n", (10169, 10238), False, 'from django.db import models\n'), ((10249, 10342), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Timestep'], {'related_name': '"""samples"""', 'on_delete': 'models.PROTECT', 'null': '(True)'}), "(Timestep, related_name='samples', on_delete=models.\n PROTECT, null=True)\n", (10266, 10342), False, 'from django.db import models\n'), 
((10353, 10396), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (10373, 10396), False, 'from django.db import models\n'), ((10411, 10451), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (10428, 10451), False, 'from django.db import models\n'), ((10469, 10514), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['MaterialPropertyValue'], {}), '(MaterialPropertyValue)\n', (10491, 10514), False, 'from django.db import models\n'), ((10529, 10559), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Source'], {}), '(Source)\n', (10551, 10559), False, 'from django.db import models\n'), ((12013, 12113), 'django.db.models.ForeignKey', 'models.ForeignKey', (['MaterialComponentGroup'], {'related_name': '"""compositions"""', 'on_delete': 'models.PROTECT'}), "(MaterialComponentGroup, related_name='compositions',\n on_delete=models.PROTECT)\n", (12030, 12113), False, 'from django.db import models\n'), ((12123, 12208), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Sample'], {'related_name': '"""compositions"""', 'on_delete': 'models.CASCADE'}), "(Sample, related_name='compositions', on_delete=models.CASCADE\n )\n", (12140, 12208), False, 'from django.db import models\n'), ((12223, 12324), 'django.db.models.ForeignKey', 'models.ForeignKey', (['MaterialComponent'], {'on_delete': 'models.PROTECT', 'default': 'get_default_component_pk'}), '(MaterialComponent, on_delete=models.PROTECT, default=\n get_default_component_pk)\n', (12240, 12324), False, 'from django.db import models\n'), ((12332, 12363), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(90)'}), '(default=90)\n', (12351, 12363), False, 'from django.db import models\n'), ((16769, 16859), 'django.db.models.ForeignKey', 'models.ForeignKey', (['MaterialComponent'], {'related_name': '"""shares"""', 'on_delete': 'models.CASCADE'}), "(MaterialComponent, related_name='shares', on_delete=\n models.CASCADE)\n", (16786, 16859), False, 'from django.db import models\n'), ((16873, 16952), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Composition'], {'related_name': '"""shares"""', 'on_delete': 'models.CASCADE'}), "(Composition, related_name='shares', on_delete=models.CASCADE)\n", (16890, 16952), False, 'from django.db import models\n'), ((16967, 16997), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (16984, 16997), False, 'from django.db import models\n'), ((17023, 17053), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (17040, 17053), False, 'from django.db import models\n'), ((17715, 17800), 'django.urls.reverse', 'reverse', (['"""sampleseries-detail"""'], {'kwargs': "{'pk': self.composition.sample.series.id}"}), "('sampleseries-detail', kwargs={'pk': self.composition.sample.series.id}\n )\n", (17722, 17800), False, 'from django.urls import reverse\n'), ((8107, 8130), 'factory.django.mute_signals', 'mute_signals', (['post_save'], {}), '(post_save)\n', (8119, 8130), False, 'from factory.django import mute_signals\n'), ((9134, 9172), 'distributions.models.TemporalDistribution.objects.default', 'TemporalDistribution.objects.default', ([], {}), '()\n', (9170, 9172), False, 'from distributions.models import Timestep, TemporalDistribution\n'), ((9590, 9613), 'factory.django.mute_signals', 'mute_signals', (['post_save'], {}), 
'(post_save)\n', (9602, 9613), False, 'from factory.django import mute_signals\n'), ((10618, 10641), 'factory.django.mute_signals', 'mute_signals', (['post_save'], {}), '(post_save)\n', (10630, 10641), False, 'from factory.django import mute_signals\n'), ((15523, 15546), 'factory.django.mute_signals', 'mute_signals', (['post_save'], {}), '(post_save)\n', (15535, 15546), False, 'from factory.django import mute_signals\n'), ((1353, 1372), 'users.models.get_default_owner', 'get_default_owner', ([], {}), '()\n', (1370, 1372), False, 'from users.models import get_default_owner\n'), ((1455, 1474), 'users.models.get_default_owner', 'get_default_owner', ([], {}), '()\n', (1472, 1474), False, 'from users.models import get_default_owner\n'), ((2105, 2124), 'users.models.get_default_owner', 'get_default_owner', ([], {}), '()\n', (2122, 2124), False, 'from users.models import get_default_owner\n'), ((2586, 2605), 'users.models.get_default_owner', 'get_default_owner', ([], {}), '()\n', (2603, 2605), False, 'from users.models import get_default_owner\n'), ((3204, 3223), 'users.models.get_default_owner', 'get_default_owner', ([], {}), '()\n', (3221, 3223), False, 'from users.models import get_default_owner\n'), ((2321, 2340), 'users.models.get_default_owner', 'get_default_owner', ([], {}), '()\n', (2338, 2340), False, 'from users.models import get_default_owner\n'), ((16431, 16443), 'django.db.models.Max', 'Max', (['"""order"""'], {}), "('order')\n", (16434, 16443), False, 'from django.db.models import Max\n')]
|
# Generated by Django 3.1.7 on 2021-04-10 15:11
from django.db import migrations, models
import phone_field.models
class Migration(migrations.Migration):
dependencies = [
('authentication', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='is_team_manager',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='user',
name='is_writer',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='user',
name='mpesa_no',
field=phone_field.models.PhoneField(blank=True, help_text='Mpesa phone number', max_length=31),
),
migrations.AddField(
model_name='user',
name='phone',
field=phone_field.models.PhoneField(blank=True, help_text='Contact phone number', max_length=31),
),
]
|
[
"django.db.models.BooleanField"
] |
[((363, 397), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (382, 397), False, 'from django.db import migrations, models\n'), ((518, 552), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (537, 552), False, 'from django.db import migrations, models\n')]
|
import turtle
WIDTH = 50
HEIGHT = 100
SIZE = 5
GAP = 15
BEGIN = -280
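# The script below draws "08.07.2010" glyph by glyph: WIDTH/HEIGHT size each digit,
# SIZE sets the pen width (and the dot size), GAP is the spacing between glyphs and
# BEGIN is the x position of the first glyph.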
def goto(x):
t.setposition(x, 0)
def dot(start, color):
goto(start)
t.pencolor(color)
t.down()
for _ in range(4):
t.forward(SIZE)
t.left(90)
t.up()
def zero(start, color):
goto(start)
t.pencolor(color)
t.down()
for _ in range(2):
t.forward(WIDTH)
t.left(90)
t.forward(HEIGHT)
t.left(90)
t.up()
def one(start, color):
goto(start)
t.pencolor(color)
t.forward(WIDTH)
t.down()
t.left(90)
t.forward(HEIGHT)
t.up()
t.right(90)
def two(start, color):
goto(start)
t.pencolor(color)
t.left(90)
t.forward(HEIGHT)
t.down()
t.right(90)
t.forward(WIDTH)
t.right(90)
t.forward(HEIGHT/2)
t.right(90)
t.forward(WIDTH)
t.left(90)
t.forward(HEIGHT / 2)
t.left(90)
t.forward(WIDTH)
t.up()
def seven(start, color):
goto(start)
t.pencolor(color)
t.forward(WIDTH)
t.down()
t.left(90)
t.forward(HEIGHT)
t.left(90)
t.forward(WIDTH)
t.up()
t.left(90)
t.forward(HEIGHT/2)
t.left(90)
t.forward(WIDTH/2)
t.pendown()
t.forward(WIDTH)
t.up()
def eight(start, color):
goto(start)
t.pencolor(color)
t.down()
for _ in range(2):
t.forward(WIDTH)
t.left(90)
t.forward(HEIGHT)
t.left(90)
t.left(90)
t.forward(HEIGHT / 2)
t.right(90)
t.forward(WIDTH)
t.up()
t = turtle.Pen()
t.width(SIZE)
t.speed(3)
t.up()
goto(BEGIN)
# 0
s = BEGIN
zero(s, '#42ddf5')
# 8
s = s + WIDTH + GAP
eight(s, '#716be8')
# .
s = s + WIDTH + GAP
dot(s, '#42ddf5')
# 0
s = s + GAP
zero(s, '#42ddf5')
# 7
s = s + WIDTH + GAP
seven(s, '#f5b342')
# .
s = s + WIDTH + GAP
dot(s, '#42ddf5')
# 2
s = s + GAP
two(s, '#42f55d')
# 0
s = s + WIDTH + GAP
zero(s, '#42ddf5')
# 1
s = s + WIDTH + GAP
one(s, '#f542dd')
# 0
s = s + WIDTH + GAP
zero(s, '#42ddf5')
t.forward(WIDTH + GAP)
turtle.Screen().exitonclick()
|
[
"turtle.Screen",
"turtle.Pen"
] |
[((1531, 1543), 'turtle.Pen', 'turtle.Pen', ([], {}), '()\n', (1541, 1543), False, 'import turtle\n'), ((2024, 2039), 'turtle.Screen', 'turtle.Screen', ([], {}), '()\n', (2037, 2039), False, 'import turtle\n')]
|
import os
notes = []
for file in os.listdir('.'):
if file.endswith(".py"):
continue
filename_w_ext = os.path.basename(file).lower()
filename, file_extension = os.path.splitext(filename_w_ext)
notes.append(filename.split("_"))
sortedNotes = sorted(notes, key=lambda note: int(note[2]))
for note in range(len(sortedNotes)):
sortedNotes[note] = "_".join(sortedNotes[note])
print(sortedNotes)
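# Editor's note (assumption, based on the code above): note filenames are expected to
# follow a "<subject>_<topic>_<number>" pattern, e.g. "math_algebra_3.txt", so that the
# third underscore-separated segment sorts numerically; a non-numeric third segment
# would raise ValueError.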
|
[
"os.path.splitext",
"os.listdir",
"os.path.basename"
] |
[((35, 50), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (45, 50), False, 'import os\n'), ((182, 214), 'os.path.splitext', 'os.path.splitext', (['filename_w_ext'], {}), '(filename_w_ext)\n', (198, 214), False, 'import os\n'), ((120, 142), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (136, 142), False, 'import os\n')]
|
"""
Daemonize a process.
Based on: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
Taken from: https://github.com/WIPACrepo/iceprod/blob/master/iceprod/server/daemon.py
"""
import sys
import os
import time
import atexit
import signal
from builtins import str
class Daemon(object):
"""
A generic daemon class.
Usage:
d=Daemon(pidfile - filename for pidfile (required)
runner - function to execute in daemon (required)
stdin - input filename (default is /dev/null)
stdout - output filename (default is /dev/null)
stderr - error filename (default is /dev/null)
chdir - working directory (default is /)
umask - umask of new files (default is 0)
)
d.start()
d.stop()
d.kill()
"""
def __init__(self, pidfile, runner,
stdin='/dev/null',
stdout='/dev/null',
stderr='/dev/null',
chdir='/',
umask=0):
if not isinstance(pidfile, str):
raise Exception('pidfile is not a string')
if not callable(runner):
raise Exception('runner is not callable')
self.pidfile = pidfile
self.runner = runner
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.chdir = chdir
self.umask = umask
def _daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError as e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir(self.chdir)
os.setsid()
os.umask(self.umask)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError as e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = open(self.stdin, 'rb')
so = open(self.stdout, 'ab+')
se = open(self.stderr, 'ab+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
pgrp = str(os.getpgrp())
open(self.pidfile,'w+').write("%s %s\n" % (pid,pgrp))
def _sendsignal(self,pid,sig,waitfordeath=True):
"""Send the specified signal to the process"""
try:
os.kill(pid, sig)
if waitfordeath:
for _ in range(10):
time.sleep(1)
os.kill(pid, sig)
return False
except OSError as err:
err = str(err)
if 'No such process' in err:
self.delpid()
else:
sys.stdout.write("OSError: %s\n" % err)
sys.exit(1)
return True
def _sendsignalgrp(self,pid,sig,waitfordeath=True):
"""Send the specified signal to the process group"""
try:
os.killpg(pid, sig)
if waitfordeath:
for _ in range(10):
time.sleep(1)
os.killpg(pid, sig)
return False
except OSError as err:
err = str(err)
if 'No such process' in err:
self.delpid()
else:
sys.stdout.write("OSError: %s\n" % err)
sys.exit(1)
return True
def delpid(self):
if os.path.exists(self.pidfile):
sys.stdout.write("Deleting pidfile\n")
os.remove(self.pidfile)
def getpid(self):
"""Get the pid from the pidfile"""
try:
pf = open(self.pidfile,'r')
pid,pgrp = [int(x.strip()) for x in pf.read().split()]
pf.close()
except IOError:
pid = None
pgrp = None
return (pid,pgrp)
def start(self):
"""Start the daemon"""
pid,pgrp = self.getpid()
if pid:
message = "pidfile %s already exist. Daemon already running?"
raise Exception(message % self.pidfile)
# Start the daemon
self._daemonize()
self.runner()
def stop(self):
"""Stop the daemon"""
pid,pgrp = self.getpid()
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return
if not self._sendsignal(pid,signal.SIGINT):
sys.stdout.write('SIGINT failed, try SIGQUIT\n')
if not self._sendsignalgrp(pgrp,signal.SIGQUIT):
sys.stdout.write('SIGQUIT failed, try SIGKILL\n')
self._sendsignalgrp(pgrp,signal.SIGKILL)
def kill(self):
"""Kill the daemon"""
pid,pgrp = self.getpid()
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return
if not self._sendsignal(pid,signal.SIGQUIT):
self._sendsignalgrp(pgrp,signal.SIGTERM)
def hardkill(self):
"""Kill the daemon"""
pid,pgrp = self.getpid()
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return
self._sendsignalgrp(pgrp,signal.SIGKILL)
def restart(self):
"""Restart the daemon"""
self.stop()
self.start()
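# Editor's addition: hedged usage sketch for the Daemon class above; the pidfile path,
# log path and runner function are illustrative assumptions only.
#
#   def my_runner():
#       while True:
#           time.sleep(60)
#
#   d = Daemon('/tmp/mydaemon.pid', my_runner, stdout='/tmp/mydaemon.log')
#   d.start()   # double-fork, write pidfile, then call my_runner()
#   d.stop()    # SIGINT, escalating to SIGQUIT/SIGKILL on the process group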
|
[
"sys.stdout.write",
"atexit.register",
"os.remove",
"sys.stderr.fileno",
"sys.stdout.flush",
"os.chdir",
"sys.stdout.fileno",
"os.path.exists",
"os.umask",
"os.fork",
"sys.stderr.flush",
"time.sleep",
"sys.stdin.fileno",
"builtins.str",
"os.getpgrp",
"sys.exit",
"os.getpid",
"os.killpg",
"os.kill",
"os.setsid",
"sys.stderr.write"
] |
[((2017, 2037), 'os.chdir', 'os.chdir', (['self.chdir'], {}), '(self.chdir)\n', (2025, 2037), False, 'import os\n'), ((2047, 2058), 'os.setsid', 'os.setsid', ([], {}), '()\n', (2056, 2058), False, 'import os\n'), ((2068, 2088), 'os.umask', 'os.umask', (['self.umask'], {}), '(self.umask)\n', (2076, 2088), False, 'import os\n'), ((2453, 2471), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2469, 2471), False, 'import sys\n'), ((2480, 2498), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (2496, 2498), False, 'import sys\n'), ((2800, 2828), 'atexit.register', 'atexit.register', (['self.delpid'], {}), '(self.delpid)\n', (2815, 2828), False, 'import atexit\n'), ((4140, 4168), 'os.path.exists', 'os.path.exists', (['self.pidfile'], {}), '(self.pidfile)\n', (4154, 4168), False, 'import os\n'), ((1725, 1734), 'os.fork', 'os.fork', ([], {}), '()\n', (1732, 1734), False, 'import os\n'), ((2152, 2161), 'os.fork', 'os.fork', ([], {}), '()\n', (2159, 2161), False, 'import os\n'), ((2643, 2661), 'sys.stdin.fileno', 'sys.stdin.fileno', ([], {}), '()\n', (2659, 2661), False, 'import sys\n'), ((2692, 2711), 'sys.stdout.fileno', 'sys.stdout.fileno', ([], {}), '()\n', (2709, 2711), False, 'import sys\n'), ((2742, 2761), 'sys.stderr.fileno', 'sys.stderr.fileno', ([], {}), '()\n', (2759, 2761), False, 'import sys\n'), ((2847, 2858), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2856, 2858), False, 'import os\n'), ((2879, 2891), 'os.getpgrp', 'os.getpgrp', ([], {}), '()\n', (2889, 2891), False, 'import os\n'), ((3089, 3106), 'os.kill', 'os.kill', (['pid', 'sig'], {}), '(pid, sig)\n', (3096, 3106), False, 'import os\n'), ((3667, 3686), 'os.killpg', 'os.killpg', (['pid', 'sig'], {}), '(pid, sig)\n', (3676, 3686), False, 'import os\n'), ((4182, 4220), 'sys.stdout.write', 'sys.stdout.write', (['"""Deleting pidfile\n"""'], {}), "('Deleting pidfile\\n')\n", (4198, 4220), False, 'import sys\n'), ((4233, 4256), 'os.remove', 'os.remove', (['self.pidfile'], {}), '(self.pidfile)\n', (4242, 4256), False, 'import os\n'), ((5064, 5104), 'sys.stderr.write', 'sys.stderr.write', (['(message % self.pidfile)'], {}), '(message % self.pidfile)\n', (5080, 5104), False, 'import sys\n'), ((5188, 5236), 'sys.stdout.write', 'sys.stdout.write', (['"""SIGINT failed, try SIGQUIT\n"""'], {}), "('SIGINT failed, try SIGQUIT\\n')\n", (5204, 5236), False, 'import sys\n'), ((5610, 5650), 'sys.stderr.write', 'sys.stderr.write', (['(message % self.pidfile)'], {}), '(message % self.pidfile)\n', (5626, 5650), False, 'import sys\n'), ((5969, 6009), 'sys.stderr.write', 'sys.stderr.write', (['(message % self.pidfile)'], {}), '(message % self.pidfile)\n', (5985, 6009), False, 'import sys\n'), ((1812, 1823), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1820, 1823), False, 'import sys\n'), ((1867, 1936), 'sys.stderr.write', 'sys.stderr.write', (["('fork #1 failed: %d (%s)\\n' % (e.errno, e.strerror))"], {}), "('fork #1 failed: %d (%s)\\n' % (e.errno, e.strerror))\n", (1883, 1936), False, 'import sys\n'), ((1949, 1960), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1957, 1960), False, 'import sys\n'), ((2245, 2256), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2253, 2256), False, 'import sys\n'), ((2300, 2369), 'sys.stderr.write', 'sys.stderr.write', (["('fork #2 failed: %d (%s)\\n' % (e.errno, e.strerror))"], {}), "('fork #2 failed: %d (%s)\\n' % (e.errno, e.strerror))\n", (2316, 2369), False, 'import sys\n'), ((2382, 2393), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2390, 2393), False, 'import sys\n'), ((3322, 3330), 
'builtins.str', 'str', (['err'], {}), '(err)\n', (3325, 3330), False, 'from builtins import str\n'), ((3904, 3912), 'builtins.str', 'str', (['err'], {}), '(err)\n', (3907, 3912), False, 'from builtins import str\n'), ((5314, 5363), 'sys.stdout.write', 'sys.stdout.write', (['"""SIGQUIT failed, try SIGKILL\n"""'], {}), "('SIGQUIT failed, try SIGKILL\\n')\n", (5330, 5363), False, 'import sys\n'), ((3192, 3205), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3202, 3205), False, 'import time\n'), ((3226, 3243), 'os.kill', 'os.kill', (['pid', 'sig'], {}), '(pid, sig)\n', (3233, 3243), False, 'import os\n'), ((3436, 3475), 'sys.stdout.write', 'sys.stdout.write', (["('OSError: %s\\n' % err)"], {}), "('OSError: %s\\n' % err)\n", (3452, 3475), False, 'import sys\n'), ((3492, 3503), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3500, 3503), False, 'import sys\n'), ((3772, 3785), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3782, 3785), False, 'import time\n'), ((3806, 3825), 'os.killpg', 'os.killpg', (['pid', 'sig'], {}), '(pid, sig)\n', (3815, 3825), False, 'import os\n'), ((4018, 4057), 'sys.stdout.write', 'sys.stdout.write', (["('OSError: %s\\n' % err)"], {}), "('OSError: %s\\n' % err)\n", (4034, 4057), False, 'import sys\n'), ((4074, 4085), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4082, 4085), False, 'import sys\n')]
|
import os
from ..xjson import XJson
def test_json_eq_xjson():
json = XJson(os.path.join("examples", "countries", "single_file.json"))
    yaml_file = XJson(os.path.join("examples", "countries", "single_file.yaml"))
assert json.structure == yaml_file.structure
|
[
"os.path.join"
] |
[((86, 143), 'os.path.join', 'os.path.join', (['"""examples"""', '"""countries"""', '"""single_file.json"""'], {}), "('examples', 'countries', 'single_file.json')\n", (98, 143), False, 'import os\n'), ((169, 226), 'os.path.join', 'os.path.join', (['"""examples"""', '"""countries"""', '"""single_file.yaml"""'], {}), "('examples', 'countries', 'single_file.yaml')\n", (181, 226), False, 'import os\n')]
|
# encoding = utf-8
import os
import re
import sys
import time
import json
import logging
from datetime import datetime, timedelta
'''
IMPORTANT
Edit only the validate_input and collect_events functions.
Do not edit any other part in this file.
This file is generated only once when creating the modular input.
'''
# For advanced users, if you want to create single instance mod input, uncomment this method.
'''
def use_single_instance_mode():
return True
'''
def _fromIso8601ToUnix(iso8601):
'''
    Convert an ISO 8601 timestamp (as published by Okta) to a Unix epoch float.
    @@@ feels like a hack, revisit with fresh eyes
'''
date = datetime.strptime(iso8601, "%Y-%m-%dT%H:%M:%S.%fZ")
unix = time.mktime(date.timetuple())
myMS = float(iso8601[-5:-1])
aTs = unix + myMS
return aTs
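# Editor's note: the conversion above feeds the "Z"-suffixed (UTC) timestamp through
# time.mktime(), which interprets it in the local timezone. If a true UTC epoch is the
# intent, a hedged alternative sketch (an assumption, not the add-on's current
# behaviour) would be:
#
#   import calendar
#   def _fromIso8601ToUnixUtc(iso8601):
#       date = datetime.strptime(iso8601, "%Y-%m-%dT%H:%M:%S.%fZ")
#       return calendar.timegm(date.timetuple()) + date.microsecond / 1e6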
def _rateLimitEnforce(helper, headers, rc):
opt_metric = helper.get_arg('metric')
log_metric = "metric=" + opt_metric + " | message="
myTimeStamp = int(time.time())
try:
myReset = int(headers['X-Rate-Limit-Reset'])
myRemaining = float(headers['X-Rate-Limit-Remaining'])
myLimit = float(headers['X-Rate-Limit-Limit'])
mySecLeft = int(myReset - myTimeStamp)
myPctLeft = float(100 * myRemaining / myLimit)
except KeyError:
helper.log_info(log_metric + "_rateLimitEnforce with no ratelimit info in headers, using defaults")
myRemaining = int(100)
mySecLeft = int(60)
myPctLeft = float(50.0)
#if less than 1 second left just be 1
if mySecLeft < 1:
helper.log_debug(log_metric + "_rateLimitEnforce mySecLeft was less than 1, setting to 1 to avoid issues")
mySecLeft = 1
helper.log_debug(log_metric + "_rateLimitEnforce Invoked. There are " + str(mySecLeft) + " seconds left in the window and we have " + str(myPctLeft) + " percent of the limit available. The response code returned was " + str(rc) )
if rc == 429:
#the rate limit is exhausted, sleep
        sleepTime = mySecLeft + 7
helper.log_warning(log_metric + "_rateLimitEnforce is now pausing operations for " + str(sleepTime) + " as the rate limit has been exhausted" )
time.sleep(sleepTime)
elif 200 <= rc <= 299:
#sleep only if rate limit reaches a rate, adapt based on exhaustion and time left
# how many calls per second do we assume are happening
cps=4
# percentage to start throttling at
try:
throttle = _getSetting(helper,'throttle_threshold')
throttle = float(throttle)
except:
throttle=float(20.0)
        # percentage at which warnings start
try:
warningpct = _getSetting(helper,'warning_threshold')
warningpct = float(warningpct)
except:
warningpct=float(50.0)
# Should we try to avoid warnings?
try:
avoidWarnings = _getSetting(helper,'avoid_warnings')
avoidWarnings = bool(avoidWarnings)
except:
avoidWarnings=True
#divide by zero is no good
if myRemaining == 0:
myRemaining = 1
# figure out what our number of remaining calls is taking warning limits into account
if avoidWarnings:
helper.log_info(log_metric + "_rateLimitEnforce is applying a warning threshold adjustment " + str(myRemaining) + " before adjustment" )
myRemaining = (myRemaining * warningpct / 100)
if myRemaining < 1:
myRemaining = 1
helper.log_info(log_metric + "_rateLimitEnforce has applied the threshold adjustment " + str(myRemaining) + " after adjustment" )
try:
myPctLeft = float(100 * myRemaining / myLimit)
except KeyError:
myPctLeft = float(10.0)
# How agressive do we throttle, less time to reset = more agressive sleep
if mySecLeft * cps > myRemaining:
sleepTime = mySecLeft * cps / myRemaining
else:
sleepTime = mySecLeft * cps / myRemaining / 10
#never sleep much longer than reset time, saftey factor of 7 seconds
if sleepTime > (mySecLeft + 7):
sleepTime = mySecLeft + 7
if myPctLeft < throttle:
helper.log_info(log_metric + "_rateLimitEnforce is now pausing operations for " + str(sleepTime) + " to avoid exhausting the rate limit" )
time.sleep(sleepTime)
elif 400 <= rc <= 499:
#some error on the client side, throw in a sleep to keep from hammering us but nothing adaptive
helper.log_warning(log_metric + "_rateLimitEnforce is going to pause for 1 second now as a client side error was returned from the server (400-499)" )
time.sleep(1)
elif rc >= 500:
helper.log_error(log_metric + "_rateLimitEnforce is going to pause for 1 second now as a client server side error was indicated (500+)" )
time.sleep(1)
else:
helper.log_error(log_metric + "_rateLimitEnforce is going to pause for 1 second now as an unknown error was indicated (not an http response code)" )
time.sleep(1)
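# Editor's addition: a hedged worked example of the adaptive throttle above, with
# illustrative numbers. With mySecLeft=30, cps=4 and myRemaining=600 (after the warning
# adjustment), the projected demand 30*4=120 is below 600, so sleepTime = 120/600/10 =
# 0.02s; with only myRemaining=60, demand 120 exceeds 60 and sleepTime = 120/60 = 2s,
# capped at mySecLeft + 7. The pause is only applied once myPctLeft drops below the
# throttle threshold.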
def _getSetting(helper, setting):
opt_metric = helper.get_arg('metric')
log_metric = "metric=" + opt_metric + " | message="
helper.log_debug(log_metric + "_getSetting Invoked")
myDefaults = {
'max_log_batch': 60000,
'user_limit': 200,
'group_limit': 200,
'app_limit': 500,
'log_limit': 1000,
'log_history': 7,
'throttle_threshold': 25.0,
'warning_threshold': 50.0,
'http_request_timeout': 90,
'fetch_empty_pages': False,
'skip_empty_pages': True,
'allow_proxy': False,
'write_appUser': True,
'write_groupUser': True,
'bypass_verify_ssl_certs': False,
'custom_ca_cert_bundle_path': False,
'avoid_warnings': True
}
# early fail if the setting we've been asked for isn't something we know about
if setting not in myDefaults:
helper.log_error(log_metric + "_getSetting has no way of finding values for: " + str(setting))
return None
else:
helper.log_info(log_metric + "_getSetting is looking for values for: " + str(setting))
try:
myVal = helper.get_global_setting(setting)
helper.log_debug(log_metric + "_getSetting has a defined " + setting + " value of: " + str(myVal))
except:
myVal = myDefaults[setting]
helper.log_debug(log_metric + "_getSetting has a default1 " + setting + " value of: " + str(myVal))
#test for nonetype
if myVal is None:
myVal = myDefaults[setting]
helper.log_debug(log_metric + "_getSetting has a default2 " + setting + " value of: " + str(myVal))
return myVal
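# Editor's addition: hedged usage sketch of _getSetting; 'log_limit' falls back to the
# coded default of 1000 when no global setting is defined.
#
#   log_limit = int(_getSetting(helper, 'log_limit'))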
def _write_oktaResults(helper, ew, metric, results):
global_account = helper.get_arg('global_account')
okta_org = global_account['username']
log_metric = "metric=" + metric + " | message="
helper.log_debug(log_metric + "_write_oktaResults Invoked")
eventHost = okta_org
eventSourcetype = "OktaIM2:" + metric
eventSource = "Okta:im2"
eventTime = None
for item in results:
#print log
'''
extract the time
log.published
user.lastUpdated
group.lastUpdated
app.lastUpdated
derive the host
okta_org
'''
if 'log' == metric:
eventTime = _fromIso8601ToUnix(item['published'])
elif 'app' == metric:
item.pop('_links','None')
elif metric in ['user', 'group']:
item.pop('_links','None')
eventTime = _fromIso8601ToUnix(item['lastUpdated'])
data = json.dumps(item)
        data = re.sub(r'[\s\r\n]+', " ", data)
event = helper.new_event \
(
source=eventSource,
index=helper.get_output_index(),
sourcetype=eventSourcetype,
data=data,
host=eventHost,
time=eventTime
)
ew.write_event(event)
def _okta_caller(helper, resource, params, method, limit):
#this calls the _okta_client with baked URL's
#makes pagination calls
opt_metric = helper.get_arg('metric')
log_metric = "metric=" + opt_metric + " | message="
helper.log_debug(log_metric + "_okta_caller Invoked")
global_account = helper.get_arg('global_account')
cp_prefix = global_account['name']
okta_org = global_account['username']
myValidPattern = ("https://" + okta_org + "/api/").lower()
#settings
try:
max_log_batch = int(_getSetting(helper,'max_log_batch'))
except:
max_log_batch = int(60000)
try:
skipEmptyPages = bool(_getSetting(helper,'skip_empty_pages'))
except:
skipEmptyPages = bool(True)
#if I get a full URL as resource use it, this will happne if we are picking up from a previous page
if resource.lower().startswith(myValidPattern.lower()):
url = resource
else:
url = "https://" + okta_org + "/api/v1" + resource
url = url.lower()
#make a first call
response = _okta_client(helper, url, params, method)
results = list()
getPages = True
stashNVal = str()
#determine if and what the next pages is and retrieve as required
while(getPages):
n_val = str(response.pop('n_val', False))
i_results = response.pop('results', {})
i_count = int(len(i_results))
results += i_results
r_count = int(len(results))
helper.log_debug(log_metric + "_okta_caller returned: " + str(i_count) + " this pass and: " + str(r_count) + " results so far")
helper.log_debug(log_metric + "_okta_caller Iteration Count: " + str(i_count) + " Limit " + str(limit) )
#special case here for 0 and logs
if 0 == i_count:
helper.log_debug(log_metric + "_okta_caller we have 0 results returned, determining what to store for next run" )
getPages = False
if "log" == opt_metric:
if n_val.startswith(myValidPattern):
'''
429 case, penalty has been paid already but lets bail anyhow and pickup on next iteration
We will also encounter this case if/when the logs API ALWAYS returns a next link
'''
helper.log_info(log_metric + "_okta_caller n_val matches our valid pattern with 0 results, store the return n_val: " + n_val)
stashNVal = n_val
else:
'''
The current functionality of the logs API will not return a next link if the request produced 0 results
in these cases we are going to keep asking for this same page until new logs are produced and we get a new cursor
'''
helper.log_info(log_metric + "_okta_caller n_val does not match our valid pattern with 0 results, store the current URL: " + url )
stashNVal = url
elif i_count < limit:
'''
potential hitch here: If a limit value is raised after initial collection has begun
the number of results in each page will always be lower than our currently defined limit
because limit is a retained parameter in our next link
include something in the docs around this
'''
helper.log_debug(log_metric + "_okta_caller only returned " + str(i_count) + " results in this call, this indicates the next page is empty")
if skipEmptyPages:
helper.log_debug(log_metric + "_okta_caller skip empty pages is set to true")
getPages = False
if "log" == opt_metric:
helper.log_info(log_metric + "_okta_caller is will save the returned logs and store the n_val: " + n_val)
stashNVal = n_val
if ( ("log" == opt_metric) and (r_count >= max_log_batch) ):
'''
To avoid exhausting the Splunk server we are going to end this thread after we hit our max batch size
We will pick up on the next interval where we left off
'''
getPages = False
helper.log_info(log_metric + "_okta_caller exceeded the max batch size for logs, saving returned logs and storing n_val: " + n_val)
stashNVal = n_val
if getPages:
if n_val.startswith(myValidPattern):
helper.log_info(log_metric + "_okta_caller we will be getting the next page: " + n_val)
url = n_val
response = _okta_client(helper, url, {}, method)
else:
helper.log_warning(log_metric + "_okta_caller n_val didn't match my pattern check: " + n_val)
getPages = False
elif "log" == opt_metric:
if stashNVal.startswith(myValidPattern):
helper.log_info(log_metric + "_okta_caller we will now stash n_val with: " + str(stashNVal) )
helper.save_check_point((cp_prefix + "logs_n_val"), stashNVal)
helper.log_debug("n_val stashed")
else:
helper.log_warning(log_metric + "_okta_caller next link value was noneType " + str(stashNVal) )
helper.log_debug("Returning Results from _okta_caller")
return results
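#--- Illustrative sketch, not part of the original add-on ---
#The pagination loop above reduces to: follow the "next" link until a page
#comes back empty or the batch cap is hit, then stash the cursor so the next
#run can resume. A minimal, self-contained model of that pattern; fetch_page
#and the (records, next link or None) tuple it returns are hypothetical
#stand-ins, not the add-on's real interface:
def _paginate_sketch(fetch_page, first_url, max_batch=60000):
    results, url = [], first_url
    while url:
        page, next_url = fetch_page(url)  # -> (list of records, next link or None)
        results.extend(page)
        if not page or len(results) >= max_batch:
            return results, (next_url or url)  # cursor to stash for the next run
        url = next_url
    return results, None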
def _okta_client(helper, url, params, method):
#Calls Okta
#deals with rate limit enforcement before returning
opt_metric = helper.get_arg('metric')
log_metric = "metric=" + opt_metric + " | message="
helper.log_debug(log_metric + "_okta_client Invoked with a url of: " + url)
userAgent = "Splunk-AddOn/2.25.19"
global_account = helper.get_arg('global_account')
okta_token = global_account['password']
    try:
        reqTimeout = float(_getSetting(helper,'http_request_timeout'))
    except Exception:
        helper.log_debug(log_metric + "_okta_client using coded timeout value")
        reqTimeout = float(90)
headers = { 'Authorization': 'SSWS ' + okta_token,
'User-Agent': userAgent,
'Content-Type': 'application/json',
'accept': 'application/json' }
allow_proxy = bool(_getSetting(helper,'allow_proxy'))
bypass_verify_ssl_certs = bool(_getSetting(helper,'bypass_verify_ssl_certs'))
custom_ca_cert_bundle_path = _getSetting(helper,'custom_ca_cert_bundle_path')
if bypass_verify_ssl_certs:
sslVerify = False
else:
sslVerify = True
helper.log_debug(log_metric + "_okta_client Invoked with sslVerify set to: " + str(sslVerify))
#Requests uses the same verify param to use a custom bundle, if a custom bundle is defined verification is implied.
if (custom_ca_cert_bundle_path):
helper.log_debug(log_metric + "_okta_client Invoked with custom_ca_cert_bundle_path set to: " + str(custom_ca_cert_bundle_path))
#if it is set, is the path valid?
if os.path.exists(custom_ca_cert_bundle_path):
#ok, override whatever bool param was set with this.
helper.log_debug(log_metric + "_okta_client custom_ca_cert_bundle_path path is valid, overriding sslVerify")
sslVerify = custom_ca_cert_bundle_path
else:
helper.log_debug(log_metric + "_okta_client custom_ca_cert_bundle_path path is NOT valid, ignoring")
if allow_proxy:
helper.log_info("Use of the proxy has been enabled through explicit definition of allow_proxy")
        response = helper.send_http_request(
            url, method, parameters=params,
            payload=None, headers=headers,
            cookies=None, verify=sslVerify, cert=None,
            timeout=reqTimeout
        )
else:
helper.log_info("Use of a proxy has been explicitly disabled")
        response = helper.send_http_request(
            url, method, parameters=params,
            payload=None, headers=headers,
            cookies=None, verify=sslVerify, cert=None,
            timeout=reqTimeout, use_proxy=False
        )
# get the response headers
r_headers = response.headers
requestid = r_headers.pop('X-Okta-Request-Id','None')
    #the body may not be valid JSON (e.g. an empty error response); bail out gracefully
    try:
        results = response.json()
    except ValueError:
sendBack = { 'results': {}, 'n_val': False }
return sendBack
if response.status_code == 429:
helper.log_error(log_metric + "_okta_client returned an error: " + results['errorCode'] + " : " + results['errorSummary'] + " : rid=" + requestid)
_rateLimitEnforce(helper, r_headers, response.status_code)
# If we hit a 429 send back the current url as the n_val, we will pick up from there next time.
sendBack = { 'results': {}, 'n_val': url }
return sendBack
helper.log_debug(log_metric + "_okta_client returned response to our request rid=" + requestid)
#historical_responses = response.history
# get response status code
#r_status = response.status_code
    # check the response status, if the status is not successful, raise requests.HTTPError
response.raise_for_status()
# get the response body as text
#r_text = response.text
# get response body as json. If the body text is not a json string, raise a ValueError
#r_json = response.json()
_rateLimitEnforce(helper, r_headers, response.status_code)
count = str(len(results))
helper.log_debug(log_metric + "_okta_client Returned: " + count + " records")
if 'next' in response.links:
n_val = response.links['next']['url']
helper.log_info(log_metric + "_okta_client sees another page at this URL: " + n_val )
else:
n_val = False
sendBack = { 'results': results, 'n_val': n_val }
return sendBack
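#--- Illustrative sketch, not part of the original add-on ---
#_rateLimitEnforce (defined earlier in this file) consumes the response
#headers popped above. A minimal model of that pattern, assuming Okta's
#documented X-Rate-Limit-Remaining / X-Rate-Limit-Reset headers; the
#threshold logic here is illustrative, not the add-on's real enforcement:
import time  # likely already imported at the top of the add-on

def _rate_limit_sketch(r_headers, status_code):
    remaining = int(r_headers.get('X-Rate-Limit-Remaining', 1))
    reset_epoch = int(r_headers.get('X-Rate-Limit-Reset', 0))
    if status_code == 429 or remaining <= 0:
        #sleep until the limit window resets (never a negative duration)
        time.sleep(max(0, reset_epoch - int(time.time())))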
def _collectUsers(helper):
#Distinct entry point for user collection
opt_metric = helper.get_arg('metric')
log_metric = "metric=" + opt_metric + " | message="
global_account = helper.get_arg('global_account')
cp_prefix = global_account['name']
resource = "/users"
method = "Get"
opt_limit = int(_getSetting(helper,'user_limit'))
dtnow = datetime.now()
end_date = dtnow.isoformat()[:-3] + 'Z'
start_date = helper.get_check_point((cp_prefix + "users_lastUpdated"))
if ( (str(start_date)) == "None" ):
start_date = "1970-01-01T00:00:00.000Z"
helper.log_debug(log_metric + "_collectUsers Invoked, searching for users lastUpdated between " + start_date + " and " + end_date)
myfilter = 'lastUpdated gt "' + start_date + '" and lastUpdated lt "' + end_date + '"'
params = {'filter': myfilter, 'limit': opt_limit}
users = _okta_caller(helper, resource, params, method, opt_limit)
if ( len(users) > 0 ):
lastUpdated = _fromIso8601ToUnix(users[-1]['lastUpdated'])
end_date = users[-1]['lastUpdated']
helper.log_debug(log_metric + "_collectUsers checkpoint lastUpdated first guess is " + end_date)
#loop through users returned and determine the largest lastUpdated date
for user in users:
t_lastUpdated = _fromIso8601ToUnix(user['lastUpdated'])
if t_lastUpdated > lastUpdated:
lastUpdated = t_lastUpdated
end_date = user['lastUpdated']
helper.log_debug(log_metric + "_collectUsers checkpoint lastUpdated middle guess is " + end_date)
#stash the value of our current end_date, will be used as start date on next run
helper.log_debug(log_metric + "_collectUsers checkpoint lastUpdated last guess is " + end_date)
helper.save_check_point((cp_prefix + "users_lastUpdated"), end_date)
return users
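#--- Illustrative sketch, not part of the original add-on ---
#The watermark scan above keeps the greatest lastUpdated seen. Because the
#timestamps are fixed-width UTC ISO-8601 strings, lexicographic order equals
#chronological order, so (on Python 3.4+) a compact equivalent would be:
def _max_last_updated_sketch(records):
    return max((r['lastUpdated'] for r in records), default=None)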
def _collectGroups(helper, ew):
#Distinct entry point for group collection
opt_metric = helper.get_arg('metric')
log_metric = "metric=" + opt_metric + " | message="
global_account = helper.get_arg('global_account')
cp_prefix = global_account['name']
resource = "/groups"
method = "Get"
opt_limit = int(_getSetting(helper,'group_limit'))
start_lastUpdated = helper.get_check_point((cp_prefix + "groups_lastUpdated"))
if ( (str(start_lastUpdated)) == "None" ):
start_lastUpdated = "1970-01-01T00:00:00.000Z"
start_lastMembershipUpdated = helper.get_check_point((cp_prefix + "groups_lastMembershipUpdated"))
if ( (str(start_lastMembershipUpdated)) == "None" ):
start_lastMembershipUpdated = "1970-01-01T00:00:00.000Z"
lastUpdated = '(lastUpdated gt "' + start_lastUpdated + '")'
lastMembershipUpdated = '(lastMembershipUpdated gt "' + start_lastMembershipUpdated + '")'
helper.log_debug(log_metric + "_collectGroups Invoked, searching for groups lastUpdated after " + start_lastUpdated +
" or membershipUpdated after " + start_lastMembershipUpdated)
myfilter = "( " + lastUpdated + " or " + lastMembershipUpdated + " )"
params = {'filter': myfilter, 'limit': opt_limit, 'expand': 'stats,app'}
groups = _okta_caller(helper, resource, params, method, opt_limit)
if ( len(groups) > 0 ):
lastUpdated = _fromIso8601ToUnix(start_lastUpdated)
lastMembershipUpdated = _fromIso8601ToUnix(start_lastMembershipUpdated)
helper.log_debug(log_metric + "_collectGroups checkpoint lastUpdated first guess is " + start_lastUpdated)
helper.log_debug(log_metric + "_collectGroups checkpoint lastMembershipUpdated first guess is " + start_lastMembershipUpdated)
#loop to find the most recent date from result set
for group in groups:
t_lastUpdated = _fromIso8601ToUnix(group['lastUpdated'])
if t_lastUpdated > lastUpdated:
lastUpdated = t_lastUpdated
start_lastUpdated = group['lastUpdated']
helper.log_debug(log_metric + "_collectGroups checkpoint lastUpdated middle guess is " + start_lastUpdated)
t_lastMembershipUpdated = _fromIso8601ToUnix(group['lastMembershipUpdated'])
if t_lastMembershipUpdated > lastMembershipUpdated:
lastMembershipUpdated = t_lastMembershipUpdated
start_lastMembershipUpdated = group['lastMembershipUpdated']
helper.log_debug(log_metric + "_collectGroups checkpoint lastMembershipUpdated middle guess is " + start_lastMembershipUpdated)
#Loop through and enrich groups with members IF they have members or apps assigned
for group in groups:
#pop the _links object, it is pointless in this context
try:
group['_embedded']['stats'].pop('_links','None')
if group['_embedded']['stats']['usersCount'] > 0:
members = _collectGroupUsers(helper, ew, group['id'])
else:
members = []
if group['_embedded']['stats']['appsCount'] > 0:
assignedApps = _collectGroupApps(helper, group['id'])
else:
assignedApps = []
except KeyError:
members = _collectGroupUsers(helper, ew, group['id'])
assignedApps = _collectGroupApps(helper, group['id'])
group['members'] = members
group['assignedApps'] = assignedApps
'''
if group['_embedded']['stats']['groupPushMappingsCount'] > 0:
'''
helper.log_debug(log_metric + "_collectGroups checkpoint lastUpdated last guess is " + start_lastUpdated)
helper.log_debug(log_metric + "_collectGroups checkpoint lastMembershipUpdated last guess is " + start_lastMembershipUpdated)
helper.save_check_point((cp_prefix + "groups_lastUpdated"), start_lastUpdated)
helper.save_check_point((cp_prefix + "groups_lastMembershipUpdated"), start_lastMembershipUpdated)
return groups
def _collectGroupUsers(helper, ew, gid):
opt_metric = helper.get_arg('metric')
log_metric = "metric=" + opt_metric + " | message="
helper.log_debug(log_metric + "_collectGroupUsers has been invoked: " + gid )
resource = "/groups/" + gid + "/skinny_users"
method = "Get"
write_groupUser = bool(_getSetting(helper,'write_groupUser'))
    '''
    Concern: this limit may not be honored in pagination links, triggering the early-exit behavior in _okta_caller
    '''
opt_limit = int(_getSetting(helper,'group_limit'))
params = {'limit': opt_limit}
groupUsers = _okta_caller(helper, resource, params, method, opt_limit)
myArray = []
for groupUser in groupUsers:
if write_groupUser:
myArray.append( {"groupid": gid, "userid": groupUser['id']} )
else:
myArray.append(groupUser['id'])
if write_groupUser:
if ( len(myArray) > 0 ):
helper.log_info(log_metric + "Writing " + (str(len(myArray))) + " groupUsers to splunk")
_write_oktaResults(helper, ew, "groupUser", myArray)
return ['see groupUser sourcetype']
else:
helper.log_info(log_metric + "Zero groupUsers returned")
return []
else:
return myArray
def _collectGroupApps(helper, gid):
opt_metric = helper.get_arg('metric')
log_metric = "metric=" + opt_metric + " | message="
helper.log_debug(log_metric + "_collectGroupApps has been invoked for: " + gid )
resource = "/groups/" + gid + "/apps"
method = "Get"
    '''
    Concern: this limit may not be honored in pagination links, triggering the early-exit behavior in _okta_caller
    '''
opt_limit = int(_getSetting(helper,'group_limit'))
params = {'limit': opt_limit}
groupApps = _okta_caller(helper, resource, params, method, opt_limit)
assignedApps = []
for groupApp in groupApps:
assignedApps.append(groupApp['id'])
return assignedApps
def _collectApps(helper, ew):
opt_metric = helper.get_arg('metric')
log_metric = "metric=" + opt_metric + " | message="
helper.log_debug(log_metric + "_collectApps has been invoked")
resource = "/apps"
method = "Get"
opt_limit = int(_getSetting(helper,'app_limit'))
params = {'limit': opt_limit, 'filter': 'status eq "ACTIVE"'}
apps = _okta_caller(helper, resource, params, method, opt_limit)
for app in apps:
#assigned_users
assignedUsers = _collectAppUsers(helper, ew, app['id'])
app['assigned_users'] = assignedUsers
#assigned_groups
assignedGroups = _collectAppGroups(helper, app['id'])
app['assigned_groups'] = assignedGroups
return apps
def _collectAppUsers(helper, ew, aid):
opt_metric = helper.get_arg('metric')
log_metric = "metric=" + opt_metric + " | message="
helper.log_debug(log_metric + "_collectAppUsers has been invoked: " + aid )
resource = "/apps/" + aid + "/skinny_users"
method = "Get"
write_appUser = bool(_getSetting(helper,'write_appUser'))
    '''
    Concern: this limit may not be honored in pagination links, triggering an early exit in _okta_caller
    '''
opt_limit = int(_getSetting(helper,'app_limit'))
params = {'limit': opt_limit}
appUsers = _okta_caller(helper, resource, params, method, opt_limit)
myArray = []
for appUser in appUsers:
if write_appUser:
try:
myUsername = appUser['credentials']['userName']
except TypeError:
myUsername = "Un<PASSWORD>"
myArray.append(
{ "appid": aid,
"userid": appUser['id'],
"externalId": appUser['externalId'],
"userName": myUsername,
"created": appUser['created'],
"lastUpdated": appUser['lastUpdated'],
"statusChanged": appUser['statusChanged'],
"scope": appUser['scope'],
"status": appUser['status']
})
else:
myArray.append(appUser['id'])
if write_appUser:
if ( len(myArray) > 0 ):
helper.log_info(log_metric + "Writing " + (str(len(myArray))) + " appUsers to splunk")
_write_oktaResults(helper, ew, "appUser", myArray)
return ['see appUser sourcetype']
else:
helper.log_info(log_metric + "Zero appUsers returned")
return []
else:
return myArray
def _collectAppGroups(helper, aid):
opt_metric = helper.get_arg('metric')
log_metric = "metric=" + opt_metric + " | message="
helper.log_debug(log_metric + "_collectAppGroups has been invoked: " + aid )
resource = "/apps/" + aid + "/groups"
method = "Get"
    '''
    Concern: this limit may not be honored in pagination links, triggering an early exit in _okta_caller
    '''
opt_limit = int(_getSetting(helper,'app_limit'))
params = {'limit': opt_limit}
appGroups = _okta_caller(helper, resource, params, method, opt_limit)
assigned_groups = []
for appGroup in appGroups:
assigned_groups.append(appGroup['id'])
return assigned_groups
def _collectLogs(helper):
#Distinct entry point for log Collection
opt_metric = helper.get_arg('metric')
log_metric = "metric=" + opt_metric + " | message="
helper.log_debug(log_metric + "_collectLogs Invoked")
global_account = helper.get_arg('global_account')
cp_prefix = global_account['name']
resource = "/logs"
method = "Get"
dtnow = datetime.now()
opt_limit = int(_getSetting(helper,'log_limit'))
since = helper.get_check_point((cp_prefix + "logs_since"))
n_val = helper.get_check_point((cp_prefix + "logs_n_val"))
if n_val:
'''
        We are picking up a stashed next link; this is the normal operating mode.
        Define a blank param obj since the next link contains everything we need
'''
helper.log_info(log_metric + "_collectLogs sees an existing next link value of: " + n_val + ", picking up from there" )
resource = n_val
params = {}
helper.log_debug("deleting checkpoint")
helper.delete_check_point((cp_prefix + "logs_n_val"))
helper.log_debug("checkpoint deleted")
elif since:
'''
        Not a cold start: use the checkpoint values for retrieval; this is a failsafe method.
        This case should be uncommon and would usually indicate an error
'''
helper.log_info(log_metric + "_collectLogs sees an existing since value of: " + since + ", picking up from there" )
params = {'sortOrder': 'ASCENDING', 'limit': opt_limit, 'since': since}
else:
'''
this is a cold start, use our config values input for since
'''
opt_history = int(_getSetting(helper,'log_history'))
helper.log_debug(log_metric + "_collectLogs sees a coldstart for logs, collecting " + (str(opt_history)) + " days of history" )
dtsince = dtnow - timedelta( days = int(opt_history))
since = dtsince.isoformat()[:-3] + 'Z'
params = {'sortOrder': 'ASCENDING', 'limit': opt_limit, 'since': since}
helper.log_debug("Calling _okta_caller")
logs = _okta_caller(helper, resource, params, method, opt_limit)
helper.log_debug("_okta_caller returned")
'''
Stash the last UUID returned
Stash the since value as a failsafe
Remove potential dupes that may come from failsafe polling method
'''
if ( (len(logs)) > 0 ):
lastUuid = helper.get_check_point((cp_prefix + "logs_lastUuid"))
if (logs[0]['uuid'] == lastUuid):
helper.log_debug(log_metric + "_collectLogs removing duplicate log uuid=" + lastUuid)
pop = logs.pop(0)
helper.log_info(log_metric + "_collectLogs removed duplicate entry: " + pop['uuid'])
helper.log_debug(log_metric + "_collectLogs checkpoint logs_since: " + logs[-1]['published'] + " and logs_lastUuid: " + logs[-1]['uuid'])
helper.save_check_point((cp_prefix + "logs_since"), logs[-1]['published'])
helper.save_check_point((cp_prefix + "logs_lastUuid"), logs[-1]['uuid'])
return logs
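#--- Illustrative sketch, not part of the original add-on ---
#The failsafe dedupe above drops at most one leading record: the event whose
#uuid matches the last one written on the previous run. In isolation:
def _dedupe_sketch(logs, last_uuid):
    if logs and logs[0].get('uuid') == last_uuid:
        return logs[1:]
    return logs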
def validate_input(helper, definition):
"""Implement your own validation logic to validate the input stanza configurations"""
    # Best I can tell this never gets invoked
helper.log_debug("validate_input has been invoked" )
pass
def collect_events(helper, ew):
""" Do this thing """
opt_metric = helper.get_arg('metric')
log_metric = "metric=" + opt_metric + " | message="
helper.log_debug(log_metric + "Fetching Metric")
global_account = helper.get_arg('global_account')
cp_prefix = global_account['name']
loglevel = helper.get_log_level()
helper.set_log_level(loglevel)
limits = { 'log': {'minTime': 29, 'minSize':10, 'defSize':1000, 'maxSize': 1000, 'maxHistory': 180 },
'user': {'minTime': 899, 'minSize':20, 'defSize':200, 'maxSize': 1000 },
'group': {'minTime': 899, 'minSize':20, 'defSize':500, 'maxSize': 1000 },
'app': {'minTime': 86390, 'minSize':20, 'defSize':500, 'maxSize': 1000 },
'zset': {'minTime': 86400, 'minSize':42, 'defSize':42, 'maxSize': 42 }, }
#Enforce minTimes at runtime
lastTs = helper.get_check_point((cp_prefix + ":" + opt_metric + ":lastRun"))
if lastTs is None:
lastTs = 0
lastTs = int(lastTs)
ts = int(time.time())
diff = (ts - lastTs)
#Confirm we aren't too frequent
if (diff < limits[opt_metric]['minTime']):
helper.log_error(log_metric + "collect_events Invoked, it has been only been " + str(diff) + " seconds since we last ran, skipping")
return
    #Confirm our values are within an acceptable range
    try:
        #limVar becomes log_limit, user_limit etc
        limVar = opt_metric + '_limit'
        opt_limit = _getSetting(helper, limVar)
        opt_limit = int(opt_limit)
    except Exception:
        opt_limit = limits[opt_metric]['defSize']
if (limits[opt_metric]['minSize'] <= opt_limit <= limits[opt_metric]['maxSize']):
helper.log_debug(log_metric + "collect_events Invoked, the input limit size of " + str(opt_limit) + " is INSIDE the allowable range, continuing")
else:
helper.log_error(log_metric + "collect_events Invoked, the input limit size of " + str(opt_limit) + " is OUTSIDE the allowable range of " + str(limits[opt_metric]['minSize']) + " and " + str(limits[opt_metric]['maxSize']) + ", skipping")
return
    #passed validation, set the lastRun checkpoint to the current timestamp
    helper.save_check_point((cp_prefix + ":" + opt_metric + ":lastRun"), ts)
if opt_metric == "zset":
helper.log_debug(log_metric + "Invoking a call to reset all of our checkpoints")
        # can I run a query to find my checkpoints dynamically?
# reset = helper.delete_check_point((cp_prefix + "logs_lastUuid"))
# reset = helper.delete_check_point((cp_prefix + "logs_n_val"))
# reset = helper.delete_check_point((cp_prefix + "logs_since"))
# reset = helper.delete_check_point((cp_prefix + "users_lastUpdated"))
# reset = helper.delete_check_point((cp_prefix + "groups_lastUpdated"))
# reset = helper.delete_check_point((cp_prefix + ":log:lastRun"))
# reset = helper.delete_check_point((cp_prefix + ":app:lastRun"))
# reset = helper.delete_check_point((cp_prefix + ":group:lastRun"))
# reset = helper.delete_check_point((cp_prefix + ":user:lastRun"))
elif opt_metric == "log":
helper.log_debug(log_metric + "Invoking a call for logs")
logs = _collectLogs(helper)
if ( len(logs) > 0 ):
helper.log_info(log_metric + "Writing " + (str(len(logs))) + " logs to splunk")
_write_oktaResults(helper, ew, opt_metric, logs)
else:
helper.log_info(log_metric + "Zero logs returned")
elif opt_metric == "user":
helper.log_debug(log_metric + "Invoking a call for users")
users = _collectUsers(helper)
if ( len(users) > 0 ):
helper.log_debug(log_metric + "Writing " + (str(len(users))) + " users to splunk")
_write_oktaResults(helper, ew, opt_metric, users)
else:
helper.log_debug(log_metric + "Zero users returned")
elif opt_metric == "group":
helper.log_debug(log_metric + "Invoking a call for groups")
groups = _collectGroups(helper, ew)
if ( len(groups) > 0 ):
helper.log_info(log_metric + "Writing " + (str(len(groups))) + " groups to splunk")
_write_oktaResults(helper, ew, opt_metric, groups)
else:
helper.log_info(log_metric + "Zero groups returned")
elif opt_metric == "app":
helper.log_debug(log_metric + "Invoking a call for apps")
apps = _collectApps(helper, ew)
if ( len(apps) > 0 ):
helper.log_info(log_metric + "Writing " + (str(len(apps))) + " apps to splunk")
_write_oktaResults(helper, ew, opt_metric , apps)
else:
helper.log_info(log_metric + "Zero apps returned")
else:
#this is bad
        helper.log_error(log_metric + "Something happened that should never have happened")
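#--- Illustrative sketch, not part of the original add-on ---
#collect_events gates each metric on a per-metric minimum interval via the
#lastRun checkpoint saved above; the core of that guard, with hypothetical
#names (now is passed in explicitly to keep the sketch pure and testable):
def _should_run_sketch(last_ts, min_interval, now):
    return (int(now) - int(last_ts or 0)) >= min_interval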
|
[
"os.path.exists",
"json.dumps",
"time.sleep",
"time.time",
"datetime.datetime.strptime",
"datetime.datetime.now",
"re.sub"
] |
[((595, 646), 'datetime.datetime.strptime', 'datetime.strptime', (['iso8601', '"""%Y-%m-%dT%H:%M:%S.%fZ"""'], {}), "(iso8601, '%Y-%m-%dT%H:%M:%S.%fZ')\n", (612, 646), False, 'from datetime import datetime, timedelta\n'), ((18378, 18392), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (18390, 18392), False, 'from datetime import datetime, timedelta\n'), ((29853, 29867), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (29865, 29867), False, 'from datetime import datetime, timedelta\n'), ((924, 935), 'time.time', 'time.time', ([], {}), '()\n', (933, 935), False, 'import time\n'), ((2132, 2153), 'time.sleep', 'time.sleep', (['sleepTime'], {}), '(sleepTime)\n', (2142, 2153), False, 'import time\n'), ((7754, 7770), 'json.dumps', 'json.dumps', (['item'], {}), '(item)\n', (7764, 7770), False, 'import json\n'), ((7786, 7819), 're.sub', 're.sub', (['"""[\\\\s\\\\r\\\\n]+"""', '""" """', 'data'], {}), "('[\\\\s\\\\r\\\\n]+', ' ', data)\n", (7792, 7819), False, 'import re\n'), ((15139, 15181), 'os.path.exists', 'os.path.exists', (['custom_ca_cert_bundle_path'], {}), '(custom_ca_cert_bundle_path)\n', (15153, 15181), False, 'import os\n'), ((33883, 33894), 'time.time', 'time.time', ([], {}), '()\n', (33892, 33894), False, 'import time\n'), ((4376, 4397), 'time.sleep', 'time.sleep', (['sleepTime'], {}), '(sleepTime)\n', (4386, 4397), False, 'import time\n'), ((4709, 4722), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4719, 4722), False, 'import time\n'), ((4897, 4910), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4907, 4910), False, 'import time\n'), ((5086, 5099), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5096, 5099), False, 'import time\n')]
|
# -*- coding: future_fstrings -*-
import threading
from flask_mail import Message
from flask import render_template, current_app
from app.extensions import mail
def send_reset_password(user, pin, callback_url):
    send(f'{pin} is your Typer account recovery code',
         sender=current_app.config['MAIL_USERNAME'],
         recipients=[user.email],
         html=render_template(
             'email.html',
             user=user, pin=pin, callback_url=callback_url))
def send_email(app, msg):
with app.app_context():
mail.send(msg)
def send(subject, sender, recipients, html):
message = Message(subject, sender=sender, recipients=recipients)
message.html = html
threading.Thread(
target=send_email,
args=(current_app._get_current_object(), message,)
).start()
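#--- Illustrative usage sketch, not part of the original module ---
#The Message is built on the request thread; delivery happens on a worker
#thread, so send() captures current_app._get_current_object() rather than
#the context-local proxy, which would be unbound inside the new thread.
#A hypothetical call site (user, pin, and callback_url are assumptions):
#
#    send_reset_password(user, pin='123456',
#                        callback_url='https://example.com/reset?token=abc')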
|
[
"flask.current_app._get_current_object",
"app.extensions.mail.send",
"flask.render_template",
"flask_mail.Message"
] |
[((611, 665), 'flask_mail.Message', 'Message', (['subject'], {'sender': 'sender', 'recipients': 'recipients'}), '(subject, sender=sender, recipients=recipients)\n', (618, 665), False, 'from flask_mail import Message\n'), ((537, 551), 'app.extensions.mail.send', 'mail.send', (['msg'], {}), '(msg)\n', (546, 551), False, 'from app.extensions import mail\n'), ((370, 446), 'flask.render_template', 'render_template', (['"""email.html"""'], {'user': 'user', 'pin': 'pin', 'callback_url': 'callback_url'}), "('email.html', user=user, pin=pin, callback_url=callback_url)\n", (385, 446), False, 'from flask import render_template, current_app\n'), ((741, 774), 'flask.current_app._get_current_object', 'current_app._get_current_object', ([], {}), '()\n', (772, 774), False, 'from flask import render_template, current_app\n')]
|