content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
from abaqusConstants import *
from .AnalysisStep import AnalysisStep
from ..Adaptivity.AdaptiveMeshConstraintState import AdaptiveMeshConstraintState
from ..Adaptivity.AdaptiveMeshDomain import AdaptiveMeshDomain
from ..BoundaryCondition.BoundaryConditionState import BoundaryConditionState
from ..Load.LoadCase import LoadCase
from ..Load.LoadState import LoadState
from ..PredefinedField.PredefinedFieldState import PredefinedFieldState
from ..StepMiscellaneous.Control import Control
from ..StepMiscellaneous.SolverControl import SolverControl
from ..StepOutput.DiagnosticPrint import DiagnosticPrint
from ..StepOutput.FieldOutputRequestState import FieldOutputRequestState
from ..StepOutput.HistoryOutputRequestState import HistoryOutputRequestState
from ..StepOutput.Monitor import Monitor
from ..StepOutput.Restart import Restart
class AnnealStep(AnalysisStep):
    """The AnnealStep object anneals a structure by setting the velocities and all appropriate
    state variables to zero.
    The AnnealStep object is derived from the AnalysisStep object.

    Attributes
    ----------
    name: str
        A String specifying the repository key.
    refTemp: float
        A Float specifying the post-anneal reference temperature. The default value is the
        current temperature at all nodes in the model after the annealing has completed.
    previous: str
        A String specifying the name of the previous step. The new step appears after this step
        in the list of analysis steps.
    description: str
        A String specifying a description of the new step. The default value is an empty string.
    explicit: SymbolicConstant
        A SymbolicConstant specifying whether the step has an explicit procedure type
        (**procedureType=ANNEAL**, DYNAMIC_EXPLICIT, or DYNAMIC_TEMP_DISPLACEMENT).
    perturbation: Boolean
        A Boolean specifying whether the step has a perturbation procedure type.
    nonmechanical: Boolean
        A Boolean specifying whether the step has a mechanical procedure type.
    procedureType: SymbolicConstant
        A SymbolicConstant specifying the Abaqus procedure. Possible values are:
        - ANNEAL
        - BUCKLE
        - COMPLEX_FREQUENCY
        - COUPLED_TEMP_DISPLACEMENT
        - COUPLED_THERMAL_ELECTRIC
        - DIRECT_CYCLIC
        - DYNAMIC_IMPLICIT
        - DYNAMIC_EXPLICIT
        - DYNAMIC_SUBSPACE
        - DYNAMIC_TEMP_DISPLACEMENT
        - COUPLED_THERMAL_ELECTRICAL_STRUCTURAL
        - FREQUENCY
        - GEOSTATIC
        - HEAT_TRANSFER
        - MASS_DIFFUSION
        - MODAL_DYNAMICS
        - RANDOM_RESPONSE
        - RESPONSE_SPECTRUM
        - SOILS
        - STATIC_GENERAL
        - STATIC_LINEAR_PERTURBATION
        - STATIC_RIKS
        - STEADY_STATE_DIRECT
        - STEADY_STATE_MODAL
        - STEADY_STATE_SUBSPACE
        - VISCO
    suppressed: Boolean
        A Boolean specifying whether the step is suppressed or not. The default value is OFF.
    fieldOutputRequestState: dict[str, FieldOutputRequestState]
        A repository of :py:class:`~abaqus.StepOutput.FieldOutputRequestState.FieldOutputRequestState` objects.
    historyOutputRequestState: dict[str, HistoryOutputRequestState]
        A repository of :py:class:`~abaqus.StepOutput.HistoryOutputRequestState.HistoryOutputRequestState` objects.
    diagnosticPrint: DiagnosticPrint
        A :py:class:`~abaqus.StepOutput.DiagnosticPrint.DiagnosticPrint` object.
    monitor: Monitor
        A :py:class:`~abaqus.StepOutput.Monitor.Monitor` object.
    restart: Restart
        A :py:class:`~abaqus.StepOutput.Restart.Restart` object.
    adaptiveMeshConstraintStates: dict[str, AdaptiveMeshConstraintState]
        A repository of :py:class:`~abaqus.Adaptivity.AdaptiveMeshConstraintState.AdaptiveMeshConstraintState` objects.
    adaptiveMeshDomains: dict[str, AdaptiveMeshDomain]
        A repository of :py:class:`~abaqus.Adaptivity.AdaptiveMeshDomain.AdaptiveMeshDomain` objects.
    control: Control
        A :py:class:`~abaqus.StepMiscellaneous.Control.Control` object.
    solverControl: SolverControl
        A :py:class:`~abaqus.StepMiscellaneous.SolverControl.SolverControl` object.
    boundaryConditionStates: dict[str, BoundaryConditionState]
        A repository of :py:class:`~abaqus.BoundaryCondition.BoundaryConditionState.BoundaryConditionState` objects.
    interactionStates: int
        A repository of :py:class:`~abaqus.Interaction.InteractionState.InteractionState` objects.
    loadStates: dict[str, LoadState]
        A repository of :py:class:`~abaqus.Load.LoadState.LoadState` objects.
    loadCases: dict[str, LoadCase]
        A repository of :py:class:`~abaqus.Load.LoadCase.LoadCase` objects.
    predefinedFieldStates: dict[str, PredefinedFieldState]
        A repository of :py:class:`~abaqus.PredefinedField.PredefinedFieldState.PredefinedFieldState` objects.

    Notes
    -----
    This object can be accessed by:

    .. code-block:: python

        import step
        mdb.models[name].steps[name]

    The corresponding analysis keywords are:
    - ANNEAL
    - STEP
    """

    # A String specifying the repository key.
    name: str = ''

    # A Float specifying the post-anneal reference temperature. The default value is the
    # current temperature at all nodes in the model after the annealing has completed.
    refTemp: float = None

    # A String specifying the name of the previous step. The new step appears after this step
    # in the list of analysis steps.
    previous: str = ''

    # A String specifying a description of the new step. The default value is an empty string.
    description: str = ''

    # A SymbolicConstant specifying whether the step has an explicit procedure type
    # (*procedureType*=ANNEAL, DYNAMIC_EXPLICIT, or DYNAMIC_TEMP_DISPLACEMENT).
    explicit: SymbolicConstant = None

    # A Boolean specifying whether the step has a perturbation procedure type.
    perturbation: Boolean = OFF

    # A Boolean specifying whether the step has a mechanical procedure type.
    nonmechanical: Boolean = OFF

    # A SymbolicConstant specifying the Abaqus procedure. Possible values are
    # listed in the class docstring (ANNEAL, BUCKLE, ..., VISCO).
    procedureType: SymbolicConstant = None

    # A Boolean specifying whether the step is suppressed or not. The default value is OFF.
    suppressed: Boolean = OFF

    # NOTE: repositories are initialized with a plain dict literal. Calling a
    # subscripted generic such as dict[str, X]() ignores the type arguments at
    # runtime anyway, so {} is equivalent and idiomatic.

    # A repository of FieldOutputRequestState objects.
    fieldOutputRequestState: dict[str, FieldOutputRequestState] = {}

    # A repository of HistoryOutputRequestState objects.
    historyOutputRequestState: dict[str, HistoryOutputRequestState] = {}

    # A DiagnosticPrint object.
    diagnosticPrint: DiagnosticPrint = DiagnosticPrint()

    # A Monitor object.
    monitor: Monitor = None

    # A Restart object.
    restart: Restart = Restart()

    # A repository of AdaptiveMeshConstraintState objects.
    adaptiveMeshConstraintStates: dict[str, AdaptiveMeshConstraintState] = {}

    # A repository of AdaptiveMeshDomain objects.
    adaptiveMeshDomains: dict[str, AdaptiveMeshDomain] = {}

    # A Control object.
    control: Control = Control()

    # A SolverControl object.
    solverControl: SolverControl = SolverControl()

    # A repository of BoundaryConditionState objects.
    boundaryConditionStates: dict[str, BoundaryConditionState] = {}

    # A repository of InteractionState objects.
    interactionStates: int = None

    # A repository of LoadState objects.
    loadStates: dict[str, LoadState] = {}

    # A repository of LoadCase objects.
    loadCases: dict[str, LoadCase] = {}

    # A repository of PredefinedFieldState objects.
    predefinedFieldStates: dict[str, PredefinedFieldState] = {}

    def __init__(self, name: str, previous: str, description: str = '', refTemp: float = None,
                 maintainAttributes: Boolean = False):
        """This method creates an AnnealStep object.

        Notes
        -----
        This function can be accessed by:

        .. code-block:: python

            mdb.models[name].AnnealStep

        Parameters
        ----------
        name
            A String specifying the repository key.
        previous
            A String specifying the name of the previous step. The new step appears after this step
            in the list of analysis steps.
        description
            A String specifying a description of the new step. The default value is an empty string.
        refTemp
            A Float specifying the post-anneal reference temperature. The default value is the
            current temperature at all nodes in the model after the annealing has completed.
        maintainAttributes
            A Boolean specifying whether to retain attributes from an existing step with the same
            name. The default value is False.

        Returns
        -------
        An AnnealStep object.

        Raises
        ------
        RangeError
        """
        # Stub: the real implementation lives inside Abaqus/CAE.
        super().__init__()

    def setValues(self, description: str = '', refTemp: float = None):
        """This method modifies the AnnealStep object.

        Parameters
        ----------
        description
            A String specifying a description of the new step. The default value is an empty string.
        refTemp
            A Float specifying the post-anneal reference temperature. The default value is the
            current temperature at all nodes in the model after the annealing has completed.

        Raises
        ------
        RangeError
        """
        # Stub: no-op in the type-hint package.
        pass
| src/abaqus/Step/AnnealStep.py | 10,587 | The AnnealStep object anneals a structure by setting the velocities and all appropriate
state variables to zero.
The AnnealStep object is derived from the AnalysisStep object.
Attributes
----------
name: str
A String specifying the repository key.
refTemp: float
A Float specifying the post-anneal reference temperature. The default value is the
current temperature at all nodes in the model after the annealing has completed.
previous: str
A String specifying the name of the previous step. The new step appears after this step
in the list of analysis steps.
description: str
A String specifying a description of the new step. The default value is an empty string.
explicit: SymbolicConstant
A SymbolicConstant specifying whether the step has an explicit procedure type
(**procedureType=ANNEAL**, DYNAMIC_EXPLICIT, or DYNAMIC_TEMP_DISPLACEMENT).
perturbation: Boolean
A Boolean specifying whether the step has a perturbation procedure type.
nonmechanical: Boolean
A Boolean specifying whether the step has a mechanical procedure type.
procedureType: SymbolicConstant
A SymbolicConstant specifying the Abaqus procedure. Possible values are:
- ANNEAL
- BUCKLE
- COMPLEX_FREQUENCY
- COUPLED_TEMP_DISPLACEMENT
- COUPLED_THERMAL_ELECTRIC
- DIRECT_CYCLIC
- DYNAMIC_IMPLICIT
- DYNAMIC_EXPLICIT
- DYNAMIC_SUBSPACE
- DYNAMIC_TEMP_DISPLACEMENT
- COUPLED_THERMAL_ELECTRICAL_STRUCTURAL
- FREQUENCY
- GEOSTATIC
- HEAT_TRANSFER
- MASS_DIFFUSION
- MODAL_DYNAMICS
- RANDOM_RESPONSE
- RESPONSE_SPECTRUM
- SOILS
- STATIC_GENERAL
- STATIC_LINEAR_PERTURBATION
- STATIC_RIKS
- STEADY_STATE_DIRECT
- STEADY_STATE_MODAL
- STEADY_STATE_SUBSPACE
- VISCO
suppressed: Boolean
A Boolean specifying whether the step is suppressed or not. The default value is OFF.
fieldOutputRequestState: dict[str, FieldOutputRequestState]
A repository of :py:class:`~abaqus.StepOutput.FieldOutputRequestState.FieldOutputRequestState` objects.
historyOutputRequestState: dict[str, HistoryOutputRequestState]
A repository of :py:class:`~abaqus.StepOutput.HistoryOutputRequestState.HistoryOutputRequestState` objects.
diagnosticPrint: DiagnosticPrint
A :py:class:`~abaqus.StepOutput.DiagnosticPrint.DiagnosticPrint` object.
monitor: Monitor
A :py:class:`~abaqus.StepOutput.Monitor.Monitor` object.
restart: Restart
A :py:class:`~abaqus.StepOutput.Restart.Restart` object.
adaptiveMeshConstraintStates: dict[str, AdaptiveMeshConstraintState]
A repository of :py:class:`~abaqus.Adaptivity.AdaptiveMeshConstraintState.AdaptiveMeshConstraintState` objects.
adaptiveMeshDomains: dict[str, AdaptiveMeshDomain]
A repository of :py:class:`~abaqus.Adaptivity.AdaptiveMeshDomain.AdaptiveMeshDomain` objects.
control: Control
A :py:class:`~abaqus.StepMiscellaneous.Control.Control` object.
solverControl: SolverControl
A :py:class:`~abaqus.StepMiscellaneous.SolverControl.SolverControl` object.
boundaryConditionStates: dict[str, BoundaryConditionState]
A repository of :py:class:`~abaqus.BoundaryCondition.BoundaryConditionState.BoundaryConditionState` objects.
interactionStates: int
A repository of :py:class:`~abaqus.Interaction.InteractionState.InteractionState` objects.
loadStates: dict[str, LoadState]
A repository of :py:class:`~abaqus.Load.LoadState.LoadState` objects.
loadCases: dict[str, LoadCase]
A repository of :py:class:`~abaqus.Load.LoadCase.LoadCase` objects.
predefinedFieldStates: dict[str, PredefinedFieldState]
A repository of :py:class:`~abaqus.PredefinedField.PredefinedFieldState.PredefinedFieldState` objects.
Notes
-----
This object can be accessed by:
.. code-block:: python
import step
mdb.models[name].steps[name]
The corresponding analysis keywords are:
- ANNEAL
- STEP
This method creates an AnnealStep object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].AnnealStep
Parameters
----------
name
A String specifying the repository key.
previous
A String specifying the name of the previous step. The new step appears after this step
in the list of analysis steps.
description
A String specifying a description of the new step. The default value is an empty string.
refTemp
A Float specifying the post-anneal reference temperature. The default value is the
current temperature at all nodes in the model after the annealing has completed.
maintainAttributes
A Boolean specifying whether to retain attributes from an existing step with the same
name. The default value is False.
Returns
-------
An AnnealStep object.
Raises
------
RangeError
This method modifies the AnnealStep object.
Parameters
----------
description
A String specifying a description of the new step. The default value is an empty string.
refTemp
A Float specifying the post-anneal reference temperature. The default value is the
current temperature at all nodes in the model after the annealing has completed.
Raises
------
RangeError
A String specifying the repository key. A Float specifying the post-anneal reference temperature. The default value is the current temperature at all nodes in the model after the annealing has completed. A String specifying the name of the previous step. The new step appears after this step in the list of analysis steps. A String specifying a description of the new step. The default value is an empty string. A SymbolicConstant specifying whether the step has an explicit procedure type (*procedureType*=ANNEAL, DYNAMIC_EXPLICIT, or DYNAMIC_TEMP_DISPLACEMENT). A Boolean specifying whether the step has a perturbation procedure type. A Boolean specifying whether the step has a mechanical procedure type. A SymbolicConstant specifying the Abaqus procedure. Possible values are: - ANNEAL - BUCKLE - COMPLEX_FREQUENCY - COUPLED_TEMP_DISPLACEMENT - COUPLED_THERMAL_ELECTRIC - DIRECT_CYCLIC - DYNAMIC_IMPLICIT - DYNAMIC_EXPLICIT - DYNAMIC_SUBSPACE - DYNAMIC_TEMP_DISPLACEMENT - COUPLED_THERMAL_ELECTRICAL_STRUCTURAL - FREQUENCY - GEOSTATIC - HEAT_TRANSFER - MASS_DIFFUSION - MODAL_DYNAMICS - RANDOM_RESPONSE - RESPONSE_SPECTRUM - SOILS - STATIC_GENERAL - STATIC_LINEAR_PERTURBATION - STATIC_RIKS - STEADY_STATE_DIRECT - STEADY_STATE_MODAL - STEADY_STATE_SUBSPACE - VISCO A Boolean specifying whether the step is suppressed or not. The default value is OFF. A repository of FieldOutputRequestState objects. A repository of HistoryOutputRequestState objects. A DiagnosticPrint object. A Monitor object. A Restart object. A repository of AdaptiveMeshConstraintState objects. A repository of AdaptiveMeshDomain objects. A Control object. A SolverControl object. A repository of BoundaryConditionState objects. A repository of InteractionState objects. A repository of LoadState objects. A repository of LoadCase objects. A repository of PredefinedFieldState objects. | 7,139 | en | 0.561797 |
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
def sample_mask(idx, l):
    """Create a boolean mask of length *l* that is True at the positions in *idx*.

    :param idx: index (or sequence of indices) to mark as True.
    :param l: total length of the mask.
    :return: numpy.ndarray of dtype bool.
    """
    # np.bool was a deprecated alias removed in NumPy 1.24; use the builtin bool.
    mask = np.zeros(l, dtype=bool)
    mask[idx] = 1
    return mask
def _unpickle(path):
    """Load and return one pickled object from the file at *path* (binary mode)."""
    with open(path, "rb") as f:
        return pkl.load(f)

def load_data(cell_line, cross_cell_line, label_rate, k_mer):
    """
    Load input data from data/cell_line directory.
    | x_20.index | the indices (IDs) of labeled train instances as list object (for label_rate = 20%) |
    | ux_20.index | the indices (IDs) of unlabeled train instances as list object (for label_rate = 20%) |
    | vx_20.index | the indices (IDs) of validation instances as list object (for label_rate = 20%) |
    | tx_20.index | the indices (IDs) of test instances as list object (for label_rate = 20%) |
    | features_5mer | the feature vectors of all instances as scipy.sparse.csr.csr_matrix object (for k_mer = 5) |
    | nodes | a dict in the format {chromosome_name: ID} as collections.defaultdict object |
    | labels | the one-hot labels of all instances as numpy.ndarray object |
    | graph | a dict in the format {ID: [IDs_of_neighbor_nodes]} as collections.defaultdict object |
    All objects above must be saved using python pickle module.
    :param cell_line: Name of the cell line to which the datasets belong
    :return: All data input files loaded (as well as the training/test data).
    """
    if (cross_cell_line is not None) and (cross_cell_line != cell_line):
        read_dir = 'data/{}_{}/'.format(cell_line, cross_cell_line)
    else:
        read_dir = 'data/{}/'.format(cell_line)

    # STEP 1: Load all feature vectors, class labels and graph
    features = _unpickle('{}/features_{}mer'.format(read_dir, k_mer))
    labels = _unpickle('{}/labels'.format(read_dir))
    graph = _unpickle('{}/graph'.format(read_dir))
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))

    # STEP 2: Load IDs of labeled_train/unlabeled_train/validation/test nodes
    # e.g. label_rate 0.20 -> '20', used as the index-file name suffix
    lr = '{:.2f}'.format(label_rate).split('.')[1]
    idx_x = _unpickle('{}/x_{}.index'.format(read_dir, lr))
    idx_ux = _unpickle('{}/ux_{}.index'.format(read_dir, lr))
    idx_vx = _unpickle('{}/vx_{}.index'.format(read_dir, lr))
    idx_tx = _unpickle('{}/tx_{}.index'.format(read_dir, lr))

    # STEP 3: Take subsets from loaded features and class labels using loaded IDs
    x = features[idx_x]
    y = labels[idx_x]
    ux = features[idx_ux]
    uy = labels[idx_ux]
    vx = features[idx_vx]
    vy = labels[idx_vx]
    tx = features[idx_tx]
    ty = labels[idx_tx]
    print("x={} ux={} vx={} tx={}".format(x.shape[0], ux.shape[0], vx.shape[0], tx.shape[0]))

    # STEP 4: Mask labels so each split only sees its own rows
    train_mask = sample_mask(idx_x, labels.shape[0])
    val_mask = sample_mask(idx_vx, labels.shape[0])
    test_mask = sample_mask(idx_tx, labels.shape[0])
    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]
    return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def sparse_to_tuple(sparse_mx):
    """Convert a scipy sparse matrix (or a list of them) to (coords, values, shape) tuples."""
    def _as_tuple(m):
        # Work on the COO layout so row/col/data arrays are directly available.
        coo = m if sp.isspmatrix_coo(m) else m.tocoo()
        indices = np.vstack((coo.row, coo.col)).transpose()
        return indices, coo.data, coo.shape

    if isinstance(sparse_mx, list):
        # Convert each element in place, mirroring the mutate-and-return contract.
        for pos, m in enumerate(sparse_mx):
            sparse_mx[pos] = _as_tuple(m)
        return sparse_mx
    return _as_tuple(sparse_mx)
def preprocess_features(features):
    """Row-normalize the sparse feature matrix and return its tuple representation."""
    row_sums = np.array(features.sum(1))
    # Reciprocal of each row sum; rows that sum to zero produce inf, zeroed below.
    inv = np.power(row_sums, -1).flatten()
    inv[np.isinf(inv)] = 0.
    normalized = sp.diags(inv).dot(features)
    return sparse_to_tuple(normalized)
def normalize_adj(adj):
    """Return the symmetric normalization D^{-1/2} A D^{-1/2} of *adj* in COO format."""
    a = sp.coo_matrix(adj)
    degrees = np.array(a.sum(1))
    # d^{-1/2} per node; isolated nodes (degree 0) yield inf, which is zeroed out.
    inv_sqrt = np.power(degrees, -0.5).flatten()
    inv_sqrt[np.isinf(inv_sqrt)] = 0.
    d_half = sp.diags(inv_sqrt)
    return a.dot(d_half).transpose().dot(d_half).tocoo()
def preprocess_adj(adj):
    """Add self-loops, symmetrically normalize, and return the tuple representation (simple GCN)."""
    with_self_loops = adj + sp.eye(adj.shape[0])
    return sparse_to_tuple(normalize_adj(with_self_loops))
def construct_feed_dict(features, support, labels, labels_mask, placeholders):
    """Build the feed dictionary mapping each placeholder to its concrete input value."""
    feed_dict = {
        placeholders['labels']: labels,
        placeholders['labels_mask']: labels_mask,
        placeholders['features']: features,
        # features is a (coords, values, shape) tuple; values carry the nonzero count.
        placeholders['num_features_nonzero']: features[1].shape,
    }
    for pos, piece in enumerate(support):
        feed_dict[placeholders['support'][pos]] = piece
    return feed_dict
def chebyshev_polynomials(adj, k):
    """Compute Chebyshev polynomials T_0..T_k of the rescaled graph Laplacian.

    Returns a list of k+1 sparse matrices in tuple representation.
    """
    print("Calculating Chebyshev polynomials up to order {}...".format(k))

    # Rescale the normalized Laplacian so its spectrum lies in [-1, 1].
    n = adj.shape[0]
    laplacian = sp.eye(n) - normalize_adj(adj)
    largest_eigval, _ = eigsh(laplacian, 1, which='LM')
    rescaled = (2. / largest_eigval[0]) * laplacian - sp.eye(n)

    # Chebyshev recurrence: T_m = 2 * L * T_{m-1} - T_{m-2}
    polys = [sp.eye(n), rescaled]
    rescaled_csr = sp.csr_matrix(rescaled, copy=True)
    for _ in range(2, k + 1):
        polys.append(2 * rescaled_csr.dot(polys[-1]) - polys[-2])
    return sparse_to_tuple(polys)
| utils.py | 6,506 | Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation).
Construct feed dictionary.
Load input data from data/cell_line directory.
| x_20.index | the indices (IDs) of labeled train instances as list object (for label_rate = 20%) |
| ux_20.index | the indices (IDs) of unlabeled train instances as list object (for label_rate = 20%) |
| vx_20.index | the indices (IDs) of validation instances as list object (for label_rate = 20%) |
| tx_20.index | the indices (IDs) of test instances as list object (for label_rate = 20%) |
| features_5mer | the feature vectors of all instances as scipy.sparse.csr.csr_matrix object (for k_mer = 5) |
| nodes | a dict in the format {chromosome_name: ID} as collections.defaultdict object |
| labels | the one-hot labels of all instances as numpy.ndarray object |
| graph | a dict in the format {ID: [IDs_of_neighbor_nodes]} as collections.defaultdict object |
All objects above must be saved using python pickle module.
:param cell_line: Name of the cell line to which the datasets belong
:return: All data input files loaded (as well as the training/test data).
Symmetrically normalize adjacency matrix.
Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation.
Row-normalize feature matrix and convert to tuple representation
Create mask.
Convert sparse matrix to tuple representation.
STEP 1: Load all feature vectors, class labels and graph STEP 2: Load IDs of labeled_train/unlabeled_train/validation/test nodes STEP 3: Take subsets from loaded features and class labels using loaded IDs STEP 4: Mask labels | 1,660 | en | 0.764393 |
class FittingAngleUsage(Enum, IComparable, IFormattable, IConvertible):
    """
    An enumerated type representing the options for how to limit the angle values
    applicable to fitting content.

    enum FittingAngleUsage, values: UseAnyAngle (0), UseAnAngleIncrement (1),
    UseSpecificAngles (2)
    """
    # NOTE: this is an auto-generated stub for a .NET enum; all method bodies
    # are intentionally empty (the real implementation lives in the Revit API).
    def __eq__(self, *args):
        """ x.__eq__(y) <==> x == y """
        pass
    def __format__(self, *args):
        """ __format__(formattable: IFormattable, format: str) -> str """
        pass
    def __ge__(self, *args):
        """ x.__ge__(y) <==> x >= y """
        pass
    def __gt__(self, *args):
        """ x.__gt__(y) <==> x > y """
        pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self, *args):
        """ x.__le__(y) <==> x <= y """
        pass
    def __lt__(self, *args):
        """ x.__lt__(y) <==> x < y """
        pass
    def __ne__(self, *args):
        """ x.__ne__(y) <==> x != y """
        pass
    def __reduce_ex__(self, *args):
        """ Helper for pickle. """
        pass
    def __str__(self, *args):
        """ x.__str__() <==> str(x) """
        pass
    # Enum members; numeric values per the class docstring.
    UseAnAngleIncrement = None  # 1
    UseAnyAngle = None  # 0
    UseSpecificAngles = None  # 2
    value__ = None  # underlying .NET enum value
| release/stubs.min/Autodesk/Revit/DB/__init___parts/FittingAngleUsage.py | 1,260 | An enumerated type representing the options for how to limit the angle values applicable to fitting content.
enum FittingAngleUsage,values: UseAnAngleIncrement (1),UseAnyAngle (0),UseSpecificAngles (2)
x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y
__format__(formattable: IFormattable,format: str) -> str
x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature | 532 | en | 0.391853 |
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlFileListing(PerlPackage):
    """Parse directory listing"""
    # CPAN distribution File::Listing by GAAS.
    homepage = "http://search.cpan.org/~gaas/File-Listing-6.04/lib/File/Listing.pm"
    url = "http://search.cpan.org/CPAN/authors/id/G/GA/GAAS/File-Listing-6.04.tar.gz"
    # Checksum is the MD5 of the release tarball above.
    version('6.04', '83f636b477741f3a014585bb9cc079a6')
    # HTTP::Date is needed both to build and at runtime (date parsing in listings).
    depends_on('perl-http-date', type=('build', 'run'))
| var/spack/repos/builtin/packages/perl-file-listing/package.py | 580 | Parse directory listing
Copyright 2013-2018 Lawrence Livermore National Security, LLC and other Spack Project Developers. See the top-level COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) | 214 | en | 0.643363 |
"""
This plugin is for recording test results in the Testcase Database.
"""
import getpass
import time
import uuid
from nose.plugins import Plugin
from nose.exc import SkipTest
from seleniumbase.core.application_manager import ApplicationManager
from seleniumbase.core.testcase_manager import ExecutionQueryPayload
from seleniumbase.core.testcase_manager import TestcaseDataPayload
from seleniumbase.core.testcase_manager import TestcaseManager
from seleniumbase.fixtures import constants
from seleniumbase.fixtures import errors
class DBReporting(Plugin):
    """
    This plugin records test results in the Testcase Database.
    """

    name = "db_reporting"  # Usage: --with-db_reporting

    def __init__(self):
        Plugin.__init__(self)
        self.execution_guid = str(uuid.uuid4())  # one GUID per whole test run
        self.testcase_guid = None  # regenerated for every test
        self.execution_start_time = 0
        self.case_start_time = 0
        self.testcase_manager = None
        self._result_set = False  # True once a result was recorded for the current test
        self._test = None

    def options(self, parser, env):
        """Register the --database_env command-line option with nose."""
        super(DBReporting, self).options(parser, env=env)
        parser.add_option(
            "--database_env",
            "--database-env",
            action="store",
            dest="database_env",
            choices=(
                constants.Environment.QA,
                constants.Environment.STAGING,
                constants.Environment.DEVELOP,
                constants.Environment.PRODUCTION,
                constants.Environment.MASTER,
                constants.Environment.REMOTE,
                constants.Environment.LOCAL,
                constants.Environment.ALPHA,
                constants.Environment.BETA,
                constants.Environment.MAIN,
                constants.Environment.TEST,
            ),
            default=constants.Environment.TEST,
            help="The database environment to run the tests in.",
        )

    def configure(self, options, conf):
        """Store parsed options and create the TestcaseManager for the chosen DB env."""
        super(DBReporting, self).configure(options, conf)
        # NOTE: rebinds self.options from the bound method to the parsed options
        # object (standard nose-plugin convention).
        self.options = options
        self.testcase_manager = TestcaseManager(self.options.database_env)

    def begin(self):
        """At the start of the run, we want to record the test
        execution information in the database."""
        exec_payload = ExecutionQueryPayload()
        exec_payload.execution_start_time = int(time.time() * 1000)
        self.execution_start_time = exec_payload.execution_start_time
        exec_payload.guid = self.execution_guid
        exec_payload.username = getpass.getuser()
        self.testcase_manager.insert_execution_data(exec_payload)

    def startTest(self, test):
        """At the start of the test, set the testcase details."""
        data_payload = TestcaseDataPayload()
        self.testcase_guid = str(uuid.uuid4())
        data_payload.guid = self.testcase_guid
        data_payload.execution_guid = self.execution_guid
        if hasattr(test, "browser"):
            data_payload.browser = test.browser
        else:
            data_payload.browser = "N/A"
        data_payload.test_address = test.id()
        application = ApplicationManager.generate_application_string(test)
        data_payload.env = application.split(".")[0]
        data_payload.start_time = application.split(".")[1]
        data_payload.state = constants.State.UNTESTED
        self.testcase_manager.insert_testcase_data(data_payload)
        self.case_start_time = int(time.time() * 1000)
        # Make the testcase guid available to other plugins
        test.testcase_guid = self.testcase_guid
        # Reset per-test state; without this, a skipped test that followed a
        # completed test would never be recorded in afterTest().
        self._result_set = False
        self._test = test
        self._test._nose_skip_reason = None

    def finalize(self, result):
        """At the end of the test run, we want to
        update the DB row with the total execution time."""
        runtime = int(time.time() * 1000) - self.execution_start_time
        self.testcase_manager.update_execution_data(
            self.execution_guid, runtime
        )

    def afterTest(self, test):
        """If no result was recorded for this test, record it as SKIPPED."""
        if not self._result_set:
            err = None
            try:
                err = self._test._nose_skip_reason
                if err:
                    err = "Skipped: " + str(err)
                    err = (err, err)
            except Exception:
                pass
            if not err:
                err = "Skipped: (no reason given)"
                err = (err, err)
            self.__insert_test_result(constants.State.SKIPPED, self._test, err)

    def addSuccess(self, test, capt):
        """
        After each test success, record testcase run information.
        """
        self.__insert_test_result(constants.State.PASSED, test)
        self._result_set = True

    def addFailure(self, test, err, capt=None, tbinfo=None):
        """
        After each test failure, record testcase run information.
        """
        self.__insert_test_result(constants.State.FAILED, test, err)
        self._result_set = True

    def addError(self, test, err, capt=None):
        """
        After each test error, record testcase run information.
        (Test errors should be treated the same as test failures.)
        """
        self.__insert_test_result(constants.State.FAILED, test, err)
        self._result_set = True

    def handleError(self, test, err, capt=None):
        """
        After each test error, record testcase run information.
        "Error" also encompasses any states other than Pass or Fail, so we
        check for those first.
        """
        # NOTE: each branch re-raises as SkipTest so nose reports the test as
        # skipped rather than errored. (Dead `return True` statements that
        # followed the raises were removed — they were unreachable.)
        if err[0] == errors.BlockedTest:
            self.__insert_test_result(constants.State.BLOCKED, test, err)
            self._result_set = True
            raise SkipTest(err[1])
        elif err[0] == errors.DeprecatedTest:
            self.__insert_test_result(constants.State.DEPRECATED, test, err)
            self._result_set = True
            raise SkipTest(err[1])
        elif err[0] == errors.SkipTest:
            self.__insert_test_result(constants.State.SKIPPED, test, err)
            self._result_set = True
            raise SkipTest(err[1])

    def __insert_test_result(self, state, test, err=None):
        """Write one testcase result row (state, runtime, optional message) to the DB."""
        data_payload = TestcaseDataPayload()
        data_payload.runtime = int(time.time() * 1000) - self.case_start_time
        data_payload.guid = self.testcase_guid
        data_payload.execution_guid = self.execution_guid
        data_payload.state = state
        if err is not None:
            # Strip nose's captured-logging section from the message.
            data_payload.message = (
                err[1]
                .__str__()
                .split(
                    """-------------------- >> """
                    """begin captured logging"""
                    """ << --------------------""",
                    1,
                )[0]
            )
        self.testcase_manager.update_testcase_data(data_payload)
| seleniumbase/plugins/db_reporting_plugin.py | 6,841 | This plugin records test results in the Testcase Database.
After each test error, record testcase run information.
(Test errors should be treated the same as test failures.)
After each test failure, record testcase run information.
After each test success, record testcase run information.
At the start of the run, we want to record the test
execution information in the database.
At the end of the test run, we want to
update the DB row with the total execution time.
After each test error, record testcase run information.
"Error" also encompasses any states other than Pass or Fail, so we
check for those first.
At the start of the test, set the testcase details.
This plugin is for recording test results in the Testcase Database.
Usage: --with-db_reporting Make the testcase guid available to other plugins | 813 | en | 0.83674 |
# -*- encoding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from grappelli.dashboard import modules, Dashboard
from grappelli.dashboard.utils import get_admin_site_name
class DjangoPagesDashboard(Dashboard):
    """
    Custom index dashboard for Django-pages
    """

    def init_with_context(self, context):
        # NOTE(review): site_name is never used afterwards; the call is kept
        # for parity with the original behavior.
        site_name = get_admin_site_name(context)

        # (title, column, collapsible, models) specs for the ModelList panels,
        # appended in the original order.
        model_list_specs = [
            (_('General'), 1, True,
             ('django_pages.site.models.Site',
              'django_pages.site.models.Script',
              'django_pages.language.models.*',
              'django_pages.looks.models.*',
              'django_pages.feed.models.*')),
            (_('Pages'), 1, True, ('django_pages.pages.models.*', )),
            (_('Menu'), 2, True, ('django_pages.menu.models.*', )),
            (_('Comments'), 2, True, ('django_pages.comments.models.*', )),
            (_('SEO'), 2, True, ('django_pages.metadata.models.*', )),
        ]
        for title, column, collapsible, models in model_list_specs:
            self.children.append(
                modules.ModelList(
                    title,
                    column=column,
                    collapsible=collapsible,
                    models=models,
                )
            )

        # Django's own contrib apps, always expanded.
        self.children.append(
            modules.AppList(
                _('Administration'),
                column=1,
                collapsible=False,
                models=('django.contrib.*', )
            )
        )

        # Shortcut to the filebrowser in the third column.
        self.children.append(modules.LinkList(
            _('File Management'),
            column=3,
            children=[
                {
                    'title': _('File Browser'),
                    'url': '/admin/filebrowser/browse/',
                    'external': False,
                },
            ]
        ))

        self.children.append(modules.RecentActions(
            _('Recent Actions'),
            limit=5,
            collapsible=False,
            column=3,
        ))
| django_pages/dashboard.py | 2,529 | Custom index dashboard for Django-pages
-*- encoding: utf-8 -*- | 65 | en | 0.547252 |
import h5py
import numpy as np
import os
from plyfile import PlyData, PlyElement
HDF5_DATA = 'hdf5_data'

print('Generating .h5 files...', '\n')

if not os.path.exists(HDF5_DATA):
    os.mkdir(HDF5_DATA)


def _read_lines(path):
    """Return the stripped lines of a text file as a list."""
    with open(path, 'r') as fh:
        return [line.rstrip() for line in fh]


def _build_h5(filenames, out_path, verbose=False):
    """Write point clouds, per-point segment ids and per-model labels to HDF5.

    For every name in *filenames*, reads ./ply_dir/<name>.ply (2048 xyz
    vertices), ./seg_dir/<name>.seg (one segment id per point) and
    ./label_dir/<name>.seg (one label per model, first line only), and
    stores them as the "data", "pid" and "label" datasets of *out_path*.
    """
    count = len(filenames)
    data = np.zeros((count, 2048, 3))
    pid = np.zeros((count, 2048), dtype=np.uint8)
    label = np.zeros((count, 1), dtype=np.uint8)
    for i, name in enumerate(filenames):
        if verbose:
            print(name)
        plydata = PlyData.read("./ply_dir/" + name + ".ply")
        piddata = _read_lines("./seg_dir/" + name + ".seg")
        for j in range(2048):
            data[i, j] = [plydata['vertex']['x'][j],
                          plydata['vertex']['y'][j],
                          plydata['vertex']['z'][j]]
            pid[i, j] = piddata[j]
        # The label file carries one label per model; only the first line
        # is used (the original read these files in a second, separate loop).
        label[i] = _read_lines("./label_dir/" + name + ".seg")[0]
    # Context manager guarantees the HDF5 file is flushed and closed even on
    # error; the original script never closed its h5py file handles.
    with h5py.File(out_path, 'w') as handle:
        handle.create_dataset("data", data=data)
        handle.create_dataset("pid", data=pid)
        handle.create_dataset("label", data=label)


filenames_training = _read_lines("filelist_training.txt")
filenames_testing = _read_lines("filelist_testing.txt")
print(len(filenames_training))
print(len(filenames_testing))

# The original printed each training model name while converting; keep that.
_build_h5(filenames_training, "./hdf5_data/data_training.h5", verbose=True)
_build_h5(filenames_testing, "./hdf5_data/data_testing.h5")
print('HDF5 files generated.')
#!/usr/bin/env python
"""
Object-oriented implementation of backup reporting code.
Defines a class called 'Backup' that records all backups of a device
"""
import os, sys, argparse
import glob
from configparser import ConfigParser
from atlassian import Confluence
class Backup:
    """Records all configuration backups found for a single device.

    Backups live under ``backup_root/<date-dir>/<device>``; each dated
    subdirectory that contains a file named after the device counts as one
    backup of that device.
    """

    def __init__(self, device, backup_root):
        """Scan *backup_root* for backups of *device*.

        device      -- device/config file name to look for
        backup_root -- directory whose dated subdirectories hold the backups
        """
        self.device = device
        self.root = backup_root
        config_pattern = "{}/*/{}".format(self.root, device)
        configs = glob.glob(config_pattern, recursive=True)
        # Strip the backup-root prefix, keeping "<date-dir>/<device>".
        bkps = [path[len(backup_root) + 1:] for path in configs]
        # BUG FIX: glob() returns results in arbitrary filesystem order, so
        # "backups[-1]" was not guaranteed to be the newest. Sort so that the
        # date-named directories are chronological (assumes ISO-style date
        # directory names, e.g. 2021-03-05 — lexicographic == chronological).
        self.backups = sorted(bkps)

    def name(self):
        """Return the device name this object was built for."""
        return self.device

    def latest(self):
        """Return the date-directory of the newest backup, or "NotFound"."""
        if self.backups:
            return self.backups[-1].split('/')[0]
        return "NotFound"
def main():
    """Report the newest backup date of every device to a Confluence page.

    Reads settings from config-demo.ini, scans the backup tree for each
    device listed in the device-list file, prints a summary, and rewrites the
    'Config Retrievals' Confluence page with a wiki-markup table.
    """
    parser = ConfigParser()
    parser.read('config-demo.ini')
    device_list_file = parser['backups']['device_list']
    apikey = parser['confluence']['apikey']
    username = parser['confluence']['username']
    url = parser['confluence']['url']
    page_ID = parser['confluence']['page_ID']

    confluence = Confluence(url=url, username=username, password=apikey)

    # One device name per line in the device-list file.
    with open(device_list_file) as handle:
        devices = [line.rstrip() for line in handle.readlines()]

    # Build the wiki table: header row followed by one row per device.
    rows = ["||Device||Date||"]
    for device in devices:
        device_bkp = Backup(device, parser['backups']['path'])
        latest_bkp_date = device_bkp.latest()
        print(f"Latest backup for {device_bkp.name()} is {latest_bkp_date}")
        rows.append(f"|{device}|{latest_bkp_date}|")
    wiki_table = "\n".join(rows)

    print("Wiki text for table is:")
    print(wiki_table)

    result = confluence.update_page(
        page_id=page_ID,
        title='Config Retrievals',
        representation="wiki",
        body=wiki_table)
    #pprint(result)
    print(f"Title of page set to '{result['title']}'")
    print(f"Confluence revision for page is now {result['version']['confRev']}")


if __name__ == "__main__":
    main()
| python/atlassian/config-report.py | 2,382 | Object-oriented implementation of backup reporting code.
Defines a class called 'Backup' that records all backups of a device
!/usr/bin/env python Remove the full pathname, we only want the directory and the filename Read in all the devices from the nominated filepprint(result) | 279 | en | 0.866185 |
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMatrixstats(RPackage):
    """High-performing functions operating on rows and columns of matrices,
    e.g. col / rowMedians(), col / rowRanks(), and col / rowSds(). Functions
    optimized per data type and for subsetted calculations such that both
    memory usage and processing time is minimized. There are also optimized
    vector-based methods, e.g. binMeans(), madDiff() and
    weightedMedian()."""

    # CRAN landing page and source tarball for the matrixStats R package.
    homepage = "https://cran.rstudio.com/web/packages/matrixStats/index.html"
    url = "https://cran.rstudio.com/src/contrib/matrixStats_0.52.2.tar.gz"
    # Archive index Spack scrapes to discover older releases.
    list_url = "https://cran.r-project.org/src/contrib/Archive/matrixStats"

    # (version string, md5 checksum of the source tarball)
    version('0.52.2', '41b987d3ae96ee6895875c413adcba3c')
| var/spack/repos/builtin/packages/r-matrixstats/package.py | 944 | High-performing functions operating on rows and columns of matrices,
e.g. col / rowMedians(), col / rowRanks(), and col / rowSds(). Functions
optimized per data type and for subsetted calculations such that both
memory usage and processing time is minimized. There are also optimized
vector-based methods, e.g. binMeans(), madDiff() and
weightedMedian().
Copyright 2013-2018 Lawrence Livermore National Security, LLC and other Spack Project Developers. See the top-level COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) | 545 | en | 0.776768 |
import pandas as pd
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import linear_kernel
def email_mapper(df):
    """Map each email in df['email'] to a stable 1-based integer id.

    The first distinct email seen gets id 1, the second id 2, and so on;
    repeated emails reuse their id. Returns the list of ids, one per row.
    """
    id_by_email = {}
    encoded = []
    for address in df['email']:
        if address not in id_by_email:
            id_by_email[address] = len(id_by_email) + 1
        encoded.append(id_by_email[address])
    return encoded
def create_user_item_matrix(df):
    '''
    INPUT:
    df - pandas dataframe with article_id, title, user_id columns
    OUTPUT:
    user_item - user item matrix
    Description:
    Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with
    an article and a 0 otherwise
    '''
    # Pivot interactions to one row per user, one column per article.
    matrix = df.groupby('user_id')['article_id'].value_counts().unstack()
    # Collapse interaction counts to a flat 1; cells with no interaction
    # keep their NaN (as in the original implementation).
    matrix = matrix.where(matrix.isna(), 1)
    return matrix
def get_top_articles(n, df):
    '''
    INPUT:
    n - (int) the number of top articles to return
    df - (pandas dataframe) df as defined at the top of the notebook
    OUTPUT:
    top_articles - (list) A list of the top 'n' article titles
    '''
    # Interaction count per title, most-viewed first.
    view_counts = df.groupby('title')['user_id'].count()
    ranked = view_counts.sort_values(ascending=False)
    return ranked.head(n).index.tolist()
def get_top_article_ids(n, df):
    '''
    INPUT:
    n - (int) the number of top articles to return
    df - (pandas dataframe) df as defined at the top of the notebook
    OUTPUT:
    top_articles - (list) A list of the top 'n' article ids
    '''
    # Interaction count per article id, most-viewed first.
    view_counts = df.groupby('article_id')['user_id'].count()
    ranked = view_counts.sort_values(ascending=False)
    return ranked.head(n).index.tolist()
def user_user_recs(user_id, user_item, df, m=10):
    '''
    INPUT:
    user_id - (int) a user id
    m - (int) the number of recommendations you want for the user
    OUTPUT:
    recs - (list) a list of recommendations for the user by article id
    rec_names - (list) a list of recommendations for the user by article title
    Description:
    Loops through the users based on closeness to the input user_id
    For each user - finds articles the user hasn't seen before and provides them as recs
    Does this until m recommendations are found
    Notes:
    * Choose the users that have the most total article interactions
    before choosing those with fewer article interactions.
    * Choose articles with the articles with the most total interactions
    before choosing those with fewer total interactions.
    '''
    def get_user_articles_names_ids(user_id):
        '''
        INPUT:
        user_id
        OUTPUT:
        article_ids - (list) a list of the article ids seen by the user
        article_names - (list) a list of article names associated with the list of article ids
        (this is identified by the doc_full_name column in df_content)
        Description:
        Provides a list of the article_ids and article titles that have been seen by a user
        '''
        # Columns of the user_item row equal to 1 are the articles this user saw.
        article_ids = user_item.loc[user_id][user_item.loc[user_id] ==1].index.tolist()
        article_names = []
        for i in article_ids:
            try:
                title = df[df['article_id'] == i]['title'].unique()[0]
            except IndexError:
                # Article id has no row in df: fall back to a placeholder name.
                title ="None"
            article_names.append(title)
        # Ids are returned as strings (callers build dicts/sets keyed on them).
        article_ids = list(map(str, article_ids))
        return article_ids, article_names # return the ids and names
    def find_similar_users():
        '''
        OUTPUT:
        similar_users - (list) an ordered list where the closest users (largest dot product users)
        are listed first
        Description:
        Computes the similarity of every pair of users based on the dot product
        Returns an ordered
        '''
        # compute similarity of each user to the provided user
        user_item_tmp = user_item.copy()
        user_item_tmp[user_item_tmp.isna() == True] = 0 # 1. Make Nan to 0
        row = user_item_tmp.loc[user_id] # 2. Select a row
        result_dot = row@user_item_tmp.T # 3. Dot product of each of row of the matrix
        result_dot.drop(labels = [user_id], inplace=True) # remove the own user's id
        most_similar_users = result_dot.sort_values(ascending=False).index.tolist() # sort by similarity # create list of just the ids
        return most_similar_users # return a list of the users in order from most to least similar
    def get_top_sorted_users(most_similar_users):
        '''
        INPUT:
        most_similar_users - (list) an ordered list where the closest users (largest dot product users)
        are listed first
        OUTPUT:
        neighbors_df - (pandas dataframe) a dataframe with:
        neighbor_id - is a neighbor user_id
        similarity - measure of the similarity of each user to the provided user_id
        num_interactions - the number of articles viewed by the user - if a u
        Other Details - sort the neighbors_df by the similarity and then by number of interactions where
        highest of each is higher in the dataframe
        '''
        # Make neighbor_id column
        df_user_id_grouped =df.groupby("user_id")
        # NOTE(review): the next expression's result is discarded (no
        # assignment) — it appears to be a leftover and has no effect.
        df_user_id_grouped['article_id'].count().sort_values(ascending=False)
        neighbors_df = pd.DataFrame()
        neighbors_df['neighbor_id'] = most_similar_users
        # make similarity column (recomputes the same dot product as
        # find_similar_users)
        user_item_tmp = user_item.copy()
        user_item_tmp[user_item_tmp.isna() == True] = 0 # 1. Make Nan to 0
        row = user_item_tmp.loc[user_id] # Select a row
        result_dot = row@user_item_tmp.T # Dot product of each of row of the matrix
        result_dot.drop(labels = [user_id], inplace=True) # remove the own user's id
        # NOTE(review): hard-coded to the top 10, matching the slice taken by
        # the caller below — the two must stay in sync.
        similarity = result_dot.sort_values(ascending=False).values.tolist()[0:10]
        neighbors_df['similarity'] = similarity
        # Make num_interactions column
        num_interactions = []
        for i in neighbors_df['neighbor_id']:
            counted_interaction = df_user_id_grouped['article_id'].count().loc[i]
            num_interactions.append(counted_interaction)
        neighbors_df['num_interactions'] = num_interactions
        neighbors_df = neighbors_df.sort_values(by=['similarity', 'num_interactions'], ascending=False)
        return neighbors_df # Return the dataframe specified in the doc_string
    recs = []
    rec_names =[]
    counter = 0
    # Get seen article ids and names from selected user id
    article_ids, article_names = get_user_articles_names_ids(user_id)
    # Make set to find unseen articles
    seen_ids_set = set(article_ids)
    # NOTE(review): only the 10 closest users are ever considered, even when
    # m > 10 recommendations are requested — confirm this is intended.
    most_similar_users = find_similar_users()[0:10]
    neighbors_df = get_top_sorted_users(most_similar_users)
    # Find similar users of the selected user
    similar_users_list = neighbors_df['neighbor_id'] # Get neighbor_df
    # Make recommendation list
    for sim_user in similar_users_list:
        if counter < m:
            # Get seen article ids and names from similar users
            sim_article_ids, sim_article_names = get_user_articles_names_ids(sim_user)
            # Make dict (key: article_ids, value:article_names)
            sim_user_dict = dict(zip(sim_article_ids, sim_article_names))
            # Make set to find unseen articles
            sim_seen_ids_set = set(sim_article_ids)
            # Create set of unseen articles_ids
            unseen_ids_set = sim_seen_ids_set.difference(seen_ids_set)
            # NOTE(review): set iteration order is arbitrary, so which unseen
            # articles fill the last slots is not deterministic.
            for i in unseen_ids_set:
                if counter < m:
                    recs.append(i)
                    rec_names.append(sim_user_dict[i])
                    counter += 1
    return recs, rec_names
###
def make_Tfidf_array(df_content):
    """Fit a TF-IDF vectorizer over the article descriptions in df_content.

    INPUT:
    df_content - (pandas dataframe) must contain 'doc_description' and
                 'doc_full_name' columns
    OUTPUT:
    vect - the fitted TfidfVectorizer
    X - (numpy array) dense TF-IDF matrix, one row per article

    Side effect: missing 'doc_description' values in df_content are filled
    in place from 'doc_full_name'.
    """
    def tokenize(text):
        '''
        Split text into lowercased, lemmatized word tokens with surrounding
        whitespace removed, and drop English stopwords.
        Input:
        1. text: text message
        Output:
        1. clean_tokens: list of tokenized clean words
        '''
        # Replace anything that is not alphanumeric with a space.
        text = re.sub(r"[^a-zA-Z0-9]", " ", text)
        tokens = word_tokenize(text)
        lemmatizer = WordNetLemmatizer()
        clean_tokens = [lemmatizer.lemmatize(tok, pos='v').lower().strip()
                        for tok in tokens]
        stop_words = nltk.corpus.stopwords.words('english')
        return [token for token in clean_tokens if token not in stop_words]

    # Fill missing descriptions BEFORE binding the corpus: the original bound
    # `corpus` first and relied on the in-place fillna being visible through
    # that alias, which is fragile across pandas versions.
    df_content['doc_description'].fillna(df_content['doc_full_name'], inplace=True)
    corpus = df_content['doc_description']

    # (Removed the unused outer-scope `stop_words` and `lemmatizer` locals.)
    vect = TfidfVectorizer(tokenizer=tokenize)
    X = vect.fit_transform(corpus).toarray()
    return vect, X
def make_content_recs(article_id, df_content, df, m=10):
    '''
    INPUT:
    article_id = (int) a article id in df_content
    m - (int) the number of recommendations you want for the user
    df_content - (pandas dataframe) df_content as defined at the top of the notebook
    df - (pandas dataframe) df as defined at the top of the notebook
    OUTPUT:
    recs - (list) a list of recommendations for the user by article id
    rec_names - (list) a list of recommendations for the user by article title
    '''
    def tokenize(text):
        '''
        Split text into lowercased, lemmatized word tokens with surrounding
        whitespace removed, and drop English stopwords.
        Input:
        1. text: text message
        Output:
        1. clean_tokens: list of tokenized clean words
        '''
        # Replace anything that is not alphanumeric with a space.
        text = re.sub(r"[^a-zA-Z0-9]", " ", text)
        tokens = word_tokenize(text)
        lemmatizer = WordNetLemmatizer()
        clean_tokens = [lemmatizer.lemmatize(tok, pos='v').lower().strip()
                        for tok in tokens]
        stop_words = nltk.corpus.stopwords.words('english')
        return [token for token in clean_tokens if token not in stop_words]

    vect, X = make_Tfidf_array(df_content)

    # BUG FIX: `article_id in df_content.article_id` tested membership in the
    # Series *index* (row labels), not in the article-id values, so known
    # articles could wrongly take the fallback branch. Test the values.
    if article_id in df_content['article_id'].values:
        # Renamed from `cosine_similarity`, which shadowed the sklearn import
        # of the same name. NOTE(review): indexing by article_id assumes the
        # article ids are 0-based row positions in X — confirm with callers.
        similarity_matrix = linear_kernel(X, X)
        df_similarity = pd.DataFrame(similarity_matrix[article_id], columns=['similarity'])
        df_similarity_modified = df_similarity.drop(article_id)
        # Use m (previously hard-coded to 10, silently ignoring the argument).
        recs = df_similarity_modified.similarity.sort_values(ascending=False).index[0:m].tolist()
    else:
        # Unknown article: fall back to matching its title tokens against the
        # TF-IDF vocabulary and ranking articles by total weight on them.
        tfidf_feature_name = vect.get_feature_names()
        booktitle = df[df['article_id'] == article_id]['title'].values[0]
        booktitle_tokenized = tokenize(booktitle)
        # Columns of X for the title tokens that exist in the vocabulary.
        X_slice_list = sorted(tfidf_feature_name.index(tok)
                              for tok in booktitle_tokenized
                              if tok in tfidf_feature_name)
        X_sliced = X[:, X_slice_list]
        check_df = pd.DataFrame(X_sliced, columns=X_slice_list)
        check_df['sum'] = check_df.sum(axis=1)
        recs = check_df.sort_values("sum", ascending=False)[0:m].index.tolist()

    # Resolve ids to full document names once for both branches (the original
    # duplicated this loop in each branch).
    rec_names = [df_content[df_content['article_id'] == i]['doc_full_name'].values[0]
                 for i in recs]
    return recs, rec_names
| model/recommendation_functions.py | 12,791 | INPUT:
df - pandas dataframe with article_id, title, user_id columns
OUTPUT:
user_item - user item matrix
Description:
Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with
an article and a 0 otherwise
OUTPUT:
similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
Description:
Computes the similarity of every pair of users based on the dot product
Returns an ordered
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
INPUT:
most_similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
OUTPUT:
neighbors_df - (pandas dataframe) a dataframe with:
neighbor_id - is a neighbor user_id
similarity - measure of the similarity of each user to the provided user_id
num_interactions - the number of articles viewed by the user - if a u
Other Details - sort the neighbors_df by the similarity and then by number of interactions where
highest of each is higher in the dataframe
INPUT:
user_id
OUTPUT:
article_ids - (list) a list of the article ids seen by the user
article_names - (list) a list of article names associated with the list of article ids
(this is identified by the doc_full_name column in df_content)
Description:
Provides a list of the article_ids and article titles that have been seen by a user
INPUT:
article_id = (int) a article id in df_content
m - (int) the number of recommendations you want for the user
df_content - (pandas dataframe) df_content as defined at the top of the notebook
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title
Function splits text into separate words and gets a word lowercased and removes whitespaces at the ends of a word.
The function also cleans irrelevant stopwords.
Input:
1. text: text message
Output:
1. Clean_tokens : list of tokenized clean words
Function splits text into separate words and gets a word lowercased and removes whitespaces at the ends of a word.
The function also cleans irrelevant stopwords.
Input:
1. text: text message
Output:
1. Clean_tokens : list of tokenized clean words
INPUT:
user_id - (int) a user id
m - (int) the number of recommendations you want for the user
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title
Description:
Loops through the users based on closeness to the input user_id
For each user - finds articles the user hasn't seen before and provides them as recs
Does this until m recommendations are found
Notes:
* Choose the users that have the most total article interactions
before choosing those with fewer article interactions.
* Choose articles with the articles with the most total interactions
before choosing those with fewer total interactions.
Fill in the function here return the user_item matrix Return the top article titles from df (not df_content) Return the top article ids Your code here return the ids and names compute similarity of each user to the provided user 1. Make Nan to 0 2. Select a row 3. Dot product of each of row of the matrix remove the own user's id sort by similarity create list of just the ids return a list of the users in order from most to least similar Make neighbor_id column make similarity column 1. Make Nan to 0 Select a row Dot product of each of row of the matrix remove the own user's id Make num_interactions column Return the dataframe specified in the doc_string Get seen article ids and names from selected user id Make set to find unseen articles Find similar users of the selected user Get neighbor_df Make recommendation list Get seen article ids and names from similar users Make dict (key: article_ids, value:article_names) Make set to find unseen articles Create set of unseen articles_ids Get rid of other sepcial characters Tokenize Lemmatize Remove stop words Text Processing, Feature Extraction get counts of each token (word) in text data Get rid of other sepcial characters Tokenize Lemmatize Remove stop words Get title of the document of interest Tokenize the title | 4,760 | en | 0.833564 |
#!/usr/bin/env python
import rospy
import math
from std_msgs.msg import Float64
from geometry_msgs.msg import Twist
class SimpleRoverController:
    """Translates /csi_rover/cmd_vel Twist messages into wheel commands.

    An explicit yaw command (angular.z != 0) selects skid steering; otherwise
    the linear x/y vector is followed with crab steering (all steering arms
    turned to the same angle).
    """

    def __init__(self):
        self.namespace = rospy.get_param("name_space", "scout_1")
        self.w_s = rospy.get_param("wheel_separation", 1.7680)  # wheel separation
        # BUG FIX: this previously read "wheel_separation" again (copy/paste),
        # so the radius could never be configured. The 0.3048 default and its
        # use as a divisor in the wheel-speed formulas show it is the radius.
        self.w_r = rospy.get_param("wheel_radius", 0.3048)  # wheel radius

        if "/" in self.namespace:
            rospy.logerr("[rover_motion_controller] invalid namespace. namespace can not contain /")
            exit(1)

        # Steering-arm position controllers (front/back x left/right).
        self.lf_steering_pub = rospy.Publisher("/" + self.namespace + "/fl_steering_arm_controller/command", Float64, queue_size=2)
        self.rf_steering_pub = rospy.Publisher("/" + self.namespace + "/fr_steering_arm_controller/command", Float64, queue_size=2)
        self.lr_steering_pub = rospy.Publisher("/" + self.namespace + "/bl_steering_arm_controller/command", Float64, queue_size=2)
        self.rr_steering_pub = rospy.Publisher("/" + self.namespace + "/br_steering_arm_controller/command", Float64, queue_size=2)

        # Wheel (axle) velocity controllers.
        self.lf_axle_pub = rospy.Publisher("/" + self.namespace + "/fl_wheel_controller/command", Float64, queue_size=2)
        self.rf_axle_pub = rospy.Publisher("/" + self.namespace + "/fr_wheel_controller/command", Float64, queue_size=2)
        self.lr_axle_pub = rospy.Publisher("/" + self.namespace + "/bl_wheel_controller/command", Float64, queue_size=2)
        self.rr_axle_pub = rospy.Publisher("/" + self.namespace + "/br_wheel_controller/command", Float64, queue_size=2)

        self.steering_cmd = 0  # commanded steering angle [rad]
        self.linear_vel = 0    # magnitude of the commanded planar velocity
        self.linear_x = 0
        self.angular_z = 0

        rospy.Subscriber("/csi_rover/cmd_vel", Twist, callback=self.directional_movement)
        rospy.init_node('rover_motion_controller', anonymous=True)

        rate = rospy.Rate(30)  # 30 Hz command loop
        while not rospy.is_shutdown():
            # Explicit yaw command -> skid steering: differential wheel speeds
            # per side, steering arms locked straight ahead.
            if self.angular_z != 0:
                right_speed = (self.linear_x + self.angular_z * self.w_s / 2.0) / self.w_r
                left_speed = (self.linear_x - self.angular_z * self.w_s / 2.0) / self.w_r
                self.rf_axle_pub.publish(right_speed)
                self.rr_axle_pub.publish(right_speed)
                self.lf_axle_pub.publish(left_speed)
                self.lr_axle_pub.publish(left_speed)
                self.synchronized_steering(0)
            # Otherwise crab steering: same speed on all wheels, all arms
            # turned to the commanded direction.
            else:
                self.lf_axle_pub.publish(self.linear_vel)
                self.lr_axle_pub.publish(self.linear_vel)
                self.rf_axle_pub.publish(self.linear_vel)
                self.rr_axle_pub.publish(self.linear_vel)
                self.synchronized_steering(self.steering_cmd)
            rate.sleep()

    def synchronized_steering(self, angle):
        """Move all four steering joints to the same angle [rad]."""
        self.lf_steering_pub.publish(angle)
        self.rf_steering_pub.publish(angle)
        self.lr_steering_pub.publish(angle)
        self.rr_steering_pub.publish(angle)

    def directional_movement(self, data):
        """cmd_vel callback: cache steering angle, speed and yaw rate.

        Steering angle is atan2(x, y) of the linear components (range
        -pi..pi); linear_vel is the magnitude of the (x, y) vector.
        See https://answers.ros.org/question/29706/twist-message-example-and-cmd_vel/
        """
        theta = math.atan2(data.linear.x, data.linear.y)
        self.steering_cmd = theta
        self.linear_vel = math.sqrt(math.pow(data.linear.x, 2) + math.pow(data.linear.y, 2))
        self.angular_z = data.angular.z
        self.linear_x = data.linear.x
if __name__ == '__main__':
    try:
        SimpleRoverController()
    # BUG FIX: was `rospy.ROSInterruptExoception` (typo), which would raise
    # AttributeError while looking up the exception class instead of
    # swallowing a Ctrl-C / node shutdown.
    except rospy.ROSInterruptException:
        pass
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import networkx as nx
from extensions.middle.ConstSwitchResolver import ConstSwitchEraser
from mo.graph.graph import erase_node
from mo.middle.replacement import MiddleReplacementPattern
class UselessMergeEraser(MiddleReplacementPattern):
    """Removes Merge ops (and their data node) that have at most one input,
    since such a Merge forwards its single input unchanged."""
    enabled = True

    def run_after(self):
        # Must run after constant Switch branches have been resolved.
        return [ConstSwitchEraser]

    def pattern(self):
        nodes = [
            ('merge', dict(kind='op', op='Merge')),
            ('merge_data', dict(kind='data')),
        ]
        edges = [('merge', 'merge_data')]
        return dict(nodes=nodes, edges=edges)

    def replace_pattern(self, graph: nx.MultiDiGraph, match: dict):
        # A Merge with two or more inputs is meaningful; leave it alone.
        if len(graph.in_edges(match['merge'].id)) > 1:
            return
        erase_node(match['merge'])
        erase_node(match['merge_data'])
        log.info("Useles Merge op and data nodes was deleted op='{}' data='{}'"
                 "".format(match['merge'].id, match['merge_data'].id))
| model-optimizer/extensions/middle/UselessMerge.py | 1,507 | Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. | 562 | en | 0.864985 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
# Avoid writing .pyc files when this script imports pyxsltp just below.
sys.dont_write_bytecode=True
from distutils.core import setup
from pyxsltp import __version__
# Ship both the importable module and the command-line script of the same
# name; the version is read from the module itself.
setup(
    name = "pyxsltp",
    version = __version__,
    py_modules = ['pyxsltp'],
    scripts = ['pyxsltp'],
)
| setup.py | 265 | !/usr/bin/python -*- coding: utf-8 -*- | 38 | en | 0.437977 |
# Generated by Django 2.0.6 on 2018-07-05 16:13
from django.db import migrations, models
import posts.models
# Initial migration: creates the Post model. The optional image stores its
# dimensions in height_field/width_field and is uploaded via
# posts.models.upload_location.
class Migration(migrations.Migration):
    # First migration for this app — no prior migration state.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=120)),
                ('slug', models.SlugField(allow_unicode=True, unique=True)),
                ('image', models.ImageField(blank=True, height_field='height_field', null=True, upload_to=posts.models.upload_location, width_field='width_field')),
                ('height_field', models.IntegerField(default=0)),
                ('width_field', models.IntegerField(default=0)),
                ('content', models.TextField()),
                # updated changes on every save; timestamp is set once on creation.
                ('updated', models.DateTimeField(auto_now=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                # Newest posts first by default.
                'ordering': ['-timestamp', '-updated'],
            },
        ),
    ]
| posts/migrations/0001_initial.py | 1,158 | Generated by Django 2.0.6 on 2018-07-05 16:13 | 45 | en | 0.496208 |
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
__version__ = '2.3.0'
| presto/datadog_checks/presto/__about__.py | 137 | (C) Datadog, Inc. 2019-present All rights reserved Licensed under a 3-clause BSD style license (see LICENSE) | 108 | en | 0.81047 |
from enum import IntEnum
from typing import Dict, Union, Callable, List, Optional
from cereal import log, car
import cereal.messaging as messaging
from common.realtime import DT_CTRL
from selfdrive.config import Conversions as CV
from selfdrive.locationd.calibrationd import MIN_SPEED_FILTER
AlertSize = log.ControlsState.AlertSize
AlertStatus = log.ControlsState.AlertStatus
VisualAlert = car.CarControl.HUDControl.VisualAlert
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
EventName = car.CarEvent.EventName
# Alert priorities
class Priority(IntEnum):
  """Alert priority: when alerts compete, a higher value wins (see Alert.__gt__)."""
  LOWEST = 0
  LOWER = 1
  LOW = 2
  MID = 3
  HIGH = 4
  HIGHEST = 5
# Event types
class ET:
  """String tags for the event types an event may map to in the EVENTS table."""
  ENABLE = 'enable'
  PRE_ENABLE = 'preEnable'
  NO_ENTRY = 'noEntry'
  WARNING = 'warning'
  USER_DISABLE = 'userDisable'
  SOFT_DISABLE = 'softDisable'
  IMMEDIATE_DISABLE = 'immediateDisable'
  PERMANENT = 'permanent'
# Reverse lookup: capnp enum value -> event name string (used for alert_type labels).
EVENT_NAME = {v: k for k, v in EventName.schema.enumerants.items()}
class Events:
  """Accumulates events raised during a control cycle and turns them into alerts."""
  def __init__(self):
    self.events: List[int] = []
    self.static_events: List[int] = []
    # Per-event count of consecutive cycles the event has been active.
    self.events_prev = dict.fromkeys(EVENTS.keys(), 0)
  @property
  def names(self) -> List[int]:
    return self.events
  def __len__(self) -> int:
    return len(self.events)
  def add(self, event_name: int, static: bool=False) -> None:
    # Static events survive clear() and are re-added every cycle.
    if static:
      self.static_events.append(event_name)
    self.events.append(event_name)
  def clear(self) -> None:
    # Advance the consecutive-cycle counter for events still active, reset others.
    active = set(self.events)
    self.events_prev = {name: (count + 1 if name in active else 0)
                        for name, count in self.events_prev.items()}
    self.events = list(self.static_events)
  def any(self, event_type: str) -> bool:
    return any(event_type in EVENTS.get(event, {}) for event in self.events)
  def create_alerts(self, event_types: List[str], callback_args=None):
    """Return the alerts of the requested types for all active events."""
    if callback_args is None:
      callback_args = []
    alerts = []
    for event in self.events:
      registered = EVENTS[event]
      for event_type in event_types:
        if event_type not in registered:
          continue
        candidate = registered[event_type]
        # Entries are either Alert instances or factory callbacks.
        if not isinstance(candidate, Alert):
          candidate = candidate(*callback_args)
        # Only emit once the event has been active longer than its creation delay.
        if DT_CTRL * (self.events_prev[event] + 1) >= candidate.creation_delay:
          candidate.alert_type = f"{EVENT_NAME[event]}/{event_type}"
          candidate.event_type = event_type
          alerts.append(candidate)
    return alerts
  def add_from_msg(self, events):
    """Merge events coming in from a carEvent message."""
    self.events.extend(event.name.raw for event in events)
  def to_msg(self):
    """Serialize the active events as a list of car.CarEvent messages."""
    messages = []
    for event_name in self.events:
      event = car.CarEvent.new_message()
      event.name = event_name
      # Mark each event type this event participates in as a boolean flag.
      for event_type in EVENTS.get(event_name, {}):
        setattr(event, event_type, True)
      messages.append(event)
    return messages
class Alert:
  """A driver-facing alert: two text lines plus visual/audible presentation
  and timing metadata consumed by the alert manager."""
  def __init__(self,
               alert_text_1: str,
               alert_text_2: str,
               alert_status: log.ControlsState.AlertStatus,
               alert_size: log.ControlsState.AlertSize,
               priority: Priority,
               visual_alert: car.CarControl.HUDControl.VisualAlert,
               audible_alert: car.CarControl.HUDControl.AudibleAlert,
               duration: float,
               alert_rate: float = 0.,
               creation_delay: float = 0.):
    self.alert_text_1 = alert_text_1
    self.alert_text_2 = alert_text_2
    self.alert_status = alert_status
    self.alert_size = alert_size
    self.priority = priority
    self.visual_alert = visual_alert
    self.audible_alert = audible_alert
    # Convert the duration from seconds to control-loop frames.
    self.duration = int(duration / DT_CTRL)
    # NOTE(review): alert_rate presumably controls alert blinking; it is
    # consumed outside this file — confirm against the alert manager.
    self.alert_rate = alert_rate
    # Seconds the event must persist before the alert is emitted
    # (checked in Events.create_alerts()).
    self.creation_delay = creation_delay
    # Filled in by Events.create_alerts().
    self.alert_type = ""
    self.event_type: Optional[str] = None
  def __str__(self) -> str:
    return f"{self.alert_text_1}/{self.alert_text_2} {self.priority} {self.visual_alert} {self.audible_alert}"
  def __gt__(self, alert2) -> bool:
    # Alerts compare by priority alone; used to pick the most important alert.
    return self.priority > alert2.priority
class NoEntryAlert(Alert):
  """Mid-size alert shown when engagement is refused; the headline is fixed."""
  def __init__(self, alert_text_2: str, visual_alert: car.CarControl.HUDControl.VisualAlert=VisualAlert.none):
    super().__init__("오픈파일럿을 사용할 수 없음", alert_text_2,
                     AlertStatus.normal, AlertSize.mid,
                     Priority.LOW, visual_alert,
                     AudibleAlert.refuse, duration=3.)
class SoftDisableAlert(Alert):
  """Full-screen warning shown while openpilot soft-disables; the driver must take over."""
  def __init__(self, alert_text_2: str):
    # Fix: removed a stray trailing comma that turned this statement into a
    # pointless one-element tuple expression.
    super().__init__("핸들을 즉시 잡아주세요", alert_text_2,
                     AlertStatus.userPrompt, AlertSize.full,
                     Priority.MID, VisualAlert.steerRequired,
                     AudibleAlert.warningSoft, 2.)
# Less harsh version of SoftDisableAlert, where the condition is user-triggered.
class UserSoftDisableAlert(SoftDisableAlert):
  """SoftDisableAlert variant for user-caused conditions (e.g. door open)."""
  def __init__(self, alert_text_2: str):
    # Fix: removed a stray trailing comma after the super() call.
    super().__init__(alert_text_2)
    # Override the headline set by SoftDisableAlert with a milder message.
    self.alert_text_1 = "오픈파일럿이 해제됩니다."
class ImmediateDisableAlert(Alert):
  """Critical full-screen alert for conditions that disable openpilot immediately."""
  def __init__(self, alert_text_2: str):
    # Fix: removed a stray trailing comma that turned this statement into a
    # pointless one-element tuple expression.
    super().__init__("핸들을 즉시 잡아주세요", alert_text_2,
                     AlertStatus.critical, AlertSize.full,
                     Priority.HIGHEST, VisualAlert.steerRequired,
                     AudibleAlert.warningImmediate, 4.)
class EngagementAlert(Alert):
  """Textless, sound-only alert played on engage/disengage transitions."""
  def __init__(self, audible_alert: car.CarControl.HUDControl.AudibleAlert):
    # Fix: removed a stray trailing comma after the super() call.
    super().__init__("", "",
                     AlertStatus.normal, AlertSize.none,
                     Priority.MID, VisualAlert.none,
                     audible_alert, .2)
class NormalPermanentAlert(Alert):
  """Silent persistent status alert; grows to mid size when a second line is given."""
  def __init__(self, alert_text_1: str, alert_text_2: str = "", duration: float = 0.2, priority: Priority = Priority.LOWER, creation_delay: float = 0.):
    # Fixes: removed a stray trailing comma after the super() call, and use
    # string truthiness instead of len() for the empty-second-line check.
    super().__init__(alert_text_1, alert_text_2,
                     AlertStatus.normal, AlertSize.mid if alert_text_2 else AlertSize.small,
                     priority, VisualAlert.none, AudibleAlert.none, duration, creation_delay=creation_delay)
class StartupAlert(Alert):
  """Five-second banner shown once at startup; second line defaults to a Korean safety reminder."""
  def __init__(self, alert_text_1: str, alert_text_2: str = "항상 핸들을 잡고 도로를 주시하세요", alert_status=AlertStatus.normal):
    # Fix: removed a stray trailing comma that turned this statement into a
    # pointless one-element tuple expression.
    super().__init__(alert_text_1, alert_text_2,
                     alert_status, AlertSize.mid,
                     Priority.LOWER, VisualAlert.none, AudibleAlert.none, 5.)
# ********** helper functions **********
def get_display_speed(speed_ms: float, metric: bool) -> str:
  """Format a speed given in m/s as a rounded integer plus unit (km/h or mph)."""
  if metric:
    factor, unit = CV.MS_TO_KPH, 'km/h'
  else:
    factor, unit = CV.MS_TO_MPH, 'mph'
  return f"{int(round(speed_ms * factor))} {unit}"
# ********** alert callback functions **********
# Signature shared by all alert factory callbacks in EVENTS:
# (car params, SubMaster, metric units flag, soft-disable frames left) -> Alert.
AlertCallbackType = Callable[[car.CarParams, messaging.SubMaster, bool, int], Alert]
def soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
  """Return a callback producing a SoftDisableAlert with the given second line."""
  def factory(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
    # Upstream escalates when little soft-disable time remains; kept disabled here:
    #if soft_disable_time < int(0.5 / DT_CTRL):
    #  return ImmediateDisableAlert(alert_text_2)
    return SoftDisableAlert(alert_text_2)
  return factory
def user_soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
  """Return a callback producing a UserSoftDisableAlert with the given second line."""
  def factory(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
    # Upstream escalates when little soft-disable time remains; kept disabled here:
    #if soft_disable_time < int(0.5 / DT_CTRL):
    #  return ImmediateDisableAlert(alert_text_2)
    return UserSoftDisableAlert(alert_text_2)
  return factory
def below_engage_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """No-entry alert quoting the car's minimum engage speed in the user's units."""
  threshold = get_display_speed(CP.minEnableSpeed, metric)
  return NoEntryAlert(f"Speed Below {threshold}")
def below_steer_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Warning that lateral control is unavailable below the car's minimum steer speed."""
  threshold = get_display_speed(CP.minSteerSpeed, metric)
  return Alert(
    f"Steer Unavailable Below {threshold}",
    "",
    AlertStatus.userPrompt, AlertSize.small,
    Priority.MID, VisualAlert.steerRequired, AudibleAlert.prompt, 0.4)
def calibration_incomplete_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Progress alert shown while camera calibration is still collecting data."""
  # Consistency: use an f-string like the rest of this file instead of %-formatting.
  # int() matches the old '%d' truncation if calPerc is a float.
  return Alert(
    f"Calibration in Progress: {int(sm['liveCalibration'].calPerc)}%",
    f"Drive Above {get_display_speed(MIN_SPEED_FILTER, metric)}",
    AlertStatus.normal, AlertSize.mid,
    Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2)
def no_gps_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Alert (shown only after a long creation delay) for persistently poor GPS reception."""
  panda_type = sm['peripheralState'].pandaType
  gps_integrated = panda_type in (log.PandaState.PandaType.uno, log.PandaState.PandaType.dos)
  hint = "If sky is visible, contact support" if gps_integrated else "Check GPS antenna placement"
  return Alert(
    "Poor GPS reception",
    hint,
    AlertStatus.normal, AlertSize.mid,
    Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=300.)
def wrong_car_mode_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """No-entry alert whose wording depends on the brand's cruise main-switch naming."""
  text = "Main Switch Off" if CP.carName == "honda" else "Cruise Mode Disabled"
  return NoEntryAlert(text)
def joystick_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Permanent alert showing live joystick gas/steer percentages in debug mode."""
  axes = sm['testJoystick'].axes
  if len(axes):
    gb, steer = list(axes)[:2]
  else:
    gb, steer = 0., 0.
  vals = f"Gas: {round(gb * 100.)}%, Steer: {round(steer * 100.)}%"
  return NormalPermanentAlert("Joystick Mode", vals)
def auto_lane_change_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Blinking countdown alert while an automatic lane change is pending."""
  alc_timer = sm['lateralPlan'].autoLaneChangeTimer
  title = "차선 변경을 시작합니다 in (%d)" % alc_timer
  return Alert(
    title,
    "차선의 차량을 확인하세요",
    AlertStatus.normal, AlertSize.mid,
    Priority.LOWER, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75)
# Central alert table: maps each EventName to the alert (or alert factory
# callback) to show for each event type (ET.*) it participates in.
EVENTS: Dict[int, Dict[str, Union[Alert, AlertCallbackType]]] = {
  # ********** events with no alerts **********
  EventName.stockFcw: {},
  EventName.lkasDisabled: {},
  # ********** events only containing alerts displayed in all states **********
  EventName.joystickDebug: {
    ET.WARNING: joystick_alert,
    ET.PERMANENT: NormalPermanentAlert("Joystick Mode"),
  },
  EventName.controlsInitializing: {
    ET.NO_ENTRY: NoEntryAlert("System Initializing"),
  },
  EventName.startup: {
    ET.PERMANENT: StartupAlert("Be ready to take over at any time")
  },
  EventName.startupMaster: {
    ET.PERMANENT: StartupAlert("WARNING: This branch is not tested",
                               alert_status=AlertStatus.userPrompt),
  },
  # Car is recognized, but marked as dashcam only
  EventName.startupNoControl: {
    ET.PERMANENT: StartupAlert("Dashcam mode"),
  },
  # Car is not recognized
  EventName.startupNoCar: {
    ET.PERMANENT: StartupAlert("Dashcam mode for unsupported car"),
  },
  EventName.startupNoFw: {
    ET.PERMANENT: StartupAlert("Car Unrecognized",
                               "Check comma power connections",
                               alert_status=AlertStatus.userPrompt),
  },
  EventName.dashcamMode: {
    ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
                                       priority=Priority.LOWEST),
  },
  EventName.invalidLkasSetting: {
    ET.PERMANENT: NormalPermanentAlert("Stock LKAS is on",
                                       "Turn off stock LKAS to engage"),
  },
  EventName.cruiseMismatch: {
    #ET.PERMANENT: ImmediateDisableAlert("openpilot failed to cancel cruise"),
  },
  # Some features or cars are marked as community features. If openpilot
  # detects the use of a community feature it switches to dashcam mode
  # until these features are allowed using a toggle in settings.
  EventName.communityFeatureDisallowed: {
    ET.PERMANENT: NormalPermanentAlert("openpilot Unavailable",
                                       "Enable Community Features in Settings"),
  },
  # openpilot doesn't recognize the car. This switches openpilot into a
  # read-only mode. This can be solved by adding your fingerprint.
  # See https://github.com/commaai/openpilot/wiki/Fingerprinting for more information
  EventName.carUnrecognized: {
    ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
                                       "Car Unrecognized",
                                       priority=Priority.LOWEST),
  },
  EventName.stockAeb: {
    ET.PERMANENT: Alert(
      "브레이크!",
      "추돌 위험",
      AlertStatus.critical, AlertSize.full,
      Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 2.),
    ET.NO_ENTRY: NoEntryAlert("Stock AEB: Risk of Collision"),
  },
  EventName.fcw: {
    ET.PERMANENT: Alert(
      "브레이크!",
      "추돌 위험",
      AlertStatus.critical, AlertSize.full,
      Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.warningSoft, 2.),
  },
  EventName.ldw: {
    ET.PERMANENT: Alert(
      "핸들을 잡아주세요",
      "차선이탈 감지됨",
      AlertStatus.userPrompt, AlertSize.small,
      Priority.LOW, VisualAlert.ldw, AudibleAlert.prompt, 3.),
  },
  # ********** events only containing alerts that display while engaged **********
  EventName.gasPressed: {
    ET.PRE_ENABLE: Alert(
      "Release Gas Pedal to Engage",
      "",
      AlertStatus.normal, AlertSize.small,
      Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .1, creation_delay=1.),
  },
  # openpilot tries to learn certain parameters about your car by observing
  # how the car behaves to steering inputs from both human and openpilot driving.
  # This includes:
  # - steer ratio: gear ratio of the steering rack. Steering angle divided by tire angle
  # - tire stiffness: how much grip your tires have
  # - angle offset: most steering angle sensors are offset and measure a non zero angle when driving straight
  # This alert is thrown when any of these values exceed a sanity check. This can be caused by
  # bad alignment or bad sensor data. If this happens consistently consider creating an issue on GitHub
  EventName.vehicleModelInvalid: {
    ET.NO_ENTRY: NoEntryAlert("Vehicle Parameter Identification Failed"),
    ET.SOFT_DISABLE: soft_disable_alert("Vehicle Parameter Identification Failed"),
  },
  EventName.steerTempUnavailableSilent: {
    ET.WARNING: Alert(
      "핸들을 잡아주세요",
      "조향제어 일시적으로 사용불가",
      AlertStatus.userPrompt, AlertSize.small,
      Priority.LOW, VisualAlert.steerRequired, AudibleAlert.prompt, 1.),
  },
  EventName.preDriverDistracted: {
    ET.WARNING: Alert(
      "도로를 주시하세요 : 운전자 도로주시 불안",
      "",
      AlertStatus.normal, AlertSize.small,
      Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
  },
  EventName.promptDriverDistracted: {
    ET.WARNING: Alert(
      "도로를 주시하세요",
      "운전자 도로주시 불안",
      AlertStatus.userPrompt, AlertSize.mid,
      Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
  },
  EventName.driverDistracted: {
    ET.WARNING: Alert(
      "조향제어가 강제로 해제됩니다",
      "운전자 도로주시 불안",
      AlertStatus.critical, AlertSize.full,
      Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
  },
  EventName.preDriverUnresponsive: {
    ET.WARNING: Alert(
      "핸들을 잡아주세요 : 운전자 인식 불가",
      "",
      AlertStatus.normal, AlertSize.small,
      Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .1, alert_rate=0.75),
  },
  EventName.promptDriverUnresponsive: {
    ET.WARNING: Alert(
      "핸들을 잡아주세요",
      "운전자 응답하지않음",
      AlertStatus.userPrompt, AlertSize.mid,
      Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
  },
  EventName.driverUnresponsive: {
    ET.WARNING: Alert(
      "조향제어가 강제로 해제됩니다",
      "운전자 응답하지않음",
      AlertStatus.critical, AlertSize.full,
      Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
  },
  EventName.manualRestart: {
    ET.WARNING: Alert(
      "핸들을 잡아주세요",
      "수동으로 재활성화하세요",
      AlertStatus.userPrompt, AlertSize.mid,
      Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
  },
  EventName.resumeRequired: {
    ET.WARNING: Alert(
      "앞차량 멈춤",
      "앞차가 출발하면 자동 재출발",
      AlertStatus.userPrompt, AlertSize.mid,
      Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
  },
  EventName.belowSteerSpeed: {
    ET.WARNING: below_steer_speed_alert,
  },
  EventName.preLaneChangeLeft: {
    ET.WARNING: Alert(
      "차선을 변경합니다",
      "좌측차선의 차량을 확인하세요",
      AlertStatus.normal, AlertSize.small,
      Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
  },
  EventName.preLaneChangeRight: {
    ET.WARNING: Alert(
      "차선을 변경합니다",
      "우측차선의 차량을 확인하세요",
      AlertStatus.normal, AlertSize.small,
      Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
  },
  EventName.laneChangeBlocked: {
    ET.WARNING: Alert(
      "후측방 차량감지",
      "차선에 차량이 감지되니 대기하세요",
      AlertStatus.userPrompt, AlertSize.small,
      Priority.LOW, VisualAlert.none, AudibleAlert.prompt, .1),
  },
  EventName.laneChange: {
    ET.WARNING: Alert(
      "차선을 변경합니다",
      "",
      AlertStatus.normal, AlertSize.small,
      Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
  },
  EventName.steerSaturated: {
    ET.WARNING: Alert(
      "핸들을 잡아주세요",
      "조향제어 제한을 초과함",
      AlertStatus.userPrompt, AlertSize.mid,
      Priority.LOW, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 1.),
  },
  # Thrown when the fan is driven at >50% but is not rotating
  EventName.fanMalfunction: {
    ET.PERMANENT: NormalPermanentAlert("Fan Malfunction", "Contact Support"),
  },
  # Camera is not outputting frames at a constant framerate
  EventName.cameraMalfunction: {
    ET.PERMANENT: NormalPermanentAlert("Camera Malfunction", "Contact Support"),
  },
  # Unused
  EventName.gpsMalfunction: {
    ET.PERMANENT: NormalPermanentAlert("GPS Malfunction", "Contact Support"),
  },
  # When the GPS position and localizer diverge the localizer is reset to the
  # current GPS position. This alert is thrown when the localizer is reset
  # more often than expected.
  EventName.localizerMalfunction: {
    ET.PERMANENT: NormalPermanentAlert("Sensor Malfunction", "Contact Support"),
  },
  # ********** events that affect controls state transitions **********
  EventName.pcmEnable: {
    ET.ENABLE: EngagementAlert(AudibleAlert.engage),
  },
  EventName.buttonEnable: {
    ET.ENABLE: EngagementAlert(AudibleAlert.engage),
  },
  EventName.pcmDisable: {
    ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
  },
  EventName.buttonCancel: {
    ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
  },
  EventName.brakeHold: {
    ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
    ET.NO_ENTRY: NoEntryAlert("Brake Hold Active"),
  },
  EventName.parkBrake: {
    ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
    ET.NO_ENTRY: NoEntryAlert("Parking Brake Engaged"),
  },
  EventName.pedalPressed: {
    ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
    ET.NO_ENTRY: NoEntryAlert("Pedal Pressed",
                              visual_alert=VisualAlert.brakePressed),
  },
  EventName.wrongCarMode: {
    ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
    ET.NO_ENTRY: wrong_car_mode_alert,
  },
  EventName.wrongCruiseMode: {
    ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
    ET.NO_ENTRY: NoEntryAlert("Adaptive Cruise Disabled"),
  },
  EventName.steerTempUnavailable: {
    ET.SOFT_DISABLE: soft_disable_alert("Steering Temporarily Unavailable"),
    ET.NO_ENTRY: NoEntryAlert("Steering Temporarily Unavailable"),
  },
  EventName.outOfSpace: {
    ET.PERMANENT: NormalPermanentAlert("Out of Storage"),
    ET.NO_ENTRY: NoEntryAlert("Out of Storage"),
  },
  EventName.belowEngageSpeed: {
    ET.NO_ENTRY: below_engage_speed_alert,
  },
  EventName.sensorDataInvalid: {
    ET.PERMANENT: Alert(
      "No Data from Device Sensors",
      "Reboot your Device",
      AlertStatus.normal, AlertSize.mid,
      Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=1.),
    ET.NO_ENTRY: NoEntryAlert("No Data from Device Sensors"),
  },
  EventName.noGps: {
    ET.PERMANENT: no_gps_alert,
  },
  EventName.soundsUnavailable: {
    ET.PERMANENT: NormalPermanentAlert("Speaker not found", "Reboot your Device"),
    ET.NO_ENTRY: NoEntryAlert("Speaker not found"),
  },
  EventName.tooDistracted: {
    ET.NO_ENTRY: NoEntryAlert("Distraction Level Too High"),
  },
  EventName.overheat: {
    ET.PERMANENT: NormalPermanentAlert("System Overheated"),
    ET.SOFT_DISABLE: soft_disable_alert("System Overheated"),
    ET.NO_ENTRY: NoEntryAlert("System Overheated"),
  },
  EventName.wrongGear: {
    ET.SOFT_DISABLE: user_soft_disable_alert("Gear not D"),
    ET.NO_ENTRY: NoEntryAlert("Gear not D"),
  },
  # This alert is thrown when the calibration angles are outside of the acceptable range.
  # For example if the device is pointed too much to the left or the right.
  # Usually this can only be solved by removing the mount from the windshield completely,
  # and attaching while making sure the device is pointed straight forward and is level.
  # See https://comma.ai/setup for more information
  EventName.calibrationInvalid: {
    ET.PERMANENT: NormalPermanentAlert("Calibration Invalid", "Remount Device and Recalibrate"),
    ET.SOFT_DISABLE: soft_disable_alert("Calibration Invalid: Remount Device & Recalibrate"),
    ET.NO_ENTRY: NoEntryAlert("Calibration Invalid: Remount Device & Recalibrate"),
  },
  EventName.calibrationIncomplete: {
    ET.PERMANENT: calibration_incomplete_alert,
    ET.SOFT_DISABLE: soft_disable_alert("Calibration in Progress"),
    ET.NO_ENTRY: NoEntryAlert("Calibration in Progress"),
  },
  EventName.doorOpen: {
    ET.SOFT_DISABLE: user_soft_disable_alert("Door Open"),
    ET.NO_ENTRY: NoEntryAlert("Door Open"),
  },
  EventName.seatbeltNotLatched: {
    ET.SOFT_DISABLE: user_soft_disable_alert("Seatbelt Unlatched"),
    ET.NO_ENTRY: NoEntryAlert("Seatbelt Unlatched"),
  },
  EventName.espDisabled: {
    ET.SOFT_DISABLE: soft_disable_alert("ESP Off"),
    ET.NO_ENTRY: NoEntryAlert("ESP Off"),
  },
  EventName.lowBattery: {
    ET.SOFT_DISABLE: soft_disable_alert("Low Battery"),
    ET.NO_ENTRY: NoEntryAlert("Low Battery"),
  },
  # Different openpilot services communicate between each other at a certain
  # interval. If communication does not follow the regular schedule this alert
  # is thrown. This can mean a service crashed, did not broadcast a message for
  # ten times the regular interval, or the average interval is more than 10% too high.
  EventName.commIssue: {
    ET.SOFT_DISABLE: soft_disable_alert("Communication Issue between Processes"),
    ET.NO_ENTRY: NoEntryAlert("Communication Issue between Processes"),
  },
  # Thrown when manager detects a service exited unexpectedly while driving
  EventName.processNotRunning: {
    ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
  },
  EventName.radarFault: {
    ET.SOFT_DISABLE: soft_disable_alert("Radar Error: Restart the Car"),
    ET.NO_ENTRY: NoEntryAlert("Radar Error: Restart the Car"),
  },
  # Every frame from the camera should be processed by the model. If modeld
  # is not processing frames fast enough they have to be dropped. This alert is
  # thrown when over 20% of frames are dropped.
  EventName.modeldLagging: {
    ET.SOFT_DISABLE: soft_disable_alert("Driving model lagging"),
    ET.NO_ENTRY: NoEntryAlert("Driving model lagging"),
  },
  # Besides predicting the path, lane lines and lead car data the model also
  # predicts the current velocity and rotation speed of the car. If the model is
  # very uncertain about the current velocity while the car is moving, this
  # usually means the model has trouble understanding the scene. This is used
  # as a heuristic to warn the driver.
  EventName.posenetInvalid: {
    ET.SOFT_DISABLE: soft_disable_alert("Model Output Uncertain"),
    ET.NO_ENTRY: NoEntryAlert("Model Output Uncertain"),
  },
  # When the localizer detects an acceleration of more than 40 m/s^2 (~4G) we
  # alert the driver the device might have fallen from the windshield.
  EventName.deviceFalling: {
    ET.SOFT_DISABLE: soft_disable_alert("Device Fell Off Mount"),
    ET.NO_ENTRY: NoEntryAlert("Device Fell Off Mount"),
  },
  EventName.lowMemory: {
    ET.SOFT_DISABLE: soft_disable_alert("Low Memory: Reboot Your Device"),
    ET.PERMANENT: NormalPermanentAlert("Low Memory", "Reboot your Device"),
    ET.NO_ENTRY: NoEntryAlert("Low Memory: Reboot Your Device"),
  },
  EventName.highCpuUsage: {
    #ET.SOFT_DISABLE: soft_disable_alert("System Malfunction: Reboot Your Device"),
    #ET.PERMANENT: NormalPermanentAlert("System Malfunction", "Reboot your Device"),
    ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
  },
  EventName.accFaulted: {
    ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Faulted"),
    ET.PERMANENT: NormalPermanentAlert("Cruise Faulted", ""),
    ET.NO_ENTRY: NoEntryAlert("Cruise Faulted"),
  },
  EventName.controlsMismatch: {
    ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Controls Mismatch"),
  },
  EventName.roadCameraError: {
    ET.PERMANENT: NormalPermanentAlert("Camera Error",
                                       duration=1.,
                                       creation_delay=30.),
  },
  EventName.driverCameraError: {
    ET.PERMANENT: NormalPermanentAlert("Camera Error",
                                       duration=1.,
                                       creation_delay=30.),
  },
  EventName.wideRoadCameraError: {
    ET.PERMANENT: NormalPermanentAlert("Camera Error",
                                       duration=1.,
                                       creation_delay=30.),
  },
  # Sometimes the USB stack on the device can get into a bad state
  # causing the connection to the panda to be lost
  EventName.usbError: {
    ET.SOFT_DISABLE: soft_disable_alert("USB Error: Reboot Your Device"),
    ET.PERMANENT: NormalPermanentAlert("USB Error: Reboot Your Device", ""),
    ET.NO_ENTRY: NoEntryAlert("USB Error: Reboot Your Device"),
  },
  # This alert can be thrown for the following reasons:
  # - No CAN data received at all
  # - CAN data is received, but some message are not received at the right frequency
  # If you're not writing a new car port, this is usually cause by faulty wiring
  EventName.canError: {
    ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("CAN Error: Check Connections"),
    ET.PERMANENT: Alert(
      "CAN Error: Check Connections",
      "",
      AlertStatus.normal, AlertSize.small,
      Priority.LOW, VisualAlert.none, AudibleAlert.none, 1., creation_delay=1.),
    ET.NO_ENTRY: NoEntryAlert("CAN Error: Check Connections"),
  },
  EventName.steerUnavailable: {
    ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("LKAS Fault: Restart the Car"),
    ET.PERMANENT: NormalPermanentAlert("LKAS Fault: Restart the car to engage"),
    ET.NO_ENTRY: NoEntryAlert("LKAS Fault: Restart the Car"),
  },
  EventName.brakeUnavailable: {
    ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Fault: Restart the Car"),
    ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
    ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
  },
  EventName.reverseGear: {
    ET.PERMANENT: Alert(
      "Reverse\nGear",
      "",
      AlertStatus.normal, AlertSize.full,
      Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2, creation_delay=0.5),
    ET.SOFT_DISABLE: SoftDisableAlert("Reverse Gear"),
    ET.NO_ENTRY: NoEntryAlert("Reverse Gear"),
  },
  # On cars that use stock ACC the car can decide to cancel ACC for various reasons.
  # When this happens we can no long control the car so the user needs to be warned immediately.
  EventName.cruiseDisabled: {
    ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Is Off"),
  },
  # For planning the trajectory Model Predictive Control (MPC) is used. This is
  # an optimization algorithm that is not guaranteed to find a feasible solution.
  # If no solution is found or the solution has a very high cost this alert is thrown.
  EventName.plannerError: {
    ET.SOFT_DISABLE: SoftDisableAlert("Planner Solution Error"),
    ET.NO_ENTRY: NoEntryAlert("Planner Solution Error"),
  },
  # When the relay in the harness box opens the CAN bus between the LKAS camera
  # and the rest of the car is separated. When messages from the LKAS camera
  # are received on the car side this usually means the relay hasn't opened correctly
  # and this alert is thrown.
  EventName.relayMalfunction: {
    ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Harness Malfunction"),
    ET.PERMANENT: NormalPermanentAlert("Harness Malfunction", "Check Hardware"),
    ET.NO_ENTRY: NoEntryAlert("Harness Malfunction"),
  },
  EventName.noTarget: {
    ET.IMMEDIATE_DISABLE: Alert(
      "openpilot Canceled",
      "No close lead car",
      AlertStatus.normal, AlertSize.mid,
      Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
    ET.NO_ENTRY: NoEntryAlert("No Close Lead Car"),
  },
  EventName.speedTooLow: {
    ET.IMMEDIATE_DISABLE: Alert(
      "openpilot Canceled",
      "Speed too low",
      AlertStatus.normal, AlertSize.mid,
      Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
  },
  # When the car is driving faster than most cars in the training data, the model outputs can be unpredictable.
  EventName.speedTooHigh: {
    ET.WARNING: Alert(
      "Speed Too High",
      "Model uncertain at this speed",
      AlertStatus.userPrompt, AlertSize.mid,
      Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 4.),
    ET.NO_ENTRY: NoEntryAlert("Slow down to engage"),
  },
  EventName.lowSpeedLockout: {
    ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
    ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
  },
  EventName.turningIndicatorOn: {
    ET.WARNING: Alert(
      "TAKE CONTROL",
      "Steer Unavailable while Turning",
      AlertStatus.userPrompt, AlertSize.small,
      Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
  },
  EventName.autoLaneChange: {
    ET.WARNING: auto_lane_change_alert,
  },
  # Fork-specific: speed-limit slowdown notifications (silent and audible variants).
  EventName.slowingDownSpeed: {
    ET.PERMANENT: Alert("Slowing down","", AlertStatus.normal, AlertSize.small,
                        Priority.MID, VisualAlert.none, AudibleAlert.none, .1),
  },
  EventName.slowingDownSpeedSound: {
    ET.PERMANENT: Alert("Slowing down","", AlertStatus.normal, AlertSize.small,
                        Priority.HIGH, VisualAlert.none, AudibleAlert.slowingDownSpeed, 2.),
  },
}
| selfdrive/controls/lib/events.py | 30,954 | Alert priorities Event types get event name from enum less harsh version of SoftDisable, where the condition is user-triggered ********** helper functions ********** ********** alert callback functions **********if soft_disable_time < int(0.5 / DT_CTRL): return ImmediateDisableAlert(alert_text_2)if soft_disable_time < int(0.5 / DT_CTRL): return ImmediateDisableAlert(alert_text_2) ********** events with no alerts ********** ********** events only containing alerts displayed in all states ********** Car is recognized, but marked as dashcam only Car is not recognizedET.PERMANENT: ImmediateDisableAlert("openpilot failed to cancel cruise"), Some features or cars are marked as community features. If openpilot detects the use of a community feature it switches to dashcam mode until these features are allowed using a toggle in settings. openpilot doesn't recognize the car. This switches openpilot into a read-only mode. This can be solved by adding your fingerprint. See https://github.com/commaai/openpilot/wiki/Fingerprinting for more information ********** events only containing alerts that display while engaged ********** openpilot tries to learn certain parameters about your car by observing how the car behaves to steering inputs from both human and openpilot driving. This includes: - steer ratio: gear ratio of the steering rack. Steering angle divided by tire angle - tire stiffness: how much grip your tires have - angle offset: most steering angle sensors are offset and measure a non zero angle when driving straight This alert is thrown when any of these values exceed a sanity check. This can be caused by bad alignment or bad sensor data. If this happens consistently consider creating an issue on GitHub Thrown when the fan is driven at >50% but is not rotating Camera is not outputting frames at a constant framerate Unused When the GPS position and localizer diverge the localizer is reset to the current GPS position. 
This alert is thrown when the localizer is reset more often than expected. ********** events that affect controls state transitions ********** This alert is thrown when the calibration angles are outside of the acceptable range. For example if the device is pointed too much to the left or the right. Usually this can only be solved by removing the mount from the windshield completely, and attaching while making sure the device is pointed straight forward and is level. See https://comma.ai/setup for more information Different openpilot services communicate between each other at a certain interval. If communication does not follow the regular schedule this alert is thrown. This can mean a service crashed, did not broadcast a message for ten times the regular interval, or the average interval is more than 10% too high. Thrown when manager detects a service exited unexpectedly while driving Every frame from the camera should be processed by the model. If modeld is not processing frames fast enough they have to be dropped. This alert is thrown when over 20% of frames are dropped. Besides predicting the path, lane lines and lead car data the model also predicts the current velocity and rotation speed of the car. If the model is very uncertain about the current velocity while the car is moving, this usually means the model has trouble understanding the scene. This is used as a heuristic to warn the driver. 
When the localizer detects an acceleration of more than 40 m/s^2 (~4G) we alert the driver the device might have fallen from the windshield.ET.SOFT_DISABLE: soft_disable_alert("System Malfunction: Reboot Your Device"),ET.PERMANENT: NormalPermanentAlert("System Malfunction", "Reboot your Device"), Sometimes the USB stack on the device can get into a bad state causing the connection to the panda to be lost This alert can be thrown for the following reasons: - No CAN data received at all - CAN data is received, but some message are not received at the right frequency If you're not writing a new car port, this is usually cause by faulty wiring On cars that use stock ACC the car can decide to cancel ACC for various reasons. When this happens we can no long control the car so the user needs to be warned immediately. For planning the trajectory Model Predictive Control (MPC) is used. This is an optimization algorithm that is not guaranteed to find a feasible solution. If no solution is found or the solution has a very high cost this alert is thrown. When the relay in the harness box opens the CAN bus between the LKAS camera and the rest of the car is separated. When messages from the LKAS camera are received on the car side this usually means the relay hasn't opened correctly and this alert is thrown. When the car is driving faster than most cars in the training data, the model outputs can be unpredictable. | 4,792 | en | 0.918436 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class SsdataDataserviceDtevalIdentitycheckQueryResponse(AlipayResponse):
    """Response object for the ssdata.dataservice.dteval.identitycheck.query API."""

    def __init__(self):
        super(SsdataDataserviceDtevalIdentitycheckQueryResponse, self).__init__()
        # Backing storage for the response properties exposed below.
        self._evidence = None
        self._ext_map = None
        self._id_card_no_match_flag = None
        self._name_match_flag = None
        self._push_ant_data_flag = None

    @property
    def evidence(self):
        return self._evidence

    @evidence.setter
    def evidence(self, value):
        self._evidence = value

    @property
    def ext_map(self):
        return self._ext_map

    @ext_map.setter
    def ext_map(self, value):
        self._ext_map = value

    @property
    def id_card_no_match_flag(self):
        return self._id_card_no_match_flag

    @id_card_no_match_flag.setter
    def id_card_no_match_flag(self, value):
        self._id_card_no_match_flag = value

    @property
    def name_match_flag(self):
        return self._name_match_flag

    @name_match_flag.setter
    def name_match_flag(self, value):
        self._name_match_flag = value

    @property
    def push_ant_data_flag(self):
        return self._push_ant_data_flag

    @push_ant_data_flag.setter
    def push_ant_data_flag(self, value):
        self._push_ant_data_flag = value

    def parse_response_content(self, response_content):
        """Populate the known fields from the decoded gateway response."""
        response = super(SsdataDataserviceDtevalIdentitycheckQueryResponse, self).parse_response_content(response_content)
        # Assign each present key through its property setter; absent keys keep None.
        for key in ('evidence', 'ext_map', 'id_card_no_match_flag',
                    'name_match_flag', 'push_ant_data_flag'):
            if key in response:
                setattr(self, key, response[key])
| alipay/aop/api/response/SsdataDataserviceDtevalIdentitycheckQueryResponse.py | 2,093 | !/usr/bin/env python -*- coding: utf-8 -*- | 42 | en | 0.34282 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class SourceProviderAttributes(Model):
    """SourceProviderAttributes.

    Generated msrest serialization model (file header says DO NOT EDIT);
    deserialization is driven entirely by `_attribute_map`.

    :param name: The name of the source provider.
    :type name: str
    :param supported_capabilities: The capabilities supported by this source provider.
    :type supported_capabilities: dict
    :param supported_triggers: The types of triggers supported by this source provider.
    :type supported_triggers: list of :class:`SupportedTrigger <build.v4_1.models.SupportedTrigger>`
    """

    # msrest map: python attribute -> JSON wire key and wire type.
    # '{bool}' denotes a dict with bool values; '[SupportedTrigger]' a list of models.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'supported_capabilities': {'key': 'supportedCapabilities', 'type': '{bool}'},
        'supported_triggers': {'key': 'supportedTriggers', 'type': '[SupportedTrigger]'}
    }

    def __init__(self, name=None, supported_capabilities=None, supported_triggers=None):
        super(SourceProviderAttributes, self).__init__()
        self.name = name
        self.supported_capabilities = supported_capabilities
        self.supported_triggers = supported_triggers
| venv/lib/python3.8/site-packages/vsts/build/v4_1/models/source_provider_attributes.py | 1,634 | SourceProviderAttributes.
:param name: The name of the source provider.
:type name: str
:param supported_capabilities: The capabilities supported by this source provider.
:type supported_capabilities: dict
:param supported_triggers: The types of triggers supported by this source provider.
:type supported_triggers: list of :class:`SupportedTrigger <build.v4_1.models.SupportedTrigger>`
-------------------------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. -------------------------------------------------------------------------------------------- Generated file, DO NOT EDIT Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------------------------- | 929 | en | 0.534629 |
""" Tests the creation of tables, and the methods of the sql class
"""
from pyrate.repositories.sql import Table
from utilities import setup_database
class TestSql:
""" Tests the Sql class
"""
def test_get_list_of_columns(self, setup_database):
db = setup_database
rows = [{'unit': 'days',
'description': 'At berth/anchor',
'name': 's_berth_day'},
{'unit': 'SOG / kts',
'description': 'Average at sea',
'name': 's_av_sea'}]
with db:
actual = db.clean._get_list_of_columns(rows[0])
assert isinstance(actual, str)
assert actual.endswith(')')
assert actual[0] == '('
actual_contents = actual.strip('()').split(',')
expected = ['description','name','unit']
for expected_column in expected:
assert expected_column in actual_contents
def test_get_list_of_columns_lowerconversion(self, setup_database):
db = setup_database
rows = [{'uNit': 'days',
'Description': 'At berth/anchor',
'namE': 's_berth_day'},
{'unit': 'SOG / kts',
'description': 'Average at sea',
'name': 's_av_sea'}]
with db:
actual = db.clean._get_list_of_columns(rows[0])
assert isinstance(actual, str)
assert actual.endswith(')')
assert actual[0] == '('
actual_contents = actual.strip('()').split(',')
expected = ['description','name','unit']
for expected_column in expected:
assert expected_column in actual_contents
| tests/test_sql.py | 1,664 | Tests the Sql class
Tests the creation of tables, and the methods of the sql class | 87 | en | 0.796186 |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
# pylint: disable-all
# flake8: noqa
"""Factory method for easily getting imdbs by name."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Registry of dataset factories, keyed by dataset name.  Each value is a
# zero-argument callable returning a fresh imdb instance.
__sets = {}

from datasets.pascal_voc import pascal_voc
from datasets.coco import coco
from datasets.imagenet import imagenet
from datasets.vg import vg
from datasets.ads import ads

import numpy as np

# NOTE: in every loop below the loop variables are bound as lambda default
# arguments (e.g. split=split) on purpose — without that, each lambda would
# late-bind and see only the *last* loop value when finally called.

# Set up ads dataset
for split in ['train', 'val']:
    name = 'pitt_ads_{}'.format(split)
    __sets[name] = (lambda split=split : ads(split))

# Set up voc_<year>_<split>
for year in ['2007', '2012']:
    for split in ['train', 'val', 'trainval', 'test']:
        name = 'voc_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))

# Set up coco_2014_<split>
for year in ['2014']:
    for split in ['train', 'val', 'minival', 'valminusminival', 'trainval']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up coco_2014_cap_<split>
# NOTE(review): the 'coco_2014_train', 'coco_2014_val' and 'coco_2014_trainval'
# keys overlap with the block above, so these registrations silently overwrite
# the earlier ones — confirm this is intentional.
for year in ['2014']:
    for split in ['train', 'val', 'capval', 'valminuscapval', 'trainval']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up coco_2015_<split>
for year in ['2015']:
    for split in ['test', 'test-dev']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up vg_<split>
# for version in ['1600-400-20']:
#     for split in ['minitrain', 'train', 'minival', 'val', 'test']:
#         name = 'vg_{}_{}'.format(version,split)
#         __sets[name] = (lambda split=split, version=version: vg(version, split))
for version in ['150-50-20', '150-50-50', '500-150-80', '750-250-150', '1750-700-450', '1600-400-20']:
    for split in ['minitrain', 'smalltrain', 'train', 'minival', 'smallval', 'val', 'test']:
        name = 'vg_{}_{}'.format(version,split)
        __sets[name] = (lambda split=split, version=version: vg(version, split))

# Set up imagenet.
for split in ['train', 'val', 'val1', 'val2', 'test']:
    name = 'imagenet_{}'.format(split)
    devkit_path = 'data/imagenet/ILSVRC/devkit'
    data_path = 'data/imagenet/ILSVRC'
    __sets[name] = (lambda split=split, devkit_path=devkit_path, data_path=data_path: imagenet(split,devkit_path,data_path))
def get_imdb(name):
    """Construct and return the imdb (image database) registered under *name*."""
    if name in __sets:
        # Each registry entry is a zero-argument factory; call it for a fresh imdb.
        return __sets[name]()
    raise KeyError('Unknown dataset: {}'.format(name))
def list_imdbs():
    """Return the names of every registered imdb."""
    return [registered_name for registered_name in __sets]
| lib/datasets/factory.py | 2,880 | Get an imdb (image database) by name.
List all registered imdbs.
Factory method for easily getting imdbs by name.
-------------------------------------------------------- Fast R-CNN Copyright (c) 2015 Microsoft Licensed under The MIT License [see LICENSE for details] Written by Ross Girshick -------------------------------------------------------- pylint: disable-all flake8: noqa Set up ads dataset Set up voc_<year>_<split> Set up coco_2014_<split> Set up coco_2014_cap_<split> Set up coco_2015_<split> Set up vg_<split> for version in ['1600-400-20']: for split in ['minitrain', 'train', 'minival', 'val', 'test']: name = 'vg_{}_{}'.format(version,split) __sets[name] = (lambda split=split, version=version: vg(version, split)) set up imagenet. | 771 | en | 0.513835 |
import math
def is_prime(num):
    """Return True if *num* is prime.

    Uses trial division only up to sqrt(num): any composite number has a
    divisor no larger than its square root, so the original full-range scan
    (O(n) per call) was wasted work.  Even numbers > 2 are rejected up front
    so the loop can step over odd candidates only.
    """
    if num < 2:
        return False
    if num < 4:
        return True  # 2 and 3 are prime
    if num % 2 == 0:
        return False
    limit = int(math.sqrt(num))
    for divisor in range(3, limit + 1, 2):
        if num % divisor == 0:
            return False
    return True


def get_nth_prime(n):
    """Return the n-th prime, 1-indexed (get_nth_prime(1) == 2).

    For n <= 0 this returns 0, matching the original implementation's
    behaviour of skipping the loop entirely.
    """
    count = 0
    candidate = 0
    while count < n:
        candidate += 1
        if is_prime(candidate):
            count += 1
    return candidate
if __name__ == '__main__':
    # Fixed: the original used a Python 2 `print` statement, which is a
    # SyntaxError on Python 3.
    # print(get_nth_prime(6))  # quick sanity check: expect 13
    print(get_nth_prime(10001))  # Project Euler problem 7: the 10,001st prime
| problems/007/run.v1.py | 371 | print get_nth_prime(6) | 22 | ja | 0.056703 |
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class CreateConfigurationResponse(SdkResponse):
    """Response wrapper for the RDS CreateConfiguration API call.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """

    # Attribute names whose values must be masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'configuration': 'ConfigurationSummaryForCreate'
    }

    attribute_map = {
        'configuration': 'configuration'
    }

    def __init__(self, configuration=None):
        """CreateConfigurationResponse - a model defined in huaweicloud sdk"""
        super(CreateConfigurationResponse, self).__init__()

        self._configuration = None
        self.discriminator = None

        if configuration is not None:
            self.configuration = configuration

    @property
    def configuration(self):
        """Gets the configuration of this CreateConfigurationResponse.

        :return: The configuration of this CreateConfigurationResponse.
        :rtype: ConfigurationSummaryForCreate
        """
        return self._configuration

    @configuration.setter
    def configuration(self, configuration):
        """Sets the configuration of this CreateConfigurationResponse.

        :param configuration: The configuration of this CreateConfigurationResponse.
        :type: ConfigurationSummaryForCreate
        """
        self._configuration = configuration

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialise any model objects inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialise any model objects stored as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of exposing their values.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateConfigurationResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/create_configuration_response.py | 3,079 | Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
Returns true if both objects are equal
CreateConfigurationResponse - a model defined in huaweicloud sdk
Returns true if both objects are not equal
For `print` and `pprint`
Gets the configuration of this CreateConfigurationResponse.
:return: The configuration of this CreateConfigurationResponse.
:rtype: ConfigurationSummaryForCreate
Sets the configuration of this CreateConfigurationResponse.
:param configuration: The configuration of this CreateConfigurationResponse.
:type: ConfigurationSummaryForCreate
Returns the model properties as a dict
Returns the string representation of the model
coding: utf-8 | 847 | en | 0.673042 |
from tkinter import *
from tkinter import ttk
import time
import time
window = Tk()
mygreen = "lightblue"
myred = "blue"
style = ttk.Style()
style.theme_create( "dedoff", parent="alt", settings={
"TNotebook": {"configure": {"tabmargins": [2, 5, 2, 0] } },
"TNotebook.Tab": {
"configure": {"padding": [5, 1], "background": mygreen },
"map": {"background": [("selected", myred)],
"expand": [("selected", [1, 1, 1, 0])] } } } )
style.theme_use("dedoff")
window.title("Электронный учебник tkinter")
window.geometry('1920x1080')
tab_control = ttk.Notebook(window)
#панели
tab1 = ttk.Frame(tab_control, width=1920, height=1080)
tab2 = ttk.Frame(tab_control, width=1920, height=1080)
tab3 = ttk.Frame(tab_control, width=1080, height=600)
tab4 = ttk.Frame(tab_control, width=1080, height=600)
tab5 = ttk.Frame(tab_control, width=1080, height=600)
tab6 = ttk.Frame(tab_control, width=1080, height=600)
tab7 = ttk.Frame(tab_control, width=1080, height=600)
tab8 = ttk.Frame(tab_control, width=1080, height=600)
tab9 = ttk.Frame(tab_control, width=1080, height=600)
tab10 = ttk.Frame(tab_control, width=1080, height=600)
tab_control.add(tab1, text='Начало')
background_image = PhotoImage(file='background.ppm')
background_label = Label(tab1, image=background_image)
background_label.place(relwidth=1, relheight=1)
lower_frame = Frame(tab1, bg="lightblue", bd=10)
lower_frame.place(relx=0.5, rely=0.10, relwidth=0.75, relheight=0.75, anchor='n')
labeltext1 = Label(lower_frame, text="Tkinter – это кроссплатформенная библиотека для разработки графического интерфейса на "
"языке Python\n (начиная с Python 3.0 переименована в tkinter). Tkinter расшифровывается "
"как Tk interface \nНачиная с версии python-3.0 библиотека переименована в соответствии с "
"PEP 8 в tkinter (с маленькой буквы). \nИмпортируется она как и любая другая библиотека "
"абсолютно весь код в этом учебнике написан для python версии 3.x \nПодключить модуль "
"можно с помощью инструкции import. После ключевого слова import указывается название "
"модуля.\n Одной инструкцией можно подключить несколько модулей. Для подключения всех \n"
"функций модуля используем:\n"
"import tkinter \n"
"или \n"
"from tkinter import * \n"
"Чтобы убедиться, что Tkinter установлен и работает, воспользуемся стандартной "
"функцией Tkinter: test():"
"\n"
"import tkinter \n"
"tkinter._test() \n"
,
font=("Times New Roman", 13), bg="white")
labeltext1.place(relwidth=1, relheight=0.6)
photo = PhotoImage(file='edu54img.pgm')
labelimage = Label(lower_frame,bg='white', image=photo)
labelimage.place(relx=0.5, rely=0.6, relwidth=1, relheight=0.4, anchor='n')
#ОГО ВТОРООООООООООЙ ТААААААААААААААААААБ
tab_control.add(tab2, text='Canvas')
background_image2 = PhotoImage(file='background.ppm')
background_label1 = Label(tab2, image=background_image2)
background_label1.place(relwidth=1, relheight=1)
lower_frame1 = Frame(tab2, bg="lightblue", bd=10)
lower_frame1.place(relx=0.5, rely=0.02, relwidth=0.75, relheight=0.95, anchor='n')
labeltext2 = Label(lower_frame1, text=u"Привет, это второй раздел учебника.\n В tkinter от класса Canvas создаются объекты-холсты, на которых можно рисовать,\n"
"размещая различные фигуры и объекты. Делается это с помощью вызовов соответствующих \n"
"методов. При создании экземпляра Canvas необходимо указать его ширину и высоту. При \n"
"размещении геометрических примитивов и других объектов указываются их координаты на \n "
"холсте. Точкой отсчета является верхний левый угол.", font=("Times New Roman", 12), bg="white")
labeltext2.place(relwidth=1, relheight=0.3)
photo2 = PhotoImage(file='edu54img2.pgm')
labelimage1 = Label(lower_frame1, bg='white', image=photo2)
labelimage1.place(relx=0.5, rely=0.30, relwidth=1, relheight=0.49, anchor='n')
labeltext2 = Label(lower_frame1, text="В программе ниже создается холст.\n"
"from tkinter import *\n"
"window = Tk()\n"
"c = Canvas(root, width=200, height=200, bg='white')\n"
"c.pack()\n"
"window.mainloop()\n"
"в следующей главе мы разберем как рисовать на этом холсте", font=("Times New Roman", 12), bg="white")
labeltext2.place(relx=0.5, rely=0.75, relwidth=1, relheight=0.3, anchor='n')
tab_control.add(tab3, text='Примитивы')
background_image3 = PhotoImage(file='background.ppm')
background_label2 = Label(tab3, image=background_image3)
background_label2.place(relwidth=1, relheight=1)
lower_frame2 = Frame(tab3, bg="lightblue", bd=10)
lower_frame2.place(relx=0.5, rely=0.02, relwidth=0.8, relheight=0.95, anchor='n')
labeltext3 = Label(lower_frame2, text="В tkinter уже есть графические примитивы, для рисования, их нужно всего лишь правильно "
"указать.\n В программе ниже создается холст. На нем с помощью метода create_line() "
"рисуются отрезки. \n Сначала указываются координаты начала (x1, y1), затем – конца (x2, "
"y2) В программе ниже создаётся и рисуется линия на холсте.", font=("Times New Roman", 12), bg="white")
labeltext3.place(relwidth=1, relheight=0.12)
codeimg = PhotoImage(file='code.pgm')
labelimg = Label(lower_frame2, bg='white', image=codeimg)
labelimg.place(relx=0.5, rely=0.11, relwidth=1, relheight=0.5, anchor='n')
labelgotext = Label(lower_frame2, text="Собственно сами примитивы. Указываем координаты примитива всегда следующим образом – \n "
"верхний левый угол(x1, y1), вторые – правый нижний(x2, y2).", font=("Times New "
"Roman", 11),
bg='white')
labelgotext.place(relx=0.5, rely=0.52, relwidth=1, relheight=0.07, anchor='n')
rectangle = PhotoImage(file='rectangle.ppm')
rectanglelabel = Label(lower_frame2, bg='white', image=rectangle)
rectanglelabel.place(relx=0.5, rely=0.60, relwidth=1, relheight=0.45, anchor='n')
labelgotext2 = Label(lower_frame2, text="Далее о других примитивах в следующей вкладке", font=("Times New "
"Roman", 11),
bg='white')
labelgotext2.place(relx=0.5, rely=0.97, relwidth=1, relheight=0.05, anchor='n')
tab_control.add(tab4, text='Примитивы 2')
background_image4 = PhotoImage(file='background.ppm')
background_label3 = Label(tab4, image=background_image4)
background_label3.place(relwidth=1, relheight=1)
lower_frame3 = Frame(tab4, bg="lightblue", bd=10)
lower_frame3.place(relx=0.5, rely=0, relwidth=0.9, relheight=1, anchor='n')
oval = PhotoImage(file='oval_1.ppm')
ovallabel = Label(lower_frame3,bg='white', image=oval)
ovallabel.place(relx=0.5, rely=0, relwidth=1, relheight=0.55, anchor='n')
elipsoid = PhotoImage(file='ellipssmall.ppm')
elabel = Label(lower_frame3, bg='white', image=elipsoid)
elabel.place(relx=0.5, rely=0.5, relwidth=1, relheight=0.25, anchor='n')
labeltext4 = Label(lower_frame3, text="Метод create_oval(x1, y1, x2, y2) создает эллипсы. При этом задаются координаты гипотетического "
"прямоугольника, описывающего эллипс. \nЕсли нужно получить круг, то соответственно "
"описываемый прямоугольник должен быть квадратом.\n"
"Методом create_polygon(x1, x2...xn, yn) рисуется произвольный многоугольник путем задания координат каждой его точки\n"
"Создание прямоугольников методом create_rectangle(x1, y1, x2, y2)\n"
"Опции: \nwidth=число - ширина обводки, fill='color' - цвет заливки,\n outline='color' - цвет "
"обводки,\n activefill определяет цвет при наведении на него курсора мыши.\n"
"activeoutline определяет цвет обводки при наведении курсор", font=("Times New Roman", 11),
bg="white")
labeltext4.place(relx=0.5, rely=0.74, relwidth=1, relheight=0.26, anchor='n')
tab_control.add(tab5, text='Примитивы 3')
background_image5 = PhotoImage(file='background.ppm')
background_label4 = Label(tab5, image=background_image5)
background_label4.place(relwidth=1, relheight=1)
lower_frame4 = Frame(tab5, bg="lightblue", bd=10)
lower_frame4.place(relx=0.5, rely=0.05, relwidth=0.75, relheight=0.9, anchor='n')
labeltext5 = Label(lower_frame4, text="Более сложные для понимания фигуры получаются при использовании метода create_arc(). В \n"
"зависимости от значения опции style можно получить сектор (по умолчанию), \n"
"сегмент (CHORD) или дугу (ARC). Также как в случае create_oval() координаты задают \n"
"прямоугольник, в который вписана окружность (или эллипс), из которой вырезают сектор, \n"
"сегмент или дугу. Опции start присваивается градус начала фигуры, extent определяет "
"угол поворота.",
font=("Times New Roman", 11), bg="white")
labeltext5.place(relwidth=1, relheight=0.2)
arc = PhotoImage(file='arc.ppm')
arclabel = Label(lower_frame4,bg='white', image=arc)
arclabel.place(relx=0.5, rely=0.15, relwidth=1, relheight=0.4, anchor='n')
arc2 = PhotoImage(file='arc2.ppm')
arclabel2 = Label(lower_frame4,bg='white', image=arc2)
arclabel2.place(relx=0.5, rely=0.55, relwidth=1, relheight=0.5, anchor='n')
tab_control.add(tab6, text='Полезное')
background_image6 = PhotoImage(file='background.ppm')
background_label6 = Label(tab6, image=background_image6)
background_label6.place(relwidth=1, relheight=1)
table = PhotoImage(file='colortable.ppm')
tablelabel = Label(tab6,bg='lightblue', image=table)
tablelabel.place(relx=0.5, rely=0, relwidth=0.82, relheight=1, anchor='n')
tab_control.add(tab7, text='Практикум')
background_image7 = PhotoImage(file='background.ppm')
background_label7 = Label(tab7, bg='white', image=background_image7)
background_label7.place(relwidth=1, relheight=1)
lower_frame7 = Frame(tab7, bg="lightblue", bd=10)
lower_frame7.place(relx=0.5, rely=0.001, relwidth=0.65, relheight=1, anchor='n')
labelTASK1 = Label(lower_frame7, text="1) Пропеллер"
":Нарисуйте пропеллер, как это показано ниже\n"
"'Кто мечтает быть пилотом, очень смелый видно тот. От-от-от вин-та!'", font=("Georgia", 12,), bg='white')
labelTASK1.place(relx=0.5, rely=0, relwidth=1, relheight=0.06, anchor='n')
propeller = PhotoImage(file='propellersmall.ppm')
propelabel = Label(lower_frame7, bg='white', image=propeller)
propelabel.place(relx=0.5, rely=0.06, relwidth=1, relheight=0.55, anchor='n')
labelTASK2 = Label(lower_frame7, text="2) Торт"
":Нарисуйте торт для учителя информатики.\n'Треугольник' должен пропадать при наведении курсора.'\n"
"'Кто сьел мой двумерный массив?!'", font=("Georgia", 12, ), bg='white')
labelTASK2.place(relx=0.5, rely=0.6, relwidth=1, relheight=0.1, anchor='n')
tort = PhotoImage(file='tortsmall.ppm')
tortlabel = Label(lower_frame7, bg='white', image=tort)
tortlabel.place(relx=0.5, rely=0.69, relwidth=1, relheight=0.35, anchor='n')
tab_control.add(tab8, text='Анимации')
background_image8 = PhotoImage(file='background.ppm')
background_label8 = Label(tab8, image=background_image8)
background_label8.place(relwidth=1, relheight=1)
lower_frame8 = Frame(tab8, bg="lightblue", bd=10)
lower_frame8.place(relx=0.5, rely=0.5, relwidth=0.59, relheight=0.5, anchor='n')
labelanimation = Label(lower_frame8, text='Методы, создающие фигуры на холсте, возвращают численные идентификаторы \n'
'этих объектов, которые можно присвоить переменным,\n через которые позднее '
'обращаться к созданным фигурам. \n Основной шаблон для анимации с Tkinter – написать функцию, которая рисует один кадр. \n Затем используйте что-то подобное, чтобы называть его через регулярные интервалы: \n'
" def animate(self): self.draw_one_frame() self.after(100, self.animate) \n"
"Как только вы вызываете эту функцию один раз,\n она будет продолжать "
'рисовать кадры со скоростью десять в секунду – один раз каждые 100 '
"миллисекунд.\n В следующей вкладке разберём это подробно", font=("Times New Roman", 11),
bg="white")
labelanimation.place(relwidth=1, relheight=1)
# Geometry of the bouncing-ball demo canvas on the animation tab.
WIDTH = 350
HEIGHT = 300
SIZE = 50  # ball diameter in pixels

canvas = Canvas(tab8, width=WIDTH, height=HEIGHT, bg="blue")
canvas.pack()

color = '#6098cd'


class Ball:
    """A ball drawn on the module-level `canvas` that bounces off its edges."""

    def __init__(self, tag):
        # The oval starts in the top-left corner; `tag` lets cycle() restack it by name.
        self.shape = canvas.create_oval(0, 0, SIZE, SIZE, fill=color, tags=tag)
        self.speedx = 10  # pixels moved along x per update
        self.speedy = 15  # pixels moved along y per update
        # NOTE(review): `active` is never read anywhere in this file — confirm before removing.
        self.active = True

    def ball_update(self):
        """Advance the ball one step and reverse direction at the canvas edges."""
        canvas.move(self.shape, self.speedx, self.speedy)
        pos = canvas.coords(self.shape)  # bounding box [x1, y1, x2, y2]
        if pos[2] >= WIDTH or pos[0] <= 0:
            self.speedx *= -1
        if pos[3] >= HEIGHT or pos[1] <= 0:
            self.speedy *= -1
# `global` at module level is a no-op but is kept as written; `switcher`
# alternates which ball is advanced on each animation tick.
global switcher
switcher = True


def cycle():
    """One animation tick: advance one ball, restack canvas items, reschedule."""
    global switcher
    canvas.tag_raise("bg")
    if switcher:
        ball2.ball_update()
        ball2.ball_update()
        canvas.tag_raise("ball")
    else:
        ball.ball_update()
        ball.ball_update()
        canvas.tag_raise("ball2")
    tab8.update_idletasks()
    switcher = not switcher  # alternate which ball moves on the next tick
    tab8.after(40, cycle)  # reschedule: one tick every 40 ms (~25 FPS)


# White backdrop sits behind both balls; tagged so cycle() can raise it first.
bg = canvas.create_rectangle(0, 0, WIDTH+1, HEIGHT+1, fill="white", tags="bg")
ball = Ball("ball")
ball.ball_update()
ball2 = Ball("ball2")
tab8.after(0, cycle)  # start the animation loop on the next event-loop pass
tab_control.add(tab9, text='Анимации 2')
background_image9 = PhotoImage(file='background.ppm')
background_label9 = Label(tab9, image=background_image9)
background_label9.place(relwidth=1, relheight=1)
lower_frame9 = Frame(tab9, bg="lightblue", bd=10)
lower_frame9.place(relx=0.5, rely=0.10, relwidth=0.75, relheight=0.75, anchor='n')
labelanimation2 = Label(lower_frame9, text='Рассмотрим следующий код, отвечающий за создание анимации и после этого попрактикуемся. Собственно сам код: \n', font=("Times New Roman", 11),
bg="white")
labelanimation2.place(relx=0.5, rely=0, relwidth=1, relheight=0.06, anchor='n')
code_image8 = PhotoImage(file='sharcode.ppm')
code_label8 = Label(lower_frame9, bg='white', image=code_image8)
code_label8.place(relx=0.5, rely=0.06, relwidth=1, relheight=0.6, anchor='n')
labelanimation3 = Label(lower_frame9, text='В данном коде создаётся шар, который двигается. Вначале происходит '
'создание холста Canvas и его "упаковка"\n, а также объекта ball, '
'с помощью примитива круг. После всего этого создаётся функция, которая '
'анимирует данный объект, рассмотрим её очень подробно \n '
'def motion (): - создание функции с названием motion \n'
'c.move(ball, 1, 0) - движение объекта на c. В самом начале при создании \n '
'холста мы назвали его c, следовательно при указании движения на нём мы \n'
'пишем c. move - декоратор, который указывает, что делать. В нашем случае \n'
'двигаться. Но чему? В скобках указываем объект движения и его координаты \n'
'движения x, y. if c.coords(ball)[2] < 300, отвечает за то, чтобы шар \n'
'двигался по координате X меньше 300. root.after(10, motion) - Частота обновлений окна в милисекундах. \n'
'После чего с помощью motion(), запускаем нашу функцию и само окно tkinter.', font=("Times New Roman", 10),
bg="white")
labelanimation3.place(relx=0.5, rely=0.65, relwidth=1, relheight=0.35, anchor='n')
tab_control.add(tab10, text='Практикум 2')
background_image10 = PhotoImage(file='background.ppm')
background_label10 = Label(tab10, image=background_image10)
background_label10.place(relwidth=1, relheight=1)
# Practicum 2: animated train sketch
c = Canvas(tab10, width=300, height=200, bg="white")
c.place(relx=0.5, rely=0.65, relwidth=0.15, relheight=0.2, anchor='n')

# Two carriages joined by a coupling bar, riding on a rail line.
vagon1 = c.create_rectangle(0, 50, 60, 90, fill='blue')
line = c.create_line(60, 70, 70, 70, fill='brown', width=6)
vagon2 = c.create_rectangle(70, 50, 130, 90, fill='blue')
relsa = c.create_line(0, 90, 300, 90, fill='gray', width=3)


def motion():
    """Move the whole train one pixel right, rescheduling until x reaches 50."""
    c.move(vagon1, 1, 0)
    c.move(vagon2, 1, 0)
    c.move(line, 1, 0)
    if c.coords(vagon1)[0] < 50:
        tab10.after(20, motion)  # keep animating every 20 ms until the stop point


motion()
tab_control.pack(expand=10, fill='both', padx=5, pady=5)
# NOTE(review): this rebinds `lower_frame9`, shadowing the frame created for
# tab9 earlier in the file; harmless because the tab9 frame is already placed,
# but consider a distinct name to avoid confusion.
lower_frame9 = Frame(tab10, bg="lightblue", bd=10)
lower_frame9.place(relx=0.5, rely=0.35, relwidth=0.45, relheight=0.25, anchor='n')
labelpractic2 = Label(lower_frame9, text="Анимируйте данный скетч поезда! Исходный код создания самого скетча без холста: \n vagon1 = c.create_rectangle(0, 50, 60, 90, fill='blue'\n"
                                         "line = c.create_line(60, 70, 70, 70, fill='brown', width=6) \n"
                                         "vagon2 = c.create_rectangle(70, 50, 130, 90, fill='blue') \n"
                                         "relsa = c.create_line(0, 90, 300, 90, fill='gray', width=3) \n", bg='white', font=("Times New Roman", 11))
labelpractic2.place(relwidth=1, relheight=1)

Button(window, text='© Dedov Georgiy 2019').pack(fill='x')
window.resizable(True, True)
window.mainloop()
| edu54book/edu54bookSizeAuto.py | 22,153 | панелиОГО ВТОРООООООООООЙ ТААААААААААААААААААБ Практикум 2_поезд | 64 | lv | 0.744733 |
import unittest
from facial_recog.app import *
from .test_config import test_run_count, seed, success_perc
from .test_util import *
class TestFR(unittest.TestCase):
    """End-to-end facial-recognition test: build a dataset, train, then predict."""

    # subject id -> subject name, filled in by setUp()
    subject_names = dict()
    # subject id -> class id, filled in by setUp()
    subject_classes = dict()

    def setUp(self):
        """Build a fresh sample dataset, database and per-class classifiers."""
        random.seed(seed)  # fixed seed keeps the random choices reproducible

        create_app_dirs()
        setup_logger()
        logging.debug('Seed is %s', seed)

        # only for super strict testing
        # clear_fdb()
        prepare_fdb()

        self.subject_names, self.subject_classes = create_sample()
        logging.info('Subject names: %s', self.subject_names)
        logging.info('Subject classes are: %s', self.subject_classes)

        recreate_db()
        populate_db(self.subject_classes)
        logging.info('New db created')

        clear_dataset()
        copy_dataset(subject_names=self.subject_names)
        logging.info('Training Dataset created')

        clear_recognizers()
        for class_id in get_all_classes():
            train(class_id=class_id)
        logging.info('Classifiers trained')

    def test_fr(self):
        """Predict random subjects and require a minimum overall success rate."""
        success = 0
        for _ in range(test_run_count):
            random_class = random.choice(get_all_classes())
            random_subject = random.choice(get_class_subjects(random_class))
            random_image = random.choice(
                get_images_for_subject(subject_name=self.subject_names[random_subject]))
            logging.info('Testing subject %s in class %s with image %s', random_subject, random_class, random_image)
            if predict(img=path_to_img(random_image), class_id=random_class) == random_subject:
                success += 1
                logging.info('Test success')
            else:
                logging.warning('Test failed')
        # The recogniser is allowed to miss: only success_perc of runs must pass.
        self.assertGreaterEqual(success, int(success_perc * test_run_count))
| facial_recog/tests/test_app.py | 1,821 | only for super strict testing clear_fdb() | 41 | en | 0.529469 |
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# Revisions copyright 2008-2009 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# L.Pritchard@scri.ac.uk
################################################################################
""" CircularDrawer module
Provides:
o CircularDrawer - Drawing object for circular diagrams
For drawing capabilities, this module uses reportlab to draw and write
the diagram:
http://www.reportlab.com
For dealing with biological information, the package expects BioPython
objects:
http://www.biopython.org
"""
# ReportLab imports
from reportlab.graphics.shapes import *
from reportlab.lib import colors
from reportlab.pdfbase import _fontdata
from reportlab.graphics.shapes import ArcPath
# GenomeDiagram imports
from _AbstractDrawer import AbstractDrawer, draw_polygon, intermediate_points
from _FeatureSet import FeatureSet
from _GraphSet import GraphSet
from math import ceil, pi, cos, sin, asin
class CircularDrawer(AbstractDrawer):
""" CircularDrawer(AbstractDrawer)
Inherits from:
o AbstractDrawer
Provides:
Methods:
o __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1) Called on instantiation
o set_page_size(self, pagesize, orientation) Set the page size to the
passed size and orientation
o set_margins(self, x, y, xl, xr, yt, yb) Set the drawable area of the
page
o set_bounds(self, start, end) Set the bounds for the elements to be
drawn
o is_in_bounds(self, value) Returns a boolean for whether the position
is actually to be drawn
o __len__(self) Returns the length of sequence that will be drawn
o draw(self) Place the drawing elements on the diagram
o init_fragments(self) Calculate information
about sequence fragment locations on the drawing
o set_track_heights(self) Calculate information about the offset of
each track from the fragment base
o draw_test_tracks(self) Add lines demarcating each track to the
drawing
o draw_track(self, track) Return the contents of the passed track as
drawing elements
o draw_scale(self, track) Return a scale for the passed track as
drawing elements
o draw_greytrack(self, track) Return a grey background and superposed
label for the passed track as drawing
elements
o draw_feature_set(self, set) Return the features in the passed set as
drawing elements
o draw_feature(self, feature) Return a single feature as drawing
elements
o get_feature_sigil(self, feature, x0, x1, fragment) Return a single
feature as its sigil in drawing elements
o draw_graph_set(self, set) Return the data in a set of graphs as
drawing elements
o draw_line_graph(self, graph) Return the data in a graph as a line
graph in drawing elements
o draw_heat_graph(self, graph) Return the data in a graph as a heat
graph in drawing elements
o draw_bar_graph(self, graph) Return the data in a graph as a bar
graph in drawing elements
o canvas_angle(self, base) Return the angle, and cos and sin of
that angle, subtended by the passed
base position at the diagram center
o draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color) Return a drawable element describing an arc
Attributes:
o tracklines Boolean for whether to draw lines dilineating tracks
o pagesize Tuple describing the size of the page in pixels
o x0 Float X co-ord for leftmost point of drawable area
o xlim Float X co-ord for rightmost point of drawable area
o y0 Float Y co-ord for lowest point of drawable area
o ylim Float Y co-ord for topmost point of drawable area
o pagewidth Float pixel width of drawable area
o pageheight Float pixel height of drawable area
o xcenter Float X co-ord of center of drawable area
o ycenter Float Y co-ord of center of drawable area
o start Int, base to start drawing from
o end Int, base to stop drawing at
o length Size of sequence to be drawn
o track_size Float (0->1) the proportion of the track height to
draw in
o drawing Drawing canvas
o drawn_tracks List of ints denoting which tracks are to be drawn
o current_track_level Int denoting which track is currently being
drawn
o track_offsets Dictionary of number of pixels that each track top,
center and bottom is offset from the base of a
fragment, keyed by track
o sweep Float (0->1) the proportion of the circle circumference to
use for the diagram
"""
def __init__(self, parent=None, pagesize='A3', orientation='landscape',
             x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
             start=None, end=None, tracklines=0, track_size=0.75,
             circular=1):
    """Initialise a circular drawer for the passed Diagram.

        o parent        Diagram object containing the data that the
                        drawer draws

        o pagesize      String describing the ISO size of the image, or
                        a tuple of pixels

        o orientation   String describing the required orientation of
                        the final drawing ('landscape' or 'portrait')

        o x             Float (0->1) describing the relative size of the
                        X margins to the page

        o y             Float (0->1) describing the relative size of the
                        Y margins to the page

        o xl            Float (0->1) describing the relative size of the
                        left X margin to the page (overrides x)

        o xr            Float (0->1) describing the relative size of the
                        right X margin to the page (overrides x)

        o yt            Float (0->1) describing the relative size of the
                        top Y margin to the page (overrides y)

        o yb            Float (0->1) describing the relative size of the
                        lower Y margin to the page (overrides y)

        o start         Int, the position to begin drawing the diagram at

        o end           Int, the position to stop drawing the diagram at

        o tracklines    Boolean flag to show (or not) lines delineating
                        tracks on the diagram

        o track_size    The proportion of the available track height
                        that should be taken up in drawing

        o circular      Boolean flag to show whether the passed sequence
                        is circular or not
    """
    # Use the superclass' instantiation method
    AbstractDrawer.__init__(self, parent, pagesize, orientation,
                            x, y, xl, xr, yt, yb, start, end,
                            tracklines)
    # Useful measurements on the page
    self.track_size = track_size
    # For a linear (non-circular) sequence, leave a wedge of the circle
    # empty so the two sequence ends do not appear joined up.
    # (Idiomatic truthiness test replaces the old 'circular == False'.)
    if not circular:
        self.sweep = 0.9    # Proportion of circumference to draw around
    else:
        self.sweep = 1
def set_track_heights(self):
    """Calculate the inner, center and outer radius for every track.

    Since tracks may not be of identical heights, the bottom, center
    and top radius for each track is stored in a dictionary,
    self.track_radii, keyed by track number.
    """
    top_track = max(self.drawn_tracks)  # The 'highest' track to draw
    trackunit_sum = 0   # Total number of 'units' taken up by all tracks
    trackunits = {}     # (start, end) units per track, keyed by track number
    heightholder = 0    # Running unit total while assigning ranges
    for track in range(1, top_track + 1):   # track numbers to 'draw'
        try:
            trackheight = self._parent[track].height    # Get track height
        except Exception:   # Track missing or has no height attribute...
            # (was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit)
            trackheight = 1     # ...default to 1
        trackunit_sum += trackheight    # increment total track unit height
        trackunits[track] = (heightholder, heightholder + trackheight)
        heightholder += trackheight     # move to next height
    # Pixel height of one track unit: half the smaller page dimension
    # (i.e. the available radius) shared among all units
    trackunit_height = 0.5 * min(self.pagewidth, self.pageheight) / trackunit_sum
    # Calculate top and bottom radii for each track
    self.track_radii = {}   # The inner, center and outer radii per track
    track_crop = trackunit_height * (1 - self.track_size) / 2.  # 'step back' in pixels
    for track in trackunits:
        top = trackunits[track][1] * trackunit_height - track_crop
        btm = trackunits[track][0] * trackunit_height + track_crop
        ctr = btm + (top - btm) / 2.
        self.track_radii[track] = (btm, ctr, top)
def draw(self):
    """Draw a circular diagram of the stored data."""
    # Instantiate the drawing canvas
    self.drawing = Drawing(self.pagesize[0], self.pagesize[1])

    feature_elements = []   # holds feature elements
    feature_labels = []     # holds feature labels
    greytrack_bgs = []      # holds track background
    greytrack_labels = []   # holds track foreground labels
    scale_axes = []         # holds scale axes
    scale_labels = []       # holds scale axis labels

    # Get tracks to be drawn and set track sizes
    self.drawn_tracks = self._parent.get_drawn_levels()
    self.set_track_heights()

    # Go through each track in the parent (if it is to be drawn) one by
    # one and collate the data as drawing elements
    for track_level in self._parent.get_drawn_levels():
        self.current_track_level = track_level
        track = self._parent[track_level]
        gbgs, glabels = self.draw_greytrack(track)  # Greytracks
        greytrack_bgs.append(gbgs)
        greytrack_labels.append(glabels)
        features, flabels = self.draw_track(track)  # Features and graphs
        feature_elements.append(features)
        feature_labels.append(flabels)
        if track.scale:
            axes, slabels = self.draw_scale(track)  # Scale axes
            scale_axes.append(axes)
            scale_labels.append(slabels)

    # Groups listed in order of addition to page (from back to front):
    # track backgrounds, features and graphs, scale axes, scale labels,
    # feature labels, track labels
    element_groups = [greytrack_bgs, feature_elements,
                      scale_axes, scale_labels,
                      feature_labels, greytrack_labels
                      ]
    for element_group in element_groups:
        for element_list in element_group:
            # Plain loop rather than a throwaway list comprehension:
            # drawing.add() is called purely for its side effect
            for element in element_list:
                self.drawing.add(element)

    if self.tracklines:     # Draw test tracks over top of diagram
        self.draw_test_tracks()
def draw_track(self, track):
    """Return (elements, labels) for everything drawn on one track.

        o track     Track object

    Returns a tuple of (list of track elements, list of track labels).
    """
    elements = []   # collected feature/graph drawing elements
    labels = []     # collected labels for those elements
    # Dispatch on the concrete set class - features and graphs are
    # rendered by different methods
    renderers = {FeatureSet: self.draw_feature_set,
                 GraphSet: self.draw_graph_set,
                 }
    for set in track.get_sets():
        set_elements, set_labels = renderers[set.__class__](set)
        elements.extend(set_elements)
        labels.extend(set_labels)
    return elements, labels
def draw_feature_set(self, set):
    """Return (feature elements, label elements) for a FeatureSet.

        o set       FeatureSet object
    """
    feature_elements = []   # drawing elements for the features themselves
    label_elements = []     # drawing elements for the feature labels
    # Only render features at least partly inside the drawn region
    for feature in set.get_features():
        if not (self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end)):
            continue
        drawn, drawn_labels = self.draw_feature(feature)
        feature_elements.extend(drawn)
        label_elements.extend(drawn_labels)
    return feature_elements, label_elements
def draw_feature(self, feature):
    """Return (elements, labels) describing a single feature.

        o feature   Feature containing location info
    """
    sigils = []     # drawable elements for this feature
    labels = []     # labels for those elements
    if feature.hide:
        # Feature is flagged invisible - nothing to draw
        return sigils, labels
    # A feature may consist of several (sub)locations: draw each one
    for locstart, locend in feature.locations:
        sigil, label = self.get_feature_sigil(feature, locstart, locend)
        sigils.append(sigil)
        if label is not None:
            labels.append(label)
    return sigils, labels
def get_feature_sigil(self, feature, locstart, locend, **kwargs):
    """Return a drawable sigil for one feature location, plus any label.

        o feature       Feature object
        o locstart      The start position of the feature
        o locend        The end position of the feature

    Returns (sigil, labelgroup); labelgroup is None when the feature
    does not request a label.
    """
    # Establish the co-ordinates for the sigil: the radial extent of the
    # current track, and the angles subtended by the location ends
    btm, ctr, top = self.track_radii[self.current_track_level]
    startangle, startcos, startsin = self.canvas_angle(locstart)
    endangle, endcos, endsin = self.canvas_angle(locend)
    midangle, midcos, midsin = self.canvas_angle(float(locend+locstart)/2)  # currently unused
    # Distribution dictionary for various ways of drawing the feature
    # Each method takes the inner and outer radii, the start and end angle
    # subtended at the diagram center, and the color as arguments
    draw_methods = {'BOX': self._draw_arc,
                    'ARROW': self._draw_arc_arrow,
                    }
    # Get sigil for the feature, location dependent on the feature strand
    method = draw_methods[feature.sigil]
    kwargs['head_length_ratio'] = feature.arrowhead_length
    kwargs['shaft_height_ratio'] = feature.arrowshaft_height
    #Support for clickable links... needs ReportLab 2.4 or later
    #which added support for links in SVG output.
    if hasattr(feature, "url") :
        kwargs["hrefURL"] = feature.url
        kwargs["hrefTitle"] = feature.name
    # A white fill would be invisible, so force a black border for it
    if feature.color == colors.white:
        border = colors.black
    else:
        border = feature.color
    # Forward-strand features sit in the outer half of the track,
    # reverse-strand in the inner half, strandless span the whole track
    if feature.strand == 1:
        sigil = method(ctr, top, startangle, endangle, feature.color,
                       border, orientation='right', **kwargs)
    elif feature.strand == -1:
        sigil = method(btm, ctr, startangle, endangle, feature.color,
                       border, orientation='left', **kwargs)
    else:
        sigil = method(btm, top, startangle, endangle, feature.color,
                       border, **kwargs)
    if feature.label:   # Feature needs a label
        label = String(0, 0, feature.name.strip(),
                       fontName=feature.label_font,
                       fontSize=feature.label_size,
                       fillColor=feature.label_color)
        labelgroup = Group(label)
        label_angle = startangle + 0.5 * pi     # Make text radial
        sinval, cosval = startsin, startcos
        if feature.strand != -1:
            # Feature is on top, or covers both strands: anchor the
            # label at the outer radius
            if startangle < pi:     # Turn text round and anchor end to inner radius
                sinval, cosval = endsin, endcos
                label_angle = endangle - 0.5 * pi
                labelgroup.contents[0].textAnchor = 'end'
            pos = self.xcenter+top*sinval
            coslabel = cos(label_angle)
            sinlabel = sin(label_angle)
            # 2D rotation matrix (coslabel, -sinlabel, sinlabel,
            # coslabel) plus translation to the anchor point
            labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
                                    pos, self.ycenter+top*cosval)
        else:
            # Feature on bottom strand: anchor the label at the inner radius
            if startangle < pi:     # Turn text round
                sinval, cosval = endsin, endcos
                label_angle = endangle - 0.5 * pi
            else:
                # NOTE(review): unlike the top-strand branch above,
                # textAnchor is only set on this else path - confirm
                # the asymmetry is intentional
                labelgroup.contents[0].textAnchor = 'end'
            pos = self.xcenter+btm*sinval
            coslabel = cos(label_angle)
            sinlabel = sin(label_angle)
            labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
                                    pos, self.ycenter+btm*cosval)
    else:
        labelgroup = None
    #if locstart > locend:
    #    print locstart, locend, feature.strand, sigil, feature.name
    #print locstart, locend, feature.name
    return sigil, labelgroup
def draw_graph_set(self, set):
    """Return (graph elements, graph labels) for a GraphSet.

        o set       GraphSet object

    Graphs carry no labels, so the second list is always empty.
    """
    graph_elements = []     # Holds graph elements
    # Map each supported graph style onto its drawing routine
    draw_by_style = {'line': self.draw_line_graph,
                     'heat': self.draw_heat_graph,
                     'bar': self.draw_bar_graph,
                     }
    for graph in set.get_graphs():
        graph_elements.extend(draw_by_style[graph.style](graph))
    return graph_elements, []
def draw_line_graph(self, graph):
    """Return the passed GraphData as a line graph of drawable elements.

        o graph     GraphData object

    Values are plotted radially about the track's center ring: the
    midpoint value maps onto the center radius and deviations are
    scaled so the largest excursion spans half the track height.
    """
    line_elements = []  # holds drawable elements
    # Get graph data and the radial extent of the current track
    data_quartiles = graph.quartiles()
    minval, maxval = data_quartiles[0], data_quartiles[4]
    btm, ctr, top = self.track_radii[self.current_track_level]
    trackheight = 0.5*(top-btm)
    data = graph[self.start:self.end]
    if not data:    # No points in the drawn region: nothing to plot
        return line_elements
    # midval is the value at which the x-axis is plotted, and is the
    # central ring in the track
    if graph.center is None:
        midval = (maxval + minval)/2.
    else:
        midval = graph.center
    # Whichever is the greatest difference, max-midval or min-midval,
    # sets the radial scaling (pixel units resolved along the y-axis)
    resolution = max((midval-minval), (maxval-midval))
    if resolution == 0:
        # All values equal midval: avoid ZeroDivisionError (same guard
        # as draw_bar_graph)
        resolution = trackheight
    # Start from first data point
    pos, val = data[0]
    lastangle, lastcos, lastsin = self.canvas_angle(pos)
    posheight = trackheight*(val-midval)/resolution + ctr
    lastx = self.xcenter+posheight*lastsin  # start xy coords
    lasty = self.ycenter+posheight*lastcos
    # Skip the first point - it was consumed above (previously the loop
    # re-visited it, appending a degenerate zero-length line)
    for pos, val in data[1:]:
        posangle, poscos, possin = self.canvas_angle(pos)
        posheight = trackheight*(val-midval)/resolution + ctr
        x = self.xcenter+posheight*possin   # next xy coords
        y = self.ycenter+posheight*poscos
        line_elements.append(Line(lastx, lasty, x, y,
                                  strokeColor = graph.poscolor,
                                  strokeWidth = graph.linewidth))
        lastx, lasty = x, y
    return line_elements
def draw_bar_graph(self, graph):
    """Return the passed Graph as a bar graph of drawable elements.

        o graph     Graph object

    At each point contained in the graph data a radial bar is drawn
    from the track center ring to the height of the datapoint value;
    positive values use graph.poscolor and negative graph.negcolor.
    """
    bar_elements = []
    # Set the number of pixels per unit for the data
    data_quartiles = graph.quartiles()
    minval, maxval = data_quartiles[0], data_quartiles[4]
    btm, ctr, top = self.track_radii[self.current_track_level]
    trackheight = 0.5*(top-btm)
    # midval is the value at which the x-axis is plotted, and is the
    # central ring in the track
    if graph.center is None:
        midval = (maxval + minval)/2.
    else:
        midval = graph.center
    # Convert data into 'binned' blocks, covering half the distance to
    # the next data point on either side, accounting for the ends of
    # fragments and tracks
    newdata = intermediate_points(self.start, self.end,
                                  graph[self.start:self.end])
    # Whichever is the greatest difference, max-midval or min-midval,
    # sets the radial scaling (pixel units resolved along the y-axis).
    # (Unused locals 'data' and 'datarange' removed - only 'resolution'
    # is used for scaling here.)
    resolution = max((midval-minval), (maxval-midval))
    if resolution == 0:
        resolution = trackheight
    # Create elements for the bar graph based on newdata
    for pos0, pos1, val in newdata:
        pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
        pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
        barval = trackheight*(val-midval)/resolution
        if barval >= 0:
            barcolor = graph.poscolor
        else:
            barcolor = graph.negcolor
        # Draw bar
        bar_elements.append(self._draw_arc(ctr, ctr+barval, pos0angle,
                                           pos1angle, barcolor))
    return bar_elements
def draw_heat_graph(self, graph):
    """Return the passed Graph as a heat map of drawable elements.

        o graph     Graph object

    Each datapoint becomes a full-track-height arc whose colour is
    interpolated between graph.poscolor and graph.negcolor according
    to the value's position within the overall data range.
    """
    heat_elements = []  # holds drawable elements
    # The overall data range fixes the colour interpolation endpoints
    quartiles = graph.quartiles()
    minval, maxval = quartiles[0], quartiles[4]
    midval = (maxval + minval)/2.   # mid is the value at the X-axis
    btm, ctr, top = self.track_radii[self.current_track_level]
    trackheight = top - btm
    # Bin the data so each block extends halfway to its neighbours,
    # accounting for the ends of fragments and tracks
    newdata = intermediate_points(self.start, self.end,
                                  graph[self.start:self.end])
    for pos0, pos1, val in newdata:
        startangle = self.canvas_angle(pos0)[0]
        endangle = self.canvas_angle(pos1)[0]
        # Interpolate the heat colour from the value's offset within
        # the data range (large positive -> poscolor, large negative
        # -> negcolor)
        heat = colors.linearlyInterpolatedColor(graph.poscolor,
                                                graph.negcolor,
                                                maxval, minval, val)
        # Draw heat box
        heat_elements.append(self._draw_arc(btm, top, startangle,
                                            endangle, heat, border=heat))
    return heat_elements
def draw_scale(self, track):
    """Return (scale elements, scale labels) for the passed track.

        o track     Track object

    Draws the circular axis at the track's center radius, large and
    small ticks at the configured intervals, and - for graph-carrying
    tracks with axis_labels set - radial y-axis spokes with min/mid/max
    value labels.
    """
    scale_elements = []     # holds axes and ticks
    scale_labels = []       # holds labels
    if not track.scale:     # no scale required, exit early
        return [], []
    # Get track locations
    btm, ctr, top = self.track_radii[self.current_track_level]
    trackheight = (top-ctr)
    # X-axis
    if self.sweep < 1:
        #Draw an arc, leaving out the wedge
        p = ArcPath(strokeColor=track.scale_color, fillColor=None)
        #Note reportlab counts angles anti-clockwise from the horizontal
        #(as in mathematics, e.g. complex numbers and polar coordinates)
        #in degrees.
        p.addArc(self.xcenter, self.ycenter, ctr,
                 startangledegrees=90-360*self.sweep,
                 endangledegrees=90)
        scale_elements.append(p)
        del p
    else:
        #Draw a full circle
        scale_elements.append(Circle(self.xcenter, self.ycenter, ctr,
                                     strokeColor=track.scale_color,
                                     fillColor=None))
    if track.scale_ticks:   # Ticks are required on the scale
        # Draw large ticks
        #I want the ticks to be consistently positioned relative to
        #the start of the sequence (position 0), not relative to the
        #current viewpoint (self.start and self.end)
        ticklen = track.scale_largeticks * trackheight
        tickiterval = int(track.scale_largetick_interval)
        #Note that we could just start the list of ticks using
        #range(0,self.end,tickinterval) and the filter out the
        #ones before self.start - but this seems wasteful.
        #Using tickiterval * (self.start/tickiterval) is a shortcut.
        largeticks = [pos for pos \
                      in range(tickiterval * (self.start//tickiterval),
                               int(self.end),
                               tickiterval) \
                      if pos >= self.start]
        for tickpos in largeticks:
            tick, label = self.draw_tick(tickpos, ctr, ticklen,
                                         track,
                                         track.scale_largetick_labels)
            scale_elements.append(tick)
            if label is not None:   # If there's a label, add it
                scale_labels.append(label)
        # Draw small ticks (same positioning approach as above)
        ticklen = track.scale_smallticks * trackheight
        tickiterval = int(track.scale_smalltick_interval)
        smallticks = [pos for pos \
                      in range(tickiterval * (self.start//tickiterval),
                               int(self.end),
                               tickiterval) \
                      if pos >= self.start]
        for tickpos in smallticks:
            tick, label = self.draw_tick(tickpos, ctr, ticklen,
                                         track,
                                         track.scale_smalltick_labels)
            scale_elements.append(tick)
            if label is not None:   # If there's a label, add it
                scale_labels.append(label)
    # Check to see if the track contains a graph - if it does, get the
    # minimum and maximum values, and put them on the scale Y-axis
    # at 60 degree intervals, ordering the labels by graph_id
    if track.axis_labels:
        for set in track.get_sets():
            if set.__class__ is GraphSet:
                # Y-axis
                # NOTE(review): seven spokes at 60 degrees means n=0
                # and n=6 coincide (0 and 2*pi) - confirm this
                # duplication is intentional.  (xrange: Python 2 code.)
                for n in xrange(7):
                    angle = n * 1.0471975511965976  # pi/3, i.e. 60 degrees
                    ticksin, tickcos = sin(angle), cos(angle)
                    x0, y0 = self.xcenter+btm*ticksin, self.ycenter+btm*tickcos
                    x1, y1 = self.xcenter+top*ticksin, self.ycenter+top*tickcos
                    scale_elements.append(Line(x0, y0, x1, y1,
                                               strokeColor=track.scale_color))
                    # Collect min/max/mid value strings for every graph
                    # in the set, ordered by graph iteration order
                    graph_label_min = []
                    graph_label_max = []
                    graph_label_mid = []
                    for graph in set.get_graphs():
                        quartiles = graph.quartiles()
                        minval, maxval = quartiles[0], quartiles[4]
                        if graph.center is None:
                            midval = (maxval + minval)/2.
                            graph_label_min.append("%.3f" % minval)
                            graph_label_max.append("%.3f" % maxval)
                            graph_label_mid.append("%.3f" % midval)
                        else:
                            # With an explicit center, label a range
                            # symmetric about it
                            diff = max((graph.center-minval),
                                       (maxval-graph.center))
                            minval = graph.center-diff
                            maxval = graph.center+diff
                            midval = graph.center
                            graph_label_mid.append("%.3f" % midval)
                            graph_label_min.append("%.3f" % minval)
                            graph_label_max.append("%.3f" % maxval)
                    xmid, ymid = (x0+x1)/2., (y0+y1)/2.
                    # One label at the spoke's inner end, outer end and
                    # middle, joining all graphs' values with semicolons
                    for limit, x, y, in [(graph_label_min, x0, y0),
                                         (graph_label_max, x1, y1),
                                         (graph_label_mid, xmid, ymid)]:
                        label = String(0, 0, ";".join(limit),
                                       fontName=track.scale_font,
                                       fontSize=track.scale_fontsize,
                                       fillColor=track.scale_color)
                        label.textAnchor = 'middle'
                        labelgroup = Group(label)
                        # Rotate the label to match the spoke direction
                        labelgroup.transform = (tickcos, -ticksin,
                                                ticksin, tickcos,
                                                x, y)
                        scale_labels.append(labelgroup)
    return scale_elements, scale_labels
def draw_tick(self, tickpos, ctr, ticklen, track, draw_label):
    """Return (tick, labelgroup) drawing elements for one scale tick.

        o tickpos       Int, position of the tick on the sequence
        o ctr           Float, radius at which the tick starts
        o ticklen       How long to draw the tick
        o track         Track, the track the tick is drawn on
        o draw_label    Boolean, write the tick label?

    labelgroup is None when draw_label is false.
    """
    # The tick runs radially outwards from radius ctr to ctr + ticklen
    tickangle, tickcos, ticksin = self.canvas_angle(tickpos)
    inner_x, inner_y = self.xcenter+ctr*ticksin, self.ycenter+ctr*tickcos
    outer_x, outer_y = (self.xcenter+(ctr+ticklen)*ticksin,
                        self.ycenter+(ctr+ticklen)*tickcos)
    tick = Line(inner_x, inner_y, outer_x, outer_y,
                strokeColor=track.scale_color)
    labelgroup = None
    if draw_label:  # Put tick position on as label
        # 'SInt' format abbreviates large positions to Kbp/Mbp
        if track.scale_format == 'SInt' and tickpos >= 1000000:
            tickstring = str(tickpos//1000000) + " Mbp"
        elif track.scale_format == 'SInt' and tickpos >= 1000:
            tickstring = str(tickpos//1000) + " Kbp"
        else:
            tickstring = str(tickpos)
        label = String(0, 0, tickstring,    # Make label string
                       fontName=track.scale_font,
                       fontSize=track.scale_fontsize,
                       fillColor=track.scale_color)
        if tickangle > pi:
            # Left half of the circle: anchor the end of the text to
            # the tick so it reads outwards
            label.textAnchor = 'end'
        labelgroup = Group(label)
        labelgroup.transform = (1, 0, 0, 1, outer_x, outer_y)
    return tick, labelgroup
def draw_test_tracks(self):
    """Add blue circles at each drawn track's inner and outer radius,
    with a green circle at its center radius, to the drawing.
    """
    # Add lines only for drawn tracks
    for track in self.drawn_tracks:
        btm, ctr, top = self.track_radii[track]
        # Outer and inner bounds in blue, center line in green -
        # drawn in the original top/center/bottom order
        for radius, edge_color in ((top, colors.blue),
                                   (ctr, colors.green),
                                   (btm, colors.blue)):
            self.drawing.add(Circle(self.xcenter, self.ycenter, radius,
                                    strokeColor=edge_color,
                                    fillColor=None))
def draw_greytrack(self, track):
    """Return a grey background and superposed name labels for a track.

        o track     Track object

    Returns ([background elements], [label elements]); both lists are
    empty when track.greytrack is not set.
    """
    greytrack_bgs = []      # Holds track backgrounds
    greytrack_labels = []   # Holds track foreground labels
    if not track.greytrack: # No greytrack required, return early
        return [], []
    # Get track location
    btm, ctr, top = self.track_radii[self.current_track_level]
    # Make background
    if self.sweep < 1:
        #Make a partial circle, a large arc box
        #This method assumes the correct center for us.
        bg = self._draw_arc(btm, top, 0, 2*pi*self.sweep,
                            colors.Color(0.96, 0.96, 0.96))
    else:
        #Make a full circle (using a VERY thick linewidth)
        bg = Circle(self.xcenter, self.ycenter, ctr,
                    strokeColor = colors.Color(0.96, 0.96, 0.96),
                    fillColor=None, strokeWidth=top-btm)
    greytrack_bgs.append(bg)
    if track.greytrack_labels:  # Labels are required for this track
        # NOTE(review): labelstep is 0 when self.length <
        # track.greytrack_labels, which would make range() raise -
        # confirm inputs guarantee length >= label count
        labelstep = self.length//track.greytrack_labels     # label interval
        for pos in range(self.start, self.end, labelstep):
            label = String(0, 0, track.name,                    # Add a new label at
                           fontName=track.greytrack_font,       # each interval
                           fontSize=track.greytrack_fontsize,
                           fillColor=track.greytrack_fontcolor)
            theta, costheta, sintheta = self.canvas_angle(pos)
            x,y = self.xcenter+btm*sintheta, self.ycenter+btm*costheta  # start text halfway up marker
            labelgroup = Group(label)
            labelangle = self.sweep*2*pi*(pos-self.start)/self.length - pi/2
            if theta > pi:
                label.textAnchor = 'end'    # Anchor end of text to inner radius
                labelangle += pi            # and reorient it
            cosA, sinA = cos(labelangle), sin(labelangle)
            # Rotation matrix plus translation to the anchor point
            labelgroup.transform = (cosA, -sinA, sinA,
                                    cosA, x, y)
            # NOTE(review): this compares the sequence length against a
            # pixel x co-ordinate, which looks like a units mix-up -
            # confirm the intended overrun check before changing
            if not self.length-x <= labelstep:  # Don't overrun the circle
                greytrack_labels.append(labelgroup)
    return greytrack_bgs, greytrack_labels
def canvas_angle(self, base):
    """Return (angle, cos(angle), sin(angle)) for a sequence position.

    The angle is measured clockwise from the top of the diagram and
    scaled by self.sweep, so the drawn wedge spans the whole sequence.
    """
    # Float multiplication happens before the division, so this is true
    # division even under Python 2 integer arguments
    theta = self.sweep*2*pi*(base-self.start)/self.length
    return (theta, cos(theta), sin(theta))
def _draw_arc(self, inner_radius, outer_radius, startangle, endangle,
              color, border=None, colour=None, **kwargs):
    """Return a drawable arced box between the given radii and angles.

        o inner_radius  Float distance of inside of arc from drawing center

        o outer_radius  Float distance of outside of arc from drawing center

        o startangle    Float angle subtended by start of arc at drawing
                        center (in radians)

        o endangle      Float angle subtended by end of arc at drawing
                        center (in radians)

        o color         colors.Color object for the arc fill (overridden
                        by the backwards-compatible UK spelling argument,
                        colour)

        o border        colors.Color for the outline; defaults to the fill
                        colour, except that a white fill with no explicit
                        border gets a black outline

    Returns a closed path object describing an arced box corresponding to
    the passed values.  For very small angles, a simple four sided
    polygon is used.
    """
    #Let the UK spelling (colour) override the USA spelling (color)
    if colour is not None:
        color = colour
    # Resolve the outline colour BEFORE defaulting border to the fill
    # colour - previously 'border = color' ran first, which made the
    # 'black border on white boxes' branch below unreachable
    if color == colors.white and border is None:    # Force black border on
        strokecolor = colors.black                  # white boxes with
    elif border is None:                            # undefined border, else
        strokecolor = color                         # use fill colour
    else:
        strokecolor = border
    if abs(float(endangle - startangle))>.01:
        # Wide arc, must use full curves
        p = ArcPath(strokeColor=strokecolor,
                    fillColor=color,
                    strokewidth=0)  # NOTE(review): reportlab spells this
                                    # 'strokeWidth' - confirm before changing
        #Note reportlab counts angles anti-clockwise from the horizontal
        #(as in mathematics, e.g. complex numbers and polar coordinates)
        #but we use clockwise from the vertical. Also reportlab uses
        #degrees, but we use radians.
        p.addArc(self.xcenter, self.ycenter, inner_radius,
                 90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
                 moveTo=True)
        p.addArc(self.xcenter, self.ycenter, outer_radius,
                 90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
                 reverse=True)
        p.closePath()
        return p
    else:
        #Cheat and just use a four sided polygon.
        # Calculate trig values for angle and coordinates
        startcos, startsin = cos(startangle), sin(startangle)
        endcos, endsin = cos(endangle), sin(endangle)
        x0,y0 = self.xcenter, self.ycenter      # origin of the circle
        x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
        x2,y2 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
        x3,y3 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
        x4,y4 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
        # Pass the resolved stroke colour so white boxes keep their
        # black outline in the polygon branch too
        return draw_polygon([(x1,y1),(x2,y2),(x3,y3),(x4,y4)], color,
                            strokecolor)
def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
color, border=None,
shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right',
colour=None, **kwargs):
"""Draw an arrow along an arc."""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
#if orientation == 'right':
# startangle, endangle = min(startangle, endangle), max(startangle, endangle)
#elif orientation == 'left':
# startangle, endangle = max(startangle, endangle), min(startangle, endangle)
#else:
startangle, endangle = min(startangle, endangle), max(startangle, endangle)
if orientation != "left" and orientation != "right":
raise ValueError("Invalid orientation %s, should be 'left' or 'right'" \
% repr(orientation))
angle = float(endangle - startangle) # angle subtended by arc
middle_radius = 0.5*(inner_radius+outer_radius)
boxheight = outer_radius - inner_radius
shaft_height = boxheight*shaft_height_ratio
shaft_inner_radius = middle_radius - 0.5*shaft_height
shaft_outer_radius = middle_radius + 0.5*shaft_height
headangle_delta = max(0.0,min(abs(boxheight)*head_length_ratio/middle_radius, abs(angle)))
if angle < 0:
headangle_delta *= -1 #reverse it
if orientation=="right":
headangle = endangle-headangle_delta
else:
headangle = startangle+headangle_delta
if startangle <= endangle:
headangle = max(min(headangle, endangle), startangle)
else:
headangle = max(min(headangle, startangle), endangle)
assert startangle <= headangle <= endangle \
or endangle <= headangle <= startangle, \
(startangle, headangle, endangle, angle)
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
headcos, headsin = cos(headangle), sin(headangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
if 0.5 >= abs(angle) and abs(headangle_delta) >= abs(angle):
#If the angle is small, and the arrow is all head,
#cheat and just use a triangle.
if orientation=="right":
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
x3,y3 = (x0+middle_radius*endsin, y0+middle_radius*endcos)
else:
x1,y1 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x2,y2 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x3,y3 = (x0+middle_radius*startsin, y0+middle_radius*startcos)
#return draw_polygon([(x1,y1),(x2,y2),(x3,y3)], color, border,
# stroke_line_join=1)
return Polygon([x1,y1,x2,y2,x3,y3],
strokeColor=border or color,
fillColor=color,
strokeLineJoin=1, #1=round, not mitre!
strokewidth=0)
elif orientation=="right":
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
else:
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
moveTo=True, reverse=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
reverse=False)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
#TODO - two staight lines is only a good approximation for small
#head angle, in general will need to curved lines here:
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
| bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | 51,202 | CircularDrawer(AbstractDrawer)
Inherits from:
o AbstractDrawer
Provides:
Methods:
o __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1) Called on instantiation
o set_page_size(self, pagesize, orientation) Set the page size to the
passed size and orientation
o set_margins(self, x, y, xl, xr, yt, yb) Set the drawable area of the
page
o set_bounds(self, start, end) Set the bounds for the elements to be
drawn
o is_in_bounds(self, value) Returns a boolean for whether the position
is actually to be drawn
o __len__(self) Returns the length of sequence that will be drawn
o draw(self) Place the drawing elements on the diagram
o init_fragments(self) Calculate information
about sequence fragment locations on the drawing
o set_track_heights(self) Calculate information about the offset of
each track from the fragment base
o draw_test_tracks(self) Add lines demarcating each track to the
drawing
o draw_track(self, track) Return the contents of the passed track as
drawing elements
o draw_scale(self, track) Return a scale for the passed track as
drawing elements
o draw_greytrack(self, track) Return a grey background and superposed
label for the passed track as drawing
elements
o draw_feature_set(self, set) Return the features in the passed set as
drawing elements
o draw_feature(self, feature) Return a single feature as drawing
elements
o get_feature_sigil(self, feature, x0, x1, fragment) Return a single
feature as its sigil in drawing elements
o draw_graph_set(self, set) Return the data in a set of graphs as
drawing elements
o draw_line_graph(self, graph) Return the data in a graph as a line
graph in drawing elements
o draw_heat_graph(self, graph) Return the data in a graph as a heat
graph in drawing elements
o draw_bar_graph(self, graph) Return the data in a graph as a bar
graph in drawing elements
o canvas_angle(self, base) Return the angle, and cos and sin of
that angle, subtended by the passed
base position at the diagram center
o draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color) Return a drawable element describing an arc
Attributes:
 o tracklines Boolean for whether to draw lines delineating tracks
o pagesize Tuple describing the size of the page in pixels
o x0 Float X co-ord for leftmost point of drawable area
o xlim Float X co-ord for rightmost point of drawable area
o y0 Float Y co-ord for lowest point of drawable area
o ylim Float Y co-ord for topmost point of drawable area
o pagewidth Float pixel width of drawable area
o pageheight Float pixel height of drawable area
o xcenter Float X co-ord of center of drawable area
o ycenter Float Y co-ord of center of drawable area
o start Int, base to start drawing from
o end Int, base to stop drawing at
o length Size of sequence to be drawn
o track_size Float (0->1) the proportion of the track height to
draw in
o drawing Drawing canvas
o drawn_tracks List of ints denoting which tracks are to be drawn
o current_track_level Int denoting which track is currently being
drawn
o track_offsets Dictionary of number of pixels that each track top,
center and bottom is offset from the base of a
fragment, keyed by track
o sweep Float (0->1) the proportion of the circle circumference to
use for the diagram
__init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1)
o parent Diagram object containing the data that the drawer
draws
o pagesize String describing the ISO size of the image, or a tuple
of pixels
o orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
o x Float (0->1) describing the relative size of the X
margins to the page
o y Float (0->1) describing the relative size of the Y
margins to the page
o xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
o xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
o xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
o yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
o yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
o start Int, the position to begin drawing the diagram at
o end Int, the position to stop drawing the diagram at
o tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
o track_size The proportion of the available track height that
should be taken up in drawing
 o circular Boolean flag to show whether the passed sequence is
circular or not
draw_arc(self, inner_radius, outer_radius, startangle, endangle, color)
-> Group
o inner_radius Float distance of inside of arc from drawing center
o outer_radius Float distance of outside of arc from drawing center
o startangle Float angle subtended by start of arc at drawing center
(in radians)
o endangle Float angle subtended by end of arc at drawing center
(in radians)
o color colors.Color object for arc (overridden by backwards
compatible argument with UK spelling, colour).
Returns a closed path object describing an arced box corresponding to
the passed values. For very small angles, a simple four sided
polygon is used.
Draw an arrow along an arc.
canvas_angle(self, base) -> (float, float, float)
draw(self)
Draw a circular diagram of the stored data
draw_bar_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for a bar graph of the passed
Graph object
draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])
o feature Feature containing location info
Returns tuple of (list of elements describing single feature, list
of labels for those elements)
draw_feature_set(self, set) -> ([element, element,...], [element, element,...])
o set FeatureSet object
Returns a tuple (list of elements describing features, list of
labels for elements)
draw_graph_set(self, set) -> ([element, element,...], [element, element,...])
o set GraphSet object
Returns tuple (list of graph elements, list of graph labels)
draw_greytrack(self)
o track Track object
Put in a grey background to the current track, if the track
specifies that we should
draw_heat_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for the heat graph
draw_line_graph(self, graph, center) -> [element, element,...]
o graph GraphData object
Returns a line graph as a list of drawable elements
draw_scale(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Returns a tuple of (list of elements in the scale, list of labels
in the scale)
draw_test_tracks(self)
 Draw blue lines indicating tracks to be drawn, with a green line
down the center.
draw_tick(self, tickpos, ctr, ticklen) -> (element, element)
o tickpos Int, position of the tick on the sequence
o ctr Float, Y co-ord of the center of the track
o ticklen How long to draw the tick
o track Track, the track the tick is drawn on
o draw_label Boolean, write the tick label?
Returns a drawing element that is the tick on the scale
draw_track(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Return tuple of (list of track elements, list of track labels)
get_feature_sigil(self, feature, x0, x1, fragment) -> (element, element)
o feature Feature object
o locstart The start position of the feature
o locend The end position of the feature
Returns a drawable indicator of the feature, and any required label
for it
set_track_heights(self)
Since tracks may not be of identical heights, the bottom and top
radius for each track is stored in a dictionary - self.track_radii,
keyed by track number
CircularDrawer module
Provides:
o CircularDrawer - Drawing object for circular diagrams
For drawing capabilities, this module uses reportlab to draw and write
the diagram:
http://www.reportlab.com
For dealing with biological information, the package expects BioPython
objects:
http://www.biopython.org
Copyright 2003-2008 by Leighton Pritchard. All rights reserved. Revisions copyright 2008-2009 by Peter Cock. This code is part of the Biopython distribution and governed by its license. Please see the LICENSE file that should have been included as part of this package. Contact: Leighton Pritchard, Scottish Crop Research Institute, Invergowrie, Dundee, Scotland, DD2 5DA, UK L.Pritchard@scri.ac.uk ReportLab imports GenomeDiagram imports Use the superclass' instantiation method Useful measurements on the page Determine the proportion of the circumference around which information will be drawn The 'highest' track to draw Holds total number of 'units' taken up by all tracks Holds start and end units for each track keyed by track number placeholder variable track numbers to 'draw' Get track height ...or default to 1 increment total track unit height move to next height Calculate top and bottom radii for each track The inner, outer and center radii for each track 'step back' in pixels Instantiate the drawing canvas holds feature elements holds feature labels holds track background holds track foreground labels holds scale axes holds scale axis labels Get tracks to be drawn and set track sizes Go through each track in the parent (if it is to be drawn) one by one and collate the data as drawing elements Greytracks Features and graphs Scale axes Groups listed in order of addition to page (from back to front) Draw track backgrounds Draw features and graphs Draw scale axes Draw scale labels Draw feature labels Draw track labels Draw test tracks over top of diagram Holds elements for features and graphs Holds labels for features and graphs Distribution dictionary for dealing with different set types Draw the feature or graph setsprint 'draw feature set' Holds diagram elements belonging to the features Holds diagram elements belonging to feature labels Collect all the elements for the feature set Holds drawable elements for a single feature Holds labels for a single feature 
Don't show feature: return early A single feature may be split into subfeatures, so loop over them Get sigil for the feature/ each subfeature If there's a label Establish the co-ordinates for the sigil Distribution dictionary for various ways of drawing the feature Each method takes the inner and outer radii, the start and end angle subtended at the diagram center, and the color as arguments Get sigil for the feature, location dependent on the feature strand Support for clickable links... needs ReportLab 2.4 or laterwhich added support for links in SVG output. Feature needs a label Make text radial Feature is on top, or covers both strands Turn text round and anchor end to inner radius Feature on bottom strand Turn text round and anchor end to inner radiusif locstart > locend: print locstart, locend, feature.strand, sigil, feature.nameprint locstart, locend, feature.nameprint 'draw graph set' Holds graph elements Distribution dictionary for how to draw the graphprint graph.nameprint '\tdraw_line_graph' holds drawable elements Get graph data midval is the value at which the x-axis is plotted, and is the central ring in the track Whichever is the greatest difference: max-midval or min-midval, is taken to specify the number of pixel units resolved along the y-axis Start from first data point We calculate the track height start xy coords next xy coordsprint '\tdraw_bar_graph' At each point contained in the graph data, we draw a vertical bar from the track center to the height of the datapoint value (positive values go up in one color, negative go down in the alternative color). 
Set the number of pixels per unit for the data midval is the value at which the x-axis is plotted, and is the central ring in the track Convert data into 'binned' blocks, covering half the distance to the next data point on either side, accounting for the ends of fragments and tracks Whichever is the greatest difference: max-midval or min-midval, is taken to specify the number of pixel units resolved along the y-axis Create elements for the bar graph based on newdata Draw barprint '\tdraw_heat_graph' At each point contained in the graph data, we draw a box that is the full height of the track, extending from the midpoint between the previous and current data points to the midpoint between the current and next data points holds drawable elements Get graph data mid is the value at the X-axis Create elements on the graph, indicating a large positive value by the graph's poscolor, and a large negative value by the graph's negcolor attributes Calculate the heat color, based on the differential between the value and the median value Draw heat box holds axes and ticks holds labels no scale required, exit early Get track locations X-axisDraw an arc, leaving out the wedgeNote reportlab counts angles anti-clockwise from the horizontal(as in mathematics, e.g. complex numbers and polar coordinates)in degrees.Draw a full circle Ticks are required on the scale Draw large ticks I want the ticks to be consistently positioned relative tothe start of the sequence (position 0), not relative to thecurrent viewpoint (self.start and self.end)Note that we could just start the list of ticks usingrange(0,self.end,tickinterval) and the filter out theones before self.start - but this seems wasteful.Using tickiterval * (self.start/tickiterval) is a shortcut. 
If there's a label, add it Draw small ticks If there's a label, add it Check to see if the track contains a graph - if it does, get the minimum and maximum values, and put them on the scale Y-axis at 60 degree intervals, ordering the labels by graph_id Y-axis Calculate tick co-ordinates Calculate height of text label so it can be offset on lower half of diagram LP: not used, as not all fonts have ascent_descent data in reportlab.pdfbase._fontdatalabel_offset = _fontdata.ascent_descent[track.scale_font][0]*\ track.scale_fontsize/1000. Put tick position on as label Make label string LP: This label_offset depends on ascent_descent data, which is not available for all fonts, so has been deprecated.if 0.5*pi < tickangle < 1.5*pi: y1 -= label_offsetprint 'drawing test tracks' Add lines only for drawn tracks top line middle line bottom line Holds track backgrounds Holds track foreground labels No greytrack required, return early Get track location Make backgroundMake a partial circle, a large arc boxThis method assumes the correct center for us.Make a full circle (using a VERY thick linewidth) Labels are required for this track label interval Add a new label at each interval start text halfway up marker Anchor end of text to inner radius and reorient it Don't overrun the circleLet the UK spelling (colour) override the USA spelling (color) Force black border on white boxes with undefined border, else use fill colour Wide arc, must use full curvesNote reportlab counts angles anti-clockwise from the horizontal(as in mathematics, e.g. complex numbers and polar coordinates)but we use clockwise from the vertical. Also reportlab usesdegrees, but we use radians.Cheat and just use a four sided polygon. 
Calculate trig values for angle and coordinates origin of the circleLet the UK spelling (colour) override the USA spelling (color) Force black border on white boxes with undefined border, else use fill colourif orientation == 'right': startangle, endangle = min(startangle, endangle), max(startangle, endangle)elif orientation == 'left': startangle, endangle = max(startangle, endangle), min(startangle, endangle)else: angle subtended by arcreverse it Calculate trig values for angle and coordinates origin of the circleIf the angle is small, and the arrow is all head,cheat and just use a triangle.return draw_polygon([(x1,y1),(x2,y2),(x3,y3)], color, border, stroke_line_join=1)1=round, not mitre!default is mitre/miter which can stick out too much:1=roundNote reportlab counts angles anti-clockwise from the horizontal(as in mathematics, e.g. complex numbers and polar coordinates)but we use clockwise from the vertical. Also reportlab usesdegrees, but we use radians.auto-scale number of stepsdefault is mitre/miter which can stick out too much:1=roundNote reportlab counts angles anti-clockwise from the horizontal(as in mathematics, e.g. complex numbers and polar coordinates)but we use clockwise from the vertical. Also reportlab usesdegrees, but we use radians.TODO - two staight lines is only a good approximation for smallhead angle, in general will need to curved lines here:auto-scale number of steps | 18,317 | en | 0.783473 |
import plotly.graph_objects as go
import pandas as pd
from .Colors import COLOR_DISCRETE_MAP
from Classification import CATEGORIES
def all_categories_grouping(row: pd.Series) -> str:
    """
    Merge Category, Fuel, Segment and Euro Standard into a single string
    for unique categorization.

    :param row: a row of the vehicle registration dataframe; must contain
        'Category' and 'Fuel'; 'Segment' and 'Euro Standard' are expected
        for every row except the Off Road category
    :return: the combined classification label
    """
    # Battery-electric vehicles are grouped by category and fuel only.
    if row['Fuel'] == 'Battery Electric':
        return row['Category'] + ' / ' + row['Fuel']
    try:
        return row['Fuel'] + ' / ' + row['Segment'] + ' / ' + row['Euro Standard']
    except (KeyError, TypeError):
        # Off Road rows have no Segment nor Euro Standard: the key may be
        # missing entirely (KeyError) or hold NaN, which breaks string
        # concatenation (TypeError). Fall back to the fuel alone.
        return row['Fuel']
def activity_horizontal_bar_chart(stock_and_mileage_df: pd.DataFrame, output_folder):
    """
    Horizontal bar chart representing mean activity and other activity
    statistics per unique vehicle categorization.

    :param stock_and_mileage_df: Dataframe of the vehicles registration list
        (one row per category/fuel/segment/euro-standard combination, with
        Mean/Max/Min/Std activity, Stock and Mean_Lifetime_Activity columns)
    :param output_folder: output folder name where to store resulting chart
    :return: an html file containing the horizontal bar chart of the mean activity
    """
    # NOTE(review): the original annotation was `pd.DataFrame.groupby`, which
    # is a method, not a type; the function is used with a plain DataFrame.
    data = stock_and_mileage_df.copy()
    # Delete off road data
    data = data[data['Category'] != 'Off Road']
    # Create single column classification
    data['segmentation'] = data.apply(all_categories_grouping, axis=1)

    horizontal_plot = go.Figure()
    # Add activity statistics and stock traces. The traces with opacity=0
    # draw nothing visible: they only inject their values into the unified
    # hover label.
    horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Max_Activity'], mode='markers',
                                         name='Activitat màxima', marker_color='rgb(288, 26, 28)'
                                         ))
    horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Min_Activity'], mode='markers',
                                         name='Activitat mínima', marker_color='rgb(229, 196, 148)'
                                         ))
    horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Std_Activity'], mode='markers',
                                         name="Desviació standard de l'activitat", marker=dict(
                                             color='rgb(800, 800, 800)',
                                             opacity=0)
                                         ))
    horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Stock'], mode='markers',
                                         name="Estoc", marker=dict(
                                             color='rgb(800, 800, 800)',
                                             opacity=0)
                                         ))
    horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Mean_Lifetime_Activity'], mode='markers',
                                         name="Lifetime cumulative activity mitja", marker=dict(
                                             color='rgb(800, 800, 800)',
                                             opacity=0)
                                         ))
    # For each category add the mean activity bar chart (to differentiate by
    # the same colors as the Stock distribution Pie Chart)
    for category in CATEGORIES:
        horizontal_plot.add_trace(go.Bar(
            y=data[data['Category'] == category]['segmentation'],
            x=data[data['Category'] == category]['Mean_Activity'],
            orientation='h', marker_color=COLOR_DISCRETE_MAP[category],
            name=f'Activitat mitjana {category}'
        ))
    # Update plot information
    horizontal_plot.update_layout(
        title="Activitat mitjana anual segons classificació del parc de vehicles d'Andorra",
        title_x=0.5,
        height=4000,
        width=1500,
        template='plotly_white',
        xaxis_title='Activitat mitja (km/any)',
        yaxis_title='Tipologia de vehicle',
        hovermode="y unified",
        hoverlabel=dict(namelength=100),
        # Leave 5% headroom past the largest observed activity value.
        xaxis_range=[0, stock_and_mileage_df['Max_Activity'].max()*1.05],
        xaxis=dict(
            tickmode='array',
            tickvals=[0, 5000, 15000, 25000, 50000, 100000, 150000, 200000],
            ticktext=['0', '5k', '15k', '25k', '50k', '100k', '150k', '200k'])
    )
    horizontal_plot.update_xaxes(showgrid=True, zeroline=True)
    horizontal_plot.show()
    # Save plot to html file (output_folder is assumed to end with a path
    # separator — TODO confirm against callers)
    filename = output_folder + "Activitat mitjana anual segons classificació del parc de vehicles d'Andorra.html"
    horizontal_plot.write_html(filename)
:param stock_and_mileage_df: Dataframe of the vehicles registration list
:param output_folder: output folder name where to store resulting chart
:return: an html file containing the horizontal bar chart of the mean activity
Merge Category, Fuel and segment to a single string for unique categorization
For Off Road type with no Segment nor Euro Standard Delete off road data Create single column classification Add Activity statistics and stock traces For each category add the mean activity bar chart (to differentiate by same colors as Stock distribution Pie Chart) Update plot information Save plot to html file
class Pessoa:
    """A person with identification and payroll data."""

    def __init__(self, nome, idade, cpf, salario):
        # Store the personal and salary attributes verbatim.
        self.nome, self.idade = nome, idade
        self.cpf, self.salario = cpf, salario

    def Aumento(self):
        """Return the raise amount: 5% of the current salary."""
        return self.salario * 0.05
class Gerente(Pessoa):
    """A manager: a Pessoa with a password and its own raise rule."""

    def __init__(self, nome, idade, cpf, salario, senha):
        # Delegate the shared attributes to Pessoa, then add the password.
        super().__init__(nome, idade, cpf, salario)
        self.senha = senha

    def Aumento(self):
        """Return the manager raise: 1% of salary plus a fixed 1000 bonus."""
        return 1000 + self.salario * 0.01
# Exercise the Gerente class: build one instance and print every field,
# one per line, followed by a separator rule.
p = Gerente('Fabio', 25, 41075570816, 21000, 456578)
print(p.nome, p.idade, p.cpf, p.senha, p.salario, p.Aumento(), sep='\n')
print('=' * 30)
class Animal:
    """An animal record whose weight check depends on its behaviour flag."""

    def __init__(self, nome, raca, cor, peso, comportamento=True):
        # Plain attribute storage; comportamento defaults to well-behaved.
        self.nome, self.raca = nome, raca
        self.cor, self.peso = cor, peso
        self.comportamento = comportamento

    def Comportamento(self):
        """Return peso + 500 for a badly behaved animal; otherwise print a
        message and return None."""
        # The explicit `== False` comparison is preserved on purpose: it is
        # only true for False/0, unlike a bare truthiness test.
        if self.comportamento == False:
            return self.peso + 500
        print('Ta Gordo por sem ruim')
class Pitbull(Animal):
    """Pitbull breed: inherits every behaviour from Animal unchanged."""
    pass
# Exercise the Pitbull class with a misbehaving dog: print its attributes
# one per line, then the result of the behaviour check.
dog = Pitbull('Luci', 'Pitbull', 'Preta', 53, False)
print(dog.nome, dog.raca, dog.cor, dog.peso, sep='\n')
print(dog.Comportamento())
| Python_OO/Exercicio.py | 1,236 | def Comportamento(self):return False | 36 | pt | 0.178826 |
import os
import re
import wx
import wx.grid
from . import dialog_base
def pop_error(msg):
    """Show *msg* in a modal error dialog with a single OK button."""
    wx.MessageBox(msg, 'Error', style=wx.ICON_ERROR | wx.OK)
class SettingsDialog(dialog_base.SettingsDialogBase):
    """Top-level settings dialog: hosts a SettingsDialogPanel and sizes
    itself to the panel's best size."""

    def __init__(self, extra_data_func, extra_data_wildcard, config_save_func,
                 file_name_format_hint, version):
        """Create the dialog and its content panel.

        :param extra_data_func: callable forwarded to the Fields page
        :param extra_data_wildcard: file-picker wildcard forwarded to the
            Fields page (None disables the picker)
        :param config_save_func: callback invoked when the user saves settings
        :param file_name_format_hint: help text for the file-name format field
        :param version: version string shown in the window title
        """
        dialog_base.SettingsDialogBase.__init__(self, None)
        self.panel = SettingsDialogPanel(
            self, extra_data_func, extra_data_wildcard, config_save_func,
            file_name_format_hint)
        best_size = self.panel.BestSize
        # hack for some gtk themes that incorrectly calculate best size
        best_size.IncBy(dx=0, dy=30)
        self.SetClientSize(best_size)
        self.SetTitle('InteractiveHtmlBom %s' % version)

    # hack for new wxFormBuilder generating code incompatible with old wxPython
    # noinspection PyMethodOverriding
    def SetSizeHints(self, sz1, sz2):
        """Dispatch to the wxPython 4 or wxPython 3 size-hint API, whichever
        this runtime provides."""
        try:
            # wxPython 4
            super(SettingsDialog, self).SetSizeHints(sz1, sz2)
        except TypeError:
            # wxPython 3
            self.SetSizeHintsSz(sz1, sz2)

    def set_extra_data_path(self, extra_data_file):
        """Point the Fields page's file picker at *extra_data_file* and
        trigger its change handler so the field list refreshes."""
        self.panel.fields.extraDataFilePicker.Path = extra_data_file
        self.panel.fields.OnExtraDataFileChanged(None)
# Implementing settings_dialog
class SettingsDialogPanel(dialog_base.SettingsDialogPanel):
    """Notebook container that wires the General / Html / Fields pages
    together and routes the dialog-level buttons."""

    def __init__(self, parent, extra_data_func, extra_data_wildcard,
                 config_save_func, file_name_format_hint):
        self.config_save_func = config_save_func
        dialog_base.SettingsDialogPanel.__init__(self, parent)
        # Build the three pages, then register them in tab order.
        self.general = GeneralSettingsPanel(self.notebook,
                                            file_name_format_hint)
        self.html = HtmlSettingsPanel(self.notebook)
        self.fields = FieldsPanel(self.notebook, extra_data_func,
                                  extra_data_wildcard)
        for page, label in ((self.general, "General"),
                            (self.html, "Html defaults"),
                            (self.fields, "Fields")):
            self.notebook.AddPage(page, label)

    def OnExit(self, event):
        """Close the parent dialog, reporting cancellation."""
        self.GetParent().EndModal(wx.ID_CANCEL)

    def OnSaveSettings(self, event):
        """Persist the current dialog state via the injected callback."""
        self.config_save_func(self)

    def OnGenerateBom(self, event):
        """Close the parent dialog, reporting success (generate the BOM)."""
        self.GetParent().EndModal(wx.ID_OK)

    def finish_init(self):
        """Post-construction hook: sync the rotation label with the slider."""
        self.html.OnBoardRotationSlider(None)
# Implementing HtmlSettingsPanelBase
class HtmlSettingsPanel(dialog_base.HtmlSettingsPanelBase):
    """Page holding the HTML output defaults (board rotation etc.)."""

    def __init__(self, parent):
        dialog_base.HtmlSettingsPanelBase.__init__(self, parent)

    # Handlers for HtmlSettingsPanelBase events.
    def OnBoardRotationSlider(self, event):
        """Mirror the slider position (5-degree steps) into the degree label."""
        degrees = 5 * self.boardRotationSlider.Value
        self.rotationDegreeLabel.LabelText = u"{}\u00B0".format(degrees)
# Implementing GeneralSettingsPanelBase
class GeneralSettingsPanel(dialog_base.GeneralSettingsPanelBase):
    """General settings page: component sort order, component blacklist and
    output file name format."""

    def __init__(self, parent, file_name_format_hint):
        """:param parent: notebook that owns this page
        :param file_name_format_hint: help text shown by the '?' button
        """
        dialog_base.GeneralSettingsPanelBase.__init__(self, parent)
        self.file_name_format_hint = file_name_format_hint
        bitmaps = os.path.join(os.path.dirname(__file__), "bitmaps")
        # Assign the toolbar-style bitmaps to the buttons.
        self.m_btnSortUp.SetBitmap(self._bitmap(bitmaps, "btn-arrow-up.png"))
        self.m_btnSortDown.SetBitmap(
            self._bitmap(bitmaps, "btn-arrow-down.png"))
        self.m_btnSortAdd.SetBitmap(self._bitmap(bitmaps, "btn-plus.png"))
        self.m_btnSortRemove.SetBitmap(self._bitmap(bitmaps, "btn-minus.png"))
        self.m_bpButton5.SetBitmap(self._bitmap(bitmaps, "btn-question.png"))
        self.m_btnBlacklistAdd.SetBitmap(
            self._bitmap(bitmaps, "btn-plus.png"))
        self.m_btnBlacklistRemove.SetBitmap(
            self._bitmap(bitmaps, "btn-minus.png"))

    @staticmethod
    def _bitmap(bitmaps_dir, file_name):
        """Load a PNG from the plugin bitmaps directory as a wx.Bitmap."""
        return wx.Bitmap(os.path.join(bitmaps_dir, file_name),
                         wx.BITMAP_TYPE_PNG)

    @staticmethod
    def _append_unique(box, item):
        """Append *item* to listbox *box* unless it is empty or already
        present; select the (new or existing) entry."""
        if item == '':
            return
        found = box.FindString(item)
        if found != wx.NOT_FOUND:
            box.SetSelection(found)
            return
        box.Append(item)
        box.SetSelection(box.Count - 1)

    # Handlers for GeneralSettingsPanelBase events.
    def OnComponentSortOrderUp(self, event):
        """Move the selected sort-order item one position up."""
        selection = self.componentSortOrderBox.Selection
        if selection != wx.NOT_FOUND and selection > 0:
            item = self.componentSortOrderBox.GetString(selection)
            self.componentSortOrderBox.Delete(selection)
            self.componentSortOrderBox.Insert(item, selection - 1)
            self.componentSortOrderBox.SetSelection(selection - 1)

    def OnComponentSortOrderDown(self, event):
        """Move the selected sort-order item one position down."""
        selection = self.componentSortOrderBox.Selection
        size = self.componentSortOrderBox.Count
        if selection != wx.NOT_FOUND and selection < size - 1:
            item = self.componentSortOrderBox.GetString(selection)
            self.componentSortOrderBox.Delete(selection)
            self.componentSortOrderBox.Insert(item, selection + 1)
            self.componentSortOrderBox.SetSelection(selection + 1)

    def OnComponentSortOrderAdd(self, event):
        """Prompt for a new sort-order item (letters only) and add it."""
        item = wx.GetTextFromUser(
            "Characters other than A-Z will be ignored.",
            "Add sort order item")
        item = re.sub('[^A-Z]', '', item.upper())
        self._append_unique(self.componentSortOrderBox, item)

    def OnComponentSortOrderRemove(self, event):
        """Delete the selected sort-order item; the '~' entry is protected."""
        selection = self.componentSortOrderBox.Selection
        if selection != wx.NOT_FOUND:
            item = self.componentSortOrderBox.GetString(selection)
            if item == '~':
                pop_error("You can not delete '~' item")
                return
            self.componentSortOrderBox.Delete(selection)
            if self.componentSortOrderBox.Count > 0:
                self.componentSortOrderBox.SetSelection(max(selection - 1, 0))

    def OnComponentBlacklistAdd(self, event):
        """Prompt for a new blacklist pattern (A-Z, 0-9, *) and add it."""
        item = wx.GetTextFromUser(
            "Characters other than A-Z 0-9 and * will be ignored.",
            "Add blacklist item")
        item = re.sub('[^A-Z0-9*]', '', item.upper())
        self._append_unique(self.blacklistBox, item)

    def OnComponentBlacklistRemove(self, event):
        """Delete the selected blacklist entry."""
        selection = self.blacklistBox.Selection
        if selection != wx.NOT_FOUND:
            self.blacklistBox.Delete(selection)
            if self.blacklistBox.Count > 0:
                self.blacklistBox.SetSelection(max(selection - 1, 0))

    def OnNameFormatHintClick(self, event):
        """Show the file name format help text in a plain message box."""
        wx.MessageBox(self.file_name_format_hint, 'File name format help',
                      style=wx.ICON_NONE | wx.OK)

    def OnSize(self, event):
        # Trick the listCheckBox best size calculations: empty the list,
        # re-layout, then restore the items.
        tmp = self.componentSortOrderBox.GetStrings()
        self.componentSortOrderBox.SetItems([])
        self.Layout()
        self.componentSortOrderBox.SetItems(tmp)
# Implementing FieldsPanelBase
class FieldsPanel(dialog_base.FieldsPanelBase):
    """Panel for choosing which BOM fields are shown/grouped and for
    loading extra field data from an external file."""
    # Combo-box entry meaning "no field selected".
    NONE_STRING = '<none>'
    # Grid columns: 0 = show checkbox, 1 = group checkbox, 2 = field name.
    FIELDS_GRID_COLUMNS = 3
    def __init__(self, parent, extra_data_func, extra_data_wildcard):
        # extra_data_func parses an extra-data file; its result is stored in
        # self.extra_field_data (indexed [0] for field names, [1] for a
        # per-component dict of field values — see OnExtraDataFileChanged).
        dialog_base.FieldsPanelBase.__init__(self, parent)
        self.extra_data_func = extra_data_func
        self.extra_field_data = None
        bitmaps = os.path.join(os.path.dirname(__file__), "bitmaps")
        self.m_btnUp.SetBitmap(wx.Bitmap(
            os.path.join(bitmaps, "btn-arrow-up.png"), wx.BITMAP_TYPE_PNG))
        self.m_btnDown.SetBitmap(wx.Bitmap(
            os.path.join(bitmaps, "btn-arrow-down.png"), wx.BITMAP_TYPE_PNG))
        self.set_file_picker_wildcard(extra_data_wildcard)
        self._setFieldsList([])
        # Size the two checkbox columns to fit their labels.
        for i in range(2):
            box = self.GetTextExtent(self.fieldsGrid.GetColLabelValue(i))
            # Newer wxPython returns a Size object (has .x); older returns
            # a tuple — support both.
            if hasattr(box, "x"):
                width = box.x
            else:
                width = box[0]
            width = int(width * 1.1 + 5)
            self.fieldsGrid.SetColMinimalWidth(i, width)
            self.fieldsGrid.SetColSize(i, width)
    def set_file_picker_wildcard(self, extra_data_wildcard):
        """Recreate the file picker with the given wildcard.

        A wildcard of None disables the picker entirely.
        """
        if extra_data_wildcard is None:
            self.extraDataFilePicker.Disable()
            return
        # wxFilePickerCtrl doesn't support changing wildcard at runtime
        # so we have to replace it
        picker_parent = self.extraDataFilePicker.GetParent()
        new_picker = wx.FilePickerCtrl(
            picker_parent, wx.ID_ANY, wx.EmptyString,
            u"Select a file",
            extra_data_wildcard,
            wx.DefaultPosition, wx.DefaultSize,
            (wx.FLP_DEFAULT_STYLE | wx.FLP_FILE_MUST_EXIST | wx.FLP_OPEN |
             wx.FLP_SMALL | wx.FLP_USE_TEXTCTRL | wx.BORDER_SIMPLE))
        self.GetSizer().Replace(self.extraDataFilePicker, new_picker,
                                recursive=True)
        self.extraDataFilePicker.Destroy()
        self.extraDataFilePicker = new_picker
        self.Layout()
    def _swapRows(self, a, b):
        """Swap the contents of grid rows a and b, column by column."""
        for i in range(self.FIELDS_GRID_COLUMNS):
            va = self.fieldsGrid.GetCellValue(a, i)
            vb = self.fieldsGrid.GetCellValue(b, i)
            self.fieldsGrid.SetCellValue(a, i, vb)
            self.fieldsGrid.SetCellValue(b, i, va)
    # Handlers for FieldsPanelBase events.
    def OnGridCellClicked(self, event):
        """Select the clicked row and toggle checkboxes in columns 0/1."""
        self.fieldsGrid.ClearSelection()
        self.fieldsGrid.SelectRow(event.Row)
        if event.Col < 2:
            # toggle checkbox
            val = self.fieldsGrid.GetCellValue(event.Row, event.Col)
            val = "" if val else "1"
            self.fieldsGrid.SetCellValue(event.Row, event.Col, val)
            # group shouldn't be enabled without show
            if event.Col == 0 and val == "":
                self.fieldsGrid.SetCellValue(event.Row, 1, val)
            if event.Col == 1 and val == "1":
                self.fieldsGrid.SetCellValue(event.Row, 0, val)
    def OnFieldsUp(self, event):
        """Move the single selected row one position up."""
        selection = self.fieldsGrid.SelectedRows
        if len(selection) == 1 and selection[0] > 0:
            self._swapRows(selection[0], selection[0] - 1)
            self.fieldsGrid.ClearSelection()
            self.fieldsGrid.SelectRow(selection[0] - 1)
    def OnFieldsDown(self, event):
        """Move the single selected row one position down."""
        selection = self.fieldsGrid.SelectedRows
        size = self.fieldsGrid.NumberRows
        if len(selection) == 1 and selection[0] < size - 1:
            self._swapRows(selection[0], selection[0] + 1)
            self.fieldsGrid.ClearSelection()
            self.fieldsGrid.SelectRow(selection[0] + 1)
    def _setFieldsList(self, fields):
        """Rebuild the grid from `fields`, with both checkboxes checked."""
        if self.fieldsGrid.NumberRows:
            self.fieldsGrid.DeleteRows(0, self.fieldsGrid.NumberRows)
        self.fieldsGrid.AppendRows(len(fields))
        row = 0
        for f in fields:
            self.fieldsGrid.SetCellValue(row, 0, "1")
            self.fieldsGrid.SetCellValue(row, 1, "1")
            self.fieldsGrid.SetCellRenderer(
                row, 0, wx.grid.GridCellBoolRenderer())
            self.fieldsGrid.SetCellRenderer(
                row, 1, wx.grid.GridCellBoolRenderer())
            self.fieldsGrid.SetCellValue(row, 2, f)
            self.fieldsGrid.SetCellAlignment(
                row, 2, wx.ALIGN_LEFT, wx.ALIGN_TOP)
            self.fieldsGrid.SetReadOnly(row, 2)
            row += 1
    def OnExtraDataFileChanged(self, event):
        """Parse the chosen extra-data file and refresh dependent widgets."""
        extra_data_file = self.extraDataFilePicker.Path
        if not os.path.isfile(extra_data_file):
            return
        self.extra_field_data = None
        try:
            self.extra_field_data = self.extra_data_func(
                extra_data_file, self.normalizeCaseCheckbox.Value)
        except Exception as e:
            # Parsing failed: report and clear the picker path.
            pop_error(
                "Failed to parse file %s\n\n%s" % (extra_data_file, e))
            self.extraDataFilePicker.Path = ''
        if self.extra_field_data is not None:
            field_list = list(self.extra_field_data[0])
            self._setFieldsList(["Value", "Footprint"] + field_list)
            field_list.append(self.NONE_STRING)
            self.boardVariantFieldBox.SetItems(field_list)
            self.boardVariantFieldBox.SetStringSelection(self.NONE_STRING)
            self.boardVariantWhitelist.Clear()
            self.boardVariantBlacklist.Clear()
            self.dnpFieldBox.SetItems(field_list)
            self.dnpFieldBox.SetStringSelection(self.NONE_STRING)
    def OnBoardVariantFieldChange(self, event):
        """Populate variant white/black lists from the selected field."""
        selection = self.boardVariantFieldBox.Value
        if not selection or selection == self.NONE_STRING \
                or self.extra_field_data is None:
            self.boardVariantWhitelist.Clear()
            self.boardVariantBlacklist.Clear()
            return
        # Collect the distinct values the selected field takes.
        variant_set = set()
        for _, field_dict in self.extra_field_data[1].items():
            if selection in field_dict:
                variant_set.add(field_dict[selection])
        self.boardVariantWhitelist.SetItems(list(variant_set))
        self.boardVariantBlacklist.SetItems(list(variant_set))
    def OnSize(self, event):
        """Stretch the name column to fill the remaining grid width."""
        self.Layout()
        g = self.fieldsGrid
        g.SetColSize(
            2, g.GetClientSize().x - g.GetColSize(0) - g.GetColSize(1) - 30)
    def GetShowFields(self):
        """Return the names of fields whose 'show' box is checked."""
        result = []
        for row in range(self.fieldsGrid.NumberRows):
            if self.fieldsGrid.GetCellValue(row, 0) == "1":
                result.append(self.fieldsGrid.GetCellValue(row, 2))
        return result
    def GetGroupFields(self):
        """Return the names of fields whose 'group' box is checked."""
        result = []
        for row in range(self.fieldsGrid.NumberRows):
            if self.fieldsGrid.GetCellValue(row, 1) == "1":
                result.append(self.fieldsGrid.GetCellValue(row, 2))
        return result
    def SetCheckedFields(self, show, group):
        """Reorder the grid so `show` comes first and set both check states."""
        # A field can only be grouped if it is also shown.
        group = [s for s in group if s in show]
        current = []
        for row in range(self.fieldsGrid.NumberRows):
            current.append(self.fieldsGrid.GetCellValue(row, 2))
        new = [s for s in current if s not in show]
        self._setFieldsList(show + new)
        for row in range(self.fieldsGrid.NumberRows):
            field = self.fieldsGrid.GetCellValue(row, 2)
            self.fieldsGrid.SetCellValue(row, 0, "1" if field in show else "")
            self.fieldsGrid.SetCellValue(row, 1, "1" if field in group else "")
| InteractiveHtmlBom/dialog/settings_dialog.py | 14,769 | hack for some gtk themes that incorrectly calculate best size hack for new wxFormBuilder generating code incompatible with old wxPython noinspection PyMethodOverriding wxPython 4 wxPython 3 Implementing settings_dialog Implementing HtmlSettingsPanelBase Handlers for HtmlSettingsPanelBase events. Implementing GeneralSettingsPanelBase Handlers for GeneralSettingsPanelBase events. Trick the listCheckBox best size calculations Implementing FieldsPanelBase wxFilePickerCtrl doesn't support changing wildcard at runtime so we have to replace it Handlers for FieldsPanelBase events. toggle checkbox group shouldn't be enabled without show | 635 | en | 0.657373 |
from __future__ import unicode_literals, print_function
from libraries.lambda_handlers.register_module_handler import RegisterModuleHandler
def handle(event, context):
    """Entry point called by a module on deployment to register it.

    :param dict event:
    :param context:
    :return dict:
    """
    handler = RegisterModuleHandler()
    return handler.handle(event, context)
| functions/register_module/main.py | 363 | Called by a module when it is deployed to register it
:param dict event:
:param context:
:return dict: | 102 | en | 0.931528 |
"""
Problem Statement:
Let the function f(s) be the frequency of the lexicographically smallest character in a non-empty string s. For example, if s = "dcce" then f(s) = 2 because the lexicographically smallest character is 'c', which has a frequency of 2.
You are given an array of strings words and another array of query strings queries. For each query queries[i], count the number of words in words such that f(queries[i]) < f(W) for each W in words.
Return an integer array answer, where each answer[i] is the answer to the ith query.
Example 1:
Input: queries = ["cbd"], words = ["zaaaz"]
Output: [1]
"""
from collections import Counter
def numSmallerByFrequency(queries, words):
    """Return, for each query, how many words have a strictly larger f-value.

    f(s) is the frequency of the lexicographically smallest character of a
    non-empty string s (e.g. f("dcce") == 2).

    :param queries: iterable of non-empty strings
    :param words: iterable of non-empty strings
    :return: list of ints, one per query
    """
    from bisect import bisect_right

    def f(s):
        # Frequency of the smallest character without sorting: O(len(s)).
        return s.count(min(s))

    # Sort the word f-values once so each query becomes a binary search:
    # O((Q + W) log W) overall instead of the naive O(Q * W) pairwise scan.
    word_freqs = sorted(f(w) for w in words)
    total = len(word_freqs)
    # Words with f(w) > f(q) are exactly those past the bisect_right cut.
    return [total - bisect_right(word_freqs, f(q)) for q in queries]
# A function to find the frequency of smallest character.
def fre(arrs):
    """Return the frequency of the smallest character for each string."""
    freqs = []
    for arr in arrs:
        # Sorting puts the smallest character first, so the first entry of
        # the (insertion-ordered) Counter is that character's frequency.
        counts = Counter(sorted(arr))
        freqs.append(next(iter(counts.values())))
    return freqs
# Main begins here: read both arrays from stdin, run the query, print result.
queries_list = input('Enter elements of a queries separated by space: ').split()
print("\n")
words_list = input('Enter elements of a words separated by space: ').split()
print("\n")
ans = numSmallerByFrequency(queries_list, words_list)
print("Output: ", ans)
| Arrays/python/compareStringByFrequencyOfSmallestCharacter.py | 2,164 | Problem Statement:
Let the function f(s) be the frequency of the lexicographically smallest character in a non-empty string s. For example, if s = "dcce" then f(s) = 2 because the lexicographically smallest character is 'c', which has a frequency of 2.
You are given an array of strings words and another array of query strings queries. For each query queries[i], count the number of words in words such that f(queries[i]) < f(W) for each W in words.
Return an integer array answer, where each answer[i] is the answer to the ith query.
Example 1:
Input: queries = ["cbd"], words = ["zaaaz"]
Output: [1]
Calculate the frequency of smallest character for each word of query array Calculate the frequency of smallest character for each word of words array & sort it in reverse order. compare reach frequency in fre_queries with each element of fre_words & increase count accordingly A function to find the frequency of smallest character. Sort the array Main begins here This would split the input string separated by spaces into string array This would split the input string separated by spaces into string array print(queries_list) print(words_list) | 1,153 | en | 0.872966 |
class MetricHandler:
    """
    Abstract helper used in the training loop to accumulate and report
    metrics. Subclasses must implement every method below.
    """
    def __init__(self):
        pass
    def add(self, outputs, targets):
        """
        Accumulate metrics for a single batch
        :param outputs: outputs of the model
        :param targets: targets of the model
        """
        raise NotImplementedError()
    def compute(self, phase):
        """
        Aggregate accumulated metrics over batches at the end of the epoch
        :param phase: either 'train' or 'val'
        """
        raise NotImplementedError()
    def description(self, phase):
        """
        Human-readable description of the current metrics
        :param phase: either 'train' or 'val'
        :return: str
        """
        raise NotImplementedError()
    def scalar_infos(self, phase):
        """
        Return list of tuples to use with tensorboard writer object's
        'add_scalar' function
        :param phase: either 'train' or 'val'
        :return: [tuple(str, number)]
        """
        raise NotImplementedError()
    def description_best(self):
        """
        Human-readable description of the best metrics seen so far
        :return: str
        """
        raise NotImplementedError()
class Epocher:
    """Iterable that reports training progress on one console line (WIP).

    Yields epoch numbers from ``epoch_offset`` up to
    ``epoch_offset + n_epoch - 1`` while redrawing a status line (epoch
    counter, stats, last-saved info) in place instead of spamming stdout.
    """
    def __init__(self, n_epoch, epoch_offset=1):
        # epoch_offset defaults to 1 so counting starts at 1, not 0.
        self.n_epoch = n_epoch
        self.epoch_offset = epoch_offset
        self.s_more = ''
        self.stats_string = ''
        self.ls_string = ''
    def __iter__(self):
        # Start one before the first epoch; __next__ increments first.
        self.n = self.epoch_offset - 1
        self.stats_string = ''
        self.ls_string = ''
        self.s_more = ''
        self.__update_stdout__()
        return self
    def __next__(self):
        self.n += 1
        if self.n >= self.n_epoch + self.epoch_offset:
            raise StopIteration
        self.__update_stdout__()
        self.s_more = ''
        return self.n
    def update_stats(self, s):
        """Set the stats section of the status line and redraw it."""
        self.stats_string = s
        self.__update_stdout__()
    def update_last_saved(self, s):
        """Set the last-saved section of the status line and redraw it."""
        self.ls_string = s
        self.__update_stdout__()
    def print(self, s, sep=' '):
        """Append a transient, newline-free message to the status line."""
        self.s_more = sep + s.replace('\n', '')
        self.__update_stdout__()
    def __update_stdout__(self):
        # Rebuild the full line and overwrite the current one via '\r'.
        last_epoch = self.n_epoch + self.epoch_offset - 1
        parts = ['\r' + 'Epoch [{}/{}]'.format(self.n, last_epoch)]
        if self.stats_string:
            parts.append(
                ' Stats [{}]'.format(self.stats_string).replace('\n', ''))
        if self.ls_string:
            parts.append(
                ' Last Saved [{}]'.format(self.ls_string).replace('\n', ''))
        parts.append(self.s_more)
        print(''.join(parts), end='')
| facade_project/utils/ml_utils.py | 2,763 | An object which is used to print information about training without spamming the console. (WIP)
Object meant to be used in the training loop to handle metrics logs
Adding metric for each batch
:param outputs: outputs of the model
:param targets: targets of the model
Aggregate accumulated metrics over batches at the end of the epoch
:param phase: either 'train' or 'val'
Description of the current metrics
:param phase: either 'train' or 'val'
:return: str
Description of the best metrics
:return: str
Return list of tuple to use with tensorboard writer object 'add_scalar' function
:param phase: either 'train' or 'val'
:return: [tuple(str, number)]
epoch_offset += 1 starting at 1 and not zero | 704 | en | 0.768563 |
"""
Optimizers
----------
.. autosummary::
:template: template.rst
:toctree:
Solver
ScipySolver
CandidateSolver
GridSolver
"""
from .solver import Solver
from .scipy import ScipySolver
from .candidate import CandidateSolver, GridSolver, FiniteDomainSolver
| hdbo/febo/solvers/__init__.py | 279 | Optimizers
----------
.. autosummary::
:template: template.rst
:toctree:
Solver
ScipySolver
CandidateSolver
GridSolver | 138 | en | 0.66435 |
import grpc
from functools import wraps
class WalletEncryptedError(Exception):
    """Raised when lnd's wallet is encrypted/locked and RPC is unusable."""

    _DEFAULT_MESSAGE = ('Wallet is encrypted. Please unlock or set '
                        'password if this is the first time starting lnd. ')

    def __init__(self, message=None):
        # Any falsy message falls back to the default (matching the
        # original `message or default` semantics).
        super().__init__(message or self._DEFAULT_MESSAGE)
def handle_rpc_errors(fnc):
    """Decorator to add more context to RPC errors"""
    @wraps(fnc)
    def wrapper(*args, **kwargs):
        try:
            return fnc(*args, **kwargs)
        except grpc.RpcError as exc:
            # lnd might be active, but not possible to contact
            # using RPC if the wallet is encrypted. If we get
            # an rpc error code Unimplemented, it means that lnd is
            # running, but the RPC server is not active yet (only
            # WalletUnlocker server active) and most likely this
            # is because of an encrypted wallet.
            # NOTE(review): the next two expressions are evaluated and
            # discarded — presumably leftover debugging; confirm and remove.
            exc.code().value
            exc.details()
            if exc.code() == grpc.StatusCode.UNIMPLEMENTED:
                # raise WalletEncryptedError from None
                print("unimplemented")
                raise exc
            elif exc.code() == grpc.StatusCode.UNAVAILABLE:
                print("UNAVAILABLE")
                print(f"ERROR MESSAGE: {exc.details()}")
            elif exc.code() == grpc.StatusCode.UNKNOWN and exc.details() == "wallet locked, unlock it to enable full RPC access":
                print("WALLET IS LOCKED!")
                raise exc
            elif exc.code() == grpc.StatusCode.UNKNOWN:
                print("unknown")
                print(f"ERROR MESSAGE: {exc.details()}")
            elif exc.code() == grpc.StatusCode.NOT_FOUND:
                print("NOT FOUND")
                print(f"ERROR MESSAGE: {exc.details()}")
            elif exc.code() == grpc.StatusCode.PERMISSION_DENIED:
                print("PERMISSION_DENIED")
                print(f"ERROR MESSAGE: {exc.details()}")
            else:
                raise exc
            # NOTE(review): non-raising branches fall through here and hand
            # the RpcError object back to the caller instead of raising —
            # confirm callers expect an exception instance as a return value.
            return exc
        except Exception as exc:
            # NOTE(review): this swallows every other exception and returns
            # None after printing — confirm this best-effort behavior is
            # intended.
            print("unknown exception")
            print(exc)
    return wrapper
| lndgrpc/errors.py | 2,145 | Decorator to add more context to RPC errors
lnd might be active, but not possible to contact using RPC if the wallet is encrypted. If we get an rpc error code Unimplemented, it means that lnd is running, but the RPC server is not active yet (only WalletUnlocker server active) and most likely this is because of an encrypted wallet. raise WalletEncryptedError from None | 371 | en | 0.86211 |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch
import torch.nn as nn
from mmcls.models.builder import BACKBONES
from mmcv.cnn import build_activation_layer, build_norm_layer
from ...utils import Placeholder
class FactorizedReduce(nn.Module):
    """Reduce feature map size by factorized pointwise (stride=2)."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 act_cfg=dict(type='ReLU'),
                 norm_cfg=dict(type='BN')):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.act_cfg = act_cfg
        self.norm_cfg = norm_cfg
        self.relu = build_activation_layer(self.act_cfg)
        # Two 1x1 stride-2 convs, each producing half the output channels.
        conv_kwargs = dict(stride=2, padding=0, bias=False)
        self.conv1 = nn.Conv2d(
            self.in_channels, self.out_channels // 2, 1, **conv_kwargs)
        self.conv2 = nn.Conv2d(
            self.in_channels, self.out_channels // 2, 1, **conv_kwargs)
        self.bn = build_norm_layer(self.norm_cfg, self.out_channels)[1]

    def forward(self, x):
        x = self.relu(x)
        # The second branch sees the input shifted by one pixel, so the two
        # stride-2 samplings together cover complementary positions.
        branches = [self.conv1(x), self.conv2(x[:, :, 1:, 1:])]
        return self.bn(torch.cat(branches, dim=1))
class StandardConv(nn.Module):
    """
    Standard conv: ReLU - Conv - BN
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 act_cfg=dict(type='ReLU'),
                 norm_cfg=dict(type='BN')):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.act_cfg = act_cfg
        self.norm_cfg = norm_cfg
        layers = [
            build_activation_layer(self.act_cfg),
            nn.Conv2d(
                self.in_channels,
                self.out_channels,
                self.kernel_size,
                self.stride,
                self.padding,
                bias=False),
            build_norm_layer(self.norm_cfg, self.out_channels)[1],
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
class Node(nn.Module):
    """Searchable DAG node holding one candidate edge per predecessor."""

    def __init__(self, node_id, num_prev_nodes, channels,
                 num_downsample_nodes):
        super().__init__()
        edges = nn.ModuleDict()
        for prev in range(num_prev_nodes):
            # The first `num_downsample_nodes` predecessors need a stride-2
            # edge; the rest keep spatial size.
            stride = 2 if prev < num_downsample_nodes else 1
            edge_id = '{}_p{}'.format(node_id, prev)
            edge = nn.Sequential(
                Placeholder(
                    group='node',
                    space_id=edge_id,
                    choice_args=dict(
                        stride=stride,
                        in_channels=channels,
                        out_channels=channels)))
            edges.add_module(edge_id, edge)
        self.edges = Placeholder(
            group='node_edge', space_id=node_id, choices=edges)

    def forward(self, prev_nodes):
        return self.edges(prev_nodes)
class Cell(nn.Module):
    """A DARTS cell: two preprocessed inputs feeding a searchable DAG of
    nodes whose outputs are concatenated along the channel dimension."""
    def __init__(self,
                 num_nodes,
                 channels,
                 prev_channels,
                 prev_prev_channels,
                 reduction,
                 prev_reduction,
                 act_cfg=dict(type='ReLU'),
                 norm_cfg=dict(type='BN')):
        super().__init__()
        self.act_cfg = act_cfg
        self.norm_cfg = norm_cfg
        self.reduction = reduction
        self.num_nodes = num_nodes
        # If previous cell is reduction cell, current input size does not match
        # with output size of cell[k-2]. So the output[k-2] should be reduced
        # by preprocessing.
        if prev_reduction:
            self.preproc0 = FactorizedReduce(prev_prev_channels, channels,
                                             self.act_cfg, self.norm_cfg)
        else:
            self.preproc0 = StandardConv(prev_prev_channels, channels, 1, 1, 0,
                                         self.act_cfg, self.norm_cfg)
        # 1x1 conv to align the previous cell's channels to this cell's.
        self.preproc1 = StandardConv(prev_channels, channels, 1, 1, 0,
                                     self.act_cfg, self.norm_cfg)
        # generate dag
        self.nodes = nn.ModuleList()
        for depth in range(2, self.num_nodes + 2):
            # `depth` counts predecessors: the two preprocessed inputs plus
            # all previously built nodes.
            if reduction:
                node_id = f'reduce_n{depth}'
                # The two cell inputs must be downsampled in reduction cells.
                num_downsample_nodes = 2
            else:
                node_id = f'normal_n{depth}'
                num_downsample_nodes = 0
            self.nodes.append(
                Node(node_id, depth, channels, num_downsample_nodes))
    def forward(self, s0, s1):
        # s0, s1 are the outputs of previous previous cell and previous cell,
        # respectively.
        tensors = [self.preproc0(s0), self.preproc1(s1)]
        for node in self.nodes:
            cur_tensor = node(tensors)
            tensors.append(cur_tensor)
        # Concatenate only the node outputs (skip the two inputs).
        output = torch.cat(tensors[2:], dim=1)
        return output
class AuxiliaryModule(nn.Module):
    """Auxiliary head in 2/3 place of network to let the gradient flow well."""

    def __init__(self,
                 in_channels,
                 base_channels,
                 out_channels,
                 norm_cfg=dict(type='BN')):
        super().__init__()
        self.norm_cfg = norm_cfg
        layers = [
            nn.ReLU(),
            # 5x5 stride-2 average pool -> 2x2 out
            nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),
            nn.Conv2d(in_channels, base_channels, kernel_size=1, bias=False),
            build_norm_layer(self.norm_cfg, base_channels)[1],
            nn.ReLU(inplace=True),
            # 2x2 conv collapses the remaining spatial extent -> 1x1 out
            nn.Conv2d(base_channels, out_channels, kernel_size=2, bias=False),
            build_norm_layer(self.norm_cfg, out_channels)[1],
            nn.ReLU(inplace=True),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
@BACKBONES.register_module()
class DartsBackbone(nn.Module):
    """Searchable DARTS backbone: a conv stem followed by a chain of Cells.

    Cells at 1/3 and 2/3 depth are reduction cells (stride 2, doubled
    channels). Optionally attaches an auxiliary head after the 2/3 cell.
    """
    def __init__(self,
                 in_channels,
                 base_channels,
                 num_layers=8,
                 num_nodes=4,
                 stem_multiplier=3,
                 out_indices=(7, ),
                 auxliary=False,
                 aux_channels=None,
                 aux_out_channels=None,
                 act_cfg=dict(type='ReLU'),
                 norm_cfg=dict(type='BN')):
        super().__init__()
        self.in_channels = in_channels
        self.base_channels = base_channels
        self.num_layers = num_layers
        self.num_nodes = num_nodes
        self.stem_multiplier = stem_multiplier
        self.out_indices = out_indices
        # The last requested output must be the final cell.
        assert self.out_indices[-1] == self.num_layers - 1
        if auxliary:
            assert aux_channels is not None
            assert aux_out_channels is not None
            self.aux_channels = aux_channels
            self.aux_out_channels = aux_out_channels
            self.auxliary_indice = 2 * self.num_layers // 3
        else:
            # -1 never matches a cell index, i.e. no auxiliary head.
            self.auxliary_indice = -1
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.out_channels = self.stem_multiplier * self.base_channels
        stem_norm_cfg = copy.deepcopy(self.norm_cfg)
        stem_norm_cfg.update(dict(affine=True))
        # NOTE(review): stem_norm_cfg is built but the stem below uses
        # self.norm_cfg — confirm which config is intended.
        self.stem = nn.Sequential(
            nn.Conv2d(
                self.in_channels, self.out_channels, 3, 1, 1, bias=False),
            build_norm_layer(self.norm_cfg, self.out_channels)[1])
        # for the first cell, stem is used for both s0 and s1
        # [!] prev_prev_channels and prev_channels is output channel size,
        # but c_cur is input channel size.
        prev_prev_channels = self.out_channels
        prev_channels = self.out_channels
        self.out_channels = self.base_channels
        self.cells = nn.ModuleList()
        prev_reduction, reduction = False, False
        for i in range(self.num_layers):
            prev_reduction, reduction = reduction, False
            # Reduce featuremap size and double channels in 1/3
            # and 2/3 layer.
            if i == self.num_layers // 3 or i == 2 * self.num_layers // 3:
                self.out_channels *= 2
                reduction = True
            cell = Cell(self.num_nodes, self.out_channels, prev_channels,
                        prev_prev_channels, reduction, prev_reduction,
                        self.act_cfg, self.norm_cfg)
            self.cells.append(cell)
            prev_prev_channels = prev_channels
            # Each cell concatenates num_nodes node outputs channel-wise.
            prev_channels = self.out_channels * self.num_nodes
            if i == self.auxliary_indice:
                self.auxliary_module = AuxiliaryModule(prev_channels,
                                                       self.aux_channels,
                                                       self.aux_out_channels,
                                                       self.norm_cfg)
    def forward(self, x):
        """Return feature maps at `out_indices`; during training with the
        auxiliary head enabled, its output is inserted first."""
        outs = []
        s0 = s1 = self.stem(x)
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1)
            if i in self.out_indices:
                outs.append(s1)
            if i == self.auxliary_indice and self.training:
                aux_feature = self.auxliary_module(s1)
                outs.insert(0, aux_feature)
        return tuple(outs)
| mmcls/models/architectures/components/backbones/darts_backbone.py | 9,630 | Auxiliary head in 2/3 place of network to let the gradient flow well.
Reduce feature map size by factorized pointwise (stride=2).
Standard conv: ReLU - Conv - BN
Copyright (c) OpenMMLab. All rights reserved. If previous cell is reduction cell, current input size does not match with output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing. generate dag s0, s1 are the outputs of previous previous cell and previous cell, respectively. 2x2 out 1x1 out for the first cell, stem is used for both s0 and s1 [!] prev_prev_channels and prev_channels is output channel size, but c_cur is input channel size. Reduce featuremap size and double channels in 1/3 and 2/3 layer. | 691 | en | 0.892811 |
"""Project signals"""
import logging
import django.dispatch
from django.contrib import messages
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from readthedocs.oauth.services import registry
# Signals fired around version-control checkout and build steps; receivers
# are passed the version being processed.
before_vcs = django.dispatch.Signal(providing_args=["version"])
after_vcs = django.dispatch.Signal(providing_args=["version"])
before_build = django.dispatch.Signal(providing_args=["version"])
after_build = django.dispatch.Signal(providing_args=["version"])
# Fired when a project is imported; the sender is the project itself.
project_import = django.dispatch.Signal(providing_args=["project"])
# Module-level logger.
log = logging.getLogger(__name__)
@receiver(project_import)
def handle_project_import(sender, **kwargs):
    """Add post-commit hook on project import"""
    project = sender
    request = kwargs.get('request')
    for service_cls in registry:
        # Skip services that don't apply to this project.
        if not service_cls.is_project_service(project):
            continue
        service = service_cls.for_user(request.user)
        if service is None:
            continue
        if service.setup_webhook(project):
            messages.success(request, _('Webhook activated'))
        else:
            messages.error(request, _('Webhook configuration failed'))
| readthedocs/projects/signals.py | 1,186 | Add post-commit hook on project import
Project signals | 54 | en | 0.570502 |
#Author: Sepehr Roudini.
#Date: 02/05/2018.
#University of Iowa.
#Department of Chemical Engineering.
#Purpose: Calculating mean and Std
#--------------------------------------------------------------------------------------------#
#Defining function and importing necessary libraries.
#--------------------------------------------------------------------------------------------#
##############################################################################################
#Libraries used in this function are: numpy and math.
##############################################################################################
#Data: A 1d array of data.
##############################################################################################
#This functions returnes mean and standard
#deviation of data.
##############################################################################################
def Calculate_Mean_Std(Data):
    """Return the mean and sample standard deviation of a 1d array of data.

    Data: 1d array-like of numbers.
    Returns (mean, std), where std uses the n-1 (Bessel-corrected)
    denominator — identical to sqrt(sum((x - mean)^2) / (n - 1)).
    """
    # numpy is for data manipulation.
    import numpy as np
    # Convert once so integer input is handled the same as float input.
    data = np.asarray(Data, dtype=float)
    # Use numpy's built-in statistics instead of hand-rolled formulas;
    # ddof=1 selects the sample (n-1) standard deviation.
    mean = data.mean()
    std = data.std(ddof=1)
    return mean, std
#--------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------#
| Mean_Std_Calculation.py | 1,795 | Author: Sepehr Roudini.Date: 02/05/2018.University of Iowa.Department of Chemical Engineering.Purpose: Calculating mean and Std--------------------------------------------------------------------------------------------Defining function and importing necessary libraries.--------------------------------------------------------------------------------------------Libraries used in this function are: numpy and math.Data: A 1d array of data.This functions returnes mean and standarddeviation of data.numpy is for data manipulationt------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------Preparing data and quantile calculation--------------------------------------------------------------------------------------------Calculating meanCalculating standard deviation---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 1,167 | en | 0.294317 |
"""Users serializers"""
# Django
from django.conf import settings
from django.contrib.auth import password_validation, authenticate
from django.core.validators import RegexValidator
# Serializers
from cride.users.serializers.profiles import ProfileModelSerializer
# Django REST Framework
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from rest_framework.validators import UniqueValidator
# Models
from cride.users.models import User, Profile
# Task
from cride.taskapp.task import send_confirmation_email
# Utilities
import jwt
class UserModelSerializer(serializers.ModelSerializer):
    """User model serializer"""
    # Nested read-only representation of the user's profile.
    profile = ProfileModelSerializer(read_only=True)
    class Meta:
        """Meta class."""
        model = User
        fields = (
            'username',
            'first_name',
            'last_name',
            'email',
            'phone_number',
            'profile'
        )
class UserSignUpSerializer(serializers.Serializer):
    """User sign up serializer.
    Handle sign up data validation and user/profile creation.
    """
    email = serializers.EmailField(
        validators=[UniqueValidator(queryset=User.objects.all())]
    )
    username = serializers.CharField(
        min_length=4,
        max_length=20,
        validators=[UniqueValidator(queryset=User.objects.all())]
    )
    # Phone number
    phone_regex = RegexValidator(
        regex=r'\+?1?\d{9,15}$',
        message="Phone number must be entered in the format: +999999999. Up to 15 digits allowed."
    )
    phone_number = serializers.CharField(validators=[phone_regex])
    # Password
    password = serializers.CharField(min_length=8, max_length=64)
    password_confirmation = serializers.CharField(min_length=8, max_length=64)
    # Name
    first_name = serializers.CharField(min_length=2, max_length=30)
    last_name = serializers.CharField(min_length=2, max_length=30)
    def validate(self, data):
        """Verify passwords match and satisfy Django's password validators."""
        passwd = data['password']
        passwd_conf = data['password_confirmation']
        if passwd != passwd_conf:
            raise serializers.ValidationError("Passwords don't match.")
        password_validation.validate_password(passwd)
        return data
    def create(self, data):
        """Create an unverified user with a profile and email a confirmation.

        Returns the created user.
        """
        data.pop('password_confirmation')
        user = User.objects.create_user(**data, is_verified=False, is_client=True)
        # The profile only needs to exist; no reference to it is kept here.
        Profile.objects.create(user=user)
        # Sent asynchronously via the task queue.
        send_confirmation_email.delay(user_pk=user.pk)
        return user
class UserLoginSerializer(serializers.Serializer):
    """User Login serializer
    Handle the login request data.
    """
    email = serializers.EmailField()
    password = serializers.CharField(min_length=8, max_length=64)
    def validate(self, data):
        """Check credentials"""
        user = authenticate(username=data['email'], password=data['password'])
        if user is None:
            raise serializers.ValidationError('Invalid credentials')
        if not user.is_verified:
            raise serializers.ValidationError('Account is not active yet')
        # Stash the authenticated user for create().
        self.context['user'] = user
        return data
    def create(self, data):
        """Generate or retrieve new token"""
        user = self.context['user']
        token, _created = Token.objects.get_or_create(user=user)
        return user, token.key
class AccountVerificationSerializer(serializers.Serializer):
    """Account verification serializer"""
    token = serializers.CharField()
    def validate_token(self, data):
        """Decode the JWT and ensure it is an email-confirmation token."""
        try:
            payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])
        except jwt.ExpiredSignatureError:
            raise serializers.ValidationError('Verification link has expired.')
        except jwt.exceptions.PyJWTError:
            raise serializers.ValidationError('Invalid token')
        if payload['type'] != 'email_confirmation':
            raise serializers.ValidationError('Invalid token')
        # Stash the decoded payload for save().
        self.context['payload'] = payload
        return data
    def save(self):
        """Mark the token's user as verified."""
        username = self.context['payload']['user']
        user = User.objects.get(username=username)
        user.is_verified = True
        user.save()
| cride/users/serializers/users.py | 4,371 | Account verification serializer
Meta class.
User Login serializer
Handle the login request data.
User model serializer
User sign up serializer.
Handle sign up data validation and user/profile creation.
Handle user and profile creation.
Generate or retrieve new token
Update user's verified status
Verify passwords match.
Check credentials
Verify token is valid
Users serializers
Django Serializers Django REST Framework Models Task Utilities Phone number Password Name | 470 | en | 0.613533 |
import pyomo.environ as pe
import romodel as ro
# Index sets for the pooling problem instance.
feeds = range(5)
products = range(4)
pools = range(2)
qualities = range(4)
# Network topology: which feeds enter which pools, which pools serve which
# products, and direct feed-to-product arcs (none in this instance).
con_feed_pool = [(0, 0), (1, 0), (2, 0), (3, 1), (4, 1)]
con_pool_prod = [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3)]
con_feed_prod = []
# Prices, per-feed flow bounds, pool capacities and product demand bounds.
price_product = [16, 25, 15, 10]
price_feed = [7, 3, 2, 10, 5]
max_flow = [float('inf'), float('inf'), float('inf'), float('inf'), float('inf')]
min_flow = [0, 0, 0, 0, 0]
pool_size = [float('inf'), float('inf')]
max_demand = [10, 25, 30, 10]
min_demand = [0, 0, 0, 0]
# feed_cons[i][k]: concentration of quality k in feed i;
# max_cons/min_cons[j][k]: allowed concentration range for product j.
feed_cons = [[1.0, 6.0, 4.0, 0.5],
             [4.0, 1.0, 3.0, 2.0],
             [4.0, 5.5, 3.0, 0.9],
             [3.0, 3.0, 3.0, 1.0],
             [1.0, 2.7, 4.0, 1.6]]
max_cons = [[3.00, 3.00, 3.25, 0.75],
            [4.00, 2.50, 3.50, 1.50],
            [1.50, 5.50, 3.90, 0.80],
            [3.00, 4.00, 4.00, 1.80]]
min_cons = [[0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0]]
m = pe.ConcreteModel()
# q[i, l]: fraction of pool l drawn from feed i (bounded in [0, 1]);
# y[l, j]: flow from pool l to product j; z[i, j]: direct feed-product flow.
m.q = pe.Var(con_feed_pool, bounds=(0, 1))
m.y = pe.Var(con_pool_prod, within=pe.NonNegativeReals)
m.z = pe.Var(con_feed_prod, within=pe.NonNegativeReals)
# Ellipsoidal uncertainty set around the nominal product prices
# (sum of squared deviations bounded by 0.1).
m.U = ro.UncSet()
m.price_product = ro.UncParam(products, nominal=price_product, uncset=m.U)
expr = 0
for j in products:
    expr += (m.price_product[j] - price_product[j])**2
m.U.c = pe.Constraint(expr=expr <= 0.1)
# From here on, price_product refers to the uncertain parameter.
price_product = m.price_product
# Objective: feed cost minus product revenue, minimized (profit maximization).
obj = 0
for i, l in con_feed_pool:
    for j in [jj for ll, jj in con_pool_prod if ll == l]:
        # NOTE(review): price_feed is indexed by feed, but j is a product
        # index here — price_feed[i] looks like the intended cost term;
        # confirm against the original formulation.
        obj += price_feed[j]*m.y[(l, j)]*m.q[i, l]
for l, j in con_pool_prod:
    obj -= price_product[j]*m.y[(l, j)]
for i, j in con_feed_prod:
    obj -= (price_product[j] - price_feed[i])*m.z[(i, j)]
m.obj = pe.Objective(expr=obj, sense=pe.minimize)
# Feed availability
def feed_availability_rule(m, i):
    """Keep the total flow drawn from feed i within its availability bounds.

    Flow from feed i is the sum of its contributions routed through pools
    (fraction q[i, l] of each pool-to-product flow y[l, j]) plus any direct
    feed-to-product flows z[i, j].
    """
    expr = 0
    for l in [ll for ii, ll in con_feed_pool if ii == i]:
        for j in [jj for ll, jj in con_pool_prod if ll == l]:
            expr += m.q[(i, l)]*m.y[(l, j)]
    # Bug fix: this term previously read m.z[(i, l)], indexing z with a pool
    # id left over from the loop above while the loop variable j went unused;
    # z is indexed by (feed, product) pairs, so the product index j is needed.
    for j in [jj for ii, jj in con_feed_prod if ii == i]:
        expr += m.z[(i, j)]
    return min_flow[i], expr, max_flow[i]
m.feed_availability = pe.Constraint(feeds, rule=feed_availability_rule)
# Pool capacity
def pool_capacity_rule(m, l):
    """Total flow leaving pool l must stay within the pool's capacity."""
    outgoing = [prod for pool, prod in con_pool_prod if pool == l]
    total_outflow = sum(m.y[(l, prod)] for prod in outgoing)
    return None, total_outflow, pool_size[l]
m.pool_capacity = pe.Constraint(pools, rule=pool_capacity_rule)
# Product demand
def prod_demand_rule(m, j):
    """Flow delivered to product j must lie within its demand bounds."""
    via_pools = sum(m.y[(pool, j)] for pool, prod in con_pool_prod if prod == j)
    direct = sum(m.z[(feed, j)] for feed, prod in con_feed_prod if prod == j)
    return min_demand[j], via_pools + direct, max_demand[j]
m.product_demand = pe.Constraint(products, rule=prod_demand_rule)
# Simplex
def simplex_rule(m, l):
    """Feed fractions entering pool l must form a convex combination."""
    members = [index for index in m.q if index[1] == l]
    return pe.quicksum(m.q[index] for index in members) == 1
m.simplex = pe.Constraint(pools, rule=simplex_rule)
# Product quality
def prod_quality_rule_upper(m, j, k):
    """Quality k of the blend reaching product j must not exceed its cap.

    Both sides are multiplied by the total inflow to product j, keeping the
    constraint free of division by flow.
    """
    quality_mass = 0
    total_inflow = 0
    for pool, prod in con_pool_prod:
        if prod != j:
            continue
        total_inflow += m.y[pool, j]
        for feed, pl in con_feed_pool:
            if pl == pool:
                quality_mass += feed_cons[feed][k]*m.q[(feed, pool)]*m.y[(pool, j)]
    for feed, prod in con_feed_prod:
        if prod != j:
            continue
        total_inflow += m.z[feed, j]
        quality_mass += feed_cons[feed][k]*m.z[(feed, j)]
    return quality_mass <= max_cons[j][k]*total_inflow
def prod_quality_rule_lower(m, j, k):
    """Quality k of the blend reaching product j must meet its minimum.

    Mirror image of the upper-bound rule: both sides are scaled by the
    total inflow to product j to avoid dividing by flow.
    """
    quality_mass = 0
    total_inflow = 0
    for pool, prod in con_pool_prod:
        if prod != j:
            continue
        total_inflow += m.y[pool, j]
        for feed, pl in con_feed_pool:
            if pl == pool:
                quality_mass += feed_cons[feed][k]*m.q[(feed, pool)]*m.y[(pool, j)]
    for feed, prod in con_feed_prod:
        if prod != j:
            continue
        total_inflow += m.z[feed, j]
        quality_mass += feed_cons[feed][k]*m.z[(feed, j)]
    return min_cons[j][k]*total_inflow <= quality_mass
# Register the quality bounds over all (product, quality) pairs.
m.prod_quality_upper = pe.Constraint(products, qualities,
                                     rule=prod_quality_rule_upper)
m.prod_quality_lower = pe.Constraint(products, qualities,
                                     rule=prod_quality_rule_lower)
# Solve via romodel's cutting-plane reformulation.
# NOTE(review): 'NonConvex' looks like a pass-through option for the
# underlying solver (needed for the bilinear q*y terms) — confirm the
# configured solver accepts it.
solver = pe.SolverFactory('romodel.cuts')
solver.options['NonConvex'] = 2
solver.solve(m, tee=True)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
# App metadata read by the Frappe framework when the app is installed.
app_name = "custom_scripts"
app_title = "Custom Scripts"
app_publisher = "C.R.I.O"
app_description = "For custom scripts"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "criogroups@gmail.com"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/custom_scripts/css/custom_scripts.css"
# app_include_js = "/assets/custom_scripts/js/custom_scripts.js"
# include js, css files in header of web template
# web_include_css = "/assets/custom_scripts/css/custom_scripts.css"
# web_include_js = "/assets/custom_scripts/js/custom_scripts.js"
# include custom scss in every website theme (without file extension ".scss")
# website_theme_scss = "custom_scripts/public/scss/website"
# include js, css files in header of web form
# webform_include_js = {"doctype": "public/js/doctype.js"}
# webform_include_css = {"doctype": "public/css/doctype.css"}
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# Load a custom client script on the Sales Invoice form.
doctype_js = {"Sales Invoice" : "custom_scripts/custom/js/sales_invoice.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "custom_scripts.install.before_install"
# after_install = "custom_scripts.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "custom_scripts.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# DocType Class
# ---------------
# Override standard doctype classes
override_doctype_class = {
    #"Employee Advance": "custom_scripts.custom_scripts.custom.auto_additional_salary.ERPNextEmployeeAdvance",
    # Swap in a customized controller for POS invoice consolidation.
    "POS Invoice Merge Log": "custom_scripts.custom_scripts.custom.sales_invoice.ERPNextPOSInvoiceMergeLog"
}
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "custom_scripts.tasks.all"
# ],
# "daily": [
# "custom_scripts.tasks.daily"
# ],
# "hourly": [
# "custom_scripts.tasks.hourly"
# ],
# "weekly": [
# "custom_scripts.tasks.weekly"
# ]
# "monthly": [
# "custom_scripts.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "custom_scripts.install.before_tests"
# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "custom_scripts.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "custom_scripts.task.get_dashboard_data"
# }
# exempt linked doctypes from being automatically cancelled
#
# auto_cancel_exempted_doctypes = ["Auto Repeat"]
# User Data Protection
# --------------------
# NOTE(review): the entries below still contain the boilerplate placeholders
# ({doctype_1}, {filter_by}, ...) from the Frappe app template. Replace them
# with real doctype/field names or remove this setting — as written, the
# personal-data export/deletion hooks would reference doctypes that do not
# exist.
user_data_fields = [
    {
        "doctype": "{doctype_1}",
        "filter_by": "{filter_by}",
        "redact_fields": ["{field_1}", "{field_2}"],
        "partial": 1,
    },
    {
        "doctype": "{doctype_2}",
        "filter_by": "{filter_by}",
        "partial": 1,
    },
    {
        "doctype": "{doctype_3}",
        "strict": False,
    },
    {
        "doctype": "{doctype_4}"
    }
]
| custom_scripts/hooks.py | 4,188 | -*- coding: utf-8 -*- Includes in <head> ------------------ include js, css files in header of desk.html app_include_css = "/assets/custom_scripts/css/custom_scripts.css" app_include_js = "/assets/custom_scripts/js/custom_scripts.js" include js, css files in header of web template web_include_css = "/assets/custom_scripts/css/custom_scripts.css" web_include_js = "/assets/custom_scripts/js/custom_scripts.js" include custom scss in every website theme (without file extension ".scss") website_theme_scss = "custom_scripts/public/scss/website" include js, css files in header of web form webform_include_js = {"doctype": "public/js/doctype.js"} webform_include_css = {"doctype": "public/css/doctype.css"} include js in page page_js = {"page" : "public/js/file.js"} include js in doctype views doctype_list_js = {"doctype" : "public/js/doctype_list.js"} doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"} doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"} Home Pages ---------- application home page (will override Website Settings) home_page = "login" website user home page (by Role) role_home_page = { "Role": "home_page" } Generators ---------- automatically create page for each record of this doctype website_generators = ["Web Page"] Installation ------------ before_install = "custom_scripts.install.before_install" after_install = "custom_scripts.install.after_install" Desk Notifications ------------------ See frappe.core.notifications.get_notification_config notification_config = "custom_scripts.notifications.get_notification_config" Permissions ----------- Permissions evaluated in scripted ways permission_query_conditions = { "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions", } has_permission = { "Event": "frappe.desk.doctype.event.event.has_permission", } DocType Class --------------- Override standard doctype classes"Employee Advance": 
"custom_scripts.custom_scripts.custom.auto_additional_salary.ERPNextEmployeeAdvance", Document Events --------------- Hook on document methods and events doc_events = { "*": { "on_update": "method", "on_cancel": "method", "on_trash": "method" } } Scheduled Tasks --------------- scheduler_events = { "all": [ "custom_scripts.tasks.all" ], "daily": [ "custom_scripts.tasks.daily" ], "hourly": [ "custom_scripts.tasks.hourly" ], "weekly": [ "custom_scripts.tasks.weekly" ] "monthly": [ "custom_scripts.tasks.monthly" ] } Testing ------- before_tests = "custom_scripts.install.before_tests" Overriding Methods ------------------------------ override_whitelisted_methods = { "frappe.desk.doctype.event.event.get_events": "custom_scripts.event.get_events" } each overriding function accepts a `data` argument; generated from the base implementation of the doctype dashboard, along with any modifications made in other Frappe apps override_doctype_dashboards = { "Task": "custom_scripts.task.get_dashboard_data" } exempt linked doctypes from being automatically cancelled auto_cancel_exempted_doctypes = ["Auto Repeat"] User Data Protection -------------------- | 3,096 | en | 0.583827 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.np_box_list_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from object_detection.utils import np_box_list
from object_detection.utils import np_box_list_ops
class AreaRelatedTest(tf.test.TestCase):
    """Tests for geometric BoxList ops: area, overlap, scaling, clipping."""
    def setUp(self):
        # Two small box lists of corner-encoded 4-vectors (2 and 3 boxes).
        boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]], dtype=float)
        boxes2 = np.array(
            [[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]],
            dtype=float,
        )
        self.boxlist1 = np_box_list.BoxList(boxes1)
        self.boxlist2 = np_box_list.BoxList(boxes2)
    def test_area(self):
        """area() returns the per-box area (height * width)."""
        areas = np_box_list_ops.area(self.boxlist1)
        expected_areas = np.array([6.0, 5.0], dtype=float)
        self.assertAllClose(expected_areas, areas)
    def test_intersection(self):
        """intersection() returns the pairwise intersection-area matrix."""
        intersection = np_box_list_ops.intersection(self.boxlist1, self.boxlist2)
        expected_intersection = np.array(
            [[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]], dtype=float
        )
        self.assertAllClose(intersection, expected_intersection)
    def test_iou(self):
        """iou() is intersection over union for every box pair."""
        iou = np_box_list_ops.iou(self.boxlist1, self.boxlist2)
        expected_iou = np.array(
            [[2.0 / 16.0, 0.0, 6.0 / 400.0], [1.0 / 16.0, 0.0, 5.0 / 400.0]],
            dtype=float,
        )
        self.assertAllClose(iou, expected_iou)
    def test_ioa(self):
        """ioa() normalizes intersection by one list's box areas (asymmetric)."""
        boxlist1 = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        boxlist2 = np_box_list.BoxList(
            np.array([[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32)
        )
        ioa21 = np_box_list_ops.ioa(boxlist2, boxlist1)
        expected_ioa21 = np.array([[0.5, 0.0], [1.0, 1.0]], dtype=np.float32)
        self.assertAllClose(ioa21, expected_ioa21)
    def test_scale(self):
        """scale() multiplies coordinates per axis (here x2 and x3)."""
        boxlist = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        boxlist_scaled = np_box_list_ops.scale(boxlist, 2.0, 3.0)
        expected_boxlist_scaled = np_box_list.BoxList(
            np.array([[0.5, 0.75, 1.5, 2.25], [0.0, 0.0, 1.0, 2.25]], dtype=np.float32)
        )
        self.assertAllClose(expected_boxlist_scaled.get(), boxlist_scaled.get())
    def test_clip_to_window(self):
        """clip_to_window() truncates boxes to the window's extent."""
        boxlist = np_box_list.BoxList(
            np.array(
                [
                    [0.25, 0.25, 0.75, 0.75],
                    [0.0, 0.0, 0.5, 0.75],
                    [-0.2, -0.3, 0.7, 1.5],
                ],
                dtype=np.float32,
            )
        )
        boxlist_clipped = np_box_list_ops.clip_to_window(boxlist, [0.0, 0.0, 1.0, 1.0])
        expected_boxlist_clipped = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75], [0.0, 0.0, 0.7, 1.0]],
                dtype=np.float32,
            )
        )
        self.assertAllClose(expected_boxlist_clipped.get(), boxlist_clipped.get())
    def test_prune_outside_window(self):
        """prune_outside_window() drops boxes not fully inside the window."""
        boxlist = np_box_list.BoxList(
            np.array(
                [
                    [0.25, 0.25, 0.75, 0.75],
                    [0.0, 0.0, 0.5, 0.75],
                    [-0.2, -0.3, 0.7, 1.5],
                ],
                dtype=np.float32,
            )
        )
        boxlist_pruned, _ = np_box_list_ops.prune_outside_window(
            boxlist, [0.0, 0.0, 1.0, 1.0]
        )
        expected_boxlist_pruned = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        self.assertAllClose(expected_boxlist_pruned.get(), boxlist_pruned.get())
    def test_concatenate(self):
        """concatenate() stacks the boxes of several BoxLists in order."""
        boxlist1 = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        boxlist2 = np_box_list.BoxList(
            np.array([[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32)
        )
        boxlists = [boxlist1, boxlist2]
        boxlist_concatenated = np_box_list_ops.concatenate(boxlists)
        boxlist_concatenated_expected = np_box_list.BoxList(
            np.array(
                [
                    [0.25, 0.25, 0.75, 0.75],
                    [0.0, 0.0, 0.5, 0.75],
                    [0.5, 0.25, 1.0, 1.0],
                    [0.0, 0.0, 1.0, 1.0],
                ],
                dtype=np.float32,
            )
        )
        self.assertAllClose(
            boxlist_concatenated_expected.get(), boxlist_concatenated.get()
        )
    def test_change_coordinate_frame(self):
        """change_coordinate_frame() re-expresses boxes relative to a window."""
        boxlist = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        boxlist_coord = np_box_list_ops.change_coordinate_frame(
            boxlist, np.array([0, 0, 0.5, 0.5], dtype=np.float32)
        )
        expected_boxlist_coord = np_box_list.BoxList(
            np.array([[0.5, 0.5, 1.5, 1.5], [0, 0, 1.0, 1.5]], dtype=np.float32)
        )
        self.assertAllClose(boxlist_coord.get(), expected_boxlist_coord.get())
    def test_filter_scores_greater_than(self):
        """filter_scores_greater_than() keeps only boxes above the threshold."""
        boxlist = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        boxlist.add_field("scores", np.array([0.8, 0.2], np.float32))
        boxlist_greater = np_box_list_ops.filter_scores_greater_than(boxlist, 0.5)
        expected_boxlist_greater = np_box_list.BoxList(
            np.array([[0.25, 0.25, 0.75, 0.75]], dtype=np.float32)
        )
        self.assertAllClose(boxlist_greater.get(), expected_boxlist_greater.get())
class GatherOpsTest(tf.test.TestCase):
    """Tests for gather(): selecting boxes (and their fields) by index."""
    def setUp(self):
        # Three boxes with per-box 'scores' and one-hot-style 'labels' fields.
        boxes = np.array(
            [[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]],
            dtype=float,
        )
        self.boxlist = np_box_list.BoxList(boxes)
        self.boxlist.add_field("scores", np.array([0.5, 0.7, 0.9], dtype=float))
        self.boxlist.add_field(
            "labels",
            np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]], dtype=int),
        )
    def test_gather_with_out_of_range_indices(self):
        """Indices beyond the number of boxes must raise ValueError."""
        indices = np.array([3, 1], dtype=int)
        boxlist = self.boxlist
        with self.assertRaises(ValueError):
            np_box_list_ops.gather(boxlist, indices)
    def test_gather_with_invalid_multidimensional_indices(self):
        """Only rank-1 index arrays are accepted."""
        indices = np.array([[0, 1], [1, 2]], dtype=int)
        boxlist = self.boxlist
        with self.assertRaises(ValueError):
            np_box_list_ops.gather(boxlist, indices)
    def test_gather_without_fields_specified(self):
        """By default gather() reorders the boxes and every extra field."""
        indices = np.array([2, 0, 1], dtype=int)
        boxlist = self.boxlist
        subboxlist = np_box_list_ops.gather(boxlist, indices)
        expected_scores = np.array([0.9, 0.5, 0.7], dtype=float)
        self.assertAllClose(expected_scores, subboxlist.get_field("scores"))
        expected_boxes = np.array(
            [[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]],
            dtype=float,
        )
        self.assertAllClose(expected_boxes, subboxlist.get())
        expected_labels = np.array(
            [[0, 0, 0, 0, 1], [0, 0, 0, 1, 0], [0, 1, 0, 0, 0]], dtype=int
        )
        self.assertAllClose(expected_labels, subboxlist.get_field("labels"))
    def test_gather_with_invalid_field_specified(self):
        """A non-list fields argument or an unknown field name is rejected."""
        indices = np.array([2, 0, 1], dtype=int)
        boxlist = self.boxlist
        with self.assertRaises(ValueError):
            np_box_list_ops.gather(boxlist, indices, "labels")
        with self.assertRaises(ValueError):
            np_box_list_ops.gather(boxlist, indices, ["objectness"])
    def test_gather_with_fields_specified(self):
        """With an explicit fields list, only those fields are carried over."""
        indices = np.array([2, 0, 1], dtype=int)
        boxlist = self.boxlist
        subboxlist = np_box_list_ops.gather(boxlist, indices, ["labels"])
        self.assertFalse(subboxlist.has_field("scores"))
        expected_boxes = np.array(
            [[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]],
            dtype=float,
        )
        self.assertAllClose(expected_boxes, subboxlist.get())
        expected_labels = np.array(
            [[0, 0, 0, 0, 1], [0, 0, 0, 1, 0], [0, 1, 0, 0, 0]], dtype=int
        )
        self.assertAllClose(expected_labels, subboxlist.get_field("labels"))
class SortByFieldTest(tf.test.TestCase):
    """Tests for sort_by_field(): ordering boxes by a scalar field."""
    def setUp(self):
        # Scores are deliberately unordered (0.5, 0.9, 0.4) to exercise sorting.
        boxes = np.array(
            [[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]],
            dtype=float,
        )
        self.boxlist = np_box_list.BoxList(boxes)
        self.boxlist.add_field("scores", np.array([0.5, 0.9, 0.4], dtype=float))
        self.boxlist.add_field(
            "labels",
            np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]], dtype=int),
        )
    def test_with_invalid_field(self):
        """Sorting requires an existing scalar (rank-1) field."""
        with self.assertRaises(ValueError):
            np_box_list_ops.sort_by_field(self.boxlist, "objectness")
        with self.assertRaises(ValueError):
            np_box_list_ops.sort_by_field(self.boxlist, "labels")
    def test_with_invalid_sorting_order(self):
        """The order argument must be a SortOrder value, not a string."""
        with self.assertRaises(ValueError):
            np_box_list_ops.sort_by_field(self.boxlist, "scores", "Descending")
    def test_with_descending_sorting(self):
        """Default order is descending by field value."""
        sorted_boxlist = np_box_list_ops.sort_by_field(self.boxlist, "scores")
        expected_boxes = np.array(
            [[14.0, 14.0, 15.0, 15.0], [3.0, 4.0, 6.0, 8.0], [0.0, 0.0, 20.0, 20.0]],
            dtype=float,
        )
        self.assertAllClose(expected_boxes, sorted_boxlist.get())
        expected_scores = np.array([0.9, 0.5, 0.4], dtype=float)
        self.assertAllClose(expected_scores, sorted_boxlist.get_field("scores"))
    def test_with_ascending_sorting(self):
        """SortOrder.ASCEND sorts from smallest to largest field value."""
        sorted_boxlist = np_box_list_ops.sort_by_field(
            self.boxlist, "scores", np_box_list_ops.SortOrder.ASCEND
        )
        expected_boxes = np.array(
            [[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],],
            dtype=float,
        )
        self.assertAllClose(expected_boxes, sorted_boxlist.get())
        expected_scores = np.array([0.4, 0.5, 0.9], dtype=float)
        self.assertAllClose(expected_scores, sorted_boxlist.get_field("scores"))
class NonMaximumSuppressionTest(tf.test.TestCase):
    """Tests for single- and multi-class non-maximum suppression."""
    def setUp(self):
        # Three clusters of overlapping boxes: two near the origin, two
        # around x=10, and a lone box around x=100.
        self._boxes = np.array(
            [
                [0, 0, 1, 1],
                [0, 0.1, 1, 1.1],
                [0, -0.1, 1, 0.9],
                [0, 10, 1, 11],
                [0, 10.1, 1, 11.1],
                [0, 100, 1, 101],
            ],
            dtype=float,
        )
        self._boxlist = np_box_list.BoxList(self._boxes)
    def test_with_no_scores_field(self):
        """NMS requires a 'scores' field; without one a ValueError is raised."""
        boxlist = np_box_list.BoxList(self._boxes)
        max_output_size = 3
        iou_threshold = 0.5
        with self.assertRaises(ValueError):
            np_box_list_ops.non_max_suppression(boxlist, max_output_size, iou_threshold)
    def test_nms_disabled_max_output_size_equals_three(self):
        """With iou_threshold=1.0 nothing is suppressed; top-3 by score remain."""
        boxlist = np_box_list.BoxList(self._boxes)
        boxlist.add_field(
            "scores", np.array([0.9, 0.75, 0.6, 0.95, 0.2, 0.3], dtype=float)
        )
        max_output_size = 3
        iou_threshold = 1.0  # No NMS
        expected_boxes = np.array(
            [[0, 10, 1, 11], [0, 0, 1, 1], [0, 0.1, 1, 1.1]], dtype=float
        )
        nms_boxlist = np_box_list_ops.non_max_suppression(
            boxlist, max_output_size, iou_threshold
        )
        self.assertAllClose(nms_boxlist.get(), expected_boxes)
    def test_select_from_three_clusters(self):
        """At iou_threshold=0.5 one representative per cluster survives."""
        boxlist = np_box_list.BoxList(self._boxes)
        boxlist.add_field(
            "scores", np.array([0.9, 0.75, 0.6, 0.95, 0.2, 0.3], dtype=float)
        )
        max_output_size = 3
        iou_threshold = 0.5
        expected_boxes = np.array(
            [[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]], dtype=float
        )
        nms_boxlist = np_box_list_ops.non_max_suppression(
            boxlist, max_output_size, iou_threshold
        )
        self.assertAllClose(nms_boxlist.get(), expected_boxes)
    def test_select_at_most_two_from_three_clusters(self):
        """max_output_size=2 truncates the result to the two best clusters."""
        boxlist = np_box_list.BoxList(self._boxes)
        boxlist.add_field(
            "scores", np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3], dtype=float)
        )
        max_output_size = 2
        iou_threshold = 0.5
        expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1]], dtype=float)
        nms_boxlist = np_box_list_ops.non_max_suppression(
            boxlist, max_output_size, iou_threshold
        )
        self.assertAllClose(nms_boxlist.get(), expected_boxes)
    def test_select_at_most_thirty_from_three_clusters(self):
        """A large max_output_size returns every surviving representative."""
        boxlist = np_box_list.BoxList(self._boxes)
        boxlist.add_field(
            "scores", np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3], dtype=float)
        )
        max_output_size = 30
        iou_threshold = 0.5
        expected_boxes = np.array(
            [[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]], dtype=float
        )
        nms_boxlist = np_box_list_ops.non_max_suppression(
            boxlist, max_output_size, iou_threshold
        )
        self.assertAllClose(nms_boxlist.get(), expected_boxes)
    def test_select_from_ten_indentical_boxes(self):
        """Identical boxes collapse to a single survivor."""
        boxes = np.array(10 * [[0, 0, 1, 1]], dtype=float)
        boxlist = np_box_list.BoxList(boxes)
        boxlist.add_field("scores", np.array(10 * [0.8]))
        iou_threshold = 0.5
        max_output_size = 3
        expected_boxes = np.array([[0, 0, 1, 1]], dtype=float)
        nms_boxlist = np_box_list_ops.non_max_suppression(
            boxlist, max_output_size, iou_threshold
        )
        self.assertAllClose(nms_boxlist.get(), expected_boxes)
    def test_different_iou_threshold(self):
        """Raising the IOU threshold suppresses progressively fewer boxes."""
        boxes = np.array(
            [
                [0, 0, 20, 100],
                [0, 0, 20, 80],
                [200, 200, 210, 300],
                [200, 200, 210, 250],
            ],
            dtype=float,
        )
        boxlist = np_box_list.BoxList(boxes)
        boxlist.add_field("scores", np.array([0.9, 0.8, 0.7, 0.6]))
        max_output_size = 4
        # 0.4: both pairs overlap above threshold, one survivor each.
        iou_threshold = 0.4
        expected_boxes = np.array([[0, 0, 20, 100], [200, 200, 210, 300],], dtype=float)
        nms_boxlist = np_box_list_ops.non_max_suppression(
            boxlist, max_output_size, iou_threshold
        )
        self.assertAllClose(nms_boxlist.get(), expected_boxes)
        # 0.5: the second pair (IOU 0.5) is no longer suppressed.
        iou_threshold = 0.5
        expected_boxes = np.array(
            [[0, 0, 20, 100], [200, 200, 210, 300], [200, 200, 210, 250]], dtype=float
        )
        nms_boxlist = np_box_list_ops.non_max_suppression(
            boxlist, max_output_size, iou_threshold
        )
        self.assertAllClose(nms_boxlist.get(), expected_boxes)
        # 0.8: nothing exceeds the threshold, so all boxes survive.
        iou_threshold = 0.8
        expected_boxes = np.array(
            [
                [0, 0, 20, 100],
                [0, 0, 20, 80],
                [200, 200, 210, 300],
                [200, 200, 210, 250],
            ],
            dtype=float,
        )
        nms_boxlist = np_box_list_ops.non_max_suppression(
            boxlist, max_output_size, iou_threshold
        )
        self.assertAllClose(nms_boxlist.get(), expected_boxes)
    def test_multiclass_nms(self):
        """Multi-class NMS thresholds scores per class, then runs NMS per class."""
        boxlist = np_box_list.BoxList(
            np.array(
                [[0.2, 0.4, 0.8, 0.8], [0.4, 0.2, 0.8, 0.8], [0.6, 0.0, 1.0, 1.0]],
                dtype=np.float32,
            )
        )
        # scores[i][c]: score of box i for class c; entries below the 0.25
        # score threshold are dropped before suppression.
        scores = np.array(
            [
                [-0.2, 0.1, 0.5, -0.4, 0.3],
                [0.7, -0.7, 0.6, 0.2, -0.9],
                [0.4, 0.34, -0.9, 0.2, 0.31],
            ],
            dtype=np.float32,
        )
        boxlist.add_field("scores", scores)
        boxlist_clean = np_box_list_ops.multi_class_non_max_suppression(
            boxlist, score_thresh=0.25, iou_thresh=0.1, max_output_size=3
        )
        scores_clean = boxlist_clean.get_field("scores")
        classes_clean = boxlist_clean.get_field("classes")
        boxes = boxlist_clean.get()
        expected_scores = np.array([0.7, 0.6, 0.34, 0.31])
        expected_classes = np.array([0, 2, 1, 4])
        expected_boxes = np.array(
            [
                [0.4, 0.2, 0.8, 0.8],
                [0.4, 0.2, 0.8, 0.8],
                [0.6, 0.0, 1.0, 1.0],
                [0.6, 0.0, 1.0, 1.0],
            ],
            dtype=np.float32,
        )
        self.assertAllClose(scores_clean, expected_scores)
        self.assertAllClose(classes_clean, expected_classes)
        self.assertAllClose(boxes, expected_boxes)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    tf.test.main()
| base2designs/utils/np_box_list_ops_test.py | 17,894 | Tests for object_detection.utils.np_box_list_ops.
Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== No NMS | 719 | en | 0.821082 |
# Generated by Django 3.1.7 on 2021-03-10 03:37
from django.db import migrations, models
class Migration(migrations.Migration):
    # NOTE(review): the default 'P27fc1' looks like a value that was generated
    # once at makemigrations time, so every row touched by this migration gets
    # the same primary-key default — confirm this is intended before running
    # it against populated tables.
    dependencies = [
        ('Admins', '0036_auto_20210310_0337'),
    ]
    operations = [
        migrations.AlterField(
            model_name='createpractioner',
            name='id',
            field=models.CharField(default='P27fc1', editable=False, max_length=6, primary_key=True, serialize=False),
        ),
    ]
| Admins/migrations/0037_auto_20210310_0337.py | 459 | Generated by Django 3.1.7 on 2021-03-10 03:37 | 45 | en | 0.655498 |
import math
import random
def HiringProblem(score, n):
    """Run the classic secretary (hiring) strategy on a list of scores.

    The first ``round(n / e)`` candidates are interviewed and rejected to
    establish a benchmark; the first later candidate whose score meets or
    beats that benchmark is hired.

    Args:
        score: candidate scores in interview order.
        n: number of candidates (``len(score)``).

    Returns:
        The 0-based index of the hired candidate, or ``None`` when no
        candidate after the sample matches the sample's best.
    """
    if n <= 0 or not score:
        # Nothing to interview; the original code raised IndexError here.
        print("Couldn't find a best candidate")
        return None
    # math.e replaces the module-global approximation `e`, which was only
    # defined inside the __main__ guard (a NameError when imported).
    sample_size = int(round(n / math.e))
    print(f"\nRejecting first {sample_size} candidates as sample")
    # Best candidate within the rejected sample serves as the benchmark.
    best_candidate = 0
    for i in range(1, sample_size):
        if score[i] > score[best_candidate]:
            best_candidate = i
    # Hire the first post-sample candidate who meets the benchmark.
    for i in range(sample_size, n):
        if score[i] >= score[best_candidate]:
            best_candidate = i
            break
    if best_candidate >= sample_size:
        print(f"\nThe best Candidate found is {best_candidate+1} with score {score[best_candidate]}")
        return best_candidate
    print("Couldn't find a best candidate")
    return None
# Driver code: simulate the hiring problem with uniformly random scores.
if __name__ == "__main__":
    e = 2.71828  # approximation of Euler's number used to size the sample
    n = int(input("Enter number of candidates to simulate\n"))  # total number of candidates
    score = []
    # Populate the list with random scores in [1, n].
    for i in range(n):
        score.append(random.randint(1, n))
    print("Candidate\tScore\n");
    for i in range(n):
        print(f"{i+1}\t\t{score[i]}");
    HiringProblem(score, n);
| Semester I/Design and Analysis of Algorithm/Practical 04- Hiring Problem/HiringProblem.py | 1,205 | finding best candidate in the sample set for benchmarkfinding the first best candidate outside the sample set Driver codetotal number of candidate populating the list | 166 | en | 0.872793 |
#
# The Template-Python distribution is Copyright (C) Sean McAfee 2007-2008,
# derived from the Perl Template Toolkit Copyright (C) 1996-2007 Andy
# Wardley. All Rights Reserved.
#
# The file "LICENSE" at the top level of this source distribution describes
# the terms under which this file may be distributed.
#
import collections
import re
import sys
from template import util
from template.constants import *
from template.directive import Directive
from template.grammar import Grammar
from template.util import TemplateException
"""
template.parser - LALR(1) parser for compiling template documents
SYNOPSIS
import template.parser
parser = template.parser.Parser(config)
template = parser.parse(text)
DESCRIPTION
The template.parser module implements a LALR(1) parser and associated
methods for parsing template documents into Python code.
PUBLIC METHODS
__init__(params)
The constructor initializes a new template.parser.Parser object. A
dictionary may be supplied as a parameter to provide configuration
values. These may include:
* START_TAG, END_TAG
The START_TAG and END_TAG options are used to specify character
sequences or regular expressions that mark the start and end of a
template directive. The default values for START_TAG and END_TAG are
'[%' and '%]' respectively, giving us the familiar directive style:
[% example %]
Any Python regex characters can be used and therefore should be
escaped (or use the re.escape function) if they are intended to
represent literal characters.
parser = template.parser.Parser({
'START_TAG': re.escape('<+'),
'END_TAG': re.escape('+>'),
})
example:
<+ INCLUDE foobar +>
The TAGS directive can also be used to set the START_TAG and END_TAG values
on a per-template file basis.
[% TAGS <+ +> %]
* TAG_STYLE
The TAG_STYLE option can be used to set both START_TAG and END_TAG
according to pre-defined tag styles.
parser = template.parser.Parser({
'TAG_STYLE': 'star',
})
Available styles are:
template [% ... %] (default)
template1 [% ... %] or %% ... %% (TT version 1)
metatext %% ... %% (Text::MetaText)
star [* ... *] (TT alternate)
php <? ... ?> (PHP)
asp <% ... %> (ASP)
mason <% ... > (HTML::Mason)
html <!-- ... --> (HTML comments)
Any values specified for START_TAG and/or END_TAG will over-ride those
defined by a TAG_STYLE.
The TAGS directive may also be used to set a TAG_STYLE
[% TAGS html %]
<!-- INCLUDE header -->
* PRE_CHOMP, POST_CHOMP
Anything outside a directive tag is considered plain text and is
generally passed through unaltered (but see the INTERPOLATE option).
This includes all whitespace and newlines characters surrounding
directive tags. Directives that don't generate any output will leave
gaps in the output document.
Example:
Foo
[% a = 10 %]
Bar
Output:
Foo
Bar
The PRE_CHOMP and POST_CHOMP options can help to clean up some of this
extraneous whitespace. Both are disabled by default.
parser = template.parser.Parser({
'PRE_CHOMP': 1,
'POST_CHOMP': 1,
})
With PRE_CHOMP set to 1, the newline and whitespace preceding a
directive at the start of a line will be deleted. This has the effect
of concatenating a line that starts with a directive onto the end of
the previous line.
Foo E<lt>----------.
|
,---(PRE_CHOMP)----'
|
`-- [% a = 10 %] --.
|
,---(POST_CHOMP)---'
|
`-E<gt> Bar
With POST_CHOMP set to 1, any whitespace after a directive up to and
including the newline will be deleted. This has the effect of joining
a line that ends with a directive onto the start of the next line.
If PRE_CHOMP or POST_CHOMP is set to 2, all whitespace including any
number of newline will be removed and replaced with a single space.
This is useful for HTML, where (usually) a contiguous block of
whitespace is rendered the same as a single space.
With PRE_CHOMP or POST_CHOMP set to 3, all adjacent whitespace
(including newlines) will be removed entirely.
These values are defined as CHOMP_NONE, CHOMP_ONE, CHOMP_COLLAPSE and
CHOMP_GREEDY constants in the template.constants module. CHOMP_ALL
is also defined as an alias for CHOMP_ONE to provide backward
compatibility with earlier versions of the Template Toolkit.
Additionally the chomp tag modifiers listed below may also be used for
the PRE_CHOMP and POST_CHOMP configuration.
tt = template.Template({
'PRE_CHOMP': '~',
'POST_CHOMP': '-',
})
PRE_CHOMP and POST_CHOMP can be activated for individual directives by
placing a '-' immediately at the start and/or end of the directive.
[% FOREACH user IN userlist %]
[%- user -%]
[% END %]
This has the same effect as CHOMP_ONE in removing all whitespace
before or after the directive up to and including the newline. The
template will be processed as if written:
[% FOREACH user IN userlist %][% user %][% END %]
To remove all whitespace including any number of newlines, use the '~'
character instead.
[% FOREACH user IN userlist %]
[%~ user ~%]
[% END %]
To collapse all whitespace to a single space, use the '=' character.
[% FOREACH user IN userlist %]
[%= user =%]
[% END %]
Here the template is processed as if written:
[% FOREACH user IN userlist %] [% user %] [% END %]
If you have PRE_CHOMP or POST_CHOMP set as configuration options then
you can use '+' to disable any chomping options (i.e. leave the
whitespace intact) on a per-directive basis.
[% FOREACH user = userlist %]
User: [% user +%]
[% END %]
With POST_CHOMP set to CHOMP_ONE, the above example would be parsed as
if written:
[% FOREACH user = userlist %]User: [% user %]
[% END %]
For reference, the PRE_CHOMP and POST_CHOMP configuration options may be set to any of the following:
Constant Value Tag Modifier
----------------------------------
CHOMP_NONE 0 +
CHOMP_ONE 1 -
CHOMP_COLLAPSE 2 =
CHOMP_GREEDY 3 ~
* INTERPOLATE
The INTERPOLATE flag, when set to any true value will cause variable
references in plain text (i.e. not surrounded by START_TAG and
END_TAG) to be recognised and interpolated accordingly.
parser = template.parser.Parser({
'INTERPOLATE': 1,
})
Variables should be prefixed by a '$' to identify them. Curly braces
can be used in the familiar Perl/shell style to explicitly scope the
variable name where required.
# INTERPOLATE => 0
<a href="http://[% server %]/[% help %]">
<img src="[% images %]/help.gif"></a>
[% myorg.name %]
# INTERPOLATE => 1
<a href="http://$server/$help">
<img src="$images/help.gif"></a>
$myorg.name
# explicit scoping with { }
<img src="$images/${icon.next}.gif">
Note that a limitation in Perl's regex engine restricts the maximum
length of an interpolated template to around 32 kilobytes or possibly
less. Files that exceed this limit in size will typically cause Perl
to dump core with a segmentation fault. If you routinely process
templates of this size then you should disable INTERPOLATE or split
the templates in several smaller files or blocks which can then be
joined backed together via PROCESS or INCLUDE.
It is unknown whether this limitation is shared by the Python regex
engine.
* ANYCASE
By default, directive keywords should be expressed in UPPER CASE. The
ANYCASE option can be set to allow directive keywords to be specified
in any case.
# ANYCASE => 0 (default)
[% INCLUDE foobar %] # OK
[% include foobar %] # ERROR
[% include = 10 %] # OK, 'include' is a variable
# ANYCASE => 1
[% INCLUDE foobar %] # OK
[% include foobar %] # OK
[% include = 10 %] # ERROR, 'include' is reserved word
One side-effect of enabling ANYCASE is that you cannot use a variable
of the same name as a reserved word, regardless of case. The reserved
words are currently:
GET CALL SET DEFAULT INSERT INCLUDE PROCESS WRAPPER
IF UNLESS ELSE ELSIF FOR FOREACH WHILE SWITCH CASE
USE PLUGIN FILTER MACRO PYTHON RAWPYTHON BLOCK META
TRY THROW CATCH FINAL NEXT LAST BREAK RETURN STOP
CLEAR TO STEP AND OR NOT MOD DIV END
The only lower case reserved words that cannot be used for variables,
regardless of the ANYCASE option, are the operators:
and or not mod div
* V1DOLLAR
In version 1 of the Template Toolkit, an optional leading '$' could be placed
on any template variable and would be silently ignored.
# VERSION 1
[% $foo %] === [% foo %]
[% $hash.$key %] === [% hash.key %]
To interpolate a variable value the '${' ... '}' construct was used.
Typically, one would do this to index into a hash array when the key
value was stored in a variable.
example:
vars = {
users => {
'aba': { 'name': 'Alan Aardvark', ... },
'abw': { 'name': 'Andy Wardley', ... },
...
},
'uid': 'aba',
...
}
template.process('user/home.html', vars)
'user/home.html':
[% user = users.${uid} %] # users.aba
Name: [% user.name %] # Alan Aardvark
This was inconsistent with double quoted strings and also the
INTERPOLATE mode, where a leading '$' in text was enough to indicate a
variable for interpolation, and the additional curly braces were used
to delimit variable names where necessary. Note that this use is
consistent with UNIX and Perl conventions, among others.
# double quoted string interpolation
[% name = "$title ${user.name}" %]
# INTERPOLATE = 1
<img src="$images/help.gif"></a>
<img src="$images/${icon.next}.gif">
For version 2, these inconsistencies have been removed and the syntax
clarified. A leading '$' on a variable is now used exclusively to
indicate that the variable name should be interpolated
(e.g. substituted for its value) before being used. The earlier example
from version 1:
# VERSION 1
[% user = users.${uid} %]
Name: [% user.name %]
can now be simplified in version 2 as:
# VERSION 2
[% user = users.$uid %]
Name: [% user.name %]
The leading dollar is no longer ignored and has the same effect of
interpolation as '${' ... '}' in version 1. The curly braces may
still be used to explicitly scope the interpolated variable name
where necessary.
e.g.
[% user = users.${me.id} %]
Name: [% user.name %]
The rule applies for all variables, both within directives and in
plain text if processed with the INTERPOLATE option. This means that
you should no longer (if you ever did) add a leading '$' to a variable
inside a directive, unless you explicitly want it to be interpolated.
One obvious side-effect is that any version 1 templates with variables
using a leading '$' will no longer be processed as expected. Given
the following variable definitions,
[% foo = 'bar'
bar = 'baz'
%]
version 1 would interpret the following as:
# VERSION 1
[% $foo %] => [% GET foo %] => bar
whereas version 2 interprets it as:
# VERSION 2
[% $foo %] => [% GET $foo %] => [% GET bar %] => baz
In version 1, the '$' is ignored and the value for the variable 'foo'
is retrieved and printed. In version 2, the variable '$foo' is first
interpolated to give the variable name 'bar' whose value is then
retrieved and printed.
The use of the optional '$' has never been strongly recommended, but
to assist in backwards compatibility with any version 1 templates that
may rely on this "feature", the V1DOLLAR option can be set to 1
(default: 0) to revert the behaviour and have leading '$' characters
ignored.
parser = template.parser.Parser({
    'V1DOLLAR': 1,
})
* GRAMMAR
The GRAMMAR configuration item can be used to specify an alternate
grammar for the parser. This allows a modified or entirely new
template language to be constructed and used by the Template Toolkit.
Source templates are compiled to Python code by the template.parser
module using the template.grammar module (by default) to define the
language structure and semantics. Compiled templates are thus
inherently "compatible" with each other and there is nothing to prevent
any number of different template languages being compiled and used within
the same Template Toolkit processing environment (other than the usual
time and memory constraints).
The template.grammar file is constructed from a YACC like grammar
(using Parse::YAPP) and a skeleton module template. These files are
provided, along with a small script to rebuild the grammar, in the
'parser' sub-directory of the distribution. You don't have to know or
worry about these unless you want to hack on the template language or
define your own variant. There is a README file in the same directory
which provides some small guidance but it is assumed that you know
what you're doing if you venture herein. If you grok LALR parsers,
then you should find it comfortably familiar.
By default, an instance of the default template.grammar.Grammar will
be created and used automatically if a GRAMMAR item isn't specified.
import myorg.template.grammar
parser = template.parser.Parser({
'GRAMMAR': myorg.template.grammar.Grammar(),
})
* DEBUG
The DEBUG option can be used to enable various debugging features of
the template.parser module.
from template.constants import *
tt = template.Template({
'DEBUG': DEBUG_PARSER | DEBUG_DIRS,
})
The DEBUG value can include any of the following. Multiple values
should be combined using the logical OR operator, '|'.
** DEBUG_PARSER
This flag causes the Parser to generate debugging messages that show
the Python code generated by parsing and compiling each template.
** DEBUG_DIRS
This option causes the Template Toolkit to generate comments
indicating the source file, line and original text of each directive
in the template. These comments are embedded in the template output
using the format defined in the DEBUG_FORMAT configuration item, or a
simple default format if unspecified.
For example, the following template fragment:
Hello World
would generate this output:
## input text line 1 : ##
Hello
## input text line 2 : World ##
World
parse(text)
The parse() method parses the text passed in the first parameter and
returns a dictionary of data defining the compiled representation of
the template text, suitable for passing to the
template.document.Document constructor.
Example:
data = parser.parse(text)
The data dictionary returned contains a BLOCK item containing the
compiled Python code for the template, a DEFBLOCKS item containing a
dictionary of sub-template BLOCKs defined within in the template, and
a METADATA item containing a dictionary of metadata values defined in
META tags.
"""
# Status codes used by the _parse() DFA loop.
CONTINUE = 0
ACCEPT = 1
ERROR = 2
ABORT = 3

# Built-in tag styles, mapping a style name to a pair of regex sources
# (start tag, end tag).
TAG_STYLE = {
    "default": (r"\[%", r"%\]"),
    "template1": (r"[[%]%", r"%[]%]"),
    "metatext": (r"%%", r"%%"),
    "html": (r"<!--", r"-->"),
    "mason": (r"<%", r">"),
    "asp": (r"<%", r"%>"),
    "php": (r"<\?", r"\?>"),
    "star": (r"\[\*", r"\*\]"),
}
# 'template' and 'tt2' are aliases for the default style.
TAG_STYLE["template"] = TAG_STYLE["tt2"] = TAG_STYLE["default"]

# Default values for the parser configuration options.
DEFAULT_STYLE = {
    "START_TAG": TAG_STYLE["default"][0],
    "END_TAG": TAG_STYLE["default"][1],
    "ANYCASE": 0,
    "INTERPOLATE": 0,
    "PRE_CHOMP": 0,
    "POST_CHOMP": 0,
    "V1DOLLAR": 0,
    "EVAL_PYTHON": 0,
}

# Backslash escape sequences recognised inside double-quoted strings.
ESCAPE = {"n": "\n", "r": "\r", "t": "\t"}

# Per-directive chomp tag modifiers: '-' one, '=' collapse, '~' greedy,
# '+' none.
CHOMP_FLAGS = r"[-=~+]"

# Stringify the CHOMP_* constants (presumably imported from
# template.constants above -- not visible in this chunk) so they can be
# used both as dictionary keys and as replacement text in Chomp().
CHOMP_ALL = str(CHOMP_ALL)
CHOMP_COLLAPSE = str(CHOMP_COLLAPSE)
CHOMP_GREEDY = str(CHOMP_GREEDY)
CHOMP_NONE = str(CHOMP_NONE)

# Map each tag modifier character to its chomp constant.
CHOMP_CONST = {
    "-": CHOMP_ALL,
    "=": CHOMP_COLLAPSE,
    "~": CHOMP_GREEDY,
    "+": CHOMP_NONE
}

# PRE_CHOMP handlers: each takes the raw text preceding a directive and
# returns it with its trailing whitespace treated per the chomp mode.
PRE_CHOMP = {
    CHOMP_ALL: lambda x: re.sub(r"(\n|^)[^\S\n]*\Z", "", x),  # strip trailing whitespace plus its newline
    CHOMP_COLLAPSE: lambda x: re.sub(r"\s+\Z", " ", x),       # collapse trailing whitespace to one space
    CHOMP_GREEDY: lambda x: re.sub(r"\s+\Z", "", x),          # remove all trailing whitespace
    CHOMP_NONE: lambda x: x,                                  # leave text untouched
}
def postchomp(regex, prefix):
    """Build a POST_CHOMP handler.

    Returns a closure taking (text, postlines): if *regex* matches at the
    start of the text following a directive, the matched span is replaced
    by *prefix* and the post-directive line counter is advanced by the
    number of newlines removed.  The (possibly modified) pair is returned.
    """
    pattern = re.compile(regex)

    def strip(text, postlines):
        found = pattern.match(text)
        if found is None:
            return text, postlines
        removed = found.group()
        return prefix + text[found.end():], postlines + removed.count("\n")

    return strip
# POST_CHOMP handlers: map each chomp mode to a callable taking
# (following_text, postlines) and returning the adjusted pair.
POST_CHOMP = {
    CHOMP_ALL: postchomp(r"[^\S\n]*\n", ""),   # eat whitespace up to and including one newline
    CHOMP_COLLAPSE: postchomp(r"\s+", " "),    # collapse leading whitespace to a single space
    CHOMP_GREEDY: postchomp(r"\s+", ""),       # eat all leading whitespace, newlines included
    CHOMP_NONE: lambda x, y: (x, y),           # leave text untouched
}
def Chomp(x):
    """Normalise a chomp setting to its canonical constant string.

    Tag modifier characters ('-', '=', '~', '+') are translated to the
    corresponding CHOMP_* constant; anything else (e.g. a numeric
    setting) passes through unchanged as a string.
    """
    def translate(match):
        return CHOMP_CONST[match.group()]
    return re.sub(r"[-=~+]", translate, str(x))
GRAMMAR = re.compile(r"""
# strip out any comments
(\#[^\n]*)
|
# a quoted string matches in $3
(["']) # $2 - opening quote, ' or "
( # $3 - quoted text buffer
(?: # repeat group (no backreference)
\\\\ # an escaped backslash
| # ...or...
\\\2 # an escaped quote \" or \' (match $1)
| # ...or...
. # any other character
| \n
)*? # non-greedy repeat
) # end of $3
\2 # match opening quote
|
# an unquoted number matches in $4
(-? \d+ (?: \. \d+ )?) # numbers
|
# filename matches in $5
( /? \w+ (?: (?: /|::? ) \w* )+ | /\w+ )
|
# an identifier matches in $6
(\w+)
|
# an unquoted word or symbol matches in $7
( [(){}\[\]:;,/\\] # misc parentheses and symbols
| -> # arrow operator (for future?)
| [-+*] # math operations
| \${? # dollar with optional left brace
| => # like "="
| [=!<>]?= | [!<>] # equality tests
| &&? | \|\|? # boolean ops
| \.\.? # n..n sequence
| \S+ # something unquoted
) # end of $7
""", re.VERBOSE)
QUOTED_STRING = re.compile(r"""
( (?: \\. | [^\$] ){1,3000} ) # escaped or non-'$' character [$1]
|
( \$ (?: # embedded variable [$2]
(?: \{ ([^\}]*) \} ) # ${ ... } [$3]
|
([\w\.]+) # $word [$4]
)
)
""", re.VERBOSE)
class Error(Exception):
    """Local exception type raised for parser configuration errors."""
class Parser:
    """This module implements a LALR(1) parser and associated support
    methods to parse template documents into the appropriate "compiled"
    format.
    """
    def __init__(self, param):
        """Build a parser from the configuration dictionary *param*.

        Recognised keys mirror the options documented at the top of this
        module: START_TAG, END_TAG, TAG_STYLE, ANYCASE, INTERPOLATE,
        PRE_CHOMP, POST_CHOMP, V1DOLLAR, EVAL_PYTHON, FILE_INFO, GRAMMAR
        and FACTORY.
        """
        self.start_tag = param.get("START_TAG") or DEFAULT_STYLE["START_TAG"]
        self.end_tag = param.get("END_TAG") or DEFAULT_STYLE["END_TAG"]
        self.tag_style = param.get("TAG_STYLE", "default")
        self.anycase = param.get("ANYCASE", False)
        self.interpolate = param.get("INTERPOLATE", False)
        self.pre_chomp = param.get("PRE_CHOMP", CHOMP_NONE)
        self.post_chomp = param.get("POST_CHOMP", CHOMP_NONE)
        self.v1dollar = param.get("V1DOLLAR", False)
        self.eval_python = param.get("EVAL_PYTHON", False)
        self.file_info = param.get("FILE_INFO", 1)
        self.grammar = param.get("GRAMMAR", Grammar())
        self.factory = param.get("FACTORY", Directive)
        self.fileinfo = []
        self.defblocks = []
        self.defblock_stack = []
        self.infor = 0
        self.inwhile = 0
        self.style = []
        # Build a FACTORY object to include any NAMESPACE definitions,
        # but only if FACTORY isn't already a (non-callable) object.
        # callable() replaces the former isinstance(..., collections.Callable)
        # check: that alias moved to collections.abc in Python 3.3 and was
        # removed from the collections module in Python 3.10.
        if callable(self.factory):
            self.factory = self.factory(param)
        self.lextable = self.grammar.lextable
        self.states = self.grammar.states
        self.rules = self.grammar.rules
        self.new_style(param)
        # Dispatch table used by tokenise_directive(): maps GRAMMAR match
        # group indices to the handler fed with those groups' text.
        self.tokenize = (
            ((1,), self._comment),
            ((2, 3), self._string),
            ((4,), self._number),
            ((5,), self._filename),
            ((6,), self._identifier),
            ((7,), self._word),
        )
def new_style(self, config):
"""Install a new (stacked) parser style.
This feature is currently experimental but should mimic the
previous behaviour with regard to TAG_STYLE, START_TAG, END_TAG,
etc.
"""
if self.style:
style = self.style[-1]
else:
style = DEFAULT_STYLE
style = style.copy()
tagstyle = config.get("TAG_STYLE")
if tagstyle:
tags = TAG_STYLE.get(tagstyle)
if tags is None:
raise Error("Invalid tag style: %s" % tagstyle)
start, end = tags
config["START_TAG"] = config.get("START_TAG", start)
config["END_TAG"] = config.get("END_TAG", end)
for key in DEFAULT_STYLE.keys():
value = config.get(key)
if value is not None:
style[key] = value
self.style.append(style)
return style
def old_style(self):
"""Pop the current parser style and revert to the previous one.
See new_style(). ** experimental **
"""
if len(self.style) <= 1:
raise Error("only 1 parser style remaining")
self.style.pop()
return self.style[-1]
def location(self):
"""Return Python comment indicating current parser file and line."""
if not self.file_info:
return "\n"
line = self.line
info = self.fileinfo[-1]
file = info and (info.path or info.name) or "(unknown template)"
line = re.sub(r"-.*", "", str(line)) # might be 'n-n'
return '#line %s "%s"\n' % (line, file)
def parse(self, text, info=None):
"""Parses the text string, text, and returns a dictionary
representing the compiled template block(s) as Python code, in the
format expected by template.document.
"""
self.defblock = {}
self.metadata = {}
tokens = self.split_text(text)
if tokens is None:
return None
self.fileinfo.append(info)
block = self._parse(tokens, info)
self.fileinfo.pop()
if block:
return {"BLOCK": block,
"DEFBLOCKS": self.defblock,
"METADATA": self.metadata}
else:
return None
    def split_text(self, text):
        """Split input template text into directives and raw text chunks.

        Returns a flat token list: raw text appears as consecutive scalar
        pairs ("TEXT", text) -- or [text, line, 'ITEXT'] entries when
        INTERPOLATE is enabled -- and each directive appears as a
        3-element list [directive_text, line_or_range, token_list].
        """
        tokens = []
        line = 1
        style = self.style[-1]
        # Build a regex capturing (leading text, directive text) for the
        # currently active START_TAG/END_TAG pair.
        def make_splitter(delims):
            return re.compile(r"(?s)(.*?)%s(.*?)%s" % delims)
        splitter = make_splitter((style["START_TAG"], style["END_TAG"]))
        while True:
            match = splitter.match(text)
            if not match:
                break
            text = text[match.end():]
            pre, dir = match.group(1), match.group(2)
            prelines = pre.count("\n")
            dirlines = dir.count("\n")
            postlines = 0
            if dir.startswith("#"):
                # comment out entire directive except for any end chomp flag
                match = re.search(CHOMP_FLAGS + "$", dir)
                if match:
                    dir = match.group()
                else:
                    dir = ""
            else:
                # PRE_CHOMP: process whitespace before tag
                match = re.match(r"(%s)?\s*" % CHOMP_FLAGS, dir)
                chomp = Chomp(match and match.group(1) or style["PRE_CHOMP"])
                if match:
                    dir = dir[match.end():]
                pre = PRE_CHOMP[chomp](pre)
                # POST_CHOMP: process whitespace after tag
                match = re.search(r"\s*(%s)?\s*$" % CHOMP_FLAGS, dir)
                chomp = Chomp(match and match.group(1) or style["POST_CHOMP"])
                if match:
                    dir = dir[:match.start()]
                text, postlines = POST_CHOMP[chomp](text, postlines)
            if pre:
                if style["INTERPOLATE"]:
                    tokens.append([pre, line, 'ITEXT'])
                else:
                    tokens.extend(["TEXT", pre])
            # Counter update is outside the 'if' above: prelines was
            # counted before chomping, so fully chomped-away text must
            # still advance the line number.
            line += prelines
            if dir:
                # The TAGS directive is a compile-time switch.
                match = re.match(r"(?i)TAGS\s+(.*)", dir)
                if match:
                    tags = re.split(r"\s+", match.group(1))
                    if len(tags) > 1:
                        splitter = make_splitter(tuple(re.escape(x) for x in tags[:2]))
                    elif tags[0] in TAG_STYLE:
                        splitter = make_splitter(TAG_STYLE[tags[0]])
                    else:
                        sys.stderr.write("Invalid TAGS style: %s" % tags[0])
                else:
                    # Multi-line directives are tagged with an 'n-n' range.
                    if dirlines > 0:
                        line_range = "%d-%d" % (line, line + dirlines)
                    else:
                        line_range = str(line)
                    tokens.append([dir, line_range, self.tokenise_directive(dir)])
            # Advance past the directive's own lines plus any newlines
            # chomped off the text that follows it.
            line += dirlines + postlines
        # Any text remaining after the last directive.
        if text:
            if style["INTERPOLATE"]:
                tokens.append([text, line, "ITEXT"])
            else:
                tokens.extend(["TEXT", text])
        return tokens
def _comment(self, token):
"""Tokenizes a comment."""
return ()
def _string(self, quote, token):
"""Tokenizes a string."""
if quote == '"':
if re.search(r"[$\\]", token):
# unescape " and \ but leave \$ escaped so that
# interpolate_text() doesn't incorrectly treat it
# as a variable reference
token = re.sub(r'\\([\\"])', r'\1', token)
token = re.sub(r'\\([^$nrt])', r'\1', token)
token = re.sub(r'\\([nrt])', lambda m: ESCAPE[m.group(1)], token)
return ['"', '"'] + self.interpolate_text(token) + ['"', '"']
else:
return "LITERAL", "scalar(%r)" % token
else:
# Remove escaped single quotes and backslashes:
token = re.sub(r"\\(.)", lambda m: m.group(m.group(1) in "'\\"), token)
return "LITERAL", "scalar(%r)" % token
def _number(self, token):
"""Tokenizes a number."""
return "NUMBER", "scalar(%s)" % token
def _filename(self, token):
"""Tokenizes a filename."""
return "FILENAME", token
def _identifier(self, token):
"""Tokenizes an identifier."""
if self.anycase:
uctoken = token.upper()
else:
uctoken = token
toktype = self.lextable.get(uctoken)
if toktype is not None:
return toktype, uctoken
else:
return "IDENT", token
def _word(self, token):
"""Tokenizes an unquoted word or symbol ."""
return self.lextable.get(token, "UNQUOTED"), token
def tokenise_directive(self, dirtext):
"""Called by the private _parse() method when it encounters a
DIRECTIVE token in the list provided by the split_text() or
interpolate_text() methods.
The method splits the directive into individual tokens as
recognised by the parser grammar (see template.grammar for
details). It constructs a list of tokens each represented by 2
elements, as per split_text() et al. The first element contains
the token type, the second the token itself.
The method tokenises the string using a complex (but fast) regex.
For a deeper understanding of the regex magic at work here, see
Jeffrey Friedl's excellent book "Mastering Regular Expressions",
from O'Reilly, ISBN 1-56592-257-3
Returns the list of chunks (each one being 2 elements) identified
in the directive text.
"""
tokens = []
for match in GRAMMAR.finditer(dirtext):
for indices, method in self.tokenize:
if match.group(indices[0]):
tokens.extend(method(*list(map(match.group, indices))))
break
return tokens
    def _parse(self, tokens, info):
        """Parses the list of input tokens passed by reference and returns
        an object which contains the compiled representation of the
        template.

        This is the main parser DFA loop.  See embedded comments for
        further details.
        """
        self.grammar.install_factory(self.factory)
        stack = [[0, None]]  # DFA stack of [state_number, value] pairs
        coderet = None
        token = None
        in_string = False
        in_python = False
        status = CONTINUE
        lhs = None
        text = None
        self.line = 0
        self.file = info and info.name
        self.inpython = 0
        value = None
        while True:
            stateno = stack[-1][0]
            state = self.states[stateno]
            # see if any lookaheads exist for the current state
            if "ACTIONS" in state:
                # get next token and expand any directives (ie. token is a
                # list) onto the front of the token list
                while token is None and tokens:
                    token = tokens.pop(0)
                    if isinstance(token, (list, tuple)):
                        # a [text, line, tokens-or-tag] entry from split_text()
                        text, self.line, token = util.unpack(token, 3)
                        if isinstance(token, (list, tuple)):
                            tokens[:0] = token + [";", ";"]
                            token = None  # force redo
                        elif token == "ITEXT":
                            if in_python:
                                # don't perform interpolation in PYTHON blocks
                                token = "TEXT"
                                value = text
                            else:
                                tokens[:0] = self.interpolate_text(text, self.line)
                                token = None  # force redo
                    else:
                        # toggle string flag to indicate if we're crossing
                        # a string boundary
                        if token == '"':
                            in_string = not in_string
                        value = tokens and tokens.pop(0) or None
                if token is None:
                    token = ""
                # get the next state for the current lookahead token
                lookup = state["ACTIONS"].get(token)
                if lookup:
                    action = lookup
                else:
                    action = state.get("DEFAULT")
            else:
                # no lookahead assertions
                action = state.get("DEFAULT")
            # ERROR: no ACTION
            if action is None:
                break
            # shift (positive ACTION)
            if action > 0:
                stack.append([action, value])
                token = value = None
            else:
                # reduce (negative ACTION)
                lhs, len_, code = self.rules[-action]
                # no action implies ACCEPTance
                if not action:
                    status = ACCEPT
                # use dummy sub if code ref doesn't exist
                if not code:
                    code = lambda *arg: len(arg) >= 2 and arg[1] or None
                if len_ > 0:
                    codevars = [x[1] for x in stack[-len_:]]
                else:
                    codevars = []
                try:
                    coderet = code(self, *codevars)
                except TemplateException as e:
                    self._parse_error(str(e), info.name)
                # reduce stack by len_
                if len_ > 0:
                    stack[-len_:] = []
                # ACCEPT
                if status == ACCEPT:
                    return coderet
                elif status == ABORT:
                    return None
                elif status == ERROR:
                    break
                # push the goto state for the reduced nonterminal
                stack.append([self.states[stack[-1][0]].get("GOTOS", {}).get(lhs),
                              coderet])
        # ERROR: fell out of the DFA loop without accepting
        if value is None:
            self._parse_error("unexpected end of input", info.name)
        elif value == ";":
            self._parse_error("unexpected end of directive", info.name, text)
        else:
            self._parse_error("unexpected token (%s)" %
                              util.unscalar_lex(value), info.name, text)
def _parse_error(self, msg, name, text=None):
"""Method used to handle errors encountered during the parse process
in the _parse() method.
"""
line = self.line or "unknown"
if text is not None:
msg += "\n [%% %s %%]" % text
raise TemplateException("parse", "%s line %s: %s" % (name, line, msg))
def define_block(self, name, block):
"""Called by the parser 'defblock' rule when a BLOCK definition is
encountered in the template.
The name of the block is passed in the first parameter and a
reference to the compiled block is passed in the second. This
method stores the block in the self.defblock dictionary which has
been initialised by parse() and will later be used by the same
method to call the store() method on the calling cache to define
the block "externally".
"""
if self.defblock is None:
return None
self.defblock[name] = block
return None
def push_defblock(self):
self.defblock_stack.append(self.defblock)
self.defblock = {}
def pop_defblock(self):
if not self.defblock_stack:
return self.defblock
block = self.defblock
self.defblock = self.defblock_stack.pop(0)
return block
def add_metadata(self, setlist):
setlist = [util.unscalar_lex(x) for x in setlist]
if self.metadata is not None:
for key, value in util.chop(setlist, 2):
self.metadata[key] = value
return None
def interpolate_text(self, text, line=0):
"""Examines text looking for any variable references embedded
like $this or like ${ this }.
"""
tokens = []
for match in QUOTED_STRING.finditer(text):
pre = match.group(1)
var = match.group(3) or match.group(4)
dir = match.group(2)
# preceding text
if pre:
line += pre.count("\n")
tokens.extend(("TEXT", pre.replace("\\$", "$")))
# variable reference
if var:
line += dir.count("\n")
tokens.append([dir, line, self.tokenise_directive(var)])
# other '$' reference - treated as text
elif dir:
line += dir.count("\n")
tokens.extend(("TEXT", dir))
return tokens
| template/parser.py | 35,017 | A trivial local exception class.
This module implements a LALR(1) parser and assocated support
methods to parse template documents into the appropriate "compiled"
format.
Tokenizes a comment.
Tokenizes a filename.
Tokenizes an identifier.
Tokenizes a number.
Parses the list of input tokens passed by reference and returns
an object which contains the compiled representation of the
template.
This is the main parser DFA loop. See embedded comments for
further details.
Method used to handle errors encountered during the parse process
in the _parse() method.
Tokenizes a string.
Tokenizes an unquoted word or symbol .
Called by the parser 'defblock' rule when a BLOCK definition is
encountered in the template.
The name of the block is passed in the first parameter and a
reference to the compiled block is passed in the second. This
method stores the block in the self.defblock dictionary which has
been initialised by parse() and will later be used by the same
method to call the store() method on the calling cache to define
the block "externally".
Examines text looking for any variable references embedded
like $this or like ${ this }.
Return Python comment indicating current parser file and line.
Install a new (stacked) parser style.
This feature is currently experimental but should mimic the
previous behaviour with regard to TAG_STYLE, START_TAG, END_TAG,
etc.
Pop the current parser style and revert to the previous one.
See new_style(). ** experimental **
Parses the text string, text, and returns a dictionary
representing the compiled template block(s) as Python code, in the
format expected by template.document.
Split input template text into directives and raw text chunks.
Called by the private _parse() method when it encounters a
DIRECTIVE token in the list provided by the split_text() or
interpolate_text() methods.
The method splits the directive into individual tokens as
recognised by the parser grammar (see template.grammar for
details). It constructs a list of tokens each represented by 2
elements, as per split_text() et al. The first element contains
the token type, the second the token itself.
The method tokenises the string using a complex (but fast) regex.
For a deeper understanding of the regex magic at work here, see
Jeffrey Friedl's excellent book "Mastering Regular Expressions",
from O'Reilly, ISBN 1-56592-257-3
Returns the list of chunks (each one being 2 elements) identified
in the directive text.
The Template-Python distribution is Copyright (C) Sean McAfee 2007-2008, derived from the Perl Template Toolkit Copyright (C) 1996-2007 Andy Wardley. All Rights Reserved. The file "LICENSE" at the top level of this source distribution describes the terms under which this file may be distributed. Build a FACTORY object to include any NAMESPACE definitions, but only if FACTORY isn't already a (non-callable) object. might be 'n-n' commment out entire directive except for any end chomp flag PRE_CHOMP: process whitespace before tag POST_CHOMP: process whitespace after tag The TAGS directive is a compile-time switch. unescape " and \ but leave \$ escaped so that interpolate_text() doesn't incorrectly treat it as a variable reference Remove escaped single quotes and backslashes: DFA stack see if any lookaheads exist for the current state get next token and expand any directives (ie. token is a list) onto the front of the token list force redo don't perform interpolation in PYTHON blocks force redo toggle string flag to indicate if we're crossing a string boundary get the next state for the current lookahead token no lookahead assertions ERROR: no ACTION shift (positive ACTION) reduce (negative ACTION) no action implies ACCEPTance use dummy sub if code ref doesn't exist reduce stack by len_ ACCEPT ERROR preceding text variable reference other '$' reference - treated as text | 3,855 | en | 0.812422 |
#!/usr/bin/env python2
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
    sys.stderr.write('Please include username as an argument.\n')
    # Exit non-zero: a missing argument is a usage error, not success.
    sys.exit(1)
username = sys.argv[1]

# SystemRandom uses os.urandom() underneath, so it is suitable for
# generating security-sensitive material.
cryptogen = SystemRandom()

# Create a 16 byte salt rendered as 32 lowercase hex characters.  Each
# byte is zero-padded to two digits; the previous hex(x)[2:] approach
# dropped leading zeroes and produced variable-length salts.
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
salt = "".join(["%02x" % x for x in salt_sequence])

# Create a 32 byte, urlsafe-base64-encoded password.
password = base64.urlsafe_b64encode(os.urandom(32))
digestmod = hashlib.sha256
if sys.version_info.major >= 3:
    # Decode to text for printing/concatenation below; on Python 3 hmac
    # also accepts the digest by name.
    password = password.decode('utf-8')
    digestmod = 'SHA256'

# The stored verifier is HMAC-SHA256 keyed on the salt over the password.
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()

print("String to be appended to monalisa.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
| share/rpcuser/rpcuser.py | 1,115 | !/usr/bin/env python2 Copyright (c) 2015-2016 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.This uses os.urandom() underneathCreate 16 byte hex saltCreate 32 byte b64 password | 291 | en | 0.366729 |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import cv2
import tensorflow as tf
# In[2]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# In[3]:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow import keras
from tensorflow.keras import layers
# ### Load the Training Data
# In[4]:
# curwd = str(os.getcwd())
# targetwd = '\\data\\train'
# path_train = curwd + targetwd
# NOTE(review): this absolute Windows path looks truncated/mangled
# ('...s\\Documents') -- restore the real user directory, or use the
# commented-out os.getcwd()-based construction above.
path_train = '...s\\Documents\\whale_identification\\whale_identification\\data\\train\\'
# Absolute paths of every training JPEG in the train directory.
train = [os.path.join(path_train,f) for f in os.listdir(path_train) if f.endswith('.jpg')]
# In[6]:
# Labels: one row per image with its whale 'Id'.
train_labels = pd.read_csv("df_train.csv")
# In[7]:
train_labels.head()
# In[8]:
# Number of distinct whale identities (notebook cell output).
unique_whales = train_labels['Id'].unique()
len(unique_whales)
# ### Train-Validation Split
# In[9]:
def train_valid_split(df):
    """Split the labelled images in *df* into train/validation lists.

    *df* must have 'Image' and 'Id' columns.  For every whale identity,
    the first 20% of its images go to the training list and the next 5%
    to the validation list, so each identity with enough images is
    represented in both sets.

    Returns (train_revised, valid_revised): two parallel lists holding
    one list of image names per whale identity.
    """
    # Bug fix: the original read the global `train_labels` instead of
    # the `df` argument, so the parameter was silently ignored.
    unique_whales = df['Id'].unique()
    # map the images to categories
    mapping = {}
    for whale in unique_whales:
        mapping[whale] = list(df[df['Id'] == whale]['Image'].values)
    # perform manual train/validation split to ensure balanced data in
    # both sets (i.e. all categories are represented)
    train_revised = []
    valid_revised = []
    for v in mapping.values():
        # NOTE(review): the original comment said "80-20 split" but the
        # code keeps 20% for training and the next 5% for validation;
        # behaviour preserved as-is pending clarification.
        cut = int(0.2 * len(v))
        cut2 = int(0.25 * len(v))
        train_revised.append(v[:cut])
        valid_revised.append(v[cut:cut2])
    return train_revised, valid_revised
def train_valid_dict_generator(train_list, valid_list, df):
    """Map each image in the train/validation lists to its label.

    *train_list* and *valid_list* are lists of lists of image names (as
    produced by train_valid_split); *df* holds the 'Image' -> 'Id'
    correspondence.  Returns (train_df, valid_df): two dicts keyed by
    image name with the whale Id as value.
    """
    def label_of(image):
        # Id of the first (only) row matching this image name.
        return df[df['Image'] == image]['Id'].values[0]

    train_df = {image: label_of(image) for chunk in train_list for image in chunk}
    valid_df = {image: label_of(image) for chunk in valid_list for image in chunk}
    return train_df, valid_df
# -*- coding: utf-8 -*-
from odoo import fields
from odoo.tests.common import Form, SavepointCase
from odoo.tests import tagged
from contextlib import contextmanager
from unittest.mock import patch
import datetime
@tagged('post_install', '-at_install')
class AccountTestInvoicingCommon(SavepointCase):
    """Common accounting test fixture.

    Creates a dedicated test user, two companies with a chart of accounts,
    a foreign currency with rates, plus taxes, products, fiscal positions,
    payment terms, partners and cash roundings for invoicing tests.
    """

    @classmethod
    def copy_account(cls, account):
        """Duplicate ``account`` under a new, unique code '<code> (<n>)'."""
        suffix_nb = 1
        while True:
            new_code = '%s (%s)' % (account.code, suffix_nb)
            if account.search_count([('company_id', '=', account.company_id.id), ('code', '=', new_code)]):
                suffix_nb += 1
            else:
                return account.copy(default={'code': new_code})

    @classmethod
    def setUpClass(cls, chart_template_ref=None):
        """Build the whole shared fixture; skip the tests when no chart of
        accounts is available.

        :param chart_template_ref: optional XML-id of the chart template to
            load; defaults to the generic chart of accounts.
        """
        super().setUpClass()

        if chart_template_ref:
            chart_template = cls.env.ref(chart_template_ref)
        else:
            chart_template = cls.env.ref('l10n_generic_coa.configurable_chart_template', raise_if_not_found=False)

        if not chart_template:
            cls.tearDownClass()
            # skipTest raises exception
            cls.skipTest(cls, "Accounting Tests skipped because the user's company has no chart of accounts.")

        # Create user.
        user = cls.env['res.users'].create({
            'name': 'Because I am accountman!',
            'login': 'accountman',
            'groups_id': [(6, 0, cls.env.user.groups_id.ids), (4, cls.env.ref('account.group_account_user').id)],
        })
        user.partner_id.email = 'accountman@test.com'

        # Shadow the current environment/cursor with one having the report user.
        # This is mandatory to test access rights.
        cls.env = cls.env(user=user)
        cls.cr = cls.env.cr

        cls.company_data_2 = cls.setup_company_data('company_2_data', chart_template)
        cls.company_data = cls.setup_company_data('company_1_data', chart_template)

        user.write({
            'company_ids': [(6, 0, (cls.company_data['company'] + cls.company_data_2['company']).ids)],
            'company_id': cls.company_data['company'].id,
        })

        cls.currency_data = cls.setup_multi_currency_data()

        # ==== Taxes ====
        cls.tax_sale_a = cls.company_data['default_tax_sale']
        cls.tax_sale_b = cls.company_data['default_tax_sale'].copy()
        cls.tax_purchase_a = cls.company_data['default_tax_purchase']
        cls.tax_purchase_b = cls.company_data['default_tax_purchase'].copy()
        cls.tax_armageddon = cls.setup_armageddon_tax('complex_tax', cls.company_data)

        # ==== Products ====
        cls.product_a = cls.env['product.product'].create({
            'name': 'product_a',
            'uom_id': cls.env.ref('uom.product_uom_unit').id,
            'lst_price': 1000.0,
            'standard_price': 800.0,
            'property_account_income_id': cls.company_data['default_account_revenue'].id,
            'property_account_expense_id': cls.company_data['default_account_expense'].id,
            'taxes_id': [(6, 0, cls.tax_sale_a.ids)],
            'supplier_taxes_id': [(6, 0, cls.tax_purchase_a.ids)],
        })
        # product_b uses its own copied accounts and both sale/purchase taxes.
        cls.product_b = cls.env['product.product'].create({
            'name': 'product_b',
            'uom_id': cls.env.ref('uom.product_uom_dozen').id,
            'lst_price': 200.0,
            'standard_price': 160.0,
            'property_account_income_id': cls.copy_account(cls.company_data['default_account_revenue']).id,
            'property_account_expense_id': cls.copy_account(cls.company_data['default_account_expense']).id,
            'taxes_id': [(6, 0, (cls.tax_sale_a + cls.tax_sale_b).ids)],
            'supplier_taxes_id': [(6, 0, (cls.tax_purchase_a + cls.tax_purchase_b).ids)],
        })

        # ==== Fiscal positions ====
        # Maps product_a's taxes/accounts onto product_b's alternatives.
        cls.fiscal_pos_a = cls.env['account.fiscal.position'].create({
            'name': 'fiscal_pos_a',
            'tax_ids': [
                (0, None, {
                    'tax_src_id': cls.tax_sale_a.id,
                    'tax_dest_id': cls.tax_sale_b.id,
                }),
                (0, None, {
                    'tax_src_id': cls.tax_purchase_a.id,
                    'tax_dest_id': cls.tax_purchase_b.id,
                }),
            ],
            'account_ids': [
                (0, None, {
                    'account_src_id': cls.product_a.property_account_income_id.id,
                    'account_dest_id': cls.product_b.property_account_income_id.id,
                }),
                (0, None, {
                    'account_src_id': cls.product_a.property_account_expense_id.id,
                    'account_dest_id': cls.product_b.property_account_expense_id.id,
                }),
            ],
        })

        # ==== Payment terms ====
        cls.pay_terms_a = cls.env.ref('account.account_payment_term_immediate')
        cls.pay_terms_b = cls.env['account.payment.term'].create({
            'name': '30% Advance End of Following Month',
            'note': 'Payment terms: 30% Advance End of Following Month',
            'line_ids': [
                (0, 0, {
                    'value': 'percent',
                    'value_amount': 30.0,
                    'sequence': 400,
                    'days': 0,
                    'option': 'day_after_invoice_date',
                }),
                (0, 0, {
                    'value': 'balance',
                    'value_amount': 0.0,
                    'sequence': 500,
                    'days': 31,
                    'option': 'day_following_month',
                }),
            ],
        })

        # ==== Partners ====
        cls.partner_a = cls.env['res.partner'].create({
            'name': 'partner_a',
            'property_payment_term_id': cls.pay_terms_a.id,
            'property_supplier_payment_term_id': cls.pay_terms_a.id,
            'property_account_receivable_id': cls.company_data['default_account_receivable'].id,
            'property_account_payable_id': cls.company_data['default_account_payable'].id,
            'company_id': False,
        })
        cls.partner_b = cls.env['res.partner'].create({
            'name': 'partner_b',
            'property_payment_term_id': cls.pay_terms_b.id,
            'property_supplier_payment_term_id': cls.pay_terms_b.id,
            'property_account_position_id': cls.fiscal_pos_a.id,
            'property_account_receivable_id': cls.company_data['default_account_receivable'].copy().id,
            'property_account_payable_id': cls.company_data['default_account_payable'].copy().id,
            'company_id': False,
        })

        # ==== Cash rounding ====
        cls.cash_rounding_a = cls.env['account.cash.rounding'].create({
            'name': 'add_invoice_line',
            'rounding': 0.05,
            'strategy': 'add_invoice_line',
            'account_id': cls.copy_account(cls.company_data['default_account_expense']).id,
            'rounding_method': 'UP',
        })
        cls.cash_rounding_b = cls.env['account.cash.rounding'].create({
            'name': 'biggest_tax',
            'rounding': 0.05,
            'strategy': 'biggest_tax',
            'rounding_method': 'DOWN',
        })

    @classmethod
    def setup_company_data(cls, company_name, chart_template, **kwargs):
        ''' Create a new company having the name passed as parameter.
        A chart of accounts will be installed to this company: the same as the current company one.
        The current user will get access to this company.

        :param company_name: The name of the company.
        :return: A dictionary will be returned containing all relevant accounting data for testing.
        '''

        def search_account(company, chart_template, field_name, domain):
            # Prefer the account whose code matches the chart-template field's
            # code prefix; fall back to any account matching the domain.
            template_code = chart_template[field_name].code
            domain = [('company_id', '=', company.id)] + domain

            account = None
            if template_code:
                account = cls.env['account.account'].search(domain + [('code', '=like', template_code + '%')], limit=1)

            if not account:
                account = cls.env['account.account'].search(domain, limit=1)
            return account

        currency = chart_template.currency_id
        company = cls.env['res.company'].create({
            'name': company_name,
            'currency_id': currency.id,
            **kwargs,
        })
        cls.env.user.company_ids |= company

        chart_template.try_loading(company=company)

        # The currency could be different after the installation of the chart template.
        company.write({'currency_id': kwargs.get('currency_id', currency.id)})

        return {
            'company': company,
            'currency': company.currency_id,
            'default_account_revenue': cls.env['account.account'].search([
                ('company_id', '=', company.id),
                ('user_type_id', '=', cls.env.ref('account.data_account_type_revenue').id)
            ], limit=1),
            'default_account_expense': cls.env['account.account'].search([
                ('company_id', '=', company.id),
                ('user_type_id', '=', cls.env.ref('account.data_account_type_expenses').id)
            ], limit=1),
            'default_account_receivable': search_account(company, chart_template, 'property_account_receivable_id', [
                ('user_type_id.type', '=', 'receivable')
            ]),
            'default_account_payable': cls.env['account.account'].search([
                ('company_id', '=', company.id),
                ('user_type_id.type', '=', 'payable')
            ], limit=1),
            'default_account_assets': cls.env['account.account'].search([
                ('company_id', '=', company.id),
                ('user_type_id', '=', cls.env.ref('account.data_account_type_current_assets').id)
            ], limit=1),
            'default_account_tax_sale': company.account_sale_tax_id.mapped('invoice_repartition_line_ids.account_id'),
            'default_account_tax_purchase': company.account_purchase_tax_id.mapped('invoice_repartition_line_ids.account_id'),
            'default_journal_misc': cls.env['account.journal'].search([
                ('company_id', '=', company.id),
                ('type', '=', 'general')
            ], limit=1),
            'default_journal_sale': cls.env['account.journal'].search([
                ('company_id', '=', company.id),
                ('type', '=', 'sale')
            ], limit=1),
            'default_journal_purchase': cls.env['account.journal'].search([
                ('company_id', '=', company.id),
                ('type', '=', 'purchase')
            ], limit=1),
            'default_journal_bank': cls.env['account.journal'].search([
                ('company_id', '=', company.id),
                ('type', '=', 'bank')
            ], limit=1),
            'default_journal_cash': cls.env['account.journal'].search([
                ('company_id', '=', company.id),
                ('type', '=', 'cash')
            ], limit=1),
            'default_tax_sale': company.account_sale_tax_id,
            'default_tax_purchase': company.account_purchase_tax_id,
        }

    @classmethod
    def setup_multi_currency_data(cls, default_values=None, rate2016=3.0, rate2017=2.0):
        """Create a foreign 'Gold Coin' currency with one rate for 2016 and
        one for 2017.

        :param default_values: optional dict of extra currency field values.
            (Bug fix: was a mutable ``{}`` default argument.)
        :param rate2016: rate applied from 2016-01-01.
        :param rate2017: rate applied from 2017-01-01.
        :return: dict with the 'currency' record and its 'rates'.
        """
        foreign_currency = cls.env['res.currency'].create({
            'name': 'Gold Coin',
            'symbol': '☺',
            'rounding': 0.001,
            'position': 'after',
            'currency_unit_label': 'Gold',
            'currency_subunit_label': 'Silver',
            **(default_values or {}),
        })
        rate1 = cls.env['res.currency.rate'].create({
            'name': '2016-01-01',
            'rate': rate2016,
            'currency_id': foreign_currency.id,
            'company_id': cls.env.company.id,
        })
        rate2 = cls.env['res.currency.rate'].create({
            'name': '2017-01-01',
            'rate': rate2017,
            'currency_id': foreign_currency.id,
            'company_id': cls.env.company.id,
        })
        return {
            'currency': foreign_currency,
            'rates': rate1 + rate2,
        }

    @classmethod
    def setup_armageddon_tax(cls, tax_name, company_data):
        """Create a deliberately complex 'group' tax (price-included child
        with split repartition lines + cash-basis child) for stress tests."""
        return cls.env['account.tax'].create({
            'name': '%s (group)' % tax_name,
            'amount_type': 'group',
            'amount': 0.0,
            'children_tax_ids': [
                (0, 0, {
                    'name': '%s (child 1)' % tax_name,
                    'amount_type': 'percent',
                    'amount': 20.0,
                    'price_include': True,
                    'include_base_amount': True,
                    'tax_exigibility': 'on_invoice',
                    'invoice_repartition_line_ids': [
                        (0, 0, {
                            'factor_percent': 100,
                            'repartition_type': 'base',
                        }),
                        (0, 0, {
                            'factor_percent': 40,
                            'repartition_type': 'tax',
                            'account_id': company_data['default_account_tax_sale'].id,
                        }),
                        (0, 0, {
                            'factor_percent': 60,
                            'repartition_type': 'tax',
                            # /!\ No account set.
                        }),
                    ],
                    'refund_repartition_line_ids': [
                        (0, 0, {
                            'factor_percent': 100,
                            'repartition_type': 'base',
                        }),
                        (0, 0, {
                            'factor_percent': 40,
                            'repartition_type': 'tax',
                            'account_id': company_data['default_account_tax_sale'].id,
                        }),
                        (0, 0, {
                            'factor_percent': 60,
                            'repartition_type': 'tax',
                            # /!\ No account set.
                        }),
                    ],
                }),
                (0, 0, {
                    'name': '%s (child 2)' % tax_name,
                    'amount_type': 'percent',
                    'amount': 10.0,
                    'tax_exigibility': 'on_payment',
                    'cash_basis_transition_account_id': company_data['default_account_tax_sale'].copy().id,
                    'invoice_repartition_line_ids': [
                        (0, 0, {
                            'factor_percent': 100,
                            'repartition_type': 'base',
                        }),
                        (0, 0, {
                            'factor_percent': 100,
                            'repartition_type': 'tax',
                            'account_id': company_data['default_account_tax_sale'].id,
                        }),
                    ],
                    'refund_repartition_line_ids': [
                        (0, 0, {
                            'factor_percent': 100,
                            'repartition_type': 'base',
                        }),
                        (0, 0, {
                            'factor_percent': 100,
                            'repartition_type': 'tax',
                            'account_id': company_data['default_account_tax_sale'].id,
                        }),
                    ],
                }),
            ],
        })

    @classmethod
    def init_invoice(cls, move_type, partner=None, invoice_date=None):
        """Create (via the Form emulator) an invoice of ``move_type`` with one
        line of product_a and one of product_b; returns the saved move."""
        move_form = Form(cls.env['account.move'].with_context(default_type=move_type))
        move_form.invoice_date = invoice_date or fields.Date.from_string('2019-01-01')
        move_form.partner_id = partner or cls.partner_a

        with move_form.invoice_line_ids.new() as line_form:
            line_form.product_id = cls.product_a
        with move_form.invoice_line_ids.new() as line_form:
            line_form.product_id = cls.product_b

        return move_form.save()

    def assertInvoiceValues(self, move, expected_lines_values, expected_move_values):
        """Assert the move's lines (in a deterministic order) and the move
        itself match the expected value dicts."""
        def sort_lines(lines):
            return lines.sorted(lambda line: (line.exclude_from_invoice_tab, not bool(line.tax_line_id), line.name or '', line.balance))
        self.assertRecordValues(sort_lines(move.line_ids.sorted()), expected_lines_values)
        self.assertRecordValues(sort_lines(move.invoice_line_ids.sorted()), expected_lines_values[:len(move.invoice_line_ids)])
        self.assertRecordValues(move, [expected_move_values])

    @contextmanager
    def mocked_today(self, forced_today):
        ''' Helper to make easily a python "with statement" mocking the "today" date.
        :param forced_today: The expected "today" date as a str or Date object.
        :return: An object to be used like 'with self.mocked_today(<today>):'.
        '''

        if isinstance(forced_today, str):
            forced_today_date = fields.Date.from_string(forced_today)
            forced_today_datetime = fields.Datetime.from_string(forced_today)
        elif isinstance(forced_today, datetime.datetime):
            forced_today_datetime = forced_today
            forced_today_date = forced_today_datetime.date()
        else:
            forced_today_date = forced_today
            forced_today_datetime = datetime.datetime.combine(forced_today_date, datetime.time())

        def today(*args, **kwargs):
            return forced_today_date

        with patch.object(fields.Date, 'today', today):
            with patch.object(fields.Date, 'context_today', today):
                with patch.object(fields.Datetime, 'now', return_value=forced_today_datetime):
                    yield
class AccountingSavepointCase(AccountTestInvoicingCommon):
    # Deprecated alias of AccountTestInvoicingCommon.
    # Ensure the backward-compatibility before saas-13.2.
    pass
| odoo/base-addons/account/tests/account_test_savepoint.py | 18,071 | Helper to make easily a python "with statement" mocking the "today" date.
:param forced_today: The expected "today" date as a str or Date object.
:return: An object to be used like 'with self.mocked_today(<today>):'.
Create a new company having the name passed as parameter.
A chart of accounts will be installed to this company: the same as the current company one.
The current user will get access to this company.
:param company_name: The name of the company.
:return: A dictionary will be returned containing all relevant accounting data for testing.
-*- coding: utf-8 -*- skipTest raises exception Create user. Shadow the current environment/cursor with one having the report user. This is mandatory to test access rights. ==== Taxes ==== ==== Products ==== ==== Fiscal positions ==== ==== Payment terms ==== ==== Partners ==== ==== Cash rounding ==== The currency could be different after the installation of the chart template. /!\ No account set. /!\ No account set. Ensure the backward-compatibility before saas-13.2. | 1,047 | en | 0.860917 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#############
## Imports ##
#############
import os
import sys ; sys.path.append("/home/developer/workspace/rklearn-lib")
import tensorflow as tf
from rklearn.tfoo_v1 import BaseModel
#################
## CIFAR10CNN ##
#################
class CIFAR10CNN(BaseModel):
    """Custom CNN for the CIFAR-10 dataset (TensorFlow 1.x graph mode).

    Reads its hyper-parameters from ``config.cifar10_cnn`` and exposes the
    graph tensors (X, y, train, loss, training_op, accuracy) as attributes.
    """

    ################
    ## __init__() ##
    ################

    def __init__(self, config, logger=None):
        """
        :param config: configuration object; must provide the
            ``cifar10_cnn`` and ``data`` sections used below.
        :param logger: optional logger used to report construction errors.
        :raises RuntimeError: when the checkpoint/model directories cannot
            be prepared or a required config key is missing.
        """
        super().__init__(config, logger)

        try:
            # these parameters are sent to the trainer through the model because it is easier
            self.num_epochs = self.config.cifar10_cnn["num_epochs"]
            self.learning_rate = self.config.cifar10_cnn["learning_rate"]
            self.max_to_keep = self.config.cifar10_cnn["max_to_keep"]
            self.checkpoint_dir = self.config.cifar10_cnn["checkpoint_dir"]
            self.model_dir = self.config.cifar10_cnn["model_dir"]

            os.makedirs(self.checkpoint_dir, exist_ok=True)
            os.makedirs(self.model_dir, exist_ok=True)

        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            # Bug fix: ``logger`` defaults to None; the original called
            # logger.error() unconditionally, masking the real error with an
            # AttributeError.
            if logger is not None:
                logger.error("error msg = {}, error type = {}, error file = {}, error line = {}".format(e, exc_type, fname, exc_tb.tb_lineno))
            raise RuntimeError("Error in CIFAR10CNN construction regarding the checkpoints and model directories!")

    ###################
    ## build_model() ##
    ###################

    def build_model(self):
        """
        Build the custom CNN for the CIFAR-10 dataset.
        """

        # The input data holders (cf. shapes after prepa)
        self.X = tf.compat.v1.placeholder(tf.float32, shape=(None,
                                          self.config.data["image_size"],
                                          self.config.data["image_size"],
                                          self.config.data["num_channels"]), name="X")  # ex. (50000, 32, 32, 3)
        self.y = tf.compat.v1.placeholder(tf.int32, shape=(None, self.config.data["num_categories"]), name="y")  # ex. (50000, 10)
        self.train = tf.compat.v1.placeholder(tf.bool)  # True while training (enables dropout/batch-norm)

        # The CNN architecture = conv/poo layers + flatten layer + connected layers
        with tf.name_scope("cnn"):

            # a. Create convolution/pooling layers = conv + drop + pool + conv + drop + pool + conv + pool + conv + drop
            self.conv1 = tf.layers.conv2d(self.X,
                                          self.config.cifar10_cnn["num_filters"],
                                          self.config.cifar10_cnn["filter_size"],
                                          padding='same', activation=tf.nn.relu)
            self.drop1 = tf.layers.dropout(self.conv1, self.config.cifar10_cnn["keep_prob"], training=self.train)
            self.pool1 = tf.layers.max_pooling2d(self.drop1, 2, 2)

            self.conv2 = tf.layers.conv2d(self.pool1,
                                          self.config.cifar10_cnn["num_filters"],
                                          self.config.cifar10_cnn["filter_size"],
                                          padding='same', activation=tf.nn.relu)
            self.drop2 = tf.layers.dropout(self.conv2, self.config.cifar10_cnn["keep_prob"], training=self.train)
            self.pool2 = tf.layers.max_pooling2d(self.drop2, 2, 2)

            self.conv3 = tf.layers.conv2d(self.pool2,
                                          self.config.cifar10_cnn["num_filters"],
                                          self.config.cifar10_cnn["filter_size"],
                                          padding='same', activation=tf.nn.relu)
            self.pool3 = tf.layers.max_pooling2d(self.conv3, 2, 2)

            self.conv4 = tf.layers.conv2d(self.pool3,
                                          self.config.cifar10_cnn["num_filters"],
                                          self.config.cifar10_cnn["filter_size"],
                                          padding='same', activation=tf.nn.relu)
            self.drop3 = tf.layers.dropout(self.conv4, self.config.cifar10_cnn["keep_prob"], training=self.train)

            # b. Flatten input data
            # NOTE(review): assumes the flattened conv output has exactly
            # fc1_nb_units features — config must match the image size.
            self.flatten = tf.reshape(self.drop3, [-1, self.config.cifar10_cnn["fc1_nb_units"]])

            # Create connected layers: fc1, fc2
            with tf.contrib.framework.arg_scope([tf.contrib.layers.fully_connected],
                                                normalizer_fn=tf.contrib.layers.batch_norm,
                                                normalizer_params={"is_training": self.train}):
                self.fc1 = tf.contrib.layers.fully_connected(self.flatten, self.config.cifar10_cnn["fc1_nb_units"])
                self.fc2 = tf.contrib.layers.fully_connected(self.fc1, self.config.data["num_categories"], activation_fn=None)

        # Compute loss
        with tf.name_scope("loss"):
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.fc2, labels=self.y))

        # Optimizer
        with tf.name_scope("training_op"):
            self.training_op = tf.compat.v1.train.AdamOptimizer(self.learning_rate).minimize(self.loss)

        # Perf metrics
        with tf.name_scope("accuracy"):
            prediction = tf.equal(tf.argmax(self.fc2, 1), tf.argmax(self.y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
| rklearn/tests/it/cifar10_cnn.py | 5,623 | Build the custom CNN for the CIFAR-10 dataset.
!/usr/bin/env python -*- coding: utf-8 -*- Imports CIFAR10CNN __init__() these parameters are sent to the trainer through the model because it is easier build_model() The input data holders (cf. shapes after prepa) ex. (50000, 32, 32, 3) ex. (50000, 10) The CNN architecture = conv/poo layers + flatten layer + connected layers a. Create convolution/pooling layers = conv + drop + pool + conv + drop + pool + conv + pool + conv + drop b. Flatten input data Create connected layers: fc1, fc2 Compute loss Optimizer Perf metrics | 584 | en | 0.61569 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/7/25 0025 上午 10:14
# @Author : Exchris Tsai
# @Site :
# @File : example52.py
# @Software: PyCharm
"""
题目:学习使用按位或 | 。
程序分析:0|0=0; 0|1=1; 1|0=1; 1|1=1
"""
__author__ = 'Exchris Tsai'
if __name__ == '__main__':
    # Demonstrate bitwise OR: start from octal 77, then fold in extra bits.
    a = 0o77
    b = a | 3
    print('a | b is %d' % b)
    b = b | 7
    print('a | b is %d' % b)
程序分析:0|0=0; 0|1=1; 1|0=1; 1|1=1
!/usr/bin/env python -*- coding: utf-8 -*- @Time : 2017/7/25 0025 上午 10:14 @Author : Exchris Tsai @Site : @File : example52.py @Software: PyCharm | 204 | zh | 0.574206 |
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2015 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
import sys
import math
from PyQt5.QtCore import pyqtSignal, QSize, Qt, QTimer
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import (QAction, QApplication, QGridLayout, QLabel,
QLineEdit, QMainWindow, QMessageBox, QOpenGLWidget, QScrollArea,
QSizePolicy, QSlider, QWidget)
class GLWidget(QOpenGLWidget):
    """OpenGL widget rendering three animated gears (Qt 'Grabber' demo)."""

    xRotationChanged = pyqtSignal(int)
    yRotationChanged = pyqtSignal(int)
    zRotationChanged = pyqtSignal(int)

    def __init__(self, parent=None):
        super(GLWidget, self).__init__(parent)

        self.gear1 = 0
        self.gear2 = 0
        self.gear3 = 0
        self.xRot = 0
        self.yRot = 0
        self.zRot = 0
        self.gear1Rot = 0

        # Advance the gear animation every 20 ms.
        timer = QTimer(self)
        timer.timeout.connect(self.advanceGears)
        timer.start(20)

    def setXRotation(self, angle):
        """Set the X rotation (1/16ths of a degree), wrapped to [0, 360*16]."""
        # Bug fix: normalizeAngle() returns the wrapped angle; the original
        # discarded the result, so out-of-range angles were stored as-is.
        angle = self.normalizeAngle(angle)
        if angle != self.xRot:
            self.xRot = angle
            self.xRotationChanged.emit(angle)
            self.update()

    def setYRotation(self, angle):
        """Set the Y rotation (1/16ths of a degree), wrapped to [0, 360*16]."""
        angle = self.normalizeAngle(angle)
        if angle != self.yRot:
            self.yRot = angle
            self.yRotationChanged.emit(angle)
            self.update()

    def setZRotation(self, angle):
        """Set the Z rotation (1/16ths of a degree), wrapped to [0, 360*16]."""
        angle = self.normalizeAngle(angle)
        if angle != self.zRot:
            self.zRot = angle
            self.zRotationChanged.emit(angle)
            self.update()

    def initializeGL(self):
        """Set up lighting, depth test and build the three gear display lists."""
        self.gl = self.context().versionFunctions()
        self.gl.initializeOpenGLFunctions()

        lightPos = (5.0, 5.0, 10.0, 1.0)
        reflectance1 = (0.8, 0.1, 0.0, 1.0)
        reflectance2 = (0.0, 0.8, 0.2, 1.0)
        reflectance3 = (0.2, 0.2, 1.0, 1.0)

        self.gl.glLightfv(self.gl.GL_LIGHT0, self.gl.GL_POSITION, lightPos)
        self.gl.glEnable(self.gl.GL_LIGHTING)
        self.gl.glEnable(self.gl.GL_LIGHT0)
        self.gl.glEnable(self.gl.GL_DEPTH_TEST)

        self.gear1 = self.makeGear(reflectance1, 1.0, 4.0, 1.0, 0.7, 20)
        self.gear2 = self.makeGear(reflectance2, 0.5, 2.0, 2.0, 0.7, 10)
        self.gear3 = self.makeGear(reflectance3, 1.3, 2.0, 0.5, 0.7, 10)

        self.gl.glEnable(self.gl.GL_NORMALIZE)
        self.gl.glClearColor(0.0, 0.0, 0.0, 1.0)

    def paintGL(self):
        """Draw the three gears with the current view rotation."""
        self.gl.glClear(self.gl.GL_COLOR_BUFFER_BIT | self.gl.GL_DEPTH_BUFFER_BIT)

        self.gl.glPushMatrix()
        self.gl.glRotated(self.xRot / 16.0, 1.0, 0.0, 0.0)
        self.gl.glRotated(self.yRot / 16.0, 0.0, 1.0, 0.0)
        self.gl.glRotated(self.zRot / 16.0, 0.0, 0.0, 1.0)

        self.drawGear(self.gear1, -3.0, -2.0, 0.0, self.gear1Rot / 16.0)
        # The second gear meshes with the first, so it counter-rotates twice as fast.
        self.drawGear(self.gear2, +3.1, -2.0, 0.0,
                      -2.0 * (self.gear1Rot / 16.0) - 9.0)

        self.gl.glRotated(+90.0, 1.0, 0.0, 0.0)
        self.drawGear(self.gear3, -3.1, -1.8, -2.2,
                      +2.0 * (self.gear1Rot / 16.0) - 2.0)

        self.gl.glPopMatrix()

    def resizeGL(self, width, height):
        """Keep a square, centered viewport and rebuild the frustum."""
        side = min(width, height)
        if side < 0:
            return

        self.gl.glViewport((width - side) // 2, (height - side) // 2, side, side)

        self.gl.glMatrixMode(self.gl.GL_PROJECTION)
        self.gl.glLoadIdentity()
        self.gl.glFrustum(-1.0, +1.0, -1.0, 1.0, 5.0, 60.0)
        self.gl.glMatrixMode(self.gl.GL_MODELVIEW)
        self.gl.glLoadIdentity()
        self.gl.glTranslated(0.0, 0.0, -40.0)

    def mousePressEvent(self, event):
        self.lastPos = event.pos()

    def mouseMoveEvent(self, event):
        # Left drag rotates around X/Y, right drag around X/Z.
        dx = event.x() - self.lastPos.x()
        dy = event.y() - self.lastPos.y()

        if event.buttons() & Qt.LeftButton:
            self.setXRotation(self.xRot + 8 * dy)
            self.setYRotation(self.yRot + 8 * dx)
        elif event.buttons() & Qt.RightButton:
            self.setXRotation(self.xRot + 8 * dy)
            self.setZRotation(self.zRot + 8 * dx)

        self.lastPos = event.pos()

    def advanceGears(self):
        # One timer tick = 2 degrees (stored in 1/16ths of a degree).
        self.gear1Rot += 2 * 16
        self.update()

    def xRotation(self):
        return self.xRot

    def yRotation(self):
        return self.yRot

    def zRotation(self):
        return self.zRot

    def makeGear(self, reflectance, innerRadius, outerRadius, thickness, toothSize, toothCount):
        """Build an OpenGL display list for one gear and return its id."""
        # Renamed from 'list' to avoid shadowing the builtin.
        genList = self.gl.glGenLists(1)
        self.gl.glNewList(genList, self.gl.GL_COMPILE)
        self.gl.glMaterialfv(self.gl.GL_FRONT, self.gl.GL_AMBIENT_AND_DIFFUSE,
                             reflectance)

        r0 = innerRadius
        r1 = outerRadius - toothSize / 2.0
        r2 = outerRadius + toothSize / 2.0
        delta = (2.0 * math.pi / toothCount) / 4.0
        z = thickness / 2.0

        self.gl.glShadeModel(self.gl.GL_FLAT)

        # Front (sign=+1) and back (sign=-1) faces of the gear.
        for i in range(2):
            if i == 0:
                sign = +1.0
            else:
                sign = -1.0

            self.gl.glNormal3d(0.0, 0.0, sign)

            self.gl.glBegin(self.gl.GL_QUAD_STRIP)
            for j in range(toothCount + 1):
                angle = 2.0 * math.pi * j / toothCount
                self.gl.glVertex3d(r0 * math.cos(angle), r0 * math.sin(angle), sign * z)
                self.gl.glVertex3d(r1 * math.cos(angle), r1 * math.sin(angle), sign * z)
                self.gl.glVertex3d(r0 * math.cos(angle), r0 * math.sin(angle), sign * z)
                self.gl.glVertex3d(r1 * math.cos(angle + 3 * delta), r1 * math.sin(angle + 3 * delta), sign * z)
            self.gl.glEnd()

            self.gl.glBegin(self.gl.GL_QUADS)
            for j in range(toothCount):
                angle = 2.0 * math.pi * j / toothCount
                self.gl.glVertex3d(r1 * math.cos(angle), r1 * math.sin(angle), sign * z)
                self.gl.glVertex3d(r2 * math.cos(angle + delta), r2 * math.sin(angle + delta), sign * z)
                self.gl.glVertex3d(r2 * math.cos(angle + 2 * delta), r2 * math.sin(angle + 2 * delta), sign * z)
                self.gl.glVertex3d(r1 * math.cos(angle + 3 * delta), r1 * math.sin(angle + 3 * delta), sign * z)
            self.gl.glEnd()

        # Outer surface of the teeth.
        self.gl.glBegin(self.gl.GL_QUAD_STRIP)
        for i in range(toothCount):
            for j in range(2):
                angle = 2.0 * math.pi * (i + (j / 2.0)) / toothCount
                s1 = r1
                s2 = r2

                if j == 1:
                    s1, s2 = s2, s1

                self.gl.glNormal3d(math.cos(angle), math.sin(angle), 0.0)
                self.gl.glVertex3d(s1 * math.cos(angle), s1 * math.sin(angle), +z)
                self.gl.glVertex3d(s1 * math.cos(angle), s1 * math.sin(angle), -z)

                self.gl.glNormal3d(s2 * math.sin(angle + delta) - s1 * math.sin(angle), s1 * math.cos(angle) - s2 * math.cos(angle + delta), 0.0)
                self.gl.glVertex3d(s2 * math.cos(angle + delta), s2 * math.sin(angle + delta), +z)
                self.gl.glVertex3d(s2 * math.cos(angle + delta), s2 * math.sin(angle + delta), -z)

        self.gl.glVertex3d(r1, 0.0, +z)
        self.gl.glVertex3d(r1, 0.0, -z)
        self.gl.glEnd()

        self.gl.glShadeModel(self.gl.GL_SMOOTH)

        # Inner cylinder.
        self.gl.glBegin(self.gl.GL_QUAD_STRIP)
        for i in range(toothCount + 1):
            angle = i * 2.0 * math.pi / toothCount
            self.gl.glNormal3d(-math.cos(angle), -math.sin(angle), 0.0)
            self.gl.glVertex3d(r0 * math.cos(angle), r0 * math.sin(angle), +z)
            self.gl.glVertex3d(r0 * math.cos(angle), r0 * math.sin(angle), -z)
        self.gl.glEnd()

        self.gl.glEndList()

        return genList

    def drawGear(self, gear, dx, dy, dz, angle):
        """Call the gear display list translated by (dx, dy, dz), rotated by angle."""
        self.gl.glPushMatrix()
        self.gl.glTranslated(dx, dy, dz)
        self.gl.glRotated(angle, 0.0, 0.0, 1.0)
        self.gl.glCallList(gear)
        self.gl.glPopMatrix()

    def normalizeAngle(self, angle):
        """Wrap *angle* (1/16ths of a degree) into [0, 360 * 16] and return it.

        Bug fix: the original never returned the wrapped value, so every
        caller's normalization was silently a no-op.
        """
        while angle < 0:
            angle += 360 * 16
        while angle > 360 * 16:
            angle -= 360 * 16
        return angle
class MainWindow(QMainWindow):
    """Main window of the Grabber demo: live GL view on the left, the last
    grabbed pixmap on the right, and three rotation sliders below."""

    def __init__(self):
        super(MainWindow, self).__init__()

        centralWidget = QWidget()
        self.setCentralWidget(centralWidget)

        self.glWidget = GLWidget()
        self.pixmapLabel = QLabel()

        # Scroll area hosting the live OpenGL widget (scrollbars disabled).
        self.glWidgetArea = QScrollArea()
        self.glWidgetArea.setWidget(self.glWidget)
        self.glWidgetArea.setWidgetResizable(True)
        self.glWidgetArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.glWidgetArea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.glWidgetArea.setSizePolicy(QSizePolicy.Ignored,
                QSizePolicy.Ignored)
        self.glWidgetArea.setMinimumSize(50, 50)

        # Scroll area showing the grabbed frame as a pixmap.
        self.pixmapLabelArea = QScrollArea()
        self.pixmapLabelArea.setWidget(self.pixmapLabel)
        self.pixmapLabelArea.setSizePolicy(QSizePolicy.Ignored,
                QSizePolicy.Ignored)
        self.pixmapLabelArea.setMinimumSize(50, 50)

        # One slider per rotation axis, two-way bound to the GL widget.
        xSlider = self.createSlider(self.glWidget.xRotationChanged,
                self.glWidget.setXRotation)
        ySlider = self.createSlider(self.glWidget.yRotationChanged,
                self.glWidget.setYRotation)
        zSlider = self.createSlider(self.glWidget.zRotationChanged,
                self.glWidget.setZRotation)

        self.createActions()
        self.createMenus()

        centralLayout = QGridLayout()
        centralLayout.addWidget(self.glWidgetArea, 0, 0)
        centralLayout.addWidget(self.pixmapLabelArea, 0, 1)
        centralLayout.addWidget(xSlider, 1, 0, 1, 2)
        centralLayout.addWidget(ySlider, 2, 0, 1, 2)
        centralLayout.addWidget(zSlider, 3, 0, 1, 2)
        centralWidget.setLayout(centralLayout)

        # Slider values are in 1/16ths of a degree.
        xSlider.setValue(15 * 16)
        ySlider.setValue(345 * 16)
        zSlider.setValue(0 * 16)

        self.setWindowTitle("Grabber")
        self.resize(400, 300)

    def grabFrameBuffer(self):
        """Grab the current GL framebuffer and show it in the pixmap label."""
        image = self.glWidget.grabFramebuffer()
        self.setPixmap(QPixmap.fromImage(image))

    def clearPixmap(self):
        """Reset the pixmap label to an empty pixmap."""
        self.setPixmap(QPixmap())

    def about(self):
        QMessageBox.about(self, "About Grabber",
                "The <b>Grabber</b> example demonstrates two approaches for "
                "rendering OpenGL into a Qt pixmap.")

    def createActions(self):
        """Create the menu actions with their shortcuts and slots."""
        self.grabFrameBufferAct = QAction("&Grab Frame Buffer", self,
                shortcut="Ctrl+G", triggered=self.grabFrameBuffer)

        self.clearPixmapAct = QAction("&Clear Pixmap", self,
                shortcut="Ctrl+L", triggered=self.clearPixmap)

        self.exitAct = QAction("E&xit", self, shortcut="Ctrl+Q",
                triggered=self.close)

        self.aboutAct = QAction("&About", self, triggered=self.about)

        self.aboutQtAct = QAction("About &Qt", self,
                triggered=QApplication.instance().aboutQt)

    def createMenus(self):
        """Build the File and Help menus from the actions."""
        self.fileMenu = self.menuBar().addMenu("&File")
        self.fileMenu.addAction(self.grabFrameBufferAct)
        self.fileMenu.addAction(self.clearPixmapAct)
        self.fileMenu.addSeparator()
        self.fileMenu.addAction(self.exitAct)

        self.helpMenu = self.menuBar().addMenu("&Help")
        self.helpMenu.addAction(self.aboutAct)
        self.helpMenu.addAction(self.aboutQtAct)

    def createSlider(self, changedSignal, setterSlot):
        """Return a horizontal 0..360-degree slider (in 1/16ths of a degree)
        connected both ways: slider -> setterSlot, changedSignal -> slider."""
        slider = QSlider(Qt.Horizontal)
        slider.setRange(0, 360 * 16)
        slider.setSingleStep(16)
        slider.setPageStep(15 * 16)
        slider.setTickInterval(15 * 16)
        slider.setTickPosition(QSlider.TicksRight)

        slider.valueChanged.connect(setterSlot)
        changedSignal.connect(slider.setValue)

        return slider

    def setPixmap(self, pixmap):
        """Display ``pixmap`` and size the label to fit the viewport exactly."""
        self.pixmapLabel.setPixmap(pixmap)
        size = pixmap.size()

        # Shave one pixel when the pixmap exactly overflows the viewport width.
        if size - QSize(1, 0) == self.pixmapLabelArea.maximumViewportSize():
            size -= QSize(1, 0)

        self.pixmapLabel.resize(size)
if __name__ == '__main__':
    # Standard Qt bootstrap: build the application, show the main window,
    # and hand control to the event loop until it exits.
    application = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(application.exec_())
| PyQt5_gpl-5.8/examples/opengl/grabber.py | 14,067 | !/usr/bin/env python Copyright (C) 2015 Riverbank Computing Limited. Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). All rights reserved. This file is part of the examples of PyQt. $QT_BEGIN_LICENSE:BSD$ You may use this file under the terms of the BSD license as follows: "Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." $QT_END_LICENSE$ | 1,785 | en | 0.886627 |
"""
Data processing routines
Deepak Baby, UGent, June 2018
deepak.baby@ugent.be
"""
import numpy as np
def reconstruct_wav(wavmat, stride_factor=0.5):
    """
    Reconstruct a (1, wav_length) waveform by overlap-adding the rows of
    ``wavmat`` (n_windows x window_length), each shifted by
    ``stride_factor`` of a window.

    :param wavmat: 2-D array of windowed audio slices
    :param stride_factor: fraction of a window between consecutive slices
    :return: (1, wav_length) reconstructed waveform
    """
    window_length = wavmat.shape[1]
    window_stride = int(stride_factor * window_length)
    wav_length = (wavmat.shape[0] - 1) * window_stride + window_length
    wav_recon = np.zeros((1, wav_length))
    for k in range(wavmat.shape[0]):
        wav_beg = k * window_stride
        wav_end = wav_beg + window_length
        wav_recon[0, wav_beg:wav_end] += wavmat[k, :]

    # Each interior sample is covered by `noverlap` windows; scale so the
    # overlap-add sums back to unit amplitude.
    noverlap = int(np.ceil(1 / stride_factor))
    scale_ = (1 / float(noverlap)) * np.ones((1, wav_length))
    # The first/last stride-sized regions are covered by fewer windows and
    # therefore need proportionally larger weights.
    for s in range(noverlap - 1):
        s_beg = s * window_stride
        s_end = s_beg + window_stride
        scale_[0, s_beg:s_end] = 1 / (s + 1)
        # Bug fix: the original negative-step slice [-s_beg-1:-s_end:-1]
        # excluded sample wav_length - s_end, leaving it under-scaled.
        # Mirror the [s_beg:s_end) region exactly instead.
        scale_[0, wav_length - s_end:wav_length - s_beg] = 1 / (s + 1)
    return wav_recon * scale_
def pre_emph(x, coeff=0.95):
    """
    Row-wise pre-emphasis filter for 2-D data (batch_size x window_length).

    The first column is kept as-is; every later sample becomes
    x[n] - coeff * x[n-1].
    """
    first_col = np.expand_dims(x[:, 0], axis=1)
    emphasized = np.concatenate((first_col, x[:, 1:] - coeff * x[:, :-1]), axis=1)
    # Sanity check: the output must keep the input's shape.
    if x.shape != emphasized.shape:
        print("ERROR: Pre-emphasis is wrong")
    return emphasized
def de_emph(y, coeff=0.95):
    """
    De-emphasis (inverse of pre_emph) for 1-D test data.

    Implements the IIR recursion x[n] = coeff * x[n-1] + y[n].
    A non-positive coeff disables the filter and returns y unchanged.
    """
    if coeff <= 0:
        return y
    out = np.zeros((y.shape[0],), dtype=np.float32)
    out[0] = y[0]
    for idx in range(1, y.shape[0]):
        out[idx] = coeff * out[idx - 1] + y[idx]
    return out
def data_preprocess(wav, preemph=0.95):
    """
    Map 16-bit PCM samples onto [-1, 1] floats and optionally pre-emphasize.

    :param wav: integer PCM samples (batch x window)
    :param preemph: pre-emphasis coefficient; <= 0 skips pre-emphasis
    """
    scaled = (2. / 65535.) * (wav.astype('float32') - 32767) + 1.
    if preemph > 0:
        scaled = pre_emph(scaled, coeff=preemph)
    return scaled.astype('float32')
| data_ops.py | 1,919 | Apply de_emphasis on test data: works only on 1d data
Apply pre_emph on 2d data (batch_size x window_length)
Reconstructs the audiofile from sliced matrix wavmat
Data processing routines
Deepak Baby, UGent, June 2018
deepak.baby@ugent.be
print ("wav recon shape " + str(wav_recon.shape)) now compute the scaling factor for multiple instancesprint ("x shape: " + str(x.shape))print ("x_preemph shape: " + str(x_preemph.shape))print("in_shape" + str(y.shape)) | 460 | en | 0.724207 |
from django.views.generic import TemplateView, CreateView, UpdateView
from django.urls import reverse_lazy
from home_app import forms
from django.contrib.auth.mixins import LoginRequiredMixin
from account_app.models import CustomUser
# Create your views here.
class IndexView(TemplateView):
    """Public landing page (static template, no extra context)."""
    template_name = 'home_app/index.html'
class ProfileView(LoginRequiredMixin, TemplateView):
    """Profile page; anonymous visitors are redirected to login by the mixin."""
    template_name = 'home_app/profile.html'
class RegistrationView(CreateView):
    """Sign-up form backed by forms.UserCreateForm; redirects to index on success."""
    form_class = forms.UserCreateForm
    success_url = reverse_lazy('home-app:index')
    template_name = 'registration/registration.html'
class UserUpdateView(UpdateView):
    """Edit an existing CustomUser via forms.UserUpdateForm.

    NOTE(review): unlike ProfileView this view has no LoginRequiredMixin and
    no get_object()/get_queryset() restriction, so any visitor who knows a pk
    could open the form — confirm access control is enforced elsewhere.
    """
    form_class = forms.UserUpdateForm
    success_url = reverse_lazy('home-app:profile')
    template_name = 'registration/registration_form.html'
    model = CustomUser
class Page403View(TemplateView):
    """Custom "permission denied" (HTTP 403) page."""
    template_name = 'home_app/403.html'
| home_app/views.py | 894 | Create your views here. | 23 | en | 0.928092 |
# Generated by Django 3.2 on 2021-09-07 12:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the cart app: creates Cart and CartLine tables.

    Auto-generated by Django's makemigrations; edit with care.
    """

    initial = True

    # Cross-app FKs require these migrations to run first.
    dependencies = [
        ('payment', '0002_alter_invoice_address'),
        ('item', '0002_alter_item_upc'),
        ('accounts', '0002_auto_20210831_0046'),
        ('service', '0008_service_available'),
    ]

    operations = [
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_time', models.DateTimeField(auto_now_add=True, verbose_name='created time')),
                ('modified_time', models.DateTimeField(auto_now=True, verbose_name='modified time')),
                ('is_paid', models.BooleanField(default=False, verbose_name='is paid')),
                ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='carts', to='accounts.customer', verbose_name='customer')),
                ('invoice', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='cart', to='payment.invoice', verbose_name='invoice')),
                ('service', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='carts', to='service.service', verbose_name='service')),
            ],
            options={
                'verbose_name': 'Cart',
                'verbose_name_plural': 'Carts',
                'db_table': 'cart',
            },
        ),
        migrations.CreateModel(
            name='CartLine',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_time', models.DateTimeField(auto_now_add=True, verbose_name='created time')),
                ('modified_time', models.DateTimeField(auto_now=True, verbose_name='modified time')),
                ('quantity', models.PositiveIntegerField(default=1, verbose_name='quantity')),
                ('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lines', to='cart.cart', verbose_name='cart')),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='lines', to='item.item', verbose_name='item')),
            ],
            options={
                'verbose_name': 'Cart line',
                'verbose_name_plural': 'Cart lines',
                'db_table': 'cart_line',
                'ordering': ('created_time', 'modified_time'),
                # Each item may appear at most once per cart.
                'unique_together': {('item', 'cart')},
            },
        ),
] | cart/migrations/0001_initial.py | 2,766 | Generated by Django 3.2 on 2021-09-07 12:46 | 43 | en | 0.781417 |
# -*- coding: utf-8 -*-
from unittest import mock
from vispy.scene.visuals import Image
from vispy.testing import (requires_application, TestingCanvas,
run_tests_if_main)
from vispy.testing.image_tester import assert_image_approved, downsample
import numpy as np
import pytest
@requires_application()
@pytest.mark.parametrize('is_3d', [True, False])
def test_image(is_3d):
    """Render seeded random data through an Image visual and compare to baseline."""
    size = (100, 50)
    with TestingCanvas(size=size, bgcolor='w') as c:
        image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
        data_shape = (size[1] - 10, size[0] - 10)
        if is_3d:
            data_shape = data_shape + (3,)
        np.random.seed(379823)
        image.set_data(np.random.rand(*data_shape))
        suffix = "_rgb" if is_3d else "_mono"
        assert_image_approved(c.render(), "visuals/image%s.png" % suffix)
def _make_test_data(shape, input_dtype):
    """Random test image of *shape*, scaled to the dtype's full range."""
    data = np.random.random_sample(shape)
    if data.ndim == 3 and data.shape[-1] == 4:
        # RGBA input: force the alpha channel to fully opaque
        data[..., -1] = 1.0
    scale = _max_for_dtype(input_dtype)
    if scale != 1:
        data = data * scale
    return data.astype(input_dtype)
def _compare_render(orig_data, rendered_data, previous_render=None, atol=1):
    """Assert the render matches the RGBA prediction from *orig_data*.

    If *previous_render* is given, also assert the new render differs from it.
    """
    expected = _make_rgba(orig_data)
    np.testing.assert_allclose(rendered_data.astype(float),
                               expected.astype(float), atol=atol)
    if previous_render is not None:
        # the new render must NOT be close to the previous one
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_data, previous_render, atol=10)
def _set_image_data(image, data, should_fail):
    """Assign *data* to *image*; when *should_fail*, expect ValueError instead."""
    if not should_fail:
        image.set_data(data)
        return
    pytest.raises(ValueError, image.set_data, data)
def _max_for_dtype(input_dtype):
    """Largest representable value for integer dtypes, else 1.0 for floats."""
    if np.issubdtype(input_dtype, np.integer):
        return np.iinfo(input_dtype).max
    return 1.0
def _get_orig_and_new_clims(input_dtype):
    """Return (full-range clim, narrowed clim) tuples for the given dtype."""
    max_val = _max_for_dtype(input_dtype)
    lo, hi = 0.3, 0.8
    if np.issubdtype(input_dtype, np.integer):
        new_clim = (int(lo * max_val), int(hi * max_val))
    else:
        new_clim = (lo, hi)
    return (0, max_val), new_clim
@requires_application()
@pytest.mark.parametrize('data_on_init', [False, True])
@pytest.mark.parametrize('clim_on_init', [False, True])
@pytest.mark.parametrize('num_channels', [0, 1, 3, 4])
@pytest.mark.parametrize('texture_format', [None, '__dtype__', 'auto'])
@pytest.mark.parametrize('input_dtype', [np.uint8, np.uint16, np.float32, np.float64])
def test_image_clims_and_gamma(input_dtype, texture_format, num_channels,
                               clim_on_init, data_on_init):
    """Test image visual with clims and gamma on shader."""
    size = (40, 40)
    # '__dtype__' is a placeholder meaning "use the input dtype as the format"
    if texture_format == '__dtype__':
        texture_format = input_dtype
    # num_channels == 0 means a plain 2-D (grayscale) image
    shape = size + (num_channels,) if num_channels > 0 else size
    np.random.seed(0)
    data = _make_test_data(shape, input_dtype)
    orig_clim, new_clim = _get_orig_and_new_clims(input_dtype)
    # 16-bit integers and above seem to have precision loss when scaled on the CPU
    is_16int_cpu_scaled = (np.dtype(input_dtype).itemsize >= 2 and
                           np.issubdtype(input_dtype, np.integer) and
                           texture_format is None)
    clim_atol = 2 if is_16int_cpu_scaled else 1
    gamma_atol = 3 if is_16int_cpu_scaled else 2
    kwargs = {}
    if clim_on_init:
        kwargs['clim'] = orig_clim
    if data_on_init:
        kwargs['data'] = data
    # default is RGBA, anything except auto requires reformat
    set_data_fails = (num_channels != 4 and
                      texture_format is not None and
                      texture_format != 'auto')
    # NOTE(review): when data_on_init is True the set_data_fails path is never
    # exercised — presumably the Image constructor handles it; confirm.
    with TestingCanvas(size=size[::-1], bgcolor="w") as c:
        image = Image(cmap='grays', texture_format=texture_format,
                      parent=c.scene, **kwargs)
        if not data_on_init:
            _set_image_data(image, data, set_data_fails)
            if set_data_fails:
                return
        # The canvas may render at a multiple of the data size; downsample
        # back so renders can be compared pixel-per-data-sample.
        rendered = c.render()
        _dtype = rendered.dtype
        shape_ratio = rendered.shape[0] // data.shape[0]
        rendered1 = downsample(rendered, shape_ratio, axis=(0, 1)).astype(_dtype)
        _compare_render(data, rendered1)
        # adjust color limits
        image.clim = new_clim
        rendered2 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
        scaled_data = (np.clip(data, new_clim[0], new_clim[1]) - new_clim[0]) / (new_clim[1] - new_clim[0])
        _compare_render(scaled_data, rendered2, rendered1, atol=clim_atol)
        # adjust gamma
        image.gamma = 2
        rendered3 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
        _compare_render(scaled_data ** 2, rendered3, rendered2, atol=gamma_atol)
@requires_application()
def test_image_vertex_updates():
    """Test image visual coordinates are only built when needed."""
    size = (40, 40)
    with TestingCanvas(size=size, bgcolor="w") as c:
        shape = size + (3,)
        np.random.seed(0)
        image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
        # Spy on the internal vertex builder while keeping its behavior.
        with mock.patch.object(
                image, '_build_vertex_data',
                wraps=image._build_vertex_data) as build_vertex_mock:
            data = np.random.rand(*shape)
            image.set_data(data)
            c.render()
            build_vertex_mock.assert_called_once()
            build_vertex_mock.reset_mock()  # reset the count to 0
            # rendering again shouldn't cause vertex coordinates to be built
            c.render()
            build_vertex_mock.assert_not_called()
            # changing to data of the same shape shouldn't cause it
            data = np.zeros_like(data)
            image.set_data(data)
            c.render()
            build_vertex_mock.assert_not_called()
            # changing to another shape should
            data = data[:-5, :-5]
            image.set_data(data)
            c.render()
            build_vertex_mock.assert_called_once()
def _make_rgba(data_in):
    """Convert image data (grayscale, RGB, or RGBA) to a uint8 RGBA array.

    Used to build the expected render for comparisons; values are scaled
    from the dtype's range to 0-255.
    """
    max_val = _max_for_dtype(data_in.dtype)
    if data_in.ndim == 3 and data_in.shape[-1] == 1:
        data_in = data_in.squeeze()

    if data_in.ndim == 2:
        # grayscale -> replicate into R, G, B and force alpha to opaque
        out = np.stack([data_in] * 4, axis=2)
        out[:, :, 3] = max_val
    elif data_in.shape[-1] == 3:
        # RGB -> append an opaque alpha channel
        out = np.concatenate((data_in, np.ones((*data_in.shape[:2], 1)) * max_val), axis=2)
    else:
        out = data_in
    # Bug fix: np.float (alias of builtin float) was deprecated in NumPy 1.20
    # and removed in 1.24 -> use the builtin instead.
    return np.round((out.astype(float) * 255 / max_val)).astype(np.uint8)
# Allows this test module to be executed directly (vispy test harness entry point).
run_tests_if_main()
| vispy/visuals/tests/test_image.py | 6,614 | Test image visual
Test image visual with clims and gamma on shader.
Test image visual coordinates are only built when needed.
-*- coding: utf-8 -*- RGBA - make alpha fully opaque assert not allclose 16-bit integers and above seem to have precision loss when scaled on the CPU default is RGBA, anything except auto requires reformat adjust color limits adjust gamma reset the count to 0 rendering again shouldn't cause vertex coordinates to be built changing to data of the same shape shouldn't cause it changing to another shape should | 537 | en | 0.82938 |
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rqalpha.model.base_position import BasePosition
from rqalpha.environment import Environment
from rqalpha.const import SIDE, POSITION_EFFECT, DEFAULT_ACCOUNT_TYPE
class FuturePosition(BasePosition):
    """Position bookkeeping for a single futures contract (order_book_id).

    Long ("buy") and short ("sell") sides are tracked independently; each
    side keeps two lists of (price, amount) lots: old (overnight) holdings
    and today's holdings.  Transaction costs, realized PnL and average open
    prices are likewise kept per side.
    """

    # Properties scheduled for removal from the public API (none currently).
    __abandon_properties__ = []

    def __init__(self, order_book_id):
        super(FuturePosition, self).__init__(order_book_id)
        # (price, amount) lots carried over from previous trading days.
        self._buy_old_holding_list = []
        self._sell_old_holding_list = []
        # (price, amount) lots opened during the current trading day.
        self._buy_today_holding_list = []
        self._sell_today_holding_list = []
        # Accumulated transaction costs per side, reset at settlement.
        self._buy_transaction_cost = 0.
        self._sell_transaction_cost = 0.
        # Realized (closing) PnL per side, reset at settlement.
        self._buy_realized_pnl = 0.
        self._sell_realized_pnl = 0.
        # Volume-weighted average open prices per side.
        self._buy_avg_open_price = 0.
        self._sell_avg_open_price = 0.

    def __repr__(self):
        return 'FuturePosition({})'.format(self.__dict__)

    def get_state(self):
        """Serialize this position into a plain dict (for persistence)."""
        return {
            'order_book_id': self._order_book_id,
            'buy_old_holding_list': self._buy_old_holding_list,
            'sell_old_holding_list': self._sell_old_holding_list,
            'buy_today_holding_list': self._buy_today_holding_list,
            'sell_today_holding_list': self._sell_today_holding_list,
            'buy_transaction_cost': self._buy_transaction_cost,
            'sell_transaction_cost': self._sell_transaction_cost,
            'buy_realized_pnl': self._buy_realized_pnl,
            'sell_realized_pnl': self._sell_realized_pnl,
            'buy_avg_open_price': self._buy_avg_open_price,
            'sell_avg_open_price': self._sell_avg_open_price,
            # margin rate may change
            'margin_rate': self.margin_rate,
        }

    def set_state(self, state):
        """Restore a position previously serialized with get_state().

        NOTE(review): 'buy_realized_pnl' / 'sell_realized_pnl' are written by
        get_state() but never restored here — confirm whether that is
        intentional (e.g. state only restored across settlement, which zeroes
        them anyway).
        """
        assert self._order_book_id == state['order_book_id']
        self._buy_old_holding_list = state['buy_old_holding_list']
        self._sell_old_holding_list = state['sell_old_holding_list']
        self._buy_today_holding_list = state['buy_today_holding_list']
        self._sell_today_holding_list = state['sell_today_holding_list']
        self._buy_transaction_cost = state['buy_transaction_cost']
        self._sell_transaction_cost = state['sell_transaction_cost']
        self._buy_avg_open_price = state['buy_avg_open_price']
        self._sell_avg_open_price = state['sell_avg_open_price']

    @property
    def type(self):
        return DEFAULT_ACCOUNT_TYPE.FUTURE.name

    @property
    def margin_rate(self):
        """[float] margin ratio (long ratio from data proxy x configured multiplier)."""
        env = Environment.get_instance()
        margin_info = env.data_proxy.get_margin_info(self.order_book_id)
        margin_multiplier = env.config.base.margin_multiplier
        return margin_info['long_margin_ratio'] * margin_multiplier

    @property
    def market_value(self):
        """[float] net (buy - sell) market value at the last price."""
        return (self.buy_quantity - self.sell_quantity) * self.last_price * self.contract_multiplier

    @property
    def buy_market_value(self):
        """[float] market value of the buy side at the last price."""
        return self.buy_quantity * self.last_price * self.contract_multiplier

    @property
    def sell_market_value(self):
        """[float] market value of the sell side at the last price."""
        return self.sell_quantity * self.last_price * self.contract_multiplier

    # -- PnL related
    @property
    def contract_multiplier(self):
        """[float] contract multiplier looked up from the instrument."""
        return Environment.get_instance().get_instrument(self.order_book_id).contract_multiplier

    @property
    def open_orders(self):
        """[list] open (unfilled) orders for this contract, from the broker."""
        return Environment.get_instance().broker.get_open_orders(self.order_book_id)

    @property
    def buy_holding_pnl(self):
        """
        [float] holding (open-position) PnL of the buy side for the day
        """
        return (self.last_price - self.buy_avg_holding_price) * self.buy_quantity * self.contract_multiplier

    @property
    def sell_holding_pnl(self):
        """
        [float] holding (open-position) PnL of the sell side for the day
        """
        return (self.sell_avg_holding_price - self.last_price) * self.sell_quantity * self.contract_multiplier

    @property
    def buy_realized_pnl(self):
        """
        [float] realized (closing) PnL of the buy side
        """
        return self._buy_realized_pnl

    @property
    def sell_realized_pnl(self):
        """
        [float] realized (closing) PnL of the sell side
        """
        return self._sell_realized_pnl

    @property
    def holding_pnl(self):
        """
        [float] total holding PnL for the day
        """
        return self.buy_holding_pnl + self.sell_holding_pnl

    @property
    def realized_pnl(self):
        """
        [float] total realized PnL for the day
        """
        return self.buy_realized_pnl + self.sell_realized_pnl

    @property
    def buy_daily_pnl(self):
        """
        [float] total buy-side PnL for the day (holding + realized)
        """
        return self.buy_holding_pnl + self.buy_realized_pnl

    @property
    def sell_daily_pnl(self):
        """
        [float] total sell-side PnL for the day (holding + realized)
        """
        return self.sell_holding_pnl + self.sell_realized_pnl

    @property
    def daily_pnl(self):
        """
        [float] total PnL for the day
        """
        return self.holding_pnl + self.realized_pnl

    @property
    def buy_pnl(self):
        """
        [float] cumulative buy-side PnL (vs. average open price)
        """
        return (self.last_price - self._buy_avg_open_price) * self.buy_quantity * self.contract_multiplier

    @property
    def sell_pnl(self):
        """
        [float] cumulative sell-side PnL (vs. average open price)
        """
        return (self._sell_avg_open_price - self.last_price) * self.sell_quantity * self.contract_multiplier

    @property
    def pnl(self):
        """
        [float] cumulative PnL
        """
        return self.buy_pnl + self.sell_pnl

    # -- Quantity related
    @property
    def buy_open_order_quantity(self):
        """
        [int] unfilled quantity of buy-side OPEN orders
        """
        return sum(order.unfilled_quantity for order in self.open_orders if
                   order.side == SIDE.BUY and order.position_effect == POSITION_EFFECT.OPEN)

    @property
    def sell_open_order_quantity(self):
        """
        [int] unfilled quantity of sell-side OPEN orders
        """
        return sum(order.unfilled_quantity for order in self.open_orders if
                   order.side == SIDE.SELL and order.position_effect == POSITION_EFFECT.OPEN)

    @property
    def buy_close_order_quantity(self):
        """
        [int] unfilled quantity of buy-side CLOSE / CLOSE_TODAY orders
        """
        return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.BUY and
                   order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])

    @property
    def sell_close_order_quantity(self):
        """
        [int] unfilled quantity of sell-side CLOSE / CLOSE_TODAY orders
        """
        return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.SELL and
                   order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])

    @property
    def _buy_close_today_order_quantity(self):
        # Unfilled buy-side CLOSE_TODAY order quantity.
        return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.BUY and
                   order.position_effect == POSITION_EFFECT.CLOSE_TODAY)

    @property
    def _sell_close_today_order_quantity(self):
        # Unfilled sell-side CLOSE_TODAY order quantity.
        return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.SELL and
                   order.position_effect == POSITION_EFFECT.CLOSE_TODAY)

    @property
    def _closable_today_sell_quantity(self):
        # Today's sell position minus what buy CLOSE_TODAY orders already claim.
        return self.sell_today_quantity - self._buy_close_today_order_quantity

    @property
    def _closable_today_buy_quantity(self):
        # Today's buy position minus what sell CLOSE_TODAY orders already claim.
        return self.buy_today_quantity - self._sell_close_today_order_quantity

    @property
    def buy_old_quantity(self):
        """
        [int] buy-side overnight (carried-over) position
        """
        return sum(amount for price, amount in self._buy_old_holding_list)

    @property
    def sell_old_quantity(self):
        """
        [int] sell-side overnight (carried-over) position
        """
        return sum(amount for price, amount in self._sell_old_holding_list)

    @property
    def buy_today_quantity(self):
        """
        [int] buy-side position opened today
        """
        return sum(amount for price, amount in self._buy_today_holding_list)

    @property
    def sell_today_quantity(self):
        """
        [int] sell-side position opened today
        """
        return sum(amount for price, amount in self._sell_today_holding_list)

    @property
    def buy_quantity(self):
        """
        [int] total buy-side position
        """
        return self.buy_old_quantity + self.buy_today_quantity

    @property
    def sell_quantity(self):
        """
        [int] total sell-side position
        """
        return self.sell_old_quantity + self.sell_today_quantity

    @property
    def closable_buy_quantity(self):
        """
        [float] buy-side position not yet claimed by closing orders
        """
        return self.buy_quantity - self.sell_close_order_quantity

    @property
    def closable_sell_quantity(self):
        """
        [float] sell-side position not yet claimed by closing orders
        """
        return self.sell_quantity - self.buy_close_order_quantity

    # -- Margin related
    @property
    def buy_margin(self):
        """
        [float] margin held against the buy-side position
        """
        return self._buy_holding_cost * self.margin_rate

    @property
    def sell_margin(self):
        """
        [float] margin held against the sell-side position
        """
        return self._sell_holding_cost * self.margin_rate

    @property
    def margin(self):
        """
        [float] total margin
        """
        # TODO: add handling for the single-sided "larger leg" margin rule
        # (i.e. charging margin only on the larger of the two sides).
        return self.buy_margin + self.sell_margin

    @property
    def buy_avg_holding_price(self):
        """
        [float] average holding price of the buy side
        """
        return 0 if self.buy_quantity == 0 else self._buy_holding_cost / self.buy_quantity / self.contract_multiplier

    @property
    def sell_avg_holding_price(self):
        """
        [float] average holding price of the sell side
        """
        return 0 if self.sell_quantity == 0 else self._sell_holding_cost / self.sell_quantity / self.contract_multiplier

    @property
    def _buy_holding_cost(self):
        # Notional cost of all buy lots (price x amount x multiplier).
        return sum(p * a * self.contract_multiplier for p, a in self.buy_holding_list)

    @property
    def _sell_holding_cost(self):
        # Notional cost of all sell lots (price x amount x multiplier).
        return sum(p * a * self.contract_multiplier for p, a in self.sell_holding_list)

    @property
    def buy_holding_list(self):
        """[list] all buy-side (price, amount) lots, old first."""
        return self._buy_old_holding_list + self._buy_today_holding_list

    @property
    def sell_holding_list(self):
        """[list] all sell-side (price, amount) lots, old first."""
        return self._sell_old_holding_list + self._sell_today_holding_list

    @property
    def buy_avg_open_price(self):
        """[float] volume-weighted average open price of the buy side."""
        return self._buy_avg_open_price

    @property
    def sell_avg_open_price(self):
        """[float] volume-weighted average open price of the sell side."""
        return self._sell_avg_open_price

    @property
    def buy_transaction_cost(self):
        """[float] accumulated buy-side transaction cost."""
        return self._buy_transaction_cost

    @property
    def sell_transaction_cost(self):
        """[float] accumulated sell-side transaction cost."""
        return self._sell_transaction_cost

    @property
    def transaction_cost(self):
        """[float] total transaction cost."""
        return self._buy_transaction_cost + self._sell_transaction_cost

    # -- Function
    def cal_close_today_amount(self, trade_amount, trade_side):
        """Portion of a closing trade that must be booked as "close today".

        Old (overnight) lots are consumed first; whatever exceeds them is a
        close-today amount.
        """
        if trade_side == SIDE.SELL:
            close_today_amount = trade_amount - self.buy_old_quantity
        else:
            close_today_amount = trade_amount - self.sell_old_quantity
        return max(close_today_amount, 0)

    def apply_settlement(self):
        """Daily settlement: re-book all lots at the settle price as overnight
        holdings and reset per-day costs and realized PnL."""
        env = Environment.get_instance()
        data_proxy = env.data_proxy
        trading_date = env.trading_dt.date()
        settle_price = data_proxy.get_settle_price(self.order_book_id, trading_date)
        self._buy_old_holding_list = [(settle_price, self.buy_quantity)]
        self._sell_old_holding_list = [(settle_price, self.sell_quantity)]
        self._buy_today_holding_list = []
        self._sell_today_holding_list = []

        self._buy_transaction_cost = 0.
        self._sell_transaction_cost = 0.
        self._buy_realized_pnl = 0.
        self._sell_realized_pnl = 0.

    def _margin_of(self, quantity, price):
        # Margin required to hold `quantity` contracts at `price`.
        env = Environment.get_instance()
        instrument = env.data_proxy.instruments(self.order_book_id)
        return quantity * instrument.contract_multiplier * price * self.margin_rate

    def apply_trade(self, trade):
        """Apply a fill to this position.

        Returns the cash-balance delta: -margin for opening trades, and
        released margin plus realized PnL for closing trades.
        """
        trade_quantity = trade.last_quantity
        if trade.side == SIDE.BUY:
            if trade.position_effect == POSITION_EFFECT.OPEN:
                # Opening long: update VWAP open price and prepend today's lot.
                self._buy_avg_open_price = (self._buy_avg_open_price * self.buy_quantity +
                                            trade_quantity * trade.last_price) / (self.buy_quantity + trade_quantity)
                self._buy_transaction_cost += trade.transaction_cost
                self._buy_today_holding_list.insert(0, (trade.last_price, trade_quantity))
                return -1 * self._margin_of(trade_quantity, trade.last_price)
            else:
                # Buying to close a SHORT position: costs/PnL book to the sell side.
                old_margin = self.margin
                self._sell_transaction_cost += trade.transaction_cost
                delta_realized_pnl = self._close_holding(trade)
                self._sell_realized_pnl += delta_realized_pnl
                return old_margin - self.margin + delta_realized_pnl
        else:
            if trade.position_effect == POSITION_EFFECT.OPEN:
                # Opening short: update VWAP open price and prepend today's lot.
                self._sell_avg_open_price = (self._sell_avg_open_price * self.sell_quantity +
                                             trade_quantity * trade.last_price) / (self.sell_quantity + trade_quantity)
                self._sell_transaction_cost += trade.transaction_cost
                self._sell_today_holding_list.insert(0, (trade.last_price, trade_quantity))
                return -1 * self._margin_of(trade_quantity, trade.last_price)
            else:
                # Selling to close a LONG position: costs/PnL book to the buy side.
                old_margin = self.margin
                self._buy_transaction_cost += trade.transaction_cost
                delta_realized_pnl = self._close_holding(trade)
                self._buy_realized_pnl += delta_realized_pnl
                return old_margin - self.margin + delta_realized_pnl

    def _close_holding(self, trade):
        """Consume holding lots to satisfy a closing trade; return realized PnL."""
        left_quantity = trade.last_quantity
        delta = 0
        if trade.side == SIDE.BUY:
            # close old (overnight) short lots first
            if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._sell_old_holding_list) != 0:
                old_price, old_quantity = self._sell_old_holding_list.pop()
                if old_quantity > left_quantity:
                    consumed_quantity = left_quantity
                    self._sell_old_holding_list = [(old_price, old_quantity - left_quantity)]
                else:
                    consumed_quantity = old_quantity
                left_quantity -= consumed_quantity
                delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity)
            # then close today's short lots (LIFO via pop from the end)
            while True:
                if left_quantity <= 0:
                    break
                oldest_price, oldest_quantity = self._sell_today_holding_list.pop()
                if oldest_quantity > left_quantity:
                    consumed_quantity = left_quantity
                    self._sell_today_holding_list.append((oldest_price, oldest_quantity - left_quantity))
                else:
                    consumed_quantity = oldest_quantity
                left_quantity -= consumed_quantity
                delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity)
        else:
            # close old (overnight) long lots first
            if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._buy_old_holding_list) != 0:
                old_price, old_quantity = self._buy_old_holding_list.pop()
                if old_quantity > left_quantity:
                    consumed_quantity = left_quantity
                    self._buy_old_holding_list = [(old_price, old_quantity - left_quantity)]
                else:
                    consumed_quantity = old_quantity
                left_quantity -= consumed_quantity
                delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity)
            # then close today's long lots
            while True:
                if left_quantity <= 0:
                    break
                oldest_price, oldest_quantity = self._buy_today_holding_list.pop()
                if oldest_quantity > left_quantity:
                    consumed_quantity = left_quantity
                    self._buy_today_holding_list.append((oldest_price, oldest_quantity - left_quantity))
                    # NOTE(review): this branch zeroes left_quantity before the
                    # shared decrement below (making it go negative); the BUY
                    # branch above does not.  Harmless — the loop exits either
                    # way — but asymmetric; confirm intent.
                    left_quantity = 0
                else:
                    consumed_quantity = oldest_quantity
                left_quantity -= consumed_quantity
                delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity)
        return delta

    def _cal_realized_pnl(self, cost_price, trade_price, side, consumed_quantity):
        # PnL realized by closing `consumed_quantity` contracts: a BUY trade
        # closes a short lot (profit when price fell), a SELL closes a long.
        if side == SIDE.BUY:
            return (cost_price - trade_price) * consumed_quantity * self.contract_multiplier
        else:
            return (trade_price - cost_price) * consumed_quantity * self.contract_multiplier
| rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | 17,514 | [float] 买方向持仓均价
[int] 买方向挂单量
[float] 当日买方向盈亏
[float] 买方向当日持仓盈亏
[float] 买方向持仓保证金
[int] 买方向昨仓
[int] 买方向挂单量
[float] 买方向累计盈亏
[int] 买方向持仓
[float] 买方向平仓盈亏
[int] 买方向今仓
[float] 可平买方向持仓
[float] 可平卖方向持仓
[float] 当日盈亏
[float] 当日持仓盈亏
[float] 保证金
[float] 累计盈亏
[float] 当日平仓盈亏
[float] 卖方向持仓均价
[int] 卖方向挂单量
[float] 当日卖方向盈亏
[float] 卖方向当日持仓盈亏
[float] 卖方向持仓保证金
[int] 卖方向昨仓
[int] 卖方向挂单量
[float] 卖方向累计盈亏
[int] 卖方向持仓
[float] 卖方向平仓盈亏
[int] 卖方向今仓
-*- coding: utf-8 -*- Copyright 2017 Ricequant, Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. margin rate may change -- PNL 相关 -- Quantity 相关 -- Margin 相关 TODO: 需要添加单向大边相关的处理逻辑 -- Function 先平昨仓 再平今仓 先平昨仓 再平今仓 | 1,112 | en | 0.414527 |
import asyncio
import functools
import logging
from types import FunctionType, ModuleType
from typing import Type
from prometheus_client import Histogram, Counter
# Module-level logger and the shared Prometheus histogram used by every
# timing decorator below, labelled by the instrumented call's name.
logger = logging.getLogger(__name__)

# Fix: the original used an f-string with no placeholders; a plain string
# literal is equivalent and clearer.
H = Histogram("management_layer_call_duration_seconds", "API call duration (s)",
              ["call"])
def _prometheus_module_metric_decorator(f: FunctionType):
    """
    A Prometheus decorator adding timing metrics to a function.

    This decorator will work on both asynchronous and synchronous functions.
    Note, however, that this function will turn synchronous functions into
    asynchronous ones when used as a decorator.

    :param f: The function for which to capture metrics
    """
    # Histogram label is "<module basename>_<function name>".
    module_name = f.__module__.rsplit(".", 1)[-1]
    call_key = f"{module_name}_{f.__name__}"

    @functools.wraps(f)
    async def wrapper(*args, **kwargs):
        with H.labels(call=call_key).time():
            result = f(*args, **kwargs)
            if asyncio.iscoroutinefunction(f):
                result = await result
            return result

    return wrapper
def _prometheus_class_metric_decorator(f: FunctionType):
    """
    A Prometheus decorator adding timing metrics to a function in a class.

    This decorator will work on both asynchronous and synchronous functions.
    Note, however, that this function will turn synchronous functions into
    asynchronous ones when used as a decorator.

    :param f: The function for which to capture metrics
    """
    @functools.wraps(f)
    async def wrapper(*args, **kwargs):
        # The bare function name is the histogram label here.
        with H.labels(call=f.__name__).time():
            outcome = f(*args, **kwargs)
            if asyncio.iscoroutinefunction(f):
                outcome = await outcome
            return outcome

    return wrapper
def add_prometheus_metrics_for_module(module_: ModuleType):
    """
    Convenience function applying the Prometheus metrics decorator to the
    specified module's functions.

    :param module_: The module to which the instrumentation will be applied
    """
    # Empty whitelist: no function is excluded from instrumentation.
    decorate_all_in_module(module_, _prometheus_module_metric_decorator, [])
def add_prometheus_metrics_for_class(klass: Type):
    """
    Convenience function applying the Prometheus metrics decorator to the
    specified class functions.

    :param klass: The class to which the instrumentation will be applied
    """
    # Empty whitelist: no method is excluded from instrumentation.
    decorate_all_in_class(klass, _prometheus_class_metric_decorator, [])
def decorate_all_in_module(module_: ModuleType, decorator: FunctionType, whitelist: list):
    """
    Decorate all functions in a module with the specified decorator

    :param module_: The module to interrogate
    :param decorator: The decorator to apply
    :param whitelist: Functions not to be decorated.
    """
    for name in dir(module_):
        if name in whitelist:
            continue
        candidate = getattr(module_, name)
        if not (isinstance(candidate, FunctionType) or asyncio.iscoroutinefunction(candidate)):
            logger.debug(f"No metrics on {module_}:{name} because it is not a coroutine or "
                         f"function")
            continue
        # We only check functions that are defined in the module we
        # specified. Some of the functions in the module may have been
        # imported from other modules. These are ignored.
        if candidate.__module__ != module_.__name__:
            logger.debug(f"No metrics on {module_}:{name} because it belongs to another "
                         f"module")
            continue
        logger.debug(f"Adding metrics to {module_}:{name}")
        setattr(module_, name, decorator(candidate))
def decorate_all_in_class(klass: Type, decorator: FunctionType, whitelist: list):
    """
    Decorate all functions in a class with the specified decorator

    :param klass: The class to interrogate
    :param decorator: The decorator to apply
    :param whitelist: Functions not to be decorated.
    """
    for name in dir(klass):
        if name in whitelist:
            continue
        candidate = getattr(klass, name)
        if isinstance(candidate, FunctionType) or asyncio.iscoroutinefunction(candidate):
            logger.debug(f"Adding metrics to {klass}:{name}")
            setattr(klass, name, decorator(candidate))
        else:
            logger.debug(f"No metrics on {klass}:{name} because it is not a coroutine or "
                         f"function")
| management_layer/metrics.py | 4,436 | A Prometheus decorator adding timing metrics to a function in a class.
This decorator will work on both asynchronous and synchronous functions.
Note, however, that this function will turn synchronous functions into
asynchronous ones when used as a decorator.
:param f: The function for which to capture metrics
A Prometheus decorator adding timing metrics to a function.
This decorator will work on both asynchronous and synchronous functions.
Note, however, that this function will turn synchronous functions into
asynchronous ones when used as a decorator.
:param f: The function for which to capture metrics
Convenience function applying the Prometheus metrics decorator to the
specified class functions.
:param klass: The class to which the instrumentation will be applied
Convenience function applying the Prometheus metrics decorator to the
specified module's functions.
:param module_: The module to which the instrumentation will be applied
Decorate all functions in a class with the specified decorator
:param klass: The class to interrogate
:param decorator: The decorator to apply
:param whitelist: Functions not to be decorated.
Decorate all functions in a module with the specified decorator
:param module_: The module to interrogate
:param decorator: The decorator to apply
:param whitelist: Functions not to be decorated.
We only check functions that are defined in the module we specified. Some of the functions in the module may have been imported from other modules. These are ignored. | 1,505 | en | 0.781973 |
from typing import Callable
import numpy as np
from manimlib.utils.bezier import bezier
def linear(t: float) -> float:
    """Identity rate function: progress equals time."""
    return t
def smooth(t: float) -> float:
    """Sigmoid-like easing with zero first and second derivatives at t=0 and t=1.

    Equivalent to bezier([0, 0, 0, 1, 1, 1]).
    """
    u = 1 - t
    return (t ** 3) * (10 * u * u + 5 * u * t + t * t)
def rush_into(t: float) -> float:
    """First half of smooth(), rescaled to [0, 1]: fast start, gentle finish."""
    return 2 * smooth(0.5 * t)
def rush_from(t: float) -> float:
    """Gentle start, fast finish: the second half of ``smooth`` rescaled to [0, 1]."""
    return 2 * smooth((t + 1) * 0.5) - 1
def slow_into(t: float) -> float:
    """Ease along a quarter-circle arc: steep at the start, flat at the end."""
    u = 1 - t
    return np.sqrt(1 - u * u)
def double_smooth(t: float) -> float:
    """Two ``smooth`` steps glued end to end, each covering half the range."""
    if t >= 0.5:
        return 0.5 * (1 + smooth(2 * t - 1))
    return 0.5 * smooth(2 * t)
def there_and_back(t: float) -> float:
    """Rise smoothly to 1 at the midpoint, then fall back to 0 symmetrically."""
    if t < 0.5:
        return smooth(2 * t)
    return smooth(2 * (1 - t))
def there_and_back_with_pause(t: float, pause_ratio: float = 1. / 3) -> float:
    """Like ``there_and_back`` but holding at 1 for ``pause_ratio`` of the time."""
    speed = 1. / pause_ratio
    lower = 0.5 - pause_ratio / 2
    upper = 0.5 + pause_ratio / 2
    if t < lower:
        return smooth(speed * t)
    if t < upper:
        return 1
    return smooth(speed - speed * t)
def running_start(t: float, pull_factor: float = -0.5) -> float:
    """Pull backwards first (by ``pull_factor``) before accelerating to 1."""
    anchors = [0, 0, pull_factor, pull_factor, 1, 1, 1]
    return bezier(anchors)(t)
def not_quite_there(
    func: Callable[[float], float] = smooth,
    proportion: float = 0.7
) -> Callable[[float], float]:
    """Return a copy of ``func`` scaled so it tops out at ``proportion`` instead of 1."""
    def capped(t):
        return proportion * func(t)
    return capped
def wiggle(t: float, wiggles: float = 2) -> float:
    """Oscillate ``wiggles`` times, with the amplitude enveloped by ``there_and_back``."""
    envelope = there_and_back(t)
    return envelope * np.sin(wiggles * np.pi * t)
def squish_rate_func(
    func: Callable[[float], float],
    a: float = 0.4,
    b: float = 0.6
) -> Callable[[float], float]:
    """Compress ``func`` so it plays out entirely within the interval [a, b].

    Before ``a`` the output is pinned to ``func(0)``, after ``b`` to
    ``func(1)``; a degenerate interval (``a == b``) yields the constant ``a``.
    """
    def squished(t):
        if a == b:
            return a
        if t < a:
            return func(0)
        if t > b:
            return func(1)
        return func((t - a) / (b - a))
    return squished
# Stylistically, should this take parameters (with default values)?
# Ultimately, the functionality is entirely subsumed by squish_rate_func,
# but it may be useful to have a nice name for with nice default params for
# "lingering", different from squish_rate_func's default params
def lingering(t: float) -> float:
    """Progress linearly, finishing early (by t=0.8) and lingering at the end."""
    return squish_rate_func(lambda u: u, 0, 0.8)(t)
def exponential_decay(t: float, half_life: float = 0.1) -> float:
    """Approach 1 exponentially with time constant ``half_life``.

    ``half_life`` should stay small so the value is effectively 1 by t=1,
    minimizing the cut-off error at the end of the animation.
    NOTE(review): despite the name, ``half_life`` is the *e-folding* time
    (value reaches 1 - 1/e at t = half_life, not 1/2) — confirm intent.
    """
    decayed = np.exp(-t / half_life)
    return 1 - decayed
| manimlib/utils/rate_functions.py | 2,453 | Zero first and second derivatives at t=0 and t=1. Equivalent to bezier([0, 0, 0, 1, 1, 1]) Stylistically, should this take parameters (with default values)? Ultimately, the functionality is entirely subsumed by squish_rate_func, but it may be useful to have a nice name for with nice default params for "lingering", different from squish_rate_func's default params The half-life should be rather small to minimize the cut-off error at the end | 442 | en | 0.823491 |
from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
                                              FNode]:
    """Return ``(symbols, init, trans, fairness)`` for this benchmark.

    Encodes a small integer program as a symbolic transition system over a
    program counter ``pc`` (locations 0..2, with -1 as the exit/sink) and
    one integer variable ``x``.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    # Current-state symbols and their primed (next-state) counterparts.
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    x_pc = symb_to_next(mgr, pc)
    x_x = symb_to_next(mgr, x)
    symbols = frozenset([pc, x])
    m_1 = mgr.Int(-1)
    n_locs = 3
    max_int = 11
    ints = []
    pcs = []
    x_pcs = []
    # Location predicates pc == i and pc' == i for locations 0..2.
    for idx in range(n_locs):
        num = mgr.Int(idx)
        ints.append(num)
        pcs.append(mgr.Equals(pc, num))
        x_pcs.append(mgr.Equals(x_pc, num))
    # Remaining integer constants; only ints[2], ints[6] and ints[10] are used.
    for idx in range(n_locs, max_int):
        num = mgr.Int(idx)
        ints.append(num)
    pcend = mgr.Equals(pc, m_1)
    x_pcend = mgr.Equals(x_pc, m_1)
    init = pcs[0]
    cfg = []
    # pc = 0 & (x <= 10) -> pc' = 1
    cond = mgr.LE(x, ints[10])
    cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
    # pc = 0 & !(x <= 10) -> pc' = -1
    cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
    # pc = 1 & (x > 6) -> pc' = 0
    # NOTE(review): the original comment claimed "pc' = 2", but the code
    # targets x_pcs[0]; as written, location 2 (the x := x + 2 update) is
    # unreachable. Confirm whether comment or code matches the intended
    # Urban-WST2013-Fig1 encoding before changing either.
    cond = mgr.GT(x, ints[6])
    cfg.append(mgr.Implies(mgr.And(pcs[1], cond), x_pcs[0]))
    # pc = 1 & !(x > 6) -> pc' = 0
    cfg.append(mgr.Implies(mgr.And(pcs[1], mgr.Not(cond)), x_pcs[0]))
    # pc = 2 -> pc' = 0
    cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
    # pc = -1 -> pc' = -1  (the exit location is a sink)
    cfg.append(mgr.Implies(pcend, x_pcend))
    trans = []
    same = mgr.Equals(x_x, x)
    # pc = 0 -> same
    trans.append(mgr.Implies(pcs[0], same))
    # pc = 1 -> same
    trans.append(mgr.Implies(pcs[1], same))
    # pc = 2 -> x' = x + 2
    trans.append(mgr.Implies(pcs[2], mgr.Equals(x_x, mgr.Plus(x, ints[2]))))
    # pc = end -> same
    trans.append(mgr.Implies(pcend, same))
    trans = mgr.And(*cfg, *trans)
    # Fairness: a non-terminating lasso must avoid the exit location forever.
    fairness = mgr.Not(mgr.Equals(pc, m_1))
    return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Return the candidate non-termination hints for this benchmark.

    Suggests the region x = 5 with a self-loop (x' = x): inside it the loop
    guard stays satisfied and x never changes.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    all_symbs = frozenset([pc, x])
    five = mgr.Int(5)
    x_next = symb_to_next(mgr, x)
    region = Location(env, mgr.Equals(x, five))
    region.set_progress(0, mgr.Equals(x_next, x))
    hint_x = Hint("h_x", env, frozenset([x]), all_symbs)
    hint_x.set_locs([region])
    return frozenset([hint_x])
| benchmarks/software_nontermination/f3_hints/C_Integer/Stroeder_15/Urban-WST2013-Fig1_false-termination.py | 2,581 | pc = 0 & (x <= 10) -> pc' = 1 pc = 0 & !(x <= 10) -> pc' = -1 pc = 1 & (x > 6) -> pc' = 2 pc = 1 & !(x > 6) -> pc' = 0 pc = 2 -> pc' = 0 pc = -1 -> pc' = -1 pc = 0 -> same pc = 1 -> same pc = 2 -> x' = x + 2 pc = end -> same | 224 | en | 0.808315 |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import datetime
import re
import io
from os import PathLike
from typing import (
Dict,
TYPE_CHECKING,
Sequence,
Union,
List,
Optional,
Any,
Callable,
Tuple,
ClassVar,
Type,
overload,
)
from . import utils
from .reaction import Reaction
from .emoji import Emoji
from .partial_emoji import PartialEmoji
from .enums import InteractionType, MessageType, ChannelType, try_enum
from .errors import HTTPException
from .components import _component_factory
from .embeds import Embed
from .member import Member
from .flags import MessageFlags
from .file import File
from .utils import escape_mentions, MISSING
from .http import handle_message_parameters
from .guild import Guild
from .mixins import Hashable
from .sticker import StickerItem
from .threads import Thread
from .user import User
from .channel import PartialMessageable
if TYPE_CHECKING:
from typing_extensions import Self
from .types.message import (
Message as MessagePayload,
Attachment as AttachmentPayload,
MessageReference as MessageReferencePayload,
MessageApplication as MessageApplicationPayload,
MessageActivity as MessageActivityPayload,
)
from .types.interactions import MessageInteraction as MessageInteractionPayload
from .types.components import Component as ComponentPayload
from .types.threads import ThreadArchiveDuration
from .types.member import (
Member as MemberPayload,
UserWithMember as UserWithMemberPayload,
)
from .types.user import User as UserPayload
from .types.embed import Embed as EmbedPayload
from .types.gateway import MessageReactionRemoveEvent, MessageUpdateEvent
from .abc import Snowflake
from .abc import GuildChannel, MessageableChannel
from .components import Component
from .state import ConnectionState
from .channel import TextChannel
from .mentions import AllowedMentions
from .user import User
from .role import Role
from .ui.view import View
EmojiInputType = Union[Emoji, PartialEmoji, str]
__all__ = (
'Attachment',
'Message',
'PartialMessage',
'MessageInteraction',
'MessageReference',
'DeletedReferencedMessage',
)
def convert_emoji_reaction(emoji: Union[EmojiInputType, Reaction]) -> str:
    """Normalise any accepted emoji input into the reaction string the API expects."""
    # Unwrap a Reaction down to its underlying emoji first.
    if isinstance(emoji, Reaction):
        emoji = emoji.emoji
    if isinstance(emoji, Emoji):
        return f'{emoji.name}:{emoji.id}'
    if isinstance(emoji, PartialEmoji):
        return emoji._as_reaction()
    if isinstance(emoji, str):
        # Reactions can be in :name:id format, but not <:name:id>.
        # No existing emojis have <> in them, so a blanket strip is safe.
        return emoji.strip('<>')
    raise TypeError(f'emoji argument must be str, Emoji, or Reaction not {emoji.__class__.__name__}.')
class Attachment(Hashable):
    """Represents an attachment from Discord.

    .. container:: operations

        .. describe:: str(x)

            Returns the URL of the attachment.

        .. describe:: x == y

            Checks if the attachment is equal to another attachment.

        .. describe:: x != y

            Checks if the attachment is not equal to another attachment.

        .. describe:: hash(x)

            Returns the hash of the attachment.

    .. versionchanged:: 1.7
        Attachment can now be casted to :class:`str` and is hashable.

    Attributes
    ------------
    id: :class:`int`
        The attachment ID.
    size: :class:`int`
        The attachment size in bytes.
    height: Optional[:class:`int`]
        The attachment's height, in pixels. Only applicable to images and videos.
    width: Optional[:class:`int`]
        The attachment's width, in pixels. Only applicable to images and videos.
    filename: :class:`str`
        The attachment's filename.
    url: :class:`str`
        The attachment URL. 404s once the message it was attached to is deleted.
    proxy_url: :class:`str`
        The proxy URL — a cached version of :attr:`~Attachment.url` for images.
        After the message is deleted it may remain valid only briefly, if at all.
    content_type: Optional[:class:`str`]
        The attachment's `media type <https://en.wikipedia.org/wiki/Media_type>`_

        .. versionadded:: 1.7
    description: Optional[:class:`str`]
        The attachment's description. Only applicable to images.

        .. versionadded:: 2.0
    ephemeral: :class:`bool`
        Whether the attachment is ephemeral.

        .. versionadded:: 2.0
    """

    __slots__ = (
        'id',
        'size',
        'height',
        'width',
        'filename',
        'url',
        'proxy_url',
        '_http',
        'content_type',
        'description',
        'ephemeral',
    )

    def __init__(self, *, data: AttachmentPayload, state: ConnectionState):
        self._http = state.http
        self.id: int = int(data['id'])
        self.filename: str = data['filename']
        self.size: int = data['size']
        self.url: str = data['url']
        self.proxy_url: str = data['proxy_url']
        # Optional metadata — only present for certain upload kinds.
        self.height: Optional[int] = data.get('height')
        self.width: Optional[int] = data.get('width')
        self.content_type: Optional[str] = data.get('content_type')
        self.description: Optional[str] = data.get('description')
        self.ephemeral: bool = data.get('ephemeral', False)

    def is_spoiler(self) -> bool:
        """:class:`bool`: Whether this attachment contains a spoiler."""
        # Discord marks spoilered uploads purely via this filename prefix.
        return self.filename.startswith('SPOILER_')

    def __repr__(self) -> str:
        return f'<Attachment id={self.id} filename={self.filename!r} url={self.url!r}>'

    def __str__(self) -> str:
        return self.url or ''

    async def save(
        self,
        fp: Union[io.BufferedIOBase, PathLike[Any]],
        *,
        seek_begin: bool = True,
        use_cached: bool = False,
    ) -> int:
        """|coro|

        Saves this attachment into a file-like object.

        Parameters
        -----------
        fp: Union[:class:`io.BufferedIOBase`, :class:`os.PathLike`]
            Either an open binary stream to write into, or a filename/path;
            in the latter case a new file is created and written.
        seek_begin: :class:`bool`
            Whether to rewind the stream to the start after a successful write.
        use_cached: :class:`bool`
            Download via :attr:`proxy_url` instead of :attr:`url`. The proxy
            copy sometimes survives message deletion a little longer, though
            not for every attachment type.

        Raises
        --------
        HTTPException
            Saving the attachment failed.
        NotFound
            The attachment was deleted.

        Returns
        --------
        :class:`int`
            The number of bytes written.
        """
        data = await self.read(use_cached=use_cached)
        if not isinstance(fp, io.BufferedIOBase):
            with open(fp, 'wb') as handle:
                return handle.write(data)
        count = fp.write(data)
        if seek_begin:
            fp.seek(0)
        return count

    async def read(self, *, use_cached: bool = False) -> bytes:
        """|coro|

        Retrieves the content of this attachment as a :class:`bytes` object.

        .. versionadded:: 1.1

        Parameters
        -----------
        use_cached: :class:`bool`
            Download via :attr:`proxy_url` instead of :attr:`url` (see
            :meth:`save` for the trade-offs).

        Raises
        ------
        HTTPException
            Downloading the attachment failed.
        Forbidden
            You do not have permissions to access this attachment
        NotFound
            The attachment was deleted.

        Returns
        -------
        :class:`bytes`
            The contents of the attachment.
        """
        target = self.proxy_url if use_cached else self.url
        return await self._http.get_from_cdn(target)

    async def to_file(self, *, use_cached: bool = False, spoiler: bool = False) -> File:
        """|coro|

        Converts the attachment into a :class:`File` suitable for sending via
        :meth:`abc.Messageable.send`.

        .. versionadded:: 1.3

        Parameters
        -----------
        use_cached: :class:`bool`
            Download via :attr:`proxy_url` instead of :attr:`url` (see
            :meth:`save` for the trade-offs).

            .. versionadded:: 1.4
        spoiler: :class:`bool`
            Whether the file is a spoiler.

            .. versionadded:: 1.4

        Raises
        ------
        HTTPException
            Downloading the attachment failed.
        Forbidden
            You do not have permissions to access this attachment
        NotFound
            The attachment was deleted.

        Returns
        -------
        :class:`File`
            The attachment as a file suitable for sending.
        """
        payload = await self.read(use_cached=use_cached)
        return File(io.BytesIO(payload), filename=self.filename, description=self.description, spoiler=spoiler)

    def to_dict(self) -> AttachmentPayload:
        # Serialise back into the wire format; optional media fields are
        # emitted only when they carry a value.
        payload: AttachmentPayload = {
            'filename': self.filename,
            'id': self.id,
            'proxy_url': self.proxy_url,
            'size': self.size,
            'url': self.url,
            'spoiler': self.is_spoiler(),
        }
        if self.height:
            payload['height'] = self.height
        if self.width:
            payload['width'] = self.width
        if self.content_type:
            payload['content_type'] = self.content_type
        if self.description is not None:
            payload['description'] = self.description
        return payload
class DeletedReferencedMessage:
    """Sentinel for a resolved message reference whose target was deleted.

    Separates "the referenced message was fetched earlier but has since been
    deleted" from "the referenced message could never be fetched at all".

    .. versionadded:: 1.6
    """

    __slots__ = ('_parent',)

    def __init__(self, parent: MessageReference):
        self._parent: MessageReference = parent

    def __repr__(self) -> str:
        return f"<DeletedReferencedMessage id={self.id} channel_id={self.channel_id} guild_id={self.guild_id!r}>"

    @property
    def id(self) -> int:
        """:class:`int`: The message ID of the deleted referenced message."""
        # A resolved reference always carries a message id, hence never None here.
        return self._parent.message_id  # type: ignore

    @property
    def channel_id(self) -> int:
        """:class:`int`: The channel ID of the deleted referenced message."""
        return self._parent.channel_id

    @property
    def guild_id(self) -> Optional[int]:
        """Optional[:class:`int`]: The guild ID of the deleted referenced message."""
        return self._parent.guild_id
class MessageReference:
    """Represents a reference to a :class:`~discord.Message`.

    .. versionadded:: 1.5

    .. versionchanged:: 1.6
        This class can now be constructed by users.

    Attributes
    -----------
    message_id: Optional[:class:`int`]
        The id of the message referenced.
    channel_id: :class:`int`
        The channel id of the message referenced.
    guild_id: Optional[:class:`int`]
        The guild id of the message referenced.
    fail_if_not_exists: :class:`bool`
        Whether replying to the referenced message should raise
        :class:`HTTPException` if it no longer exists or could not be fetched.

        .. versionadded:: 1.7
    resolved: Optional[Union[:class:`Message`, :class:`DeletedReferencedMessage`]]
        The message this reference resolved to; ``None`` if Discord never
        attempted (or failed) to resolve it, or
        :class:`DeletedReferencedMessage` if it resolved once but the target
        has since been deleted. Mainly populated for reply references.

        .. versionadded:: 1.6
    """

    __slots__ = ('message_id', 'channel_id', 'guild_id', 'fail_if_not_exists', 'resolved', '_state')

    def __init__(self, *, message_id: int, channel_id: int, guild_id: Optional[int] = None, fail_if_not_exists: bool = True):
        self._state: Optional[ConnectionState] = None
        self.resolved: Optional[Union[Message, DeletedReferencedMessage]] = None
        self.message_id: Optional[int] = message_id
        self.channel_id: int = channel_id
        self.guild_id: Optional[int] = guild_id
        self.fail_if_not_exists: bool = fail_if_not_exists

    @classmethod
    def with_state(cls, state: ConnectionState, data: MessageReferencePayload) -> Self:
        # Gateway-provided references bypass __init__ entirely.
        ref = cls.__new__(cls)
        ref.message_id = utils._get_as_snowflake(data, 'message_id')
        ref.channel_id = int(data.pop('channel_id'))
        ref.guild_id = utils._get_as_snowflake(data, 'guild_id')
        ref.fail_if_not_exists = data.get('fail_if_not_exists', True)
        ref._state = state
        ref.resolved = None
        return ref

    @classmethod
    def from_message(cls, message: PartialMessage, *, fail_if_not_exists: bool = True) -> Self:
        """Creates a :class:`MessageReference` from an existing :class:`~discord.Message`.

        .. versionadded:: 1.6

        Parameters
        ----------
        message: :class:`~discord.Message`
            The message to be converted into a reference.
        fail_if_not_exists: :class:`bool`
            Whether replying to the referenced message should raise
            :class:`HTTPException` if it no longer exists or could not be fetched.

            .. versionadded:: 1.7

        Returns
        -------
        :class:`MessageReference`
            A reference to the message.
        """
        ref = cls(
            message_id=message.id,
            channel_id=message.channel.id,
            guild_id=getattr(message.guild, 'id', None),
            fail_if_not_exists=fail_if_not_exists,
        )
        ref._state = message._state
        return ref

    @property
    def cached_message(self) -> Optional[Message]:
        """Optional[:class:`~discord.Message`]: The cached message, if found in the internal message cache."""
        state = self._state
        if state is None:
            return None
        return state._get_message(self.message_id)

    @property
    def jump_url(self) -> str:
        """:class:`str`: Returns a URL that allows the client to jump to the referenced message.

        .. versionadded:: 1.7
        """
        scope = self.guild_id if self.guild_id is not None else '@me'
        return f'https://discord.com/channels/{scope}/{self.channel_id}/{self.message_id}'

    def __repr__(self) -> str:
        return f'<MessageReference message_id={self.message_id!r} channel_id={self.channel_id!r} guild_id={self.guild_id!r}>'

    def to_dict(self) -> MessageReferencePayload:
        payload: Dict[str, Any] = {}
        if self.message_id is not None:
            payload['message_id'] = self.message_id
        payload['channel_id'] = self.channel_id
        if self.guild_id is not None:
            payload['guild_id'] = self.guild_id
        if self.fail_if_not_exists is not None:
            payload['fail_if_not_exists'] = self.fail_if_not_exists
        return payload  # type: ignore # Type checker doesn't understand these are the same.

    to_message_reference_dict = to_dict
class MessageInteraction(Hashable):
    """Represents the interaction that a :class:`Message` is a response to.

    .. versionadded:: 2.0

    .. container:: operations

        .. describe:: x == y

            Checks if two message interactions are equal.

        .. describe:: x != y

            Checks if two message interactions are not equal.

        .. describe:: hash(x)

            Returns the message interaction's hash.

    Attributes
    -----------
    id: :class:`int`
        The interaction ID.
    type: :class:`InteractionType`
        The interaction type.
    name: :class:`str`
        The name of the interaction.
    user: Union[:class:`User`, :class:`Member`]
        The user or member that invoked the interaction.
    """

    __slots__: Tuple[str, ...] = ('id', 'type', 'name', 'user')

    def __init__(self, *, state: ConnectionState, guild: Optional[Guild], data: MessageInteractionPayload) -> None:
        self.id: int = int(data['id'])
        self.type: InteractionType = try_enum(InteractionType, data['type'])
        self.name: str = data['name']
        self.user: Union[User, Member] = MISSING
        try:
            member_data = data['member']
        except KeyError:
            self.user = state.create_user(data['user'])
        else:
            if guild is not None:
                member_data['user'] = data['user']
                self.user = Member(data=member_data, guild=guild, state=state)  # type: ignore
            else:
                # A member payload without a resolvable guild is an
                # incredibly rare data-loss case; degrade to a plain user
                # rather than fabricate a Member with bad data.
                self.user = state.create_user(data['user'])

    def __repr__(self) -> str:
        return f'<MessageInteraction id={self.id} name={self.name!r} type={self.type!r} user={self.user!r}>'

    @property
    def created_at(self) -> datetime.datetime:
        """:class:`datetime.datetime`: The interaction's creation time in UTC."""
        return utils.snowflake_time(self.id)
def flatten_handlers(cls: Type[Message]) -> Type[Message]:
    """Class decorator that precomputes ``_handle_*`` dispatch tables.

    Collects every ``_handle_<field>`` method into ``cls._HANDLERS`` as
    ``(field, function)`` pairs, and caches the ``_cs_``-prefixed slot names
    in ``cls._CACHED_SLOTS``.
    """
    skip = len('_handle_')
    collected = []
    for attr, func in cls.__dict__.items():
        if attr.startswith('_handle_') and attr != '_handle_member':
            collected.append((attr[skip:], func))
    # _handle_member must always be processed last, so it is appended
    # explicitly rather than picked up in dict order.
    collected.append(('member', cls._handle_member))
    cls._HANDLERS = collected
    cls._CACHED_SLOTS = [name for name in cls.__slots__ if name.startswith('_cs_')]
    return cls
class PartialMessage(Hashable):
    """Represents a partial message to aid with working messages when only
    a message and channel ID are present.

    There are two ways to construct this class. The first one is through
    the constructor itself, and the second is via the following:

    - :meth:`TextChannel.get_partial_message`
    - :meth:`VoiceChannel.get_partial_message`
    - :meth:`Thread.get_partial_message`
    - :meth:`DMChannel.get_partial_message`

    Note that this class is trimmed down and has no rich attributes.

    .. versionadded:: 1.6

    .. container:: operations

        .. describe:: x == y

            Checks if two partial messages are equal.

        .. describe:: x != y

            Checks if two partial messages are not equal.

        .. describe:: hash(x)

            Returns the partial message's hash.

    Attributes
    -----------
    channel: Union[:class:`PartialMessageable`, :class:`TextChannel`, :class:`VoiceChannel`, :class:`Thread`, :class:`DMChannel`]
        The channel associated with this partial message.
    id: :class:`int`
        The message ID.
    guild: Optional[:class:`Guild`]
        The guild that the partial message belongs to, if applicable.
    """

    # Slots keep these (potentially numerous) lightweight handles small;
    # _cs_guild is a cached-slot name picked up by flatten_handlers-style caching.
    __slots__ = ('channel', 'id', '_cs_guild', '_state', 'guild')
def __init__(self, *, channel: MessageableChannel, id: int) -> None:
    # Only message-capable channel types may back a PartialMessage;
    # anything else is rejected up front with a descriptive error.
    accepted = (
        ChannelType.text,
        ChannelType.voice,
        ChannelType.news,
        ChannelType.private,
        ChannelType.news_thread,
        ChannelType.public_thread,
        ChannelType.private_thread,
    )
    if not isinstance(channel, PartialMessageable) and channel.type not in accepted:
        raise TypeError(
            f'expected PartialMessageable, TextChannel, VoiceChannel, DMChannel or Thread not {type(channel)!r}'
        )

    self.channel: MessageableChannel = channel
    self._state: ConnectionState = channel._state
    self.id: int = id
    self.guild: Optional[Guild] = getattr(channel, 'guild', None)
def _update(self, data: MessageUpdateEvent) -> None:
    # This is used for duck typing purposes (so PartialMessage can stand in
    # for Message when a MESSAGE_UPDATE payload arrives).
    # Just do nothing with the data — there is no rich state to refresh.
    pass

# Also needed for duck typing purposes: pin()/unpin() assign to `pinned`,
# and this write-only property silently swallows those assignments.
# n.b. not exposed
pinned: Any = property(None, lambda x, y: None)
def __repr__(self) -> str:
    # Mirrors the repr style of the other message-related classes.
    return f'<PartialMessage id={self.id} channel={self.channel!r}>'
@property
def created_at(self) -> datetime.datetime:
    """:class:`datetime.datetime`: The partial message's creation time in UTC."""
    # Derived from the snowflake ID, so no fetched state is required.
    return utils.snowflake_time(self.id)
@property
def jump_url(self) -> str:
    """:class:`str`: Returns a URL that allows the client to jump to this message."""
    # DM messages have no guild; Discord uses the literal '@me' segment there.
    scope = getattr(self.guild, 'id', '@me')
    return f'https://discord.com/channels/{scope}/{self.channel.id}/{self.id}'
async def fetch(self) -> Message:
    """|coro|

    Fetches the partial message to a full :class:`Message`.

    Raises
    --------
    NotFound
        The message was not found.
    Forbidden
        You do not have the permissions required to get a message.
    HTTPException
        Retrieving the message failed.

    Returns
    --------
    :class:`Message`
        The full message.
    """
    state = self._state
    payload = await state.http.get_message(self.channel.id, self.id)
    return state.create_message(channel=self.channel, data=payload)
async def delete(self, *, delay: Optional[float] = None) -> None:
    """|coro|

    Deletes the message.

    Your own messages can always be deleted; deleting other people's
    messages requires the :attr:`~Permissions.manage_messages` permission.

    .. versionchanged:: 1.1
        Added the new ``delay`` keyword-only parameter.

    Parameters
    -----------
    delay: Optional[:class:`float`]
        If provided, the number of seconds to wait in the background before
        deleting. A failed background deletion is silently ignored.

    Raises
    ------
    Forbidden
        You do not have proper permissions to delete the message.
    NotFound
        The message was deleted already
    HTTPException
        Deleting the message failed.
    """
    if delay is None:
        await self._state.http.delete_message(self.channel.id, self.id)
        return

    async def _delete_later(wait: float) -> None:
        await asyncio.sleep(wait)
        try:
            await self._state.http.delete_message(self.channel.id, self.id)
        except HTTPException:
            # Best-effort: the caller asked for fire-and-forget semantics.
            pass

    asyncio.create_task(_delete_later(delay))
# Typing-only overload stubs: ``embed`` (singular) and ``embeds`` (plural)
# are mutually exclusive, so each stub advertises exactly one of them.
# The runtime implementation is the un-decorated ``edit`` below.
@overload
async def edit(
    self,
    *,
    content: Optional[str] = ...,
    embed: Optional[Embed] = ...,
    attachments: Sequence[Union[Attachment, File]] = ...,
    delete_after: Optional[float] = ...,
    allowed_mentions: Optional[AllowedMentions] = ...,
    view: Optional[View] = ...,
) -> Message:
    ...

@overload
async def edit(
    self,
    *,
    content: Optional[str] = ...,
    embeds: Sequence[Embed] = ...,
    attachments: Sequence[Union[Attachment, File]] = ...,
    delete_after: Optional[float] = ...,
    allowed_mentions: Optional[AllowedMentions] = ...,
    view: Optional[View] = ...,
) -> Message:
    ...
async def edit(
    self,
    content: Optional[str] = MISSING,
    embed: Optional[Embed] = MISSING,
    embeds: Sequence[Embed] = MISSING,
    attachments: Sequence[Union[Attachment, File]] = MISSING,
    delete_after: Optional[float] = None,
    allowed_mentions: Optional[AllowedMentions] = MISSING,
    view: Optional[View] = MISSING,
) -> Message:
    """|coro|

    Edits the message. The content must be convertible via ``str(content)``.

    .. versionchanged:: 2.0
        Edits are no longer in-place, the newly edited message is returned instead.

    .. versionchanged:: 2.0
        This function will now raise :exc:`TypeError` instead of
        ``InvalidArgument``.

    Parameters
    -----------
    content: Optional[:class:`str`]
        The new content; ``None`` removes it.
    embed: Optional[:class:`Embed`]
        The new embed; ``None`` removes it. Mutually exclusive with ``embeds``.
    embeds: List[:class:`Embed`]
        The new embeds (max 10); ``[]`` removes all embeds.

        .. versionadded:: 2.0
    attachments: List[Union[:class:`Attachment`, :class:`File`]]
        Attachments to keep plus new files to upload; ``[]`` removes all.
        New files always appear after current attachments.

        .. versionadded:: 2.0
    delete_after: Optional[:class:`float`]
        Seconds to wait in the background before deleting the edited
        message; a failed deletion is silently ignored.
    allowed_mentions: Optional[:class:`~discord.AllowedMentions`]
        Mention-processing controls, merged with
        :attr:`~discord.Client.allowed_mentions` when provided.

        .. versionadded:: 1.4
    view: Optional[:class:`~discord.ui.View`]
        The updated view; ``None`` removes it.

    Raises
    -------
    HTTPException
        Editing the message failed.
    Forbidden
        Tried to suppress a message without permissions or
        edited a message's content or embed that isn't yours.
    TypeError
        You specified both ``embed`` and ``embeds``

    Returns
    --------
    :class:`Message`
        The newly edited message.
    """
    # Merge with the client-wide allowed mentions only when the content is
    # actually being replaced.
    previous_allowed_mentions = self._state.allowed_mentions if content is not MISSING else None

    # Detach the currently stored view before the edit goes out so it stops
    # receiving component updates for this message.
    if view is not MISSING:
        self._state.prevent_view_updates_for(self.id)

    params = handle_message_parameters(
        content=content,
        embed=embed,
        embeds=embeds,
        attachments=attachments,
        view=view,
        allowed_mentions=allowed_mentions,
        previous_allowed_mentions=previous_allowed_mentions,
    )
    data = await self._state.http.edit_message(self.channel.id, self.id, params=params)
    message = Message(state=self._state, channel=self.channel, data=data)

    if view and not view.is_finished():
        self._state.store_view(view, self.id)

    if delete_after is not None:
        await self.delete(delay=delete_after)

    return message
async def publish(self) -> None:
    """|coro|

    Publishes this message to your announcement channel.

    Requires the :attr:`~Permissions.send_messages` permission; publishing a
    message that is not your own additionally requires
    :attr:`~Permissions.manage_messages`.

    Raises
    -------
    Forbidden
        You do not have the proper permissions to publish this message.
    HTTPException
        Publishing the message failed.
    """
    await self._state.http.publish_message(self.channel.id, self.id)
async def pin(self, *, reason: Optional[str] = None) -> None:
    """|coro|

    Pins the message. Requires :attr:`~Permissions.manage_messages` in a
    non-private channel context.

    Parameters
    -----------
    reason: Optional[:class:`str`]
        The reason for pinning the message. Shows up on the audit log.

        .. versionadded:: 1.4

    Raises
    -------
    Forbidden
        You do not have permissions to pin the message.
    NotFound
        The message or channel was not found or deleted.
    HTTPException
        Pinning the message failed, probably due to the channel
        having more than 50 pinned messages.
    """
    await self._state.http.pin_message(self.channel.id, self.id, reason=reason)
    # `pinned` exists on PartialMessage purely for duck typing with Message.
    self.pinned = True
async def unpin(self, *, reason: Optional[str] = None) -> None:
    """|coro|

    Unpins the message. Requires :attr:`~Permissions.manage_messages` in a
    non-private channel context.

    Parameters
    -----------
    reason: Optional[:class:`str`]
        The reason for unpinning the message. Shows up on the audit log.

        .. versionadded:: 1.4

    Raises
    -------
    Forbidden
        You do not have permissions to unpin the message.
    NotFound
        The message or channel was not found or deleted.
    HTTPException
        Unpinning the message failed.
    """
    await self._state.http.unpin_message(self.channel.id, self.id, reason=reason)
    # `pinned` exists on PartialMessage purely for duck typing with Message.
    self.pinned = False
async def add_reaction(self, emoji: EmojiInputType, /) -> None:
    """|coro|

    Adds a reaction to the message.

    The emoji may be a unicode emoji or a custom guild :class:`Emoji`.
    Requires :attr:`~Permissions.read_message_history`; if nobody else has
    reacted with this emoji yet, :attr:`~Permissions.add_reactions` is
    required as well.

    .. versionchanged:: 2.0
        ``emoji`` parameter is now positional-only.

    .. versionchanged:: 2.0
        This function will now raise :exc:`TypeError` instead of
        ``InvalidArgument``.

    Parameters
    ------------
    emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
        The emoji to react with.

    Raises
    --------
    HTTPException
        Adding the reaction failed.
    Forbidden
        You do not have the proper permissions to react to the message.
    NotFound
        The emoji you specified was not found.
    TypeError
        The emoji parameter is invalid.
    """
    # Normalize whatever the caller passed into the API's string form.
    resolved = convert_emoji_reaction(emoji)
    await self._state.http.add_reaction(self.channel.id, self.id, resolved)
async def remove_reaction(self, emoji: Union[EmojiInputType, Reaction], member: Snowflake) -> None:
    """|coro|

    Removes ``member``'s reaction with ``emoji`` from the message.

    The emoji may be a unicode emoji or a custom guild :class:`Emoji`.
    If the reaction is not your own (i.e. ``member`` is not you) the
    :attr:`~Permissions.manage_messages` permission is needed.
    ``member`` must meet the :class:`abc.Snowflake` abc.

    .. versionchanged:: 2.0
        This function will now raise :exc:`TypeError` instead of
        ``InvalidArgument``.

    Parameters
    ------------
    emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
        The emoji to remove.
    member: :class:`abc.Snowflake`
        The member for which to remove the reaction.

    Raises
    --------
    HTTPException
        Removing the reaction failed.
    Forbidden
        You do not have the proper permissions to remove the reaction.
    NotFound
        The member or emoji you specified was not found.
    TypeError
        The emoji parameter is invalid.
    """
    resolved = convert_emoji_reaction(emoji)
    http = self._state.http
    # Removing one's own reaction uses a distinct, permission-free endpoint.
    if member.id == self._state.self_id:
        await http.remove_own_reaction(self.channel.id, self.id, resolved)
    else:
        await http.remove_reaction(self.channel.id, self.id, resolved, member.id)
async def clear_reaction(self, emoji: Union[EmojiInputType, Reaction]) -> None:
    """|coro|

    Clears a specific reaction from the message.

    The emoji may be a unicode emoji or a custom guild :class:`Emoji`.
    Requires the :attr:`~Permissions.manage_messages` permission.

    .. versionadded:: 1.3

    .. versionchanged:: 2.0
        This function will now raise :exc:`TypeError` instead of
        ``InvalidArgument``.

    Parameters
    -----------
    emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
        The emoji to clear.

    Raises
    --------
    HTTPException
        Clearing the reaction failed.
    Forbidden
        You do not have the proper permissions to clear the reaction.
    NotFound
        The emoji you specified was not found.
    TypeError
        The emoji parameter is invalid.
    """
    resolved = convert_emoji_reaction(emoji)
    await self._state.http.clear_single_reaction(self.channel.id, self.id, resolved)
async def clear_reactions(self) -> None:
    """|coro|

    Removes every reaction from the message.

    Requires the :attr:`~Permissions.manage_messages` permission.

    Raises
    --------
    HTTPException
        Removing the reactions failed.
    Forbidden
        You do not have the proper permissions to remove all the reactions.
    """
    http = self._state.http
    await http.clear_reactions(self.channel.id, self.id)
async def create_thread(
    self,
    *,
    name: str,
    auto_archive_duration: ThreadArchiveDuration = MISSING,
    slowmode_delay: Optional[int] = None,
    reason: Optional[str] = None,
) -> Thread:
    """|coro|

    Creates a public thread from this message.

    Requires :attr:`~discord.Permissions.create_public_threads`; the channel
    this message belongs to must be a :class:`TextChannel`.

    .. versionadded:: 2.0

    Parameters
    -----------
    name: :class:`str`
        The name of the thread.
    auto_archive_duration: :class:`int`
        The duration in minutes before a thread is automatically archived for
        inactivity. If not provided, the channel's default auto archive
        duration is used.
    slowmode_delay: Optional[:class:`int`]
        Specifies the slowmode rate limit for user in this channel, in seconds.
        The maximum value possible is `21600`. By default no slowmode rate limit
        if this is ``None``.
    reason: Optional[:class:`str`]
        The reason for creating a new thread. Shows up on the audit log.

    Raises
    -------
    Forbidden
        You do not have permissions to create a thread.
    HTTPException
        Creating the thread failed.
    ValueError
        This message does not have guild info attached.

    Returns
    --------
    :class:`.Thread`
        The created thread.
    """
    if self.guild is None:
        raise ValueError('This message does not have guild info attached.')

    # Fall back to the parent channel's default when the caller gave no
    # explicit duration (1440 minutes if the channel has no such attribute).
    fallback: ThreadArchiveDuration = getattr(self.channel, 'default_auto_archive_duration', 1440)
    duration = auto_archive_duration or fallback

    payload = await self._state.http.start_thread_with_message(
        self.channel.id,
        self.id,
        name=name,
        auto_archive_duration=duration,
        rate_limit_per_user=slowmode_delay,
        reason=reason,
    )
    return Thread(guild=self.guild, state=self._state, data=payload)
async def reply(self, content: Optional[str] = None, **kwargs: Any) -> Message:
    """|coro|

    Shortcut for :meth:`.abc.Messageable.send` that sends ``content`` to this
    message's channel with this message set as the reply reference.

    .. versionadded:: 1.6

    .. versionchanged:: 2.0
        This function will now raise :exc:`TypeError` or
        :exc:`ValueError` instead of ``InvalidArgument``.

    Raises
    --------
    ~discord.HTTPException
        Sending the message failed.
    ~discord.Forbidden
        You do not have the proper permissions to send the message.
    ValueError
        The ``files`` list is not of the appropriate size
    TypeError
        You specified both ``file`` and ``files``.

    Returns
    ---------
    :class:`.Message`
        The message that was sent.
    """
    # All other send() keyword arguments pass straight through.
    return await self.channel.send(content, reference=self, **kwargs)
def to_reference(self, *, fail_if_not_exists: bool = True) -> MessageReference:
    """Creates a :class:`~discord.MessageReference` pointing at this message.

    .. versionadded:: 1.6

    Parameters
    ----------
    fail_if_not_exists: :class:`bool`
        Whether replying using the message reference should raise
        :class:`HTTPException` if the message no longer exists or Discord
        could not fetch the message.

        .. versionadded:: 1.7

    Returns
    ---------
    :class:`~discord.MessageReference`
        The reference to this message.
    """
    return MessageReference.from_message(self, fail_if_not_exists=fail_if_not_exists)
def to_message_reference_dict(self) -> MessageReferencePayload:
    # Assemble the minimal reference payload Discord's API expects;
    # ``guild_id`` is only attached for guild messages.
    payload: MessageReferencePayload = {
        'message_id': self.id,
        'channel_id': self.channel.id,
    }
    guild = self.guild
    if guild is not None:
        payload['guild_id'] = guild.id
    return payload
@flatten_handlers
class Message(PartialMessage, Hashable):
r"""Represents a message from Discord.
.. container:: operations
.. describe:: x == y
Checks if two messages are equal.
.. describe:: x != y
Checks if two messages are not equal.
.. describe:: hash(x)
Returns the message's hash.
Attributes
-----------
tts: :class:`bool`
Specifies if the message was done with text-to-speech.
This can only be accurately received in :func:`on_message` due to
a discord limitation.
type: :class:`MessageType`
The type of message. In most cases this should not be checked, but it is helpful
in cases where it might be a system message for :attr:`system_content`.
author: Union[:class:`Member`, :class:`abc.User`]
A :class:`Member` that sent the message. If :attr:`channel` is a
private channel or the user has left the guild, then it is a :class:`User` instead.
content: :class:`str`
The actual contents of the message.
nonce: Optional[Union[:class:`str`, :class:`int`]]
The value used by the discord guild and the client to verify that the message is successfully sent.
This is not stored long term within Discord's servers and is only used ephemerally.
embeds: List[:class:`Embed`]
A list of embeds the message has.
channel: Union[:class:`TextChannel`, :class:`VoiceChannel`, :class:`Thread`, :class:`DMChannel`, :class:`GroupChannel`, :class:`PartialMessageable`]
The :class:`TextChannel` or :class:`Thread` that the message was sent from.
Could be a :class:`DMChannel` or :class:`GroupChannel` if it's a private message.
reference: Optional[:class:`~discord.MessageReference`]
The message that this message references. This is only applicable to messages of
type :attr:`MessageType.pins_add`, crossposted messages created by a
followed channel integration, or message replies.
.. versionadded:: 1.5
mention_everyone: :class:`bool`
Specifies if the message mentions everyone.
.. note::
This does not check if the ``@everyone`` or the ``@here`` text is in the message itself.
Rather this boolean indicates if either the ``@everyone`` or the ``@here`` text is in the message
**and** it did end up mentioning.
mentions: List[:class:`abc.User`]
A list of :class:`Member` that were mentioned. If the message is in a private message
then the list will be of :class:`User` instead. For messages that are not of type
:attr:`MessageType.default`\, this array can be used to aid in system messages.
For more information, see :attr:`system_content`.
.. warning::
The order of the mentions list is not in any particular order so you should
not rely on it. This is a Discord limitation, not one with the library.
channel_mentions: List[Union[:class:`abc.GuildChannel`, :class:`Thread`]]
A list of :class:`abc.GuildChannel` or :class:`Thread` that were mentioned. If the message is
in a private message then the list is always empty.
role_mentions: List[:class:`Role`]
A list of :class:`Role` that were mentioned. If the message is in a private message
then the list is always empty.
id: :class:`int`
The message ID.
webhook_id: Optional[:class:`int`]
If this message was sent by a webhook, then this is the ID of the
webhook that sent this message.
attachments: List[:class:`Attachment`]
A list of attachments given to a message.
pinned: :class:`bool`
Specifies if the message is currently pinned.
flags: :class:`MessageFlags`
Extra features of the message.
.. versionadded:: 1.3
reactions : List[:class:`Reaction`]
Reactions to a message. Reactions can be either custom emoji or standard unicode emoji.
activity: Optional[:class:`dict`]
The activity associated with this message. Sent with Rich-Presence related messages that for
example, request joining, spectating, or listening to or with another member.
It is a dictionary with the following optional keys:
- ``type``: An integer denoting the type of message activity being requested.
- ``party_id``: The party ID associated with the party.
application: Optional[:class:`dict`]
The rich presence enabled application associated with this message.
It is a dictionary with the following keys:
- ``id``: A string representing the application's ID.
- ``name``: A string representing the application's name.
- ``description``: A string representing the application's description.
- ``icon``: A string representing the icon ID of the application.
- ``cover_image``: A string representing the embed's image asset ID.
stickers: List[:class:`StickerItem`]
A list of sticker items given to the message.
.. versionadded:: 1.6
components: List[:class:`Component`]
A list of components in the message.
.. versionadded:: 2.0
interaction: Optional[:class:`MessageInteraction`]
The interaction that this message is a response to.
.. versionadded:: 2.0
guild: Optional[:class:`Guild`]
The guild that the message belongs to, if applicable.
"""
# Messages are created in bulk (one per gateway event), so __slots__ keeps
# per-instance memory down.  The ``_cs_*`` entries back utils.cached_slot_property
# caches and are cleared on update in _update().
__slots__ = (
    '_state',
    '_edited_timestamp',
    '_cs_channel_mentions',
    '_cs_raw_mentions',
    '_cs_clean_content',
    '_cs_raw_channel_mentions',
    '_cs_raw_role_mentions',
    '_cs_system_content',
    'tts',
    'content',
    'channel',
    'webhook_id',
    'mention_everyone',
    'embeds',
    'mentions',
    'author',
    'attachments',
    'nonce',
    'pinned',
    'role_mentions',
    'type',
    'flags',
    'reactions',
    'reference',
    'application',
    'activity',
    'stickers',
    'components',
    'interaction',
)

if TYPE_CHECKING:
    # Static-only annotations for attributes that are assigned dynamically
    # (via the _handle_* machinery and flatten_handlers), so type checkers
    # know their narrowed types.
    _HANDLERS: ClassVar[List[Tuple[str, Callable[..., None]]]]
    _CACHED_SLOTS: ClassVar[List[str]]
    # guild: Optional[Guild]
    reference: Optional[MessageReference]
    mentions: List[Union[User, Member]]
    author: Union[User, Member]
    role_mentions: List[Role]
def __init__(
    self,
    *,
    state: ConnectionState,
    channel: MessageableChannel,
    data: MessagePayload,
) -> None:
    """Construct a :class:`Message` from a raw gateway/HTTP payload.

    Parameters
    -----------
    state: :class:`ConnectionState`
        The shared connection state used for caching and HTTP access.
    channel: :class:`MessageableChannel`
        The channel this message was received in.
    data: :class:`MessagePayload`
        The raw message payload from Discord.
    """
    # NOTE: the original code assigned ``self.channel`` twice; the redundant
    # second assignment has been removed.
    self.channel: MessageableChannel = channel
    self.id: int = int(data['id'])
    self._state: ConnectionState = state
    self.webhook_id: Optional[int] = utils._get_as_snowflake(data, 'webhook_id')
    self.reactions: List[Reaction] = [Reaction(message=self, data=d) for d in data.get('reactions', [])]
    self.attachments: List[Attachment] = [Attachment(data=a, state=self._state) for a in data['attachments']]
    self.embeds: List[Embed] = [Embed.from_dict(a) for a in data['embeds']]
    self.application: Optional[MessageApplicationPayload] = data.get('application')
    self.activity: Optional[MessageActivityPayload] = data.get('activity')
    self._edited_timestamp: Optional[datetime.datetime] = utils.parse_time(data['edited_timestamp'])
    self.type: MessageType = try_enum(MessageType, data['type'])
    self.pinned: bool = data['pinned']
    self.flags: MessageFlags = MessageFlags._from_value(data.get('flags', 0))
    self.mention_everyone: bool = data['mention_everyone']
    self.tts: bool = data['tts']
    self.content: str = data['content']
    self.nonce: Optional[Union[int, str]] = data.get('nonce')
    self.stickers: List[StickerItem] = [StickerItem(data=d, state=state) for d in data.get('sticker_items', [])]
    self.components: List[Component] = [_component_factory(d) for d in data.get('components', [])]

    try:
        # if the channel doesn't have a guild attribute, we handle that
        self.guild = channel.guild  # type: ignore
    except AttributeError:
        self.guild = state._get_guild(utils._get_as_snowflake(data, 'guild_id'))

    self.interaction: Optional[MessageInteraction] = None
    try:
        interaction = data['interaction']
    except KeyError:
        pass
    else:
        self.interaction = MessageInteraction(state=state, guild=self.guild, data=interaction)

    try:
        ref = data['message_reference']
    except KeyError:
        self.reference = None
    else:
        self.reference = ref = MessageReference.with_state(state, ref)
        try:
            resolved = data['referenced_message']
        except KeyError:
            pass
        else:
            if resolved is None:
                # An explicit null means the referenced message was deleted.
                ref.resolved = DeletedReferencedMessage(ref)
            else:
                # Right now the channel IDs match but maybe in the future they won't.
                if ref.channel_id == channel.id:
                    chan = channel
                elif isinstance(channel, Thread) and channel.parent_id == ref.channel_id:
                    chan = channel
                else:
                    chan, _ = state._get_guild_channel(resolved, ref.guild_id)
                # the channel will be the correct type here
                ref.resolved = self.__class__(channel=chan, data=resolved, state=state)  # type: ignore

    # 'author' must be processed before 'member' so _handle_member can wrap
    # the already-parsed user; missing keys are simply skipped.
    for handler in ('author', 'member', 'mentions', 'mention_roles'):
        try:
            getattr(self, f'_handle_{handler}')(data[handler])
        except KeyError:
            continue
def __repr__(self) -> str:
    # Debug-friendly summary: identity plus the fields most useful in logs.
    cls_name = self.__class__.__name__
    return f'<{cls_name} id={self.id} channel={self.channel!r} type={self.type!r} author={self.author!r} flags={self.flags!r}>'
def _try_patch(self, data, key, transform=None) -> None:
    # Copy ``data[key]`` onto the attribute of the same name, if present,
    # optionally passing the value through ``transform`` first.
    if key not in data:
        return
    value = data[key]
    setattr(self, key, value if transform is None else transform(value))
def _add_reaction(self, data, emoji, user_id) -> Reaction:
    # Cache-side handler for a reaction-add event: bump an existing
    # Reaction's count or append a new one, and return it.
    reaction = utils.find(lambda r: r.emoji == emoji, self.reactions)
    # NOTE: this line deliberately mutates ``data`` as well — the 'me' key is
    # injected so a freshly constructed Reaction below picks it up.
    is_me = data['me'] = user_id == self._state.self_id
    if reaction is None:
        reaction = Reaction(message=self, data=data, emoji=emoji)
        self.reactions.append(reaction)
    else:
        reaction.count += 1
        if is_me:
            reaction.me = is_me
    return reaction
def _remove_reaction(self, data: MessageReactionRemoveEvent, emoji: EmojiInputType, user_id: int) -> Reaction:
    # Cache-side handler for a reaction-remove event: decrement the matching
    # Reaction, dropping it entirely once its count reaches zero.
    reaction = utils.find(lambda r: r.emoji == emoji, self.reactions)
    if reaction is None:
        # already removed?
        raise ValueError('Emoji already removed?')

    # If a reaction isn't found above, Discord sent bad data or we cached
    # improperly — hence the ValueError.
    if user_id == self._state.self_id:
        reaction.me = False
    reaction.count -= 1
    if reaction.count == 0:
        # list.remove raises ValueError too if something went wrong here.
        self.reactions.remove(reaction)
    return reaction
def _clear_emoji(self, emoji: PartialEmoji) -> Optional[Reaction]:
    # Drop the first cached reaction whose emoji stringifies the same as
    # ``emoji``; returns it, or None when nothing matched.
    target = str(emoji)
    for position, cached in enumerate(self.reactions):
        if str(cached.emoji) == target:
            del self.reactions[position]
            return cached
    return None
def _update(self, data: MessageUpdateEvent) -> None:
    """Apply a partial MESSAGE_UPDATE payload to this cached message."""
    # In an update scheme, 'author' key has to be handled before 'member'
    # otherwise they overwrite each other which is undesirable.
    # Since there's no good way to do this we have to iterate over every
    # handler rather than iterating over the keys which is a little slower
    for key, handler in self._HANDLERS:
        try:
            value = data[key]
        except KeyError:
            continue
        else:
            handler(self, value)

    # clear the cached properties (the _cs_* slots backing
    # utils.cached_slot_property) so they recompute from the new data
    for attr in self._CACHED_SLOTS:
        try:
            delattr(self, attr)
        except AttributeError:
            pass
# The _handle_* methods below are per-field setters collected by the
# @flatten_handlers decorator into _HANDLERS and dispatched from _update().

def _handle_edited_timestamp(self, value: str) -> None:
    # ISO8601 string -> aware datetime (utils.parse_time is None-safe).
    self._edited_timestamp = utils.parse_time(value)

def _handle_pinned(self, value: bool) -> None:
    self.pinned = value

def _handle_flags(self, value: int) -> None:
    self.flags = MessageFlags._from_value(value)

def _handle_application(self, value: MessageApplicationPayload) -> None:
    self.application = value

def _handle_activity(self, value: MessageActivityPayload) -> None:
    self.activity = value

def _handle_mention_everyone(self, value: bool) -> None:
    self.mention_everyone = value

def _handle_tts(self, value: bool) -> None:
    self.tts = value

def _handle_type(self, value: int) -> None:
    self.type = try_enum(MessageType, value)

def _handle_content(self, value: str) -> None:
    self.content = value

def _handle_attachments(self, value: List[AttachmentPayload]) -> None:
    self.attachments = [Attachment(data=a, state=self._state) for a in value]

def _handle_embeds(self, value: List[EmbedPayload]) -> None:
    self.embeds = [Embed.from_dict(data) for data in value]

def _handle_nonce(self, value: Union[str, int]) -> None:
    self.nonce = value

def _handle_author(self, author: UserPayload) -> None:
    self.author = User(state=self._state, data=author)

def _handle_member(self, member: MemberPayload) -> None:
    # Upgrade the author set by _handle_author into a Member; the member
    # payload is augmented with the minimal user data it lacks.
    member["user"] = self.author._to_minimal_user_json()
    self.author = Member(data=member, guild=self.guild, state=self._state)

def _handle_mentions(self, mentions: List[UserWithMemberPayload]) -> None:
    # Prefer cached guild Members; fall back to upgrading the raw payload.
    # Outside a guild, mentions are stored as plain Users.
    self.mentions = r = []
    guild = self.guild
    state = self._state
    if not isinstance(guild, Guild):
        self.mentions = [state.store_user(m) for m in mentions]
        return
    for mention in filter(None, mentions):
        id_search = int(mention['id'])
        member = guild.get_member(id_search)
        if member is not None:
            r.append(member)
        else:
            r.append(Member._try_upgrade(data=mention, guild=guild, state=state))

def _handle_mention_roles(self, role_mentions: List[int]) -> None:
    # Only resolvable guild roles are kept; unknown IDs are dropped.
    self.role_mentions = []
    if isinstance(self.guild, Guild):
        for role_id in map(int, role_mentions):
            role = self.guild.get_role(role_id)
            if role is not None:
                self.role_mentions.append(role)

def _handle_components(self, components: List[ComponentPayload]):
    # Intentionally a no-op here; component updates are handled elsewhere.
    pass

def _handle_interaction(self, data: MessageInteractionPayload):
    self.interaction = MessageInteraction(state=self._state, guild=self.guild, data=data)

def _rebind_cached_references(self, new_guild: Guild, new_channel: Union[TextChannel, Thread]) -> None:
    # Repoint guild/channel references (e.g. after a cache refresh).
    self.guild = new_guild
    self.channel = new_channel
@utils.cached_slot_property('_cs_raw_mentions')
def raw_mentions(self) -> List[int]:
    """List[:class:`int`]: The user IDs found via ``<@user_id>`` (or
    ``<@!user_id>``) markup in the message content.

    Unlike :attr:`mentions`, this works even in a private message context.
    """
    return [int(user_id) for user_id in re.findall(r'<@!?([0-9]{15,20})>', self.content)]
@utils.cached_slot_property('_cs_raw_channel_mentions')
def raw_channel_mentions(self) -> List[int]:
    """List[:class:`int`]: The channel IDs found via ``<#channel_id>``
    markup in the message content.
    """
    return [int(channel_id) for channel_id in re.findall(r'<#([0-9]{15,20})>', self.content)]
@utils.cached_slot_property('_cs_raw_role_mentions')
def raw_role_mentions(self) -> List[int]:
    """List[:class:`int`]: The role IDs found via ``<@&role_id>``
    markup in the message content.
    """
    return [int(role_id) for role_id in re.findall(r'<@&([0-9]{15,20})>', self.content)]
@utils.cached_slot_property('_cs_channel_mentions')
def channel_mentions(self) -> List[Union[GuildChannel, Thread]]:
    # Resolve the raw channel-mention IDs against the guild's cache,
    # dropping unresolvable ones and de-duplicating while keeping order.
    guild = self.guild
    if guild is None:
        return []
    resolved = filter(None, map(guild._resolve_channel, self.raw_channel_mentions))
    return utils._unique(resolved)
@utils.cached_slot_property('_cs_clean_content')
def clean_content(self) -> str:
    """:class:`str`: A property that returns the content in a "cleaned up"
    manner. This basically means that mentions are transformed
    into the way the client shows it. e.g. ``<#id>`` will transform
    into ``#name``.
    This will also transform @everyone and @here mentions into
    non-mentions.
    .. note::
        This *does not* affect markdown. If you want to escape
        or remove markdown then use :func:`utils.escape_markdown` or :func:`utils.remove_markdown`
        respectively, along with this function.
    """
    # Two resolver sets: guild context can consult the guild caches, while
    # the DM/group fallback can only use what's on the message itself.
    if self.guild:
        def resolve_member(id: int) -> str:
            m = self.guild.get_member(id) or utils.get(self.mentions, id=id) # type: ignore
            return f'@{m.display_name}' if m else '@deleted-user'
        def resolve_role(id: int) -> str:
            r = self.guild.get_role(id) or utils.get(self.role_mentions, id=id) # type: ignore
            return f'@{r.name}' if r else '@deleted-role'
        def resolve_channel(id: int) -> str:
            c = self.guild._resolve_channel(id) # type: ignore
            return f'#{c.name}' if c else '#deleted-channel'
    else:
        def resolve_member(id: int) -> str:
            m = utils.get(self.mentions, id=id)
            return f'@{m.display_name}' if m else '@deleted-user'
        def resolve_role(id: int) -> str:
            # Roles cannot exist outside a guild.
            return '@deleted-role'
        def resolve_channel(id: int) -> str:
            # Guild channels cannot be resolved outside a guild.
            return f'#deleted-channel'
    # Map each mention sigil (captured as group 1 below) to its resolver.
    transforms = {
        '@': resolve_member,
        '@!': resolve_member,
        '#': resolve_channel,
        '@&': resolve_role,
    }
    def repl(match: re.Match) -> str:
        type = match[1]
        id = int(match[2])
        transformed = transforms[type](id)
        return transformed
    result = re.sub(r'<(@[!&]?|#)([0-9]{15,20})>', repl, self.content)
    # Finally neutralize any remaining @everyone/@here.
    return escape_mentions(result)
@property
def created_at(self) -> datetime.datetime:
    """:class:`datetime.datetime`: When this message was created, in UTC
    (derived from the snowflake ID).
    """
    return utils.snowflake_time(self.id)
@property
def edited_at(self) -> Optional[datetime.datetime]:
    """Optional[:class:`datetime.datetime`]: When this message was last
    edited (aware UTC), or ``None`` if it never was.
    """
    return self._edited_timestamp
def is_system(self) -> bool:
    """:class:`bool`: Whether this is a system message — one constructed
    entirely by the Discord API in response to something, rather than sent
    by a user or bot.

    .. versionadded:: 1.3
    """
    # Everything except these user-authored types is system-generated.
    user_authored = (
        MessageType.default,
        MessageType.reply,
        MessageType.chat_input_command,
        MessageType.context_menu_command,
        MessageType.thread_starter_message,
    )
    return self.type not in user_authored
@utils.cached_slot_property('_cs_system_content')
def system_content(self) -> Optional[str]:
    r""":class:`str`: A property that returns the content that is rendered
    regardless of the :attr:`Message.type`.
    In the case of :attr:`MessageType.default` and :attr:`MessageType.reply`\,
    this just returns the regular :attr:`Message.content`. Otherwise this
    returns an English message denoting the contents of the system message.

    .. note::
        For message types without an explicit branch below, this implicitly
        returns ``None``.
    """
    if self.type is MessageType.default:
        return self.content
    if self.type is MessageType.recipient_add:
        if self.channel.type is ChannelType.group:
            return f'{self.author.name} added {self.mentions[0].name} to the group.'
        else:
            return f'{self.author.name} added {self.mentions[0].name} to the thread.'
    if self.type is MessageType.recipient_remove:
        if self.channel.type is ChannelType.group:
            return f'{self.author.name} removed {self.mentions[0].name} from the group.'
        else:
            return f'{self.author.name} removed {self.mentions[0].name} from the thread.'
    if self.type is MessageType.channel_name_change:
        return f'{self.author.name} changed the channel name: **{self.content}**'
    if self.type is MessageType.channel_icon_change:
        return f'{self.author.name} changed the channel icon.'
    if self.type is MessageType.pins_add:
        return f'{self.author.name} pinned a message to this channel.'
    if self.type is MessageType.new_member:
        # The welcome string is chosen deterministically from the message's
        # creation timestamp, matching the official client's behavior.
        formats = [
            "{0} joined the party.",
            "{0} is here.",
            "Welcome, {0}. We hope you brought pizza.",
            "A wild {0} appeared.",
            "{0} just landed.",
            "{0} just slid into the server.",
            "{0} just showed up!",
            "Welcome {0}. Say hi!",
            "{0} hopped into the server.",
            "Everyone welcome {0}!",
            "Glad you're here, {0}.",
            "Good to see you, {0}.",
            "Yay you made it, {0}!",
        ]
        created_at_ms = int(self.created_at.timestamp() * 1000)
        return formats[created_at_ms % len(formats)].format(self.author.name)
    if self.type is MessageType.premium_guild_subscription:
        if not self.content:
            return f'{self.author.name} just boosted the server!'
        else:
            return f'{self.author.name} just boosted the server **{self.content}** times!'
    if self.type is MessageType.premium_guild_tier_1:
        if not self.content:
            return f'{self.author.name} just boosted the server! {self.guild} has achieved **Level 1!**'
        else:
            return f'{self.author.name} just boosted the server **{self.content}** times! {self.guild} has achieved **Level 1!**'
    if self.type is MessageType.premium_guild_tier_2:
        if not self.content:
            return f'{self.author.name} just boosted the server! {self.guild} has achieved **Level 2!**'
        else:
            return f'{self.author.name} just boosted the server **{self.content}** times! {self.guild} has achieved **Level 2!**'
    if self.type is MessageType.premium_guild_tier_3:
        if not self.content:
            return f'{self.author.name} just boosted the server! {self.guild} has achieved **Level 3!**'
        else:
            return f'{self.author.name} just boosted the server **{self.content}** times! {self.guild} has achieved **Level 3!**'
    if self.type is MessageType.channel_follow_add:
        return (
            f'{self.author.name} has added {self.content} to this channel. Its most important updates will show up here.'
        )
    if self.type is MessageType.guild_stream:
        # the author will be a Member
        return f'{self.author.name} is live! Now streaming {self.author.activity.name}' # type: ignore
    if self.type is MessageType.guild_discovery_disqualified:
        return 'This server has been removed from Server Discovery because it no longer passes all the requirements. Check Server Settings for more details.'
    if self.type is MessageType.guild_discovery_requalified:
        return 'This server is eligible for Server Discovery again and has been automatically relisted!'
    if self.type is MessageType.guild_discovery_grace_period_initial_warning:
        return 'This server has failed Discovery activity requirements for 1 week. If this server fails for 4 weeks in a row, it will be automatically removed from Discovery.'
    if self.type is MessageType.guild_discovery_grace_period_final_warning:
        return 'This server has failed Discovery activity requirements for 3 weeks in a row. If this server fails for 1 more week, it will be removed from Discovery.'
    if self.type is MessageType.thread_created:
        return f'{self.author.name} started a thread: **{self.content}**. See all **threads**.'
    if self.type is MessageType.reply:
        return self.content
    if self.type is MessageType.thread_starter_message:
        if self.reference is None or self.reference.resolved is None:
            return 'Sorry, we couldn\'t load the first message in this thread'
        # the resolved message for the reference will be a Message
        return self.reference.resolved.content # type: ignore
    if self.type is MessageType.guild_invite_reminder:
        return 'Wondering who to invite?\nStart by inviting anyone who can help you build the server!'
# Typing-only overloads for edit(): ``embed`` and ``embeds`` are mutually
# exclusive, so two signatures are declared for static checkers.  The real
# implementation follows below.
@overload
async def edit(
    self,
    *,
    content: Optional[str] = ...,
    embed: Optional[Embed] = ...,
    attachments: Sequence[Union[Attachment, File]] = ...,
    suppress: bool = ...,
    delete_after: Optional[float] = ...,
    allowed_mentions: Optional[AllowedMentions] = ...,
    view: Optional[View] = ...,
) -> Message:
    ...

@overload
async def edit(
    self,
    *,
    content: Optional[str] = ...,
    embeds: Sequence[Embed] = ...,
    attachments: Sequence[Union[Attachment, File]] = ...,
    suppress: bool = ...,
    delete_after: Optional[float] = ...,
    allowed_mentions: Optional[AllowedMentions] = ...,
    view: Optional[View] = ...,
) -> Message:
    ...
async def edit(
    self,
    content: Optional[str] = MISSING,
    embed: Optional[Embed] = MISSING,
    embeds: Sequence[Embed] = MISSING,
    attachments: Sequence[Union[Attachment, File]] = MISSING,
    suppress: bool = MISSING,
    delete_after: Optional[float] = None,
    allowed_mentions: Optional[AllowedMentions] = MISSING,
    view: Optional[View] = MISSING,
) -> Message:
    """|coro|

    Edits the message.

    The content must be able to be transformed into a string via ``str(content)``.

    .. versionchanged:: 1.3
        The ``suppress`` keyword-only parameter was added.

    .. versionchanged:: 2.0
        Edits are no longer in-place, the newly edited message is returned instead.

    .. versionchanged:: 2.0
        This function will now raise :exc:`TypeError` instead of
        ``InvalidArgument``.

    Parameters
    -----------
    content: Optional[:class:`str`]
        The new content to replace the message with.
        Could be ``None`` to remove the content.
    embed: Optional[:class:`Embed`]
        The new embed to replace the original with.
        Could be ``None`` to remove the embed.
    embeds: List[:class:`Embed`]
        The new embeds to replace the original with. Must be a maximum of 10.
        To remove all embeds ``[]`` should be passed.

        .. versionadded:: 2.0
    attachments: List[Union[:class:`Attachment`, :class:`File`]]
        A list of attachments to keep in the message as well as new files to upload. If ``[]`` is passed
        then all attachments are removed.

        .. note::
            New files will always appear after current attachments.

        .. versionadded:: 2.0
    suppress: :class:`bool`
        Whether to suppress embeds for the message. This removes
        all the embeds if set to ``True``. If set to ``False``
        this brings the embeds back if they were suppressed.
        If not provided, the message's flags are left untouched.
        Using this parameter requires :attr:`~.Permissions.manage_messages`.
    delete_after: Optional[:class:`float`]
        If provided, the number of seconds to wait in the background
        before deleting the message we just edited. If the deletion fails,
        then it is silently ignored.
    allowed_mentions: Optional[:class:`~discord.AllowedMentions`]
        Controls the mentions being processed in this message. If this is
        passed, then the object is merged with :attr:`~discord.Client.allowed_mentions`.
        The merging behaviour only overrides attributes that have been explicitly passed
        to the object, otherwise it uses the attributes set in :attr:`~discord.Client.allowed_mentions`.
        If no object is passed at all then the defaults given by :attr:`~discord.Client.allowed_mentions`
        are used instead.

        .. versionadded:: 1.4
    view: Optional[:class:`~discord.ui.View`]
        The updated view to update this message with. If ``None`` is passed then
        the view is removed.

    Raises
    -------
    HTTPException
        Editing the message failed.
    Forbidden
        Tried to suppress a message without permissions or
        edited a message's content or embed that isn't yours.
    TypeError
        You specified both ``embed`` and ``embeds``

    Returns
    --------
    :class:`Message`
        The newly edited message.
    """
    if content is not MISSING:
        previous_allowed_mentions = self._state.allowed_mentions
    else:
        previous_allowed_mentions = None

    # BUGFIX: ``suppress`` previously defaulted to ``False``, so this guard
    # was always true and every edit silently reset the suppress-embeds
    # flag.  Defaulting to MISSING leaves the flags untouched unless the
    # caller explicitly asks for a change.
    if suppress is not MISSING:
        flags = MessageFlags._from_value(self.flags.value)
        flags.suppress_embeds = suppress
    else:
        flags = MISSING

    if view is not MISSING:
        # Stop dispatching to the old view before it is replaced or removed.
        self._state.prevent_view_updates_for(self.id)

    params = handle_message_parameters(
        content=content,
        flags=flags,
        embed=embed,
        embeds=embeds,
        attachments=attachments,
        view=view,
        allowed_mentions=allowed_mentions,
        previous_allowed_mentions=previous_allowed_mentions,
    )
    data = await self._state.http.edit_message(self.channel.id, self.id, params=params)
    message = Message(state=self._state, channel=self.channel, data=data)

    if view and not view.is_finished():
        self._state.store_view(view, self.id)

    if delete_after is not None:
        await self.delete(delay=delete_after)

    return message
async def add_files(self, *files: File) -> Message:
    r"""|coro|

    Adds new files to the end of the message attachments.

    .. versionadded:: 2.0

    Parameters
    -----------
    \*files: :class:`File`
        New files to add to the message.

    Raises
    -------
    HTTPException
        Editing the message failed.
    Forbidden
        Tried to edit a message that isn't yours.

    Returns
    --------
    :class:`Message`
        The newly edited message.
    """
    # Keep every existing attachment and append the new files at the end,
    # then let edit() perform the actual API call.
    combined = list(self.attachments)
    combined.extend(files)
    return await self.edit(attachments=combined)
async def remove_attachments(self, *attachments: Attachment) -> Message:
    r"""|coro|

    Removes attachments from the message.

    .. versionadded:: 2.0

    Parameters
    -----------
    \*attachments: :class:`Attachment`
        Attachments to remove from the message.

    Raises
    -------
    HTTPException
        Editing the message failed.
    Forbidden
        Tried to edit a message that isn't yours.

    Returns
    --------
    :class:`Message`
        The newly edited message.
    """
    # Re-send the attachment list minus the ones slated for removal;
    # edit() performs the actual API call.
    kept = []
    for attachment in self.attachments:
        if attachment not in attachments:
            kept.append(attachment)
    return await self.edit(attachments=kept)
| discord/message.py | 73,893 | Represents an attachment from Discord.
.. container:: operations
.. describe:: str(x)
Returns the URL of the attachment.
.. describe:: x == y
Checks if the attachment is equal to another attachment.
.. describe:: x != y
Checks if the attachment is not equal to another attachment.
.. describe:: hash(x)
Returns the hash of the attachment.
.. versionchanged:: 1.7
Attachment can now be casted to :class:`str` and is hashable.
Attributes
------------
id: :class:`int`
The attachment ID.
size: :class:`int`
The attachment size in bytes.
height: Optional[:class:`int`]
The attachment's height, in pixels. Only applicable to images and videos.
width: Optional[:class:`int`]
The attachment's width, in pixels. Only applicable to images and videos.
filename: :class:`str`
The attachment's filename.
url: :class:`str`
The attachment URL. If the message this attachment was attached
to is deleted, then this will 404.
proxy_url: :class:`str`
The proxy URL. This is a cached version of the :attr:`~Attachment.url` in the
case of images. When the message is deleted, this URL might be valid for a few
minutes or not valid at all.
content_type: Optional[:class:`str`]
The attachment's `media type <https://en.wikipedia.org/wiki/Media_type>`_
.. versionadded:: 1.7
description: Optional[:class:`str`]
The attachment's description. Only applicable to images.
.. versionadded:: 2.0
ephemeral: :class:`bool`
Whether the attachment is ephemeral.
.. versionadded:: 2.0
A special sentinel type given when the resolved message reference
points to a deleted message.
The purpose of this class is to separate referenced messages that could not be
fetched and those that were previously fetched but have since been deleted.
.. versionadded:: 1.6
Represents a message from Discord.
.. container:: operations
.. describe:: x == y
Checks if two messages are equal.
.. describe:: x != y
Checks if two messages are not equal.
.. describe:: hash(x)
Returns the message's hash.
Attributes
-----------
tts: :class:`bool`
Specifies if the message was done with text-to-speech.
This can only be accurately received in :func:`on_message` due to
a discord limitation.
type: :class:`MessageType`
The type of message. In most cases this should not be checked, but it is helpful
in cases where it might be a system message for :attr:`system_content`.
author: Union[:class:`Member`, :class:`abc.User`]
A :class:`Member` that sent the message. If :attr:`channel` is a
private channel or the user has left the guild, then it is a :class:`User` instead.
content: :class:`str`
The actual contents of the message.
nonce: Optional[Union[:class:`str`, :class:`int`]]
The value used by the discord guild and the client to verify that the message is successfully sent.
This is not stored long term within Discord's servers and is only used ephemerally.
embeds: List[:class:`Embed`]
A list of embeds the message has.
channel: Union[:class:`TextChannel`, :class:`VoiceChannel`, :class:`Thread`, :class:`DMChannel`, :class:`GroupChannel`, :class:`PartialMessageable`]
The :class:`TextChannel` or :class:`Thread` that the message was sent from.
Could be a :class:`DMChannel` or :class:`GroupChannel` if it's a private message.
reference: Optional[:class:`~discord.MessageReference`]
The message that this message references. This is only applicable to messages of
type :attr:`MessageType.pins_add`, crossposted messages created by a
followed channel integration, or message replies.
.. versionadded:: 1.5
mention_everyone: :class:`bool`
Specifies if the message mentions everyone.
.. note::
This does not check if the ``@everyone`` or the ``@here`` text is in the message itself.
Rather this boolean indicates if either the ``@everyone`` or the ``@here`` text is in the message
**and** it did end up mentioning.
mentions: List[:class:`abc.User`]
A list of :class:`Member` that were mentioned. If the message is in a private message
then the list will be of :class:`User` instead. For messages that are not of type
:attr:`MessageType.default`\, this array can be used to aid in system messages.
For more information, see :attr:`system_content`.
.. warning::
The order of the mentions list is not in any particular order so you should
not rely on it. This is a Discord limitation, not one with the library.
channel_mentions: List[Union[:class:`abc.GuildChannel`, :class:`Thread`]]
A list of :class:`abc.GuildChannel` or :class:`Thread` that were mentioned. If the message is
in a private message then the list is always empty.
role_mentions: List[:class:`Role`]
A list of :class:`Role` that were mentioned. If the message is in a private message
then the list is always empty.
id: :class:`int`
The message ID.
webhook_id: Optional[:class:`int`]
If this message was sent by a webhook, then this is the webhook ID's that sent this
message.
attachments: List[:class:`Attachment`]
A list of attachments given to a message.
pinned: :class:`bool`
Specifies if the message is currently pinned.
flags: :class:`MessageFlags`
Extra features of the message.
.. versionadded:: 1.3
reactions : List[:class:`Reaction`]
Reactions to a message. Reactions can be either custom emoji or standard unicode emoji.
activity: Optional[:class:`dict`]
The activity associated with this message. Sent with Rich-Presence related messages that for
example, request joining, spectating, or listening to or with another member.
It is a dictionary with the following optional keys:
- ``type``: An integer denoting the type of message activity being requested.
- ``party_id``: The party ID associated with the party.
application: Optional[:class:`dict`]
The rich presence enabled application associated with this message.
It is a dictionary with the following keys:
- ``id``: A string representing the application's ID.
- ``name``: A string representing the application's name.
- ``description``: A string representing the application's description.
- ``icon``: A string representing the icon ID of the application.
- ``cover_image``: A string representing the embed's image asset ID.
stickers: List[:class:`StickerItem`]
A list of sticker items given to the message.
.. versionadded:: 1.6
components: List[:class:`Component`]
A list of components in the message.
.. versionadded:: 2.0
interaction: Optional[:class:`MessageInteraction`]
The interaction that this message is a response to.
.. versionadded:: 2.0
guild: Optional[:class:`Guild`]
The guild that the message belongs to, if applicable.
Represents the interaction that a :class:`Message` is a response to.
.. versionadded:: 2.0
.. container:: operations
.. describe:: x == y
Checks if two message interactions are equal.
.. describe:: x != y
Checks if two message interactions are not equal.
.. describe:: hash(x)
Returns the message interaction's hash.
Attributes
-----------
id: :class:`int`
The interaction ID.
type: :class:`InteractionType`
The interaction type.
name: :class:`str`
The name of the interaction.
user: Union[:class:`User`, :class:`Member`]
The user or member that invoked the interaction.
Represents a reference to a :class:`~discord.Message`.
.. versionadded:: 1.5
.. versionchanged:: 1.6
This class can now be constructed by users.
Attributes
-----------
message_id: Optional[:class:`int`]
The id of the message referenced.
channel_id: :class:`int`
The channel id of the message referenced.
guild_id: Optional[:class:`int`]
The guild id of the message referenced.
fail_if_not_exists: :class:`bool`
Whether replying to the referenced message should raise :class:`HTTPException`
if the message no longer exists or Discord could not fetch the message.
.. versionadded:: 1.7
resolved: Optional[Union[:class:`Message`, :class:`DeletedReferencedMessage`]]
The message that this reference resolved to. If this is ``None``
then the original message was not fetched either due to the Discord API
not attempting to resolve it or it not being available at the time of creation.
If the message was resolved at a prior point but has since been deleted then
this will be of type :class:`DeletedReferencedMessage`.
Currently, this is mainly the replied to message when a user replies to a message.
.. versionadded:: 1.6
Represents a partial message to aid with working messages when only
a message and channel ID are present.
There are two ways to construct this class. The first one is through
the constructor itself, and the second is via the following:
- :meth:`TextChannel.get_partial_message`
- :meth:`VoiceChannel.get_partial_message`
- :meth:`Thread.get_partial_message`
- :meth:`DMChannel.get_partial_message`
Note that this class is trimmed down and has no rich attributes.
.. versionadded:: 1.6
.. container:: operations
.. describe:: x == y
Checks if two partial messages are equal.
.. describe:: x != y
Checks if two partial messages are not equal.
.. describe:: hash(x)
Returns the partial message's hash.
Attributes
-----------
channel: Union[:class:`PartialMessageable`, :class:`TextChannel`, :class:`VoiceChannel`, :class:`Thread`, :class:`DMChannel`]
The channel associated with this partial message.
id: :class:`int`
The message ID.
guild: Optional[:class:`Guild`]
The guild that the partial message belongs to, if applicable.
Optional[:class:`~discord.Message`]: The cached message, if found in the internal message cache.
:class:`int`: The channel ID of the deleted referenced message.
:class:`str`: A property that returns the content in a "cleaned up"
manner. This basically means that mentions are transformed
into the way the client shows it. e.g. ``<#id>`` will transform
into ``#name``.
This will also transform @everyone and @here mentions into
non-mentions.
.. note::
This *does not* affect markdown. If you want to escape
or remove markdown then use :func:`utils.escape_markdown` or :func:`utils.remove_markdown`
respectively, along with this function.
:class:`datetime.datetime`: The interaction's creation time in UTC.
:class:`datetime.datetime`: The partial message's creation time in UTC.
:class:`datetime.datetime`: The message's creation time in UTC.
Optional[:class:`datetime.datetime`]: An aware UTC datetime object containing the edited time of the message.
Creates a :class:`MessageReference` from an existing :class:`~discord.Message`.
.. versionadded:: 1.6
Parameters
----------
message: :class:`~discord.Message`
The message to be converted into a reference.
fail_if_not_exists: :class:`bool`
Whether replying to the referenced message should raise :class:`HTTPException`
if the message no longer exists or Discord could not fetch the message.
.. versionadded:: 1.7
Returns
-------
:class:`MessageReference`
A reference to the message.
Optional[:class:`int`]: The guild ID of the deleted referenced message.
:class:`int`: The message ID of the deleted referenced message.
:class:`bool`: Whether this attachment contains a spoiler.
:class:`bool`: Whether the message is a system message.
A system message is a message that is constructed entirely by the Discord API
in response to something.
.. versionadded:: 1.3
:class:`str`: Returns a URL that allows the client to jump to the referenced message.
.. versionadded:: 1.7
:class:`str`: Returns a URL that allows the client to jump to this message.
List[:class:`int`]: A property that returns an array of channel IDs matched with
the syntax of ``<#channel_id>`` in the message content.
List[:class:`int`]: A property that returns an array of user IDs matched with
the syntax of ``<@user_id>`` in the message content.
This allows you to receive the user IDs of mentioned users
even in a private message context.
List[:class:`int`]: A property that returns an array of role IDs matched with
the syntax of ``<@&role_id>`` in the message content.
:class:`str`: A property that returns the content that is rendered
regardless of the :attr:`Message.type`.
In the case of :attr:`MessageType.default` and :attr:`MessageType.reply`\,
this just returns the regular :attr:`Message.content`. Otherwise this
returns an English message denoting the contents of the system message.
Creates a :class:`~discord.MessageReference` from the current message.
.. versionadded:: 1.6
Parameters
----------
fail_if_not_exists: :class:`bool`
Whether replying using the message reference should raise :class:`HTTPException`
if the message no longer exists or Discord could not fetch the message.
.. versionadded:: 1.7
Returns
---------
:class:`~discord.MessageReference`
The reference to this message.
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
Reactions can be in :name:id format, but not <:name:id>. No existing emojis have <> in them, so this should be okay. the parent's message id won't be None here type: ignore type: ignore Type checker doesn't understand these are the same. This is an unfortunate data loss, but it's better than giving bad data This is also an incredibly rare scenario. type: ignore store _handle_member last This is used for duck typing purposes. Just do nothing with the data. Also needed for duck typing purposes n.b. not exposed pinned exists on PartialMessage for duck typing purposes pinned exists on PartialMessage for duck typing purposes guild: Optional[Guild] if the channel doesn't have a guild attribute, we handle that type: ignore Right now the channel IDs match but maybe in the future they won't. the channel will be the correct type here type: ignore already removed? if reaction isn't in the list, we crash. This means discord sent bad data, or we stored improperly this raises ValueError if something went wrong as well. didn't find anything so just return In an update scheme, 'author' key has to be handled before 'member' otherwise they overwrite each other which is undesirable. Since there's no good way to do this we have to iterate over every handler rather than iterating over the keys which is a little slower clear the cached properties type: ignore type: ignore type: ignore the author will be a Member type: ignore the resolved message for the reference will be a Message type: ignore | 15,628 | en | 0.747377 |
# -*- coding: utf-8 -*-
"""Utilities for calculation job resources."""
__all__ = (
'get_default_options',
'seconds_to_timelimit',
)
def get_default_options(max_num_machines: int = 1, max_wallclock_seconds: int = 1800, with_mpi: bool = False) -> dict:
    """Return an instance of the options dictionary with the minimally required parameters for a `CalcJob`.

    :param max_num_machines: set the number of nodes, default=1
    :param max_wallclock_seconds: set the maximum number of wallclock seconds, default=1800
    :param with_mpi: whether to run the calculation with MPI enabled
    """
    # Coerce the numeric arguments so callers may pass e.g. floats or strings.
    options = {
        'resources': {'num_machines': int(max_num_machines)},
        'max_wallclock_seconds': int(max_wallclock_seconds),
        'withmpi': with_mpi,
    }
    return options
def seconds_to_timelimit(seconds: int) -> str:
    """Convert seconds into a Slurm-notation time limit for the ABINIT flag `--timelimit`.

    The result has the shape ``[days-][hours:]minutes:seconds``; the days and
    hours components are omitted when they are zero.

    :param seconds: time limit in seconds
    :returns: Slurm-notation time limit (hours:minutes:seconds)
    """
    # divmod replaces the original subtract-after-divide chains; the results
    # are identical but each component is computed in a single step.
    days, seconds = divmod(seconds, 86400)
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    timelimit = ''
    if days > 0:
        timelimit += f'{days}-'
    if hours > 0:
        timelimit += f'{hours:02d}:'
    timelimit += f'{minutes:02d}:{seconds:02d}'
    return timelimit
| aiida_abinit/utils/resources.py | 1,411 | Return an instance of the options dictionary with the minimally required parameters for a `CalcJob`.
:param max_num_machines: set the number of nodes, default=1
:param max_wallclock_seconds: set the maximum number of wallclock seconds, default=1800
:param with_mpi: whether to run the calculation with MPI enabled
Convert seconds into a Slurm-notation time limit for the ABINIT flag `--timelimit`.
:param seconds: time limit in seconds
:returns: Slurm-notation time limit (hours:minutes:seconds)
Utilities for calculation job resources.
-*- coding: utf-8 -*- | 561 | en | 0.529731 |
"""On-premise Gitlab clients
"""
# from .v4 import *
| tapis_cli/clients/services/gitlab/__init__.py | 53 | On-premise Gitlab clients
from .v4 import * | 45 | en | 0.522795 |
import json
import re
class FieldValidationException(Exception):
    """Raised when a field value fails validation in ``Field.to_python``."""
class Field(object):
    """
    Base class for modular-input field validators.

    Sub-class this and override ``to_python`` if you need custom validation.
    """

    DATA_TYPE_STRING = 'string'
    DATA_TYPE_NUMBER = 'number'
    DATA_TYPE_BOOLEAN = 'boolean'

    def get_data_type(self):
        """
        Get the type of the field.
        """
        return Field.DATA_TYPE_STRING

    def __init__(self, name, title, description, required_on_create=True, required_on_edit=False):
        """
        Create the field.

        Arguments:
        name -- Set the name of the field (e.g. "database_server")
        title -- Set the human readable title (e.g. "Database server")
        description -- Set the human-readable description of the field
                       (e.g. "The IP or domain name of the database server")
        required_on_create -- If "true", the parameter is required on input stanza creation.
        required_on_edit -- If "true", the parameter is required on input stanza modification.

        Default values for required_on_create and required_on_edit match the
        documented behavior at http://docs.splunk.com/Documentation/Splunk/latest/AdvancedDev/ModInputsScripts.
        """
        # Note: a None value and a blank value are treated identically,
        # as modular input UIs do not recognize such a distinction.
        for label, text in (('name', name), ('title', title), ('description', description)):
            if text is None or not text.strip():
                raise ValueError("The %s parameter cannot be empty." % label)

        self.name = name
        self.title = title
        self.description = description
        self.required_on_create = required_on_create
        self.required_on_edit = required_on_edit

    def to_python(self, value):
        """
        Convert the field to a Python object. Should throw a FieldValidationException
        if the data is invalid.

        Arguments:
        value -- The value to convert
        """
        # No standard validation here; the modular input framework handles
        # empty values.
        return value

    def to_string(self, value):
        """
        Convert the field to a string value that can be returned. Should throw a
        FieldValidationException if the data is invalid.

        Arguments:
        value -- The value to convert
        """
        return str(value)
class BooleanField(Field):
    """Validates boolean values, accepting bools and strings such as "true"/"0"."""

    def to_python(self, value):
        Field.to_python(self, value)
        # Real booleans (and ints equal to them, since `1 == True`) pass
        # straight through unchanged.
        if value in (True, False):
            return value
        normalized = str(value).strip().lower()
        if normalized in ("true", "t", "1"):
            return True
        if normalized in ("false", "f", "0"):
            return False
        raise FieldValidationException(
            "The value of '%s' for the '%s' parameter is not a valid boolean" % (str(value), self.name))

    def to_string(self, value):
        # Splunk stanzas represent booleans as "1"/"0".
        if value is True:
            return "1"
        if value is False:
            return "0"
        return str(value)

    def get_data_type(self):
        return Field.DATA_TYPE_BOOLEAN
class DelimitedField(Field):
    """Splits a string value on a configurable delimiter, yielding a list."""

    def __init__(self, name, title, description, delim, required_on_create=True, required_on_edit=False):
        super(DelimitedField, self).__init__(name, title, description, required_on_create, required_on_edit)
        self._delim = delim

    def to_python(self, value):
        Field.to_python(self, value)
        if value is None:
            return None
        try:
            # str.split raises ValueError only when the delimiter is empty.
            return value.split(self._delim)
        except ValueError as e:
            raise FieldValidationException(str(e))

    def to_string(self, value):
        return "" if value is None else str(value)

    def get_data_type(self):
        return Field.DATA_TYPE_STRING
class DurationField(Field):
    """
    The duration field represents a duration as represented by a string such as 1d for a 24 hour period.
    The string is converted to an integer indicating the number of seconds.
    """

    DURATION_RE = re.compile(r"(?P<duration>[0-9]+)\s*(?P<units>[a-z]*)", re.IGNORECASE)

    MINUTE = 60
    HOUR = 3600
    DAY = 86400
    WEEK = 604800

    UNITS = {
        'w': WEEK, 'week': WEEK,
        'd': DAY, 'day': DAY,
        'h': HOUR, 'hour': HOUR,
        'm': MINUTE, 'min': MINUTE, 'minute': MINUTE,
        's': 1,
    }

    def to_python(self, value):
        Field.to_python(self, value)

        # Parse the duration
        m = DurationField.DURATION_RE.match(value)

        # Make sure the duration could be parsed
        if m is None:
            raise FieldValidationException(
                "The value of '%s' for the '%s' parameter is not a valid duration" % (str(value), self.name))

        # Get the units and duration.  The regex is compiled with
        # re.IGNORECASE, but UNITS only contains lowercase keys, so normalize
        # here; previously "1D" matched the pattern yet was rejected as an
        # invalid unit.
        d = m.groupdict()
        units = d['units'].lower()

        # Parse the value provided
        try:
            duration = int(d['duration'])
        except ValueError:
            raise FieldValidationException(
                "The duration '%s' for the '%s' parameter is not a valid number" % (d['duration'], self.name))

        # Make sure the units are valid
        if len(units) > 0 and units not in DurationField.UNITS:
            raise FieldValidationException(
                "The unit '%s' for the '%s' parameter is not a valid unit of duration" % (units, self.name))

        # Convert the units to seconds; a bare number is already in seconds.
        if len(units) > 0:
            return duration * DurationField.UNITS[units]
        return duration

    def to_string(self, value):
        return str(value)
class FloatField(Field):
    """Validates that a value can be converted to a float."""

    def to_python(self, value):
        Field.to_python(self, value)
        if value is None:
            return None
        try:
            return float(value)
        except ValueError as e:
            raise FieldValidationException(str(e))

    def to_string(self, value):
        return str(value) if value is not None else ""

    def get_data_type(self):
        return Field.DATA_TYPE_NUMBER
class IntegerField(Field):
    """Validates that a value can be converted to an integer."""

    def to_python(self, value):
        Field.to_python(self, value)
        if value is None:
            return None
        try:
            return int(value)
        except ValueError as e:
            raise FieldValidationException(str(e))

    def to_string(self, value):
        return str(value) if value is not None else ""

    def get_data_type(self):
        return Field.DATA_TYPE_NUMBER
class IntervalField(Field):
    '''Class for handling Splunk's "interval" field, which typically accepts
    an integer value OR a cron-style string. Note that this means that the
    data type returned is a string, so the modular input must handle conversion
    of this string to an integer at runtime.'''

    # Accepted cron field formats:
    #     Asterisk: * (equivalent to first-last range)
    #     Lists: 1,2,3,4,5
    #     Ranges: 1-60
    #
    # and combinations of the above:
    #
    #     Ranges followed by steps: 0-23/2
    #     Asterisks followed by steps: */2
    #
    # Note that we don't check explicitly for correct numeric values for each
    # cron field.
    cron_rx = re.compile(
        r'''
        (
            \d{1,2}                  # A digit.
            |\d{1,2}-\d{1,2}         # A range.
            |(\d{1,2},)+\d{1,2}      # A list of digits.
            |\d{1,2}-\d{1,2}/\d{1,2} # A range followed by a step.
            |\*                      # The asterisk character.
            |\*/\d{1,2}              # An asterisk followed by a step.
        )
        ''', re.VERBOSE)

    def to_python(self, value):
        # Try parsing the string as an integer first.
        try:
            return int(value)
        except ValueError:
            pass
        # Fall back to parsing the string as a cron schedule.
        if self.parse_cron(value):
            return value
        raise FieldValidationException("The value of '{}' for the '{}' parameter is not a valid value".format(
            value, self.name))

    def get_data_type(self):
        return Field.DATA_TYPE_STRING

    def parse_cron(self, value):
        '''Check for valid cron string.'''
        fields = value.split()
        # fullmatch (rather than match) so that a field with trailing garbage,
        # e.g. "5x", is rejected instead of matching on its "5" prefix.
        return len(fields) == 5 and all(self.cron_rx.fullmatch(i) for i in fields)
class JsonField(Field):
    """Validates that a value parses as JSON and returns the parsed object."""

    def to_python(self, value):
        Field.to_python(self, value)
        try:
            parsed = json.loads(value)
        except (TypeError, ValueError):
            raise FieldValidationException(
                "The value of '%s' for the '%s' parameter is not a valid JSON object" % (str(value), self.name))
        return parsed

    def to_string(self, value):
        return str(value)

    def get_data_type(self):
        return Field.DATA_TYPE_STRING
class ListField(Field):
    """Converts a comma-separated string into a list of strings."""

    def to_python(self, value):
        Field.to_python(self, value)
        return [] if value is None else value.split(",")

    def to_string(self, value):
        return ",".join(value) if value is not None else ""
class RangeField(Field):
    """Validates an integer value constrained to the inclusive range [low, high]."""

    def __init__(self, name, title, description, low, high, required_on_create=True, required_on_edit=False):
        super(RangeField, self).__init__(name, title, description, required_on_create, required_on_edit)
        self.low = low
        self.high = high

    def to_python(self, value):
        Field.to_python(self, value)
        if value is None:
            return None
        try:
            parsed = int(value)
        except ValueError as e:
            raise FieldValidationException(str(e))
        if self.low <= parsed <= self.high:
            return parsed
        raise FieldValidationException("Value out of range.")

    def to_string(self, value):
        return str(value) if value is not None else ""

    def get_data_type(self):
        return Field.DATA_TYPE_NUMBER
class RegexField(Field):
    """Validates that a value compiles as a regular expression."""

    def to_python(self, value):
        Field.to_python(self, value)
        if value is None:
            return None
        try:
            return re.compile(value)
        except Exception as e:
            raise FieldValidationException(str(e))

    def to_string(self, value):
        # A compiled pattern round-trips via its source text.
        return value.pattern if value is not None else ""
class SeverityField(Field):
    """Maps severity names (e.g. "DEBUG") to their numeric logging levels."""

    # Note: "FATAL" is intentionally absent since Python's logging assigns it
    # the same numeric value as "CRITICAL".
    SEVERITIES = {'DEBUG': 10, 'INFO': 20, 'WARN': 30, 'ERROR': 40, 'CRITICAL': 50}
    SEVERITIES_BY_INT = {v: k for k, v in SEVERITIES.items()}

    def to_python(self, value):
        try:
            level = SeverityField.SEVERITIES.get(value)
        except AttributeError:
            # Did not receive a string for some reason.
            level = None
        if level is not None:
            return level
        raise FieldValidationException("The value of '{}' for the '{}' parameter is not a valid value".format(
            value, self.name))

    def to_string(self, value):
        try:
            return SeverityField.SEVERITIES_BY_INT[value]
        except KeyError:
            raise ValueError('Invalid value provided for severity.')

    def get_data_type(self):
        return Field.DATA_TYPE_NUMBER
class VerbosityField(Field):
    """Validates a logging verbosity level: one of 10, 20, 30, 40 or 50."""

    def to_python(self, value):
        Field.to_python(self, value)
        # Check for None *before* converting: the original called int(value)
        # first, which raised TypeError on None and made the None-returning
        # branch unreachable.
        if value is None:
            return None
        value = int(value)
        if value in (10, 20, 30, 40, 50):
            return value
        raise FieldValidationException('Invalid value provided for verbosity, must be one of the following: ' +
                                       '{10, 20, 30, 40, 50}')

    def to_string(self, value):
        return str(value) if value is not None else ""

    def get_data_type(self):
        return Field.DATA_TYPE_NUMBER
| splunk_eventgen/splunk_app/lib/mod_input/fields.py | 12,593 | The duration field represents a duration as represented by a string such as 1d for a 24 hour period.
The string is converted to an integer indicating the number of seconds.
This is the base class that should be used to create field validators. Sub-class this and override to_python if you
need custom validation.
Class for handling Splunk's "interval" field, which typically accepts
an integer value OR a cron-style string. Note that this means that the
data type returned is a string, so the modular input must handle conversion
of this string to an integer at runtime.
Create the field.
Arguments:
name -- Set the name of the field (e.g. "database_server")
title -- Set the human readable title (e.g. "Database server")
description -- Set the human-readable description of the field
(e.g. "The IP or domain name of the database server")
required_on_create -- If "true", the parameter is required on input stanza creation.
required_on_edit -- If "true", the parameter is required on input stanza modification.
Default values for required_on_create and required_on_edit match the
documented behavior at http://docs.splunk.com/Documentation/Splunk/latest/AdvancedDev/ModInputsScripts.
Get the type of the field.
Check for valid cron string.
Convert the field to a Python object. Should throw a FieldValidationException if the data is invalid.
Arguments:
value -- The value to convert
Convert the field to a string value that can be returned. Should throw a FieldValidationException if the data is
invalid.
Arguments:
value -- The value to convert
Note: there is no distinction between a None value and blank value, as modular input UIs does not recognize such a distinction. No standard validation here; the modular input framework handles empty values. Parse the duration Make sure the duration could be parsed Get the units and duration Parse the value provided Make sure the units are valid Convert the units to seconds Accepted cron field formats: Asterisk: * (equivalent to first-last range) Lists: 1,2,3,4,5 Ranges: 1-60 and combinations of the above: Ranges followed by steps: 0-23/2 Asterisks followed by steps: */2 Note that we don't check explicitly for correct numeric values for each cron field. Try parsing the string as an integer. Try parsing the string as a cron schedule. Note: We ignore "FATAL" severity since Python's logging assigns it the same value as "CRITICAL". Did not receive a string for some reason. | 2,490 | en | 0.733836 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Homer Strong, Radim Rehurek
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module implements the "hashing trick" [1]_ -- a mapping between words and their integer ids
using a fixed and static mapping.
Notes
-----
The static mapping has a constant memory footprint, regardless of the number of word-types (features) in your corpus,
so it's suitable for processing extremely large corpora. The ids are computed as `hash(word) % id_range`,
where `hash` is a user-configurable function (`zlib.adler32` by default).
Advantages:
* New words can be represented immediately, without an extra pass through the corpus
to collect all the ids first.
* Can be used with non-repeatable (once-only) streams of documents.
* All tokens will be used (not only that you see in documents), typical problem
for :class:`~gensim.corpora.dictionary.Dictionary`.
Disadvantages:
* Words may map to the same id, causing hash collisions. The word <-> id mapping is no longer a bijection.
References
----------
.. [1] http://en.wikipedia.org/wiki/Hashing-Trick
"""
from __future__ import with_statement
import logging
import itertools
import zlib
from gensim import utils
from six import iteritems, iterkeys
logger = logging.getLogger(__name__)
class HashDictionary(utils.SaveLoad, dict):
    """Encapsulates the mapping between normalized words and their integer ids.
    Notes
    -----
    Unlike :class:`~gensim.corpora.dictionary.Dictionary`,
    building a :class:`~gensim.corpora.hashdictionary.HashDictionary` before using it **isn't a necessary step**.
    The documents can be computed immediately, from an uninitialized
    :class:`~gensim.corpora.hashdictionary.HashDictionary` without seeing the rest of the corpus first.
    Examples
    --------
    >>> from gensim.corpora import HashDictionary
    >>>
    >>> texts = [['human', 'interface', 'computer']]
    >>> dct = HashDictionary(texts)
    >>> dct.doc2bow(texts[0])
    [(10608, 1), (12466, 1), (31002, 1)]
    """
    def __init__(self, documents=None, id_range=32000, myhash=zlib.adler32, debug=True):
        """
        Parameters
        ----------
        documents : iterable of iterable of str
            Iterable of documents, if given - use them to initialization.
        id_range : int, optional
            Number of hash-values in table, used as `id = myhash(key) % id_range`.
        myhash : function
            Hash function, should support interface myhash(str) -> int, used `zlib.adler32` by default.
        debug : bool
            If True - store raw tokens mapping (as str <-> id).
            If you find yourself running out of memory (or not sure that you really need raw tokens), set `debug=False`.
        """
        self.myhash = myhash  # hash fnc: string->integer
        self.id_range = id_range  # hash range: id = myhash(key) % id_range
        self.debug = debug
        # the following (potentially massive!) dictionaries are only formed if `debug` is True
        self.token2id = {}
        self.id2token = {}  # reverse mapping int->set(words)
        self.dfs = {}  # token_id -> how many documents this token_id appeared in
        self.dfs_debug = {}  # token_string->how many documents this word appeared in
        self.num_docs = 0  # number of documents processed
        self.num_pos = 0  # total number of corpus positions
        self.num_nnz = 0  # total number of non-zeroes in the BOW matrix
        self.allow_update = True
        if documents is not None:
            self.add_documents(documents)

    def __getitem__(self, tokenid):
        """Get all words that have mapped to the given id so far, as a set.
        Warnings
        --------
        Works only if `debug=True`.
        Parameters
        ----------
        tokenid : int
            Token identifier (result of hashing).
        Return
        ------
        set of str
            Set of all corresponding words.
        """
        return self.id2token.get(tokenid, set())

    def restricted_hash(self, token):
        """Calculate id of the given token.
        Also keep track of what words were mapped to what ids, for debugging reasons.
        Parameters
        ----------
        token : str
            Input token.
        Return
        ------
        int
            Hash value of `token`.
        """
        h = self.myhash(utils.to_utf8(token)) % self.id_range
        if self.debug:
            self.token2id[token] = h
            self.id2token.setdefault(h, set()).add(token)
        return h

    def __len__(self):
        """Get the number of distinct ids = the entire dictionary size."""
        return self.id_range

    def keys(self):
        """Get an iterable of all token ids (the full hash range, regardless of what was seen)."""
        return range(len(self))

    def __str__(self):
        return "HashDictionary(%i id range)" % len(self)

    @staticmethod
    def from_documents(*args, **kwargs):
        """Alternative constructor: build a :class:`HashDictionary`, forwarding all arguments to `__init__`."""
        return HashDictionary(*args, **kwargs)

    def add_documents(self, documents):
        """Build dictionary from a collection of documents.
        Notes
        -----
        This is only a convenience wrapper for calling `doc2bow` on each document with `allow_update=True`.
        Parameters
        ----------
        documents : iterable of list of str
            Collection of documents.
        Examples
        --------
        >>> from gensim.corpora import HashDictionary
        >>>
        >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
        >>> dct = HashDictionary(corpus)
        >>> "sparta" in dct.token2id
        False
        >>> dct.add_documents([["this","is","sparta"],["just","joking"]])  # add more documents in dictionary
        >>> "sparta" in dct.token2id
        True
        """
        for docno, document in enumerate(documents):
            if docno % 10000 == 0:
                logger.info("adding document #%i to %s", docno, self)
            self.doc2bow(document, allow_update=True)  # ignore the result, here we only care about updating token ids
        logger.info(
            "built %s from %i documents (total %i corpus positions)",
            self, self.num_docs, self.num_pos
        )

    def doc2bow(self, document, allow_update=False, return_missing=False):
        """Convert `document` into the bag-of-words format, like [(1, 4), (150, 1), (2005, 2)].
        Notes
        -----
        Each word is assumed to be a **tokenized and normalized** utf-8 encoded string. No further preprocessing
        is done on the words in `document` (apply tokenization, stemming etc) before calling this method.
        If `allow_update` or `self.allow_update` is set, then also update dictionary in the process: update overall
        corpus statistics and document frequencies. For each id appearing in this document, increase its document
        frequency (`self.dfs`) by one.
        Parameters
        ----------
        document : list of str
            Is a list of tokens = **tokenized and normalized** strings (either utf8 or unicode).
        allow_update : bool, optional
            If True - update dictionary in the process.
        return_missing : bool, optional
            Also return the document frequencies of missing words. Meaningless for this class: thanks to the
            hashing trick every token always gets an id, so the returned mapping is always empty.
        Return
        ------
        list of (int, int)
            Document in Bag-of-words (BoW) format.
        list of (int, int), dict
            If `return_missing=True`, return document in Bag-of-words (BoW) format + empty dictionary.
        Examples
        --------
        >>> from gensim.corpora import HashDictionary
        >>>
        >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
        >>> dct = HashDictionary(corpus)
        >>> dct.doc2bow(["this","is","máma"])
        [(1721, 1), (5280, 1), (22493, 1)]
        >>> dct.doc2bow(["this","is","máma"], return_missing=True)
        ([(1721, 1), (5280, 1), (22493, 1)], {})
        """
        result = {}
        missing = {}
        document = sorted(document)  # convert the input to plain list (needed below)
        for word_norm, group in itertools.groupby(document):
            frequency = len(list(group))  # how many times does this word appear in the input document
            tokenid = self.restricted_hash(word_norm)
            result[tokenid] = result.get(tokenid, 0) + frequency
            if self.debug:
                # increment document count for each unique token that appeared in the document
                self.dfs_debug[word_norm] = self.dfs_debug.get(word_norm, 0) + 1
        if allow_update or self.allow_update:
            self.num_docs += 1
            self.num_pos += len(document)
            self.num_nnz += len(result)
            if self.debug:
                # increment document count for each unique tokenid that appeared in the document
                # done here, because several words may map to the same tokenid
                for tokenid in iterkeys(result):
                    self.dfs[tokenid] = self.dfs.get(tokenid, 0) + 1
        # return tokenids, in ascending id order
        result = sorted(iteritems(result))
        if return_missing:
            return result, missing
        else:
            return result

    def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000):
        """Filter tokens in dictionary by frequency.
        Parameters
        ----------
        no_below : int, optional
            Keep tokens which are contained in at least `no_below` documents.
        no_above : float, optional
            Keep tokens which are contained in no more than `no_above` documents
            (fraction of total corpus size, not an absolute number).
        keep_n : int, optional
            Keep only the first `keep_n` most frequent tokens.
        Notes
        -----
        For tokens that appear in:
        #. Less than `no_below` documents (absolute number) or \n
        #. More than `no_above` documents (fraction of total corpus size, **not absolute number**).
        #. After (1) and (2), keep only the first `keep_n` most frequent tokens (or keep all if `None`).
        Since :class:`~gensim.corpora.hashdictionary.HashDictionary` id range is fixed and doesn't depend on the number
        of tokens seen, this doesn't really "remove" anything.
        It only clears some supplementary statistics, for easier debugging and a smaller RAM footprint.
        Examples
        --------
        >>> from gensim.corpora import HashDictionary
        >>>
        >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
        >>> dct = HashDictionary(corpus)
        >>> dct.filter_extremes(no_below=1, no_above=0.5, keep_n=1)
        >>> print dct.token2id
        {'maso': 15025}
        """
        no_above_abs = int(no_above * self.num_docs)  # convert fractional threshold to absolute threshold
        ok = [item for item in iteritems(self.dfs_debug) if no_below <= item[1] <= no_above_abs]
        ok = frozenset(word for word, freq in sorted(ok, key=lambda x: -x[1])[:keep_n])
        self.dfs_debug = {word: freq for word, freq in iteritems(self.dfs_debug) if word in ok}
        self.token2id = {token: tokenid for token, tokenid in iteritems(self.token2id) if token in self.dfs_debug}
        self.id2token = {
            tokenid: {token for token in tokens if token in self.dfs_debug}
            for tokenid, tokens in iteritems(self.id2token)
        }
        self.dfs = {tokenid: freq for tokenid, freq in iteritems(self.dfs) if self.id2token.get(tokenid, set())}
        # for word->document frequency
        logger.info(
            "kept statistics for which were in no less than %i and no more than %i (=%.1f%%) documents",
            no_below, no_above_abs, 100.0 * no_above
        )

    def save_as_text(self, fname):
        """Save this HashDictionary to a text file.
        Parameters
        ----------
        fname : str
            Path to output file.
        Notes
        -----
        The format is:
        `id[TAB]document frequency of this id[TAB]tab-separated set of words in UTF8 that map to this id[NEWLINE]`.
        Examples
        --------
        >>> from gensim.corpora import HashDictionary
        >>> from gensim.test.utils import get_tmpfile
        >>>
        >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
        >>> data = HashDictionary(corpus)
        >>> data.save_as_text(get_tmpfile("dictionary_in_text_format"))
        """
        # lazy %-style arguments, consistent with the rest of this module (was an eager "%" interpolation)
        logger.info("saving HashDictionary mapping to %s", fname)
        with utils.smart_open(fname, 'wb') as fout:
            for tokenid in self.keys():
                words = sorted(self[tokenid])
                if words:
                    words_df = [(word, self.dfs_debug.get(word, 0)) for word in words]
                    words_df = ["%s(%i)" % item for item in sorted(words_df, key=lambda x: -x[1])]
                    words_df = '\t'.join(words_df)
                    fout.write(utils.to_utf8("%i\t%i\t%s\n" % (tokenid, self.dfs.get(tokenid, 0), words_df)))
| gensim/gensim/corpora/hashdictionary.py | 13,162 | Encapsulates the mapping between normalized words and their integer ids.
Notes
-----
Unlike :class:`~gensim.corpora.dictionary.Dictionary`,
building a :class:`~gensim.corpora.hashdictionary.HashDictionary` before using it **isn't a necessary step**.
The documents can be computed immediately, from an uninitialized
:class:`~gensim.corpora.hashdictionary.HashDictionary` without seeing the rest of the corpus first.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> texts = [['human', 'interface', 'computer']]
>>> dct = HashDictionary(texts)
>>> dct.doc2bow(texts[0])
[(10608, 1), (12466, 1), (31002, 1)]
Get all words that have mapped to the given id so far, as a set.
Warnings
--------
Works only if `debug=True`.
Parameters
----------
tokenid : int
Token identifier (result of hashing).
Return
------
set of str
Set of all corresponding words.
Parameters
----------
documents : iterable of iterable of str
Iterable of documents, if given - use them to initialization.
id_range : int, optional
Number of hash-values in table, used as `id = myhash(key) % id_range`.
myhash : function
Hash function, should support interface myhash(str) -> int, used `zlib.adler32` by default.
debug : bool
If True - store raw tokens mapping (as str <-> id).
If you find yourself running out of memory (or not sure that you really need raw tokens), set `debug=False`.
Get the number of distinct ids = the entire dictionary size.
Build dictionary from a collection of documents.
Notes
-----
This is only a convenience wrapper for calling `doc2bow` on each document with `allow_update=True`.
Parameters
----------
documents : iterable of list of str
Collection of documents.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> "sparta" in dct.token2id
False
>>> dct.add_documents([["this","is","sparta"],["just","joking"]]) # add more documents in dictionary
>>> "sparta" in dct.token2id
True
Convert `document` into the bag-of-words format, like [(1, 4), (150, 1), (2005, 2)].
Notes
-----
Each word is assumed to be a **tokenized and normalized** utf-8 encoded string. No further preprocessing
is done on the words in `document` (apply tokenization, stemming etc) before calling this method.
If `allow_update` or `self.allow_update` is set, then also update dictionary in the process: update overall
corpus statistics and document frequencies. For each id appearing in this document, increase its document
frequency (`self.dfs`) by one.
Parameters
----------
document : list of str
Is a list of tokens = **tokenized and normalized** strings (either utf8 or unicode).
allow_update : bool, optional
If True - update dictionary in the process.
return_missing : bool, optional
    Show token_count for missing words. Has no effect for this class, because it uses the hashing trick.
Return
------
list of (int, int)
Document in Bag-of-words (BoW) format.
list of (int, int), dict
If `return_missing=True`, return document in Bag-of-words (BoW) format + empty dictionary.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> dct.doc2bow(["this","is","máma"])
[(1721, 1), (5280, 1), (22493, 1)]
>>> dct.doc2bow(["this","is","máma"], return_missing=True)
([(1721, 1), (5280, 1), (22493, 1)], {})
Filter tokens in dictionary by frequency.
Parameters
----------
no_below : int, optional
Keep tokens which are contained in at least `no_below` documents.
no_above : float, optional
Keep tokens which are contained in no more than `no_above` documents
(fraction of total corpus size, not an absolute number).
keep_n : int, optional
Keep only the first `keep_n` most frequent tokens.
Notes
-----
For tokens that appear in:
#. Less than `no_below` documents (absolute number) or
#. More than `no_above` documents (fraction of total corpus size, **not absolute number**).
#. After (1) and (2), keep only the first `keep_n` most frequent tokens (or keep all if `None`).
Since :class:`~gensim.corpora.hashdictionary.HashDictionary` id range is fixed and doesn't depend on the number
of tokens seen, this doesn't really "remove" anything.
It only clears some supplementary statistics, for easier debugging and a smaller RAM footprint.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> dct.filter_extremes(no_below=1, no_above=0.5, keep_n=1)
>>> print dct.token2id
{'maso': 15025}
Get a list of all token ids.
Calculate id of the given token.
Also keep track of what words were mapped to what ids, for debugging reasons.
Parameters
----------
token : str
Input token.
Return
------
int
Hash value of `token`.
Save this HashDictionary to a text file.
Parameters
----------
fname : str
Path to output file.
Notes
-----
The format is:
`id[TAB]document frequency of this id[TAB]tab-separated set of words in UTF8 that map to this id[NEWLINE]`.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>> from gensim.test.utils import get_tmpfile
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> data = HashDictionary(corpus)
>>> data.save_as_text(get_tmpfile("dictionary_in_text_format"))
This module implements the "hashing trick" [1]_ -- a mapping between words and their integer ids
using a fixed and static mapping.
Notes
-----
The static mapping has a constant memory footprint, regardless of the number of word-types (features) in your corpus,
so it's suitable for processing extremely large corpora. The ids are computed as `hash(word) % id_range`,
where `hash` is a user-configurable function (`zlib.adler32` by default).
Advantages:
* New words can be represented immediately, without an extra pass through the corpus
to collect all the ids first.
* Can be used with non-repeatable (once-only) streams of documents.
* All tokens will be used (not only that you see in documents), typical problem
for :class:`~gensim.corpora.dictionary.Dictionary`.
Disadvantages:
* Words may map to the same id, causing hash collisions. The word <-> id mapping is no longer a bijection.
References
----------
.. [1] http://en.wikipedia.org/wiki/Hashing-Trick
!/usr/bin/env python -*- coding: utf-8 -*- Copyright (C) 2012 Homer Strong, Radim Rehurek Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html hash fnc: string->integer hash range: id = myhash(key) % id_range the following (potentially massive!) dictionaries are only formed if `debug` is True reverse mapping int->set(words) token_id -> how many documents this token_id appeared in token_string->how many documents this word appeared in number of documents processed total number of corpus positions total number of non-zeroes in the BOW matrix ignore the result, here we only care about updating token ids convert the input to plain list (needed below) how many times does this word appear in the input document increment document count for each unique token that appeared in the document increment document count for each unique tokenid that appeared in the document done here, because several words may map to the same tokenid return tokenids, in ascending id order convert fractional threshold to absolute threshold for word->document frequency | 7,492 | en | 0.699109 |
import configparser
import logging
def dict_url(conf):
    """Read an ``url.ini``-style file and map each parking name to its data.

    :param conf: path of the ini file; it must contain ``url``, ``name`` and
        ``adress`` sections sharing the same keys
    :returns: dictionary mapping each parking name to a ``(url, adress)`` tuple
    :rtype: dict
    """
    parser = configparser.ConfigParser()
    logging.debug("initializing the variable url")
    parser.read(conf)
    logging.debug("read the file")
    logging.debug("all url in file %s", list(parser["url"]))
    # one entry per key of the [url] section, keyed by the human-readable name
    result = {
        parser["name"][key]: (parser["url"][key], parser["adress"][key])
        for key in parser["url"]
    }
    logging.info("this is the dict with keys and urls %s", result)
    return result
| backend/function_park/dict_url.py | 724 | Add all urls from the file url.ini, with
key = name of the parking and value =
the url.
:returns: dictionary with all parkings and urls
:rtype: dict
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.sources.tas.api import _load_data, process_csv
def test_load_data():
    """Smoke test: the TAS dataset loads and is non-trivially sized."""
    rows = _load_data()
    assert len(rows) > 100, len(rows)
def test_processor():
    """Process the TAS CSV and check the extracted statement set."""
    processor = process_csv(affinity_class_limit=10)
    assert processor
    assert processor.statements
    stmt_count = len(processor.statements)
    # This is the total number of statements about human genes
    assert stmt_count == 51722, stmt_count
    # every statement must carry exactly one evidence object
    bad_evidence = [s for s in processor.statements if len(s.evidence) != 1]
    assert not bad_evidence, \
        "Some statements lack evidence, or have extra evidence."
| indra/tests/test_tas.py | 625 | This is the total number of statements about human genes | 56 | en | 0.877267 |
from typing import Callable, Iterable, Sequence
import numpy as np
from dpipe.im.axes import AxesLike, AxesParams
from dpipe.itertools import lmap, squeeze_first
from dpipe.im import pad_to_shape
def pad_batch_equal(batch, padding_values: AxesParams = 0, ratio: AxesParams = 0.5):
    """
    Pad each element of ``batch`` to a common shape so that the result stacks
    into a single correctly shaped array.
    References
    ----------
    `pad_to_shape`
    """
    target_shape = np.max(lmap(np.shape, batch), axis=0)
    if target_shape.size == 0:
        # a batch of scalars: nothing to pad
        return np.array(batch)
    padded = [pad_to_shape(entry, target_shape, padding_values=padding_values, ratio=ratio) for entry in batch]
    return np.array(padded)
def unpack_args(func: Callable, *args, **kwargs):
    """
    Build a function that receives a single iterable and calls ``func`` with
    the iterable's elements unpacked as positional arguments.
    ``args`` and ``kwargs`` are forwarded to ``func`` on every call.
    Examples
    --------
    >>> def add(x, y):
    >>>     return x + y
    >>>
    >>> add_ = unpack_args(add)
    >>> add(1, 2) == add_([1, 2])
    >>> True
    """
    def unpacked(iterable, *extra_args, **extra_kwargs):
        # call-time arguments come before the construction-time ones
        return func(*iterable, *extra_args, *args, **extra_kwargs, **kwargs)
    return unpacked
def multiply(func: Callable, *args, **kwargs):
    """
    Build a function that maps ``func`` over every element of an iterable and
    returns the results as a tuple.
    Useful when multiple batches require the same function.
    ``args`` and ``kwargs`` are forwarded to ``func`` on every call.
    """
    def mapped(xs: Iterable, *extra_args, **extra_kwargs) -> tuple:
        results = []
        for item in xs:
            results.append(func(item, *extra_args, *args, **extra_kwargs, **kwargs))
        return tuple(results)
    return mapped
def apply_at(index: AxesLike, func: Callable, *args, **kwargs):
    """
    Build a function that receives a sequence and applies ``func`` only to the
    entries at the given ``index`` (or indices), leaving the rest untouched.
    ``args`` and ``kwargs`` are forwarded to ``func``.
    Examples
    --------
    >>> first_sqr = apply_at(0, np.square)
    >>> first_sqr([3, 2, 1])
    >>> (9, 2, 1)
    """
    indices = set(np.atleast_1d(index).tolist())

    def applier(xs: Sequence, *args_, **kwargs_) -> tuple:
        # resolve negative indices against the current sequence length
        positions = {len(xs) + i if i < 0 else i for i in indices}
        for position in positions:
            if not 0 <= position < len(xs):
                raise IndexError(f'Index {position} out of bounds.')
        return tuple(
            func(x, *args_, *args, **kwargs_, **kwargs) if i in positions else x
            for i, x in enumerate(xs)
        )
    return applier
def zip_apply(*functions: Callable, **kwargs):
    """
    Build a function that applies the i-th of ``functions`` to the i-th element
    of an iterable, returning the results as a tuple.
    ``kwargs`` are forwarded to each function.
    Examples
    --------
    >>> zipper = zip_apply(np.square, np.sqrt)
    >>> zipper([4, 9])
    >>> (16, 3)
    """
    def zipped(xs: Sequence, *args, **kwargs_) -> tuple:
        results = []
        for fn, item in zip(functions, xs):
            results.append(fn(item, *args, **kwargs_, **kwargs))
        return tuple(results)
    return zipped
def random_apply(p: float, func: Callable, *args, **kwargs):
    """
    Build a function that applies ``func`` with probability ``p`` and otherwise
    returns its input unchanged.
    ``args`` and ``kwargs`` are forwarded to ``func``.
    """
    def maybe_apply(*call_args, **call_kwargs):
        if not np.random.binomial(1, p):
            # not chosen this time: hand back the input (unwrapped if single)
            return squeeze_first(call_args)
        return func(*call_args, *args, **call_kwargs, **kwargs)
    return maybe_apply
def sample_args(func: Callable, *args: Callable, **kwargs: Callable):
    """
    Build a function that, on every call, samples extra arguments for ``func``
    by invoking each callable in ``args`` and ``kwargs``.
    Examples
    --------
    >>> from scipy.ndimage import rotate
    >>>
    >>> random_rotate = sample_args(rotate, angle=np.random.normal)
    >>> random_rotate(x)
    >>> # same as
    >>> rotate(x, angle=np.random.normal())
    """
    def sampled(*call_args, **call_kwargs):
        drawn_args = [draw() for draw in args]
        drawn_kwargs = {name: draw() for name, draw in kwargs.items()}
        return func(*call_args, *drawn_args, **call_kwargs, **drawn_kwargs)
    return sampled
| dpipe/batch_iter/utils.py | 4,026 | Returns a function that takes an iterable and applies ``func`` to the values at the corresponding ``index``.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
Examples
--------
>>> first_sqr = apply_at(0, np.square)
>>> first_sqr([3, 2, 1])
>>> (9, 2, 1)
Returns a function that takes an iterable and maps ``func`` over it.
Useful when multiple batches require the same function.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
Pad each element of ``batch`` to obtain a correctly shaped array.
References
----------
`pad_to_shape`
Returns a function that applies ``func`` with a given probability ``p``.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
Returns a function that samples arguments for ``func`` from ``args`` and ``kwargs``.
Each argument in ``args`` and ``kwargs`` must be a callable that samples a random value.
Examples
--------
>>> from scipy.ndimage import rotate
>>>
>>> random_rotate = sample_args(rotate, angle=np.random.normal)
>>> random_rotate(x)
>>> # same as
>>> rotate(x, angle=np.random.normal())
Returns a function that takes an iterable and unpacks it while calling ``func``.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
Examples
--------
>>> def add(x, y):
>>> return x + y
>>>
>>> add_ = unpack_args(add)
>>> add(1, 2) == add_([1, 2])
>>> True
Returns a function that takes an iterable and zips ``functions`` over it.
``kwargs`` are passed to each function as additional arguments.
Examples
--------
>>> zipper = zip_apply(np.square, np.sqrt)
>>> zipper([4, 9])
>>> (16, 3)
if not scalars | 1,634 | en | 0.645214 |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Click parameter type for AiiDA Plugins."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import six
import click
from aiida.cmdline.utils import decorators
from aiida.common import exceptions
from aiida.plugins.entry_point import ENTRY_POINT_STRING_SEPARATOR, ENTRY_POINT_GROUP_PREFIX, EntryPointFormat
from aiida.plugins.entry_point import format_entry_point_string, get_entry_point_string_format
from aiida.plugins.entry_point import get_entry_point, get_entry_points, get_entry_point_groups
class PluginParamType(click.ParamType):
    """
    AiiDA Plugin name parameter type.
    :param group: string or tuple of strings, where each is a valid entry point group. Adding the `aiida.`
        prefix is optional. If it is not detected it will be prepended internally.
    :param load: when set to True, convert will not return the entry point, but the loaded entry point
    Usage::
        click.option(... type=PluginParamType(group='aiida.calculations')
    or::
        click.option(... type=PluginParamType(group=('calculations', 'data'))
    """
    name = 'plugin'

    def __init__(self, group=None, load=False, *args, **kwargs):
        """
        Validate that group is either a string or a tuple of valid entry point groups, or if it
        is not specified use the tuple of all recognized entry point groups.
        """
        # pylint: disable=keyword-arg-before-vararg
        valid_entry_point_groups = get_entry_point_groups()
        if group is None:
            self._groups = tuple(valid_entry_point_groups)
        else:
            if isinstance(group, six.string_types):
                invalidated_groups = tuple([group])
            elif isinstance(group, tuple):
                invalidated_groups = group
            else:
                raise ValueError('invalid type for group')
            groups = []
            for grp in invalidated_groups:
                # normalize by prepending the `aiida.` prefix if it is missing
                if not grp.startswith(ENTRY_POINT_GROUP_PREFIX):
                    grp = ENTRY_POINT_GROUP_PREFIX + grp
                if grp not in valid_entry_point_groups:
                    raise ValueError('entry point group {} is not recognized'.format(grp))
                groups.append(grp)
            self._groups = tuple(groups)
        self._init_entry_points()
        self.load = load
        super(PluginParamType, self).__init__(*args, **kwargs)

    def _init_entry_points(self):
        """
        Populate entry point information that will be used later on. This should only be called
        once in the constructor after setting self.groups because the groups should not be changed
        after instantiation
        """
        self._entry_points = [(group, entry_point) for group in self.groups for entry_point in get_entry_points(group)]
        self._entry_point_names = [entry_point.name for group in self.groups for entry_point in get_entry_points(group)]

    @property
    def groups(self):
        return self._groups

    @property
    def has_potential_ambiguity(self):
        """
        Returns whether the set of supported entry point groups can lead to ambiguity when only an entry point name
        is specified. This will happen if one ore more groups share an entry point with a common name
        """
        return len(self._entry_point_names) != len(set(self._entry_point_names))

    def get_valid_arguments(self):
        """
        Return a list of all available plugins for the groups configured for this PluginParamType instance.
        If the entry point names are not unique, because there are multiple groups that contain an entry
        point that has an identical name, we need to prefix the names with the full group name
        :returns: list of valid entry point strings
        """
        if self.has_potential_ambiguity:
            fmt = EntryPointFormat.FULL
            return sorted([format_entry_point_string(group, ep.name, fmt=fmt) for group, ep in self._entry_points])
        return sorted(self._entry_point_names)

    def get_possibilities(self, incomplete=''):
        """
        Return a list of plugins starting with incomplete
        """
        if incomplete == '':
            return self.get_valid_arguments()
        # If there is a chance of ambiguity we always return the entry point string in FULL format, otherwise
        # return the possibilities in the same format as the incomplete. Note that this may have some unexpected
        # effects. For example if incomplete equals `aiida.` or `calculations` it will be detected as the MINIMAL
        # format, even though they would also be the valid beginnings of a FULL or PARTIAL format, except that we
        # cannot know that for sure at this time
        if self.has_potential_ambiguity:
            possibilities = [eps for eps in self.get_valid_arguments() if eps.startswith(incomplete)]
        else:
            possibilities = []
            fmt = get_entry_point_string_format(incomplete)
            for group, entry_point in self._entry_points:
                entry_point_string = format_entry_point_string(group, entry_point.name, fmt=fmt)
                if entry_point_string.startswith(incomplete):
                    possibilities.append(entry_point_string)
        return possibilities

    def complete(self, ctx, incomplete):  # pylint: disable=unused-argument
        """
        Return possible completions based on an incomplete value
        :returns: list of tuples of valid entry points (matching incomplete) and a description
        """
        return [(p, '') for p in self.get_possibilities(incomplete=incomplete)]

    def get_missing_message(self, param):
        return 'Possible arguments are:\n\n' + '\n'.join(self.get_valid_arguments())

    def get_entry_point_from_string(self, entry_point_string):
        """
        Validate a given entry point string, which means that it should have a valid entry point string format
        and that the entry point unambiguously corresponds to an entry point in the groups configured for this
        instance of PluginParameterType.
        :returns: the entry point if valid
        :raises: ValueError if the entry point string is invalid
        """
        group = None
        name = None
        entry_point_format = get_entry_point_string_format(entry_point_string)
        if entry_point_format in (EntryPointFormat.FULL, EntryPointFormat.PARTIAL):
            group, name = entry_point_string.split(ENTRY_POINT_STRING_SEPARATOR)
            if entry_point_format == EntryPointFormat.PARTIAL:
                group = ENTRY_POINT_GROUP_PREFIX + group
            if group not in self.groups:
                # bug fix: the group was never interpolated into the message
                raise ValueError('entry point group {} is not supported by this parameter'.format(group))
        elif entry_point_format == EntryPointFormat.MINIMAL:
            name = entry_point_string
            matching_groups = [group for group, entry_point in self._entry_points if entry_point.name == name]
            if len(matching_groups) > 1:
                raise ValueError("entry point '{}' matches more than one valid entry point group [{}], "
                                 "please specify an explicit group prefix".format(name, ' '.join(matching_groups)))
            elif not matching_groups:
                raise ValueError("entry point '{}' is not valid for any of the allowed "
                                 "entry point groups: {}".format(name, ' '.join(self.groups)))
            else:
                group = matching_groups[0]
        else:
            # bug fix: the exception was constructed but never raised, silently falling through
            raise ValueError('invalid entry point string format: {}'.format(entry_point_string))
        try:
            entry_point = get_entry_point(group, name)
        except exceptions.EntryPointError as exception:
            raise ValueError(exception)
        return entry_point

    @decorators.with_dbenv()
    def convert(self, value, param, ctx):
        """
        Convert the string value to an entry point instance, if the value can be successfully parsed
        into an actual entry point. Will raise click.BadParameter if validation fails.
        """
        if not value:
            raise click.BadParameter('plugin name cannot be empty')
        try:
            entry_point = self.get_entry_point_from_string(value)
        except ValueError as exception:
            raise click.BadParameter(str(exception))
        if self.load:
            try:
                return entry_point.load()
            except exceptions.LoadingEntryPointError as exception:
                raise click.BadParameter(str(exception))
        else:
            return entry_point
| aiida/cmdline/params/types/plugin.py | 9,286 | AiiDA Plugin name parameter type.
:param group: string or tuple of strings, where each is a valid entry point group. Adding the `aiida.`
prefix is optional. If it is not detected it will be prepended internally.
:param load: when set to True, convert will not return the entry point, but the loaded entry point
Usage::
click.option(... type=PluginParamType(group='aiida.calculations')
or::
click.option(... type=PluginParamType(group=('calculations', 'data'))
Validate that group is either a string or a tuple of valid entry point groups, or if it
is not specified use the tuple of all recognized entry point groups.
Populate entry point information that will be used later on. This should only be called
once in the constructor after setting self.groups because the groups should not be changed
after instantiation
Return possible completions based on an incomplete value
:returns: list of tuples of valid entry points (matching incomplete) and a description
Convert the string value to an entry point instance, if the value can be successfully parsed
into an actual entry point. Will raise click.BadParameter if validation fails.
Validate a given entry point string, which means that it should have a valid entry point string format
and that the entry point unambiguously corresponds to an entry point in the groups configured for this
instance of PluginParameterType.
:returns: the entry point if valid
:raises: ValueError if the entry point string is invalid
Return a list of plugins starting with incomplete
Return a list of all available plugins for the groups configured for this PluginParamType instance.
If the entry point names are not unique, because there are multiple groups that contain an entry
point that has an identical name, we need to prefix the names with the full group name
:returns: list of valid entry point strings
Returns whether the set of supported entry point groups can lead to ambiguity when only an entry point name
is specified. This will happen if one ore more groups share an entry point with a common name
Click parameter type for AiiDA Plugins.
-*- coding: utf-8 -*- Copyright (c), The AiiDA team. All rights reserved. This file is part of the AiiDA code. The code is hosted on GitHub at https://github.com/aiidateam/aiida_core For further information on the license, see the LICENSE.txt file For further information please visit http://www.aiida.net pylint: disable=keyword-arg-before-vararg If there is a chance of ambiguity we always return the entry point string in FULL format, otherwise return the possibilities in the same format as the incomplete. Note that this may have some unexpected effects. For example if incomplete equals `aiida.` or `calculations` it will be detected as the MINIMAL format, even though they would also be the valid beginnings of a FULL or PARTIAL format, except that we cannot know that for sure at this time pylint: disable=unused-argument | 3,089 | en | 0.786626 |
from BoundingBox import *
from eval_utils import *
class BoundingBoxes:
    """Container for BoundingBox objects with filtering by class, type and image.

    Boxes are kept in insertion order; the filter methods return new lists,
    but the contained BoundingBox objects are shared (not copied) unless
    clone() is used.
    """

    def __init__(self):
        self._boundingBoxes = []

    def addBoundingBox(self, bb):
        """Append a BoundingBox to the collection."""
        self._boundingBoxes.append(bb)

    def removeBoundingBox(self, _boundingBox):
        """Remove the first stored box that compares equal to ``_boundingBox``.

        Bug fix: the original code did ``del self._boundingBoxes[d]`` with the
        box object itself, which raises TypeError because list deletion needs
        an integer index; ``list.remove`` deletes by value instead.
        """
        for d in self._boundingBoxes:
            if BoundingBox.compare(d, _boundingBox):
                self._boundingBoxes.remove(d)
                return

    def removeAllBoundingBoxes(self):
        """Drop every stored bounding box."""
        self._boundingBoxes = []

    def getBoundingBoxes(self):
        """Return the internal list of all bounding boxes (not a copy)."""
        return self._boundingBoxes

    def getBoundingBoxByClass(self, classId):
        """Return all boxes whose class id equals ``classId``."""
        # get only specified bounding box type
        return [d for d in self._boundingBoxes if d.getClassId() == classId]

    def getClasses(self):
        """Return the distinct class ids, in first-seen order."""
        classes = []
        for d in self._boundingBoxes:
            c = d.getClassId()
            if c not in classes:
                classes.append(c)
        return classes

    def getBoundingBoxesByType(self, bbType):
        """Return all boxes of the given BBType (ground truth or detection)."""
        return [d for d in self._boundingBoxes if d.getBBType() == bbType]

    def getBoundingBoxesByImageName(self, imageName):
        """Return all boxes belonging to the given image."""
        return [d for d in self._boundingBoxes if d.getImageName() == imageName]

    def count(self, bbType=None):
        """Count boxes of the given BBType, or all boxes when bbType is None."""
        if bbType is None:  # Return all bounding boxes
            return len(self._boundingBoxes)
        return sum(1 for d in self._boundingBoxes if d.getBBType() == bbType)

    def clone(self):
        """Return a new container holding cloned copies of every box."""
        newBoundingBoxes = BoundingBoxes()
        for d in self._boundingBoxes:
            newBoundingBoxes.addBoundingBox(BoundingBox.clone(d))
        return newBoundingBoxes

    def drawAllBoundingBoxes(self, image, imageName):
        """Draw this image's boxes: ground truth in green, detections in red."""
        for bb in self.getBoundingBoxesByImageName(imageName):
            if bb.getBBType() == BBType.GroundTruth:  # if ground truth
                image = add_bb_into_image(image, bb, color=(0, 255, 0))  # green
            else:  # if detection
                image = add_bb_into_image(image, bb, color=(255, 0, 0))  # red
        return image
# def drawAllBoundingBoxes(self, image):
# for gt in self.getBoundingBoxesByType(BBType.GroundTruth):
# image = add_bb_into_image(image, gt ,color=(0,255,0))
# for det in self.getBoundingBoxesByType(BBType.Detected):
# image = add_bb_into_image(image, det ,color=(255,0,0))
# return image
| ssd_mobilenetv2/BoundingBoxes.py | 2,653 | get only specified bounding box type get only specified bb type get only specified bb type Return all bounding boxes get only specified bb type if ground truth green if detection red def drawAllBoundingBoxes(self, image): for gt in self.getBoundingBoxesByType(BBType.GroundTruth): image = add_bb_into_image(image, gt ,color=(0,255,0)) for det in self.getBoundingBoxesByType(BBType.Detected): image = add_bb_into_image(image, det ,color=(255,0,0)) return image | 487 | en | 0.338807 |
#!/usr/bin/env python
#============================================================================
# Copyright (C) Microsoft Corporation, All rights reserved.
#============================================================================
import os
import imp
import re
import codecs
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
LG = nxDSCLog.DSCLog
# backwards compatibility with pre-multi-homing bundles
conf_path = '/etc/opt/microsoft/omsagent/conf/omsagent.conf'
omi_map_path = '/etc/opt/microsoft/omsagent/conf/omsagent.d/omi_mapping.json'
omi_map = None
multi_homed = None
non_mh_heartbeat_cmd = '/opt/microsoft/omsagent/bin/omsadmin.sh -b'
oms_restart_cmd = 'sudo /opt/microsoft/omsagent/bin/service_control restart'
def init_paths(WorkspaceID):
    """Select conf/omi-mapping paths for the given workspace.

    When the agent is multi-homed (a per-workspace conf directory exists),
    repoint the module-level ``conf_path`` and ``omi_map_path`` globals at the
    workspace-specific files; otherwise the legacy single-workspace paths are
    kept as-is.
    """
    global conf_path
    global omi_map_path
    global multi_homed
    workspace_conf_dir = '/etc/opt/microsoft/omsagent/' + WorkspaceID + '/conf'
    multi_homed = os.path.isdir(workspace_conf_dir)
    if not multi_homed:
        return
    LG().Log('INFO', 'OMSAgent is multi-homed and resource is updating workspace ' + WorkspaceID)
    conf_path = workspace_conf_dir + '/omsagent.conf'
    omi_map_path = workspace_conf_dir + '/omsagent.d/omi_mapping.json'
def init_omi_map():
    """Load the OMI counter mapping file into the module-level ``omi_map``."""
    global omi_map
    txt = codecs.open(omi_map_path, 'r', 'utf8').read()
    # SECURITY NOTE(review): eval() on file contents executes arbitrary code
    # if the mapping file is ever writable by a lower-privileged user.
    # json.loads would be safer if the file is strict JSON -- confirm the
    # file's literal syntax (True/False vs true/false) before changing.
    omi_map = eval(txt)
def init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
    """Normalize marshalled MI arguments into plain Python values (in place).

    The *_Marshall entry points receive MI wrapper objects whose payload is
    reached via ``.value`` (and ``.value.value`` for nested integers/booleans).
    This unwraps each field of every PerfCounterObject entry and ASCII-encodes
    strings (Python 2 byte strings).  Also initializes paths and the OMI map.
    """
    init_paths(WorkspaceID)
    init_omi_map()
    if WorkspaceID is not None:
        WorkspaceID = WorkspaceID.encode('ascii', 'ignore')
    else:
        WorkspaceID = ''
    if PerfCounterObject is not None:
        for perf in PerfCounterObject:
            new_perfs = []
            # Replace the MI string array with a plain list of byte strings.
            if len(perf['PerformanceCounter'].value):
                for perf_counter in perf['PerformanceCounter'].value:
                    new_perfs.append(perf_counter.encode('ascii', 'ignore'))
                perf['PerformanceCounter'] = new_perfs
            if perf['InstanceName'].value is None:
                perf['InstanceName'] = ''
            else:
                perf['InstanceName'] = perf[
                    'InstanceName'].value.encode('ascii', 'ignore')
            if perf['ObjectName'].value is None:
                perf['ObjectName'] = ''
            else:
                perf['ObjectName'] = perf[
                    'ObjectName'].value.encode('ascii', 'ignore')
            # MI booleans nest the payload one level deeper (.value.value).
            if perf['AllInstances'].value is None:
                perf['AllInstances'] = False
            else:
                if perf['AllInstances'].value.value == 1:
                    perf['AllInstances'] = True
                else:
                    perf['AllInstances'] = False
            perf['IntervalSeconds'] = perf['IntervalSeconds'].value.value
def Set_Marshall(Name, WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
    """DSC entry point: unwrap MI arguments, then apply the configuration."""
    init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
    return Set(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
def Test_Marshall(Name, WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
    """DSC entry point: unwrap MI arguments, then test for compliance."""
    init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
    return Test(HeartbeatIntervalSeconds, PerfCounterObject)
def Get_Marshall(Name, WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
    """DSC entry point: read current config and re-wrap it in MI types.

    Returns ``(0, dict)`` where the dict maps every parameter name to its
    MI-typed current value.
    """
    # Capture the parameter names *before* any locals are added; the loop at
    # the bottom re-reads these same names from locals() after rebinding them
    # to MI-typed values -- the whole function depends on this ordering.
    arg_names = list(locals().keys())
    init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
    retval = 0
    NewHeartbeatIntervalSeconds, NewPerf = Get(
        HeartbeatIntervalSeconds, PerfCounterObject)
    # Wrap each field back into the MI protocol types expected by the engine.
    for perf in NewPerf:
        if len(perf['PerformanceCounter']):
            perf['PerformanceCounter'] = protocol.MI_StringA(
                perf['PerformanceCounter'])
        perf['ObjectName'] = protocol.MI_String(perf['ObjectName'])
        perf['InstanceName'] = protocol.MI_String(perf['InstanceName'])
        perf['AllInstances'] = protocol.MI_Boolean(perf['AllInstances'])
        perf['IntervalSeconds'] = protocol.MI_Uint16(perf['IntervalSeconds'])
    PerfCounterObject = protocol.MI_InstanceA(NewPerf)
    HeartbeatIntervalSeconds = protocol.MI_Uint16(NewHeartbeatIntervalSeconds)
    WorkspaceID = protocol.MI_String(WorkspaceID)
    Name = protocol.MI_String(Name)
    retd = {}
    ld = locals()
    for k in arg_names:
        retd[k] = ld[k]
    return retval, retd
def Set(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
    """Apply the desired configuration; no-op when Test() already passes.

    Returns [0] on success, [-1] when the configuration update failed.
    """
    already_compliant = Test(HeartbeatIntervalSeconds, PerfCounterObject) == [0]
    if already_compliant:
        return [0]
    updated = UpdateOMSAgentConf(
        WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
    return [0] if updated else [-1]
def Test(HeartbeatIntervalSeconds, PerfCounterObject):
    """Compare the desired configuration against what is on disk.

    Returns [0] when the on-disk config matches the desired one, [-1]
    otherwise.  NOTE: this mutates PerfCounterObject in place (unmatched
    counters pruned, lists sorted, AllInstances forced True) so that it can
    be compared element-wise with the parsed on-disk config.
    """
    prune_perfs(PerfCounterObject)
    NewHeartbeatIntervalSeconds, NewPerfs = ReadOMSAgentConf(
        HeartbeatIntervalSeconds, PerfCounterObject)
    if NewHeartbeatIntervalSeconds != HeartbeatIntervalSeconds:
        return [-1]
    # Sorting lists of dicts relies on Python 2 cross-type comparison;
    # both sides are sorted the same way so equality comparison is stable.
    PerfCounterObject.sort()
    for perf in PerfCounterObject:
        perf['PerformanceCounter'].sort()
        perf['AllInstances'] = True
    NewPerfs.sort()
    for perf in NewPerfs:
        perf['PerformanceCounter'].sort()
    if PerfCounterObject != NewPerfs:
        return [-1]
    return [0]
def Get(HeartbeatIntervalSeconds, PerfCounterObject):
    """Return ``(heartbeat_seconds, perf_objects)`` as parsed from disk."""
    return ReadOMSAgentConf(HeartbeatIntervalSeconds, PerfCounterObject)
def TranslatePerfs(object_name, perfs):
    """Match requested counters against omi_map for one CIM object.

    Returns a dict mapping the object name to the subset of ``perfs`` that
    matches a CounterName or CimPropertyName in the OMI mapping; a counter
    matching several property entries is appended once per match.
    """
    translated = {}
    for counter in perfs:
        for mapping in omi_map:
            if mapping['ObjectName'] != object_name:
                continue
            for prop in mapping['CimProperties']:
                if counter in (prop['CounterName'], prop['CimPropertyName']):
                    translated.setdefault(mapping['ObjectName'], []).append(counter)
    return translated
def ReadOMSAgentConf(HeartbeatIntervalSeconds, PerfCounterObject):
    """Parse omsagent.conf and return ``(heartbeat_seconds, perf_objects)``.

    heartbeat_seconds is None when no heartbeat <source> block is found.
    perf_objects is a list of dicts in the same shape as PerfCounterObject,
    rebuilt from the oms_omi <source> blocks in the config file.
    """
    txt = ''
    try:
        txt = codecs.open(conf_path, 'r', 'utf8').read().encode(
            'ascii', 'ignore')
        LG().Log('INFO', 'Read omsagent configuration ' + conf_path + '.')
    except:
        LG().Log(
            'ERROR', 'Unable to read omsagent configuration ' + conf_path + '.')
    # Heartbeat interval, e.g. "run_interval 60s" or "5m" (minutes -> seconds).
    heartbeat_srch_str = r'<source>.*?tag heartbeat.*?run_interval ([0-9]+[a-z])\n</source>\n'
    heartbeat_srch = re.compile(heartbeat_srch_str, re.M | re.S)
    m = heartbeat_srch.search(txt)
    if m is not None:
        interval = int(m.group(1)[:-1])
        if m.group(1)[-1:] == 'm':
            interval *= 60
    else:
        interval = None
    new_heartbeat = interval
    # One match per oms_omi source block: (object, instance regex, counters, interval).
    perf_src_srch_str = r'\n<source>\n type oms_omi.*?object_name "(.*?)".*?instance_regex "(.*?)".*?counter_name_regex "(.*?)".*?interval ([0-9]+[a-z]).*?</source>\n'
    perf_src_srch = re.compile(perf_src_srch_str, re.M | re.S)
    new_perfobj = []
    sources = perf_src_srch.findall(txt)
    inst = ''
    interval = 0
    for source in sources:
        s_perf = []
        # counter_name_regex is written as "(a|b|c)" -- split back into a list.
        if len(source[2]):
            s_perf = source[2].strip('(').strip(')').split('|')
        object_name = source[0]
        interval = int(source[3][:-1])
        if source[3][-1:] == 'm':
            interval *= 60
        inst = source[1]
        # Undo the glob->regex translation done by UpdateOMSAgentConf.
        inst = inst.replace('.*', '*')
        new_perfobj.append({'PerformanceCounter': s_perf, 'InstanceName': inst,
                            'IntervalSeconds': interval, 'AllInstances': True, 'ObjectName': object_name})
    return new_heartbeat, new_perfobj
def UpdateOMSAgentConf(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
    """Rewrite omsagent.conf with the desired heartbeat and perf sources,
    then restart the agent.

    Returns True on success, False when the file could not be written or the
    restart command failed.  (Python 2 module: ``reduce`` is the builtin.)
    """
    if os.path.exists(conf_path):
        txt = codecs.open(conf_path, 'r', 'utf8').read().encode(
            'ascii', 'ignore')
        LG().Log('INFO', 'Read omsagent configuration ' + conf_path + '.')
    else:
        LG().Log(
            'INFO', 'No omsagent configuration file present. Will create new configuration file at ' + conf_path + '.')
        txt = ''
    # Replace the heartbeat <source> block (multi-homed agents heartbeat
    # through their own mechanism, so only run a cheap 'echo' there).
    heartbeat_srch_str = r'<source>.*?tag heartbeat.*?</source>\n'
    heartbeat_srch = re.compile(heartbeat_srch_str, re.M | re.S)
    heartbeat_cmd = non_mh_heartbeat_cmd
    if multi_homed:
        heartbeat_cmd = 'echo'
    heartbeat_src = '<source>\n type exec\n tag heartbeat.output\n command ' + heartbeat_cmd + ' > /dev/null\n format tsv\n keys severity,message\n run_interval ' + \
        str(HeartbeatIntervalSeconds) + 's\n</source>\n'
    txt = heartbeat_srch.sub(heartbeat_src, txt)
    d = {}
    # Drop all existing oms_omi source blocks; they are regenerated below.
    perf_src_srch_str = r'\n<source>\n type oms_omi.*?</source>\n'
    perf_src_srch = re.compile(perf_src_srch_str, re.M | re.S)
    for source in perf_src_srch.findall(txt):
        txt = txt.replace(source, '')
    new_source = ''
    for perf in PerfCounterObject:
        d = TranslatePerfs(perf['ObjectName'], perf['PerformanceCounter'])
        for k in d.keys():
            # Counters become an alternation regex; instance globs become '.*'.
            names = '(' + reduce(lambda x, y: x + '|' + y, d[k]) + ')'
            instances = re.sub(r'([><]|>|<)', '', perf['InstanceName'])
            instances = re.sub(r'([*])', '.*', instances)
            new_source += '\n<source>\n type oms_omi\n object_name "' + k + '"\n instance_regex "' + instances + \
                '"\n counter_name_regex "' + names + '"\n interval ' + \
                str(perf['IntervalSeconds']) + 's\n</source>\n'
    m = heartbeat_srch.search(txt)
    if m is not None:
        i = m.end(0) + 1
        txt = txt[:i] + new_source + txt[i:]
    else:
        txt = new_source
    try:
        codecs.open(conf_path, 'w', 'utf8').write(txt)
        LG().Log(
            'INFO', 'Created omsagent configuration at ' + conf_path + '.')
    except:
        LG().Log(
            'ERROR', 'Unable to create omsagent configuration at ' + conf_path + '.')
        return False
    global oms_restart_cmd
    # Bug fix: restart_cmd was never initialized before the '+=' below, so the
    # multi-homed path (and the os.system call) raised NameError.
    restart_cmd = oms_restart_cmd
    process_to_restart = 'omsagent'
    if multi_homed:
        restart_cmd += ' ' + WorkspaceID
        process_to_restart += '-' + WorkspaceID
    if os.system(restart_cmd) == 0:
        LG().Log('INFO', 'Successfully restarted ' + process_to_restart + '.')
    else:
        LG().Log('ERROR', 'Error restarting ' + process_to_restart + '.')
        return False
    return True
def rm_unicode(obj):
    """Recursively replace unicode strings with ASCII byte strings.

    Walks dicts (keys and values) and lists; any other type is returned
    unchanged.  Python 2 only (uses ``dict.iteritems`` and ``unicode``).
    """
    if isinstance(obj, dict):
        return dict((rm_unicode(key), rm_unicode(value))
                    for key, value in obj.iteritems())
    if isinstance(obj, list):
        return [rm_unicode(item) for item in obj]
    if isinstance(obj, unicode):
        return obj.encode('ascii', 'ignore')
    return obj
def prune_perfs(PerfCounterObject):
    """Drop counters (and whole entries) with no match in omi_mapping.json.

    Mutates PerfCounterObject in place.  Bug fix: the inner loop removed items
    from the very list it was iterating, which makes Python skip the element
    following each removal; iterating over a copy ('list(...)') fixes that.
    """
    l = len(PerfCounterObject)
    i = 0
    while i < l:
        d = TranslatePerfs(PerfCounterObject[i]['ObjectName'], PerfCounterObject[i]['PerformanceCounter'])
        if PerfCounterObject[i]['ObjectName'] in d.keys():
            for p in list(PerfCounterObject[i]['PerformanceCounter']):
                if p not in d[PerfCounterObject[i]['ObjectName']]:
                    LG().Log('INFO', 'No match for PerformanceCounter \'' \
                            + p + '\' in ' \
                            + repr(PerfCounterObject[i]['ObjectName']) + ' in omi_mapping.json, ignoring.')
                    PerfCounterObject[i]['PerformanceCounter'].remove(p)
            if len(PerfCounterObject[i]['PerformanceCounter']) == 0:
                # All counters pruned: drop the entry and re-check the element
                # that shifted into slot i.
                PerfCounterObject.pop(i)
                l -= 1
                i -= 1
        else:
            LG().Log('INFO', 'No matches for ObjectName ' \
                    + repr(PerfCounterObject[i]['ObjectName']) + ' and PerformanceCounter ' \
                    + repr(PerfCounterObject[i]['PerformanceCounter']) + ' in omi_mapping.json, ignoring.')
            PerfCounterObject.pop(i)
            l -= 1
            i -= 1
        i += 1
| Providers/Scripts/2.4x-2.5x/Scripts/nxOMSPerfCounter.py | 11,843 | !/usr/bin/env python============================================================================ Copyright (C) Microsoft Corporation, All rights reserved.============================================================================ backwards compatibility with pre-multi-homing bundles | 284 | en | 0.49184 |
"""Parses the arguments passed to the bash script and returns them back to the bash script."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import re
import sys
# Technique for printing custom error and help
# Source: https://stackoverflow.com/a/4042861/862857
class CustomParser(argparse.ArgumentParser):
    """ArgumentParser that prints the error plus the full help text on failure.

    Technique from https://stackoverflow.com/a/4042861/862857 -- the default
    parser only prints a short usage line on error.
    """

    def error(self, message):
        sys.stderr.write('{}: error: {}\n'.format(self.prog, message))
        self.print_help()
        sys.exit(1)
parser = CustomParser(prog='create_binauthz_attestation')
# By default, arguments with "--" are optional, so we have
# to make our own argument group so they are required
required_arguments = parser.add_argument_group('required arguments')
required_arguments.add_argument(
    '--artifact-url',
    type=str,
    help='Registry URL for container image',
    required=True)
# The attestor may be a bare name (combined with --attestor-project) or a
# fully qualified "projects/<p>/attestors/<a>" identifier; normalization and
# validation happen after parsing.
attestor_args = parser.add_argument_group('Attestor arguments')
attestor_args.add_argument(
    '--attestor',
    type=str,
    help='Fully qualified attestor name or just the attestor name',
    required=True)
attestor_args.add_argument(
    '--attestor-project',
    type=str,
    help='The project that the attestor is a part of')
# Exactly one signing mechanism must be supplied: either a PGP fingerprint or
# the KMS keyversion flags (mutual exclusion is enforced after parsing).
pgp_args = parser.add_argument_group('PGP key arguments')
pgp_args.add_argument(
    '--pgp-key-fingerprint',
    type=str,
    help='The fingerprint of the PGP key you plan to use')
# If the user is using KMS, they should provide:
kms_args = parser.add_argument_group('KMS key arguments')
kms_args.add_argument(
    '--keyversion',
    type=str,
    help='The fully qualified keyversion or the version number of the KMS key')
kms_args.add_argument(
    '--keyversion-key', type=str, help='The name of the KMS key')
kms_args.add_argument(
    '--keyversion-keyring', type=str, help='The keyring for the KMS key')
kms_args.add_argument(
    '--keyversion-location', type=str, help='The location of the KMS key')
kms_args.add_argument(
    '--keyversion-project',
    type=str,
    help='The project that the KMS key belongs to')
args = parser.parse_args()
# Validate and parse attestor resource flags.
# A bare attestor name is expanded into the fully qualified form using
# --attestor-project; parser.error() prints help and exits with status 1.
if '/' not in args.attestor:
    if not args.attestor_project:
        parser.error('The --attestor-project option is required if '
                     '--attestor is not a fully qualified '
                     'Attestor resource identifier')
    else:
        args.attestor = 'projects/{project}/attestors/{attestor}'.format(
            project=args.attestor_project, attestor=args.attestor)
attestor_regex = re.compile(r'^projects/[a-z0-9-]*/attestors/[a-zA-Z0-9-_]*$')
if not attestor_regex.search(args.attestor):
    parser.error('Attestor "{attestor}" is not '
                 'a valid attestor name'.format(attestor=args.attestor))
# Enforce mutual exclusion of key flag types.
keyversion_args = [
    args.keyversion, args.keyversion_key, args.keyversion_keyring,
    args.keyversion_location, args.keyversion_project
]
if args.pgp_key_fingerprint and any(keyversion_args):
    parser.error('You cannot set --pgp-key-fingerprint and --keyversion related'
                 ' options at the same time.')
if not args.pgp_key_fingerprint and not any(keyversion_args):
    parser.error('Either --pgp-key-fingerprint or --keyversion related'
                 ' options must be set.')
# Validate and parse keyversion resource flags.
# A bare version number requires all the component flags so the fully
# qualified resource name can be assembled.
if args.keyversion is not None and '/' not in args.keyversion:
    if not all(keyversion_args):
        parser.error(
            'The --keyversion-key, --keyversion-keyring, --keyversion-location, '
            'and --keyversion-project options are required if --keyversion '
            'is not a fully qualified KMS key resource identifier.')
    else:
        args.keyversion = (
            'projects/{project}/locations/{location}/keyRings/{keyRing}/'
            'cryptoKeys/{cryptoKey}/cryptoKeyVersions/{keyversion}').format(
                project=args.keyversion_project,
                location=args.keyversion_location,
                keyRing=args.keyversion_keyring,
                cryptoKey=args.keyversion_key,
                keyversion=args.keyversion)
keyversion_regex = re.compile(r'^projects/[a-z0-9-]*/locations/[a-z0-9-]*'
                              r'/keyRings/[a-zA-Z0-9-_]*/cryptoKeys/'
                              r'[a-zA-Z0-9-_]*/cryptoKeyVersions/[1-9][0-9]*$')
if args.keyversion is not None and not keyversion_regex.search(args.keyversion):
    parser.error('"{}" is not a valid fully qualified KMS key identifier.'.format(
        args.keyversion))
# Emit the parsed values as lines of [name]="value" pairs, which the calling
# bash script consumes (None becomes an empty string).
arguments_list = []
for arg_name, value in args.__dict__.items():
    arguments_list.append('[{name}]="{value}"'.format(
        name=arg_name, value=value or ''))
print('\n'.join(arguments_list))
| binauthz-attestation/parse_arguments.py | 4,687 | Parses the arguments passed to the bash script and returns them back to the bash script.
Technique for printing custom error and help Source: https://stackoverflow.com/a/4042861/862857 By default, arguments with "--" are optional, so we have to make our own argument group so they are required If the user is using KMS, they should provide: Validate and parse attestor resource flags. Enforce mutual exclusion of key flag types. Validate and parse keyversion resource flags. | 476 | en | 0.744523 |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import enum
import typing
from . import abstract
from . import base_con
from . import enums
from . import errors
from . import options
from .datatypes import datatypes
from .protocol import protocol
__all__ = ('Transaction', 'AsyncIOTransaction')
class TransactionState(enum.Enum):
    """Lifecycle states of a transaction block."""
    NEW = 0          # created but not yet started
    STARTED = 1      # start query executed successfully
    COMMITTED = 2    # COMMIT completed
    ROLLEDBACK = 3   # ROLLBACK completed
    FAILED = 4       # a start/commit/rollback attempt raised
class BaseTransaction:
    """Shared state machine for sync and async transactions.

    Owned either by a single connection or by a pool (in which case a
    connection is acquired lazily when the transaction starts).  Subclasses
    drive the state transitions via the _make_*_query helpers, which enforce
    valid transitions and raise InterfaceError otherwise.
    """

    __slots__ = (
        '_connection',
        '_connection_inner',
        '_connection_impl',
        '_pool',
        '_options',
        '_state',
        '_managed',
    )

    def __init__(self, owner, options: options.TransactionOptions):
        # owner is either a BaseConnection or a pool; exactly one of
        # self._connection / self._pool is set here.
        if isinstance(owner, base_con.BaseConnection):
            self._connection = owner
            self._connection_inner = owner._inner
            self._pool = None
        else:
            self._connection = None
            self._connection_inner = None
            self._pool = owner
        self._connection_impl = None
        self._options = options
        self._state = TransactionState.NEW
        self._managed = False

    def is_active(self) -> bool:
        """Return True while the transaction is started and not yet finished."""
        return self._state is TransactionState.STARTED

    def __check_state_base(self, opname):
        # Reject operations on a transaction that has already terminated.
        if self._state is TransactionState.COMMITTED:
            raise errors.InterfaceError(
                'cannot {}; the transaction is already committed'.format(
                    opname))
        if self._state is TransactionState.ROLLEDBACK:
            raise errors.InterfaceError(
                'cannot {}; the transaction is already rolled back'.format(
                    opname))
        if self._state is TransactionState.FAILED:
            raise errors.InterfaceError(
                'cannot {}; the transaction is in error state'.format(
                    opname))

    def __check_state(self, opname):
        # Like __check_state_base, but additionally requires STARTED state.
        if self._state is not TransactionState.STARTED:
            if self._state is TransactionState.NEW:
                raise errors.InterfaceError(
                    'cannot {}; the transaction is not yet started'.format(
                        opname))
            self.__check_state_base(opname)

    def _make_start_query(self):
        """Validate state and return the START TRANSACTION query text."""
        self.__check_state_base('start')
        if self._state is TransactionState.STARTED:
            raise errors.InterfaceError(
                'cannot start; the transaction is already started')
        return self._options.start_transaction_query()

    def _make_commit_query(self):
        """Validate state and return the COMMIT query text."""
        self.__check_state('commit')
        return 'COMMIT;'

    def _make_rollback_query(self):
        """Validate state and return the ROLLBACK query text."""
        self.__check_state('rollback')
        return 'ROLLBACK;'

    def _borrow(self):
        # Mark the connection as in use by this transaction so concurrent
        # use of the same connection is rejected with a descriptive error.
        inner = self._connection_inner
        if inner._borrowed_for:
            raise base_con.borrow_error(inner._borrowed_for)
        inner._borrowed_for = base_con.BorrowReason.TRANSACTION

    def _maybe_return(self):
        # Release the borrow marker set in _borrow(), if any.
        if self._connection_inner is not None:
            self._connection_inner._borrowed_for = None

    def __repr__(self):
        attrs = []
        attrs.append('state:{}'.format(self._state.name.lower()))
        attrs.append(repr(self._options))

        if self.__class__.__module__.startswith('edgedb.'):
            mod = 'edgedb'
        else:
            mod = self.__class__.__module__

        return '<{}.{} {} {:#x}>'.format(
            mod, self.__class__.__name__, ' '.join(attrs), id(self))
class BaseAsyncIOTransaction(BaseTransaction, abstract.AsyncIOExecutor):
    """Async transaction core: start/commit/rollback plus query execution."""

    __slots__ = ()

    async def _start(self, single_connect=False) -> None:
        """Acquire a connection (from the pool if pool-owned) and begin.

        Sets the state to STARTED, or FAILED if the start query raises.
        """
        query = self._make_start_query()
        if self._pool is not None:
            self._connection = await self._pool._acquire()
            self._connection_inner = self._connection._inner
        inner = self._connection_inner
        if not inner._impl or inner._impl.is_closed():
            await self._connection._reconnect(single_attempt=single_connect)
        self._connection_impl = self._connection._inner._impl
        try:
            # privileged_execute bypasses normal capability checks for
            # transaction control statements.
            await self._connection_impl.privileged_execute(query)
        except BaseException:
            self._state = TransactionState.FAILED
            raise
        else:
            self._state = TransactionState.STARTED

    async def _commit(self):
        """Issue COMMIT; always un-borrow and release a pooled connection."""
        try:
            query = self._make_commit_query()
            try:
                await self._connection_impl.privileged_execute(query)
            except BaseException:
                self._state = TransactionState.FAILED
                raise
            else:
                self._state = TransactionState.COMMITTED
        finally:
            self._maybe_return()
            if self._pool is not None:
                await self._pool._release(self._connection)

    async def _rollback(self):
        """Issue ROLLBACK; always un-borrow and release a pooled connection."""
        try:
            query = self._make_rollback_query()
            try:
                await self._connection_impl.privileged_execute(query)
            except BaseException:
                self._state = TransactionState.FAILED
                raise
            else:
                self._state = TransactionState.ROLLEDBACK
        finally:
            self._maybe_return()
            if self._pool is not None:
                await self._pool._release(self._connection)

    async def _ensure_transaction(self):
        # No-op hook invoked before every query; presumably overridden by
        # subclasses that start lazily -- confirm against callers.
        pass

    async def query(self, query: str, *args, **kwargs) -> datatypes.Set:
        """Run a query inside the transaction and return the result set."""
        await self._ensure_transaction()
        con = self._connection_inner
        result, _ = await self._connection_impl._protocol.execute_anonymous(
            query=query,
            args=args,
            kwargs=kwargs,
            reg=con._codecs_registry,
            qc=con._query_cache,
            io_format=protocol.IoFormat.BINARY,
        )
        return result

    async def query_single(self, query: str, *args, **kwargs) -> typing.Any:
        """Run a query that must return exactly one element."""
        await self._ensure_transaction()
        con = self._connection_inner
        result, _ = await self._connection_impl._protocol.execute_anonymous(
            query=query,
            args=args,
            kwargs=kwargs,
            reg=con._codecs_registry,
            qc=con._query_cache,
            expect_one=True,
            io_format=protocol.IoFormat.BINARY,
        )
        return result

    async def query_json(self, query: str, *args, **kwargs) -> str:
        """Run a query and return the result serialized as JSON."""
        await self._ensure_transaction()
        con = self._connection_inner
        result, _ = await self._connection_impl._protocol.execute_anonymous(
            query=query,
            args=args,
            kwargs=kwargs,
            reg=con._codecs_registry,
            qc=con._query_cache,
            io_format=protocol.IoFormat.JSON,
        )
        return result

    async def query_single_json(self, query: str, *args, **kwargs) -> str:
        """Run a single-element query and return the result as JSON."""
        await self._ensure_transaction()
        con = self._connection_inner
        result, _ = await self._connection_impl._protocol.execute_anonymous(
            query=query,
            args=args,
            kwargs=kwargs,
            reg=con._codecs_registry,
            qc=con._query_cache,
            expect_one=True,
            io_format=protocol.IoFormat.JSON,
        )
        return result

    async def execute(self, query: str) -> None:
        """Execute an EdgeQL command (or commands).

        Example:

        .. code-block:: pycon

            >>> await con.execute('''
            ...     CREATE TYPE MyType { CREATE PROPERTY a -> int64 };
            ...     FOR x IN {100, 200, 300} UNION INSERT MyType { a := x };
            ... ''')
        """
        await self._ensure_transaction()
        await self._connection_impl._protocol.simple_query(
            query, enums.Capability.EXECUTE)
class AsyncIOTransaction(BaseAsyncIOTransaction):
    """Async transaction usable as an ``async with`` block or driven manually.

    While inside an ``async with`` block the manual commit()/rollback()
    methods are disabled; the block commits on normal exit and rolls back
    when an exception escapes.
    """

    __slots__ = ()

    async def __aenter__(self):
        if self._managed:
            raise errors.InterfaceError(
                'cannot enter context: already in an `async with` block')
        self._managed = True
        await self.start()
        return self

    async def __aexit__(self, extype, ex, tb):
        try:
            if extype is None:
                await self._commit()
            else:
                await self._rollback()
        finally:
            self._managed = False

    async def start(self) -> None:
        """Begin the transaction and mark the connection as borrowed."""
        await self._start()
        self._borrow()

    async def commit(self) -> None:
        """Commit the transaction (disallowed inside ``async with``)."""
        if self._managed:
            raise errors.InterfaceError(
                'cannot manually commit from within an `async with` block')
        await self._commit()

    async def rollback(self) -> None:
        """Roll back the transaction (disallowed inside ``async with``)."""
        if self._managed:
            raise errors.InterfaceError(
                'cannot manually rollback from within an `async with` block')
        await self._rollback()
class BaseBlockingIOTransaction(BaseTransaction, abstract.Executor):
    """Blocking (synchronous) transaction core, mirroring the async variant."""

    __slots__ = ()

    def _start(self, single_connect=False) -> None:
        """Reconnect if needed and begin the transaction on the connection."""
        query = self._make_start_query()
        # no pools supported for blocking con
        inner = self._connection_inner
        if not inner._impl or inner._impl.is_closed():
            self._connection._reconnect(single_attempt=single_connect)
        self._connection_inner = self._connection._inner
        self._connection_impl = self._connection_inner._impl
        try:
            # privileged_execute bypasses normal capability checks for
            # transaction control statements.
            self._connection_impl.privileged_execute(query)
        except BaseException:
            self._state = TransactionState.FAILED
            raise
        else:
            self._state = TransactionState.STARTED

    def _commit(self):
        """Issue COMMIT; always clear the connection borrow marker."""
        try:
            query = self._make_commit_query()
            try:
                self._connection_impl.privileged_execute(query)
            except BaseException:
                self._state = TransactionState.FAILED
                raise
            else:
                self._state = TransactionState.COMMITTED
        finally:
            self._maybe_return()

    def _rollback(self):
        """Issue ROLLBACK; always clear the connection borrow marker."""
        try:
            query = self._make_rollback_query()
            try:
                self._connection_impl.privileged_execute(query)
            except BaseException:
                self._state = TransactionState.FAILED
                raise
            else:
                self._state = TransactionState.ROLLEDBACK
        finally:
            self._maybe_return()

    def _ensure_transaction(self):
        # No-op hook invoked before every query; presumably overridden by
        # subclasses that start lazily -- confirm against callers.
        pass

    def query(self, query: str, *args, **kwargs) -> datatypes.Set:
        """Run a query inside the transaction and return the result set."""
        self._ensure_transaction()
        con = self._connection_inner
        return self._connection_impl._protocol.sync_execute_anonymous(
            query=query,
            args=args,
            kwargs=kwargs,
            reg=con._codecs_registry,
            qc=con._query_cache,
            io_format=protocol.IoFormat.BINARY,
        )

    def query_single(self, query: str, *args, **kwargs) -> typing.Any:
        """Run a query that must return exactly one element."""
        self._ensure_transaction()
        con = self._connection_inner
        return self._connection_impl._protocol.sync_execute_anonymous(
            query=query,
            args=args,
            kwargs=kwargs,
            reg=con._codecs_registry,
            qc=con._query_cache,
            expect_one=True,
            io_format=protocol.IoFormat.BINARY,
        )

    def query_json(self, query: str, *args, **kwargs) -> str:
        """Run a query and return the result serialized as JSON."""
        self._ensure_transaction()
        con = self._connection_inner
        return self._connection_impl._protocol.sync_execute_anonymous(
            query=query,
            args=args,
            kwargs=kwargs,
            reg=con._codecs_registry,
            qc=con._query_cache,
            io_format=protocol.IoFormat.JSON,
        )

    def query_single_json(self, query: str, *args, **kwargs) -> str:
        """Run a single-element query and return the result as JSON."""
        self._ensure_transaction()
        con = self._connection_inner
        return self._connection_impl._protocol.sync_execute_anonymous(
            query=query,
            args=args,
            kwargs=kwargs,
            reg=con._codecs_registry,
            qc=con._query_cache,
            expect_one=True,
            io_format=protocol.IoFormat.JSON,
        )

    def execute(self, query: str) -> None:
        """Execute an EdgeQL command (or commands) without returning a result."""
        self._ensure_transaction()
        self._connection_impl._protocol.sync_simple_query(
            query, enums.Capability.EXECUTE)
class Transaction(BaseBlockingIOTransaction):
    """Blocking transaction usable as a ``with`` block or driven manually.

    While inside a ``with`` block the manual commit()/rollback() methods are
    disabled; the block commits on normal exit and rolls back when an
    exception escapes.
    """

    def __enter__(self):
        if self._managed:
            raise errors.InterfaceError(
                'cannot enter context: already in a `with` block')
        self._managed = True
        self.start()
        return self

    def __exit__(self, extype, ex, tb):
        try:
            if extype is None:
                self._commit()
            else:
                self._rollback()
        finally:
            self._managed = False

    def start(self) -> None:
        """Begin the transaction and mark the connection as borrowed."""
        self._start()
        self._borrow()

    def commit(self) -> None:
        """Commit the transaction (disallowed inside a ``with`` block)."""
        if self._managed:
            raise errors.InterfaceError(
                'cannot manually commit from within a `with` block')
        self._commit()

    def rollback(self) -> None:
        """Roll back the transaction (disallowed inside a ``with`` block)."""
        if self._managed:
            raise errors.InterfaceError(
                'cannot manually rollback from within a `with` block')
        self._rollback()
| edgedb/transaction.py | 14,321 | Exit the transaction or savepoint block and commit changes.
Exit the transaction or savepoint block and rollback changes.
Enter the transaction or savepoint block.
This source file is part of the EdgeDB open source project. Copyright 2016-present MagicStack Inc. and the EdgeDB authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. no pools supported for blocking con | 846 | en | 0.848688 |
import os
import datetime
import logging
import sqlite3
import pytest
from utils import setup_mdb_dir, all_book_info, load_db_from_sql_file, TESTS_DIR
from manga_db.manga_db import MangaDB
from manga_db.manga import Book
from manga_db.ext_info import ExternalInfo
from manga_db.constants import LANG_IDS
@pytest.mark.parametrize("title_eng, title_foreign, expected", [
    ("English", "Foreign", "English / Foreign"),
    ("English", None, "English"),
    (None, "Foreign", "Foreign")])
def test_build_title(title_eng, title_foreign, expected):
    # Both titles combine with " / "; a missing one falls back to the other.
    built = Book.build_title(title_eng, title_foreign)
    assert built == expected
def test_fetch_extinfo(monkeypatch, setup_mdb_dir):
    """Book._fetch_external_infos returns the ExternalInfo rows for its id."""
    tmpdir = setup_mdb_dir
    os.chdir(tmpdir)
    mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
    # Load the SQL fixture into an in-memory DB and patch MangaDB to use it.
    memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
    monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
                        lambda x, y, z: (memdb, None))
    mdb = MangaDB(tmpdir, mdb_file)
    b = Book(mdb, in_db=False, id=16)
    # A book constructed with in_db=False starts with no external infos.
    assert b.ext_infos == []
    db_con = memdb
    # Build the expected ExternalInfo objects straight from the fixture rows.
    ei_rows_man = db_con.execute("SELECT * FROM ExternalInfo WHERE id IN (16, 18)").fetchall()
    ei1 = ExternalInfo(mdb, b, **ei_rows_man[0])
    ei2 = ExternalInfo(mdb, b, **ei_rows_man[1])
    assert b._fetch_external_infos() == [ei1, ei2]
def test_fetch_assoc_col(monkeypatch, setup_mdb_dir):
    """Book._fetch_associated_column returns tag/artist/character lists."""
    tmpdir = setup_mdb_dir
    os.chdir(tmpdir)
    mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
    # Load the SQL fixture into an in-memory DB and patch MangaDB to use it.
    memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
    monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
                        lambda x, y, z: (memdb, None))
    mdb = MangaDB(tmpdir, mdb_file)
    b = Book(mdb, in_db=False, id=14)
    # Expected values taken from the fixture data for book id 14.
    tags = ["Ahegao", "Anal", "Collar", "Large Breasts", "Maid", "Mind Break",
            "Mind Control", "Nakadashi", "Office Lady", "Pantyhose", "Rape", "Stockings",
            "X-ray"]
    assert sorted(b._fetch_associated_column("tag")) == sorted(tags)
    assert b._fetch_associated_column("character") == []
    assert b._fetch_associated_column("artist") == ["Fan no Hitori"]
def test_upd_assoc_col(monkeypatch, setup_mdb_dir):
    """get_associated_columns/update_assoc_columns_from_db round-trip.

    Verifies the associated columns read from the DB match the fixture data
    for two books (id 12 and id 16) and that pending, uncommitted attribute
    changes are discarded when refreshing from the DB.
    """
    # update_assoc_columns/get_assoc_cols
    tmpdir = setup_mdb_dir
    os.chdir(tmpdir)
    mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
    memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
    monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
                        lambda x, y, z: (memdb, None))
    mdb = MangaDB(tmpdir, mdb_file)
    db_con = memdb
    # pass last_change kwarg so it doesnt get auto set and counts as change
    b = Book(mdb, in_db=False, id=12, last_change=datetime.date.today())
    ei_row = db_con.execute("SELECT * FROM ExternalInfo WHERE id = 12").fetchone()
    ei = ExternalInfo(mdb, b, **ei_row)
    tags = ("Anal;Femdom;Large Breasts;Nakadashi;Straight Shota;Big Ass;Short Hair;Hat"
            ";Royalty;Dark Skin;Huge Penis;Big Areola;Defloration;Double Penetration;"
            "Elder Sister;Tall Girl".split(";"))
    artists = ["Kaneda Asou"]
    category = ["Doujinshi"]
    groups = ["Dokumushi Shokeitai"]
    lists = ["to-read"]
    assoc_cols = b.get_associated_columns()
    assert assoc_cols["tag"] == tags
    assert assoc_cols["artist"] == artists
    assert assoc_cols["category"] == category
    assert assoc_cols["groups"] == groups
    assert assoc_cols["list"] == lists
    assert assoc_cols["character"] == []
    assert assoc_cols["collection"] == []
    assert assoc_cols["parody"] == []
    assert assoc_cols["ext_infos"] == [ei]
    # upd
    # changes
    b.tag = ["delchange1", "delchange"]
    b.category = ["testcat"]
    b.update_assoc_columns_from_db()
    # changes should be reset
    assert not b._committed_state
    assert b.tag == tags
    assert b.artist == artists
    assert b.category == category
    assert b.groups == groups
    assert b.list == lists
    assert b.character == []
    assert b.collection == []
    assert b.parody == []
    assert b.ext_infos == [ei]
    # second book: has two external infos and different assoc values
    b = Book(mdb, in_db=False, id=16, last_change=datetime.date.today())
    ei_rows = db_con.execute("SELECT * FROM ExternalInfo WHERE id IN (16, 18)").fetchall()
    ei1 = ExternalInfo(mdb, b, **ei_rows[0])
    ei2 = ExternalInfo(mdb, b, **ei_rows[1])
    tags = ("Blowjob;Ahegao;Megane;Happy Sex;Threesome;Group Sex;Layer Cake;Selfcest".split(";"))
    artists = ["bariun"]
    category = ["Doujinshi"]
    characters = ["Akira Kurusu", "Futaba Sakura"]
    parodies = ["Persona 5 / ペルソナ5"]
    lists = ["to-read"]
    assoc_cols = b.get_associated_columns()
    assert assoc_cols["tag"] == tags
    assert assoc_cols["artist"] == artists
    assert assoc_cols["category"] == category
    assert assoc_cols["groups"] == []
    assert assoc_cols["list"] == lists
    assert assoc_cols["character"] == characters
    assert assoc_cols["collection"] == []
    assert assoc_cols["parody"] == parodies
    assert assoc_cols["ext_infos"] == [ei1, ei2]
    # upd
    # changes
    b.groups = ["delchange1", "delchange"]
    b.artist = ["tartist"]
    b.update_assoc_columns_from_db()
    # changes should be reset
    assert not b._committed_state
    assert b.tag == tags
    assert b.artist == artists
    assert b.category == category
    assert b.groups == []
    assert b.list == lists
    assert b.character == characters
    assert b.collection == []
    assert b.parody == parodies
    assert b.ext_infos == [ei1, ei2]
def test_diff(monkeypatch, setup_mdb_dir):
    """Book.diff reports changed scalar columns and (added, removed) sets
    for associated columns when comparing two books."""
    tmpdir = setup_mdb_dir
    os.chdir(tmpdir)
    mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
    memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
    monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
                        lambda x, y, z: (memdb, None))
    mdb = MangaDB(tmpdir, mdb_file)
    # not testing change_str
    b1_data = dict(
        id=None,
        title_eng="Same",
        title_foreign="Different1",
        language_id=1,
        pages=25,
        status_id=1,
        my_rating=4.3,
        category=["Manga"],
        collection=["Diff collection1"],
        groups=["Artistgroup"],
        artist=["Diff1", "Diff2"],
        parody=["Blabla"],
        character=["Char1", "Char2", "Char3"],
        list=["to-read", "to-download"],
        tag=["Tag1", "Tag2", "Tag3"],
        ext_infos=None,
        last_change=datetime.date(2018, 6, 3),
        note=None,
        favorite=0
    )
    b1 = Book(mdb, **b1_data)
    b2_data = dict(
        id=None,
        title_eng="Same",
        title_foreign="Different2",
        language_id=1,
        pages=27,
        status_id=1,
        my_rating=None,
        category=["Manga"],
        collection=["Diff collection2"],
        groups=["Artistgroup"],
        artist=["Diff", "Diff2", "Diff3"],
        parody=["Blabla"],
        character=["Char1", "Char5", "Char3"],
        list=["to-read", "to-download"],
        tag=["Tag1", "Tag2", "Tag3"],
        ext_infos=None,
        last_change=datetime.date(2018, 4, 3),
        note=None,
        favorite=1
    )
    b2 = Book(mdb, **b2_data)
    changes, change_str = b1.diff(b2)
    # scalar columns carry b2's value; assoc columns carry (added, removed)
    changes_expected = dict(
        title_foreign="Different2",
        pages=27,
        my_rating=None,
        # added removed
        collection=({"Diff collection2"}, {"Diff collection1"}),
        artist=({"Diff", "Diff3"}, {"Diff1"}),
        character=({"Char5"}, {"Char2"}),
        last_change=datetime.date(2018, 4, 3),
        favorite=1
    )
    assert changes == changes_expected
def test_add_rem_assoc(monkeypatch, setup_mdb_dir):
    """_add/_remove_associated_column_values write/delete bridge-table rows.

    The helpers do not commit themselves, so each call is wrapped in a
    `with mdb.db_con` transaction block.
    """
    # _add/_remove assoc col
    tmpdir = setup_mdb_dir
    os.chdir(tmpdir)
    mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
    memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
    monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
                        lambda x, y, z: (memdb, None))
    mdb = MangaDB(tmpdir, mdb_file)
    db_con = memdb
    b = mdb.get_book(5)
    tag_before = b.tag.copy()
    tag_change = ["Test1", "Test2", "Blabla"]
    # _add_associated_column_values doesnt commit
    with mdb.db_con:
        b._add_associated_column_values("tag", tag_change)
    tag = db_con.execute("""
            SELECT group_concat(Tag.name, ';')
            FROM Books, BookTag bt, Tag
            WHERE Books.id = bt.book_id
            AND Tag.id = bt.tag_id
            AND Books.id = 5""").fetchone()
    # new tags are appended at the end of the concatenated result
    assert tag[0].split(";")[-3:] == tag_change
    with mdb.db_con:
        b._remove_associated_column_values("tag", tag_change)
    tag = db_con.execute("""
            SELECT group_concat(Tag.name, ';')
            FROM Books, BookTag bt, Tag
            WHERE Books.id = bt.book_id
            AND Tag.id = bt.tag_id
            AND Books.id = 5""").fetchone()
    # after removal the original tag set is restored
    assert tag[0].split(";") == tag_before
def test_static_db_methods(monkeypatch, setup_mdb_dir):
    """Static Book helpers that operate on a book id rather than an instance.

    Covers add/remove_assoc_col_on_book_id, set_favorite_id and rate_book_id.
    For each helper the DB row is checked, and when the book is loaded (in
    mdb.id_map) the in-memory instance must be updated as well.
    """
    # static db methods
    tmpdir = setup_mdb_dir
    os.chdir(tmpdir)
    mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
    memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
    monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
                        lambda x, y, z: (memdb, None))
    mdb = MangaDB(tmpdir, mdb_file)
    db_con = memdb
    tag_before = "Large Breasts;Nakadashi;Blowjob;Threesome;Bikini;Group Sex;Swimsuit".split(";")
    tag_change = ["Test1", "Test2", "Blabla"]
    # before is last arg so staticmethod can set attr on book if its loaded (in id_map)
    Book.add_assoc_col_on_book_id(mdb, 13, "tag", tag_change, tag_before)
    tag = db_con.execute("""
            SELECT group_concat(Tag.name, ';')
            FROM Books, BookTag bt, Tag
            WHERE Books.id = bt.book_id
            AND Tag.id = bt.tag_id
            AND Books.id = 13""").fetchone()
    assert tag[0].split(";")[-3:] == tag_change
    Book.remove_assoc_col_on_book_id(mdb, 13, "tag", tag_change, tag_before + tag_change)
    tag = db_con.execute("""
            SELECT group_concat(Tag.name, ';')
            FROM Books, BookTag bt, Tag
            WHERE Books.id = bt.book_id
            AND Tag.id = bt.tag_id
            AND Books.id = 13""").fetchone()
    assert tag[0].split(";") == tag_before
    # load book so its in id_map and make sure add_remove_assoc also sets attr on book
    b = mdb.get_book(16)
    tag_before = ("Blowjob;Ahegao;Megane;Happy Sex;Threesome;Group Sex;"
                  "Layer Cake;Selfcest".split(";"))
    tag_change = ["Test3", "Test4", "Blablabla"]
    # before is last arg so staticmethod can set attr on book if its loaded (in id_map)
    Book.add_assoc_col_on_book_id(mdb, 16, "tag", tag_change, tag_before)
    tag = db_con.execute("""
            SELECT group_concat(Tag.name, ';')
            FROM Books, BookTag bt, Tag
            WHERE Books.id = bt.book_id
            AND Tag.id = bt.tag_id
            AND Books.id = 16""").fetchone()
    assert tag[0].split(";")[-3:] == tag_change
    # also set attr on book
    assert b.tag[-3:] == tag_change
    Book.remove_assoc_col_on_book_id(mdb, 16, "tag", tag_change, tag_before + tag_change)
    tag = db_con.execute("""
            SELECT group_concat(Tag.name, ';')
            FROM Books, BookTag bt, Tag
            WHERE Books.id = bt.book_id
            AND Tag.id = bt.tag_id
            AND Books.id = 16""").fetchone()
    assert tag[0].split(";") == tag_before
    # also set attr on book
    assert b.tag == tag_before
    # favorite: book 2 is not loaded, only the DB row changes
    Book.set_favorite_id(mdb, 2, 1)
    fav = db_con.execute("SELECT favorite FROM Books WHERE id = 2").fetchone()
    assert 1 == fav[0]
    b = mdb.get_book(7)
    Book.set_favorite_id(mdb, 7, 1)
    fav = db_con.execute("SELECT favorite FROM Books WHERE id = 7").fetchone()
    assert 1 == fav[0]
    # also set on book
    assert b.favorite == 1
    # rating: same pattern, unloaded book then loaded book
    Book.rate_book_id(mdb, 3, 3.5)
    rat = db_con.execute("SELECT my_rating FROM Books WHERE id = 3").fetchone()
    assert 3.5 == rat[0]
    b = mdb.get_book(8)
    Book.rate_book_id(mdb, 8, 4.25)
    rat = db_con.execute("SELECT my_rating FROM Books WHERE id = 8").fetchone()
    assert 4.25 == rat[0]
    # also set on book
    assert b.my_rating == 4.25
def test_remove_book(monkeypatch, setup_mdb_dir):
    """Book.remove deletes the DB row, its ExternalInfo rows, the cached
    id_map entry and the cover thumbnail on disk."""
    tmpdir = setup_mdb_dir
    os.chdir(tmpdir)
    mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
    memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
    monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
                        lambda x, y, z: (memdb, None))
    mdb = MangaDB(tmpdir, mdb_file)
    import shutil
    # copy cover
    os.makedirs(os.path.join(tmpdir, "thumbs"))
    cover_path = os.path.join(tmpdir, "thumbs", "16")
    shutil.copyfile(os.path.join(tmpdir, os.pardir, "book_test_files", "16"), cover_path)
    db_con = memdb
    # book removed and all ext infos
    b = mdb.get_book(16)
    b.remove()
    assert b._in_db is False
    # deleted from id map
    with pytest.raises(KeyError):
        mdb.id_map[b.key]
    b_row = db_con.execute("SELECT id FROM Books WHERE id = 16").fetchall()
    assert not b_row
    ei_rows = db_con.execute("SELECT id FROM ExternalInfo WHERE id IN (16, 18)").fetchall()
    assert not ei_rows
    # cover deleted
    assert not os.path.exists(cover_path)
def test_remove_extinfo(monkeypatch, setup_mdb_dir, caplog):
    """Book.remove_ext_info returns the removed info's URL, logs an error for
    an unknown id, and warns once no external infos are left."""
    tmpdir = setup_mdb_dir
    os.chdir(tmpdir)
    mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
    memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
    monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
                        lambda x, y, z: (memdb, None))
    mdb = MangaDB(tmpdir, mdb_file)
    b = mdb.get_book(16)
    caplog.clear()
    # unknown ext info id -> error logged, None returned
    assert b.remove_ext_info(99) is None
    assert caplog.record_tuples == [
        ("manga_db.manga", logging.ERROR, "No external info with id 99 found!")
    ]
    # successful removal returns the removed info's URL
    assert b.remove_ext_info(18) == "https://www.tsumino.com/entry/43454"
    assert len(b.ext_infos) == 1
    assert b.ext_infos[0].id == 16
    assert b.remove_ext_info(16)
    assert not b.ext_infos
    caplog.clear()
    # removing from a book without ext infos -> warning logged
    assert b.remove_ext_info(4939) is None
    assert caplog.record_tuples == [
        ("manga_db.manga", logging.WARNING, "No external infos on book with id 16 or not"
         " fetched from DB yet!")
    ]
def test_save_book(monkeypatch, setup_mdb_dir, caplog):
    """Book.save: covers the _add path (new book, new language, ext infos,
    outdating) and the _update path (blocked update, no-op save, changed
    scalar and associated columns).

    Fix over previous revision: removed a stray ``f`` prefix from a string
    literal without placeholders (ruff F541) in the block_update assertion.
    """
    # save: _add _update
    # incl! _update_assoc_cols -> "
    tmpdir = setup_mdb_dir
    os.chdir(tmpdir)
    mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
    memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
    monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
                        lambda x, y, z: (memdb, None))
    mdb = MangaDB(tmpdir, mdb_file)
    db_con = memdb
    # _add
    ei_data = dict(
        id=None,
        book_id=None,
        url="http://test1.com",
        id_onpage='1111',
        imported_from=1,
        upload_date=datetime.date(2018, 4, 13),
        uploader="Uploader",
        censor_id=1,
        rating=4.19,
        ratings=165,
        favorites=300,
        downloaded=None,
        last_update=None,
        outdated=None,
    )
    b1_data = dict(
        id=None,
        title_eng="Add1",
        title_foreign="Foreign1",
        language_id=1,
        pages=25,
        chapter_status="Vol. 2 Ch. 14",
        read_status=13,
        status_id=1,
        my_rating=None,
        category=["Manga"],
        collection=None,
        groups=["Artistgroup"],
        artist=["Diff1", "Diff2"],
        parody=["Blabla"],
        character=["Char1", "Char2", "Char3"],
        list=["to-read", "to-download"],
        tag=["Tag1", "Tag2", "Tag3"],
        ext_infos=None,
        last_change=datetime.date(2018, 6, 3),
        note=None,
        favorite=None,
        cover_timestamp=None,
        nsfw=1
    )
    b1 = Book(mdb, **b1_data)
    # since we later check that cover_timestamp gets saved as 0.0 if None
    b1_data['cover_timestamp'] = 0.0
    ei1 = ExternalInfo(mdb, b1, **ei_data)
    ei2 = ExternalInfo(mdb, b1, **ei_data)
    # will outdate extinfo 8
    ei2.id_onpage = '43506'
    b1.ext_infos = [ei1, ei2]
    assert b1._in_db is False
    bid, outdated = b1.save()
    assert bid == 18
    assert b1.id == 18
    # in_db + id_map, committed reset
    assert b1._in_db is True
    assert mdb.id_map[b1.key] is b1
    assert not b1._committed_state
    book_info_db = all_book_info(db_con, 18, include_id=True)
    assert len(book_info_db) == 2
    # fav set correctly
    assert book_info_db[0]["favorite"] == 0
    assert b1.favorite == 0
    compare_cols_row_book_data(b1, book_info_db[0], b1_data, special={"favorite": 0})
    # outdated, list of ext info ids that outdated others
    assert outdated == [20]
    # extinfo saved
    eis = db_con.execute("SELECT id, book_id, id_onpage FROM ExternalInfo "
                         "WHERE id > 18").fetchall()
    assert len(eis) == 2
    assert eis[0]["book_id"] == 18
    assert eis[1]["book_id"] == 18
    assert eis[0]["id_onpage"] == '1111'
    assert eis[1]["id_onpage"] == '43506'
    # add book with new lang
    b2 = Book(mdb, title_eng="Test2", favorite=1, pages=11, status_id=1, nsfw=0)
    b2.language = "Krababbl"
    bid, _ = b2.save()
    assert bid == 19
    assert b2.id == 19
    # /2 since we have double indirection id->name name->id
    expected_lang_id = len(LANG_IDS) / 2 + 1
    assert b2.language_id == expected_lang_id
    lang = db_con.execute("SELECT id FROM Languages WHERE name = 'Krababbl'").fetchall()
    assert lang
    assert lang[0][0] == expected_lang_id
    brow = db_con.execute("SELECT title_eng, favorite FROM Books WHERE id = 19").fetchone()
    assert brow[0] == "Test2"
    assert brow["favorite"] == 1
    assert b2.favorite == 1
    assert b2._in_db is True
    assert not b2._committed_state
    assert mdb.id_map[b2.key] is b2
    # _update
    bu1 = Book(mdb, id=None, title_eng="Kangofu-san ni Kintama Sakusei Saremashita",
               title_foreign="看護婦さんにキンタマ搾精されました", in_db=False)
    bu1.in_db = True
    # test not updating when block_update kwarg is true
    caplog.clear()
    assert bu1.save(block_update=True) == (None, None)
    assert caplog.record_tuples == [
        ("manga_db.manga", logging.DEBUG,
         "Book was found in DB(id 15) but saving was blocked due to "
         "block_update option!")
    ]
    bu2 = mdb.get_book(11)
    # dont do anything if no changes
    caplog.clear()
    assert not bu2._committed_state
    assert bu2.save() == (11, None)
    assert caplog.record_tuples == [
        ("manga_db.manga", logging.DEBUG, "No changes to save for book with id 11")
    ]
    assert not bu2._committed_state
    before = bu2.export_for_db()
    # empty assoc list to None
    before.update({col: getattr(bu2, col) if getattr(bu2, col) else None
                   for col in bu2.ASSOCIATED_COLUMNS})
    bu2.language = "adlalad"
    change = {
        "title_eng": "Altered",
        "language_id": 3,
        "my_rating": 4.75,
        "favorite": 1,
        # removed and added
        "tag": ("Large Breasts;Test33;Nakadashi;Ahegao;Gender Bender;Dark Skin;Elf;Body Swap"
                ";Bondage;Filming;Test Tag".split(";")),
        # added
        "artist": ["Taniguchi-san", "Newartist"],
        # same
        "category": ["Manga"],
        # none added
        "character": ["Char111", "Char222"]
    }
    bu2.update_from_dict(change)
    before.update(change)
    bid, _ = bu2.save()
    book_info_db = all_book_info(db_con, 11, include_id=True)
    compare_cols_row_book_data(bu2, book_info_db, before,
                               special={"last_change": datetime.date.today()})
    # committed reset
    assert not bu2._committed_state
    # last_change
    assert bu2.last_change == datetime.date.today()
    assert book_info_db["last_change"] == datetime.date.today()
    bu3 = mdb.get_book(7)
    assert not bu3._committed_state
    before = bu3.export_for_db()
    # empty assoc list to None
    before.update({col: getattr(bu3, col) if getattr(bu3, col) else None
                   for col in bu3.ASSOCIATED_COLUMNS})
    change = {
        "title_foreign": "ForeignAltered",
        "pages": 13,
        "note": "Note blabla",
        # set None
        "tag": None,
        # set None
        "artist": None,
        # changed
        "category": ["Manga"],
        # none added
        "collection": ["Col1", "Col2"],
        "groups": ["Grp1", "Grp2", "Senpenbankashiki"]
    }
    bu3.update_from_dict(change)
    before.update(change)
    bid, _ = bu3.save()
    book_info_db = all_book_info(db_con, 7, include_id=True)
    compare_cols_row_book_data(bu3, book_info_db, before,
                               special={"last_change": datetime.date.today()})
    # committed reset
    assert not bu3._committed_state
    # last_change
    assert bu3.last_change == datetime.date.today()
    assert book_info_db["last_change"] == datetime.date.today()
# Maps a Book associated-column name to the plural alias under which
# all_book_info() exposes its group_concat result (e.g. row["tags"]).
assoc_concat = {
    "tag": "tags", "artist": "artists", "category": "categories", "character": "characters",
    "collection": "collections", "groups": "groups", "list": "lists", "parody": "parodies"
}
def compare_cols_row_book_data(book, row, data, special=None):
    """Assert that a Book instance and its DB row both match the expected data.

    ``special`` maps column names to override values that take precedence
    over ``data`` (e.g. defaults the save operation is expected to apply).
    """
    special = special if special is not None else {}
    # plain columns: compare row value and instance attribute against data
    for col in Book.COLUMNS:
        expected = data[col]
        if col in special:
            # specific values that are incorrect in data
            assert row[col] == special[col]
            assert getattr(book, col) == special[col]
            continue
        if expected is None:
            # use is comparison for None
            assert row[col] is None
            assert getattr(book, col) is None
            continue
        assert row[col] == expected
        assert getattr(book, col) == expected
    # associated columns arrive as a ';'-concatenated string in the row
    for col in Book.ASSOCIATED_COLUMNS:
        if col == "ext_infos":
            continue
        # look up plural of col to get name of concat assoc col
        concatted = row[assoc_concat[col]]
        if concatted is not None:
            # split and sort for order-independent comparison
            concatted = sorted(concatted.split(";")) if ";" in concatted else [concatted]
        expected = sorted(data[col]) if data[col] else None
        on_book = getattr(book, col)
        on_book = sorted(on_book) if on_book else on_book
        if col in special:
            # specific values that are incorrect in data
            assert concatted == special[col]
            assert on_book == special[col]
        elif expected is None:
            # assoc col doesnt return None only empty trackable
            assert concatted is None
            assert on_book == []
        else:
            assert concatted == expected
            assert on_book == expected
| tests/test_book.py | 23,697 | update_assoc_columns/get_assoc_cols pass last_change kwarg so it doesnt get auto set and counts as change upd changes changes should be reset upd changes changes should be reset not testing change_str added removed _add/_remove assoc col _add_associated_column_values doesnt commit static db methods before is last arg so staticmethod can set attr on book if its loaded (in id_map) load book so its in id_map and make sure add_remove_assoc also sets attr on book before is last arg so staticmethod can set attr on book if its loaded (in id_map) also set attr on book also set attr on book also set on book also set on book copy cover book removed and all ext infos deleted from id map cover deleted save: _add _update incl! _update_assoc_cols -> " _add since we later check that cover_timestamp gets saved as 0.0 if None will outdate extinfo 8 in_db + id_map, committed reset fav set correctly outdated, list of ext info ids that outdated others extinfo saved add book with new lang /2 since we have double indirection id->name name->id _update test not updating when block_update kwarg is true dont do anything if no changes empty assoc list to None removed and added added same none added committed reset last_change empty assoc list to None set None set None changed none added committed reset last_change specific values that are incorrect in data use is comparison for None look up plural of col to get name of concat assoc col row_val is concatted values need sorted to compare (or use set) need sorted to compare (or use set) specific values that are incorrect in data assoc col doesnt return None only empty trackable | 1,637 | en | 0.836662 |
from datetime import timedelta
import json
from os import listdir
from os.path import isfile, join
import pr0gramm
import logging
__author__ = "Peter Wolf"
__mail__ = "pwolf2310@gmail.com"
__date__ = "2016-12-26"
LOG = logging.getLogger(__name__)
class DataSources:
    """Constants for the media variant to download for an item."""
    IMAGE = 0
    THUMBNAIL = 1
    FULL_SIZE = 2
class DataCollector:
    """ The DataCollector retrieves relevant data from
        pr0gramm and saves it locally.

    Fixes over previous revision:
    - ``download()`` used a Python-2 ``print`` statement (a syntax error on
      Python 3); it now reports through the module logger instead.
    - ``collectDataBatch`` had a mutable default argument (``data=[]``).
    - ``collectDataBatch`` tested ``self.download`` (the bound method, always
      truthy) instead of the ``self.download_media`` flag, so media was
      downloaded even when downloading was disabled.
    """

    def __init__(self, api, last_id=None):
        self.api = api
        self.last_id = last_id
        # only items older than this are considered (tags have settled by then)
        self.age_threshold = timedelta(hours=5)
        self.min_num_of_tags = 5
        self.search_forwards = True
        self.media_directory = "/tmp"
        self.data_source = DataSources.IMAGE
        self.annotation_file = "/tmp/annotation.txt"
        self.json_dir = "/tmp"
        self.download_media = True
        self.save_json = False
        self.use_local_storage = False
        self.last_batch_size = None

    def setAgeThreshold(self, days=0, hours=5, minutes=0, seconds=0):
        """Set the minimum age an item must have to be collected."""
        self.age_threshold = timedelta(
            days=days, hours=hours, minutes=minutes, seconds=seconds)

    def setMinimumNumberOfTags(self, threshold):
        self.min_num_of_tags = threshold

    def setLastId(self, last_id):
        self.last_id = last_id

    def getLastId(self):
        return self.last_id

    def useBackwardsSearch(self):
        self.search_forwards = False

    def useForwardsSearch(self):
        self.search_forwards = True

    def setMediaDirectory(self, directory):
        self.media_directory = directory

    def setDataSource(self, source):
        """Set the media variant to fetch; see DataSources."""
        self.data_source = source

    def setAnnotationFile(self, annotation_file):
        self.annotation_file = annotation_file

    def setJsonDir(self, directory):
        self.json_dir = directory

    def setDownloadMedia(self, download_media):
        self.download_media = download_media

    def setSaveJSON(self, save_json):
        self.save_json = save_json

    def setUseLocalStorage(self, use_local_storage):
        self.use_local_storage = use_local_storage

    def getSizeOfLastBatch(self):
        return self.last_batch_size

    def download(self, item):
        """Download the configured media variant for *item*.

        Returns the path of the downloaded file, or None for an unknown
        data source.
        """
        if self.data_source == DataSources.IMAGE:
            return self.api.downloadMedia(
                item, save_dir=self.media_directory, file_name=item.id)
        elif self.data_source == DataSources.THUMBNAIL:
            return self.api.downloadThumbnail(
                item, save_dir=self.media_directory, file_name=item.id)
        elif self.data_source == DataSources.FULL_SIZE:
            return self.api.downloadFullsize(
                item, save_dir=self.media_directory, file_name=item.id)
        else:
            # was a Python-2 print statement; use the module logger instead
            LOG.error("No valid data source chosen: %s", str(self.data_source))
            return None

    def writeAnnotation(self, item, media_path):
        """Write or replace *item*'s entry in the annotation file."""
        # Read the current annotation file
        content = []
        if isfile(self.annotation_file):
            with open(self.annotation_file, "r") as f:
                content = f.readlines()
        # write every item as a line with the following structure:
        # ID;IMAGE_PATH;AMOUNT_OF_TAGS;...TAG_TEXT;TAG_CONFIDENCE;...
        new_line = str(item.id) + ";"
        new_line += str(media_path) + ";"
        new_line += str(len(item.tags)) + ";"
        new_line += ";".join([str(tag.getText()) + ";" +
                              str(tag.getConfidence()) for tag in item.tags])
        # Check if the item already has an entry in the annotation file
        # and replace it.
        contained = False
        for i in range(len(content)):
            if content[i].strip().startswith(str(item.id)):
                content[i] = new_line
                contained = True
                break
        # If no entry already exists, add a new line for the item
        if not contained:
            content.append(new_line)
        # Write the new content to the file.
        with open(self.annotation_file, "w") as f:
            for line in content:
                f.write(line.strip() + "\n")

    def getItemsFromAPI(self):
        """Fetch the next batch of items from the API in search direction."""
        if self.search_forwards:
            return self.api.getItemsNewer(self.last_id)
        else:
            return self.api.getItemsOlder(self.last_id)

    def getItemsFromLocalStorage(self):
        """Load items from the stored JSON files, filtered by last_id and
        search direction, sorted newest first."""
        json_files = [join(self.json_dir, f) for f in listdir(self.json_dir)
                      if isfile(join(self.json_dir, f)) and f.endswith(".json")]
        data = []
        for json_file in json_files:
            with open(json_file, "r") as f:
                json_item = json.load(f)
                item = pr0gramm.Item.Item.parseFromJSON(json_item)
                if not self.last_id \
                        or (self.search_forwards and item.getSortId() > self.last_id) \
                        or (not self.search_forwards and item.getSortId() < self.last_id):
                    data.append(item)
        data.sort(reverse=True)
        return data

    def collectDataBatch(self, data=None):
        """Collect one batch of sufficiently old, tagged items.

        Downloads media / writes annotations / stores JSON according to the
        configured flags and returns the new last_id (None if nothing valid
        was found).
        """
        # retrieve data if none has been given
        # (mutable-default fix: None instead of a shared [] default)
        if not data:
            if self.use_local_storage:
                data = self.getItemsFromLocalStorage()
            else:
                data = self.getItemsFromAPI()
            if not data:
                return
        # filter data based on age and tags
        valid_data = []
        for item in data:
            if item.getAge() >= self.age_threshold and len(item.tags) > 0:
                valid_data.append(item)
        # save size of collected data batch
        self.last_batch_size = len(valid_data)
        if not valid_data:
            return
        # save id of last item to fit age criteria in search direction
        if self.search_forwards:
            self.last_id = valid_data[0].getSortId()
        else:
            self.last_id = valid_data[-1].getSortId()
        for item in valid_data:
            # bug fix: previously tested the always-truthy bound method
            # `self.download` instead of the `download_media` flag
            if self.download_media:
                # download media
                target_path = self.download(item)
                if target_path:
                    # write id(s), link to media and tags to file
                    self.writeAnnotation(item, target_path)
            if self.save_json:
                with open(self.json_dir + "/" + str(item.id) + ".json", "w") as f:
                    json.dump(item.asDict(), f)
        return self.last_id
| src/data_collection/data_collector.py | 6,376 | Read the current annotation file write every item as a line with the following structure: ID;IMAGE_PATH;AMOUNT_OF_TAGS;...TAG_TEXT;TAG_CONFIDENCE;... Check if the item already has an entry in the annotation file and replace it. If no entry already exists, add a new line for the item Write the new content to the file. retrieve data if none has been given filter data based on age and tags save size of collected data batch save id of last item to fit age criteria in search direction download media write id(s), link to media and tags to file | 543 | en | 0.857904 |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the REST API."""
import binascii
from decimal import Decimal
from enum import Enum
from io import BytesIO
import json
from struct import pack, unpack
import http.client
import urllib.parse
from test_framework.qtumconfig import COINBASE_MATURITY, INITIAL_BLOCK_REWARD
from test_framework.qtum import convert_btc_address_to_qtum
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
hex_str_to_bytes,
)
from test_framework.messages import CBlockHeader
BLOCK_HEADER_SIZE = len(CBlockHeader().serialize())
class ReqType(Enum):
    """Response serialization requested from the REST endpoint (URI suffix)."""
    JSON = 1
    BIN = 2
    HEX = 3
class RetType(Enum):
    """How test_rest_request should return the HTTP response."""
    OBJ = 1
    BYTES = 2
    JSON = 3
def filter_output_indices_by_value(vouts, value):
    """Yield the output index ``n`` of every vout whose value equals *value*."""
    return (vout['n'] for vout in vouts if vout['value'] == value)
class RESTTest (BitcoinTestFramework):
    def set_test_params(self):
        """Configure a fresh 2-node chain; only node 0 enables the REST API."""
        self.setup_clean_chain = True
        self.num_nodes = 2
        # node 0 runs with -rest, node 1 with defaults
        self.extra_args = [["-rest"], []]
        self.supports_cli = False
    def skip_test_if_missing_module(self):
        """Skip the whole test when the node was built without wallet support."""
        self.skip_if_no_wallet()
def test_rest_request(self, uri, http_method='GET', req_type=ReqType.JSON, body='', status=200, ret_type=RetType.JSON):
rest_uri = '/rest' + uri
if req_type == ReqType.JSON:
rest_uri += '.json'
elif req_type == ReqType.BIN:
rest_uri += '.bin'
elif req_type == ReqType.HEX:
rest_uri += '.hex'
conn = http.client.HTTPConnection(self.url.hostname, self.url.port)
self.log.debug('%s %s %s', http_method, rest_uri, body)
if http_method == 'GET':
conn.request('GET', rest_uri)
elif http_method == 'POST':
conn.request('POST', rest_uri, body)
resp = conn.getresponse()
assert_equal(resp.status, status)
if ret_type == RetType.OBJ:
return resp
elif ret_type == RetType.BYTES:
return resp.read()
elif ret_type == RetType.JSON:
return json.loads(resp.read().decode('utf-8'), parse_float=Decimal)
def run_test(self):
self.url = urllib.parse.urlparse(self.nodes[0].url)
self.log.info("Mine blocks and send Bitcoin to node 1")
# Random address so node1's balance doesn't increase
not_related_address = convert_btc_address_to_qtum("2MxqoHEdNQTyYeX1mHcbrrpzgojbosTpCvJ")
self.nodes[0].generate(1)
self.sync_all()
for i in range(0, COINBASE_MATURITY, 100):
self.nodes[1].generatetoaddress(100, not_related_address)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), INITIAL_BLOCK_REWARD)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.log.info("Test the /tx URI")
json_obj = self.test_rest_request("/tx/{}".format(txid))
assert_equal(json_obj['txid'], txid)
# Check hex format response
hex_response = self.test_rest_request("/tx/{}".format(txid), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than_or_equal(int(hex_response.getheader('content-length')),
json_obj['size']*2)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout']) # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
self.log.info("Query an unspent TXO using the /getutxos URI")
self.nodes[1].generatetoaddress(1, not_related_address)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1"))
# Check chainTip response
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], Decimal('0.1'))
self.log.info("Query a spent TXO using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
# Check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
# Check bitmap
assert_equal(json_obj['bitmap'], "0")
self.log.info("Query two TXOs using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}/{}-{}".format(*(spending + spent)))
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
self.log.info("Query the TXOs using the /getutxos URI with a binary response")
bin_request = b'\x01\x02'
for txid, n in [spending, spent]:
bin_request += hex_str_to_bytes(txid)
bin_request += pack("i", n)
bin_response = self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body=bin_request, ret_type=RetType.BYTES)
output = BytesIO(bin_response)
chain_height, = unpack("<i", output.read(4))
response_hash = output.read(32)[::-1].hex()
assert_equal(bb_hash, response_hash) # check if getutxo's chaintip during calculation was fine
assert_equal(chain_height, COINBASE_MATURITY+2) # chain height must be 102
self.log.info("Test the /getutxos URI with and without /checkmempool")
# Create a transaction, check that it's found with /checkmempool, but
# not found without. Then confirm the transaction and check that it's
# found with or without /checkmempool.
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_obj = self.test_rest_request("/tx/{}".format(txid))
# get the spent output to later check for utxo (should be spent by then)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 0)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 0)
self.nodes[0].generate(1)
self.sync_all()
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
# Do some invalid requests
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.JSON, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos/checkmempool", http_method='POST', req_type=ReqType.JSON, status=400, ret_type=RetType.OBJ)
# Test limits
long_uri = '/'.join(["{}-{}".format(txid, n_) for n_ in range(20)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=400, ret_type=RetType.OBJ)
long_uri = '/'.join(['{}-{}'.format(txid, n_) for n_ in range(15)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=200)
self.nodes[0].generate(1) # generate block to not affect upcoming tests
self.sync_all()
self.log.info("Test the /block, /blockhashbyheight and /headers URIs")
bb_hash = self.nodes[0].getbestblockhash()
# Check result if block does not exists
assert_equal(self.test_rest_request('/headers/1/0000000000000000000000000000000000000000000000000000000000000000'), [])
self.test_rest_request('/block/0000000000000000000000000000000000000000000000000000000000000000', status=404, ret_type=RetType.OBJ)
# Check result if block is not in the active chain
self.nodes[0].invalidateblock(bb_hash)
assert_equal(self.test_rest_request('/headers/1/{}'.format(bb_hash)), [])
self.test_rest_request('/block/{}'.format(bb_hash))
self.nodes[0].reconsiderblock(bb_hash)
# Check binary format
response = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_greater_than(int(response.getheader('content-length')), BLOCK_HEADER_SIZE)
response_bytes = response.read()
# Compare with block header
response_header = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_equal(int(response_header.getheader('content-length')), 181)
response_header_bytes = response_header.read()
assert_equal(response_bytes[:181], response_header_bytes)
# Check block hex format
response_hex = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_hex_bytes = response_hex.read().strip(b'\n')
assert_equal(binascii.hexlify(response_bytes), response_hex_bytes)
# Compare with hex block header
response_header_hex = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_header_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_header_hex_bytes = response_header_hex.read(BLOCK_HEADER_SIZE*2)
assert_equal(binascii.hexlify(response_bytes[:BLOCK_HEADER_SIZE]), response_header_hex_bytes)
# Check json format
block_json_obj = self.test_rest_request("/block/{}".format(bb_hash))
assert_equal(block_json_obj['hash'], bb_hash)
assert_equal(self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']))['blockhash'], bb_hash)
# Check hex/bin format
resp_hex = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_equal(resp_hex.read().decode('utf-8').rstrip(), bb_hash)
resp_bytes = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.BIN, ret_type=RetType.BYTES)
blockhash = resp_bytes[::-1].hex()
assert_equal(blockhash, bb_hash)
# Check invalid blockhashbyheight requests
resp = self.test_rest_request("/blockhashbyheight/abc", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: abc")
resp = self.test_rest_request("/blockhashbyheight/1000000", ret_type=RetType.OBJ, status=404)
assert_equal(resp.read().decode('utf-8').rstrip(), "Block height out of range")
resp = self.test_rest_request("/blockhashbyheight/-1", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: -1")
self.test_rest_request("/blockhashbyheight/", ret_type=RetType.OBJ, status=400)
# Compare with json block header
json_obj = self.test_rest_request("/headers/1/{}".format(bb_hash))
assert_equal(len(json_obj), 1) # ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) # request/response hash should be the same
# Compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
for key in ['hash', 'confirmations', 'height', 'version', 'merkleroot', 'time', 'nonce', 'bits', 'difficulty', 'chainwork', 'previousblockhash']:
assert_equal(json_obj[0][key], rpc_block_json[key])
# See if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
json_obj = self.test_rest_request("/headers/5/{}".format(bb_hash))
assert_equal(len(json_obj), 5) # now we should have 5 header objects
self.log.info("Test tx inclusion in the /mempool and /block URIs")
# Make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
self.sync_all()
# Check that there are exactly 3 transactions in the TX memory pool before generating the block
json_obj = self.test_rest_request("/mempool/info")
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# Check that there are our submitted transactions in the TX memory pool
json_obj = self.test_rest_request("/mempool/contents")
for i, tx in enumerate(txs):
assert tx in json_obj
assert_equal(json_obj[tx]['spentby'], txs[i + 1:i + 2])
assert_equal(json_obj[tx]['depends'], txs[i - 1:i])
# Now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
# Check if the 3 tx show up in the new block
json_obj = self.test_rest_request("/block/{}".format(newblockhash[0]))
non_coinbase_txs = {tx['txid'] for tx in json_obj['tx']
if 'coinbase' not in tx['vin'][0]}
assert_equal(non_coinbase_txs, set(txs))
# Check the same but without tx details
json_obj = self.test_rest_request("/block/notxdetails/{}".format(newblockhash[0]))
for tx in txs:
assert tx in json_obj['tx']
self.log.info("Test the /chaininfo URI")
bb_hash = self.nodes[0].getbestblockhash()
json_obj = self.test_rest_request("/chaininfo")
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
    # Run the REST interface functional test when executed directly.
    RESTTest().main()
| test/functional/interface_rest.py | 14,958 | Test the REST API.
!/usr/bin/env python3 Copyright (c) 2014-2019 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Random address so node1's balance doesn't increase Check hex format response get the vin to later check for utxo (should be spent by then) get n of 0.1 outpoint Check chainTip response Make sure there is one utxo Check chainTip response Make sure there is no utxo in the response because this outpoint has been spent Check bitmap check if getutxo's chaintip during calculation was fine chain height must be 102 Create a transaction, check that it's found with /checkmempool, but not found without. Then confirm the transaction and check that it's found with or without /checkmempool. do a tx and don't sync get the spent output to later check for utxo (should be spent by then) get n of 0.1 outpoint Do some invalid requests Test limits generate block to not affect upcoming tests Check result if block does not exists Check result if block is not in the active chain Check binary format Compare with block header Check block hex format Compare with hex block header Check json format Check hex/bin format Check invalid blockhashbyheight requests Compare with json block header ensure that there is one header in the json response request/response hash should be the same Compare with normal RPC block response See if we can get 5 headers in one response now we should have 5 header objects Make 3 tx and mine them on node 1 Check that there are exactly 3 transactions in the TX memory pool before generating the block the size of the memory pool should be greater than 3x ~100 bytes Check that there are our submitted transactions in the TX memory pool Now mine the transactions Check if the 3 tx show up in the new block Check the same but without tx details | 1,887 | en | 0.860252 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Example Elasticsearch mapping fixture, together with the expected tree
# representations the library under test should render for it.
MAPPING = {
    "dynamic": False,
    "properties": {
        "classification_type": {"type": "keyword"},
        "date": {"type": "date", "format": "strict_date_optional_time||epoch_millis"},
        "global_metrics": {
            "dynamic": False,
            "properties": {
                "field": {
                    "dynamic": False,
                    "properties": {
                        "id": {"type": "integer"},
                        "name": {
                            "type": "text",
                            "fields": {
                                # subfield
                                "raw": {"type": "keyword"}
                            },
                        },
                        "type": {"type": "keyword"},
                    },
                },
                "dataset": {
                    "dynamic": False,
                    "properties": {
                        "nb_classes": {"type": "integer"},
                        "support_train": {"type": "integer"},
                    },
                },
                "performance": {
                    "dynamic": False,
                    "properties": {
                        "test": {
                            "dynamic": False,
                            "properties": {
                                "macro": {
                                    "dynamic": False,
                                    "properties": {
                                        "f1_score": {"type": "float"},
                                        "precision": {"type": "float"},
                                        "recall": {"type": "float"},
                                    },
                                },
                                "micro": {
                                    "dynamic": False,
                                    "properties": {
                                        "f1_score": {"type": "float"},
                                        "precision": {"type": "float"},
                                        "recall": {"type": "float"},
                                    },
                                },
                            },
                        }
                    },
                },
            },
        },
        "id": {"type": "keyword"},
        "language": {"type": "keyword"},
        "local_metrics": {
            "type": "nested",
            "dynamic": False,
            "properties": {
                "dataset": {
                    "dynamic": False,
                    "properties": {
                        "support_test": {"type": "integer"},
                        "support_train": {"type": "integer"},
                    },
                },
                "field_class": {
                    "dynamic": False,
                    "properties": {
                        "id": {"type": "integer"},
                        "name": {"type": "keyword"},
                    },
                },
                "performance": {
                    "dynamic": False,
                    "properties": {
                        "test": {
                            "dynamic": False,
                            "properties": {
                                "f1_score": {"type": "float"},
                                "precision": {"type": "float"},
                                "recall": {"type": "float"},
                            },
                        }
                    },
                },
            },
        },
        "workflow": {"type": "keyword"},
    },
}
# Expected ASCII-tree rendering of MAPPING.
# NOTE(review): the alignment whitespace inside this literal appears to have
# been collapsed by a paste/extraction step — verify against the original
# rendered repr before relying on exact string comparison.
EXPECTED_MAPPING_REPR = """_
├── classification_type Keyword
├── date Date
├── global_metrics {Object}
│ ├── dataset {Object}
│ │ ├── nb_classes Integer
│ │ └── support_train Integer
│ ├── field {Object}
│ │ ├── id Integer
│ │ ├── name Text
│ │ │ └── raw ~ Keyword
│ │ └── type Keyword
│ └── performance {Object}
│ └── test {Object}
│ ├── macro {Object}
│ │ ├── f1_score Float
│ │ ├── precision Float
│ │ └── recall Float
│ └── micro {Object}
│ ├── f1_score Float
│ ├── precision Float
│ └── recall Float
├── id Keyword
├── language Keyword
├── local_metrics [Nested]
│ ├── dataset {Object}
│ │ ├── support_test Integer
│ │ └── support_train Integer
│ ├── field_class {Object}
│ │ ├── id Integer
│ │ └── name Keyword
│ └── performance {Object}
│ └── test {Object}
│ ├── f1_score Float
│ ├── precision Float
│ └── recall Float
└── workflow Keyword
"""
# Same tree prefixed with the owning object's header line.
EXPECTED_MAPPING_TREE_REPR = """<Mapping>\n%s""" % EXPECTED_MAPPING_REPR
EXPECTED_CLIENT_BOUND_MAPPING_REPR = """<IMapping>\n%s""" % EXPECTED_MAPPING_REPR
| tests/testing_samples/mapping_example.py | 6,699 | !/usr/bin/env python -*- coding: utf-8 -*- subfield | 51 | en | 0.3221 |
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from datetime import datetime
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
from Logger.app_logger import App_logger
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
class Training:
    """Train a VGG19 transfer-learning image classifier (5 classes).

    Each of the train/test/val directories is expected to contain one
    sub-directory per class, each holding image files.
    """
    def __init__(self, train_path, test_path, val_path):
        # Dataset roots.
        self.train_path = train_path
        self.test_path = test_path
        self.val_path = val_path
        # Append-mode log file; closed at the end of train().
        self.file_object = open("Training_Logs/ModelTrainingLog.txt", 'a+')
        self.log_object = App_logger()
    def _load_images(self, dataset_path):
        """Load every image under dataset_path, resized to 224x224 BGR arrays.

        Side effect (kept from the original per-split loops): files that
        OpenCV cannot decode, or whose height is below 224 pixels, are
        deleted from disk.
        """
        images = []
        for folder in os.listdir(dataset_path):
            sub_path = dataset_path + "/" + folder
            for img in os.listdir(sub_path):
                image_path = sub_path + "/" + img
                img_arr = cv2.imread(image_path)
                # Drop unreadable or too-small files (short-circuit protects
                # the shape access when imread returned None).
                if img_arr is None or img_arr.shape[0] < 224:
                    os.remove(image_path)
                    continue
                images.append(cv2.resize(img_arr, (224, 224)))
        return images
    def train(self):
        """Run the full pipeline: load data, fit the model, save artifacts.

        Raises: re-raises whatever exception occurred after logging it.
        """
        self.log_object.log(self.file_object, "Entered in to train method in Training class.Training started")
        try:
            # The three splits previously used three duplicated loops;
            # they share one helper now.
            x_train = self._load_images(self.train_path)
            x_test = self._load_images(self.test_path)
            x_val = self._load_images(self.val_path)
            self.log_object.log(self.file_object, "Entered in to train method in Training class.train,test,val split successfull")
            # Scale pixel values to [0, 1].
            train_x = np.array(x_train) / 255.0
            test_x = np.array(x_test) / 255.0
            val_x = np.array(x_val) / 255.0
            # The generators are only used to derive per-image class labels
            # from the directory structure.
            train_datagen = ImageDataGenerator(rescale=1. / 255)
            test_datagen = ImageDataGenerator(rescale=1. / 255)
            val_datagen = ImageDataGenerator(rescale=1. / 255)
            training_set = train_datagen.flow_from_directory(self.train_path,
                                                             target_size=(224, 224),
                                                             batch_size=32,
                                                             class_mode='sparse')
            test_set = test_datagen.flow_from_directory(self.test_path,
                                                        target_size=(224, 224),
                                                        batch_size=32,
                                                        class_mode='sparse')
            val_set = val_datagen.flow_from_directory(self.val_path,
                                                      target_size=(224, 224),
                                                      batch_size=32,
                                                      class_mode='sparse')
            train_y = training_set.classes
            test_y = test_set.classes
            val_y = val_set.classes
            IMAGE_SIZE = [224, 224]
            # Frozen ImageNet backbone with a fresh 5-way softmax head.
            vgg = VGG19(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)
            self.log_object.log(self.file_object, "Entered in to train method in Training class. Model successfully initialized")
            for layer in vgg.layers:
                layer.trainable = False
            x = Flatten()(vgg.output)
            prediction = Dense(5, activation='softmax')(x)
            model = Model(inputs=vgg.input, outputs=prediction)
            model.summary()
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer='adam', metrics=['accuracy'])
            self.log_object.log(self.file_object, "Entered in to train method in Training class.Model compile successfull")
            file_path = 'vgg19_model/checkpoint-{epoch:02d}-{val_accuracy:.2f}.hdf5'
            self.log_object.log(self.file_object, "check point directory created")
            check_point = ModelCheckpoint(file_path, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
            start = datetime.now()
            self.log_object.log(self.file_object, f"Entered in to train method in Training class.Training start time {start}")
            history = model.fit(train_x, train_y,
                                validation_data=(val_x, val_y),
                                epochs=20,
                                callbacks=[check_point],
                                batch_size=64, shuffle=True)
            duration = datetime.now() - start
            self.log_object.log(self.file_object, f"Entered in to train method in Training class.Total time taken is {duration}")
            model.save('mech_tools_model.h5')
            self.log_object.log(self.file_object, f"Entered in to train method in Training class.model saved successfully")
            # accuracies
            plt.plot(history.history['accuracy'], label='train acc')
            plt.plot(history.history['val_accuracy'], label='val acc')
            plt.legend()
            plt.savefig('vgg-acc-rps-1.png')
            # BUGFIX: clear the figure, otherwise the loss plot below would
            # also contain the two accuracy curves drawn above.
            plt.clf()
            # loss
            plt.plot(history.history['loss'], label='train loss')
            plt.plot(history.history['val_loss'], label='val loss')
            plt.legend()
            plt.savefig('vgg-loss-rps-1.png')
            self.log_object.log(self.file_object, "Entered in to train method in Training class.model evaluation started")
            model.evaluate(test_x, test_y, batch_size=32)
            # predict
            y_pred = model.predict(test_x)
            y_pred = np.argmax(y_pred, axis=1)
            self.log_object.log(self.file_object, f"Entered in to train method in Training class.classification report {classification_report(y_pred, test_y)}")
            self.log_object.log(self.file_object, f"Entered in to train method in Training class.confusion matrix is{confusion_matrix(y_pred, test_y)}")
        except Exception as e:
            # logging the unsuccessful Training
            self.log_object.log(self.file_object, 'Unsuccessful End of Training')
            self.log_object.log(self.file_object, f"exception occured.exception is {e}")
            # BUGFIX: re-raise the original exception; `raise Exception`
            # discarded the failure type, message and traceback.
            raise
        finally:
            # BUGFIX: close the log file on both success and failure paths
            # (previously it leaked whenever training raised).
            self.file_object.close()
if __name__ == "__main__":
    # Class-labelled image folders for each split.
    train_path = "final_dataset/train"
    test_path = "final_dataset/test"
    val_path = "final_dataset/val"
    train_model = Training(train_path, test_path, val_path)
    # BUGFIX: the final statement had extraction metadata fused onto it
    # ("train_model.train() | training.py | ..."), which is a syntax error;
    # restored the bare call.
    train_model.train()
#
# example from CHiLL manual page 14
#
# permute 3 loops
#
from chill import *

# Input source and transformed output file for the CHiLL transformation.
source('permute123456.c')
destination('permute1modified.c')
# Operate on loop nest 0 of procedure mm.
procedure('mm')
loop(0)
# Declare known facts about the problem sizes.
known('ambn > 0')
known('an > 0')
known('bm > 0')
# Reorder the three loops using permutation [3, 1, 2].
permute([3,1,2])
| chill/examples/chill/testcases/permute1.script.py | 247 | example from CHiLL manual page 14 permute 3 loops | 50 | en | 0.552572 |
# -------------------------------------------------------------------------------
# Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
import math
import numpy
import pixiedust
# Module-level logger (not referenced by the functions visible in this module).
myLogger = pixiedust.getLogger(__name__)
def append(displayObject, arr, option):
    """Append *option* to *arr* if it is not None and the display accepts it."""
    if option is None:
        return
    if displayObject.acceptOption(option["name"]):
        arr.append(option)
def chartSize():
    """Return the 'Chart Size' slider option descriptor (50-100%, default 100)."""
    slider_meta = {
        'type': 'slider',
        'max': 100,
        'min': 50,
        'default': 100
    }
    return {'name': 'chartsize', 'description': 'Chart Size', 'metadata': slider_meta}
def clusterBy(displayObject):
    """Return the 'Cluster By' dropdown option descriptor.

    Offers every field that is not already used as a key or value of the
    chart; the validator rejects fields already taken by keys/values.
    """
    def _usable(field):
        # A field may cluster only if it exists and is not a key/value field.
        return (field in displayObject.getFieldNames()
                and field not in displayObject.getKeyFields()
                and field not in displayObject.getValueFields())
    candidates = sorted(f for f in displayObject.getFieldNames() if _usable(f))
    return {
        'name': 'clusterby',
        'description': 'Cluster By',
        'refresh': True,
        'metadata': {
            'type': "dropdown",
            'values': ["None"] + candidates,
            'default': ""
        },
        'validate': lambda option: (
            _usable(option),
            "Cluster By value is already used in keys or values for this chart")
    }
def timeSeries(displayObject):
    """Return the 'Time Series' checkbox option, or None when not applicable.

    Applicable only with exactly one key field whose column exists in the
    working frame; a datetime64 key only keeps the option if it is already
    enabled in the display options.
    """
    key_fields = displayObject.getKeyFields()
    if len(key_fields) != 1:
        return None
    frame = displayObject.getWorkingPandasDataFrame()
    key = key_fields[0]
    key_dtype = frame[key].dtype.type if key in frame else None
    if key_dtype is None:
        return None
    if key_dtype is numpy.datetime64 and displayObject.options.get("timeseries", 'false') != 'true':
        return None
    return {
        'name': 'timeseries',
        'description': 'Time Series',
        'metadata': {
            'type': 'checkbox',
            'default': 'false'
        }
    }
def barChart(displayObject):
    """Return the option descriptors for the bar-chart renderer."""
    options = [chartSize(), clusterBy(displayObject)]
    append(displayObject, options, timeSeries(displayObject))
    # Orientation is offered unless the renderer opted out via no_orientation.
    offers_orientation = (not hasattr(displayObject, 'no_orientation')
                          or displayObject.no_orientation is not True)
    if offers_orientation:
        options.append({
            'name': 'orientation',
            'description': 'Orientation',
            'metadata': {
                'type': 'dropdown',
                'values': ['vertical', 'horizontal'],
                'default': "vertical"
            }
        })
    # The grouping style only matters when several series will be drawn.
    multi_series = (displayObject.options.get("clusterby") != None
                    or len(displayObject.getValueFields()) > 1)
    if multi_series:
        options.append({
            'name': 'charttype',
            'description': 'Type',
            'metadata': {
                'type': 'dropdown',
                'values': ['grouped', 'stacked', 'subplots'],
                'default': "grouped"
            }
        })
    options.append({
        'name': 'legend',
        'description': 'Show legend',
        'metadata': {
            'type': 'checkbox',
            'default': "true"
        }
    })
    options.append({
        'name': 'sortby',
        'description': 'Sort By',
        'metadata': {
            'type': 'dropdown',
            'values': ['Keys ASC', 'Keys DESC', 'Values ASC', 'Values DESC'],
            'default': 'Keys ASC'
        }
    })
    return options
def lineChart(displayObject):
    """Return the option descriptors for the line-chart renderer."""
    options = [chartSize(), clusterBy(displayObject)]
    append(displayObject, options, timeSeries(displayObject))
    # The grouping style only matters when several series will be drawn.
    multi_series = (displayObject.options.get("clusterby") != None
                    or len(displayObject.getValueFields()) > 1)
    if multi_series:
        options.append({
            'name': 'lineChartType',
            'description': 'Type',
            'metadata': {
                'type': 'dropdown',
                'values': ['grouped', 'subplots'],
                'default': "grouped"
            }
        })
    # Legend and the two log-scale toggles are all off by default.
    for toggle_name, toggle_desc in (('legend', 'Show legend'),
                                     ('logx', 'log scale on x'),
                                     ('logy', 'log scale on y')):
        options.append({
            'name': toggle_name,
            'description': toggle_desc,
            'metadata': {
                'type': 'checkbox',
                'default': "false"
            }
        })
    return options
def histogram(displayObject):
    """Return the option descriptors for the histogram renderer."""
    options = [chartSize()]
    # A layout choice is only offered when several value series exist.
    if len(displayObject.getValueFields()) > 1:
        append(displayObject, options, {
            'name': 'histoChartType',
            'description': 'Type',
            'metadata': {
                'type': 'dropdown',
                'values': ['stacked', 'subplots'],
                'default': "stacked"
            }
        })
    frame = displayObject.getWorkingPandasDataFrame()
    row_count = len(frame.index)
    # sqrt(N) is the default bin count heuristic.
    default_bins = math.sqrt(row_count)
    distinct_values = len(frame.groupby(displayObject.getValueFields()[0]).size())
    options.append({
        'name': 'binsize',
        'description': 'Bin Count',
        'metadata': {
            'type': 'slider',
            'max': int(max(distinct_values, default_bins) + 10),
            'min': int(max((min(distinct_values, default_bins) - 10), 2)),
            'default': int(default_bins)
        }
    })
    return options
def pieChart(displayObject):
    """Return the option descriptors for the pie-chart renderer (size only)."""
    return [chartSize()]
def scatterPlot(displayObject):
    """Return the option descriptors for the scatter-plot renderer (size only)."""
    return [chartSize()]
# Registry of option builders, keyed by function name ("barChart", ...).
commonOptions = {}
for _option_builder in [barChart, lineChart, histogram, pieChart, scatterPlot]:
    commonOptions[_option_builder.__name__] = _option_builder
| pixiedust/display/chart/renderers/commonOptions.py | 6,347 | ------------------------------------------------------------------------------- Copyright IBM Corp. 2017 Licensed under the Apache License, Version 2.0 (the 'License'); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ------------------------------------------------------------------------------- | 705 | en | 0.735537 |
import asyncio
import io
from PIL import Image
from PIL import ImageDraw
from discord import Colour
import datetime
import urllib
import urllib.request
import aiohttp
import re
from datetime import datetime, date, timedelta
from calendar import timegm
import time
from utils.database import userDatabase, tibiaDatabase
from config import highscores_categories, network_retry_delay
from utils.messages import EMOJI
from .general import log, global_online_list, get_local_timezone
# Constants
# Error codes returned by the fetch helpers below.
ERROR_NETWORK = 0
ERROR_DOESNTEXIST = 1
ERROR_NOTINDATABASE = 2
# Tibia.com URLs:
url_character = "https://secure.tibia.com/community/?subtopic=characters&name="
url_guild = "https://secure.tibia.com/community/?subtopic=guilds&page=view&GuildName="
url_guild_online = "https://secure.tibia.com/community/?subtopic=guilds&page=view&onlyshowonline=1&"
url_house = "https://secure.tibia.com/community/?subtopic=houses&page=view&houseid={id}&world={world}"
# BUGFIX: the query parameter previously read "¤tpage" — mojibake of the
# HTML entity for "&curren" — which broke highscores pagination; the real
# tibia.com parameter is "&currentpage".
url_highscores = "https://secure.tibia.com/community/?subtopic=highscores&world={0}&list={1}&profession={2}&currentpage={3}"
# Accepted user-input aliases for each vocation.
KNIGHT = ["knight", "elite knight", "ek", "k", "kina", "eliteknight", "elite"]
PALADIN = ["paladin", "royal paladin", "rp", "p", "pally", "royalpaladin", "royalpally"]
DRUID = ["druid", "elder druid", "ed", "d", "elderdruid", "elder"]
SORCERER = ["sorcerer", "master sorcerer", "ms", "s", "sorc", "mastersorcerer", "master"]
MAGE = DRUID + SORCERER + ["mage"]
NO_VOCATION = ["no vocation", "no voc", "novoc", "nv", "n v", "none", "no", "n", "noob", "noobie", "rook", "rookie"]
# Display templates for highscore replies; formatted with
# (possessive prefix, value, rank).
highscore_format = {"achievements": "{0} __achievement points__ are **{1}**, on rank **{2}**",
                    "axe": "{0} __axe fighting__ level is **{1}**, on rank **{2}**",
                    "club": "{0} __club fighting__ level is **{1}**, on rank **{2}**",
                    "distance": "{0} __distance fighting__ level is **{1}**, on rank **{2}**",
                    "fishing": "{0} __fishing__ level is **{1}**, on rank **{2}**",
                    "fist": "{0} __fist fighting__ level is **{1}**, on rank **{2}**",
                    "loyalty": "{0} __loyalty points__ are **{1}**, on rank **{2}**",
                    "magic": "{0} __magic level__ is **{1}**, on rank **{2}**",
                    "magic_ek": "{0} __magic level__ is **{1}**, on rank **{2}** (knights)",
                    "magic_rp": "{0} __magic level__ is **{1}**, on rank **{2}** (paladins)",
                    "shielding": "{0} __shielding__ level is **{1}**, on rank **{2}**",
                    "sword": "{0} __sword fighting__ level is **{1}**, on rank **{2}**"}
# Known game-world names (hard-coded snapshot).
tibia_worlds = ["Amera", "Antica", "Astera", "Aurera", "Aurora", "Bellona", "Belobra", "Beneva", "Calmera", "Calva",
                "Calvera", "Candia", "Celesta", "Chrona", "Danera", "Dolera", "Efidia", "Eldera", "Ferobra", "Fidera",
                "Fortera", "Garnera", "Guardia", "Harmonia", "Honera", "Hydera", "Inferna", "Iona", "Irmada", "Julera",
                "Justera", "Kenora", "Kronera", "Laudera", "Luminera", "Magera", "Menera", "Morta", "Mortera",
                "Neptera", "Nerana", "Nika", "Olympa", "Osera", "Pacera", "Premia", "Pythera", "Guilia", "Refugia",
                "Rowana", "Secura", "Serdebra", "Shivera", "Silvera", "Solera", "Tavara", "Thera", "Umera", "Unitera",
                "Veludera", "Verlana", "Xantera", "Xylana", "Yanara", "Zanera", "Zeluna", "Honbra", "Noctera", "Vita",
                "Duna", "Relembra", "Helera", "Tortura", "Macabra"]
def get_character_url(name):
    """Gets a character's tibia.com URL"""
    # tibia.com expects ISO-8859-1 percent-encoding for character names.
    quoted_name = urllib.parse.quote(name.encode('iso-8859-1'))
    return "{0}{1}".format(url_character, quoted_name)
@asyncio.coroutine
def get_highscores(server, category, pagenum, profession=0, tries=5):
    """Gets a specific page of the highscores
    Each list element is a dictionary with the following keys: rank, name, value.
    May return ERROR_NETWORK"""
    url = url_highscores.format(server, category, profession, pagenum)
    # Fetch website
    try:
        page = yield from aiohttp.get(url)
        content = yield from page.text(encoding='ISO-8859-1')
    except Exception:
        if tries == 0:
            log.error("get_highscores: Couldn't fetch {0}, {1}, page {2}, network error.".format(server, category,
                                                                                                 pagenum))
            return ERROR_NETWORK
        else:
            # Consume one attempt and retry recursively after a delay.
            tries -= 1
            yield from asyncio.sleep(network_retry_delay)
            ret = yield from get_highscores(server, category, pagenum, profession, tries)
            return ret
    # Trimming content to reduce load
    try:
        start_index = content.index('<td style="width: 20%;" >Vocation</td>')
        # BUGFIX: this delimiter previously read '»' — apparently UTF-8
        # mojibake of '»' — so index() could never match and every call
        # burned all its retries before failing.
        end_index = content.index('<div style="float: left;"><b>» Pages:')
        content = content[start_index:end_index]
    except ValueError:
        # Website fetch was incomplete, due to a network error
        if tries == 0:
            log.error("get_highscores: Couldn't fetch {0}, {1}, page {2}, network error.".format(server, category,
                                                                                                 pagenum))
            return ERROR_NETWORK
        else:
            tries -= 1
            yield from asyncio.sleep(network_retry_delay)
            ret = yield from get_highscores(server, category, pagenum, profession, tries)
            return ret
    if category == "loyalty":
        # The loyalty listing carries an extra table column, hence the wider pattern.
        regex_deaths = r'<td>([^<]+)</TD><td><a href="https://secure.tibia.com/community/\?subtopic=characters&name=[^"]+" >([^<]+)</a></td><td>[^<]+</TD><td>[^<]+</TD><td style="text-align: right;" >([^<]+)</TD></TR>'
        pattern = re.compile(regex_deaths, re.MULTILINE + re.S)
        matches = re.findall(pattern, content)
        scoreList = []
        for m in matches:
            # Strip thousands separators from the value column.
            scoreList.append({'rank': m[0], 'name': m[1], 'value': m[2].replace(',', '')})
    else:
        regex_deaths = r'<td>([^<]+)</TD><td><a href="https://secure.tibia.com/community/\?subtopic=characters&name=[^"]+" >([^<]+)</a></td><td style="text-align: right;" >([^<]+)</TD></TR>'
        pattern = re.compile(regex_deaths, re.MULTILINE + re.S)
        matches = re.findall(pattern, content)
        scoreList = []
        for m in matches:
            scoreList.append({'rank': m[0], 'name': m[1], 'value': m[2].replace(',', '')})
    return scoreList
@asyncio.coroutine
def get_server_online(server, tries=5):
    """Returns a list of all the online players in current server.

    Each list element is a dictionary with the following keys: name, level.
    On persistent network errors the list gathered so far (usually empty) is
    returned instead of an error sentinel, so callers cannot distinguish an
    empty world from a failed fetch.
    """
    server = server.capitalize()
    url = 'https://secure.tibia.com/community/?subtopic=worlds&world=' + server
    onlineList = []
    # Fetch website
    try:
        page = yield from aiohttp.get(url)
        content = yield from page.text(encoding='ISO-8859-1')
    except Exception:
        if tries == 0:
            log.error("getServerOnline: Couldn't fetch {0}, network error.".format(server))
            # This should return ERROR_NETWORK, but requires error handling where this function is used
            return onlineList
        else:
            # Retry the whole call after a short delay, consuming one attempt.
            tries -= 1
            yield from asyncio.sleep(network_retry_delay)
            ret = yield from get_server_online(server, tries)
            return ret
    # Re-fetch while the body came back empty (partial/empty responses).
    while not content and tries > 0:
        try:
            page = yield from aiohttp.get(url)
            content = yield from page.text(encoding='ISO-8859-1')
        except Exception:
            tries -= 1
    # Trimming content to reduce load
    try:
        start_index = content.index('<div class="BoxContent"')
        end_index = content.index('<div id="ThemeboxesColumn" >')
        content = content[start_index:end_index]
    except ValueError:
        # Website fetch was incomplete due to a network error
        if tries == 0:
            log.error("getServerOnline: Couldn't fetch {0}, network error.".format(server))
            # This should return ERROR_NETWORK, but requires error handling where this function is used
            return onlineList
        else:
            tries -= 1
            yield from asyncio.sleep(network_retry_delay)
            ret = yield from get_server_online(server, tries)
            return ret
    # Each match is a (url-encoded name, level) pair from the online-players table.
    regex_members = r'<a href="https://secure.tibia.com/community/\?subtopic=characters&name=(.+?)" >.+?</a></td><td style="width:10%;" >(.+?)</td>'
    pattern = re.compile(regex_members, re.MULTILINE + re.S)
    m = re.findall(pattern, content)
    # Check if list is empty
    if m:
        # Building dictionary list from online players
        for (name, level) in m:
            name = urllib.parse.unquote_plus(name)
            onlineList.append({'name': name, 'level': int(level)})
    return onlineList
@asyncio.coroutine
def get_guild_online(guildname, titlecase=True, tries=5):
    """Returns a guild's world and online member list in a dictionary.

    The dictionary contains the following keys: name, logo_url, world and members
    (plus guildhall when one is listed on the guild page).
    The key members contains a list where each element is a dictionary with the following keys:
    rank, name, title, vocation, level, joined.
    Guilds are case sensitive on tibia.com so guildstats.eu is checked for correct case.
    May return ERROR_DOESNTEXIST or ERROR_NETWORK accordingly."""
    gstats_url = 'http://guildstats.eu/guild?guild=' + urllib.parse.quote(guildname)
    guild = {}
    # Fix casing using guildstats.eu if needed
    # Sorry guildstats.eu :D
    if not titlecase:
        # Fetch website
        try:
            page = yield from aiohttp.get(gstats_url)
            content = yield from page.text(encoding='ISO-8859-1')
        except Exception:
            if tries == 0:
                log.error("getGuildOnline: Couldn't fetch {0} from guildstats.eu, network error.".format(guildname))
                return ERROR_NETWORK
            else:
                # Retry recursively, consuming one attempt.
                tries -= 1
                yield from asyncio.sleep(network_retry_delay)
                ret = yield from get_guild_online(guildname, titlecase, tries)
                return ret
        # Make sure we got a healthy fetch
        try:
            content.index('<div class="footer">')
        except ValueError:
            # Website fetch was incomplete, due to a network error
            if tries == 0:
                log.error("getGuildOnline: Couldn't fetch {0} from guildstats.eu, network error.".format(guildname))
                return ERROR_NETWORK
            else:
                tries -= 1
                yield from asyncio.sleep(network_retry_delay)
                ret = yield from get_guild_online(guildname, titlecase, tries)
                return ret
        # Check if the guild doesn't exist
        if "<div>Sorry!" in content:
            return ERROR_DOESNTEXIST
        # Failsafe in case guildstats.eu changes their websites format
        try:
            content.index("General info")
            content.index("Recruitment")
        except Exception:
            log.error("getGuildOnline: -IMPORTANT- guildstats.eu seems to have changed their websites format.")
            return ERROR_NETWORK
        startIndex = content.index("General info")
        endIndex = content.index("Recruitment")
        content = content[startIndex:endIndex]
        # The "set=" link carries the guild name with tibia.com's exact casing.
        m = re.search(r'<a href="set=(.+?)"', content)
        if m:
            guildname = urllib.parse.unquote_plus(m.group(1))
    else:
        guildname = guildname.title()
    tibia_url = 'https://secure.tibia.com/community/?subtopic=guilds&page=view&GuildName=' + urllib.parse.quote(
        guildname) + '&onlyshowonline=1'
    # Fetch website
    try:
        page = yield from aiohttp.get(tibia_url)
        content = yield from page.text(encoding='ISO-8859-1')
    except Exception:
        if tries == 0:
            log.error("getGuildOnline: Couldn't fetch {0}, network error.".format(guildname))
            return ERROR_NETWORK
        else:
            tries -= 1
            yield from asyncio.sleep(network_retry_delay)
            ret = yield from get_guild_online(guildname, titlecase, tries)
            return ret
    # Trimming content to reduce load and making sure we got a healthy fetch
    try:
        startIndex = content.index('<div class="BoxContent"')
        endIndex = content.index('<div id="ThemeboxesColumn" >')
        content = content[startIndex:endIndex]
    except ValueError:
        # Website fetch was incomplete, due to a network error
        if tries == 0:
            log.error("getGuildOnline: Couldn't fetch {0}, network error.".format(guildname))
            return ERROR_NETWORK
        else:
            tries -= 1
            yield from asyncio.sleep(network_retry_delay)
            ret = yield from get_guild_online(guildname, titlecase, tries)
            return ret
    # Check if the guild doesn't exist
    # Tibia.com has no search function, so there's no guild doesn't exist page cause you're not supposed to get to a
    # guild that doesn't exists. So the message displayed is "An internal error has ocurred. Please try again later!".
    if '<div class="Text" >Error</div>' in content:
        if titlecase:
            # Retry once with guildstats.eu-based casing before giving up.
            ret = yield from get_guild_online(guildname, False)
            return ret
        else:
            return ERROR_DOESNTEXIST
    # Regex pattern to fetch world, guildhall and founding date
    m = re.search(r'founded on (\w+) on ([^.]+)', content)
    if m:
        guild['world'] = m.group(1)
    m = re.search(r'Their home on \w+ is ([^\.]+)', content)
    if m:
        guild["guildhall"] = m.group(1)
    # Logo URL
    m = re.search(r'<IMG SRC=\"([^\"]+)\" W', content)
    if m:
        guild['logo_url'] = m.group(1)
    # Regex pattern to fetch members
    regex_members = r'<TR BGCOLOR=#[\dABCDEF]+><TD>(.+?)</TD>\s</td><TD><A HREF="https://secure.tibia.com/community/\?subtopic=characters&name=(.+?)">.+?</A> *\(*(.*?)\)*</TD>\s<TD>(.+?)</TD>\s<TD>(.+?)</TD>\s<TD>(.+?)</TD>'
    pattern = re.compile(regex_members, re.MULTILINE + re.S)
    m = re.findall(pattern, content)
    guild['members'] = []
    # Check if list is empty
    if m:
        # Building dictionary list from members
        for (rank, name, title, vocation, level, joined) in m:
            # A placeholder rank cell is normalized to the empty string.
            rank = '' if (rank == ' ') else rank
            name = urllib.parse.unquote_plus(name)
            joined = joined.replace(' ', '-')
            guild['members'].append({'rank': rank, 'name': name, 'title': title,
                                     'vocation': vocation, 'level': level, 'joined': joined})
    guild['name'] = guildname
    return guild
@asyncio.coroutine
def get_character(name, tries=5):
    """Returns a dictionary with a player's info

    The dictionary contains the following keys: name, deleted, level, vocation, world, residence,
    married, gender, guild, last_login, deaths, chars*.
    *chars is list that contains other characters in the same account (if not hidden).
    Each list element is dictionary with the keys: name, world.
    Also syncs vocation/name/world changes back into the local chars database.
    May return ERROR_DOESNTEXIST or ERROR_NETWORK accordingly."""
    try:
        url = url_character + urllib.parse.quote(name.encode('iso-8859-1'))
    except UnicodeEncodeError:
        # Name can't be encoded for tibia.com, so the character can't exist.
        return ERROR_DOESNTEXIST
    char = dict()
    # Fetch website
    try:
        page = yield from aiohttp.get(url)
        content = yield from page.text(encoding='ISO-8859-1')
    except Exception:
        if tries == 0:
            log.error("getPlayer: Couldn't fetch {0}, network error.".format(name))
            return ERROR_NETWORK
        else:
            # Retry recursively, consuming one attempt.
            tries -= 1
            yield from asyncio.sleep(network_retry_delay)
            ret = yield from get_character(name, tries)
            return ret
    # Trimming content to reduce load
    try:
        startIndex = content.index('<div class="BoxContent"')
        endIndex = content.index("<B>Search Character</B>")
        content = content[startIndex:endIndex]
    except ValueError:
        # Website fetch was incomplete, due to a network error
        if tries == 0:
            log.error("getPlayer: Couldn't fetch {0}, network error.".format(name))
            return ERROR_NETWORK
        else:
            tries -= 1
            yield from asyncio.sleep(network_retry_delay)
            ret = yield from get_character(name, tries)
            return ret
    # Check if player exists
    if "Name:</td><td>" not in content:
        return ERROR_DOESNTEXIST
    # TODO: Is there a way to reduce this part?
    # Name
    m = re.search(r'Name:</td><td>([^<,]+)', content)
    if m:
        char['name'] = m.group(1).strip()
    # Deleted
    m = re.search(r', will be deleted at ([^<]+)', content)
    if m:
        char['deleted'] = True
    # Vocation
    m = re.search(r'Vocation:</td><td>([^<]+)', content)
    if m:
        char['vocation'] = m.group(1)
    # Level
    m = re.search(r'Level:</td><td>(\d+)', content)
    if m:
        char['level'] = int(m.group(1))
    # Use database levels for online characters
    # (the locally tracked level can be fresher than the website's)
    for onchar in global_online_list:
        if onchar.split("_", 1)[1] == char['name']:
            c = userDatabase.cursor()
            c.execute("SELECT last_level FROM chars WHERE name LIKE ?", (char['name'],))
            result = c.fetchone()
            if result:
                char['level'] = abs(result["last_level"])
            c.close()
            break
    # World
    m = re.search(r'World:</td><td>([^<]+)', content)
    if m:
        char['world'] = m.group(1)
    # Residence (City)
    m = re.search(r'Residence:</td><td>([^<]+)', content)
    if m:
        char['residence'] = m.group(1)
    # Marriage
    m = re.search(r'Married To:</td><td>?.+name=([^"]+)', content)
    if m:
        char['married'] = urllib.parse.unquote_plus(m.group(1), encoding='ISO-8859-1')
    # Sex
    m = re.search(r'Sex:</td><td>([^<]+)', content)
    if m:
        if m.group(1) == 'male':
            char['gender'] = 'male'
        else:
            char['gender'] = 'female'
    # Guild rank
    m = re.search(r'Membership:</td><td>([^<]+)\sof the', content)
    if m:
        char['rank'] = m.group(1)
    # Guild membership
    m = re.search(r'GuildName=.*?([^&]+).+', content)
    if m:
        char['guild'] = urllib.parse.unquote_plus(m.group(1))
    # House
    m = re.search(r'House:</td><td> <a href=\"https://secure\.tibia\.com/community/\?subtopic=houses.+houseid=(\d+)'
                  r'&character=(?:[^&]+)&action=characters\" >([^<]+)</a> \(([^(]+)\) is paid until '
                  r'([A-z]+).*?;(\d+).*?;(\d+)', content)
    if m:
        char["house_id"] = m.group(1)
        char["house"] = m.group(2)
        char["house_town"] = m.group(3)
    # Last login
    m = re.search(r'Last Login:</td><td>([^<]+)', content)
    if m:
        lastLogin = m.group(1).replace(" ", " ").replace(",", "")
        if "never" in lastLogin:
            char['last_login'] = None
        else:
            char['last_login'] = lastLogin
    # Discord owner
    c = userDatabase.cursor()
    c.execute("SELECT user_id FROM chars WHERE name LIKE ?", (char["name"],))
    result = c.fetchone()
    char["owner_id"] = None if result is None else result["user_id"]
    # Update name, vocation and world for chars in database if necessary
    c = userDatabase.cursor()
    c.execute("SELECT vocation, name, id, world FROM chars WHERE name LIKE ?", (name,))
    result = c.fetchone()
    if result:
        if result["vocation"] != char['vocation']:
            c.execute("UPDATE chars SET vocation = ? WHERE id = ?", (char['vocation'], result["id"],))
            log.info("{0}'s vocation was set to {1} from {2} during get_character()".format(char['name'],
                                                                                            char['vocation'],
                                                                                            result["vocation"]))
        if result["name"] != char["name"]:
            c.execute("UPDATE chars SET name = ? WHERE id = ?", (char['name'], result["id"],))
            log.info("{0} was renamed to {1} during get_character()".format(result["name"], char['name']))
        if result["world"] != char["world"]:
            c.execute("UPDATE chars SET world = ? WHERE id = ?", (char['world'], result["id"],))
            log.info("{0}'s world was set to {1} from {2} during get_character()".format(char['name'],
                                                                                         char['world'],
                                                                                         result["world"]))
    # Skills from highscores (copied from the local chars table when known)
    c = userDatabase.cursor()
    for category in highscores_categories:
        c.execute("SELECT "+category+","+category+"_rank FROM chars WHERE name LIKE ?", (name,))
        result = c.fetchone()
        if result:
            if result[category] is not None and result[category+'_rank'] is not None:
                char[category] = result[category]
                char[category+'_rank'] = result[category+'_rank']
    char["deaths"] = []
    regex_deaths = r'valign="top" >([^<]+)</td><td>(.+?)</td></tr>'
    pattern = re.compile(regex_deaths, re.MULTILINE + re.S)
    matches = re.findall(pattern, content)
    for m in matches:
        death_time = m[0].replace(' ', ' ').replace(",", "")
        death_level = ""
        death_killer = ""
        death_by_player = False
        # Entries containing "Died" and other entries use different markup,
        # so each branch applies its own pattern.
        if m[1].find("Died") != -1:
            regex_deathinfo_monster = r'Level (\d+) by ([^.]+)'
            pattern = re.compile(regex_deathinfo_monster, re.MULTILINE + re.S)
            m_deathinfo_monster = re.search(pattern, m[1])
            if m_deathinfo_monster:
                death_level = m_deathinfo_monster.group(1)
                death_killer = m_deathinfo_monster.group(2)
        else:
            regex_deathinfo_player = r'Level (\d+) by .+?name=([^"]+)'
            pattern = re.compile(regex_deathinfo_player, re.MULTILINE + re.S)
            m_deathinfo_player = re.search(pattern, m[1])
            if m_deathinfo_player:
                death_level = m_deathinfo_player.group(1)
                death_killer = urllib.parse.unquote_plus(m_deathinfo_player.group(2))
                death_by_player = True
        try:
            char["deaths"].append({'time': death_time, 'level': int(death_level), 'killer': death_killer,
                                   'byPlayer': death_by_player})
        except ValueError:
            # Some pvp deaths have no level, so they are raising a ValueError, they will be ignored for now.
            continue
    # Other chars
    # note that an empty char list means the character is hidden
    # otherwise you'd have at least the same char in the list
    char['chars'] = []
    try:
        # See if there is a character list
        startIndex = content.index("<B>Characters</B>")
        content = content[startIndex:]
        # Find characters
        regex_chars = r'<TD WIDTH=10%><NOBR>([^<]+)[^?]+.+?VALUE=\"([^\"]+)'
        pattern = re.compile(regex_chars, re.MULTILINE + re.S)
        m = re.findall(pattern, content)
        if m:
            for (world, name) in m:
                name = urllib.parse.unquote_plus(name)
                char['chars'].append({'name': name, 'world': world})
    except Exception:
        pass
    return char
def get_rashid_city() -> str:
    """Returns the name of the city Rashid is currently in."""
    # Rashid rotates cities by weekday. A Tibia day starts at server save
    # (10am), so shift the clock back 10 hours after moving to Tibia's zone.
    tz_offset = get_tibia_time_zone() - get_local_timezone()
    tibia_now = datetime.now() + timedelta(hours=tz_offset - 10)
    cities = ["Svargrond",
              "Liberty Bay",
              "Port Hope",
              "Ankrahmun",
              "Darashia",
              "Edron",
              "Carlin"]
    return cities[tibia_now.weekday()]
def get_monster(name):
    """Returns a dictionary with a monster's info, if no exact match was found, it returns a list of suggestions.

    The dictionary has the following keys: name, id, hp, exp, maxdmg, elem_physical, elem_holy,
    elem_death, elem_fire, elem_energy, elem_ice, elem_earth, elem_drown, elem_lifedrain, senseinvis,
    arm, image, plus a "loot" list of {name, percentage, min, max} rows.
    Returns None when nothing matches."""
    # Reading monster database
    c = tibiaDatabase.cursor()
    # try/finally starts right after cursor creation so the cursor is also
    # closed on the early returns below (previously it leaked on those paths).
    try:
        c.execute("SELECT * FROM Creatures WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%"+name+"%",))
        result = c.fetchall()
        if len(result) == 0:
            return None
        elif result[0]["title"].lower() == name.lower() or len(result) == 1:
            monster = result[0]
        else:
            # Ambiguous search: hand back candidate titles for the caller.
            return [x['title'] for x in result]
        # Normalize unknown/invalid health values to None.
        if monster['health'] is None or monster['health'] < 1:
            monster['health'] = None
        c.execute("SELECT Items.title as name, percentage, min, max "
                  "FROM CreatureDrops, Items "
                  "WHERE Items.id = CreatureDrops.itemid AND creatureid = ? "
                  "ORDER BY percentage DESC",
                  (monster["id"],))
        monster["loot"] = c.fetchall()
        return monster
    finally:
        c.close()
def get_item(name):
    """Returns a dictionary containing an item's info, if no exact match was found, it returns a list of suggestions.

    The dictionary has the following keys: name, look_text, npcs_sold*, value_sell, npcs_bought*, value_buy,
    dropped_by, quests (and possibly color).
    *npcs_sold and npcs_bought are lists, each element is a dictionary with the keys: name, city.
    Returns None when nothing matches."""
    # Reading item database
    c = tibiaDatabase.cursor()
    # try/finally starts right after cursor creation so the cursor is also
    # closed on the early returns below (previously it leaked on those paths).
    try:
        # Search query
        c.execute("SELECT * FROM Items WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%" + name + "%",))
        result = c.fetchall()
        if len(result) == 0:
            return None
        elif result[0]["title"].lower() == name.lower() or len(result) == 1:
            item = result[0]
        else:
            # Ambiguous search: hand back candidate titles for the caller.
            return [x['title'] for x in result]
        # Checking if item exists
        if item is not None:
            # Checking NPCs that buy the item
            c.execute("SELECT NPCs.title, city, value "
                      "FROM Items, SellItems, NPCs "
                      "WHERE Items.name LIKE ? AND SellItems.itemid = Items.id AND NPCs.id = vendorid "
                      "ORDER BY value DESC", (name,))
            npcs = []
            value_sell = None
            for npc in c:
                name = npc["title"]
                city = npc["city"].title()
                # Only NPCs sharing the best price are kept (rows are sorted).
                if value_sell is None:
                    value_sell = npc["value"]
                elif npc["value"] != value_sell:
                    break
                # Replacing cities for special npcs and adding colors
                if name == 'Alesar' or name == 'Yaman':
                    city = 'Green Djinn\'s Fortress'
                    item["color"] = Colour.green()
                elif name == 'Nah\'Bob' or name == 'Haroun':
                    city = 'Blue Djinn\'s Fortress'
                    item["color"] = Colour.blue()
                elif name == 'Rashid':
                    city = get_rashid_city()
                    item["color"] = Colour(0xF0E916)
                elif name == 'Yasir':
                    city = 'his boat'
                elif name == 'Briasol':
                    item["color"] = Colour(0xA958C4)
                npcs.append({"name": name, "city": city})
            item['npcs_sold'] = npcs
            item['value_sell'] = value_sell
            # Checking NPCs that sell the item
            c.execute("SELECT NPCs.title, city, value "
                      "FROM Items, BuyItems, NPCs "
                      "WHERE Items.name LIKE ? AND BuyItems.itemid = Items.id AND NPCs.id = vendorid "
                      "ORDER BY value ASC", (name,))
            npcs = []
            value_buy = None
            for npc in c:
                name = npc["title"]
                city = npc["city"].title()
                if value_buy is None:
                    value_buy = npc["value"]
                elif npc["value"] != value_buy:
                    break
                # Replacing cities for special npcs
                if name == 'Alesar' or name == 'Yaman':
                    city = 'Green Djinn\'s Fortress'
                elif name == 'Nah\'Bob' or name == 'Haroun':
                    city = 'Blue Djinn\'s Fortress'
                elif name == 'Rashid':
                    offset = get_tibia_time_zone() - get_local_timezone()
                    # Server save is at 10am, so in tibia a new day starts at that hour
                    tibia_time = datetime.now() + timedelta(hours=offset - 10)
                    city = [
                        "Svargrond",
                        "Liberty Bay",
                        "Port Hope",
                        "Ankrahmun",
                        "Darashia",
                        "Edron",
                        "Carlin"][tibia_time.weekday()]
                elif name == 'Yasir':
                    city = 'his boat'
                npcs.append({"name": name, "city": city})
            item['npcs_bought'] = npcs
            item['value_buy'] = value_buy
            # Get creatures that drop it
            c.execute("SELECT Creatures.title as name, CreatureDrops.percentage "
                      "FROM CreatureDrops, Creatures "
                      "WHERE CreatureDrops.creatureid = Creatures.id AND CreatureDrops.itemid = ? "
                      "ORDER BY percentage DESC", (item["id"],))
            item["dropped_by"] = c.fetchall()
            # Checking quest rewards:
            c.execute("SELECT Quests.title FROM Quests, QuestRewards "
                      "WHERE Quests.id = QuestRewards.questid and itemid = ?", (item["id"],))
            quests = c.fetchall()
            item["quests"] = [quest["title"] for quest in quests]
            return item
        return None
    finally:
        c.close()
def parse_tibia_time(tibia_time: str) -> datetime:
    """Gets a time object from a time string from tibia.com.

    Only CET/CEST timestamps are understood; the result is shifted into the
    machine's local timezone. Returns None when parsing fails or the zone
    abbreviation is unknown.
    """
    cleaned = tibia_time.replace(",", "").replace(" ", " ")
    # Derive the machine's UTC offset from local vs. GMT time tuples.
    local_tuple = time.localtime()
    gmt_tuple = time.gmtime(time.mktime(local_tuple))
    local_utc_offset = (timegm(local_tuple) - timegm(gmt_tuple)) / 60 / 60
    # The timezone abbreviation sits in the last characters of the string.
    tz = cleaned[-4:].strip()
    try:
        # strptime can't handle CET/CEST, so the zone is stripped before parsing.
        parsed = datetime.strptime(cleaned[:-4].strip(), "%b %d %Y %H:%M:%S")
    except ValueError:
        log.error("parse_tibia_time: couldn't parse '{0}'".format(cleaned))
        return None
    if tz == "CET":
        utc_offset = 1
    elif tz == "CEST":
        utc_offset = 2
    else:
        log.error("parse_tibia_time: unknown timezone for '{0}'".format(cleaned))
        return None
    # Re-base from the server's offset onto the local offset.
    return parsed + timedelta(hours=(local_utc_offset - utc_offset))
def get_stats(level: int, vocation: str):
    """Returns a dictionary with the stats for a character of a certain vocation and level.

    The dictionary has the keys: vocation, hp, mp, cap, exp, exp_tnl.
    Invalid input yields one of the strings: "bad level", "low level",
    "high level" or "bad vocation".
    """
    try:
        level = int(level)
    except ValueError:
        return "bad level"
    if level <= 0:
        return "low level"
    if level > 2000:
        return "high level"
    vocation = vocation.lower().strip()
    if vocation in KNIGHT:
        vocation = "knight"
        hp = (level - 8) * 15 + 185
        mp = level * 5 + 50
        cap = (level - 8) * 25 + 470
    elif vocation in PALADIN:
        vocation = "paladin"
        hp = (level - 8) * 10 + 185
        mp = (level - 8) * 15 + 90
        cap = (level - 8) * 20 + 470
    elif vocation in MAGE:
        vocation = "mage"
        hp = level * 5 + 145
        mp = (level - 8) * 30 + 90
        cap = level * 10 + 390
    elif vocation in NO_VOCATION:
        vocation = "no vocation"
    else:
        return "bad vocation"
    # Below level 8, and for rookgaardians, every vocation shares rook stats.
    if level < 8 or vocation == "no vocation":
        hp = level * 5 + 145
        mp = level * 5 + 50
        cap = level * 10 + 390
    # Total experience at this level, and experience to the next level.
    exp = (50 * pow(level, 3) / 3) - 100 * pow(level, 2) + (850 * level / 3) - 200
    exp_tnl = 50 * level * level - 150 * level + 200
    return {"vocation": vocation, "hp": hp, "mp": mp, "cap": cap, "exp": int(exp), "exp_tnl": exp_tnl}
def get_share_range(level: int):
    """Returns the share range for a specific level.

    The returned value is a tuple with the lower limit and the upper limit
    in that order (2/3 and 3/2 of the level, rounded)."""
    lower_limit = int(round(level * 2 / 3, 0))
    upper_limit = int(round(level * 3 / 2, 0))
    return lower_limit, upper_limit
# TODO: Improve formatting to match /monster and /item
def get_spell(name):
    """Returns a dictionary containing a spell's info, a list of possible matches or None.

    A spell is matched by its words or its name. The returned dictionary gains
    an "npcs" list whose entries have: name, city, knight, paladin, sorcerer, druid.
    """
    c = tibiaDatabase.cursor()
    try:
        like = "%" + name + "%"
        c.execute("""SELECT * FROM Spells WHERE words LIKE ? OR name LIKE ? ORDER BY LENGTH(name) LIMIT 15""",
                  (like, like))
        matches = c.fetchall()
        if not matches:
            return None
        first = matches[0]
        exact = (first["name"].lower() == name.lower()
                 or first["words"].lower() == name.lower())
        if not exact and len(matches) != 1:
            # Ambiguous search: offer "name (words)" suggestions instead.
            return ["{name} ({words})".format(**x) for x in matches]
        spell = first
        spell["npcs"] = []
        c.execute("""SELECT NPCs.title as name, NPCs.city, SpellNPCs.knight, SpellNPCs.paladin,
                  SpellNPCs.sorcerer, SpellNPCs.druid FROM NPCs, SpellNPCs
                  WHERE SpellNPCs.spellid = ? AND SpellNPCs.npcid = NPCs.id""", (spell["id"],))
        for npc in c.fetchall():
            npc["city"] = npc["city"].title()
            spell["npcs"].append(npc)
        return spell
    finally:
        c.close()
def get_npc(name):
    """Returns a dictionary containing a NPC's info, a list of possible matches or None.

    The dictionary gains the keys: image (always 0), sell_items and buy_items
    (lists of {name, category, value} rows)."""
    c = tibiaDatabase.cursor()
    try:
        # search query
        c.execute("SELECT * FROM NPCs WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%" + name + "%",))
        result = c.fetchall()
        if len(result) == 0:
            return None
        # Bug fix: this previously compared against the bound method `name.lower`
        # (always False) instead of calling `name.lower()`, so an exact match
        # among several results was never detected.
        elif result[0]["title"].lower() == name.lower() or len(result) == 1:
            npc = result[0]
        else:
            return [x["title"] for x in result]
        npc["image"] = 0
        c.execute("SELECT Items.name, Items.category, BuyItems.value FROM BuyItems, Items "
                  "WHERE Items.id = BuyItems.itemid AND BuyItems.vendorid = ?", (npc["id"],))
        npc["sell_items"] = c.fetchall()
        c.execute("SELECT Items.name, Items.category, SellItems.value FROM SellItems, Items "
                  "WHERE Items.id = SellItems.itemid AND SellItems.vendorid = ?", (npc["id"],))
        npc["buy_items"] = c.fetchall()
        return npc
    finally:
        c.close()
@asyncio.coroutine
def get_house(name, world = None):
    """Returns a dictionary containing a house's info, a list of possible matches or None.

    If world is specified, it will also find the current status of the house in that world,
    adding keys such as: world, url, fetch, rent, status, owner, until, transfer_date,
    transferee, transfer_price, auction_end, top_bid, top_bidder.
    "fetch" tells whether the live status lookup succeeded."""
    c = tibiaDatabase.cursor()
    try:
        # Search query
        c.execute("SELECT * FROM Houses WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15", ("%" + name + "%",))
        result = c.fetchall()
        if len(result) == 0:
            return None
        elif result[0]["name"].lower() == name.lower() or len(result) == 1:
            house = result[0]
        else:
            return [x['name'] for x in result]
        if world is None or world not in tibia_worlds:
            # No live status requested (or unknown world): DB info only.
            house["fetch"] = False
            return house
        house["world"] = world
        house["url"] = url_house.format(id=house["id"], world=world)
        tries = 5
        # Fetch-and-parse loop; retried in place instead of recursing.
        while True:
            try:
                page = yield from aiohttp.get(house["url"])
                content = yield from page.text(encoding='ISO-8859-1')
            except Exception:
                if tries == 0:
                    log.error("get_house: Couldn't fetch {0} (id {1}) in {2}, network error.".format(house["name"],
                                                                                                     house["id"],
                                                                                                     world))
                    house["fetch"] = False
                    break
                else:
                    tries -= 1
                    yield from asyncio.sleep(network_retry_delay)
                    continue
            # Trimming content to reduce load
            try:
                start_index = content.index("\"BoxContent\"")
                end_index = content.index("</TD></TR></TABLE>")
                content = content[start_index:end_index]
            except ValueError:
                if tries == 0:
                    log.error("get_house: Couldn't fetch {0} (id {1}) in {2}, network error.".format(house["name"],
                                                                                                     house["id"],
                                                                                                     world))
                    house["fetch"] = False
                    break
                else:
                    tries -= 1
                    yield from asyncio.sleep(network_retry_delay)
                    continue
            house["fetch"] = True
            m = re.search(r'monthly rent is <B>(\d+)', content)
            if m:
                house['rent'] = int(m.group(1))
            if "rented" in content:
                house["status"] = "rented"
                m = re.search(r'rented by <A?.+name=([^\"]+).+e has paid the rent until <B>([^<]+)</B>', content)
                if m:
                    house["owner"] = urllib.parse.unquote_plus(m.group(1))
                    house["until"] = m.group(2).replace(" ", " ")
                if "move out" in content:
                    # Owner is moving out; the house is being passed to someone.
                    house["status"] = "transferred"
                    m = re.search(r'will move out on <B>([^<]+)</B> \(time of daily server save\) and will pass the '
                                  r'house to <A.+name=([^\"]+).+ for <B>(\d+) gold', content)
                    if m:
                        house["transfer_date"] =house["until"] = m.group(1).replace(" ", " ")
                        house["transferee"] = urllib.parse.unquote_plus(m.group(2))
                        house["transfer_price"] = int(m.group(3))
            elif "auctioned" in content:
                house["status"] = "auctioned"
                if ". No bid has" in content:
                    # Auctioned with no bids yet counts as empty.
                    house["status"] = "empty"
                    break
                m = re.search(r'The auction will end at <B>([^\<]+)</B>\. '
                              r'The highest bid so far is <B>(\d+).+ by .+name=([^\"]+)\"', content)
                if m:
                    house["auction_end"] = m.group(1).replace(" ", " ")
                    house["top_bid"] = int(m.group(2))
                    house["top_bidder"] = urllib.parse.unquote_plus(m.group(3))
            break
        return house
    finally:
        c.close()
def get_achievement(name):
    """Returns an achievement (dictionary), a list of name suggestions, or None
    when nothing matches."""
    c = tibiaDatabase.cursor()
    try:
        c.execute("SELECT * FROM Achievements WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15", ("%" + name + "%",))
        rows = c.fetchall()
        if not rows:
            return None
        if rows[0]["name"].lower() == name.lower() or len(rows) == 1:
            return rows[0]
        # Ambiguous search: hand back candidate names for the caller.
        return [x['name'] for x in rows]
    finally:
        c.close()
def get_tibia_time_zone() -> int:
    """Returns Germany's UTC offset (1 or 2), considering their daylight saving time dates."""
    # Approximate "now" in Germany by shifting UTC one hour forward.
    germany_now = datetime.utcnow() + timedelta(hours=1)
    germany_date = date(germany_now.year, germany_now.month, germany_now.day)
    year = germany_now.year
    # Last-Sunday-of-March / last-Sunday-of-October DST boundary formula.
    dst_start = date(year, 3, (31 - (int(((5 * year) / 4) + 4) % int(7))))
    dst_end = date(year, 10, (31 - (int(((5 * year) / 4) + 1) % int(7))))
    return 2 if dst_start < germany_date < dst_end else 1
def get_voc_abb(vocation: str) -> str:
    """Given a vocation name, it returns an abbreviated string.

    Unknown vocations fall back to 'N' (none)."""
    lookup = {'none': 'N', 'druid': 'D', 'sorcerer': 'S', 'paladin': 'P', 'knight': 'K', 'elder druid': 'ED',
              'master sorcerer': 'MS', 'royal paladin': 'RP', 'elite knight': 'EK'}
    return lookup.get(vocation.lower(), 'N')
def get_voc_emoji(vocation: str) -> str:
    """Given a vocation name, returns a emoji representing it.

    Unknown vocations fall back to the question-mark emoji."""
    lookup = {'none': EMOJI[":hatching_chick:"], 'druid': EMOJI[":snowflake:"], 'sorcerer': EMOJI[":flame:"],
              'paladin': EMOJI[":archery:"], 'knight': EMOJI[":shield:"], 'elder druid': EMOJI[":snowflake:"],
              'master sorcerer': EMOJI[":flame:"], 'royal paladin': EMOJI[":archery:"],
              'elite knight': EMOJI[":shield:"]}
    return lookup.get(vocation.lower(), EMOJI[":question:"])
def get_pronouns(gender: str):
    """Gets a list of pronouns based on the gender given. Only binary genders supported, sorry.

    Returns [subject, possessive, object] forms; anything unrecognized gets "it" forms."""
    forms = {
        "female": ["she", "her", "her"],
        "male": ["he", "his", "him"],
    }
    return forms.get(gender.lower(), ["it", "its", "it"])
def get_map_area(x, y, z, size=15, scale=8, crosshair=True):
    """Gets a minimap picture of a map area.

    size refers to the radius of the image in actual tibia sqm
    scale is how much the image will be stretched (1 = 1 sqm = 1 pixel)
    Returns the rendered image as PNG-encoded bytes.

    NOTE(review): the cropped region is 2*size sqm wide but is resized to
    size*scale pixels, i.e. scale/2 px per sqm — confirm whether
    (2*size*scale) was intended before changing the output size.
    """
    c = tibiaDatabase.cursor()
    try:
        # One row per floor (z); the "image" column holds the raw map bitmap.
        c.execute("SELECT * FROM WorldMap WHERE z LIKE ?", (z,))
        result = c.fetchone()
    finally:
        # Bug fix: the cursor was previously never closed.
        c.close()
    im = Image.open(io.BytesIO(bytearray(result['image'])))
    # Crop a (2*size)x(2*size) square centered on (x, y), then upscale it.
    im = im.crop((x-size, y-size, x+size, y+size))
    im = im.resize((size*scale, size*scale))
    if crosshair:
        # Draw centered horizontal and vertical lines to mark the target sqm.
        draw = ImageDraw.Draw(im)
        width, height = im.size
        draw.line((0, height/2, width, height/2), fill=128)
        draw.line((width/2, 0, width/2, height), fill=128)
    # Serialize to PNG and hand back the raw bytes.
    img_byte_arr = io.BytesIO()
    im.save(img_byte_arr, format='png')
    img_byte_arr = img_byte_arr.getvalue()
    return img_byte_arr
| NabBot-master/utils/tibia.py | 43,555 | Returns an achievement (dictionary), a list of possible matches or none
Returns a dictionary with a player's info
The dictionary contains the following keys: name, deleted, level, vocation, world, residence,
married, gender, guild, last,login, chars*.
*chars is list that contains other characters in the same account (if not hidden).
Each list element is dictionary with the keys: name, world.
May return ERROR_DOESNTEXIST or ERROR_NETWORK accordingly.
Gets a character's tibia.com URL
Returns a guild's world and online member list in a dictionary.
The dictionary contains the following keys: name, logo_url, world and members.
The key members contains a list where each element is a dictionary with the following keys:
rank, name, title, vocation, level, joined.
Guilds are case sensitive on tibia.com so guildstats.eu is checked for correct case.
May return ERROR_DOESNTEXIST or ERROR_NETWORK accordingly.
Gets a specific page of the highscores
Each list element is a dictionary with the following keys: rank, name, value.
May return ERROR_NETWORK
Returns a dictionary containing a house's info, a list of possible matches or None.
If world is specified, it will also find the current status of the house in that world.
Returns a dictionary containing an item's info, if no exact match was found, it returns a list of suggestions.
The dictionary has the following keys: name, look_text, npcs_sold*, value_sell, npcs_bought*, value_buy.
*npcs_sold and npcs_bought are list, each element is a dictionary with the keys: name, city.
Gets a minimap picture of a map area
size refers to the radius of the image in actual tibia sqm
scale is how much the image will be streched (1 = 1 sqm = 1 pixel)
Returns a dictionary with a monster's info, if no exact match was found, it returns a list of suggestions.
The dictionary has the following keys: name, id, hp, exp, maxdmg, elem_physical, elem_holy,
elem_death, elem_fire, elem_energy, elem_ice, elem_earth, elem_drown, elem_lifedrain, senseinvis,
arm, image.
Returns a dictionary containing a NPC's info, a list of possible matches or None
Gets a list of pronouns based on the gender given. Only binary genders supported, sorry.
Returns the city Rashid is currently in.
Returns a list of all the online players in current server.
Each list element is a dictionary with the following keys: name, level
Returns the share range for a specific level
The returned value is a list with the lower limit and the upper limit in that order.
Returns a dictionary containing a spell's info, a list of possible matches or None
Returns a dictionary with the stats for a character of a certain vocation and level.
The dictionary has the following keys: vocation, hp, mp, cap.
Returns Germany's timezone, considering their daylight saving time dates
Given a vocation name, it returns an abbreviated string
Given a vocation name, returns a emoji representing it
Gets a time object from a time string from tibia.com
Constants Tibia.com URLs: Fetch website Trimming content to reduce load Website fetch was incomplete, due to a network error Fetch website This should return ERROR_NETWORK, but requires error handling where this function is used Trimming content to reduce load Website fetch was incomplete due to a network error This should return ERROR_NETWORK, but requires error handling where this function is used Check if list is empty Building dictionary list from online players Fix casing using guildstats.eu if needed Sorry guildstats.eu :D Fetch website Make sure we got a healthy fetch Website fetch was incomplete, due to a network error Check if the guild doesn't exist Failsafe in case guildstats.eu changes their websites format Fetch website Trimming content to reduce load and making sure we got a healthy fetch Website fetch was incomplete, due to a network error Check if the guild doesn't exist Tibia.com has no search function, so there's no guild doesn't exist page cause you're not supposed to get to a guild that doesn't exists. So the message displayed is "An internal error has ocurred. Please try again later!". Regex pattern to fetch world, guildhall and founding date Logo URL Regex pattern to fetch members Check if list is empty Building dictionary list from members Fetch website Trimming content to reduce load Website fetch was incomplete, due to a network error Check if player exists TODO: Is there a way to reduce this part? Name Deleted Vocation Level Use database levels for online characters World Residence (City) Marriage Sex Guild rank Guild membership House Last login Discord owner Update name, vocation and world for chars in database if necessarySkills from highscores Some pvp deaths have no level, so they are raising a ValueError, they will be ignored for now. 
Other chars note that an empty char list means the character is hidden otherwise you'd have at least the same char in the list See if there is a character list Find characters Server save is at 10am, so in tibia a new day starts at that hour Reading monster database Reading item database Search query Checking if item exists Checking NPCs that buy the item Replacing cities for special npcs and adding colors Checking NPCs that sell the item Replacing cities for special npcs Server save is at 10am, so in tibia a new day starts at that hour Get creatures that drop it Checking quest rewards: Getting local time and GMT UTC Offset Extracting timezone Convert time string to time object Removing timezone cause CEST and CET are not supported Getting the offset Add/subtract hours to get the real time TODO: Improve formatting to match /monster and /item search query Search query Trimming content to reduce load Search query Find date in Germany | 5,712 | en | 0.837665 |
from __future__ import division
import argparse
import os
import torch
from mmcv import Config
from mmdet import __version__
from mmdet.apis import (get_root_logger, init_dist, set_random_seed,
train_detector)
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
def parse_args():
    """Parse command-line options for the detector training entry point."""
    ap = argparse.ArgumentParser(description='Train a detector')
    ap.add_argument('config', help='train config file path')
    ap.add_argument('--work_dir', help='the dir to save logs and models')
    ap.add_argument(
        '--resume_from', help='the checkpoint file to resume from')
    ap.add_argument(
        '--validate',
        action='store_true',
        help='whether to evaluate the checkpoint during training')
    ap.add_argument(
        '--gpus',
        type=int,
        default=1,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    ap.add_argument('--seed', type=int, default=None, help='random seed')
    ap.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    ap.add_argument('--local_rank', type=int, default=0)
    ap.add_argument(
        '--autoscale-lr',
        action='store_true',
        help='automatically scale lr with the number of gpus')
    parsed = ap.parse_args()

    # Mirror --local_rank into the environment when LOCAL_RANK isn't already
    # set, so downstream code can read it uniformly.
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))
    return parsed
def main():
    """Training entry point: load config, apply CLI overrides, build the
    detector and dataset, then launch training.

    Ordering matters: the distributed environment must be initialized before
    the logger is created, because the logger depends on the dist info.
    """
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args (CLI takes precedence over file)
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677):
        # lr scales linearly with the number of GPUs, normalized to 8 GPUs.
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    train_dataset = build_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    train_detector(
        model,
        train_dataset,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)


if __name__ == '__main__':
    main()
| tools/train.py | 3,378 | set cudnn_benchmark update configs according to CLI args apply the linear scaling rule (https://arxiv.org/abs/1706.02677) init distributed env first, since logger depends on the dist info. init logger before other steps set random seeds save mmdet version, config file content and class names in checkpoints as meta data add an attribute for visualization convenience | 367 | en | 0.694573 |
"""An example of jinja2 templating"""
from bareasgi import Application, HttpRequest, HttpResponse
import jinja2
import pkg_resources
import uvicorn
from bareasgi_jinja2 import Jinja2TemplateProvider, add_jinja2
async def http_request_handler(request: HttpRequest) -> HttpResponse:
    """Render the ``example1.html`` template with a small context."""
    context = {'name': 'rob'}
    return await Jinja2TemplateProvider.apply(request, 'example1.html', context)
async def handle_no_template(request: HttpRequest) -> HttpResponse:
    """Demonstrate the provider's behavior when the template file is missing."""
    context = {'name': 'rob'}
    return await Jinja2TemplateProvider.apply(request, 'notemplate.html', context)
if __name__ == '__main__':
    # Locate the package's bundled "templates" directory.
    TEMPLATES = pkg_resources.resource_filename(__name__, "templates")

    # enable_async lets the provider render templates from async handlers.
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(TEMPLATES),
        autoescape=jinja2.select_autoescape(['html', 'xml']),
        enable_async=True
    )

    app = Application()
    add_jinja2(app, env)
    app.http_router.add({'GET'}, '/example1', http_request_handler)
    app.http_router.add({'GET'}, '/notemplate', handle_no_template)
    uvicorn.run(app, port=9010)
| examples/example1.py | 1,175 | An example of jinja2 templating | 31 | en | 0.236713 |
from __future__ import print_function
import pprint
import os
import time
import msgpackrpc
import math
import msgpackrpc #install as admin: pip install msgpack-rpc-python
import msgpack
import sys
import inspect
import types
import re
import shutil
import numpy as np #pip install numpy
#==============================================================================
# Classes
#==============================================================================
class MsgpackMixin:
    """Serialization helpers shared by all RPC message types."""

    def to_msgpack(self, *args, **kwargs):
        # msgpack can serialize a plain attribute dict directly.
        return self.__dict__

    @classmethod
    def from_msgpack(cls, encoded):
        """Rebuild an instance from a msgpack-decoded dict (bytes keys)."""
        instance = cls()
        decoded = {}
        for key, value in encoded.items():
            decoded[key.decode('utf-8')] = value
        instance.__dict__ = decoded
        return instance
class AirSimImageType:
    # Enumeration of camera image types accepted by simGetImage/simGetImages.
    Scene = 0
    DepthPlanner = 1
    DepthPerspective = 2
    DepthVis = 3
    DisparityNormalized = 4
    Segmentation = 5
    SurfaceNormals = 6
class DrivetrainType:
    # Drivetrain mode for movement APIs (moveByVelocity, moveOnPath, ...).
    MaxDegreeOfFreedom = 0
    ForwardOnly = 1
class LandedState:
    # Values returned by getLandedState().
    Landed = 0
    Flying = 1
class Vector3r(MsgpackMixin):
    """A 3-component float32 vector (x_val, y_val, z_val)."""
    # Class-level defaults; these names also define the msgpack field set.
    x_val = np.float32(0)
    y_val = np.float32(0)
    z_val = np.float32(0)

    def __init__(self, x_val = np.float32(0), y_val = np.float32(0), z_val = np.float32(0)):
        # Store all three components on the instance in one go.
        self.x_val, self.y_val, self.z_val = x_val, y_val, z_val
class Quaternionr(MsgpackMixin):
    """A quaternion (w, x, y, z) as float32; constructor defaults to identity (w=1)."""
    # Class-level defaults; these names also define the msgpack field set.
    # NOTE(review): class-level w_val default (0) differs from the constructor
    # default (1) — confirm which is intended for from_msgpack round-trips.
    w_val = np.float32(0)
    x_val = np.float32(0)
    y_val = np.float32(0)
    z_val = np.float32(0)

    def __init__(self, x_val = np.float32(0), y_val = np.float32(0), z_val = np.float32(0), w_val = np.float32(1)):
        self.x_val, self.y_val, self.z_val = x_val, y_val, z_val
        self.w_val = w_val
class Pose(MsgpackMixin):
    """Position + orientation pair describing a rigid-body pose."""
    position = Vector3r()
    orientation = Quaternionr()

    def __init__(self, position_val, orientation_val):
        self.position, self.orientation = position_val, orientation_val
class CollisionInfo(MsgpackMixin):
    # Collision data decoded from the getCollisionInfo() RPC response.
    has_collided = False
    normal = Vector3r()
    impact_point = Vector3r()
    position = Vector3r()
    penetration_depth = np.float32(0)
    time_stamp = np.float32(0)
    object_name = ""
    object_id = -1
class GeoPoint(MsgpackMixin):
    # A GPS coordinate. Units are not stated in this file; presumably
    # degrees for latitude/longitude and meters for altitude — confirm.
    latitude = 0.0
    longitude = 0.0
    altitude = 0.0
class YawMode(MsgpackMixin):
    """Yaw control: yaw_or_rate is a rate when is_rate is True, else an angle."""
    is_rate = True
    yaw_or_rate = 0.0

    def __init__(self, is_rate = True, yaw_or_rate = 0.0):
        self.is_rate, self.yaw_or_rate = is_rate, yaw_or_rate
class ImageRequest(MsgpackMixin):
    """Parameters describing one camera capture in a simGetImages() call."""
    # Class-level defaults; these names also define the msgpack field set.
    # NOTE(review): the class default for compress (False) differs from the
    # constructor default (True) — confirm which is intended.
    camera_id = np.uint8(0)
    image_type = AirSimImageType.Scene
    pixels_as_float = False
    compress = False

    def __init__(self, camera_id, image_type, pixels_as_float = False, compress = True):
        self.camera_id, self.image_type = camera_id, image_type
        self.pixels_as_float, self.compress = pixels_as_float, compress
class ImageResponse(MsgpackMixin):
    # One image returned by simGetImages(); exactly one of image_data_uint8 /
    # image_data_float is populated depending on pixels_as_float.
    image_data_uint8 = np.uint8(0)
    image_data_float = np.float32(0)
    camera_position = Vector3r()
    camera_orientation = Quaternionr()
    time_stamp = np.uint64(0)
    message = ''
    pixels_as_float = np.float32(0)
    compress = True
    width = 0
    height = 0
    image_type = AirSimImageType.Scene
class CarControls(MsgpackMixin):
    """Control inputs sent to the simulated car via setCarControls()."""
    # Class-level defaults; these names also define the msgpack field set.
    throttle = np.float32(0)
    steering = np.float32(0)
    brake = np.float32(0)
    handbrake = False
    is_manual_gear = False
    manual_gear = 0
    gear_immediate = True

    def set_throttle(self, throttle_val, forward):
        """Set throttle magnitude and gear direction on this instance.

        forward=True drives forward (automatic gear, positive throttle);
        forward=False selects reverse (manual_gear = -1, negative throttle).

        Bug fix: the original assigned to local variables (is_manual_gear,
        manual_gear, throttle) instead of instance attributes, so calling it
        never changed the controls actually sent to the simulator.
        """
        if forward:
            self.is_manual_gear = False
            self.manual_gear = 0
            self.throttle = abs(throttle_val)
        else:
            # NOTE(review): the original also left is_manual_gear False in
            # reverse despite setting manual_gear = -1 — confirm whether
            # True was intended; the original values are preserved here.
            self.is_manual_gear = False
            self.manual_gear = -1
            self.throttle = - abs(throttle_val)
class CarState(MsgpackMixin):
    # Telemetry snapshot decoded from the getCarState() RPC response.
    speed = np.float32(0)
    gear = 0
    position = Vector3r()
    velocity = Vector3r()
    orientation = Quaternionr()
class AirSimClientBase:
    """msgpack-RPC client base shared by MultirotorClient and CarClient."""

    def __init__(self, ip, port):
        # 1-hour timeout: blocking flight/drive commands can run a long time.
        self.client = msgpackrpc.Client(msgpackrpc.Address(ip, port), timeout = 3600)

    def ping(self):
        return self.client.call('ping')

    def reset(self):
        self.client.call('reset')

    def confirmConnection(self):
        """Poll the home GPS point until the simulator reports a valid fix.

        Prints one 'X' per retry; returns once latitude/longitude/altitude
        are neither all zero nor NaN.
        """
        print('Waiting for connection: ', end='')
        home = self.getHomeGeoPoint()
        while ((home.latitude == 0 and home.longitude == 0 and home.altitude == 0) or
               math.isnan(home.latitude) or math.isnan(home.longitude) or math.isnan(home.altitude)):
            time.sleep(1)
            home = self.getHomeGeoPoint()
            print('X', end='')
        print('')

    def getHomeGeoPoint(self):
        return GeoPoint.from_msgpack(self.client.call('getHomeGeoPoint'))

    # basic flight control
    def enableApiControl(self, is_enabled):
        return self.client.call('enableApiControl', is_enabled)

    def isApiControlEnabled(self):
        return self.client.call('isApiControlEnabled')

    def simSetSegmentationObjectID(self, mesh_name, object_id, is_name_regex = False):
        return self.client.call('simSetSegmentationObjectID', mesh_name, object_id, is_name_regex)

    def simGetSegmentationObjectID(self, mesh_name):
        return self.client.call('simGetSegmentationObjectID', mesh_name)

    # camera control
    # simGetImage returns compressed png in array of bytes
    # image_type uses one of the AirSimImageType members
    def simGetImage(self, camera_id, image_type):
        # because this method returns std::vector<uint8>, msgpack decides to
        # encode it as a string unfortunately. Empty/NUL means "no image".
        result = self.client.call('simGetImage', camera_id, image_type)
        if (result == "" or result == "\0"):
            return None
        return result

    # camera control
    # simGetImages takes a list of ImageRequest and returns ImageResponse
    # objects; image_type uses one of the AirSimImageType members
    def simGetImages(self, requests):
        responses_raw = self.client.call('simGetImages', requests)
        return [ImageResponse.from_msgpack(response_raw) for response_raw in responses_raw]

    def getCollisionInfo(self):
        return CollisionInfo.from_msgpack(self.client.call('getCollisionInfo'))

    @staticmethod
    def stringToUint8Array(bstr):
        # NOTE(review): np.fromstring is deprecated in modern NumPy;
        # np.frombuffer is the usual replacement — confirm before upgrading.
        return np.fromstring(bstr, np.uint8)

    @staticmethod
    def stringToFloatArray(bstr):
        # NOTE(review): same np.fromstring deprecation caveat as above.
        return np.fromstring(bstr, np.float32)

    @staticmethod
    def listTo2DFloatArray(flst, width, height):
        # Reshape a flat float list into a (height, width) array.
        return np.reshape(np.asarray(flst, np.float32), (height, width))

    @staticmethod
    def getPfmArray(response):
        # Convert a float ImageResponse payload into a 2-D array.
        return AirSimClientBase.listTo2DFloatArray(response.image_data_float, response.width, response.height)

    @staticmethod
    def get_public_fields(obj):
        # List non-callable, non-underscore attributes of obj.
        return [attr for attr in dir(obj)
                if not (attr.startswith("_")
                or inspect.isbuiltin(attr)
                or inspect.isfunction(attr)
                or inspect.ismethod(attr))]

    @staticmethod
    def to_dict(obj):
        return dict([attr, getattr(obj, attr)] for attr in AirSimClientBase.get_public_fields(obj))

    @staticmethod
    def to_str(obj):
        return str(AirSimClientBase.to_dict(obj))

    @staticmethod
    def write_file(filename, bstr):
        # Write raw bytes to filename (binary mode).
        with open(filename, 'wb') as afile:
            afile.write(bstr)

    def simSetPose(self, pose, ignore_collison):
        self.client.call('simSetPose', pose, ignore_collison)

    def simGetPose(self):
        return self.client.call('simGetPose')

    # helper method for converting getOrientation to roll/pitch/yaw
    # https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
    @staticmethod
    def toEulerianAngle(q):
        """Convert quaternion q to Euler angles; returns (pitch, roll, yaw) in radians."""
        z = q.z_val
        y = q.y_val
        x = q.x_val
        w = q.w_val
        ysqr = y * y
        # roll (x-axis rotation)
        t0 = +2.0 * (w*x + y*z)
        t1 = +1.0 - 2.0*(x*x + ysqr)
        roll = math.atan2(t0, t1)
        # pitch (y-axis rotation); clamp to [-1, 1] to protect asin from
        # floating-point drift just outside the valid domain
        t2 = +2.0 * (w*y - z*x)
        if (t2 > 1.0):
            t2 = 1
        if (t2 < -1.0):
            t2 = -1.0
        pitch = math.asin(t2)
        # yaw (z-axis rotation)
        t3 = +2.0 * (w*z + x*y)
        t4 = +1.0 - 2.0 * (ysqr + z*z)
        yaw = math.atan2(t3, t4)
        return (pitch, roll, yaw)

    @staticmethod
    def toQuaternion(pitch, roll, yaw):
        """Convert Euler angles (radians) to a Quaternionr."""
        t0 = math.cos(yaw * 0.5)
        t1 = math.sin(yaw * 0.5)
        t2 = math.cos(roll * 0.5)
        t3 = math.sin(roll * 0.5)
        t4 = math.cos(pitch * 0.5)
        t5 = math.sin(pitch * 0.5)
        q = Quaternionr()
        q.w_val = t0 * t2 * t4 + t1 * t3 * t5 #w
        q.x_val = t0 * t3 * t4 - t1 * t2 * t5 #x
        q.y_val = t0 * t2 * t5 + t1 * t3 * t4 #y
        q.z_val = t1 * t2 * t4 - t0 * t3 * t5 #z
        return q

    @staticmethod
    def wait_key(message = ''):
        ''' Wait for a key press on the console and return it. '''
        if message != '':
            print (message)
        result = None
        if os.name == 'nt':
            # Windows: msvcrt reads a single keypress directly.
            import msvcrt
            result = msvcrt.getch()
        else:
            # POSIX: temporarily switch the tty to raw-ish mode (no line
            # buffering, no echo), read one char, then restore settings.
            import termios
            fd = sys.stdin.fileno()
            oldterm = termios.tcgetattr(fd)
            newattr = termios.tcgetattr(fd)
            newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
            termios.tcsetattr(fd, termios.TCSANOW, newattr)
            try:
                result = sys.stdin.read(1)
            except IOError:
                pass
            finally:
                termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
        return result

    @staticmethod
    def read_pfm(file):
        """ Read a pfm file; returns (data, scale). """
        file = open(file, 'rb')
        color = None
        width = None
        height = None
        scale = None
        endian = None
        # Header line: 'PF' = color (3 channels), 'Pf' = grayscale.
        header = file.readline().rstrip()
        header = str(bytes.decode(header, encoding='utf-8'))
        if header == 'PF':
            color = True
        elif header == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')
        # Second line: "<width> <height>".
        temp_str = str(bytes.decode(file.readline(), encoding='utf-8'))
        dim_match = re.match(r'^(\d+)\s(\d+)\s$', temp_str)
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception('Malformed PFM header.')
        # Third line: scale; its sign encodes byte order.
        scale = float(file.readline().rstrip())
        if scale < 0: # little-endian
            endian = '<'
            scale = -scale
        else:
            endian = '>' # big-endian
        data = np.fromfile(file, endian + 'f')
        shape = (height, width, 3) if color else (height, width)
        data = np.reshape(data, shape)
        # DEY: I don't know why this was there.
        #data = np.flipud(data)
        file.close()
        return data, scale

    @staticmethod
    def write_pfm(file, image, scale=1):
        """ Write a pfm file from a float32 numpy image. """
        file = open(file, 'wb')
        color = None
        if image.dtype.name != 'float32':
            raise Exception('Image dtype must be float32.')
        # PFM stores rows bottom-up, so flip vertically before writing.
        image = np.flipud(image)
        if len(image.shape) == 3 and image.shape[2] == 3: # color image
            color = True
        elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale
            color = False
        else:
            raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
        file.write('PF\n'.encode('utf-8') if color else 'Pf\n'.encode('utf-8'))
        temp_str = '%d %d\n' % (image.shape[1], image.shape[0])
        file.write(temp_str.encode('utf-8'))
        # Negative scale signals little-endian data, per the PFM convention.
        endian = image.dtype.byteorder
        if endian == '<' or endian == '=' and sys.byteorder == 'little':
            scale = -scale
        temp_str = '%f\n' % scale
        file.write(temp_str.encode('utf-8'))
        image.tofile(file)

    @staticmethod
    def write_png(filename, image):
        """ image must be numpy array H X W X channels

        Hand-rolled PNG writer: builds IHDR/IDAT/IEND chunks directly so no
        imaging library is required. Assumes 4 channels (8-bit RGBA).
        """
        import zlib, struct
        buf = image.flatten().tobytes()
        width = image.shape[1]
        height = image.shape[0]
        # reverse the vertical line order and add null bytes at the start
        # (the leading 0 per scanline is the PNG "no filter" byte)
        width_byte_4 = width * 4
        raw_data = b''.join(b'\x00' + buf[span:span + width_byte_4]
                            for span in range((height - 1) * width_byte_4, -1, - width_byte_4))
        def png_pack(png_tag, data):
            # Each chunk is: length, tag+data, CRC32 of tag+data.
            chunk_head = png_tag + data
            return (struct.pack("!I", len(data)) +
                    chunk_head +
                    struct.pack("!I", 0xFFFFFFFF & zlib.crc32(chunk_head)))
        png_bytes = b''.join([
            b'\x89PNG\r\n\x1a\n',
            png_pack(b'IHDR', struct.pack("!2I5B", width, height, 8, 6, 0, 0, 0)),
            png_pack(b'IDAT', zlib.compress(raw_data, 9)),
            png_pack(b'IEND', b'')])
        AirSimClientBase.write_file(filename, png_bytes)
# ----------------------------------- Multirotor APIs ---------------------------------------------
class MultirotorClient(AirSimClientBase, object):
    """RPC client for multirotor vehicles; thin wrappers around server calls
    on port 41451."""

    def __init__(self, ip = ""):
        if (ip == ""):
            ip = "127.0.0.1"
        super(MultirotorClient, self).__init__(ip, 41451)

    def armDisarm(self, arm):
        return self.client.call('armDisarm', arm)

    def takeoff(self, max_wait_seconds = 15):
        return self.client.call('takeoff', max_wait_seconds)

    def land(self, max_wait_seconds = 60):
        return self.client.call('land', max_wait_seconds)

    def goHome(self):
        return self.client.call('goHome')

    def hover(self):
        return self.client.call('hover')

    # query vehicle state
    def getPosition(self):
        return Vector3r.from_msgpack(self.client.call('getPosition'))

    def getVelocity(self):
        return Vector3r.from_msgpack(self.client.call('getVelocity'))

    def getOrientation(self):
        return Quaternionr.from_msgpack(self.client.call('getOrientation'))

    def getLandedState(self):
        # Returns a LandedState value.
        return self.client.call('getLandedState')

    def getGpsLocation(self):
        return GeoPoint.from_msgpack(self.client.call('getGpsLocation'))

    def getPitchRollYaw(self):
        # Convenience: current orientation as (pitch, roll, yaw) radians.
        return self.toEulerianAngle(self.getOrientation())

    #def getRCData(self):
    #    return self.client.call('getRCData')

    def timestampNow(self):
        return self.client.call('timestampNow')

    def isApiControlEnabled(self):
        return self.client.call('isApiControlEnabled')

    def isSimulationMode(self):
        return self.client.call('isSimulationMode')

    def getServerDebugInfo(self):
        return self.client.call('getServerDebugInfo')

    # APIs for control
    def moveByAngle(self, pitch, roll, z, yaw, duration):
        return self.client.call('moveByAngle', pitch, roll, z, yaw, duration)

    def moveByVelocity(self, vx, vy, vz, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode()):
        return self.client.call('moveByVelocity', vx, vy, vz, duration, drivetrain, yaw_mode)

    def moveByVelocityZ(self, vx, vy, z, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode()):
        return self.client.call('moveByVelocityZ', vx, vy, z, duration, drivetrain, yaw_mode)

    def moveOnPath(self, path, velocity, max_wait_seconds = 60, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), lookahead = -1, adaptive_lookahead = 1):
        return self.client.call('moveOnPath', path, velocity, max_wait_seconds, drivetrain, yaw_mode, lookahead, adaptive_lookahead)

    def moveToZ(self, z, velocity, max_wait_seconds = 60, yaw_mode = YawMode(), lookahead = -1, adaptive_lookahead = 1):
        return self.client.call('moveToZ', z, velocity, max_wait_seconds, yaw_mode, lookahead, adaptive_lookahead)

    def moveToPosition(self, x, y, z, velocity, max_wait_seconds = 60, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), lookahead = -1, adaptive_lookahead = 1):
        return self.client.call('moveToPosition', x, y, z, velocity, max_wait_seconds, drivetrain, yaw_mode, lookahead, adaptive_lookahead)

    def moveByManual(self, vx_max, vy_max, z_min, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode()):
        return self.client.call('moveByManual', vx_max, vy_max, z_min, duration, drivetrain, yaw_mode)

    def rotateToYaw(self, yaw, max_wait_seconds = 60, margin = 5):
        return self.client.call('rotateToYaw', yaw, max_wait_seconds, margin)

    def rotateByYawRate(self, yaw_rate, duration):
        return self.client.call('rotateByYawRate', yaw_rate, duration)
# ----------------------------------- Car APIs ---------------------------------------------
class CarClient(AirSimClientBase, object):
    """RPC client for the simulated car; connects on port 42451."""

    def __init__(self, ip = ""):
        if (ip == ""):
            ip = "127.0.0.1"
        super(CarClient, self).__init__(ip, 42451)

    def setCarControls(self, controls):
        # Push a CarControls instance to the simulator.
        self.client.call('setCarControls', controls)

    def getCarState(self):
        """Fetch and decode the current CarState."""
        state_raw = self.client.call('getCarState')
        return CarState.from_msgpack(state_raw)
#FIXME: keep it and remove all upper that already is in AirSimClient.py
#==============================================================================
# Functions
#==============================================================================
def drive(client, throttle, steering, controls=None):
    """Set throttle/steering on a CarControls object and send it to the sim.

    Improvement: the original silently wrote to the module-level
    ``car_controls`` even though its callers hold a controls object. The new
    optional ``controls`` parameter lets callers pass their own instance;
    when omitted it falls back to the module global, so all existing call
    sites keep working unchanged.
    """
    if controls is None:
        controls = car_controls  # module-level instance created by the main script
    controls.throttle = throttle
    controls.steering = steering
    client.setCarControls(controls)
def drive_forward(client, car_controls):
    # Full throttle, wheels straight.
    # NOTE(review): the car_controls parameter is unused — drive() writes to
    # the module-level car_controls instance instead; confirm intended.
    drive(client, 1.0, 0)
def drive_right(client, car_controls):
    # Full throttle, steer right with value 10.
    # NOTE(review): car_controls is unused, and a steering value of 10 looks
    # large for a normalized steering input — confirm the expected range.
    drive(client, 1.0, 10)
def drive_left(client, car_controls):
    # Full throttle, steer left with value -10.
    # NOTE(review): car_controls is unused, and a steering value of -10 looks
    # large for a normalized steering input — confirm the expected range.
    drive(client, 1.0, -10)
def save_image(i):
    """Capture one Scene image from camera 1 and write it to IMAGEDIR.

    Uses the module-level `client` and `IMAGEDIR`; `i` is used as the image
    index in the filename.
    """
    # get a single image from the car's camera
    responses = client.simGetImages([ImageRequest(1, AirSimImageType.Scene)])
    single_image = responses[0].image_data_uint8
    # save the image
    AirSimClientBase.write_file(os.path.normpath(IMAGEDIR + \
        '/image_{}.png'.format(i)), single_image)
#==============================================================================
# Main
#==============================================================================

# Constants
IMAGEDIR = "images"

# Create an empty image directory.
# NOTE(review): rmtree(ignore_errors=True) never raises, so os.stat is what
# triggers the except branch when the directory is gone; the bare except
# also swallows unrelated errors — confirm this is acceptable.
try:
    shutil.rmtree(IMAGEDIR, ignore_errors=True)
    os.stat(IMAGEDIR)
except:
    os.mkdir(IMAGEDIR)

# Connect to AirSim and take API control of the car
client = CarClient()
client.confirmConnection()
client.enableApiControl(True)
client.reset()
print('Connected')

i = 0
car_controls = CarControls()
# Repeat a forward/right/forward/left driving pattern, saving one camera
# image per step, until 40 images have been written.
while True:
    drive_forward(client, car_controls)
    i += 1
    save_image(i)
    print("image {} has been saved".format(i))
    time.sleep(0.1)
    drive_right(client, car_controls)
    i += 1
    save_image(i)
    print("image {} has been saved".format(i))
    time.sleep(0.1)
    drive_forward(client, car_controls)
    i += 1
    save_image(i)
    print("image {} has been saved".format(i))
    time.sleep(0.1)
    drive_left(client, car_controls)
    i += 1
    save_image(i)
    print("image {} has been saved".format(i))
    time.sleep(0.1)
    if i >= 40:
        break

    ## get RGBA camera images from the car
    #responses = client.simGetImages([ImageRequest(1, AirSimImageType.Scene)])
    ## add image to queue
    #imagequeue.append(responses[0].image_data_uint8)
    ## dump queue when it gets full
    #if len(imagequeue) == QUEUESIZE:
    #    for i in range(QUEUESIZE):
    #        AirSimClientBase.write_file(os.path.normpath(IMAGEDIR + \
    #            '/image%03d.png' % i ), imagequeue[i])
    #    imagequeue.pop(0)
    #collision_info = client.getCollisionInfo()
    #if collision_info.has_collided:
    #    print("Collision at pos %s, normal %s, impact pt %s, penetration %f, name %s, obj id %d" % (
    #        pprint.pformat(collision_info.position),
    #        pprint.pformat(collision_info.normal),
    #        pprint.pformat(collision_info.impact_point),
    #        collision_info.penetration_depth, collision_info.object_name, collision_info.object_id))
    #    break
    #time.sleep(0.1)

# Hand control back to the user/simulator
client.enableApiControl(False)
| run_demo.py | 20,375 | Read a pfm file
Wait for a key press on the console and return it.
Write a pfm file
image must be numpy array H X W X channels
install as admin: pip install msgpack-rpc-pythonpip install numpy============================================================================== Classes==============================================================================msgpack.dump(self.to_dict(*args, **kwargs)) basic flight control camera control simGetImage returns compressed png in array of bytes image_type uses one of the AirSimImageType members because this method returns std::vector<uint8>, msgpack decides to encode it as a string unfortunately. camera control simGetImage returns compressed png in array of bytes image_type uses one of the AirSimImageType members helper method for converting getOrientation to roll/pitch/yaw https:en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles roll (x-axis rotation) pitch (y-axis rotation) yaw (z-axis rotation)wxyz little-endian big-endian DEY: I don't know why this was there.data = np.flipud(data) color image greyscale reverse the vertical line order and add null bytes at the start ----------------------------------- Multirotor APIs --------------------------------------------- query vehicle statedef getRCData(self): return self.client.call('getRCData') APIs for control ----------------------------------- Car APIs ---------------------------------------------FIXME: keep it and remove all upper that already is in AirSimClient.py============================================================================== Functions============================================================================== get a sinlgle image from the car's camera save the image============================================================================== Main============================================================================== Constants Create an empty image directory Connect to AirSim get RGBA camera images from the carresponses = client.simGetImages([ImageRequest(1, AirSimImageType.Scene)]) add image to queue imagequeue.append(responses[0].image_data_uint8) dump 
queue when it gets fullif len(imagequeue) == QUEUESIZE: for i in range(QUEUESIZE): AirSimClientBase.write_file(os.path.normpath(IMAGEDIR + \ '/image%03d.png' % i ), imagequeue[i]) imagequeue.pop(0) collision_info = client.getCollisionInfo()if collision_info.has_collided: print("Collision at pos %s, normal %s, impact pt %s, penetration %f, name %s, obj id %d" % ( pprint.pformat(collision_info.position), pprint.pformat(collision_info.normal), pprint.pformat(collision_info.impact_point), collision_info.penetration_depth, collision_info.object_name, collision_info.object_id)) breaktime.sleep(0.1) | 2,914 | en | 0.540771 |
#!/usr/bin/env python
# coding: utf-8
import logging.config
import os
# Database connection settings
DB_CONFIG = {
    'username': 'root',
    'password': os.environ.get('MYSQL_TRADING_PASS'),
    'host': '127.0.0.1',
    'dbname': 'trading_db',
}

# Logging configuration (logging.config.dictConfig schema)
LOGGING = {
    'version': 1,
    'formatters': {  # Message formatting
        'main': {
            'format': '[%(asctime)s] %(levelname)s %(module)s %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        },
    },
    'handlers': {  # Message handlers
        'file_handler': {
            'class': 'logging.FileHandler',
            'filename': '/tmp/trading.log',
            'formatter': 'main',
        },
        'streamlogger': {
            'class': 'logging.StreamHandler',
            'formatter': 'main',
        },
    },
    'loggers': {  # Loggers: prod logs INFO+, devel logs DEBUG+; both go to file and stream
        'prod_logger': {
            'handlers': ['file_handler', 'streamlogger'],
            'level': 'INFO',
        },
        'devel_logger': {
            'handlers': ['file_handler', 'streamlogger'],
            'level': 'DEBUG',
        },
    },
}
logging.config.dictConfig(LOGGING)
# Base configuration
class Config(object):
    """Shared settings (DB, mail, Celery); subclasses override per environment."""
    DEBUG = False
    CSRF_ENABLED = True
    SQLALCHEMY_DATABASE_URI = f"mysql+pymysql://{DB_CONFIG['username']}:{DB_CONFIG['password']}" \
                              f"@{DB_CONFIG['host']}/{DB_CONFIG['dbname']}?charset=utf8"
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    LOGGER_NAME = 'devel_logger'
    MAIL_SERVER = 'smtp.yandex.com'
    MAIL_PORT = 465
    MAIL_USE_SSL = True
    # NOTE(review): key name "TSL" likely meant "TLS" — confirm what consumers read
    MAIL_USE_TSL = False
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    MAIL_DEFAULT_SENDER = os.environ.get('MAIL_USERNAME')
    CELERY_BROKER_URL = 'redis://0.0.0.0:6379/'
    CELERY_RESULT_BACKEND = 'redis://0.0.0.0:6379/'
    CELERY_DEFAULT_QUEUE = 'request_handler_queue'
# Production configuration
class ProductionConfig(Config):
    DEBUG = False
    LOGGER_NAME = 'prod_logger'
# Development configuration
class DevelopmentConfig(Config):
    DEVELOPMENT = True
    DEBUG = True
    LOGGER_NAME = 'devel_logger'
# Test configuration: CSRF off, separate test database
class TestConfig(Config):
    DEBUG = True
    TESTING = True
    WTF_CSRF_ENABLED = False
    LOGGER_NAME = 'devel_logger'
    test_db_name = "test_trading_db"
    SQLALCHEMY_DATABASE_URI = f"mysql+pymysql://{DB_CONFIG['username']}:{DB_CONFIG['password']}" \
                              f"@{DB_CONFIG['host']}/{test_db_name}?charset=utf8"
# Current (active) configuration
# --------------------------------------------------
_currentConfig = DevelopmentConfig


def getConfig():
    """Return the currently active configuration class."""
    return _currentConfig


def setConfig(config):
    """Replace the active configuration class (e.g. for tests or deployment)."""
    global _currentConfig
    _currentConfig = config
# --------------------------------------------------

# Size of the data buffer (rows per chunk) loaded into the database
chunkSize = 30000
| request_handler/appconfig.py | 3,134 | !/usr/bin/env python coding: utf-8 Конфигурация базы данных Конфигурация журналирования Форматирование сообщения Обработчикаи сообщений Логгеры Базовая конфигурация Конфигурация выпуска Конфигурация разработки Конфигурация тестирования Текущая конфигурация -------------------------------------------------- -------------------------------------------------- Размер буффера данных, загружаемых в базу | 400 | ru | 0.955374 |
#!/usr/bin/env python3
import random
import sys
"""
Markov chains name generator in Python
From http://roguebasin.roguelikedevelopment.org/index.php?title=Markov_chains_name_generator_in_Python .
"""
# from http://www.geocities.com/anvrill/names/cc_goth.html
# Training corpus: the Markov chain learns letter transitions from these names.
PLACES = ['Adara', 'Adena', 'Adrianne', 'Alarice', 'Alvita', 'Amara', 'Ambika', 'Antonia', 'Araceli', 'Balandria', 'Basha',
          'Beryl', 'Bryn', 'Callia', 'Caryssa', 'Cassandra', 'Casondrah', 'Chatha', 'Ciara', 'Cynara', 'Cytheria', 'Dabria', 'Darcei',
          'Deandra', 'Deirdre', 'Delores', 'Desdomna', 'Devi', 'Dominique', 'Drucilla', 'Duvessa', 'Ebony', 'Fantine', 'Fuscienne',
          'Gabi', 'Gallia', 'Hanna', 'Hedda', 'Jerica', 'Jetta', 'Joby', 'Kacila', 'Kagami', 'Kala', 'Kallie', 'Keelia', 'Kerry',
          'Kerry-Ann', 'Kimberly', 'Killian', 'Kory', 'Lilith', 'Lucretia', 'Lysha', 'Mercedes', 'Mia', 'Maura', 'Perdita', 'Quella',
          'Riona', 'Safiya', 'Salina', 'Severin', 'Sidonia', 'Sirena', 'Solita', 'Tempest', 'Thea', 'Treva', 'Trista', 'Vala', 'Winta']
###############################################################################
# Markov Name model
# A random name generator, by Peter Corbett
# http://www.pick.ucam.org/~ptc24/mchain.html
# This script is hereby entered into the public domain
###############################################################################
class Mdict:
    """Markov transition table: prefix string -> list of observed suffixes."""

    def __init__(self):
        self.d = {}

    def __getitem__(self, key):
        # dict raises KeyError(key) itself for unknown prefixes.
        return self.d[key]

    def add_key(self, prefix, suffix):
        """Record that `suffix` was observed after `prefix`."""
        self.d.setdefault(prefix, []).append(suffix)

    def get_suffix(self, prefix):
        """Pick a random suffix observed after `prefix`."""
        return random.choice(self[prefix])
class MName:
    """
    A name from a Markov chain
    """
    def __init__(self, chainlen = 2):
        """
        Building the dictionary
        """
        # chainlen is the Markov order: how many preceding letters form the
        # context for each transition.
        if chainlen > 10 or chainlen < 1:
            print("Chain length must be between 1 and 10, inclusive")
            sys.exit(0)

        self.mcd = Mdict()
        oldnames = []
        self.chainlen = chainlen

        for l in PLACES:
            l = l.strip()
            oldnames.append(l)
            # Pad with spaces so the first letters also get chainlen of context.
            s = " " * chainlen + l
            for n in range(0,len(l)):
                self.mcd.add_key(s[n:n+chainlen], s[n+chainlen])
            # "\n" marks end-of-name for this final prefix.
            self.mcd.add_key(s[len(l):len(l)+chainlen], "\n")

    def New(self):
        """
        New name from the Markov chain
        """
        prefix = " " * self.chainlen
        name = ""
        suffix = ""
        # Walk the chain letter by letter until the end marker, capping the
        # name at 10 characters.
        while True:
            suffix = self.mcd.get_suffix(prefix)
            if suffix == "\n" or len(name) > 9:
                break
            else:
                name = name + suffix
                prefix = prefix[1:] + suffix
        return name.capitalize()
#############################################################################
if __name__ == "__main__":
    # Generate ten names and print them sorted, lower-cased.
    li = []
    for i in range(10):
        li.append(MName().New())
    for e in sorted(li):
        print(e.lower())
| lib/markov_usernames.py | 3,086 | A name from a Markov chain
New name from the Markov chain
Building the dictionary
!/usr/bin/env python3 from http://www.geocities.com/anvrill/names/cc_goth.html Markov Name model A random name generator, by Peter Corbett http://www.pick.ucam.org/~ptc24/mchain.html This script is hereby entered into the public domain | 318 | en | 0.710686 |
#!/usr/bin/python
# By Sun Jinyuan and Cui Yinglu, 2021

# Absolute path to the FoldX executable used for SequenceOnly/BuildModel runs.
foldx_exe = "/user/sunjinyuan/soft/foldx"
def getparser():
    """Build and parse the command line for the FoldX scan driver."""
    ap = argparse.ArgumentParser(
        description='To run Foldx PositionScan with multiple threads, make sure'
                    ' that you have the foldx and your pdb in the same floder')
    ap.add_argument("-s", '--pdbfile', help="The pdb file, the repaired one")
    ap.add_argument("-nt", '--number_threads', help="How many threads to run the Foldx")
    ap.add_argument("-c", '--chain_id', help="Chain ID")
    return ap.parse_args()
def SOfile2mutlist(pdbname, chain_id, foldx_exe):
    """Build a FoldX mutation list, one "<wt><chain><pos><mut>;" entry each.

    Reads the FoldX SequenceOnly report for `pdbname` (running FoldX first if
    the SO_*.fxout file is missing) and, for every residue on `chain_id`,
    emits one mutation string per candidate amino acid except the wild type.
    Note the candidate set has 19 residues and omits C (cysteine) —
    presumably deliberate for FoldX scans; confirm.

    Fix: the original opened the report file (possibly twice) and never
    closed it; the handle is now managed with a context manager.

    Returns:
        list[str]: entries like "KA12G;" ready for individual_list.txt.
    """
    AA_list = ["Q", "W", "E", "R", "T", "Y", "I", "P", "A", "S", "D", "F", "G", "H", "K", "L", "V", "N", "M"]
    so_name = "SO_" + pdbname.replace("pdb", "fxout")
    if not os.path.isfile(so_name):
        # Generate the SequenceOnly report with FoldX, then read it.
        os.system(foldx_exe + " --command=SequenceOnly --pdb=" + pdbname)
    mut_lst = []
    with open(so_name, "r") as SO_file:
        for line in SO_file:
            lst = line.replace("\n", "").split("\t")
            # Data rows have > 3 columns; column 1 is the chain, column 3 is
            # the wild-type residue code like "KA12".
            if len(lst) > 3 and lst[1] == chain_id:
                wild_AA = lst[3][0]
                for AA in AA_list:
                    if AA != wild_AA:
                        mut_lst.append(lst[3] + AA + ";")
    return mut_lst
def multi_threads(mut_lst, threads, pdbname, foldx_exe):
    """Split the mutation list into per-thread FoldX BuildModel work units.

    For each chunk, creates SubdirectoryN/ containing a copy of the PDB,
    individual_list.txt, List_Mutations_readable.txt and BM_N.cfg, and
    appends the launch commands to todo_list.sh.
    """
    # Chunk size. NOTE(review): threads == "1" raises ZeroDivisionError, and
    # a thread count larger than len(mut_lst)+1 makes t == 0 (invalid range
    # step) — confirm expected thread counts.
    t = len(mut_lst) // (int(threads) - 1)
    n = 0
    for i in range(0, len(mut_lst), t):
        submutlst = mut_lst[i:i + t]
        n = n + 1
        # indi_lst_name = "individual_list_"+str(n)+"_.txt"
        sub_dir_name = "Subdirectory" + str(n)
        indi_lst_name = sub_dir_name + "/individual_list.txt"
        os.mkdir(sub_dir_name)
        os.system("cp " + pdbname + " " + sub_dir_name)
        # FoldX mutant file: one "KA12G;"-style entry per line.
        with open(indi_lst_name, "w+") as ind_lst:
            for mut in submutlst:
                ind_lst.write(mut + "\n")
            ind_lst.close()
        # Human-readable list: "<idx> <wt> <position> <mut>" per line,
        # derived from entries shaped like KA12G;
        readablefilename = sub_dir_name + "/List_Mutations_readable.txt"
        with open(readablefilename, "a+") as readablefile:
            # KA12G
            x = 1
            for mut in submutlst:
                readablefile.write(str(x)+" "+mut[0]+" "+mut[2:-2]+" "+mut[-2]+"\n")
                #readablefile.write(str(x) + " " + mut[0] + " " + mut[2:-1] + " " + mut[-1] + "\n")
                x += 1
            readablefile.close()
        # Per-chunk FoldX BuildModel config.
        cfg = "command=BuildModel\npdb=" + pdbname + "\nmutant-file=individual_list.txt\nnumberOfRuns=5"
        cfg_name = sub_dir_name + "/BM_" + str(n) + ".cfg"
        with open(cfg_name, "w+") as cfg_file:
            cfg_file.write(cfg)
            cfg_file.close()
        # Append the background launch command for this chunk.
        with open("todo_list.sh", "a+") as todo_file:
            todo_file.write("cd " + sub_dir_name + "\n")
            todo_file.write("nohup "+foldx_exe+" -f " + "BM_" + str(n) + ".cfg" + " &\n")
            todo_file.write("cd ..\n")
            todo_file.close()
if __name__ == "__main__":
    # Keep these imports here: the helper functions above reference the
    # resulting module globals when the script runs as a program.
    import os
    import argparse
    cli_args = getparser()
    pdb_file = cli_args.pdbfile
    n_threads = cli_args.number_threads
    chain = cli_args.chain_id
    # Truncate any stale todo_list.sh so this run starts from a clean script.
    open("todo_list.sh", "w+").close()
    mutations = SOfile2mutlist(pdb_file, chain, foldx_exe)
    multi_threads(mutations, n_threads, pdb_file, foldx_exe)
| RFACA/foldx/foldx_scan.py | 3,503 | !/usr/bin/pythonBy Sun Jinyuan and Cui Yinglu, 2021os.system("/data/home/jsun/mhetase/FoldX/foldx5 --command=SequenceOnly --pdb=" + pdbname) indi_lst_name = "individual_list_"+str(n)+"_.txt" KA12Greadablefile.write(str(x) + " " + mut[0] + " " + mut[2:-1] + " " + mut[-1] + "\n")print(foldx_exe) | 296 | en | 0.457959 |
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
from oslo_config import cfg
from oslo_log import log
import copy
import time
from conductor import service
# from conductor.solver.optimizer import decision_path as dpath
# from conductor.solver.optimizer import best_first
# from conductor.solver.optimizer import greedy
from conductor.solver.optimizer import fit_first
from conductor.solver.optimizer import random_pick
from conductor.solver.request import demand
from conductor.solver.triage_tool.triage_data import TriageData
LOG = log.getLogger(__name__)
CONF = cfg.CONF
# oslo.config options for the [solver] group. None are defined yet, but the
# group is registered so future options have a home.
SOLVER_OPTS = [
]
CONF.register_opts(SOLVER_OPTS, group='solver')
class Optimizer(object):
    """Entry point that solves placement requests.

    For each request the demands are first ordered by their dependencies
    (derived from constraints and objective operands), then handed to a
    search strategy: FitFirst when an objective goal is set, RandomPick
    otherwise.
    """
    # FIXME(gjung): _requests should be request (no underscore, one item)
    def __init__(self, conf, _requests=None, _begin_time=None):
        """Store configuration, the requests to solve and the start time.

        :param conf: oslo_config configuration object.
        :param _requests: mapping of request key -> request object.
        :param _begin_time: optional timestamp when solving began.
        """
        self.conf = conf
        # start time of solving the plan
        if _begin_time is not None:
            self._begin_time = _begin_time
        # self.search = greedy.Greedy(self.conf)
        self.search = None
        # self.search = best_first.BestFirst(self.conf)
        if _requests is not None:
            self.requests = _requests
    # Were the 'simulators' ever used? It doesn't look like this.
    # Since solver/simulator code needs cleansing before being moved to ONAP,
    # I see no value for having this piece of code which is not letting us do
    # that cleanup. Also, Shankar has confirmed solver/simulators folder needs
    # to go away. Commenting out for now - may be should be removed permanently.
    # Shankar (TODO).
    # else:
    #     ''' for simulation '''
    #     req_sim = request_simulator.RequestSimulator(self.conf)
    #     req_sim.generate_requests()
    #     self.requests = req_sim.requests
    def get_solution(self, num_solutions):
        """Solve every request, collecting up to ``num_solutions`` solutions.

        :param num_solutions: maximum number of solutions per request (int),
            or the string 'all' to keep searching until none is found.
        :return: list of decision dicts, one per solution found.
        """
        LOG.debug("search start for max {} solutions".format(num_solutions))
        for rk in self.requests:
            request = self.requests[rk]
            LOG.debug("--- request = {}".format(rk))
            decision_list = list()
            LOG.debug("1. sort demands")
            demand_list = self._sort_demands(request)
            for d in demand_list:
                LOG.debug("  demand = {}".format(d.name))
            LOG.debug("2. search")
            rand_counter = 10
            while num_solutions == 'all' or num_solutions > 0:
                LOG.debug("searching for the solution {}".format(len(decision_list) + 1))
                st = time.time()
                # Snapshot the demands; the search operates on demand_list
                # and the copy is restored below so every iteration starts
                # from the same state.
                _copy_demand_list = copy.deepcopy(demand_list)
                if not request.objective.goal:
                    LOG.debug("No objective function is provided. "
                              "Random pick algorithm is used")
                    self.search = random_pick.RandomPick(self.conf)
                    best_path = self.search.search(demand_list, request)
                else:
                    LOG.debug("Fit first algorithm is used")
                    self.search = fit_first.FitFirst(self.conf)
                    best_path = self.search.search(demand_list,
                                                   request.objective, request)
                LOG.debug("search delay = {} sec".format(time.time() - st))
                demand_list = copy.deepcopy(_copy_demand_list)
                if best_path is not None:
                    self.search.print_decisions(best_path)
                    rand_counter = 10
                elif not request.objective.goal and rand_counter > 0 and self._has_candidates(request):
                    # RandomPick gave no candidates after applying constraints. If there are any candidates left
                    # lets' try again several times until some solution is found. When one of the demands is not unique
                    # it persists in the list all the time. In order to prevent infinite loop we need to have counter
                    rand_counter -= 1
                    LOG.debug("Incomplete random solution - repeat {}".format(rand_counter))
                    continue
                else:
                    LOG.debug("no solution found")
                    break
                # add the current solution to decision_list
                decision_list.append(best_path.decisions)
                #remove the candidate with "uniqueness = true"
                self._remove_unique_candidate(request, best_path, demand_list)
                if num_solutions != 'all':
                    num_solutions -= 1
        # NOTE(review): if self.requests is empty, self.search is still None
        # here and this line would raise AttributeError -- confirm requests
        # is never empty at this point.
        self.search.triageSolver.getSolution(decision_list)
        return decision_list
    def _has_candidates(self, request):
        """Return False as soon as any demand has no candidate resources left."""
        for demand_name, demand in request.demands.items():
            LOG.debug("Req Available resources: {} {}".format(demand_name, len(request.demands[demand_name].resources)))
            if len(demand.resources) == 0:
                LOG.debug("No more candidates for demand {}".format(demand_name))
                return False
        return True
    def _remove_unique_candidate(self, _request, current_decision, demand_list):
        """Drop already-used unique candidates so later searches differ."""
        # This method is to remove previous solved/used candidate from consideration
        # when Conductor needs to provide multiple solutions to the user/client
        for demand_name, candidate_attr in current_decision.decisions.items():
            candidate_uniqueness = candidate_attr.get('uniqueness')
            if candidate_uniqueness and candidate_uniqueness == 'true':
                # if the candidate uniqueness is 'true', remove that solved
                # candidate from the translated candidates list
                _request.demands[demand_name].resources.pop(candidate_attr.get('candidate_id'))
                # update the demand_list
                for demand in demand_list:
                    if(getattr(demand, 'name') == demand_name):
                        demand.resources = _request.demands[demand_name].resources
    def _sort_demands(self, _request):
        """Order demands so location-coupled ones are expanded first.

        ``sort_base`` is used as a visited marker (1 == already queued).
        """
        LOG.debug("  _sort_demands")
        demand_list = []
        # first, find loc-demand dependencies
        # using constraints and objective functions
        open_demand_list = []
        for key in _request.constraints:
            c = _request.constraints[key]
            if c.constraint_type == "access_distance":
                for dk in c.demand_list:
                    if _request.demands[dk].sort_base != 1:
                        _request.demands[dk].sort_base = 1
                        open_demand_list.append(_request.demands[dk])
        for op in _request.objective.operand_list:
            if op.function.func_type == "latency_between": #TODO do i need to include the region_group here?
                if isinstance(op.function.loc_a, demand.Location):
                    if _request.demands[op.function.loc_z.name].sort_base != 1:
                        _request.demands[op.function.loc_z.name].sort_base = 1
                        open_demand_list.append(op.function.loc_z)
                elif isinstance(op.function.loc_z, demand.Location):
                    if _request.demands[op.function.loc_a.name].sort_base != 1:
                        _request.demands[op.function.loc_a.name].sort_base = 1
                        open_demand_list.append(op.function.loc_a)
            elif op.function.func_type == "distance_between":
                if isinstance(op.function.loc_a, demand.Location):
                    if _request.demands[op.function.loc_z.name].sort_base != 1:
                        _request.demands[op.function.loc_z.name].sort_base = 1
                        open_demand_list.append(op.function.loc_z)
                elif isinstance(op.function.loc_z, demand.Location):
                    if _request.demands[op.function.loc_a.name].sort_base != 1:
                        _request.demands[op.function.loc_a.name].sort_base = 1
                        open_demand_list.append(op.function.loc_a)
        if len(open_demand_list) == 0:
            init_demand = self._exist_not_sorted_demand(_request.demands)
            open_demand_list.append(init_demand)
        # second, find demand-demand dependencies
        while True:
            d_list = self._get_depended_demands(open_demand_list, _request)
            for d in d_list:
                demand_list.append(d)
            init_demand = self._exist_not_sorted_demand(_request.demands)
            if init_demand is None:
                break
            open_demand_list.append(init_demand)
        return demand_list
    def _get_depended_demands(self, _open_demand_list, _request):
        """BFS from the queued demands, pulling in demands they depend on."""
        demand_list = []
        while True:
            if len(_open_demand_list) == 0:
                break
            d = _open_demand_list.pop(0)
            if d.sort_base != 1:
                d.sort_base = 1
            demand_list.append(d)
            for key in _request.constraints:
                c = _request.constraints[key]
                # FIXME(snarayanan): "aic" only to be known by conductor-data
                if c.constraint_type == "aic_distance":
                    if d.name in c.demand_list:
                        for dk in c.demand_list:
                            if dk != d.name and \
                                    _request.demands[dk].sort_base != 1:
                                _request.demands[dk].sort_base = 1
                                _open_demand_list.append(
                                    _request.demands[dk])
            for op in _request.objective.operand_list:
                if op.function.func_type == "latency_between": #TODO
                    if op.function.loc_a.name == d.name:
                        if op.function.loc_z.name in \
                                _request.demands.keys():
                            if _request.demands[
                                    op.function.loc_z.name].sort_base != 1:
                                _request.demands[
                                    op.function.loc_z.name].sort_base = 1
                                _open_demand_list.append(op.function.loc_z)
                    elif op.function.loc_z.name == d.name:
                        if op.function.loc_a.name in \
                                _request.demands.keys():
                            if _request.demands[
                                    op.function.loc_a.name].sort_base != 1:
                                _request.demands[
                                    op.function.loc_a.name].sort_base = 1
                                _open_demand_list.append(op.function.loc_a)
                elif op.function.func_type == "distance_between":
                    if op.function.loc_a.name == d.name:
                        if op.function.loc_z.name in \
                                _request.demands.keys():
                            if _request.demands[
                                    op.function.loc_z.name].sort_base != 1:
                                _request.demands[
                                    op.function.loc_z.name].sort_base = 1
                                _open_demand_list.append(op.function.loc_z)
                    elif op.function.loc_z.name == d.name:
                        if op.function.loc_a.name in \
                                _request.demands.keys():
                            if _request.demands[
                                    op.function.loc_a.name].sort_base != 1:
                                _request.demands[
                                    op.function.loc_a.name].sort_base = 1
                                _open_demand_list.append(op.function.loc_a)
        return demand_list
    def _exist_not_sorted_demand(self, _demands):
        """Return the first demand not yet visited (sort_base != 1), or None."""
        not_sorted_demand = None
        for key in _demands:
            # NOTE: local name shadows the imported 'demand' module; safe
            # here because the module is not referenced in this method.
            demand = _demands[key]
            if demand.sort_base != 1:
                not_sorted_demand = demand
                break
        return not_sorted_demand
| conductor/conductor/solver/optimizer/optimizer.py | 12,731 | ------------------------------------------------------------------------- Copyright (c) 2015-2017 AT&T Intellectual Property Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ------------------------------------------------------------------------- from conductor.solver.optimizer import decision_path as dpath from conductor.solver.optimizer import best_first from conductor.solver.optimizer import greedy FIXME(gjung): _requests should be request (no underscore, one item) start time of solving the plan self.search = greedy.Greedy(self.conf) self.search = best_first.BestFirst(self.conf) Were the 'simulators' ever used? It doesn't look like this. Since solver/simulator code needs cleansing before being moved to ONAP, I see no value for having this piece of code which is not letting us do that cleanup. Also, Shankar has confirmed solver/simulators folder needs to go away. Commenting out for now - may be should be removed permanently. Shankar (TODO). else: ''' for simulation ''' req_sim = request_simulator.RequestSimulator(self.conf) req_sim.generate_requests() self.requests = req_sim.requests RandomPick gave no candidates after applying constraints. If there are any candidates left lets' try again several times until some solution is found. When one of the demands is not unique it persists in the list all the time. 
In order to prevent infinite loop we need to have counter add the current solution to decision_listremove the candidate with "uniqueness = true" This method is to remove previous solved/used candidate from consideration when Conductor needs to provide multiple solutions to the user/client if the candidate uniqueness is 'false', then remove that solved candidate from the translated candidates list update the demand_list first, find loc-demand dependencies using constraints and objective functionsTODO do i need to include the region_group here? second, find demand-demand dependencies FIXME(snarayanan): "aic" only to be known by conductor-dataTODO | 2,494 | en | 0.831059 |
#
# Copyright 2020 by 0x7c2, Simon Brecht.
# All rights reserved.
# This file is part of the Report/Analytic Tool - CPme,
# and is released under the "Apache License 2.0". Please see the LICENSE
# file that should have been included as part of this package.
#
from templates import check
import func
class check_performance_ispredundancy(check):
    """Report ISP Redundancy link states parsed from 'cpstat fw'."""
    page = "Health.Firewall"
    category = "Information"
    title = "ISP Redundancy"
    isFirewall = True
    isManagement = False
    minVersion = 8020
    command = "cpstat fw | grep -A5 'ISP link table' | grep '|'"
    isCommand = True
    def run_check(self):
        """Emit one result per ISP link, or a single 'disabled' result.

        Each table row looks like '| <name> | <status> | <role> |'; the
        header row (name == "Name") is skipped.
        """
        links_found = False
        for line in self.commandOut:
            fields = line.split('|')
            ispname = fields[1]
            ispstatus = fields[2]
            isprole = fields[3]
            if ispname != "Name":
                links_found = True
                # Bug fix: the original wrote 'ipstatus = "WARN"' (a typo),
                # leaving 'state' undefined -> NameError for any non-OK link.
                state = "WARN"
                if ispstatus == "OK":
                    state = "PASS"
                self.add_result(self.title + " (Name: " + ispname + ")", state, "Role: " + isprole)
        # Bug fix: the original used for/else with no break, so the
        # "disabled" row was appended even when links were reported.
        if not links_found:
            self.add_result(self.title, "PASS", "disabled")
class check_performance_securexl_sum(check):
    """Report the enabled/disabled state of each SecureXL instance."""
    page = "Health.SecureXL"
    category = "Information"
    title = "SecureXL"
    isFirewall = True
    isManagement = False
    minVersion = 8020
    command = "fwaccel stat | grep -v Template"
    isCommand = True
    def run_check(self):
        """Parse 'fwaccel stat' table rows: '| <id> | <type> | <status> |'.

        Cleanup vs. the original: removed the dead 'state = "FAIL"' store,
        the unused 'feature' local, and renamed locals that shadowed the
        'id'/'type' builtins.
        """
        for line in self.commandOut:
            data = line.strip('\n').split('|')
            # Skip separators, blank cells and the header row.
            if len(data) < 4 or data[1].replace(" ", "") == "" or data[1].replace(" ", "") == "Id":
                continue
            inst_id = data[1].replace(" ", "")
            inst_type = data[2].replace(" ", "")
            status = data[3].replace(" ", "")
            # PASS only when the instance is enabled.
            if status != "enabled":
                state = "WARN"
            else:
                state = "PASS"
            self.add_result(self.title + " (Instance: " + inst_id + ", Name: " + inst_type + ", Status: " + status + ")", state, "")
class check_performance_securexl_templates(check):
    """Report the state of SecureXL connection templates."""
    page = "Health.SecureXL"
    category = "Templates"
    title = "SecureXL"
    isFirewall = True
    isManagement = False
    minVersion = 8020
    command = "fwaccel stat| grep Templates | sed s/\ \ */\/g| sed s/Templates//g"
    isCommand = True
    def run_check(self):
        """Parse '<template kind>:<state>' lines from the filtered output.

        'enabled' -> PASS, 'disabled' -> WARN, anything else -> FAIL.
        """
        for raw in self.commandOut:
            parts = raw.strip('\n').split(":")
            if len(parts) < 2:
                continue
            value = parts[1]
            if "enabled" in value:
                verdict = "PASS"
            elif "disabled" in value:
                verdict = "WARN"
            else:
                verdict = "FAIL"
            self.add_result(self.title + " (" + parts[0] + " Templates)", verdict, value)
class check_performance_securexl_statistics(check):
    """Evaluate accelerated vs. F2F traffic ratios from 'fwaccel stats -s'."""
    page = "Health.SecureXL"
    category = "Statistics"
    title = "SecureXL"
    isFirewall = True
    isManagement = False
    minVersion = 8020
    command = "fwaccel stats -s | sed 's/  */ /g' | sed 's/\t/ /g'"
    isCommand = True
    def run_check(self):
        """Parse '<field>: <count> (<percent>%)' lines and grade each one.

        Thresholds: accelerated conns < 30% or accelerated pkts < 50% ->
        WARN; F2Fed (forwarded to firewall path) > 40% -> FAIL.
        """
        for line in self.commandOut:
            state = "PASS"
            data = line.strip('\n').split(":")
            if len(data) < 2:
                continue
            field = data[0].strip(' ')
            valraw = data[1].strip(' ').split(" ")
            # Robustness fix: lines without a "(NN%)" part, or with a
            # non-numeric percentage, crashed the original with
            # IndexError/ValueError; they are skipped now.
            if len(valraw) < 2:
                continue
            valnum = valraw[0]
            try:
                valper = int(valraw[1].replace('(', '').replace(')', '').replace('%', ''))
            except ValueError:
                continue
            if "Accelerated conns" in field and valper < 30:
                state = "WARN"
            if "Accelerated pkts" in field and valper < 50:
                state = "WARN"
            if "F2Fed" in field and valper > 40:
                state = "FAIL"
            self.add_result(self.title + " (" + field + ")", state, valnum + "(" + str(valper) + "%)")
class check_performance_vpn_accel(check):
    """Check whether SecureXL VPN acceleration is enabled."""
    page = "Health.SecureXL"
    category = "Information"
    title = "SecureXL VPN Acceleration"
    isFirewall = True
    isManagement = False
    minVersion = 8020
    command = "vpn accel stat"
    isCommand = True
    def run_check(self):
        """PASS per stderr line reporting acceleration enabled; FAIL otherwise.

        'vpn accel stat' prints its status on stderr, hence commandErr.
        """
        matched = False
        for err_line in self.commandErr:
            if "acceleration is enabled" in err_line:
                matched = True
                self.add_result(self.title, 'PASS', err_line.strip())
        if not matched:
            # Include both output streams so the failure is debuggable.
            self.add_result(self.title, 'FAIL', str(self.commandOut) + str(self.commandErr))
| performance.py | 3,889 | Copyright 2020 by 0x7c2, Simon Brecht. All rights reserved. This file is part of the Report/Analytic Tool - CPme, and is released under the "Apache License 2.0". Please see the LICENSE file that should have been included as part of this package. | 245 | en | 0.966441 |
# encoding: utf-8
# module renderdoc
# from P:\1-Scripts\_Python\Py-Autocomplete\renderdoc.pyd
# by generator 1.146
# no doc
# imports
import enum as __enum
from .SwigPyObject import SwigPyObject
class BlendStats(SwigPyObject):
    """ Contains the statistics for blend state binds in a frame. """
    # NOTE: auto-generated IDE skeleton for a native (SWIG) type. Every
    # method below is a placeholder ("real signature unknown"); the real
    # implementations live in the compiled renderdoc module.
    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass
    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass
    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass
    def __hash__(self, *args, **kwargs): # real signature unknown
        """ Return hash(self). """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass
    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass
    # Statistic attributes; the stub setters are no-ops (lambda ... None),
    # real behavior comes from the native module.
    calls = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """How many function calls were made."""
    nulls = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """How many objects were unbound."""
    redundants = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """How many calls made no change due to the existing bind being identical."""
    sets = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """How many objects were bound."""
    # SWIG wrapper bookkeeping attributes (pointer handle / ownership flag).
    this = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    thisown = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    __dict__ = None # (!) real value is ''
| _pycharm_skeletons/renderdoc/BlendStats.py | 2,222 | Contains the statistics for blend state binds in a frame.
Return self==value.
Return self>=value.
Return self>value.
Return hash(self).
Return self<=value.
Return self<value.
Return self!=value.
Create and return a new object. See help(type) for accurate signature.
encoding: utf-8 module renderdoc from P:\1-Scripts\_Python\Py-Autocomplete\renderdoc.pyd by generator 1.146 no doc imports real signature unknown real signature unknown real signature unknown real signature unknown real signature unknown real signature unknown real signature unknown known case of __new__ real signature unknown real signature unknown default default default default default default (!) real value is '' | 698 | en | 0.527344 |
from __future__ import division
from __future__ import print_function
from models.pytorch.pna.layer import PNALayer
from multitask_benchmark.util.train import execute_train, build_arg_parser
# Training settings
parser = build_arg_parser()
parser.add_argument('--self_loop', action='store_true', default=False, help='Whether to add self loops in aggregators')
parser.add_argument('--towers', type=int, default=4, help='Number of towers in MPNN layers')
parser.add_argument('--aggregation', type=str, default='sum', help='Type of aggregation')
parser.add_argument('--pretrans_layers', type=int, default=1, help='Number of MLP layers before aggregation')
parser.add_argument('--posttrans_layers', type=int, default=1, help='Number of MLP layers after aggregation')
args = parser.parse_args()


def _mpnn_conv_descr(divide_input):
    """Describe one PNA layer restricted to a single aggregator and the
    identity scaler, which makes it behave as a plain MPNN layer."""
    return dict(layer_type=PNALayer,
                args=dict(aggregators=[args.aggregation],
                          scalers=['identity'],
                          avg_d=None,
                          towers=args.towers,
                          self_loop=args.self_loop,
                          divide_input=divide_input,
                          pretrans_layers=args.pretrans_layers,
                          posttrans_layers=args.posttrans_layers))


# The MPNNs can be considered a particular case of PNA networks with a
# single aggregator and no scalers (identity). Only the middle layers
# divide their input across towers.
execute_train(gnn_args=dict(nfeat=None,
                            nhid=args.hidden,
                            nodes_out=None,
                            graph_out=None,
                            dropout=args.dropout,
                            device=None,
                            first_conv_descr=_mpnn_conv_descr(divide_input=False),
                            middle_conv_descr=_mpnn_conv_descr(divide_input=True),
                            fc_layers=args.fc_layers,
                            conv_layers=args.conv_layers,
                            skip=args.skip,
                            gru=args.gru,
                            fixed=args.fixed,
                            variable=args.variable), args=args)
| multitask_benchmark/train/mpnn.py | 3,036 | Training settings The MPNNs can be considered a particular case of PNA networks with a single aggregator and no scalers (identity) | 130 | en | 0.871023 |
import pytest
from spacy import displacy
from spacy.displacy.render import DependencyRenderer, EntityRenderer
from spacy.lang.fa import Persian
from spacy.tokens import Span, Doc
def test_displacy_parse_ents(en_vocab):
    """Test that named entities on a Doc are converted into displaCy's format."""
    words = ["But", "Google", "is", "starting", "from", "behind"]
    doc = Doc(en_vocab, words=words)
    doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
    parsed = displacy.parse_ents(doc)
    assert isinstance(parsed, dict)
    assert parsed["text"] == "But Google is starting from behind "
    expected = {"start": 4, "end": 10, "label": "ORG", "kb_id": "", "kb_url": "#"}
    assert parsed["ents"] == [expected]
    # Attaching a knowledge-base identifier must surface it in the output.
    doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"], kb_id="Q95")]
    parsed = displacy.parse_ents(doc)
    assert isinstance(parsed, dict)
    assert parsed["text"] == "But Google is starting from behind "
    assert parsed["ents"] == [dict(expected, kb_id="Q95")]
def test_displacy_parse_ents_with_kb_id_options(en_vocab):
    """Test that named entities with kb_id on a Doc are converted into displaCy's format."""
    doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
    doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"], kb_id="Q95")]
    options = {"kb_url_template": "https://www.wikidata.org/wiki/{}"}
    parsed = displacy.parse_ents(doc, options)
    assert isinstance(parsed, dict)
    assert parsed["text"] == "But Google is starting from behind "
    # The URL template must be expanded with the entity's kb_id.
    assert parsed["ents"] == [{
        "start": 4,
        "end": 10,
        "label": "ORG",
        "kb_id": "Q95",
        "kb_url": "https://www.wikidata.org/wiki/Q95",
    }]
def test_displacy_parse_deps(en_vocab):
    """Test that deps and tags on a Doc are converted into displaCy's format."""
    words = ["This", "is", "a", "sentence"]
    heads = [1, 1, 3, 1]
    pos = ["DET", "VERB", "DET", "NOUN"]
    tags = ["DT", "VBZ", "DT", "NN"]
    deps = ["nsubj", "ROOT", "det", "attr"]
    doc = Doc(en_vocab, words=words, heads=heads, pos=pos, tags=tags, deps=deps)
    parsed = displacy.parse_deps(doc)
    assert isinstance(parsed, dict)
    # Each word carries the coarse POS in the "tag" slot and no lemma.
    assert parsed["words"] == [
        {"lemma": None, "text": word, "tag": tag} for word, tag in zip(words, pos)
    ]
    assert parsed["arcs"] == [
        {"start": 0, "end": 1, "label": "nsubj", "dir": "left"},
        {"start": 2, "end": 3, "label": "det", "dir": "left"},
        {"start": 1, "end": 3, "label": "attr", "dir": "right"},
    ]
def test_displacy_invalid_arcs():
    """A negative arc start index must make the dependency renderer raise."""
    renderer = DependencyRenderer()
    words = [{"text": "This", "tag": "DET"}, {"text": "is", "tag": "VERB"}]
    arcs = [
        {"start": 0, "end": 1, "label": "nsubj", "dir": "left"},
        {"start": -1, "end": 2, "label": "det", "dir": "left"},
    ]
    bad_parse = {"words": words, "arcs": arcs}
    with pytest.raises(ValueError):
        renderer.render([bad_parse])
def test_displacy_spans(en_vocab):
    """Test that displaCy can render Spans."""
    doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
    doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
    # Render a slice of the Doc rather than the full Doc.
    rendered = displacy.render(doc[1:4], style="ent")
    assert rendered.startswith("<div")
def test_displacy_raises_for_wrong_type(en_vocab):
    """Rendering a plain string (not a Doc/Span) must raise ValueError."""
    with pytest.raises(ValueError):
        displacy.render("hello world")
def test_displacy_rtl():
    """Rendering a right-to-left language sets direction and lang markers."""
    # Source: http://www.sobhe.ir/hazm/ – is this correct?
    words = ["ما", "بسیار", "کتاب", "می\u200cخوانیم"]
    # These are (likely) wrong, but it's just for testing
    pos = ["PRO", "ADV", "N_PL", "V_SUB"]  # needs to match lang.fa.tag_map
    deps = ["foo", "bar", "foo", "baz"]
    heads = [1, 0, 3, 1]
    nlp = Persian()
    doc = Doc(nlp.vocab, words=words, tags=pos, heads=heads, deps=deps)
    doc.ents = [Span(doc, 1, 3, label="TEST")]
    for style in ("dep", "ent"):
        html = displacy.render(doc, page=True, style=style)
        assert "direction: rtl" in html
        assert f'lang="{nlp.lang}"' in html
        if style == "dep":
            # Only the dependency SVG carries an explicit direction attr.
            assert 'direction="rtl"' in html
def test_displacy_render_wrapper(en_vocab):
    """Test that displaCy accepts custom rendering wrapper."""
    def wrap(markup):
        return "TEST" + markup + "TEST"

    displacy.set_render_wrapper(wrap)
    doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
    doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
    markup = displacy.render(doc, style="ent")
    assert markup.startswith("TEST<div")
    assert markup.endswith("/div>TEST")
    # Restore the identity wrapper so later tests are unaffected.
    displacy.set_render_wrapper(lambda html: html)
def test_displacy_options_case():
    """Entity labels and colour options must match case-insensitively."""
    renderer = EntityRenderer({"ents": ["foo", "BAR"], "colors": {"FOO": "red", "bar": "green"}})
    labels = ["foo", "bar", "FOO", "BAR"]
    spans = [{"start": idx, "end": idx + 1, "label": label} for idx, label in enumerate(labels)]
    rendered = renderer.render_ents("abcde", spans, None).split("\n\n")
    expected = [("red", "foo"), ("green", "bar"), ("red", "FOO"), ("green", "BAR")]
    for chunk, (color, label) in zip(rendered, expected):
        assert color in chunk and label in chunk
| spacy/tests/test_displacy.py | 5,520 | Test that deps and tags on a Doc are converted into displaCy's format.
Test that named entities on a Doc are converted into displaCy's format.
Test that named entities with kb_id on a Doc are converted into displaCy's format.
Test that displaCy accepts custom rendering wrapper.
Test that displaCy can render Spans.
Source: http://www.sobhe.ir/hazm/ – is this correct? These are (likely) wrong, but it's just for testing needs to match lang.fa.tag_map Restore | 461 | en | 0.844372 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class SecurityRule(SubResource):
    """Network security rule.

    :param id: Resource Id
    :type id: str
    :param description: Gets or sets a description for this rule. Restricted
     to 140 chars.
    :type description: str
    :param protocol: Gets or sets Network protocol this rule applies to. Can
     be Tcp, Udp or All(*). Possible values include: 'Tcp', 'Udp', '*'
    :type protocol: str or :class:`SecurityRuleProtocol
     <azure.mgmt.network.models.SecurityRuleProtocol>`
    :param source_port_range: Gets or sets Source Port or Range. Integer or
     range between 0 and 65535. Asterisk '*' can also be used to match all
     ports.
    :type source_port_range: str
    :param destination_port_range: Gets or sets Destination Port or Range.
     Integer or range between 0 and 65535. Asterisk '*' can also be used to
     match all ports.
    :type destination_port_range: str
    :param source_address_prefix: Gets or sets source address prefix. CIDR or
     source IP range. Asterisk '*' can also be used to match all source IPs.
     Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and
     'Internet' can also be used. If this is an ingress rule, specifies where
     network traffic originates from.
    :type source_address_prefix: str
    :param destination_address_prefix: Gets or sets destination address
     prefix. CIDR or source IP range. Asterisk '*' can also be used to match
     all source IPs. Default tags such as 'VirtualNetwork',
     'AzureLoadBalancer' and 'Internet' can also be used.
    :type destination_address_prefix: str
    :param access: Gets or sets network traffic is allowed or denied.
     Possible values are 'Allow' and 'Deny'. Possible values include:
     'Allow', 'Deny'
    :type access: str or :class:`SecurityRuleAccess
     <azure.mgmt.network.models.SecurityRuleAccess>`
    :param priority: Gets or sets the priority of the rule. The value can be
     between 100 and 4096. The priority number must be unique for each rule
     in the collection. The lower the priority number, the higher the
     priority of the rule.
    :type priority: int
    :param direction: Gets or sets the direction of the rule.InBound or
     Outbound. The direction specifies if rule will be evaluated on incoming
     or outgoing traffic. Possible values include: 'Inbound', 'Outbound'
    :type direction: str or :class:`SecurityRuleDirection
     <azure.mgmt.network.models.SecurityRuleDirection>`
    :param provisioning_state: Gets provisioning state of the PublicIP
     resource Updating/Deleting/Failed
    :type provisioning_state: str
    :param name: Gets name of the resource that is unique within a resource
     group. This name can be used to access the resource
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated
    :type etag: str
    """

    # Attributes that must be supplied when constructing the rule.
    _validation = {
        'protocol': {'required': True},
        'source_address_prefix': {'required': True},
        'destination_address_prefix': {'required': True},
        'access': {'required': True},
        'direction': {'required': True},
    }

    # Maps Python attribute names to their wire-format key paths and types
    # for the SDK (de)serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'source_port_range': {'key': 'properties.sourcePortRange', 'type': 'str'},
        'destination_port_range': {'key': 'properties.destinationPortRange', 'type': 'str'},
        'source_address_prefix': {'key': 'properties.sourceAddressPrefix', 'type': 'str'},
        'destination_address_prefix': {'key': 'properties.destinationAddressPrefix', 'type': 'str'},
        'access': {'key': 'properties.access', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'direction': {'key': 'properties.direction', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, protocol, source_address_prefix, destination_address_prefix, access, direction, id=None, description=None, source_port_range=None, destination_port_range=None, priority=None, provisioning_state=None, name=None, etag=None):
        """Initialize the rule; see the class docstring for each parameter."""
        super(SecurityRule, self).__init__(id=id)
        self.description = description
        self.protocol = protocol
        self.source_port_range = source_port_range
        self.destination_port_range = destination_port_range
        self.source_address_prefix = source_address_prefix
        self.destination_address_prefix = destination_address_prefix
        self.access = access
        self.priority = priority
        self.direction = direction
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
| azure-mgmt-network/azure/mgmt/network/models/security_rule.py | 5,419 | Network security rule.
:param id: Resource Id
:type id: str
:param description: Gets or sets a description for this rule. Restricted
to 140 chars.
:type description: str
:param protocol: Gets or sets Network protocol this rule applies to. Can
be Tcp, Udp or All(*). Possible values include: 'Tcp', 'Udp', '*'
:type protocol: str or :class:`SecurityRuleProtocol
<azure.mgmt.network.models.SecurityRuleProtocol>`
:param source_port_range: Gets or sets Source Port or Range. Integer or
range between 0 and 65535. Asterix '*' can also be used to match all
ports.
:type source_port_range: str
:param destination_port_range: Gets or sets Destination Port or Range.
Integer or range between 0 and 65535. Asterix '*' can also be used to
match all ports.
:type destination_port_range: str
:param source_address_prefix: Gets or sets source address prefix. CIDR or
source IP range. Asterix '*' can also be used to match all source IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and
'Internet' can also be used. If this is an ingress rule, specifies where
network traffic originates from.
:type source_address_prefix: str
:param destination_address_prefix: Gets or sets destination address
prefix. CIDR or source IP range. Asterix '*' can also be used to match
all source IPs. Default tags such as 'VirtualNetwork',
'AzureLoadBalancer' and 'Internet' can also be used.
:type destination_address_prefix: str
:param access: Gets or sets network traffic is allowed or denied.
Possible values are 'Allow' and 'Deny'. Possible values include:
'Allow', 'Deny'
:type access: str or :class:`SecurityRuleAccess
<azure.mgmt.network.models.SecurityRuleAccess>`
:param priority: Gets or sets the priority of the rule. The value can be
between 100 and 4096. The priority number must be unique for each rule
in the collection. The lower the priority number, the higher the
priority of the rule.
:type priority: int
:param direction: Gets or sets the direction of the rule.InBound or
Outbound. The direction specifies if rule will be evaluated on incoming
or outcoming traffic. Possible values include: 'Inbound', 'Outbound'
:type direction: str or :class:`SecurityRuleDirection
<azure.mgmt.network.models.SecurityRuleDirection>`
:param provisioning_state: Gets provisioning state of the PublicIP
resource Updating/Deleting/Failed
:type provisioning_state: str
:param name: Gets name of the resource that is unique within a resource
group. This name can be used to access the resource
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated
:type etag: str
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- | 3,073 | en | 0.668274 |
# Copyright 2018, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements DPQuery interface for Gaussian average queries.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from distutils.version import LooseVersion
import tensorflow.compat.v1 as tf
from tensorflow_privacy.privacy.dp_query import dp_query
from tensorflow_privacy.privacy.dp_query import normalized_query
class GaussianSumQuery(dp_query.SumAggregationDPQuery):
  """Implements DPQuery interface for Gaussian sum queries.

  Accumulates clipped vectors, then adds Gaussian noise to the sum.
  """

  # pylint: disable=invalid-name
  # Immutable parameters of the query: the clipping norm and the noise
  # stddev, both held as float32 tensors (see make_global_state).
  _GlobalState = collections.namedtuple(
      '_GlobalState', ['l2_norm_clip', 'stddev'])

  def __init__(self, l2_norm_clip, stddev):
    """Initializes the GaussianSumQuery.

    Args:
      l2_norm_clip: The clipping norm to apply to the global norm of each
        record.
      stddev: The stddev of the noise added to the sum.
    """
    self._l2_norm_clip = l2_norm_clip
    self._stddev = stddev
    # Optional ledger; when set via set_ledger, each noised sum is
    # recorded on it before results are returned (see get_noised_result).
    self._ledger = None

  def set_ledger(self, ledger):
    """Sets the ledger on which this query's sum events are recorded."""
    self._ledger = ledger

  def make_global_state(self, l2_norm_clip, stddev):
    """Creates a global state from the given parameters."""
    return self._GlobalState(tf.cast(l2_norm_clip, tf.float32),
                             tf.cast(stddev, tf.float32))

  def initial_global_state(self):
    """See base class."""
    return self.make_global_state(self._l2_norm_clip, self._stddev)

  def derive_sample_params(self, global_state):
    """See base class. Only the clipping norm is needed per record."""
    return global_state.l2_norm_clip

  def initial_sample_state(self, template):
    """See base class. Returns a zero tensor for each leaf of `template`."""
    return tf.nest.map_structure(
        dp_query.zeros_like, template)

  def preprocess_record_impl(self, params, record):
    """Clips the l2 norm, returning the clipped record and the l2 norm.

    Args:
      params: The parameters for the sample.
      record: The record to be processed.

    Returns:
      A tuple (preprocessed_records, l2_norm) where `preprocessed_records` is
      the structure of preprocessed tensors, and l2_norm is the total l2 norm
      before clipping.
    """
    l2_norm_clip = params
    record_as_list = tf.nest.flatten(record)
    # Clipping applies to the *global* norm across all leaves of the
    # record, not to each tensor independently.
    clipped_as_list, norm = tf.clip_by_global_norm(record_as_list, l2_norm_clip)
    return tf.nest.pack_sequence_as(record, clipped_as_list), norm

  def preprocess_record(self, params, record):
    """See base class. Clips the record, discarding its pre-clip norm."""
    preprocessed_record, _ = self.preprocess_record_impl(params, record)
    return preprocessed_record

  def get_noised_result(self, sample_state, global_state):
    """See base class."""
    # TF 1.x and 2.x expose different APIs for drawing Gaussian noise;
    # both branches add N(0, stddev) noise of the same shape as each leaf.
    if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
      def add_noise(v):
        return v + tf.random.normal(
            tf.shape(input=v), stddev=global_state.stddev)
    else:
      random_normal = tf.random_normal_initializer(
          stddev=global_state.stddev)

      def add_noise(v):
        return v + random_normal(tf.shape(input=v))

    if self._ledger:
      dependencies = [
          self._ledger.record_sum_query(
              global_state.l2_norm_clip, global_state.stddev)
      ]
    else:
      dependencies = []
    # The control dependency guarantees the ledger entry is written before
    # any noised value is produced.
    with tf.control_dependencies(dependencies):
      return tf.nest.map_structure(add_noise, sample_state), global_state
class GaussianAverageQuery(normalized_query.NormalizedQuery):
  """Implements DPQuery interface for Gaussian average queries.

  Accumulates clipped vectors, adds Gaussian noise, and normalizes.

  Note that we use "fixed-denominator" estimation: the denominator should be
  specified as the expected number of records per sample. Accumulating the
  denominator separately would also be possible, but would produce a
  higher-variance estimator.
  """

  def __init__(self,
               l2_norm_clip,
               sum_stddev,
               denominator):
    """Initializes the GaussianAverageQuery.

    Args:
      l2_norm_clip: The clipping norm to apply to the global norm of each
        record.
      sum_stddev: The stddev of the noise added to the sum (before
        normalization).
      denominator: The normalization constant (applied after noise is added to
        the sum).
    """
    # Delegate the clipped, noised summation to a GaussianSumQuery; the
    # NormalizedQuery base handles the division by `denominator`.
    numerator = GaussianSumQuery(l2_norm_clip, sum_stddev)
    super(GaussianAverageQuery, self).__init__(
        numerator_query=numerator,
        denominator=denominator)
| tensorflow_privacy/privacy/dp_query/gaussian_query.py | 4,855 | Implements DPQuery interface for Gaussian average queries.
Accumulates clipped vectors, adds Gaussian noise, and normalizes.
Note that we use "fixed-denominator" estimation: the denominator should be
specified as the expected number of records per sample. Accumulating the
denominator separately would also be possible but would produce a higher
variance estimator.
Implements DPQuery interface for Gaussian sum queries.
Accumulates clipped vectors, then adds Gaussian noise to the sum.
Initializes the GaussianSumQuery.
Args:
l2_norm_clip: The clipping norm to apply to the global norm of each
record.
stddev: The stddev of the noise added to the sum.
Initializes the GaussianAverageQuery.
Args:
l2_norm_clip: The clipping norm to apply to the global norm of each
record.
sum_stddev: The stddev of the noise added to the sum (before
normalization).
denominator: The normalization constant (applied after noise is added to
the sum).
See base class.
Creates a global state from the given parameters.
Clips the l2 norm, returning the clipped record and the l2 norm.
Args:
params: The parameters for the sample.
record: The record to be processed.
Returns:
A tuple (preprocessed_records, l2_norm) where `preprocessed_records` is
the structure of preprocessed tensors, and l2_norm is the total l2 norm
before clipping.
Implements DPQuery interface for Gaussian average queries.
Copyright 2018, The TensorFlow Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=invalid-name | 2,017 | en | 0.823089 |
class NumArray:
    """Answer immutable range-sum queries in O(1) per query.

    Precomputes inclusive prefix sums in O(n) time / O(n) space during
    construction so that sumRange(left, right) is a single subtraction.
    """

    # O(n) time | O(n) space - where n is the length of the input list
    def __init__(self, nums: 'List[int]'):
        # The annotation is quoted because `typing.List` is not imported in
        # this file; an unquoted `List[int]` raises NameError at class
        # definition time outside environments that pre-import it.
        from itertools import accumulate
        # self.nums[i] == nums[0] + ... + nums[i] (inclusive prefix sums).
        self.nums = list(accumulate(nums))

    # O(1) time to look up the prefix-sum list.
    def sumRange(self, left: int, right: int) -> int:
        """Return the sum of the elements in the inclusive range [left, right]."""
        if left > 0:
            return self.nums[right] - self.nums[left - 1]
        return self.nums[right]
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: otp.launcher.DownloadWatcher
from direct.task import Task
from otp.otpbase import OTPLocalizer
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
class DownloadWatcher(DirectObject):
    """Shows a status label and progress bar for launcher downloads."""
    __module__ = __name__

    def __init__(self, phaseNames):
        # Maps phase ids to the human-readable names shown in the label.
        self.phaseNames = phaseNames
        self.text = DirectLabel(
            relief=None,
            guiId='DownloadWatcherText',
            pos=(-0.96, 0, -0.91),
            text=OTPLocalizer.DownloadWatcherInitializing,
            text_fg=(1, 1, 1, 1),
            text_scale=0.05,
            textMayChange=1,
            text_align=TextNode.ALeft,
            sortOrder=50)
        self.bar = DirectWaitBar(
            guiId='DownloadWatcherBar',
            pos=(-0.81, 0, -0.96),
            relief=DGG.SUNKEN,
            frameSize=(-0.6, 0.6, -0.1, 0.1),
            borderWidth=(0.02, 0.02),
            scale=0.25,
            range=100,
            sortOrder=50,
            frameColor=(0.5, 0.5, 0.5, 0.5),
            barColor=(0.2, 0.7, 0.2, 0.5),
            text='0%',
            text_scale=0.16,
            text_fg=(1, 1, 1, 1),
            text_align=TextNode.ACenter,
            text_pos=(0, -0.05))
        # The launcher broadcasts progress through this messenger event.
        self.accept('launcherPercentPhaseComplete', self.update)

    def update(self, phase, percent, reqByteRate, actualByteRate):
        """Refresh the label and bar for the given phase and percentage."""
        phase_name = self.phaseNames[phase]
        self.text['text'] = OTPLocalizer.DownloadWatcherUpdate % phase_name
        self.bar['text'] = '%s %%' % percent
        self.bar['value'] = percent

    def cleanup(self):
        """Destroy the GUI elements and stop listening for events."""
        self.text.destroy()
        self.bar.destroy()
        self.ignoreAll()
from conans import ConanFile, CMake
class LibB(ConanFile):
    """Conan recipe for libB, a CMake-built static/shared library."""

    name = "libB"
    version = "0.0"
    settings = "os", "arch", "compiler", "build_type"
    options = {"shared": [True, False]}
    default_options = {"shared": False}
    generators = "cmake"
    # Capture the git URL and revision automatically from the checkout.
    scm = {
        "type": "git",
        "url": "auto",
        "revision": "auto",
    }
    exports_sources = "LICENSE"  # to avoid build info bug

    def requirements(self):
        """Declare direct dependencies of libB."""
        self.requires("libA/[>=0.0]@demo/testing")
        self.requires("libF/0.0@demo/testing")

    def build(self):
        """Configure, build and install the project with CMake."""
        cmake = CMake(self)
        cmake.configure()
        cmake.build()
        cmake.install()

    def package(self):
        """Bundle the license file into the package."""
        self.copy("LICENSE", dst="licenses")

    def package_info(self):
        """Expose the built library name to consumers."""
        self.cpp_info.libs = ["libB"]
| conanfile.py | 788 | to avoid build info bug | 23 | en | 0.612715 |
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for suggestion registry classes."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
import os
from core.domain import config_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import fs_services
from core.domain import html_validation_service
from core.domain import question_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import suggestion_registry
from core.domain import suggestion_services
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
import utils
# Load the suggestion storage models through the platform registry.
(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])
class MockInvalidSuggestion(suggestion_registry.BaseSuggestion):
    """A BaseSuggestion subclass that deliberately skips initialization.

    Used to exercise the NotImplementedError paths of the abstract base
    class without constructing any real suggestion state.
    """

    def __init__(self): # pylint: disable=super-init-not-called
        pass
class BaseSuggestionUnitTests(test_utils.GenericTestBase):
    """Tests for the BaseSuggestion class."""

    def setUp(self):
        super(BaseSuggestionUnitTests, self).setUp()
        self.base_suggestion = MockInvalidSuggestion()

    def _assert_raises_not_implemented(self, expected_regexp, func, *args):
        """Asserts that calling func(*args) raises NotImplementedError."""
        with self.assertRaisesRegexp(NotImplementedError, expected_regexp):
            func(*args)

    def test_base_class_accept_raises_error(self):
        self._assert_raises_not_implemented(
            'Subclasses of BaseSuggestion should implement accept.',
            self.base_suggestion.accept)

    def test_base_class_get_change_list_for_accepting_suggestion_raises_error(
            self):
        self._assert_raises_not_implemented(
            'Subclasses of BaseSuggestion should implement '
            'get_change_list_for_accepting_suggestion.',
            self.base_suggestion.get_change_list_for_accepting_suggestion)

    def test_base_class_pre_accept_validate_raises_error(self):
        self._assert_raises_not_implemented(
            'Subclasses of BaseSuggestion should implement'
            ' pre_accept_validate.',
            self.base_suggestion.pre_accept_validate)

    def test_base_class_populate_old_value_of_change_raises_error(self):
        self._assert_raises_not_implemented(
            'Subclasses of BaseSuggestion should implement'
            ' populate_old_value_of_change.',
            self.base_suggestion.populate_old_value_of_change)

    def test_base_class_pre_update_validate_raises_error(self):
        self._assert_raises_not_implemented(
            'Subclasses of BaseSuggestion should implement'
            ' pre_update_validate.',
            self.base_suggestion.pre_update_validate, {})

    def test_base_class_get_all_html_content_strings(self):
        self._assert_raises_not_implemented(
            'Subclasses of BaseSuggestion should implement'
            ' get_all_html_content_strings.',
            self.base_suggestion.get_all_html_content_strings)

    def test_base_class_get_target_entity_html_strings(self):
        self._assert_raises_not_implemented(
            'Subclasses of BaseSuggestion should implement'
            ' get_target_entity_html_strings.',
            self.base_suggestion.get_target_entity_html_strings)

    def test_base_class_convert_html_in_suggestion_change(self):
        def conversion_fn():
            """Temporary function."""
            pass

        self._assert_raises_not_implemented(
            'Subclasses of BaseSuggestion should implement'
            ' convert_html_in_suggestion_change.',
            self.base_suggestion.convert_html_in_suggestion_change,
            conversion_fn)
class SuggestionEditStateContentUnitTests(test_utils.GenericTestBase):
    """Tests for the SuggestionEditStateContent class."""

    # Test user identities used throughout this suite.
    AUTHOR_EMAIL = 'author@example.com'
    REVIEWER_EMAIL = 'reviewer@example.com'
    ASSIGNED_REVIEWER_EMAIL = 'assigned_reviewer@example.com'
    # Fixed timestamp so last_updated values are deterministic in tests.
    fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0)
    def setUp(self):
        super(SuggestionEditStateContentUnitTests, self).setUp()
        self.signup(self.AUTHOR_EMAIL, 'author')
        self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
        self.signup(self.REVIEWER_EMAIL, 'reviewer')
        self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
        # Canonical suggestion payload shared by the tests below; each test
        # builds a SuggestionEditStateContent from these values and then
        # perturbs a single field to probe one validation rule.
        self.suggestion_dict = {
            'suggestion_id': 'exploration.exp1.thread1',
            'suggestion_type': (
                feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
            'target_type': feconf.ENTITY_TYPE_EXPLORATION,
            'target_id': 'exp1',
            'target_version_at_submission': 1,
            'status': suggestion_models.STATUS_ACCEPTED,
            'author_name': 'author',
            'final_reviewer_id': self.reviewer_id,
            'change': {
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'property_name': exp_domain.STATE_PROPERTY_CONTENT,
                'state_name': 'state_1',
                'new_value': 'new suggestion content',
                'old_value': None
            },
            'score_category': 'content.Algebra',
            'language_code': None,
            'last_updated': utils.get_time_in_millisecs(self.fake_date),
            'edited_by_reviewer': False
        }
def test_create_suggestion_edit_state_content(self):
expected_suggestion_dict = self.suggestion_dict
observed_suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertDictEqual(
observed_suggestion.to_dict(), expected_suggestion_dict)
def test_validate_suggestion_edit_state_content(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
def test_get_score_part_helper_methods(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertEqual(suggestion.get_score_type(), 'content')
self.assertEqual(suggestion.get_score_sub_type(), 'Algebra')
def test_validate_suggestion_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.suggestion_type = 'invalid_suggestion_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected suggestion_type to be among allowed choices'
):
suggestion.validate()
def test_validate_target_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_type = 'invalid_target_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_type to be among allowed choices'
):
suggestion.validate()
def test_validate_target_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected target_id to be a string'
):
suggestion.validate()
def test_validate_target_version_at_submission(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_version_at_submission = 'invalid_version'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_version_at_submission to be an int'
):
suggestion.validate()
def test_validate_status(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.status = 'invalid_status'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected status to be among allowed choices'
):
suggestion.validate()
def test_validate_author_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected author_id to be a string'
):
suggestion.validate()
def test_validate_author_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = self.PSEUDONYMOUS_ID
suggestion.validate()
suggestion.author_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected author_id to be in a valid user ID format'
):
suggestion.validate()
def test_validate_final_reviewer_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected final_reviewer_id to be a string'
):
suggestion.validate()
def test_validate_final_reviewer_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = self.PSEUDONYMOUS_ID
suggestion.validate()
suggestion.final_reviewer_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected final_reviewer_id to be in a valid user ID format'
):
suggestion.validate()
def test_validate_score_category(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected score_category to be a string'
):
suggestion.validate()
def test_validate_score_category_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'score.score_type.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected score_category to be of the form'
' score_type.score_sub_type'
):
suggestion.validate()
suggestion.score_category = 'invalid_score_category'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected score_category to be of the form'
' score_type.score_sub_type'
):
suggestion.validate()
def test_validate_score_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'invalid_score_type.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be among allowed'
' choices'
):
suggestion.validate()
def test_validate_change(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change = {}
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to be an ExplorationChange'
):
suggestion.validate()
def test_validate_score_type_content(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'question.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be content'
):
suggestion.validate()
def test_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.cmd = 'invalid_cmd'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected cmd to be edit_state_property'
):
suggestion.validate()
def test_validate_change_property_name(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.property_name = 'invalid_property'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected property_name to be content'
):
suggestion.validate()
def test_validate_language_code_fails_when_language_codes_do_not_match(
        self):
    """Checks that validate() rejects a non-None language_code for an
    edit-state-content suggestion.
    """
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionEditStateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.language_code = 'wrong_language_code'
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'Expected language_code to be None, received wrong_language_code'
    ):
        suggestion.validate()
def test_pre_accept_validate_state_name(self):
    """pre_accept_validate() accepts a state name that exists in the
    target exploration and rejects one that does not.
    """
    self.save_new_default_exploration('exp1', self.author_id)
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionEditStateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Add a state so the suggestion has a valid target state.
    exp_services.update_exploration(
        self.author_id, 'exp1', [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_ADD_STATE,
                'state_name': 'State A',
            })
        ], 'Added state')
    suggestion.change.state_name = 'State A'
    suggestion.pre_accept_validate()
    suggestion.change.state_name = 'invalid_state_name'
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'Expected invalid_state_name to be a valid state name'
    ):
        suggestion.pre_accept_validate()
def test_populate_old_value_of_change_with_invalid_state(self):
    """populate_old_value_of_change() leaves old_value as None when the
    change targets a state that does not exist.
    """
    self.save_new_default_exploration('exp1', self.author_id)
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionEditStateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    suggestion.change.state_name = 'invalid_state_name'
    self.assertIsNone(suggestion.change.old_value)
    suggestion.populate_old_value_of_change()
    # Still None: the invalid state has no content to copy.
    self.assertIsNone(suggestion.change.old_value)
def test_pre_update_validate_change_cmd(self):
    """pre_update_validate() rejects an update whose cmd carries
    attributes that do not belong to that cmd.
    """
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionEditStateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    new_change = {
        'cmd': exp_domain.CMD_ADD_STATE,
        'property_name': exp_domain.STATE_PROPERTY_CONTENT,
        'state_name': suggestion.change.state_name,
        'new_value': 'new suggestion content',
        'old_value': None
    }
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'The following extra attributes are present: new_value, '
        'old_value, property_name'
    ):
        suggestion.pre_update_validate(
            exp_domain.ExplorationChange(new_change))
def test_pre_update_validate_change_property_name(self):
    """pre_update_validate() rejects an update whose property_name is
    not the content property.
    """
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionEditStateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    new_change = {
        'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
        'property_name': exp_domain.STATE_PROPERTY_PARAM_CHANGES,
        'state_name': suggestion.change.state_name,
        'new_value': 'new suggestion content',
        'old_value': None
    }
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'The new change property_name must be equal to content'
    ):
        suggestion.pre_update_validate(
            exp_domain.ExplorationChange(new_change))
def test_pre_update_validate_change_state_name(self):
    """pre_update_validate() rejects an update whose state_name differs
    from the suggestion's state_name.
    """
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionEditStateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    new_change = {
        'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
        'property_name': exp_domain.STATE_PROPERTY_CONTENT,
        'state_name': 'invalid_state',
        'new_value': 'new suggestion content',
        'old_value': None
    }
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'The new change state_name must be equal to state_1'
    ):
        suggestion.pre_update_validate(
            exp_domain.ExplorationChange(new_change))
def test_pre_update_validate_change_new_value(self):
    """pre_update_validate() rejects an update whose html matches the
    suggestion's current html.
    """
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionEditStateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    new_content = state_domain.SubtitledHtml(
        'content', '<p>new suggestion html</p>').to_dict()
    # Make the suggestion's current html equal to the update's html.
    suggestion.change.new_value = new_content
    new_change = {
        'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
        'property_name': exp_domain.STATE_PROPERTY_CONTENT,
        'state_name': suggestion.change.state_name,
        'new_value': new_content,
        'old_value': None
    }
    with self.assertRaisesRegexp(
        utils.ValidationError, 'The new html must not match the old html'
    ):
        suggestion.pre_update_validate(
            exp_domain.ExplorationChange(new_change))
def test_pre_update_validate_non_equal_change_cmd(self):
    """pre_update_validate() rejects an update whose cmd is not
    edit_state_property.
    """
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionEditStateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'The new change cmd must be equal to edit_state_property'
    ):
        suggestion.pre_update_validate(exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
            'property_name': 'title',
            'new_value': 'Exploration 1 Albert title'
        }))
def test_get_all_html_content_strings(self):
    """get_all_html_content_strings() returns the new_value html."""
    new_change_dict = {
        'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
        'property_name': exp_domain.STATE_PROPERTY_CONTENT,
        'state_name': 'state_1',
        'new_value': {
            'content_id': 'content',
            'html': 'new suggestion content'
        },
        'old_value': None
    }
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionEditStateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, new_change_dict,
        d['score_category'], d['language_code'], False, self.fake_date)
    self.assertEqual(
        [u'new suggestion content'],
        suggestion.get_all_html_content_strings())
def test_convert_html_in_suggestion_change(self):
    """convert_html_in_suggestion_change() applies the supplied
    conversion function to the change's old_value html.
    """
    html_content = (
        '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
        'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
    expected_html_content = (
        '<p>Value</p><oppia-noninteractive-math math_content-with-value='
        '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
        'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
        '-noninteractive-math>')
    new_change = {
        'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
        'property_name': exp_domain.STATE_PROPERTY_CONTENT,
        'state_name': 'Introduction',
        'new_value': {
            'content_id': 'content',
            'html': '<p>suggestion</p>'
        },
        'old_value': {
            'content_id': 'content',
            'html': html_content
        }
    }
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionEditStateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, new_change,
        d['score_category'], d['language_code'], False, self.fake_date)
    suggestion.convert_html_in_suggestion_change(
        html_validation_service.
        add_math_content_to_math_rte_components)
    self.assertEqual(
        suggestion.change.old_value['html'], expected_html_content)
def test_get_target_entity_html_strings_returns_expected_strings(self):
    """get_target_entity_html_strings() returns the old_value html."""
    new_change_dict = {
        'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
        'property_name': exp_domain.STATE_PROPERTY_CONTENT,
        'state_name': 'state_1',
        'new_value': {
            'content_id': 'content',
            'html': 'new suggestion content'
        },
        'old_value': {
            'content_id': 'content',
            'html': 'Old content.'
        }
    }
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionEditStateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, new_change_dict,
        d['score_category'], d['language_code'], False, self.fake_date)
    self.assertEqual(
        [u'Old content.'], suggestion.get_target_entity_html_strings())
def test_get_target_entity_html_with_none_old_value(self):
    """get_target_entity_html_strings() is empty when old_value is
    None.
    """
    new_change_dict = {
        'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
        'property_name': exp_domain.STATE_PROPERTY_CONTENT,
        'state_name': 'state_1',
        'new_value': {
            'content_id': 'content',
            'html': 'new suggestion content'
        },
        'old_value': None
    }
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionEditStateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, new_change_dict,
        d['score_category'], d['language_code'], False, self.fake_date)
    self.assertEqual(suggestion.get_target_entity_html_strings(), [])
class SuggestionTranslateContentUnitTests(test_utils.GenericTestBase):
    """Tests for the SuggestionTranslateContent class."""

    AUTHOR_EMAIL = 'author@example.com'
    REVIEWER_EMAIL = 'reviewer@example.com'
    ASSIGNED_REVIEWER_EMAIL = 'assigned_reviewer@example.com'
    # Fixed timestamp used as the suggestion's last_updated value.
    fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0)
def setUp(self):
    """Registers an author and a reviewer and builds the baseline
    translation-suggestion dict shared by the tests in this class.
    """
    super(SuggestionTranslateContentUnitTests, self).setUp()
    self.signup(self.AUTHOR_EMAIL, 'author')
    self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
    self.signup(self.REVIEWER_EMAIL, 'reviewer')
    self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
    # Baseline dict; individual tests copy/mutate fields from it.
    self.suggestion_dict = {
        'suggestion_id': 'exploration.exp1.thread1',
        'suggestion_type': (
            feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT),
        'target_type': feconf.ENTITY_TYPE_EXPLORATION,
        'target_id': 'exp1',
        'target_version_at_submission': 1,
        'status': suggestion_models.STATUS_ACCEPTED,
        'author_name': 'author',
        'final_reviewer_id': self.reviewer_id,
        'change': {
            'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
            'state_name': 'Introduction',
            'content_id': 'content',
            'language_code': 'hi',
            'content_html': '<p>This is a content.</p>',
            'translation_html': '<p>This is translated html.</p>',
            'data_format': 'html'
        },
        'score_category': 'translation.Algebra',
        'language_code': 'hi',
        'last_updated': utils.get_time_in_millisecs(self.fake_date),
        'edited_by_reviewer': False
    }
def test_pre_update_validate_fails_for_invalid_change_cmd(self):
    """pre_update_validate() rejects an update whose cmd is not
    add_written_translation.
    """
    expected_suggestion_dict = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        expected_suggestion_dict['suggestion_id'],
        expected_suggestion_dict['target_id'],
        expected_suggestion_dict['target_version_at_submission'],
        expected_suggestion_dict['status'], self.author_id,
        self.reviewer_id, expected_suggestion_dict['change'],
        expected_suggestion_dict['score_category'],
        # Pass edited_by_reviewer=False explicitly so self.fake_date
        # binds to last_updated, matching every other construction of
        # SuggestionTranslateContent in this class.
        expected_suggestion_dict['language_code'], False, self.fake_date)
    change = {
        'cmd': exp_domain.CMD_ADD_STATE,
        'state_name': 'Introduction'
    }
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'The new change cmd must be equal to %s' % (
            exp_domain.CMD_ADD_WRITTEN_TRANSLATION)
    ):
        suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_state_name(self):
    """pre_update_validate() rejects an update whose state_name differs
    from the suggestion's state_name.
    """
    expected_suggestion_dict = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        expected_suggestion_dict['suggestion_id'],
        expected_suggestion_dict['target_id'],
        expected_suggestion_dict['target_version_at_submission'],
        expected_suggestion_dict['status'], self.author_id,
        self.reviewer_id, expected_suggestion_dict['change'],
        expected_suggestion_dict['score_category'],
        # Pass edited_by_reviewer=False explicitly so self.fake_date
        # binds to last_updated, matching every other construction of
        # SuggestionTranslateContent in this class.
        expected_suggestion_dict['language_code'], False, self.fake_date)
    change = {
        'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
        'state_name': 'State 1',
        'content_id': 'content',
        'language_code': 'hi',
        'content_html': '<p>This is a content.</p>',
        'translation_html': '<p>This is the updated translated html.</p>',
        'data_format': 'html'
    }
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'The new change state_name must be equal to Introduction'
    ):
        suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_language_code(self):
    """pre_update_validate() rejects an update whose language_code
    differs from the suggestion's language_code.
    """
    expected_suggestion_dict = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        expected_suggestion_dict['suggestion_id'],
        expected_suggestion_dict['target_id'],
        expected_suggestion_dict['target_version_at_submission'],
        expected_suggestion_dict['status'], self.author_id,
        self.reviewer_id, expected_suggestion_dict['change'],
        expected_suggestion_dict['score_category'],
        # Pass edited_by_reviewer=False explicitly so self.fake_date
        # binds to last_updated, matching every other construction of
        # SuggestionTranslateContent in this class.
        expected_suggestion_dict['language_code'], False, self.fake_date)
    change = {
        'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
        'state_name': 'Introduction',
        'content_id': 'content',
        'language_code': 'en',
        'content_html': '<p>This is a content.</p>',
        'translation_html': '<p>This is the updated translated html.</p>',
        'data_format': 'html'
    }
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'The language code must be equal to hi'
    ):
        suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_content_html(self):
    """pre_update_validate() rejects an update whose content_html
    differs from the suggestion's content_html.
    """
    expected_suggestion_dict = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        expected_suggestion_dict['suggestion_id'],
        expected_suggestion_dict['target_id'],
        expected_suggestion_dict['target_version_at_submission'],
        expected_suggestion_dict['status'], self.author_id,
        self.reviewer_id, expected_suggestion_dict['change'],
        expected_suggestion_dict['score_category'],
        # Pass edited_by_reviewer=False explicitly so self.fake_date
        # binds to last_updated, matching every other construction of
        # SuggestionTranslateContent in this class.
        expected_suggestion_dict['language_code'], False, self.fake_date)
    change = {
        'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
        'state_name': 'Introduction',
        'content_id': 'content',
        'language_code': 'en',
        'content_html': '<p>This is the changed content.</p>',
        'translation_html': '<p>This is the updated translated html.</p>',
        'data_format': 'html'
    }
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'The new change content_html must be equal to <p>This is a ' +
        'content.</p>'
    ):
        suggestion.pre_update_validate(
            exp_domain.ExplorationChange(change))
def test_create_suggestion_add_translation(self):
    """A constructed suggestion's to_dict() round-trips the input
    dict.
    """
    d = self.suggestion_dict
    observed_suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    self.assertDictEqual(observed_suggestion.to_dict(), d)
def test_validate_suggestion_add_translation(self):
    """A suggestion built from the baseline dict passes validate()."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    suggestion.validate()
def test_get_score_part_helper_methods(self):
    """get_score_type()/get_score_sub_type() split score_category."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    self.assertEqual(suggestion.get_score_type(), 'translation')
    self.assertEqual(suggestion.get_score_sub_type(), 'Algebra')
def test_validate_suggestion_type(self):
    """validate() rejects an unknown suggestion_type."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.suggestion_type = 'invalid_suggestion_type'
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'Expected suggestion_type to be among allowed choices'
    ):
        suggestion.validate()
def test_validate_target_type(self):
    """validate() rejects an unknown target_type."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.target_type = 'invalid_target_type'
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'Expected target_type to be among allowed choices'
    ):
        suggestion.validate()
def test_validate_target_id(self):
    """validate() rejects a non-string target_id."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.target_id = 0
    with self.assertRaisesRegexp(
        utils.ValidationError, 'Expected target_id to be a string'
    ):
        suggestion.validate()
def test_validate_target_version_at_submission(self):
    """validate() rejects a non-int target_version_at_submission."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.target_version_at_submission = 'invalid_version'
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'Expected target_version_at_submission to be an int'
    ):
        suggestion.validate()
def test_validate_status(self):
    """validate() rejects an unknown status."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.status = 'invalid_status'
    with self.assertRaisesRegexp(
        utils.ValidationError, 'Expected status to be among allowed choices'
    ):
        suggestion.validate()
def test_validate_author_id(self):
    """validate() rejects a non-string author_id."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.author_id = 0
    with self.assertRaisesRegexp(
        utils.ValidationError, 'Expected author_id to be a string'
    ):
        suggestion.validate()
def test_validate_author_id_format(self):
    """validate() rejects an author_id not in user ID format."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.author_id = ''
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'Expected author_id to be in a valid user ID format.'
    ):
        suggestion.validate()
def test_validate_final_reviewer_id(self):
    """validate() rejects a non-string final_reviewer_id."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.final_reviewer_id = 1
    with self.assertRaisesRegexp(
        utils.ValidationError, 'Expected final_reviewer_id to be a string'
    ):
        suggestion.validate()
def test_validate_final_reviewer_id_format(self):
    """validate() rejects a final_reviewer_id not in user ID format."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.final_reviewer_id = ''
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'Expected final_reviewer_id to be in a valid user ID format'
    ):
        suggestion.validate()
def test_validate_score_category(self):
    """validate() rejects a non-string score_category."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.score_category = 0
    with self.assertRaisesRegexp(
        utils.ValidationError, 'Expected score_category to be a string'
    ):
        suggestion.validate()
def test_validate_score_category_format(self):
    """validate() rejects score_category values that are not exactly
    score_type.score_sub_type.
    """
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    # Too many dot-separated parts.
    suggestion.score_category = 'score.score_type.score_sub_type'
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'Expected score_category to be of the form'
        ' score_type.score_sub_type'
    ):
        suggestion.validate()
    # No dot separator at all.
    suggestion.score_category = 'invalid_score_category'
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'Expected score_category to be of the form'
        ' score_type.score_sub_type'
    ):
        suggestion.validate()
def test_validate_score_type(self):
    """validate() rejects an unknown score_type prefix."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.score_category = 'invalid_score_type.score_sub_type'
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'Expected the first part of score_category to be among allowed'
        ' choices'
    ):
        suggestion.validate()
def test_validate_change(self):
    """validate() rejects a change that is not an ExplorationChange."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.change = {}
    with self.assertRaisesRegexp(
        utils.ValidationError, 'Expected change to be an ExplorationChange'
    ):
        suggestion.validate()
def test_validate_score_type_translation(self):
    """validate() requires the score_type prefix to be translation."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.score_category = 'question.score_sub_type'
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'Expected the first part of score_category to be translation'
    ):
        suggestion.validate()
def test_validate_change_cmd(self):
    """validate() rejects a change with an invalid cmd."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.change.cmd = 'invalid_cmd'
    with self.assertRaisesRegexp(
        utils.ValidationError, 'Expected cmd to be add_written_translation'
    ):
        suggestion.validate()
def test_validate_language_code_fails_when_language_codes_do_not_match(
        self):
    """validate() requires the suggestion's language_code to match the
    change's language_code.
    """
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    expected_language_code = d['change']['language_code']
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.language_code = 'wrong_language_code'
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'Expected language_code to be %s, '
        'received wrong_language_code' % expected_language_code
    ):
        suggestion.validate()
def test_validate_language_code_fails_when_language_code_is_set_to_none(
        self):
    """validate() rejects a None language_code for a translation
    suggestion.
    """
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.language_code = None
    with self.assertRaisesRegexp(
        utils.ValidationError, 'language_code cannot be None'
    ):
        suggestion.validate()
def test_validate_change_with_invalid_language_code_fails_validation(self):
    """validate() rejects a change with an unrecognised language code."""
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Freshly-built suggestion must be valid before corruption.
    suggestion.validate()
    suggestion.change.language_code = 'invalid_code'
    with self.assertRaisesRegexp(
        utils.ValidationError, 'Invalid language_code: invalid_code'
    ):
        suggestion.validate()
def test_pre_accept_validate_state_name(self):
    """pre_accept_validate() accepts a state name that exists in the
    target exploration and rejects one that does not.
    """
    self.save_new_default_exploration('exp1', self.author_id)
    d = self.suggestion_dict
    suggestion = suggestion_registry.SuggestionTranslateContent(
        d['suggestion_id'], d['target_id'],
        d['target_version_at_submission'], d['status'],
        self.author_id, self.reviewer_id, d['change'],
        d['score_category'], d['language_code'], False, self.fake_date)
    # Add a state whose content matches the suggestion's content_html.
    exp_services.update_exploration(
        self.author_id, 'exp1', [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_ADD_STATE,
                'state_name': 'State A',
            }),
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'property_name': exp_domain.STATE_PROPERTY_CONTENT,
                'new_value': {
                    'content_id': 'content',
                    'html': '<p>This is a content.</p>'
                },
                'state_name': 'State A',
            })
        ], 'Added state')
    suggestion.change.state_name = 'State A'
    suggestion.pre_accept_validate()
    suggestion.change.state_name = 'invalid_state_name'
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'Expected invalid_state_name to be a valid state name'
    ):
        suggestion.pre_accept_validate()
def test_pre_accept_validate_content_html(self):
self.save_new_default_exploration('exp1', self.author_id)
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
exp_services.update_exploration(
self.author_id, 'exp1', [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'State A',
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'new_value': {
'content_id': 'content',
'html': '<p>This is a content.</p>'
},
'state_name': 'State A',
})
], 'Added state')
suggestion.change.state_name = 'State A'
suggestion.pre_accept_validate()
suggestion.change.content_html = 'invalid content_html'
with self.assertRaisesRegexp(
utils.ValidationError,
'The Exploration content has changed since this translation '
'was submitted.'
):
suggestion.pre_accept_validate()
def test_accept_suggestion_adds_translation_in_exploration(self):
self.save_new_default_exploration('exp1', self.author_id)
exploration = exp_fetchers.get_exploration_by_id('exp1')
self.assertEqual(exploration.get_translation_counts(), {})
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.accept(
'Accepted suggestion by translator: Add translation change.')
exploration = exp_fetchers.get_exploration_by_id('exp1')
self.assertEqual(exploration.get_translation_counts(), {
'hi': 1
})
def test_accept_suggestion_with_psedonymous_author_adds_translation(self):
self.save_new_default_exploration('exp1', self.author_id)
exploration = exp_fetchers.get_exploration_by_id('exp1')
self.assertEqual(exploration.get_translation_counts(), {})
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.PSEUDONYMOUS_ID,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.accept(
'Accepted suggestion by translator: Add translation change.')
exploration = exp_fetchers.get_exploration_by_id('exp1')
self.assertEqual(exploration.get_translation_counts(), {
'hi': 1
})
def test_get_all_html_content_strings(self):
suggestion = suggestion_registry.SuggestionTranslateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, self.suggestion_dict['change'],
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_all_html_content_strings()
expected_outcome_list = [
u'<p>This is translated html.</p>', u'<p>This is a content.</p>']
self.assertEqual(expected_outcome_list, actual_outcome_list)
def test_get_target_entity_html_strings_returns_expected_strings(self):
suggestion = suggestion_registry.SuggestionTranslateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, self.suggestion_dict['change'],
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_target_entity_html_strings()
expected_outcome_list = [self.suggestion_dict['change']['content_html']]
self.assertEqual(expected_outcome_list, actual_outcome_list)
    def test_convert_html_in_suggestion_change(self):
        """convert_html_in_suggestion_change applies the given conversion
        function to the change's HTML fields, migrating the legacy
        raw_latex-with-value math-tag markup to math_content-with-value.
        """
        # Legacy math RTE component markup (pre-SVG schema).
        html_content = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        # Expected markup after add_math_content_to_math_rte_components:
        # raw_latex wrapped in a math_content dict with an empty
        # svg_filename.
        expected_html_content = (
            '<p>Value</p><oppia-noninteractive-math math_content-with-value='
            '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
            'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
            '-noninteractive-math>')
        change_dict = {
            'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
            'state_name': 'Introduction',
            'content_id': 'content',
            'language_code': 'hi',
            'content_html': html_content,
            'translation_html': '<p>This is translated html.</p>',
            'data_format': 'html'
        }
        suggestion = suggestion_registry.SuggestionTranslateContent(
            self.suggestion_dict['suggestion_id'],
            self.suggestion_dict['target_id'],
            self.suggestion_dict['target_version_at_submission'],
            self.suggestion_dict['status'], self.author_id,
            self.reviewer_id, change_dict,
            self.suggestion_dict['score_category'],
            self.suggestion_dict['language_code'], False, self.fake_date)
        suggestion.convert_html_in_suggestion_change(
            html_validation_service.add_math_content_to_math_rte_components)
        self.assertEqual(
            suggestion.change.content_html, expected_html_content)
class SuggestionAddQuestionTest(test_utils.GenericTestBase):
    """Tests for the SuggestionAddQuestion class."""
    AUTHOR_EMAIL = 'author@example.com'
    REVIEWER_EMAIL = 'reviewer@example.com'
    ASSIGNED_REVIEWER_EMAIL = 'assigned_reviewer@example.com'
    # Fixed timestamp so last_updated values in fixtures are deterministic.
    fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0)
    def setUp(self):
        """Register an author and a reviewer and build the canonical
        'add question' suggestion payload reused by most tests below.
        """
        super(SuggestionAddQuestionTest, self).setUp()
        self.signup(self.AUTHOR_EMAIL, 'author')
        self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
        self.signup(self.REVIEWER_EMAIL, 'reviewer')
        self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
        # Individual tests mutate fields of this dict (or of suggestions
        # built from it) to trigger specific validation failures.
        self.suggestion_dict = {
            'suggestion_id': 'skill1.thread1',
            'suggestion_type': feconf.SUGGESTION_TYPE_ADD_QUESTION,
            'target_type': feconf.ENTITY_TYPE_SKILL,
            'target_id': 'skill1',
            'target_version_at_submission': 1,
            'status': suggestion_models.STATUS_ACCEPTED,
            'author_name': 'author',
            'final_reviewer_id': self.reviewer_id,
            'change': {
                'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
                'question_dict': {
                    'question_state_data': self._create_valid_question_data(
                        'default_state').to_dict(),
                    'language_code': 'en',
                    'question_state_data_schema_version': (
                        feconf.CURRENT_STATE_SCHEMA_VERSION),
                    'linked_skill_ids': ['skill_1'],
                    'inapplicable_skill_misconception_ids': ['skillid12345-1']
                },
                'skill_id': 'skill_1',
                'skill_difficulty': 0.3,
            },
            'score_category': 'question.topic_1',
            'language_code': 'en',
            'last_updated': utils.get_time_in_millisecs(self.fake_date),
            'edited_by_reviewer': False
        }
def test_create_suggestion_add_question(self):
expected_suggestion_dict = self.suggestion_dict
observed_suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertDictEqual(
observed_suggestion.to_dict(), expected_suggestion_dict)
def test_validate_suggestion_edit_state_content(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
def test_get_score_part_helper_methods(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertEqual(suggestion.get_score_type(), 'question')
self.assertEqual(suggestion.get_score_sub_type(), 'topic_1')
def test_validate_score_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'content.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be "question"'
):
suggestion.validate()
def test_validate_change_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change = 'invalid_change'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected change to be an instance of QuestionSuggestionChange'
):
suggestion.validate()
def test_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.cmd = None
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to contain cmd'
):
suggestion.validate()
def test_validate_change_cmd_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.cmd = 'invalid_cmd'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected cmd to be create_new_fully_specified_question'
):
suggestion.validate()
def test_validate_change_question_dict(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.question_dict = None
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to contain question_dict'
):
suggestion.validate()
def test_validate_change_question_state_data_schema_version(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
# We are not setting value in suggestion.change.question_dict
# directly since pylint produces unsupported-assignment-operation
# error. The detailed analysis for the same can be checked
# in this issue: https://github.com/oppia/oppia/issues/7008.
question_dict = suggestion.change.question_dict
question_dict['question_state_data_schema_version'] = 0
suggestion.change.question_dict = question_dict
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected question state schema version to be %s, '
'received 0' % feconf.CURRENT_STATE_SCHEMA_VERSION
):
suggestion.validate()
def test_validate_change_skill_difficulty_none(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.skill_difficulty = None
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to contain skill_difficulty'
):
suggestion.validate()
def test_validate_change_skill_difficulty_invalid_value(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.skill_difficulty = 0.4
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected change skill_difficulty to be one of '
):
suggestion.validate()
def test_pre_accept_validate_change_skill_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
skill_id = skill_services.get_new_skill_id()
self.save_new_skill(skill_id, self.author_id, description='description')
suggestion.change.skill_id = skill_id
suggestion.pre_accept_validate()
suggestion.change.skill_id = None
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to contain skill_id'
):
suggestion.pre_accept_validate()
def test_pre_accept_validate_change_invalid_skill_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
skill_id = skill_services.get_new_skill_id()
self.save_new_skill(skill_id, self.author_id, description='description')
suggestion.change.skill_id = skill_id
suggestion.pre_accept_validate()
suggestion.change.skill_id = skill_services.get_new_skill_id()
with self.assertRaisesRegexp(
utils.ValidationError, 'The skill with the given id doesn\'t exist.'
):
suggestion.pre_accept_validate()
def test_get_change_list_for_accepting_suggestion(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertIsNone(suggestion.get_change_list_for_accepting_suggestion())
def test_populate_old_value_of_change(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertIsNone(suggestion.populate_old_value_of_change())
def test_cannot_accept_suggestion_with_invalid_skill_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.change.skill_id = skill_services.get_new_skill_id()
with self.assertRaisesRegexp(
utils.ValidationError,
'The skill with the given id doesn\'t exist.'
):
suggestion.accept('commit message')
def test_pre_update_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': question_domain.CMD_UPDATE_QUESTION_PROPERTY,
'property_name': question_domain.QUESTION_PROPERTY_LANGUAGE_CODE,
'new_value': 'bn',
'old_value': 'en'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change cmd must be equal to '
'create_new_fully_specified_question'
):
suggestion.pre_update_validate(
question_domain.QuestionChange(change))
def test_pre_update_validate_change_skill_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_2'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change skill_id must be equal to skill_1'
):
suggestion.pre_update_validate(
question_domain.QuestionChange(change))
def test_pre_update_validate_complains_if_nothing_changed(self):
change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
suggestion = suggestion_registry.SuggestionAddQuestion(
'exploration.exp1.thread1', 'exp1', 1,
suggestion_models.STATUS_ACCEPTED, self.author_id,
self.reviewer_id, change,
'question.topic_1', 'en', self.fake_date)
new_change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
with self.assertRaisesRegexp(
utils.ValidationError,
'At least one of the new skill_difficulty or question_dict '
'should be changed.'):
suggestion.pre_update_validate(
question_domain.QuestionSuggestionChange(new_change))
def test_pre_update_validate_accepts_a_change_in_skill_difficulty_only(
self):
change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
suggestion = suggestion_registry.SuggestionAddQuestion(
'exploration.exp1.thread1', 'exp1', 1,
suggestion_models.STATUS_ACCEPTED, self.author_id,
self.reviewer_id, change,
'question.topic_1', 'en', self.fake_date)
new_change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.6
}
self.assertEqual(
suggestion.pre_update_validate(
question_domain.QuestionSuggestionChange(new_change)), None)
def test_pre_update_validate_accepts_a_change_in_state_data_only(self):
change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
suggestion = suggestion_registry.SuggestionAddQuestion(
'exploration.exp1.thread1', 'exp1', 1,
suggestion_models.STATUS_ACCEPTED, self.author_id,
self.reviewer_id, change,
'question.topic_1', 'en', self.fake_date)
new_change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'hi',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
self.assertEqual(
suggestion.pre_update_validate(
question_domain.QuestionSuggestionChange(new_change)), None)
def test_validate_author_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected author_id to be a string'):
suggestion.validate()
def test_validate_author_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected author_id to be in a valid user ID format.'):
suggestion.validate()
def test_validate_final_reviewer_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected final_reviewer_id to be a string'):
suggestion.validate()
def test_validate_final_reviewer_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected final_reviewer_id to be in a valid user ID format'):
suggestion.validate()
def test_validate_language_code_fails_when_language_codes_do_not_match(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
expected_question_dict = (
expected_suggestion_dict['change']['question_dict']
)
suggestion.validate()
expected_question_dict['language_code'] = 'wrong_language_code'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected question language_code.wrong_language_code. to be same '
'as suggestion language_code.en.'
):
suggestion.validate()
def test_validate_language_code_fails_when_language_code_is_set_to_none(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.language_code = None
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected language_code to be en, received None'):
suggestion.validate()
def test_get_all_html_conztent_strings(self):
suggestion = suggestion_registry.SuggestionAddQuestion(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, self.suggestion_dict['change'],
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], self.fake_date)
actual_outcome_list = suggestion.get_all_html_content_strings()
expected_outcome_list = [
u'', u'<p>This is a hint.</p>', u'<p>This is a solution.</p>', u'']
self.assertEqual(expected_outcome_list, actual_outcome_list)
    def test_convert_html_in_suggestion_change(self):
        """convert_html_in_suggestion_change migrates the legacy
        raw_latex-with-value math markup inside the question state data to
        the math_content-with-value format.
        """
        # Legacy math RTE component markup (pre-SVG schema).
        html_content = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        # Expected markup after add_math_content_to_math_rte_components.
        expected_html_content = (
            '<p>Value</p><oppia-noninteractive-math math_content-with-value='
            '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
            'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
            '-noninteractive-math>')
        answer_group = {
            'outcome': {
                'dest': None,
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': ''
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': 0
                },
                'rule_type': 'Equals'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        # Minimal MultipleChoiceInput question state whose content carries
        # the legacy math markup to be converted.
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': html_content
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {
                    'content_1': {},
                    'feedback_1': {},
                    'feedback_2': {},
                    'hint_1': {},
                    'solution': {}
                }
            },
            'written_translations': {
                'translations_mapping': {
                    'content_1': {},
                    'feedback_1': {},
                    'feedback_2': {},
                    'hint_1': {},
                    'solution': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {
                    'choices': {
                        'value': [{
                            'html': 'option 1',
                            'content_id': 'ca_choices_0'
                        }]
                    },
                    'showChoicesInShuffledOrder': {
                        'value': True
                    }
                },
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_2',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [{
                    'hint_content': {
                        'content_id': 'hint_1',
                        'html': 'Hint 1'
                    }
                }],
                'solution': {
                    'answer_is_exclusive': False,
                    'correct_answer': 0,
                    'explanation': {
                        'content_id': 'solution',
                        'html': '<p>This is a solution.</p>'
                    }
                },
                'id': 'MultipleChoiceInput'
            },
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        suggestion_dict = {
            'suggestion_id': 'skill1.thread1',
            'suggestion_type': feconf.SUGGESTION_TYPE_ADD_QUESTION,
            'target_type': feconf.ENTITY_TYPE_SKILL,
            'target_id': 'skill1',
            'target_version_at_submission': 1,
            'status': suggestion_models.STATUS_ACCEPTED,
            'author_name': 'author',
            'final_reviewer_id': self.reviewer_id,
            'change': {
                'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
                'question_dict': {
                    'question_state_data': question_state_dict,
                    'language_code': 'en',
                    'question_state_data_schema_version': (
                        feconf.CURRENT_STATE_SCHEMA_VERSION),
                    'linked_skill_ids': ['skill_1'],
                    'inapplicable_skill_misconception_ids': ['skillid12345-1']
                },
                'skill_id': 'skill_1',
                'skill_difficulty': 0.3,
            },
            'score_category': 'question.skill1',
            'language_code': 'en',
            'last_updated': utils.get_time_in_millisecs(self.fake_date)
        }
        suggestion = suggestion_registry.SuggestionAddQuestion(
            suggestion_dict['suggestion_id'], suggestion_dict['target_id'],
            suggestion_dict['target_version_at_submission'],
            suggestion_dict['status'], self.author_id, self.reviewer_id,
            suggestion_dict['change'], suggestion_dict['score_category'],
            suggestion_dict['language_code'], False, self.fake_date)
        suggestion.convert_html_in_suggestion_change(
            html_validation_service.add_math_content_to_math_rte_components)
        # Only the content HTML needed conversion; verify it was rewritten.
        self.assertEqual(
            suggestion.change.question_dict['question_state_data']['content'][
                'html'], expected_html_content)
    def test_accept_suggestion_with_images(self):
        """Accepting an ADD_QUESTION suggestion whose content embeds a math
        RTE component should succeed when the referenced SVG was saved to the
        image store beforehand.
        """
        # Question HTML containing an <oppia-noninteractive-math> tag whose
        # math_content value references the SVG filename saved below.
        html_content = (
            '<p>Value</p><oppia-noninteractive-math math_content-with-value='
            '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
            'amp;quot;svg_filename&quot;: &quot;img.svg&quot;}">'
            '</oppia-noninteractive-math>')
        question_state_dict = self._create_valid_question_data(
            'default_state').to_dict()
        question_state_dict['content']['html'] = html_content
        # Save the SVG under the question-suggestions image context first so
        # that accept() can copy it over to the question's own context.
        with python_utils.open_file(
            os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'),
            'rb', encoding=None) as f:
            raw_image = f.read()
        image_context = feconf.IMAGE_CONTEXT_QUESTION_SUGGESTIONS
        fs_services.save_original_and_compressed_versions_of_image(
            'img.svg', image_context, 'skill1',
            raw_image, 'image', False)
        # The target skill must exist before the suggestion can be accepted.
        self.save_new_skill('skill1', self.author_id, description='description')
        suggestion_dict = {
            'suggestion_id': 'skill1.thread1',
            'suggestion_type': feconf.SUGGESTION_TYPE_ADD_QUESTION,
            'target_type': feconf.ENTITY_TYPE_SKILL,
            'target_id': 'skill1',
            'target_version_at_submission': 1,
            'status': suggestion_models.STATUS_ACCEPTED,
            'author_name': 'author',
            'final_reviewer_id': self.reviewer_id,
            'change': {
                'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
                'question_dict': {
                    'question_state_data': question_state_dict,
                    'language_code': 'en',
                    'question_state_data_schema_version': (
                        feconf.CURRENT_STATE_SCHEMA_VERSION),
                    'linked_skill_ids': ['skill_1'],
                    'inapplicable_skill_misconception_ids': []
                },
                'skill_id': 'skill1',
                'skill_difficulty': 0.3,
            },
            'score_category': 'question.skill1',
            'language_code': 'en',
            'last_updated': utils.get_time_in_millisecs(self.fake_date)
        }
        suggestion = suggestion_registry.SuggestionAddQuestion(
            suggestion_dict['suggestion_id'], suggestion_dict['target_id'],
            suggestion_dict['target_version_at_submission'],
            suggestion_dict['status'], self.author_id, self.reviewer_id,
            suggestion_dict['change'], suggestion_dict['score_category'],
            suggestion_dict['language_code'], False, self.fake_date)
        # Should not raise: both the image and the skill exist at this point.
        suggestion.accept('commit_message')
    def test_contructor_updates_state_shema_in_change_cmd(self):
        """Constructing a SuggestionAddQuestion from an old-schema payload
        migrates the question state data to the current schema version.

        NOTE(review): the method name has typos ('contructor', 'shema');
        renaming is deliberately not done in a doc-only pass.
        """
        score_category = (
            suggestion_models.SCORE_TYPE_QUESTION +
            suggestion_models.SCORE_CATEGORY_DELIMITER + 'skill_id')
        # Payload pinned at state schema version 27 (an old version which,
        # per the companion test below, is within the migratable range).
        change = {
            'cmd': (
                question_domain
                .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
            'question_dict': {
                'question_state_data': self.VERSION_27_STATE_DICT,
                'question_state_data_schema_version': 27,
                'language_code': 'en',
                'linked_skill_ids': ['skill_id'],
                'inapplicable_skill_misconception_ids': []
            },
            'skill_id': 'skill_id',
            'skill_difficulty': 0.3
        }
        # Sanity check: the payload really starts at version 27.
        self.assertEqual(
            change['question_dict']['question_state_data_schema_version'], 27)
        suggestion = suggestion_registry.SuggestionAddQuestion(
            'suggestionId', 'target_id', 1, suggestion_models.STATUS_IN_REVIEW,
            self.author_id, None, change, score_category, 'en', False,
            self.fake_date)
        # The constructor is expected to have migrated the payload in place.
        self.assertEqual(
            suggestion.change.question_dict[
                'question_state_data_schema_version'],
            feconf.CURRENT_STATE_SCHEMA_VERSION)
    def test_contructor_raise_exception_for_invalid_state_shema_version(self):
        """Constructing a SuggestionAddQuestion with a missing (None) state
        schema version must raise a ValidationError instead of migrating.

        NOTE(review): the method name has typos ('contructor', 'shema');
        renaming is deliberately not done in a doc-only pass.
        """
        score_category = (
            suggestion_models.SCORE_TYPE_QUESTION +
            suggestion_models.SCORE_CATEGORY_DELIMITER + 'skill_id')
        change = {
            'cmd': (
                question_domain
                .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
            'question_dict': {
                'question_state_data': self.VERSION_27_STATE_DICT,
                # Invalid on purpose: the version must be an int in range.
                'question_state_data_schema_version': None,
                'language_code': 'en',
                'linked_skill_ids': ['skill_id'],
                'inapplicable_skill_misconception_ids': []
            },
            'skill_id': 'skill_id',
            'skill_difficulty': 0.3
        }
        self.assertEqual(
            change['question_dict']['question_state_data_schema_version'], None)
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected state schema version to be in between 25'
        ):
            suggestion_registry.SuggestionAddQuestion(
                'suggestionId', 'target_id', 1,
                suggestion_models.STATUS_IN_REVIEW, self.author_id, None,
                change, score_category, 'en', False, self.fake_date)
class MockInvalidVoiceoverApplication(
        suggestion_registry.BaseVoiceoverApplication):
    """Mock subclass whose __init__ deliberately skips the base-class
    constructor, used to exercise the NotImplementedError paths of
    BaseVoiceoverApplication in the tests below.
    """

    def __init__(self):  # pylint: disable=super-init-not-called
        pass
class BaseVoiceoverApplicationUnitTests(test_utils.GenericTestBase):
    """Tests for the BaseVoiceoverApplication class."""

    def setUp(self):
        super(BaseVoiceoverApplicationUnitTests, self).setUp()
        # A subclass that bypasses __init__, so the abstract methods can be
        # invoked without constructing the (abstract) base directly.
        self.base_voiceover_application = MockInvalidVoiceoverApplication()

    def test_base_class_init_raises_error(self):
        # Instantiating the abstract base directly must be rejected.
        expected_error = (
            'Subclasses of BaseVoiceoverApplication should implement '
            '__init__.')
        with self.assertRaisesRegexp(NotImplementedError, expected_error):
            suggestion_registry.BaseVoiceoverApplication()

    def test_base_class_accept_raises_error(self):
        expected_error = (
            'Subclasses of BaseVoiceoverApplication should implement accept.')
        with self.assertRaisesRegexp(NotImplementedError, expected_error):
            self.base_voiceover_application.accept()

    def test_base_class_reject_raises_error(self):
        expected_error = (
            'Subclasses of BaseVoiceoverApplication should implement reject.')
        with self.assertRaisesRegexp(NotImplementedError, expected_error):
            self.base_voiceover_application.reject()
class ExplorationVoiceoverApplicationUnitTest(test_utils.GenericTestBase):
    """Tests for the ExplorationVoiceoverApplication class."""

    def setUp(self):
        super(ExplorationVoiceoverApplicationUnitTest, self).setUp()
        self.signup('author@example.com', 'author')
        self.author_id = self.get_user_id_from_email('author@example.com')
        self.signup('reviewer@example.com', 'reviewer')
        self.reviewer_id = self.get_user_id_from_email('reviewer@example.com')
        # A pristine in-review application (no final reviewer, no rejection
        # message). The validation tests below corrupt exactly one field
        # each and then expect validate() to raise.
        self.voiceover_application = (
            suggestion_registry.ExplorationVoiceoverApplication(
                'application_id', 'exp_id', suggestion_models.STATUS_IN_REVIEW,
                self.author_id, None, 'en', 'audio_file.mp3', '<p>Content</p>',
                None))

    def test_validation_with_invalid_target_type_raise_exception(self):
        # Common pattern: validate() passes on the fixture first, proving
        # the subsequent failure is caused only by the mutated field.
        self.voiceover_application.validate()
        self.voiceover_application.target_type = 'invalid_target'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected target_type to be among allowed choices, '
            'received invalid_target'
        ):
            self.voiceover_application.validate()

    def test_validation_with_invalid_target_id_raise_exception(self):
        self.voiceover_application.validate()
        self.voiceover_application.target_id = 123
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected target_id to be a string'
        ):
            self.voiceover_application.validate()

    def test_validation_with_invalid_status_raise_exception(self):
        self.voiceover_application.validate()
        self.voiceover_application.status = 'invalid_status'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected status to be among allowed choices, '
            'received invalid_status'
        ):
            self.voiceover_application.validate()

    def test_validation_with_invalid_author_id_raise_exception(self):
        self.voiceover_application.validate()
        self.voiceover_application.author_id = 123
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected author_id to be a string'
        ):
            self.voiceover_application.validate()

    def test_validation_with_invalid_final_reviewer_id_raise_exception(self):
        # While still in review, final_reviewer_id must remain None.
        self.assertEqual(
            self.voiceover_application.status,
            suggestion_models.STATUS_IN_REVIEW)
        self.assertEqual(self.voiceover_application.final_reviewer_id, None)
        self.voiceover_application.validate()
        self.voiceover_application.final_reviewer_id = 123
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected final_reviewer_id to be None as the '
            'voiceover application is not yet handled.'
        ):
            self.voiceover_application.validate()

    def test_validation_for_handled_application_with_invalid_final_review(self):
        # Once handled (accepted), a string final_reviewer_id is required.
        self.assertEqual(
            self.voiceover_application.status,
            suggestion_models.STATUS_IN_REVIEW)
        self.assertEqual(self.voiceover_application.final_reviewer_id, None)
        self.voiceover_application.validate()
        self.voiceover_application.status = suggestion_models.STATUS_ACCEPTED
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected final_reviewer_id to be a string'
        ):
            self.voiceover_application.validate()

    def test_validation_for_rejected_application_with_no_message(self):
        # A rejected application must carry a rejection message.
        self.assertEqual(
            self.voiceover_application.status,
            suggestion_models.STATUS_IN_REVIEW)
        self.assertEqual(self.voiceover_application.rejection_message, None)
        self.voiceover_application.validate()
        self.voiceover_application.final_reviewer_id = 'reviewer_id'
        self.voiceover_application.status = suggestion_models.STATUS_REJECTED
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected rejection_message to be a string for a '
            'rejected application'
        ):
            self.voiceover_application.validate()

    def test_validation_for_accepted_application_with_message(self):
        # Conversely, an accepted application must NOT carry one.
        self.assertEqual(
            self.voiceover_application.status,
            suggestion_models.STATUS_IN_REVIEW)
        self.assertEqual(self.voiceover_application.rejection_message, None)
        self.voiceover_application.validate()
        self.voiceover_application.final_reviewer_id = 'reviewer_id'
        self.voiceover_application.status = suggestion_models.STATUS_ACCEPTED
        self.voiceover_application.rejection_message = 'Invalid message'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected rejection_message to be None for the accepted '
            'voiceover application, received Invalid message'
        ):
            self.voiceover_application.validate()

    def test_validation_with_invalid_language_code_type_raise_exception(self):
        self.assertEqual(self.voiceover_application.language_code, 'en')
        self.voiceover_application.validate()
        self.voiceover_application.language_code = 1
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected language_code to be a string'
        ):
            self.voiceover_application.validate()

    def test_validation_with_invalid_language_code_raise_exception(self):
        self.assertEqual(self.voiceover_application.language_code, 'en')
        self.voiceover_application.validate()
        self.voiceover_application.language_code = 'invalid language'
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid language_code: invalid language'
        ):
            self.voiceover_application.validate()

    def test_validation_with_invalid_filename_type_raise_exception(self):
        self.assertEqual(self.voiceover_application.filename, 'audio_file.mp3')
        self.voiceover_application.validate()
        self.voiceover_application.filename = 1
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected filename to be a string'
        ):
            self.voiceover_application.validate()

    def test_validation_with_invalid_content_type_raise_exception(self):
        self.assertEqual(self.voiceover_application.content, '<p>Content</p>')
        self.voiceover_application.validate()
        self.voiceover_application.content = 1
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected content to be a string'
        ):
            self.voiceover_application.validate()

    def test_to_dict_returns_correct_dict(self):
        # to_dict() resolves author/reviewer ids to their usernames.
        self.voiceover_application.accept(self.reviewer_id)
        expected_dict = {
            'voiceover_application_id': 'application_id',
            'target_type': 'exploration',
            'target_id': 'exp_id',
            'status': 'accepted',
            'author_name': 'author',
            'final_reviewer_name': 'reviewer',
            'language_code': 'en',
            'content': '<p>Content</p>',
            'filename': 'audio_file.mp3',
            'rejection_message': None
        }
        self.assertEqual(
            self.voiceover_application.to_dict(), expected_dict)

    def test_is_handled_property_returns_correct_value(self):
        self.assertFalse(self.voiceover_application.is_handled)
        self.voiceover_application.accept(self.reviewer_id)
        self.assertTrue(self.voiceover_application.is_handled)

    def test_accept_voiceover_application(self):
        self.assertEqual(self.voiceover_application.final_reviewer_id, None)
        self.assertEqual(self.voiceover_application.status, 'review')
        self.voiceover_application.accept(self.reviewer_id)
        self.assertEqual(
            self.voiceover_application.final_reviewer_id, self.reviewer_id)
        self.assertEqual(self.voiceover_application.status, 'accepted')

    def test_reject_voiceover_application(self):
        self.assertEqual(self.voiceover_application.final_reviewer_id, None)
        self.assertEqual(self.voiceover_application.status, 'review')
        self.voiceover_application.reject(self.reviewer_id, 'rejection message')
        self.assertEqual(
            self.voiceover_application.final_reviewer_id, self.reviewer_id)
        self.assertEqual(self.voiceover_application.status, 'rejected')
        self.assertEqual(
            self.voiceover_application.rejection_message, 'rejection message')
class CommunityContributionStatsUnitTests(test_utils.GenericTestBase):
    """Tests for the CommunityContributionStats class."""

    # Sample per-language reviewer/suggestion counts used when constructing
    # stats objects directly.
    translation_reviewer_counts_by_lang_code = {
        'hi': 0,
        'en': 1
    }
    translation_suggestion_counts_by_lang_code = {
        'fr': 6,
        'en': 5
    }
    question_reviewer_count = 1
    question_suggestion_count = 4
    # Deliberately invalid values fed to the validation tests below.
    negative_count = -1
    non_integer_count = 'non_integer_count'
    sample_language_code = 'en'
    invalid_language_code = 'invalid'

    def _assert_community_contribution_stats_is_in_default_state(self):
        """Checks if the community contribution stats is in its default
        state.
        """
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        self.assertEqual(
            (
                community_contribution_stats
                .translation_reviewer_counts_by_lang_code
            ), {})
        self.assertEqual(
            (
                community_contribution_stats
                .translation_suggestion_counts_by_lang_code
            ), {})
        self.assertEqual(
            community_contribution_stats.question_reviewer_count, 0)
        self.assertEqual(
            community_contribution_stats.question_suggestion_count, 0)

    def test_initial_object_with_valid_arguments_has_correct_properties(self):
        # Constructing with valid counts must validate cleanly and store each
        # argument unchanged.
        community_contribution_stats = (
            suggestion_registry.CommunityContributionStats(
                self.translation_reviewer_counts_by_lang_code,
                self.translation_suggestion_counts_by_lang_code,
                self.question_reviewer_count,
                self.question_suggestion_count
            )
        )
        community_contribution_stats.validate()
        self.assertEqual(
            (
                community_contribution_stats
                .translation_reviewer_counts_by_lang_code
            ),
            self.translation_reviewer_counts_by_lang_code)
        self.assertEqual(
            (
                community_contribution_stats
                .translation_suggestion_counts_by_lang_code
            ),
            self.translation_suggestion_counts_by_lang_code
        )
        self.assertEqual(
            community_contribution_stats.question_reviewer_count,
            self.question_reviewer_count
        )
        self.assertEqual(
            community_contribution_stats.question_suggestion_count,
            self.question_suggestion_count
        )

    def test_set_translation_reviewer_count_for_lang_code_updates_empty_dict(
            self):
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        self._assert_community_contribution_stats_is_in_default_state()
        (
            community_contribution_stats
            .set_translation_reviewer_count_for_language_code(
                self.sample_language_code, 2)
        )
        self.assertDictEqual(
            (
                community_contribution_stats
                .translation_reviewer_counts_by_lang_code
            ),
            {self.sample_language_code: 2}
        )

    def test_set_translation_reviewer_count_for_lang_code_updates_count_value(
            self):
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        self._assert_community_contribution_stats_is_in_default_state()
        # Pre-seed an existing count so the setter overwrites, not inserts.
        (
            community_contribution_stats
            .translation_reviewer_counts_by_lang_code
        ) = {self.sample_language_code: 1}
        (
            community_contribution_stats
            .set_translation_reviewer_count_for_language_code(
                self.sample_language_code, 2)
        )
        self.assertDictEqual(
            (
                community_contribution_stats
                .translation_reviewer_counts_by_lang_code
            ),
            {self.sample_language_code: 2}
        )

    def test_set_translation_reviewer_count_for_lang_code_adds_new_lang_key(
            self):
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        self._assert_community_contribution_stats_is_in_default_state()
        (
            community_contribution_stats
            .translation_reviewer_counts_by_lang_code
        ) = {'en': 1}
        (
            community_contribution_stats
            .set_translation_reviewer_count_for_language_code('hi', 2)
        )
        self.assertDictEqual(
            (
                community_contribution_stats
                .translation_reviewer_counts_by_lang_code
            ),
            {'en': 1, 'hi': 2}
        )

    def test_set_translation_suggestion_count_for_lang_code_updates_empty_dict(
            self):
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        self._assert_community_contribution_stats_is_in_default_state()
        (
            community_contribution_stats
            .set_translation_suggestion_count_for_language_code(
                self.sample_language_code, 2)
        )
        self.assertDictEqual(
            (
                community_contribution_stats
                .translation_suggestion_counts_by_lang_code
            ), {self.sample_language_code: 2}
        )

    def test_set_translation_suggestion_count_for_lang_code_updates_count_value(
            self):
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        self._assert_community_contribution_stats_is_in_default_state()
        (
            community_contribution_stats
            .translation_suggestion_counts_by_lang_code
        ) = {self.sample_language_code: 1}
        (
            community_contribution_stats
            .set_translation_suggestion_count_for_language_code(
                self.sample_language_code, 2)
        )
        self.assertDictEqual(
            (
                community_contribution_stats
                .translation_suggestion_counts_by_lang_code
            ),
            {self.sample_language_code: 2}
        )

    def test_set_translation_suggestion_count_for_lang_code_adds_new_lang_key(
            self):
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        self._assert_community_contribution_stats_is_in_default_state()
        (
            community_contribution_stats
            .translation_suggestion_counts_by_lang_code
        ) = {'en': 1}
        (
            community_contribution_stats
            .set_translation_suggestion_count_for_language_code('hi', 2)
        )
        self.assertDictEqual(
            (
                community_contribution_stats
                .translation_suggestion_counts_by_lang_code
            ),
            {'en': 1, 'hi': 2}
        )

    def test_get_translation_language_codes_that_need_reviewers_for_one_lang(
            self):
        # A language with pending suggestions but no reviewers needs one.
        stats = suggestion_services.get_community_contribution_stats()
        stats.set_translation_suggestion_count_for_language_code(
            self.sample_language_code, 1)
        language_codes_that_need_reviewers = (
            stats.get_translation_language_codes_that_need_reviewers()
        )
        self.assertEqual(
            language_codes_that_need_reviewers, {self.sample_language_code})

    def test_get_translation_language_codes_that_need_reviewers_for_multi_lang(
            self):
        stats = suggestion_services.get_community_contribution_stats()
        stats.set_translation_suggestion_count_for_language_code('hi', 1)
        stats.set_translation_suggestion_count_for_language_code('fr', 1)
        language_codes_that_need_reviewers = (
            stats.get_translation_language_codes_that_need_reviewers()
        )
        self.assertEqual(
            language_codes_that_need_reviewers, {'hi', 'fr'})

    def test_get_translation_language_codes_that_need_reviewers_for_no_lang(
            self):
        stats = suggestion_services.get_community_contribution_stats()
        language_codes_that_need_reviewers = (
            stats.get_translation_language_codes_that_need_reviewers()
        )
        self.assertEqual(
            language_codes_that_need_reviewers, set())

    def test_translation_reviewers_are_needed_if_suggestions_but_no_reviewers(
            self):
        stats = suggestion_services.get_community_contribution_stats()
        stats.set_translation_suggestion_count_for_language_code(
            self.sample_language_code, 1)
        self.assertTrue(
            stats.are_translation_reviewers_needed_for_lang_code(
                self.sample_language_code))

    def test_translation_reviewers_are_needed_if_num_suggestions_past_max(self):
        # 2 suggestions / 1 reviewer > max of 1 per reviewer => need more.
        stats = suggestion_services.get_community_contribution_stats()
        stats.set_translation_suggestion_count_for_language_code(
            self.sample_language_code, 2)
        stats.set_translation_reviewer_count_for_language_code(
            self.sample_language_code, 1)
        config_services.set_property(
            'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
        reviewers_are_needed = (
            stats.are_translation_reviewers_needed_for_lang_code(
                self.sample_language_code))
        self.assertTrue(reviewers_are_needed)

    def test_translation_reviewers_not_needed_if_num_suggestions_eqs_max(self):
        # 2 suggestions / 2 reviewers == max of 1 each => capacity is enough.
        stats = suggestion_services.get_community_contribution_stats()
        stats.set_translation_suggestion_count_for_language_code(
            self.sample_language_code, 2)
        stats.set_translation_reviewer_count_for_language_code(
            self.sample_language_code, 2)
        config_services.set_property(
            'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
        reviewers_are_needed = (
            stats.are_translation_reviewers_needed_for_lang_code(
                self.sample_language_code))
        self.assertFalse(reviewers_are_needed)

    def test_translation_reviewers_not_needed_if_num_suggestions_less_max(self):
        stats = suggestion_services.get_community_contribution_stats()
        stats.set_translation_suggestion_count_for_language_code(
            self.sample_language_code, 1)
        stats.set_translation_reviewer_count_for_language_code(
            self.sample_language_code, 2)
        config_services.set_property(
            'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
        reviewers_are_needed = (
            stats.are_translation_reviewers_needed_for_lang_code(
                self.sample_language_code))
        self.assertFalse(reviewers_are_needed)

    def test_translation_reviewers_not_needed_if_reviewers_and_no_sugestions(
            self):
        stats = suggestion_services.get_community_contribution_stats()
        stats.set_translation_reviewer_count_for_language_code(
            self.sample_language_code, 1)
        self.assertFalse(
            stats.are_translation_reviewers_needed_for_lang_code(
                self.sample_language_code))

    def test_translation_reviewers_not_needed_if_no_reviewers_no_sugestions(
            self):
        stats = suggestion_services.get_community_contribution_stats()
        self._assert_community_contribution_stats_is_in_default_state()
        self.assertFalse(
            stats.are_translation_reviewers_needed_for_lang_code(
                self.sample_language_code))

    def test_question_reviewers_are_needed_if_suggestions_zero_reviewers(
            self):
        stats = suggestion_services.get_community_contribution_stats()
        stats.question_suggestion_count = 1
        self.assertTrue(stats.are_question_reviewers_needed())

    def test_question_reviewers_are_needed_if_num_suggestions_past_max(self):
        stats = suggestion_services.get_community_contribution_stats()
        stats.question_suggestion_count = 2
        stats.question_reviewer_count = 1
        config_services.set_property(
            'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
        reviewers_are_needed = stats.are_question_reviewers_needed()
        self.assertTrue(reviewers_are_needed)

    def test_question_reviewers_not_needed_if_num_suggestions_eqs_max(self):
        stats = suggestion_services.get_community_contribution_stats()
        stats.question_suggestion_count = 2
        stats.question_reviewer_count = 2
        config_services.set_property(
            'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
        reviewers_are_needed = stats.are_question_reviewers_needed()
        self.assertFalse(reviewers_are_needed)

    def test_question_reviewers_not_needed_if_num_suggestions_less_max(self):
        stats = suggestion_services.get_community_contribution_stats()
        stats.question_suggestion_count = 1
        stats.question_reviewer_count = 2
        config_services.set_property(
            'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
        reviewers_are_needed = stats.are_question_reviewers_needed()
        self.assertFalse(reviewers_are_needed)

    def test_question_reviewers_not_needed_if_no_reviewers_no_sugestions(
            self):
        stats = suggestion_services.get_community_contribution_stats()
        self._assert_community_contribution_stats_is_in_default_state()
        self.assertFalse(stats.are_question_reviewers_needed())

    def test_validate_translation_reviewer_counts_fails_for_negative_counts(
            self):
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        (
            community_contribution_stats
            .set_translation_reviewer_count_for_language_code(
                self.sample_language_code, self.negative_count)
        )
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected the translation reviewer count to be non-negative for '
            '%s language code, received: %s.' % (
                self.sample_language_code, self.negative_count)
        ):
            community_contribution_stats.validate()

    def test_validate_translation_suggestion_counts_fails_for_negative_counts(
            self):
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        (
            community_contribution_stats
            .set_translation_suggestion_count_for_language_code(
                self.sample_language_code, self.negative_count)
        )
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected the translation suggestion count to be non-negative for '
            '%s language code, received: %s.' % (
                self.sample_language_code, self.negative_count)
        ):
            community_contribution_stats.validate()

    def test_validate_question_reviewer_count_fails_for_negative_count(self):
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        community_contribution_stats.question_reviewer_count = (
            self.negative_count
        )
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected the question reviewer count to be non-negative, '
            'received: %s.' % (
                community_contribution_stats.question_reviewer_count)
        ):
            community_contribution_stats.validate()

    def test_validate_question_suggestion_count_fails_for_negative_count(self):
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        community_contribution_stats.question_suggestion_count = (
            self.negative_count
        )
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected the question suggestion count to be non-negative, '
            'received: %s.' % (
                community_contribution_stats.question_suggestion_count)
        ):
            community_contribution_stats.validate()

    def test_validate_translation_reviewer_counts_fails_for_non_integer_counts(
            self):
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        (
            community_contribution_stats
            .set_translation_reviewer_count_for_language_code(
                self.sample_language_code, self.non_integer_count)
        )
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected the translation reviewer count to be an integer for '
            '%s language code, received: %s.' % (
                self.sample_language_code, self.non_integer_count)
        ):
            community_contribution_stats.validate()

    def test_validate_translation_suggestion_counts_fails_for_non_integer_count(
            self):
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        (
            community_contribution_stats
            .set_translation_suggestion_count_for_language_code(
                self.sample_language_code, self.non_integer_count)
        )
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected the translation suggestion count to be an integer for '
            '%s language code, received: %s.' % (
                self.sample_language_code, self.non_integer_count)
        ):
            community_contribution_stats.validate()

    def test_validate_question_reviewer_count_fails_for_non_integer_count(
            self):
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        community_contribution_stats.question_reviewer_count = (
            self.non_integer_count
        )
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected the question reviewer count to be an integer, '
            'received: %s.' % (
                community_contribution_stats.question_reviewer_count)
        ):
            community_contribution_stats.validate()

    def test_validate_question_suggestion_count_fails_for_non_integer_count(
            self):
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        community_contribution_stats.question_suggestion_count = (
            self.non_integer_count
        )
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected the question suggestion count to be an integer, '
            'received: %s.' % (
                community_contribution_stats.question_suggestion_count)
        ):
            community_contribution_stats.validate()

    def test_validate_translation_reviewer_counts_fails_for_invalid_lang_code(
            self):
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        (
            community_contribution_stats
            .set_translation_reviewer_count_for_language_code(
                self.invalid_language_code, 1)
        )
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Invalid language code for the translation reviewer counts: '
            '%s.' % self.invalid_language_code
        ):
            community_contribution_stats.validate()

    def test_validate_translation_suggestion_counts_fails_for_invalid_lang_code(
            self):
        community_contribution_stats = (
            suggestion_services.get_community_contribution_stats()
        )
        (
            community_contribution_stats
            .set_translation_suggestion_count_for_language_code(
                self.invalid_language_code, 1)
        )
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Invalid language code for the translation suggestion counts: '
            '%s.' % self.invalid_language_code
        ):
            community_contribution_stats.validate()
class ReviewableSuggestionEmailInfoUnitTests(test_utils.GenericTestBase):
    """Tests for the ReviewableSuggestionEmailInfo class."""

    # Fixture values passed to the constructor under test.
    suggestion_type = feconf.SUGGESTION_TYPE_ADD_QUESTION
    language_code = 'en'
    suggestion_content = 'sample question'
    submission_datetime = datetime.datetime.utcnow()

    def test_initial_object_with_valid_arguments_has_correct_properties(self):
        # The constructor is a plain value object: each argument must be
        # stored on the matching attribute unchanged.
        email_info = suggestion_registry.ReviewableSuggestionEmailInfo(
            self.suggestion_type, self.language_code,
            self.suggestion_content, self.submission_datetime)
        self.assertEqual(email_info.suggestion_type, self.suggestion_type)
        self.assertEqual(email_info.language_code, self.language_code)
        self.assertEqual(
            email_info.suggestion_content, self.suggestion_content)
        self.assertEqual(
            email_info.submission_datetime, self.submission_datetime)
| core/domain/suggestion_registry_test.py | 141,288 | Tests for the BaseSuggestion class.
Tests for the BaseVoiceoverApplication class.
Tests for the CommunityContributionStats class.
Tests for the ExplorationVoiceoverApplication class.
Tests for the ReviewableSuggestionEmailInfo class.
Tests for the SuggestionAddQuestion class.
Tests for the SuggestionEditStateContent class.
Tests for the SuggestionEditStateContent class.
Checks if the community contribution stats is in its default
state.
Temporary function.
Tests for suggestion registry classes.
Copyright 2018 The Oppia Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS-IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=import-only-modules pylint: disable=import-only-modules pylint: disable=super-init-not-called We are not setting value in suggestion.change.question_dict directly since pylint produces unsupported-assignment-operation error. The detailed analysis for the same can be checked in this issue: https://github.com/oppia/oppia/issues/7008. pylint: disable=super-init-not-called | 1,467 | en | 0.817353 |
'''
Given a string, write a function that uses recursion to output a
list of all the possible permutations of that string.
For example, given s='abc' the function should return ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
Note: If a character is repeated, treat each occurrence as distinct,
for example an input of 'xxx' would return a list with 6 "versions" of 'xxx'
'''
from nose.tools import assert_equal
def permute(s):
    """Return a list of all permutations of the string ``s`` via recursion.

    Each occurrence of a repeated character is treated as distinct, so
    ``permute('xxx')`` yields six (identical-looking) strings.

    Args:
        s: The string to permute.

    Returns:
        A list of permutation strings, ordered by fixing each character in
        turn and recursing on the remainder (same order as
        itertools.permutations).
    """
    # Base case: the empty string and a single character each have exactly
    # one permutation — themselves. Using `<=` instead of the original
    # `== 1` fixes the empty-string edge case (which previously returned
    # []) without changing any result for non-empty input.
    if len(s) <= 1:
        return [s]
    out = []
    for i, let in enumerate(s):
        # Fix `let` as the first character, permute the remaining letters.
        for perm in permute(s[:i] + s[i + 1:]):
            out.append(let + perm)
    return out
class TestPerm(object):
def test(self, solution):
assert_equal(sorted(solution('abc')), sorted(
['abc', 'acb', 'bac', 'bca', 'cab', 'cba']))
assert_equal(sorted(solution('dog')), sorted(
['dog', 'dgo', 'odg', 'ogd', 'gdo', 'god']))
print('All test cases passed.')
# Run the test suite against the reference implementation above; an
# AssertionError is raised if any case fails.
t = TestPerm()
t.test(permute)
| udemy-data-structures-and-algorithms/15-recursion/15.8_string_permutation.py | 1,127 | Given a string, write a function that uses recursion to output a
list of all the possible permutations of that string.
For example, given s='abc' the function should return ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
Note: If a character is repeated, treat each occurence as distinct,
for example an input of 'xxx' would return a list with 6 "versions" of 'xxx'
Base case For every letter in string For every permutation Add it to the output Run Tests | 454 | en | 0.781589 |
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test generate RPC."""
from test_framework.test_framework import MAGATestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class RPCGenerateTest(MAGATestFramework):
    """Checks that the removed `generate` RPC consistently points users at
    the -generate cli option."""

    def set_test_params(self):
        self.num_nodes = 1

    def run_test(self):
        # Exact text the node is expected to produce for `generate`.
        deprecation_notice = (
            "generate\n"
            "has been replaced by the -generate "
            "cli option. Refer to -help for more information."
        )
        node = self.nodes[0]

        self.log.info("Test rpc generate raises with message to use cli option")
        assert_raises_rpc_error(
            -32601, deprecation_notice, node.rpc.generate)

        self.log.info("Test rpc generate help prints message to use cli option")
        assert_equal(deprecation_notice, node.help("generate"))

        self.log.info("Test rpc generate is a hidden command not discoverable in general help")
        assert deprecation_notice not in node.help()
# Script entry point: run the functional test when invoked directly.
if __name__ == "__main__":
    RPCGenerateTest().main()
| test/functional/rpc_generate.py | 1,182 | Test generate RPC.
!/usr/bin/env python3 Copyright (c) 2020 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. | 221 | en | 0.512774 |
# Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""GAN-based TTS ESPnet model."""
from contextlib import contextmanager
from distutils.version import LooseVersion
from typing import Any
from typing import Dict
from typing import Optional
import torch
from typeguard import check_argument_types
from espnet2.gan_tts.abs_gan_tts import AbsGANTTS
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
from espnet2.train.abs_gan_espnet_model import AbsGANESPnetModel
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
# torch.cuda.amp.autocast exists from torch 1.6.0 onwards; on older versions
# fall back to a no-op context manager so call sites stay unchanged.
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
    from torch.cuda.amp import autocast
else:
    # Nothing to do if torch < 1.6.0: yield immediately, ignoring `enabled`.
    @contextmanager
    def autocast(enabled=True):  # NOQA
        yield
class ESPnetGANTTSModel(AbsGANESPnetModel):
    """GAN-based TTS ESPnet model."""

    def __init__(
        self,
        feats_extract: Optional[AbsFeatsExtract],
        normalize: Optional[AbsNormalize and InversibleInterface],
        tts: AbsGANTTS,
    ):
        """Initialize ESPnetGANTTSModel module.

        Args:
            feats_extract (Optional[AbsFeatsExtract]): Feature extraction module
                applied to the raw waveform (None to skip extraction).
            normalize (Optional[AbsNormalize and InversibleInterface]):
                Invertible normalization module for the extracted features.
            tts (AbsGANTTS): GAN-based TTS module; must expose ``generator``
                and ``discriminator`` submodules.
        """
        assert check_argument_types()
        super().__init__()
        self.feats_extract = feats_extract
        self.normalize = normalize
        self.tts = tts
        # Both GAN halves are required up front so optimizer wiring can rely
        # on them (typo fixed: "resistered" -> "registered").
        assert hasattr(
            tts, "generator"
        ), "generator module must be registered as tts.generator"
        assert hasattr(
            tts, "discriminator"
        ), "discriminator module must be registered as tts.discriminator"

    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        forward_generator: bool = True,
    ) -> Dict[str, Any]:
        """Return generator or discriminator loss with dict format.

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            speech (Tensor): Speech waveform tensor (B, T_wav).
            speech_lengths (Tensor): Speech length tensor (B,).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
            sids (Optional[Tensor]): Speaker ID tensor (B, 1).
            lids (Optional[Tensor]): Language ID tensor (B, 1).
            forward_generator (bool): Whether to forward generator.

        Returns:
            Dict[str, Any]:
                - loss (Tensor): Loss scalar tensor.
                - stats (Dict[str, float]): Statistics to be monitored.
                - weight (Tensor): Weight tensor to summarize losses.
                - optim_idx (int): Optimizer index (0 for G and 1 for D).
        """
        # Feature extraction/normalization is kept in fp32 even under AMP,
        # hence the autocast(False) guard.
        with autocast(False):
            # Extract features
            feats = None
            if self.feats_extract is not None:
                feats, feats_lengths = self.feats_extract(speech, speech_lengths)

            # Normalize
            if self.normalize is not None:
                feats, feats_lengths = self.normalize(feats, feats_lengths)

        # Make batch for tts inputs
        batch = {}
        batch.update(text=text, text_lengths=text_lengths)
        batch.update(forward_generator=forward_generator)

        # Update kwargs for additional auxiliary inputs; only present keys are
        # forwarded so the tts module's signature stays minimal.
        if feats is not None:
            batch.update(feats=feats, feats_lengths=feats_lengths)
        if self.tts.require_raw_speech:
            batch.update(speech=speech, speech_lengths=speech_lengths)
        if spembs is not None:
            batch.update(spembs=spembs)
        if sids is not None:
            batch.update(sids=sids)
        if lids is not None:
            batch.update(lids=lids)
        return self.tts(**batch)

    def collect_feats(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
    ) -> Dict[str, torch.Tensor]:
        """Calculate features and return them as a dict.

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            speech (Tensor): Speech waveform tensor (B, T_wav).
            speech_lengths (Tensor): Speech length tensor (B,).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
            sids (Optional[Tensor]): Speaker ID tensor (B, 1).
            lids (Optional[Tensor]): Language ID tensor (B, 1).

        Returns:
            Dict[str, Tensor]: Dict of features (empty when no extractor is set).
        """
        feats = None
        if self.feats_extract is not None:
            feats, feats_lengths = self.feats_extract(speech, speech_lengths)
        feats_dict = {}
        if feats is not None:
            feats_dict.update(feats=feats, feats_lengths=feats_lengths)
        return feats_dict
| espnet2/gan_tts/espnet_model.py | 5,163 | GAN-based TTS ESPnet model.
Initialize ESPnetGANTTSModel module.
Calculate features and return them as a dict.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B, 1).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker index tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
Returns:
Dict[str, Tensor]: Dict of features.
Return generator or discriminator loss with dict format.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B,).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker ID tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
forward_generator (bool): Whether to forward generator.
Returns:
Dict[str, Any]:
- loss (Tensor): Loss scalar tensor.
- stats (Dict[str, float]): Statistics to be monitored.
- weight (Tensor): Weight tensor to summarize losses.
- optim_idx (int): Optimizer index (0 for G and 1 for D).
GAN-based TTS ESPnet model.
Copyright 2021 Tomoki Hayashi Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) Nothing to do if torch < 1.6.0 NOQA Extract features Normalize Make batch for tts inputs Update kwargs for additional auxiliary inputs | 1,595 | en | 0.553821 |
import time
import pytest
from celery.result import GroupResult
from celery.schedules import crontab
from kombu.exceptions import EncodeError
from director import build_celery_schedule
from director.exceptions import WorkflowSyntaxError
from director.models.tasks import Task
from director.models.workflows import Workflow
KEYS = ["id", "created", "updated", "task"]
def test_execute_one_task_success(app, create_builder):
    """A single-task workflow builds, runs and ends up marked 'success'."""
    workflow, builder = create_builder("example", "WORKFLOW", {})
    assert workflow["status"] == "pending"
    # Canvas has been built: start/end wrappers surround the single task.
    assert len(builder.canvas) == 3
    assert builder.canvas[0].task == "director.tasks.workflows.start"
    assert builder.canvas[-1].task == "director.tasks.workflows.end"
    assert builder.canvas[1].task == "TASK_EXAMPLE"
    # Tasks added in DB, all pending before execution.
    with app.app_context():
        tasks = Task.query.order_by(Task.created_at.asc()).all()
        assert len(tasks) == 1
        assert tasks[0].key == "TASK_EXAMPLE"
        assert tasks[0].status.value == "pending"
    # Tasks executed in Celery: walking .parent unwraps end -> task -> start.
    result = builder.run()
    assert result.get() is None
    assert result.parent.parent.get() is None
    assert result.parent.get() == "task_example"
    assert result.parent.state == "SUCCESS"
    # DB rows status updated (brief sleep lets the worker commit).
    time.sleep(0.5)
    with app.app_context():
        task = Task.query.filter_by(id=tasks[0].id).first()
        workflow = Workflow.query.filter_by(id=task.workflow_id).first()
        assert workflow.status.value == "success"
        assert task.status.value == "success"
def test_execute_one_task_error(app, create_builder):
    """A single failing task marks both the task and the workflow 'error'."""
    workflow, builder = create_builder("example", "ERROR", {})
    assert workflow["status"] == "pending"
    # Canvas has been built: start/end wrappers surround the failing task.
    assert len(builder.canvas) == 3
    assert builder.canvas[0].task == "director.tasks.workflows.start"
    assert builder.canvas[-1].task == "director.tasks.workflows.end"
    assert builder.canvas[1].task == "TASK_ERROR"
    # Tasks added in DB, pending before execution.
    with app.app_context():
        tasks = Task.query.order_by(Task.created_at.asc()).all()
        assert len(tasks) == 1
        assert tasks[0].key == "TASK_ERROR"
        assert tasks[0].status.value == "pending"
    # Tasks executed in Celery: TASK_ERROR raises ZeroDivisionError.
    result = builder.run()
    with pytest.raises(ZeroDivisionError):
        assert result.get()
    # DB rows status updated (brief sleep lets the worker commit).
    time.sleep(0.5)
    with app.app_context():
        task = Task.query.filter_by(id=tasks[0].id).first()
        workflow = Workflow.query.filter_by(id=task.workflow_id).first()
        assert workflow.status.value == "error"
        assert task.status.value == "error"
def test_execute_chain_success(app, create_builder):
    """A three-task chain runs in order and every row ends up 'success'."""
    workflow, builder = create_builder("example", "SIMPLE_CHAIN", {})
    assert workflow["status"] == "pending"
    # Canvas has been built: start + A -> B -> C + end.
    assert len(builder.canvas) == 5
    assert builder.canvas[0].task == "director.tasks.workflows.start"
    assert builder.canvas[-1].task == "director.tasks.workflows.end"
    assert [c.task for c in builder.canvas[1:-1]] == ["TASK_A", "TASK_B", "TASK_C"]
    # Tasks added in DB, all pending before execution.
    with app.app_context():
        tasks = Task.query.order_by(Task.created_at.asc()).all()
        assert len(tasks) == 3
        assert [n.key for n in tasks] == ["TASK_A", "TASK_B", "TASK_C"]
        assert set([n.status.value for n in tasks]) == {
            "pending",
        }
    # Tasks executed in Celery: walk the result chain from end back to start.
    result = builder.run()
    assert result.get() is None
    assert result.parent.parent.parent.parent.get() is None
    assert result.parent.get() == "task_c"
    assert result.parent.state == "SUCCESS"
    assert result.parent.parent.get() == "task_b"
    assert result.parent.parent.state == "SUCCESS"
    assert result.parent.parent.parent.get() == "task_a"
    assert result.parent.parent.parent.state == "SUCCESS"
    # DB rows status updated (brief sleep lets the worker commit).
    time.sleep(0.5)
    with app.app_context():
        # Bug fix: the original re-queried filter_by(id=tasks[0].id), which
        # returns a single row, so TASK_B/TASK_C final status was never
        # checked. Re-fetch every row so all three are asserted.
        tasks = Task.query.order_by(Task.created_at.asc()).all()
        workflow = Workflow.query.filter_by(id=tasks[0].workflow_id).first()
        assert workflow.status.value == "success"
        for task in tasks:
            assert task.status.value == "success"
def test_execute_chain_error(app, create_builder):
    """A failure mid-chain leaves earlier tasks 'success' and the workflow 'error'."""
    workflow, builder = create_builder("example", "SIMPLE_CHAIN_ERROR", {})
    assert workflow["status"] == "pending"
    # Canvas has been built: start + A -> B -> ERROR + end.
    assert len(builder.canvas) == 5
    assert builder.canvas[0].task == "director.tasks.workflows.start"
    assert builder.canvas[-1].task == "director.tasks.workflows.end"
    assert [c.task for c in builder.canvas[1:-1]] == ["TASK_A", "TASK_B", "TASK_ERROR"]
    # Tasks added in DB, all pending before execution.
    with app.app_context():
        tasks = Task.query.order_by(Task.created_at.asc()).all()
        assert len(tasks) == 3
        assert [n.key for n in tasks] == ["TASK_A", "TASK_B", "TASK_ERROR"]
        assert set([n.status.value for n in tasks]) == {
            "pending",
        }
    # Tasks executed in Celery: the last task raises ZeroDivisionError.
    result = builder.run()
    with pytest.raises(ZeroDivisionError):
        assert result.get()
    # DB rows status updated: tasks before the failure stay successful.
    time.sleep(0.5)
    with app.app_context():
        task_a = Task.query.filter_by(key="TASK_A").first()
        task_b = Task.query.filter_by(key="TASK_B").first()
        task_error = Task.query.filter_by(key="TASK_ERROR").first()
        workflow = Workflow.query.filter_by(id=task_a.workflow_id).first()
        assert task_a.status.value == "success"
        assert task_b.status.value == "success"
        assert task_error.status.value == "error"
        assert workflow.status.value == "error"
def test_execute_group_success(app, create_builder):
    """A task followed by a group of two runs fully and all rows succeed."""
    workflow, builder = create_builder("example", "SIMPLE_GROUP", {})
    assert workflow["status"] == "pending"
    # Canvas has been built: start + A + group(B, C) + end.
    assert len(builder.canvas) == 4
    assert builder.canvas[0].task == "director.tasks.workflows.start"
    assert builder.canvas[-1].task == "director.tasks.workflows.end"
    assert builder.canvas[1].task == "TASK_A"
    group_tasks = builder.canvas[2].tasks
    assert len(group_tasks) == 2
    assert [group_tasks[0].task, group_tasks[1].task] == [
        "TASK_B",
        "TASK_C",
    ]
    # Tasks added in DB, all pending before execution.
    with app.app_context():
        tasks = Task.query.order_by(Task.created_at.asc()).all()
        assert len(tasks) == 3
        assert [n.key for n in tasks] == ["TASK_A", "TASK_B", "TASK_C"]
        assert set([n.status.value for n in tasks]) == {
            "pending",
        }
    # Tasks executed in Celery: the group result aggregates B and C.
    result = builder.run()
    assert result.get() is None
    assert result.parent.parent.get() == "task_a"
    assert isinstance(result.parent, GroupResult)
    assert result.parent.get() == ["task_b", "task_c"]
    # DB rows status updated (brief sleep lets the worker commit).
    time.sleep(0.5)
    with app.app_context():
        # Bug fix: the original re-queried filter_by(id=tasks[0].id), which
        # returns a single row, so TASK_B/TASK_C final status was never
        # checked. Re-fetch every row so all three are asserted.
        tasks = Task.query.order_by(Task.created_at.asc()).all()
        workflow = Workflow.query.filter_by(id=tasks[0].workflow_id).first()
        assert workflow.status.value == "success"
        for task in tasks:
            assert task.status.value == "success"
def test_execute_group_error(app, create_builder):
    """A failure inside a group does not stop its sibling from succeeding."""
    workflow, builder = create_builder("example", "SIMPLE_GROUP_ERROR", {})
    assert workflow["status"] == "pending"
    # Canvas has been built: start + A + group(ERROR, C) + end.
    assert len(builder.canvas) == 4
    assert builder.canvas[0].task == "director.tasks.workflows.start"
    assert builder.canvas[-1].task == "director.tasks.workflows.end"
    assert builder.canvas[1].task == "TASK_A"
    group_tasks = builder.canvas[2].tasks
    assert len(group_tasks) == 2
    assert [group_tasks[0].task, group_tasks[1].task] == ["TASK_ERROR", "TASK_C"]
    # Tasks added in DB, all pending before execution.
    with app.app_context():
        tasks = Task.query.order_by(Task.created_at.asc()).all()
        assert len(tasks) == 3
        assert [n.key for n in tasks] == ["TASK_A", "TASK_ERROR", "TASK_C"]
        assert set([n.status.value for n in tasks]) == {
            "pending",
        }
    # Tasks executed in Celery: the group member raises ZeroDivisionError.
    result = builder.run()
    with pytest.raises(ZeroDivisionError):
        assert result.get()
    # DB rows status updated: only the failing member is 'error'.
    time.sleep(0.5)
    with app.app_context():
        task_a = Task.query.filter_by(key="TASK_A").first()
        task_error = Task.query.filter_by(key="TASK_ERROR").first()
        task_c = Task.query.filter_by(key="TASK_C").first()
        workflow = Workflow.query.filter_by(id=task_a.workflow_id).first()
        assert task_a.status.value == "success"
        assert task_error.status.value == "error"
        assert task_c.status.value == "success"
        assert workflow.status.value == "error"
@pytest.mark.skip_no_worker()
def test_execute_celery_error_one_task(app, create_builder):
    """A task whose result cannot be serialized fails the whole workflow."""
    workflow, builder = create_builder("example", "CELERY_ERROR_ONE_TASK", {})
    assert workflow["status"] == "pending"
    # Tasks executed in Celery: the result backend raises EncodeError.
    result = builder.run()
    with pytest.raises(EncodeError):
        assert result.get()
    # DB rows status updated (brief sleep lets the worker commit).
    time.sleep(0.5)
    with app.app_context():
        task = Task.query.order_by(Task.created_at.asc()).first()
        workflow = Workflow.query.filter_by(id=task.workflow_id).first()
        assert workflow.status.value == "error"
        assert task.status.value == "error"
@pytest.mark.skip_no_worker()
def test_execute_celery_error_multiple_tasks(app, create_builder):
    """A serialization failure mid-workflow leaves earlier tasks successful."""
    workflow, builder = create_builder("example", "CELERY_ERROR_MULTIPLE_TASKS", {})
    assert workflow["status"] == "pending"
    # Tasks executed in Celery: the second task's result cannot be encoded.
    result = builder.run()
    with pytest.raises(EncodeError):
        assert result.get()
    # DB rows status updated (brief sleep lets the worker commit).
    time.sleep(0.5)
    with app.app_context():
        task_a = Task.query.filter_by(key="TASK_A").first()
        task_celery_error = Task.query.filter_by(key="TASK_CELERY_ERROR").first()
        workflow = Workflow.query.filter_by(id=task_a.workflow_id).first()
        assert task_a.status.value == "success"
        assert task_celery_error.status.value == "error"
        assert workflow.status.value == "error"
def test_return_values(app, create_builder):
    """Task return values of every JSON-serializable shape are persisted."""
    workflow, builder = create_builder("example", "RETURN_VALUES", {})
    # The AsyncResult is not needed here (results are read back from the DB),
    # so the unused `result =` binding was dropped.
    builder.run()
    time.sleep(0.5)
    with app.app_context():
        tasks = {t.key: t.result for t in Task.query.all()}
        assert tasks["STR"] == "return_value"
        assert tasks["INT"] == 1234
        assert tasks["LIST"] == ["jack", "sape", "guido"]
        assert tasks["NONE"] is None
        assert tasks["DICT"] == {"foo": "bar"}
        assert tasks["NESTED"] == {
            "jack": 4098,
            "sape": 4139,
            "guido": 4127,
            "nested": {"foo": "bar"},
            "none": None,
            "list": ["jack", "sape", "guido"],
        }
def test_return_exception(app, create_builder):
    """A raised exception is stored as an {exception, traceback} dict."""
    workflow, builder = create_builder("example", "RETURN_EXCEPTION", {})
    # The AsyncResult is not needed here (results are read back from the DB),
    # so the unused `result =` binding was dropped.
    builder.run()
    time.sleep(0.5)
    with app.app_context():
        tasks = {t.key: t.result for t in Task.query.all()}
        assert tasks["STR"] == "return_value"
        assert list(tasks["TASK_ERROR"].keys()) == ["exception", "traceback"]
        assert tasks["TASK_ERROR"]["exception"] == "division by zero"
        assert tasks["TASK_ERROR"]["traceback"].startswith(
            "Traceback (most recent call last)"
        )
        assert "ZeroDivisionError: division by zero" in tasks["TASK_ERROR"]["traceback"]
def test_build_celery_schedule_float_with_payload():
    """A float `schedule` alongside a payload still yields the float interval."""
    conf = {"payload": {}, "schedule": 30.0}
    schedule_name, schedule_value = build_celery_schedule("workflow_schedule_float", conf)
    assert schedule_name == "30.0"
    assert schedule_value == 30.0
def test_build_celery_schedule_float():
    """A bare float `schedule` yields its string name and the float interval."""
    conf = {"schedule": 30.0}
    schedule_name, schedule_value = build_celery_schedule("workflow_schedule_float", conf)
    assert schedule_name == "30.0"
    assert schedule_value == 30.0
# NOTE(review): this parametrization maps the 3rd cron field to day_of_week,
# while test_build_celery_crontab further down maps it to day_of_month
# (standard cron order). Confirm which order build_celery_schedule parses.
@pytest.mark.parametrize(
    "test_input, expected",
    [
        ("1 * * * *", crontab(minute="1", hour="*", day_of_week="*", day_of_month="*", month_of_year="*")),
        ("* 1 * * *", crontab(minute="*", hour="1", day_of_week="*", day_of_month="*", month_of_year="*")),
        ("* * 1 * *", crontab(minute="*", hour="*", day_of_week="1", day_of_month="*", month_of_year="*")),
        ("* * * 1 *", crontab(minute="*", hour="*", day_of_week="*", day_of_month="1", month_of_year="*")),
        ("* * * * 1", crontab(minute="*", hour="*", day_of_week="*", day_of_month="*", month_of_year="1")),
        (
            "*/10 */11 */12 */13 */14",
            crontab(minute="*/10", hour="*/11", day_of_week="*/12", day_of_month="*/13", month_of_year="*/14")
        )
    ]
)
def test_build_celery_schedule_crontab(test_input, expected):
    """Each 5-field `schedule` crontab string maps onto the expected crontab."""
    cron_schedule = {"schedule": test_input}
    assert (test_input, expected) == build_celery_schedule("workflow_crontab", cron_schedule)
def test_build_celery_interval():
    """A float under the `interval` key behaves like a float `schedule`."""
    conf = {"interval": 30.0}
    schedule_name, schedule_value = build_celery_schedule("workflow_schedule_float", conf)
    assert schedule_name == "30.0"
    assert schedule_value == 30.0
@pytest.mark.parametrize(
    "test_input, expected",
    [
        # Standard cron field order: minute hour day-of-month month day-of-week.
        ("1 * * * *", crontab(minute="1", hour="*", day_of_month="*", month_of_year="*", day_of_week="*")),
        ("* 1 * * *", crontab(minute="*", hour="1", day_of_month="*", month_of_year="*", day_of_week="*")),
        ("* * 1 * *", crontab(minute="*", hour="*", day_of_month="1", month_of_year="*", day_of_week="*")),
        ("* * * 1 *", crontab(minute="*", hour="*", day_of_month="*", month_of_year="1", day_of_week="*")),
        ("* * * * 1", crontab(minute="*", hour="*", day_of_month="*", month_of_year="*", day_of_week="1")),
        (
            "*/10 */11 */12 */13 */14",
            crontab(minute="*/10", hour="*/11", day_of_month="*/12", month_of_year="*/13", day_of_week="*/14")
        )
    ]
)
def test_build_celery_crontab(test_input, expected):
    """Each 5-field `crontab` string maps onto the expected celery crontab."""
    cron_schedule = {"crontab": test_input}
    assert (test_input, expected) == build_celery_schedule("workflow_crontab", cron_schedule)
def test_build_celery_invalid_crontab():
    """Reject a crontab string that is one field short (4 of 5)."""
    truncated = {"crontab": "* * * *"}  # missing one element of the crontab syntax
    with pytest.raises(WorkflowSyntaxError):
        build_celery_schedule("workflow_invalid_crontab", truncated)
def test_build_celery_invalid_schedule():
    """Reject a crontab whose day-of-week field is out of range (12)."""
    out_of_range = {"crontab": "* * * * 12"}
    with pytest.raises(WorkflowSyntaxError):
        build_celery_schedule("workflow_invalid_crontab", out_of_range)
def test_build_celery_invalid_periodic_key():
    """Reject periodic configuration stored under an unknown key."""
    unknown_key_conf = {"non_valid_key": "* * * * *"}
    with pytest.raises(WorkflowSyntaxError):
        build_celery_schedule("workflow_invalid_key", unknown_key_conf)
| tests/test_workflows.py | 14,451 | Canvas has been built Tasks added in DB Tasks executed in Celery DB rows status updated Canvas has been built Tasks added in DB Tasks executed in Celery DB rows status updated Canvas has been built Tasks added in DB Tasks executed in Celery DB rows status updated Canvas has been built Tasks added in DB Tasks executed in Celery DB rows status updated Canvas has been built Tasks added in DB Tasks executed in Celery DB rows status updated Canvas has been built Tasks added in DB Tasks executed in Celery DB rows status updated Tasks executed in Celery DB rows status updated Tasks executed in Celery DB rows status updated missing one element on the crontab syntax | 665 | en | 0.958352 |
import logging
import time
from abc import abstractmethod
from enum import Enum
from typing import Dict, Callable, Any, List
from schema import Schema
import sqlalchemy
from sqlalchemy.engine import ResultProxy
from sqlalchemy.orm import Query
from sqlalchemy.schema import Table
from sqlalchemy.engine.base import Engine
from sqlalchemy.engine.base import Connection
from contextlib import contextmanager
from flask_app.utilities.DataInterfaces import ConnectionOptions
logger = logging.getLogger(__name__)
class SqlDialect(Enum):
    """SQL dialects this interface layer knows about."""

    postgres = "postgres"
    sqlite = "sqlite"

    @classmethod
    def has_value(cls, value) -> bool:
        """Return True when `value` matches one of the member values."""
        return value in {member.value for member in cls}
# TODO: Connection Factory
class SqlConnectionOptions(ConnectionOptions):
    """Base container for SQL connection parameters; dialects subclass it."""

    @staticmethod
    def factory(sql_connection_type: SqlDialect, **kwargs) -> 'SqlConnectionOptions':
        """
        Function signatures for factory method
        Postgres: (dialect: SqlDialects, host: str, port: int, username: str, password: str,
        database_name: str, timeout: int = None)
        """
        dialect_factory = SqlConnectionFactories.get_factory(sql_connection_type)
        return dialect_factory(**kwargs)

    def __init__(self, dialect: SqlDialect, host: str, port: int, username: str,
                 password: str, database_name: str, timeout_s: int = None):
        """Store the raw connection parameters common to every dialect."""
        self.dialect = dialect
        self.host = host
        self.port = port
        self.username = username
        self.password = password
        self.database_name = database_name
        self.timeout = timeout_s
        # Dialect-specific subclasses are responsible for filling this in.
        self.connection_string = None

    @classmethod
    @abstractmethod
    def schema_validate_arguments(cls, schema: Schema, parameters: Dict) -> Dict:
        """Validate factory kwargs against a schema; implemented per dialect."""
        pass
class PostgresConnectionOptions(SqlConnectionOptions):
    """Connection options for PostgreSQL targets."""

    # Required keys for the factory; extra keys are tolerated.
    _factory_schema: Schema = Schema(
        {
            'host': str,
            'port': int,
            'username': str,
            'password': str,
            'database_name': str
            # 'timeout': int
        },
        ignore_extra_keys=True
    )

    def __init__(self,
                 dialect: SqlDialect,
                 host: str,
                 port: int,
                 username: str,
                 password: str,
                 database_name: str,
                 timeout_s: int = None) -> None:
        """Build postgres options and the SQLAlchemy connection URL."""
        super().__init__(dialect, host, port, username, password, database_name, timeout_s)
        # postgresql://user:password@host:port/database
        self.connection_string = (
            f"postgresql://{self.username}:{self.password}"
            f"@{self.host}:{self.port}/{self.database_name}"
        )

    @classmethod
    def schema_validate_arguments(cls, schema: Schema, parameters: Dict) -> Dict:
        """Return `parameters` validated against `schema` (raises on mismatch)."""
        return schema.validate(parameters)

    @classmethod
    def factory(cls, **kwargs) -> 'PostgresConnectionOptions':
        """Create options from keyword arguments after schema validation."""
        valid = cls.schema_validate_arguments(cls._factory_schema, kwargs)
        return cls(
            SqlDialect.postgres,
            valid['host'],
            valid['port'],
            valid['username'],
            valid['password'],
            valid['database_name'],
            valid.get('timeout'),
        )
class SqlConnectionFactories:
    """Registry mapping each SqlDialect to its options-factory callable."""
    _factories: Dict[SqlDialect, Callable] = {
        SqlDialect.postgres: PostgresConnectionOptions.factory
        # , SqlDialects.sqlite: SqliteConnectionOptions.factory
    }
    @classmethod
    def get_factory(cls, factory_type: SqlDialect) -> Callable:
        """Return the factory for `factory_type`; KeyError if unsupported."""
        return cls._factories[factory_type]
class SqlInterface:
    """SQL methods to tack onto SQL based librarians"""
    def __init__(self, connection_options: SqlConnectionOptions) -> None:
        """Store default options; engine/metadata are built lazily per connection."""
        self.connection_options = connection_options
        # Populated by setup_pre_connection() before the first query.
        self.sql_engine: Engine = None
        self.sql_metadata: sqlalchemy.MetaData = None
    def update(self, schema: str, table: str, column: str, value: Any, sql_connection: Connection) -> None:
        """Update a column value in schema.table. Not implemented yet."""
        raise NotImplementedError
    def select(self, schema: str, table: str, sql_connection: Connection) -> List[Dict[str, Any]]:
        """Return every row of schema.table as a list of column->value dicts."""
        sql_table: Table = self._get_table_reflection(schema, table)
        return self._execute_query(sql_connection, sql_table.select())
    def insert(self, schema: str, table: str, values: List[Dict[str, Any]], sql_connection: Connection) -> None:
        """Insert rows (list of column->value dicts) into schema.table."""
        # NOTE(review): insert(values=...) is the legacy SQLAlchemy spelling;
        # confirm the pinned SQLAlchemy version still supports it.
        sql_table: Table = self._get_table_reflection(schema, table)
        insert_query = sql_table.insert(values=values)
        self._execute_query(sql_connection, insert_query)
    def setup_pre_connection(self, connection_options) -> None:
        """Create the engine and bind metadata before opening a connection."""
        self._build_engine(connection_options)
        self._metadata_reflection(self.sql_engine)
    def close_connection(self, sql_connection: Connection) -> None:
        """Close the connection if one was actually opened (None-safe)."""
        if sql_connection is not None:
            sql_connection.close()
    @contextmanager
    def managed_connection(self, connection_options: SqlConnectionOptions = None) -> Connection:
        """Context manager yielding an open Connection, closed on exit.

        Falls back to the options given at construction time when
        `connection_options` is None.
        """
        if connection_options is None:
            connection_options = self.connection_options
        self.setup_pre_connection(connection_options)
        connection: Connection = None
        try:
            connection = self.sql_engine.connect()
            yield connection
        finally:
            # Runs even when connect() raised, in which case connection is None.
            self.close_connection(connection)
    # SQLAlchemy internal methods
    def _build_engine(self, connection_options: SqlConnectionOptions) -> None:
        """Create the SQLAlchemy engine from the options' connection string."""
        self.sql_engine = sqlalchemy.create_engine(connection_options.connection_string)
    def _metadata_reflection(self, sql_engine) -> None:
        """Bind a fresh MetaData object to the given engine."""
        self.sql_metadata = sqlalchemy.MetaData(bind=sql_engine)
    def _get_table_reflection(self, schema: str, table: str) -> Table:
        """Reflect schema.table from the database (autoload reads live columns)."""
        return Table(table, self.sql_metadata, schema=schema, autoload=True)
    def _validate_write_schema(self, table: Table, values: Dict[str, Any]) -> bool:
        """Return True when `values` keys exactly match the table columns.

        NOTE(review): this compares key *order* as well as names; confirm
        callers always build `values` in column order.
        """
        table_columns = list(dict(table.columns).keys())
        return list(values.keys()) == table_columns
    def _parse_result_proxy(self, result) -> List[Dict[str, Any]]:
        """Materialize a ResultProxy into a list of plain dicts."""
        return list(map(lambda x: dict(x), result))
    def _execute_query(self, sql_connection: Connection, sql_query: Query) -> List[Dict[str, Any]]:
        """Execute `sql_query`, returning rows as dicts (None for non-SELECT).

        Failures are logged and re-raised; execution time is always logged.
        """
        start_time: float = time.time()
        return_result: List[Dict[str, Any]] = None
        try:
            result: ResultProxy = sql_connection.execute(sql_query)
            # Only SELECT-like statements produce rows to materialize.
            if result.returns_rows:
                return_result: List[Dict[str, Any]] = self._parse_result_proxy(result)
        except Exception as e:
            logger.info(f"SQL query failed: {e}")
            logger.debug(f"SQL query {str(sql_query.compile())}, connection: {sql_connection.engine} failed with exception {e}")
            raise e
        finally:
            # Timing is logged for both the success and failure paths.
            end_time: float = time.time()
            query_time: float = end_time - start_time
            logger.info(f"SQL execute time: {query_time}")
            logger.debug(
                f"SQL execute time: {query_time}, query: {str(sql_query.compile())}, connection: {sql_connection.engine}"
            )
        return return_result
| flask_app/utilities/DataInterfaces/SqlInterface.py | 6,994 | SQL methods to tack onto SQL based librarians
Function signatures for factory method
Postgres: (dialect: SqlDialects, host: str, port: int, username: str, password: str,
database_name: str, timeout: int = None)
TODO: Connection Factory 'timeout': int , SqlDialects.sqlite: SqliteConnectionOptions.factory SQLAlchemy internal methods | 335 | en | 0.519816 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.