hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a01ae6d6183dd969db97b7db1b546eb6571cc76
| 4,389
|
py
|
Python
|
Lectures/Lecture-08/HW-Shokouhi-Ahmadiyan/HW1.py
|
khalooei/TMLDL2018
|
370b76708d4c0b8b14ed22e321b2c285268c99ef
|
[
"MIT"
] | 3
|
2018-08-06T15:34:58.000Z
|
2022-02-11T14:19:05.000Z
|
Lectures/Lecture-08/HW-Shokouhi-Ahmadiyan/HW1.py
|
khalooei/TMLDL2018
|
370b76708d4c0b8b14ed22e321b2c285268c99ef
|
[
"MIT"
] | null | null | null |
Lectures/Lecture-08/HW-Shokouhi-Ahmadiyan/HW1.py
|
khalooei/TMLDL2018
|
370b76708d4c0b8b14ed22e321b2c285268c99ef
|
[
"MIT"
] | 3
|
2018-08-06T15:35:01.000Z
|
2020-08-08T07:53:07.000Z
|
import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
class Neural_Network(object):
    """A 784-100000-1 fully connected network with sigmoid units.

    Weights are drawn from a standard normal distribution. The
    intermediate activations (z2, a2, z3) are cached on the instance
    by ``forward`` so the backward pass can reuse them.
    """

    def __init__(self):
        # Layer sizes (input matches a flattened 28x28 MNIST image).
        self.inputLayerSize = 784
        self.outputLayerSize = 1
        self.hiddenLayerSize = 100000
        # Weight matrices, initialised from N(0, 1).
        self.W1 = np.random.randn(self.inputLayerSize, self.hiddenLayerSize)
        self.W2 = np.random.randn(self.hiddenLayerSize, self.outputLayerSize)

    def forward(self, X):
        """Propagate inputs through the network and return yHat."""
        self.z2 = X.dot(self.W1)           # hidden pre-activation
        self.a2 = self.sigmoid(self.z2)    # hidden activation
        self.z3 = self.a2.dot(self.W2)     # output pre-activation
        return self.sigmoid(self.z3)

    def sigmoid(self, z):
        """Elementwise logistic function; accepts scalars or arrays."""
        return 1.0 / (1.0 + np.exp(-z))

    def sigmoidPrime(self, z):
        """Derivative of the logistic function evaluated at z."""
        e = np.exp(-z)
        return e / (1.0 + e) ** 2

    def costFunction(self, X, y):
        """Half sum-of-squares error of the current weights on (X, y)."""
        self.yHat = self.forward(X)
        return 0.5 * sum((y - self.yHat) ** 2)

    def costFunctionPrime(self, X, y):
        """Gradients of the cost with respect to W1 and W2."""
        self.yHat = self.forward(X)
        # Output-layer error term, then backpropagate to the hidden layer.
        d3 = np.multiply(-(y - self.yHat), self.sigmoidPrime(self.z3))
        dJdW2 = self.a2.T.dot(d3)
        d2 = d3.dot(self.W2.T) * self.sigmoidPrime(self.z2)
        dJdW1 = X.T.dot(d2)
        return dJdW1, dJdW2

    # --- helpers for optimisers that want a flat parameter vector ---
    def getParams(self):
        """Return W1 and W2 unrolled into a single flat vector."""
        return np.concatenate((self.W1.ravel(), self.W2.ravel()))

    def setParams(self, params):
        """Restore W1 and W2 from a single flat parameter vector."""
        w1_len = self.hiddenLayerSize * self.inputLayerSize
        w2_len = self.hiddenLayerSize * self.outputLayerSize
        self.W1 = params[:w1_len].reshape(
            (self.inputLayerSize, self.hiddenLayerSize))
        self.W2 = params[w1_len:w1_len + w2_len].reshape(
            (self.hiddenLayerSize, self.outputLayerSize))

    def computeGradients(self, X, y):
        """Return the cost gradient as one flat vector (same layout as getParams)."""
        dJdW1, dJdW2 = self.costFunctionPrime(X, y)
        return np.concatenate((dJdW1.ravel(), dJdW2.ravel()))
class trainer(object):
    """Fits a network's weights with scipy's L-BFGS-B optimiser.

    The wrapped network must expose getParams/setParams,
    costFunction and computeGradients.
    """

    def __init__(self, N):
        # Keep a local reference to the network being trained.
        self.N = N

    def callbackF(self, params):
        """Record the cost after every optimiser iteration."""
        self.N.setParams(params)
        self.J.append(self.N.costFunction(self.X, self.y))

    def costFunctionWrapper(self, params, X, y):
        """Return (cost, gradient) for the given flat parameter vector."""
        self.N.setParams(params)
        return self.N.costFunction(X, y), self.N.computeGradients(X, y)

    def train(self, X, y):
        """Optimise the network weights on (X, y) for up to 200 iterations."""
        # Stash the data for the per-iteration callback.
        self.X, self.y = X, y
        # Cost history, one entry per optimiser iteration.
        self.J = []
        result = optimize.minimize(
            self.costFunctionWrapper,
            self.N.getParams(),
            jac=True,                       # wrapper returns (cost, grad)
            method='L-BFGS-B',
            args=(X, y),
            options={'maxiter': 200, 'disp': False},
            callback=self.callbackF,
        )
        self.N.setParams(result.x)
        self.optimizationResults = result
import mnist

if __name__ == '__main__':
    # Load the MNIST arrays and report their raw shapes.
    x_train, t_train, x_test, t_test = mnist.load()
    for split in (x_train, t_train, x_test, t_test):
        print(split.shape)

    # Train on a small subset so the demo finishes quickly.
    train_number = 100
    X = x_train[0:train_number, :]
    y = t_train[0:train_number]
    y = y.reshape((len(y), 1))

    NN = Neural_Network()
    print('y is ', y[0:10])
    yHat = NN.forward(X)
    print('yHat before train ', yHat[0:10])

    # Fit the network, then show rounded predictions on the same data.
    T = trainer(NN)
    T.train(X, y)
    yHat = np.round(NN.forward(X))
    print('yHat after is ', yHat[0:10])
| 31.804348
| 98
| 0.603554
|
4a01b0088fe1fe2bc9f580a032bb15deb263424f
| 25,279
|
py
|
Python
|
ludwig/encoders/h3_encoders.py
|
markhsia/ludwig
|
4bfb711b169ca4f54a75e1b75b491d37d5971b15
|
[
"Apache-2.0"
] | 1
|
2021-07-09T03:17:41.000Z
|
2021-07-09T03:17:41.000Z
|
ludwig/encoders/h3_encoders.py
|
wuqundong520/ludwig
|
13c35ec79f930e7dac295e642d92abe82f8c8046
|
[
"Apache-2.0"
] | null | null | null |
ludwig/encoders/h3_encoders.py
|
wuqundong520/ludwig
|
13c35ec79f930e7dac295e642d92abe82f8c8046
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import tensorflow as tf
from tensorflow.keras.layers import Layer
from ludwig.modules.embedding_modules import Embed
from ludwig.modules.fully_connected_modules import FCStack
from ludwig.modules.initializer_modules import get_initializer
from ludwig.modules.recurrent_modules import RecurrentStack
from ludwig.modules.reduction_modules import SequenceReducer
logger = logging.getLogger(__name__)
class H3Embed(Layer):
    """Encoder for H3 spatial indexes.

    Embeds each component of a [batch x 19] H3 index vector (mode,
    edge, resolution, base cell and 15 child cells) separately,
    masks out child cells beyond the index resolution, reduces the
    resulting sequence with ``reduce_output`` and applies a stack of
    fully connected layers.
    """

    def __init__(
            self,
            embedding_size=10,
            embeddings_on_cpu=False,
            fc_layers=None,
            num_fc_layers=0,
            fc_size=10,
            use_bias=True,
            weights_initializer='glorot_uniform',
            bias_initializer='zeros',
            weights_regularizer=None,
            bias_regularizer=None,
            activity_regularizer=None,
            # weights_constraint=None,
            # bias_constraint=None,
            norm=None,
            norm_params=None,
            activation='relu',
            dropout=0,
            reduce_output='sum',
            **kwargs
    ):
        """
        :param embedding_size: it is the maximum embedding size, the actual
               size will be `min(vocabulary_size, embedding_size)`
               for `dense` representations and exactly `vocabulary_size`
               for the `sparse` encoding, where `vocabulary_size` is
               the number of different strings appearing in the training set
               in the column the feature is named after (plus 1 for `<UNK>`).
        :type embedding_size: Integer
        :param embeddings_on_cpu: by default embeddings matrices are stored
               on GPU memory if a GPU is used, as it allows
               for faster access, but in some cases the embedding matrix
               may be really big and this parameter forces the placement
               of the embedding matrix in regular memory and the CPU is used
               to resolve them, slightly slowing down the process
               as a result of data transfer between CPU and GPU memory.
        :param dropout: determines if there should be a dropout layer before
               returning the encoder output.
        :type dropout: Boolean
        :param initializer: the initializer to use. If `None`, the default
               initializer of each variable is used (`glorot_uniform`
               in most cases). Options are: `constant`, `identity`, `zeros`,
               `ones`, `orthogonal`, `normal`, `uniform`,
               `truncated_normal`, `variance_scaling`, `glorot_normal`,
               `glorot_uniform`, `xavier_normal`, `xavier_uniform`,
               `he_normal`, `he_uniform`, `lecun_normal`, `lecun_uniform`.
               Alternatively it is possible to specify a dictionary with
               a key `type` that identifies the type of initializer and
               other keys for its parameters, e.g.
               `{type: normal, mean: 0, stddev: 0}`.
               To know the parameters of each initializer, please refer to
               TensorFlow's documentation.
        :type initializer: str
        :param regularize: if `True` the embedding weights are added to
               the set of weights that get regularized by a regularization
               loss (if the `regularization_lambda` in `training`
               is greater than 0).
        :type regularize: Boolean
        """
        super(H3Embed, self).__init__()
        logger.debug(' {}'.format(self.name))

        self.embedding_size = embedding_size
        self.reduce_output = reduce_output
        self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)

        # One embedding table per H3 component. Vocabulary sizes match the
        # value range of each component: 3 modes, 7 edges, 16 resolutions,
        # 122 base cells and 8 possible child-cell digits.
        logger.debug(' mode Embed')
        self.embed_mode = Embed(
            [str(i) for i in range(3)],
            embedding_size,
            representation='dense',
            embeddings_trainable=True,
            pretrained_embeddings=None,
            force_embedding_size=True,
            embeddings_on_cpu=embeddings_on_cpu,
            dropout=dropout,
            embedding_initializer=weights_initializer,
            embedding_regularizer=weights_regularizer
        )

        logger.debug(' edge Embed')
        self.embed_edge = Embed(
            [str(i) for i in range(7)],
            embedding_size,
            representation='dense',
            embeddings_trainable=True,
            pretrained_embeddings=None,
            force_embedding_size=True,
            embeddings_on_cpu=embeddings_on_cpu,
            dropout=dropout,
            embedding_initializer=weights_initializer,
            embedding_regularizer=weights_regularizer
        )

        logger.debug(' resolution Embed')
        self.embed_resolution = Embed(
            [str(i) for i in range(16)],
            embedding_size,
            representation='dense',
            embeddings_trainable=True,
            pretrained_embeddings=None,
            force_embedding_size=True,
            embeddings_on_cpu=embeddings_on_cpu,
            dropout=dropout,
            embedding_initializer=weights_initializer,
            embedding_regularizer=weights_regularizer
        )

        logger.debug(' base cell Embed')
        self.embed_base_cell = Embed(
            [str(i) for i in range(122)],
            embedding_size,
            representation='dense',
            embeddings_trainable=True,
            pretrained_embeddings=None,
            force_embedding_size=True,
            embeddings_on_cpu=embeddings_on_cpu,
            dropout=dropout,
            embedding_initializer=weights_initializer,
            embedding_regularizer=weights_regularizer
        )

        logger.debug(' cells Embed')
        self.embed_cells = Embed(
            [str(i) for i in range(8)],
            embedding_size,
            representation='dense',
            embeddings_trainable=True,
            pretrained_embeddings=None,
            force_embedding_size=True,
            embeddings_on_cpu=embeddings_on_cpu,
            dropout=dropout,
            embedding_initializer=weights_initializer,
            embedding_regularizer=weights_regularizer
        )

        logger.debug(' FCStack')
        self.fc_stack = FCStack(
            layers=fc_layers,
            num_layers=num_fc_layers,
            default_fc_size=fc_size,
            default_use_bias=use_bias,
            default_weights_initializer=weights_initializer,
            default_bias_initializer=bias_initializer,
            default_weights_regularizer=weights_regularizer,
            default_bias_regularizer=bias_regularizer,
            default_activity_regularizer=activity_regularizer,
            # default_weights_constraint=weights_constraint,
            # default_bias_constraint=bias_constraint,
            default_norm=norm,
            default_norm_params=norm_params,
            default_activation=activation,
            default_dropout=dropout,
        )

    def call(
            self,
            inputs,
            training=None,
            mask=None
    ):
        """
        :param inputs: The input vector fed into the encoder.
               Shape: [batch x 19], type tf.int8
        :type inputs: Tensor
        :param training: bool specifying if in training mode (important for dropout)
        :type training: bool
        :param mask: bool tensor encoding masked timesteps in the input
        :type mask: bool
        """
        input_vector = tf.cast(inputs, tf.int32)

        # ================ Embeddings ================
        # Column layout of the H3 index: 0=mode, 1=edge, 2=resolution,
        # 3=base cell, 4..18 = child cells.
        embedded_mode = self.embed_mode(
            input_vector[:, 0:1],
            training=training,
            mask=mask
        )
        embedded_edge = self.embed_edge(
            input_vector[:, 1:2],
            training=training,
            mask=mask
        )
        embedded_resolution = self.embed_resolution(
            input_vector[:, 2:3],
            training=training,
            mask=mask
        )
        embedded_base_cell = self.embed_base_cell(
            input_vector[:, 3:4],
            training=training,
            mask=mask
        )
        embedded_cells = self.embed_cells(
            input_vector[:, 4:],
            training=training,
            mask=mask
        )

        # ================ Masking ================
        # Zero out embeddings of child cells finer than the index's
        # resolution (only the first `resolution` of the 15 cells count).
        resolution = input_vector[:, 2]
        mask = tf.cast(
            tf.expand_dims(tf.sequence_mask(resolution, 15),
                           -1),
            dtype=tf.float32
        )
        # NOTE(review): `mask` is rebound here, so the float cell mask
        # (not the caller-provided boolean mask) is what reaches fc_stack
        # below — confirm this is intentional.
        masked_embedded_cells = embedded_cells * mask

        # ================ Reduce ================
        # Concatenate all component embeddings along the sequence axis,
        # then reduce per `reduce_output`.
        concatenated = tf.concat(
            [embedded_mode, embedded_edge, embedded_resolution,
             embedded_base_cell, masked_embedded_cells],
            axis=1)
        hidden = self.reduce_sequence(concatenated)

        # ================ FC Stack ================
        # logger.debug(' flatten hidden: {0}'.format(hidden))
        hidden = self.fc_stack(
            hidden,
            training=training,
            mask=mask
        )

        return {'encoder_output': hidden}
class H3WeightedSum(Layer):
    """Encoder for H3 spatial indexes.

    Runs H3Embed without reduction, combines the 19 per-component
    embeddings with a learned (optionally softmax-normalized)
    weighted sum, then applies a stack of fully connected layers.
    """

    def __init__(
            self,
            embedding_size=10,
            embeddings_on_cpu=False,
            should_softmax=False,
            fc_layers=None,
            num_fc_layers=0,
            fc_size=10,
            use_bias=True,
            weights_initializer='glorot_uniform',
            bias_initializer='zeros',
            weights_regularizer=None,
            bias_regularizer=None,
            activity_regularizer=None,
            # weights_constraint=None,
            # bias_constraint=None,
            norm=None,
            norm_params=None,
            activation='relu',
            dropout=0,
            **kwargs
    ):
        """
        :param embedding_size: it is the maximum embedding size, the actual
               size will be `min(vocabulary_size, embedding_size)`
               for `dense` representations and exactly `vocabulary_size`
               for the `sparse` encoding, where `vocabulary_size` is
               the number of different strings appearing in the training set
               in the column the feature is named after (plus 1 for `<UNK>`).
        :type embedding_size: Integer
        :param embeddings_on_cpu: by default embeddings matrices are stored
               on GPU memory if a GPU is used, as it allows
               for faster access, but in some cases the embedding matrix
               may be really big and this parameter forces the placement
               of the embedding matrix in regular memory and the CPU is used
               to resolve them, slightly slowing down the process
               as a result of data transfer between CPU and GPU memory.
        :param should_softmax: if `True` the aggregation weights are passed
               through a softmax before the weighted sum.
        :type should_softmax: Boolean
        :param dropout: determines if there should be a dropout layer before
               returning the encoder output.
        :type dropout: Boolean
        :param initializer: the initializer to use. If `None`, the default
               initializer of each variable is used (`glorot_uniform`
               in most cases). Options are: `constant`, `identity`, `zeros`,
               `ones`, `orthogonal`, `normal`, `uniform`,
               `truncated_normal`, `variance_scaling`, `glorot_normal`,
               `glorot_uniform`, `xavier_normal`, `xavier_uniform`,
               `he_normal`, `he_uniform`, `lecun_normal`, `lecun_uniform`.
               Alternatively it is possible to specify a dictionary with
               a key `type` that identifies the type of initializer and
               other keys for its parameters, e.g.
               `{type: normal, mean: 0, stddev: 0}`.
               To know the parameters of each initializer, please refer to
               TensorFlow's documentation.
        :type initializer: str
        :param regularize: if `True` the embedding weights are added to
               the set of weights that get regularized by a regularization
               loss (if the `regularization_lambda` in `training`
               is greater than 0).
        :type regularize: Boolean
        """
        super(H3WeightedSum, self).__init__()
        logger.debug(' {}'.format(self.name))

        self.should_softmax = should_softmax
        # Sum-reduce after weighting so the result is a weighted sum.
        self.reduce_sequence = SequenceReducer(reduce_mode='sum')

        # Component embeddings are kept as a sequence (reduce_output=None)
        # so that each of the 19 components can be weighted individually.
        self.h3_embed = H3Embed(
            embedding_size,
            embeddings_on_cpu=embeddings_on_cpu,
            dropout=dropout,
            weights_initializer=weights_initializer,
            bias_initializer=bias_initializer,
            weights_regularizer=weights_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            # weights_constraint=weights_constraint,
            # bias_constraint=bias_constraint,
            reduce_output=None
        )

        # One learnable scalar weight per H3 component (19 in total).
        self.aggregation_weights = tf.Variable(
            get_initializer(weights_initializer)([19, 1])
        )

        logger.debug(' FCStack')
        self.fc_stack = FCStack(
            layers=fc_layers,
            num_layers=num_fc_layers,
            default_fc_size=fc_size,
            default_use_bias=use_bias,
            default_weights_initializer=weights_initializer,
            default_bias_initializer=bias_initializer,
            default_weights_regularizer=weights_regularizer,
            default_bias_regularizer=bias_regularizer,
            default_activity_regularizer=activity_regularizer,
            # default_weights_constraint=weights_constraint,
            # default_bias_constraint=bias_constraint,
            default_norm=norm,
            default_norm_params=norm_params,
            default_activation=activation,
            default_dropout=dropout,
        )

    def call(
            self,
            inputs,
            training=None,
            mask=None
    ):
        """
        :param inputs: The input vector fed into the encoder.
               Shape: [batch x 19], type tf.int8
        :type inputs: Tensor
        :param training: bool specifying if in training mode (important for dropout)
        :type training: bool
        :param mask: bool tensor encoding masked timesteps in the input
        :type mask: bool
        """
        # ================ Embeddings ================
        input_vector = inputs
        # Per-component embeddings, not reduced (see __init__).
        embedded_h3 = self.h3_embed(
            input_vector,
            training=training,
            mask=mask
        )

        # ================ Weighted Sum ================
        # Optionally normalize the 19 aggregation weights to sum to 1.
        if self.should_softmax:
            weights = tf.nn.softmax(self.aggregation_weights)
        else:
            weights = self.aggregation_weights
        hidden = self.reduce_sequence(embedded_h3['encoder_output'] * weights)

        # ================ FC Stack ================
        # logger.debug(' flatten hidden: {0}'.format(hidden))
        hidden = self.fc_stack(
            hidden,
            training=training,
            mask=mask
        )

        return {'encoder_output': hidden}
class H3RNN(Layer):
    """Encoder for H3 spatial indexes.

    Embeds the 19 components of an H3 index with H3Embed (without
    reduction) and runs the resulting sequence through a stack of
    recurrent layers.
    """

    def __init__(
            self,
            embedding_size=10,
            embeddings_on_cpu=False,
            num_layers=1,
            state_size=10,
            cell_type='rnn',
            bidirectional=False,
            activation='tanh',
            recurrent_activation='sigmoid',
            use_bias=True,
            unit_forget_bias=True,
            weights_initializer='glorot_uniform',
            recurrent_initializer='orthogonal',
            bias_initializer='zeros',
            weights_regularizer=None,
            recurrent_regularizer=None,
            bias_regularizer=None,
            activity_regularizer=None,
            dropout=0.0,
            recurrent_dropout=0.0,
            reduce_output='last',
            **kwargs
    ):
        """
        :param embedding_size: it is the maximum embedding size, the actual
               size will be `min(vocabulary_size, embedding_size)`
               for `dense` representations and exactly `vocabulary_size`
               for the `sparse` encoding, where `vocabulary_size` is
               the number of different strings appearing in the training set
               in the column the feature is named after (plus 1 for `<UNK>`).
        :type embedding_size: Integer
        :param embeddings_on_cpu: by default embeddings matrices are stored
               on GPU memory if a GPU is used, as it allows
               for faster access, but in some cases the embedding matrix
               may be really big and this parameter forces the placement
               of the embedding matrix in regular memory and the CPU is used
               to resolve them, slightly slowing down the process
               as a result of data transfer between CPU and GPU memory.
        :param num_layers: the number of stacked recurrent layers.
        :type num_layers: Integer
        :param cell_type: the type of recurrent cell to use.
               Available values are: `rnn`, `lstm`, `lstm_block`, `lstm`,
               `ln`, `lstm_cudnn`, `gru`, `gru_block`, `gru_cudnn`.
               For reference about the differences between the cells please
               refer to TensorFlow's documentation. We suggest to use the
               `block` variants on CPU and the `cudnn` variants on GPU
               because of their increased speed.
        :type cell_type: str
        :param state_size: the size of the state of the rnn.
        :type state_size: Integer
        :param bidirectional: if `True` two recurrent networks will perform
               encoding in the forward and backward direction and
               their outputs will be concatenated.
        :type bidirectional: Boolean
        :param activation: Activation function to use.
        :type activation: string
        :param recurrent_activation: Activation function to use for the
               recurrent step.
        :type recurrent_activation: string
        :param use_bias: bool determines where to use a bias vector
        :type use_bias: bool
        :param unit_forget_bias: if True add 1 to the bias forget gate at
               initialization.
        :type unit_forget_bias: bool
        :param weights_initializer: Initializer for the weights (aka kernel)
               matrix
        :type weights_initializer: string
        :param recurrent_initializer: Initializer for the recurrent weights
               matrix
        :type recurrent_initializer: string
        :param bias_initializer: Initializer for the bias vector
        :type bias_initializer: string
        :param weights_regularizer: regularizer applied to the weights
               (kernel) matrix
        :type weights_regularizer: string
        :param recurrent_regularizer: Regularizer for the recurrent weights
               matrix
        :type recurrent_regularizer: string
        :param bias_regularizer: regularizer function applied to bias vector.
        :type bias_regularizer: string
        :param activity_regularizer: Regularizer applied to the output of the
               layer (activation)
        :type activity_regularizer: string
        :param dropout: determines if there should be a dropout layer before
               returning the encoder output.
        :type dropout: float
        :param recurrent_dropout: Float between 0.0 and 1.0. Fraction of
               the units to drop for the linear transformation of the
               recurrent state.
        :type recurrent_dropout: float
        :param initializer: the initializer to use. If `None` it uses
               `glorot_uniform`. Options are: `constant`, `identity`,
               `zeros`, `ones`, `orthogonal`, `normal`, `uniform`,
               `truncated_normal`, `variance_scaling`, `glorot_normal`,
               `glorot_uniform`, `xavier_normal`, `xavier_uniform`,
               `he_normal`, `he_uniform`, `lecun_normal`, `lecun_uniform`.
               Alternatively it is possible to specify a dictionary with
               a key `type` that identifies the type of initializer and
               other keys for its parameters,
               e.g. `{type: normal, mean: 0, stddev: 0}`.
               To know the parameters of each initializer, please refer
               to TensorFlow's documentation.
        :type initializer: str
        :param regularize: if a `regularize` is not already specified in
               `conv_layers` or `fc_layers` this is the default `regularize`
               that will be used for each layer. It indicates if
               the layer weights should be considered when computing
               a regularization loss.
        :type regularize:
        :param reduce_output: defines how to reduce the output tensor of
               the convolutional layers along the `s` sequence length
               dimension if the rank of the tensor is greater than 2.
               Available values are: `sum`, `mean` or `avg`, `max`, `concat`
               (concatenates along the first dimension), `last` (returns
               the last vector of the first dimension) and `None` or `null`
               (which does not reduce and returns the full tensor).
        :type reduce_output: str
        """
        super(H3RNN, self).__init__()
        logger.debug(' {}'.format(self.name))

        self.embedding_size = embedding_size

        # Component embeddings are kept as a sequence (reduce_output=None)
        # so the recurrent stack can consume them step by step.
        self.h3_embed = H3Embed(
            embedding_size,
            embeddings_on_cpu=embeddings_on_cpu,
            dropout=dropout,
            weights_initializer=weights_initializer,
            bias_initializer=bias_initializer,
            weights_regularizer=weights_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            # weights_constraint=weights_constraint,
            # bias_constraint=bias_constraint,
            reduce_output=None
        )

        logger.debug(' RecurrentStack')
        self.recurrent_stack = RecurrentStack(
            state_size=state_size,
            cell_type=cell_type,
            num_layers=num_layers,
            bidirectional=bidirectional,
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            unit_forget_bias=unit_forget_bias,
            weights_initializer=weights_initializer,
            recurrent_initializer=recurrent_initializer,
            bias_initializer=bias_initializer,
            weights_regularizer=weights_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            reduce_output=reduce_output
        )

    def call(
            self,
            inputs,
            training=None,
            mask=None
    ):
        """
        :param inputs: The input vector fed into the encoder.
               Shape: [batch x 19], type tf.int8
        :type inputs: Tensor
        :param training: bool specifying if in training mode (important for dropout)
        :type training: bool
        :param mask: bool tensor encoding masked timesteps in the input
        :type mask: bool
        """
        # ================ Embeddings ================
        # Sequence of per-component embeddings (not reduced).
        embedded_h3 = self.h3_embed(
            inputs,
            training=training,
            mask=mask
        )

        # ================ RNN ================
        hidden, final_state = self.recurrent_stack(
            embedded_h3['encoder_output'],
            training=training,
            mask=mask
        )

        return {
            'encoder_output': hidden,
            'encoder_output_state': final_state
        }
| 41.852649
| 88
| 0.580403
|
4a01b0d0b09d9157676399f57469b431765aa0da
| 12,493
|
py
|
Python
|
src/get_data/archive/get_data_arr.py
|
xmuyzz/IVContrast
|
f3100e54f1808e1a796acd97ef5d23d0a2fd4f6c
|
[
"MIT"
] | null | null | null |
src/get_data/archive/get_data_arr.py
|
xmuyzz/IVContrast
|
f3100e54f1808e1a796acd97ef5d23d0a2fd4f6c
|
[
"MIT"
] | null | null | null |
src/get_data/archive/get_data_arr.py
|
xmuyzz/IVContrast
|
f3100e54f1808e1a796acd97ef5d23d0a2fd4f6c
|
[
"MIT"
] | null | null | null |
import glob
import os
import pickle
import re
import shutil
from collections import Counter

import cv2
import matplotlib
import matplotlib.pyplot as plt
import nrrd
import numpy as np
import pandas as pd
#import skimage.transform as st
from sklearn.model_selection import train_test_split, GroupShuffleSplit
from tensorflow.keras.utils import to_categorical
#----------------------------------------------------------------------------------------
# training dataset
#----------------------------------------------------------------------------------------
def train_val_split(proj_dir, PMH_data_dir, CHUM_data_dir, CHUS_data_dir, PMH_label_csv,
                    CHUM_label_csv, CHUS_label_csv, train_img_dir, val_img_dir):
    """Build the combined PMH/CHUM/CHUS scan list, split it 80/20 into
    train and validation sets (stratified by contrast label) and pickle
    the four resulting series.

    :param proj_dir: directory holding the three label CSV files
    :param PMH_data_dir/CHUM_data_dir/CHUS_data_dir: per-cohort nrrd dirs
    :param PMH_label_csv/CHUM_label_csv/CHUS_label_csv: per-cohort CSVs
           with a 'Contrast' (Yes/No) column
    :param train_img_dir: output dir for x_train.p / y_train.p
    :param val_img_dir: output dir for x_val.p / y_val.p
    """
    ### PMH dataset: every nrrd file is used, labels come straight from CSV
    PMH_label = pd.read_csv(os.path.join(proj_dir, PMH_label_csv))
    PMH_label['Contrast'] = PMH_label['Contrast'].map({'Yes': 'C', 'No': 'N'})
    labels = PMH_label['Contrast'].to_list()
    fns = sorted(glob.glob(PMH_data_dir + '/*nrrd'))
    df_PMH = pd.DataFrame({'file': fns, 'label': labels})
    pd.options.display.max_colwidth = 100
    print(df_PMH)
    file = df_PMH['file'][0]
    data, header = nrrd.read(file)
    print(data.shape)

    # Empty frames keep the final concat valid even if a cohort fails
    # (the original left df_CHUM/df_CHUS undefined in that case).
    df_CHUM = pd.DataFrame({'file': [], 'label': []})
    df_CHUS = pd.DataFrame({'file': [], 'label': []})

    ### CHUM dataset: only CT-SIM scans are kept (CT-PET rows are skipped)
    try:
        labels = []
        CHUM_label = pd.read_csv(os.path.join(proj_dir, CHUM_label_csv))
        CHUM_label['Contrast'] = CHUM_label['Contrast'].map({'Yes': 'C', 'No': 'N'})
        for i in range(CHUM_label.shape[0]):
            file = CHUM_label['Patient ID'].iloc[i]
            scan = file.split('/')[-1].split('_')[2].strip()
            if scan == 'CT-SIM':
                labels.append(CHUM_label['Contrast'].iloc[i])
        fns = []
        # BUG FIX: the original globbed the undefined name `train_data_dir`.
        for fn in sorted(glob.glob(CHUM_data_dir + '/*nrrd')):
            scan = fn.split('/')[-1].split('_')[2].strip()
            if scan == 'CT-SIM':
                fns.append(fn)
        df_CHUM = pd.DataFrame({'file': fns, 'label': labels})
        pd.options.display.max_colwidth = 100
        # BUG FIX: the original printed/indexed the undefined name `df`.
        print(df_CHUM)
        file = df_CHUM['file'][0]
        data, header = nrrd.read(file)
        print(data.shape)
    except Exception as e:
        # Narrowed from a bare except; report what actually went wrong.
        print('CHUM dataset failed.', e)

    ### CHUS dataset: only CT-SIMPET scans are kept
    try:
        CHUS_label = pd.read_csv(os.path.join(proj_dir, CHUS_label_csv))
        CHUS_label['Contrast'] = CHUS_label['Contrast'].map({'Yes': 'C', 'No': 'N'})
        labels = CHUS_label['Contrast'].to_list()
        fns = []
        for fn in sorted(glob.glob(CHUS_data_dir + '/*nrrd')):
            scan = fn.split('/')[-1].split('_')[2].strip()
            if scan == 'CT-SIMPET':
                fns.append(fn)
        df_CHUS = pd.DataFrame({'file': fns, 'label': labels})
        pd.options.display.max_colwidth = 100
        print(df_CHUS)
        file = df_CHUS['file'][0]
        data, header = nrrd.read(file)
        print(data.shape)
    except Exception as e:
        print('CHUS dataset failed.', e)

    ### combine cohorts for the train-val split
    df_tot = pd.concat([df_PMH, df_CHUM, df_CHUS])
    x = df_tot['file']
    y = df_tot['label']
    # Stratify on the contrast label so both splits keep the class ratio.
    x_train, x_val, y_train, y_val = train_test_split(
        x,
        y,
        stratify=y,
        test_size=0.2,
        random_state=42
    )
    print(x_train)
    print(y_train)
    print(x_val)
    print(y_val)
    # Persist the split; train_dataset()/val_dataset() read these back.
    x_train.to_pickle(os.path.join(train_img_dir, 'x_train.p'))
    y_train.to_pickle(os.path.join(train_img_dir, 'y_train.p'))
    x_val.to_pickle(os.path.join(val_img_dir, 'x_val.p'))
    y_val.to_pickle(os.path.join(val_img_dir, 'y_val.p'))
#----------------------------------------------------------------------------------------
# training dataset
#----------------------------------------------------------------------------------------
def train_dataset(train_img_dir):
    """Load the pickled training split, convert each nrrd scan into a
    stack of 64x64 slices, and save the stacked array plus a dataframe
    of per-slice IDs and labels into `train_img_dir`.
    """
    list_slice_number = []
    list_label = []
    list_img = []
    list_fn = []
    count = 0
    # BUG FIX: the original passed the undefined names x_train/y_train
    # to read_pickle (and read y_train from the undefined val_img_dir);
    # these are the pickle file names written by train_val_split().
    x_train = pd.read_pickle(os.path.join(train_img_dir, 'x_train.p'))
    y_train = pd.read_pickle(os.path.join(train_img_dir, 'y_train.p'))
    # Accumulator for all slices; initialised once before the loop
    # (the original reset it per file, keeping only the last scan).
    arr = np.empty([0, 64, 64])
    try:
        for train_file in x_train:
            count += 1
            print(count)
            ### create consistent patient ID format
            if train_file.split('/')[-1].split('_')[0] == 'PMH':
                patient_id = 'PMH' + train_file.split('/')[-1].split('-')[1][2:4].strip()
            elif train_file.split('/')[-1].split('-')[1] == 'CHUM':
                patient_id = 'CHUM' + train_file.split('/')[-1].split('-')[2].strip()
            elif train_file.split('/')[-1].split('-')[1] == 'CHUS':
                patient_id = 'CHUS' + train_file.split('/')[-1].split('-')[2].strip()
            ### extract img from nrrd files
            data, header = nrrd.read(train_file)
            data = data.transpose(2, 0, 1)
            data[data <= -1024] = -1024  # clip air/out-of-scan values
            # NOTE(review): cv2.resize expects a 2-D dsize; a 3-tuple
            # raises — resizing likely needs to happen slice by slice.
            data = cv2.resize(data, dsize=(36, 64, 64), interpolation=cv2.INTER_CUBIC)
            arr = np.concatenate([arr, data], 0)
            ### record slice count per scan (used to expand labels below)
            list_slice_number.append(data.shape[0])
            ### create patient ID and slice index for img
            for i in range(data.shape[0]):
                # BUG FIX: after transpose(2, 0, 1) the slice axis is 0,
                # so index data[i] rather than data[:, :, i].
                img = data[i, :, :]
                fn = patient_id + '_' + 'slice%s'%(f'{i:03d}')
                list_fn.append(fn)
    except Exception as e:
        # Narrowed from a bare except; report what actually went wrong.
        print('train dataset failed.', e)
    else:
        print('train dataset created.')
        print(arr.shape)
        # BUG FIX: the original called the unimported name `numpy` with
        # the undefined variable `train_arr`.
        np.save(os.path.join(train_img_dir, 'train_arr.npy'), arr)
    ### generate one label per CT slice
    for label, slice_number in zip(y_train, list_slice_number):
        list_label.extend([label] * slice_number)
    ### dataframe mapping slice IDs to labels
    train_df = pd.DataFrame({'fn': list_fn, 'label': list_label})
    print(train_df[0:10])
    ### save dataframe to pickle
    train_df.to_pickle(os.path.join(train_img_dir, 'train_df.p'))
    print('train data size:', train_df.shape[0])
#----------------------------------------------------------------------------------------
# val dataset
#----------------------------------------------------------------------------------------
def val_dataset(val_img_dir):
    """Load the pickled validation split, convert each nrrd scan into a
    stack of 64x64 slices, and save the stacked array plus a dataframe
    of per-slice IDs and labels into `val_img_dir`.
    """
    list_slice_number = []
    list_label = []
    list_img = []
    list_fn = []
    count = 0
    # BUG FIX: the original passed the undefined names x_val/y_val to
    # read_pickle; these are the pickle file names written by
    # train_val_split().
    x_val = pd.read_pickle(os.path.join(val_img_dir, 'x_val.p'))
    y_val = pd.read_pickle(os.path.join(val_img_dir, 'y_val.p'))
    # Accumulator for all slices; initialised once before the loop
    # (the original reset it per file, keeping only the last scan).
    arr = np.empty([0, 64, 64])
    ### generate CT slices and save them as jpg
    try:
        for val_file in x_val:
            count += 1
            print(count)
            ### create consistent patient ID format
            if val_file.split('/')[-1].split('_')[0] == 'PMH':
                patient_id = 'PMH' + val_file.split('/')[-1].split('-')[1][2:4].strip()
            elif val_file.split('/')[-1].split('-')[1] == 'CHUM':
                # BUG FIX: the original assigned `val_id` here, but the
                # slice name below reads `patient_id`.
                patient_id = 'CHUM' + val_file.split('/')[-1].split('-')[2].strip()
            elif val_file.split('/')[-1].split('-')[1] == 'CHUS':
                patient_id = 'CHUS' + val_file.split('/')[-1].split('-')[2].strip()
            ### extract img from nrrd files
            # BUG FIX: the original read `train_file`, undefined here.
            data, header = nrrd.read(val_file)
            data = data.transpose(2, 0, 1)
            data[data <= -1024] = -1024  # clip air/out-of-scan values
            # NOTE(review): cv2.resize expects a 2-D dsize; a 3-tuple
            # raises — resizing likely needs to happen slice by slice.
            data = cv2.resize(data, dsize=(36, 64, 64), interpolation=cv2.INTER_CUBIC)
            arr = np.concatenate([arr, data], 0)
            ### record slice count per scan (used to expand labels below)
            list_slice_number.append(data.shape[0])
            ### create patient ID and slice index for img
            for i in range(data.shape[0]):
                # BUG FIX: after transpose(2, 0, 1) the slice axis is 0,
                # so index data[i] rather than data[:, :, i].
                img = data[i, :, :]
                fn = patient_id + '_' + 'slice%s'%(f'{i:03d}')
                list_fn.append(fn)
    except Exception as e:
        # Narrowed from a bare except; report what actually went wrong.
        print('val dataset failed.', e)
    else:
        print('val dataset created.')
        print(arr.shape)
        # BUG FIX: the original called the unimported name `numpy` with
        # the undefined variable `val_arr`.
        np.save(os.path.join(val_img_dir, 'val_arr.npy'), arr)
    ### generate one label per CT slice
    for label, slice_number in zip(y_val, list_slice_number):
        list_label.extend([label] * slice_number)
    ### dataframe mapping slice IDs to labels
    val_df = pd.DataFrame({'fn': list_fn, 'label': list_label})
    print(val_df[0:10])
    ### save dataframe to pickle
    val_df.to_pickle(os.path.join(val_img_dir, 'val_df.p'))
    print('val data size:', val_df.shape[0])
#----------------------------------------------------------------------------------------
# test dataset
#----------------------------------------------------------------------------------------
def test_dataset(test_label_dir, test_label_file, test_data_dir, test_img_dir):
    """Build the test split (MDACC cohort): read contrast labels from a CSV,
    resample each nrrd CT volume, save the stacked slice array and a dataframe
    mapping per-slice file names to labels.

    Args:
        test_label_dir: directory containing the label CSV.
        test_label_file: CSV file name with a 'Contrast' column ('Yes'/'No').
        test_data_dir: directory containing the *.nrrd scans.
        test_img_dir: output directory for test_arr.npy / test_df.p.
    """
    list_slice_number = []
    list_label = []
    list_fn = []   # was never initialized in the original (NameError on append)
    count = 0
    # Read labels from the function's own parameters; the original used the
    # unrelated globals `proj_dir` / `mdacc_label_file`.
    df_label = pd.read_csv(os.path.join(test_label_dir, test_label_file))
    df_label['Contrast'] = df_label['Contrast'].map({'Yes': 'C', 'No': 'N'})
    labels = df_label['Contrast'].to_list()
    fns = [fn for fn in sorted(glob.glob(test_data_dir + '/*nrrd'))]
    df = pd.DataFrame({'file': fns, 'label': labels})
    pd.options.display.max_colwidth = 100
    x_test = df['file']
    y_test = df['label']
    # Accumulate every resampled scan here. The original re-created `arr`
    # inside the loop, so the saved array held only the last patient's slices.
    arr = np.empty([0, 64, 64])
    try:
        for test_file in x_test:
            count += 1
            print(count)
            # derive the patient ID from the scan being processed
            # (the original read the undefined `train_file` here)
            patient_id = 'MDACC' + test_file.split('/')[-1].split('-')[2][1:3].strip()
            data, header = nrrd.read(test_file)
            data = data.transpose(2, 0, 1)
            data[data <= -1024] = -1024   # clip padding/air HU values
            # NOTE(review): cv2.resize only accepts a 2-D dsize; a 3-D target
            # needs scipy.ndimage.zoom or per-slice resizing — confirm intent.
            data = cv2.resize(data, dsize=(36, 64, 64), interpolation=cv2.INTER_CUBIC)
            arr = np.concatenate([arr, data], 0)
            ### record the slice count so labels can be repeated per slice
            list_slice_number.append(data.shape[0])
            ### create patient ID and slice index for each img
            for i in range(data.shape[0]):
                fn = patient_id + '_' + 'slice%s' % (f'{i:03d}')
                list_fn.append(fn)
    except Exception as e:
        print('test dataset failed:', e)
    else:
        print('test dataset created.')
    print(arr.shape)
    # was `numpy.save(..., val_arr)`: wrong module alias and undefined variable
    np.save(os.path.join(test_img_dir, 'test_arr.npy'), arr)
    ### generate labels for CT slices
    for label, slice_number in zip(y_test, list_slice_number):
        list_label.extend([label] * slice_number)
    print(len(list_fn))
    # Map slice file names to labels; the original used the always-empty
    # `list_img`, which made the DataFrame constructor raise on length mismatch
    # and is inconsistent with the 'fn' schema of train/val.
    test_df = pd.DataFrame({'fn': list_fn, 'label': list_label})
    print(test_df[0:10])
    test_df.to_pickle(os.path.join(test_img_dir, 'test_df.p'))
    print('test data size:', test_df.shape[0])
| 39.040625
| 98
| 0.534379
|
4a01b0f24f456c2c42f17cc603da4ea6caf97c61
| 8,126
|
py
|
Python
|
proxyclient/m1n1/macho.py
|
EricRabil/m1n1
|
0a1a9348c32e2e44374720cd9d68cbe81cf696df
|
[
"MIT"
] | 1,604
|
2021-01-14T19:04:59.000Z
|
2022-03-31T18:34:16.000Z
|
proxyclient/m1n1/macho.py
|
EricRabil/m1n1
|
0a1a9348c32e2e44374720cd9d68cbe81cf696df
|
[
"MIT"
] | 105
|
2021-01-15T03:52:27.000Z
|
2022-03-30T22:16:52.000Z
|
proxyclient/m1n1/macho.py
|
EricRabil/m1n1
|
0a1a9348c32e2e44374720cd9d68cbe81cf696df
|
[
"MIT"
] | 96
|
2021-01-14T21:13:53.000Z
|
2022-03-31T12:14:14.000Z
|
# SPDX-License-Identifier: MIT
from io import BytesIO, SEEK_END, SEEK_SET
import bisect
from construct import *
from .utils import *
__all__ = ["MachO"]
# Load-command type codes decoded by this parser (a subset of the Mach-O
# LC_* constants; anything else falls through to GreedyBytes in MachOCmd).
MachOLoadCmdType = "LoadCmdType" / Enum(Int32ul,
    SYMTAB = 0x02,
    UNIXTHREAD = 0x05,
    SEGMENT_64 = 0x19,
    UUID = 0x1b,
    BUILD_VERSION = 0x32,
    DYLD_CHAINED_FIXUPS = 0x80000034,
    FILESET_ENTRY = 0x80000035,
)

# Thread-state flavor for LC_UNIXTHREAD; only the 64-bit ARM state is handled.
MachOArmThreadStateFlavor = "ThreadStateFlavor" / Enum(Int32ul,
    THREAD64 = 6,
)

# Fixed-size Mach-O file header (64-bit layout: includes the reserved field).
MachOHeader = Struct(
    "magic" / Hex(Int32ul),
    "cputype" / Hex(Int32ul),
    "cpusubtype" / Hex(Int32ul),
    "filetype" / Hex(Int32ul),
    "ncmds" / Hex(Int32ul),
    "sizeofcmds" / Hex(Int32ul),
    "flags" / Hex(Int32ul),
    "reserved" / Hex(Int32ul),
)

# Segment protection bits (vm_prot_t).
MachOVmProt = FlagsEnum(Int32sl,
    PROT_READ = 0x01,
    PROT_WRITE = 0x02,
    PROT_EXECUTE = 0x04,
)

# LC_SYMTAB payload: file offsets/sizes of the nlist table and string table.
MachOCmdSymTab = Struct(
    "symoff" / Hex(Int32ul),
    "nsyms" / Int32ul,
    "stroff" / Hex(Int32ul),
    "strsize" / Hex(Int32ul),
)

# LC_UNIXTHREAD payload: a sequence of (flavor, register state) records.
# The inner length prefix counts 32-bit words, hence the *4 / /4 adapter.
MachOCmdUnixThread = GreedyRange(Struct(
    "flavor" / MachOArmThreadStateFlavor,
    "data" / Prefixed(ExprAdapter(Int32ul, obj_ * 4, obj_ / 4), Switch(this.flavor, {
        MachOArmThreadStateFlavor.THREAD64: Struct(
            "x" / Array(29, Hex(Int64ul)),
            "fp" / Hex(Int64ul),
            "lr" / Hex(Int64ul),
            "sp" / Hex(Int64ul),
            "pc" / Hex(Int64ul),
            "cpsr" / Hex(Int32ul),
            "flags" / Hex(Int32ul),
        )
    })),
))

# 64-bit symbol-table entry (struct nlist_64); n_strx indexes the string table.
NList = Struct(
    "n_strx" / Hex(Int32ul),
    "n_type" / Hex(Int8ul),
    "n_sect" / Hex(Int8ul),
    "n_desc" / Hex(Int16sl),
    "n_value" / Hex(Int64ul),
)

# LC_SEGMENT_64 payload plus its trailing section headers.
MachOCmdSegment64 = Struct(
    "segname" / PaddedString(16, "ascii"),
    "vmaddr" / Hex(Int64ul),
    "vmsize" / Hex(Int64ul),
    "fileoff" / Hex(Int64ul),
    "filesize" / Hex(Int64ul),
    "maxprot" / MachOVmProt,
    "initprot" / MachOVmProt,
    "nsects" / Int32ul,
    "flags" / Hex(Int32ul),
    "sections" / GreedyRange(Struct(
        "sectname" / PaddedString(16, "ascii"),
        "segname" / PaddedString(16, "ascii"),
        "addr" / Hex(Int64ul),
        "size" / Hex(Int64ul),
        "offset" / Hex(Int32ul),
        "align" / Hex(Int32ul),
        "reloff" / Hex(Int32ul),
        "nreloc" / Hex(Int32ul),
        "flags" / Hex(Int32ul),
        "reserved1" / Hex(Int32ul),
        "reserved2" / Hex(Int32ul),
        "reserved3" / Hex(Int32ul),
    )),
)

# LC_FILESET_ENTRY payload: a nested Mach-O at `offset`, mapped at `addr`.
MachOFilesetEntry = Struct(
    "addr" / Hex(Int64ul),
    "offset" / Hex(Int64ul),
    "entryid" / Hex(Int32ul),
    "reserved" / Hex(Int32ul),
    "name" / CString("ascii"),
)

# Generic load command: the size prefix includes the 8-byte (cmd, cmdsize)
# header itself, hence the -8 / +8 adapter around the payload length.
MachOCmd = Struct(
    "cmd" / Hex(MachOLoadCmdType),
    "args" / Prefixed(ExprAdapter(Int32ul, obj_ - 8, obj_ + 8), Switch(this.cmd, {
        MachOLoadCmdType.SYMTAB: MachOCmdSymTab,
        MachOLoadCmdType.UNIXTHREAD: MachOCmdUnixThread,
        MachOLoadCmdType.SEGMENT_64: MachOCmdSegment64,
        MachOLoadCmdType.UUID: Hex(Bytes(16)),
        MachOLoadCmdType.FILESET_ENTRY: MachOFilesetEntry,
    }, default=GreedyBytes)),
)

# Whole image: header followed by ncmds load commands.
MachOFile = Struct(
    "header" / MachOHeader,
    "cmds" / Array(this.header.ncmds, MachOCmd),
)
class MachO:
    """Parser for Mach-O images backed by the `construct` structs above.

    Accepts either raw bytes or a seekable binary stream positioned at the
    image's Mach-O header. Nested images referenced by FILESET_ENTRY load
    commands are parsed recursively into ``self.subfiles``.
    """

    def __init__(self, data):
        if isinstance(data, bytes):
            self.io = BytesIO(data)
        else:
            self.io = data
        # Remember where this image starts within the (possibly shared) stream.
        self.off = self.io.tell()
        self.io.seek(0, SEEK_END)
        self.end = self.io.tell()
        self.size = self.end - self.off
        self.io.seek(self.off, SEEK_SET)
        self.obj = MachOFile.parse_stream(self.io)
        self.symbols = {}
        self.load_info()
        self.load_fileset()

    def load_info(self):
        """Compute the image's VM range [vmin, vmax) and its entry point."""
        self.vmin, self.vmax = (1 << 64), 0
        self.entry = None
        for cmd in self.obj.cmds:
            if cmd.cmd == MachOLoadCmdType.SEGMENT_64:
                self.vmin = min(self.vmin, cmd.args.vmaddr)
                self.vmax = max(self.vmax, cmd.args.vmaddr + cmd.args.vmsize)
            elif cmd.cmd == MachOLoadCmdType.UNIXTHREAD:
                self.entry = cmd.args[0].data.pc

    def prepare_image(self, load_hook=None):
        """Return a flat bytearray covering [vmin, vmax) with every segment
        placed at its VM offset.

        `load_hook(data, segname, size, fileoff, dest)`, if given, may
        transform each segment's file bytes before placement. A segment named
        "PYLD" truncates the image instead of being zero-filled, keeping a
        4-byte payload end marker.
        """
        memory_size = self.vmax - self.vmin
        image = bytearray(memory_size)
        for cmd in self.get_cmds(MachOLoadCmdType.SEGMENT_64):
            dest = cmd.args.vmaddr - self.vmin
            # Clamp to the actual stream size in case the file is truncated.
            end = min(self.size, cmd.args.fileoff + cmd.args.filesize)
            size = end - cmd.args.fileoff
            print(f"LOAD: {cmd.args.segname} {size} bytes from {cmd.args.fileoff:x} to {dest:x}")
            self.io.seek(self.off + cmd.args.fileoff)
            data = self.io.read(size)
            if load_hook is not None:
                data = load_hook(data, cmd.args.segname, size, cmd.args.fileoff, dest)
            image[dest:dest + size] = data
            if cmd.args.vmsize > size:
                clearsize = cmd.args.vmsize - size
                if cmd.args.segname == "PYLD":
                    print("SKIP: %d bytes from 0x%x to 0x%x" % (clearsize, dest + size, dest + size + clearsize))
                    memory_size -= clearsize - 4 # leave a payload end marker
                    image = image[:memory_size]
                else:
                    print("ZERO: %d bytes from 0x%x to 0x%x" % (clearsize, dest + size, dest + size + clearsize))
                    image[dest + size:dest + cmd.args.vmsize] = bytes(clearsize)
        return image

    def get_cmds(self, cmdtype):
        """Yield every load command of the given type."""
        for cmd in self.obj.cmds:
            if cmd.cmd == cmdtype:
                yield cmd

    def get_cmd(self, cmdtype):
        """Return the unique load command of the given type, or raise."""
        cmds = list(self.get_cmds(cmdtype))
        if len(cmds) == 0:
            raise Exception(f"No commands of type {cmdtype}")
        if len(cmds) > 1:
            # was f"... {len(cmd)}": NameError whenever this branch was hit
            raise Exception(f"More than one commands of type {cmdtype} (found {len(cmds)})")
        return cmds[0]

    def load_fileset(self):
        """Parse the nested Mach-O images named by FILESET_ENTRY commands and
        publish their segment base addresses as "<name>:<segment>" symbols."""
        self.subfiles = {}
        for fe in self.get_cmds(MachOLoadCmdType.FILESET_ENTRY):
            self.io.seek(self.off + fe.args.offset)
            subfile = MachO(self.io)
            self.subfiles[fe.args.name] = subfile
            for seg in subfile.get_cmds(MachOLoadCmdType.SEGMENT_64):
                self.symbols[f"{fe.args.name}:{seg.args.segname}"] = seg.args.vmaddr

    def add_symbols(self, filename, syms):
        """Import symbols from `syms` (a symbol-bearing MachO) into this
        image, rebased onto the fileset entry `filename`'s segments.

        Raises if `filename` is not a known fileset entry.
        """
        try:
            subfile = self.subfiles[filename]
        except KeyError:
            # was a literal "(unknown)" placeholder: name the missing entry
            raise Exception(f"No fileset entry for {filename}")
        sym_segs = {}
        for sym_seg in syms.get_cmds(MachOLoadCmdType.SEGMENT_64):
            sym_segs[sym_seg.args.segname] = sym_seg
        syms.load_symbols()
        symtab = [(v, k) for (k, v) in syms.symbols.items()]
        symtab.sort()
        for seg in subfile.get_cmds(MachOLoadCmdType.SEGMENT_64):
            if seg.args.segname not in sym_segs:
                continue
            sym_seg = sym_segs[seg.args.segname]
            # Select only the symbols falling inside this segment's VM range
            # in the symbol file, then rebase them onto this image.
            start = bisect.bisect_left(symtab, (sym_seg.args.vmaddr, ""))
            end = bisect.bisect_left(symtab, (sym_seg.args.vmaddr + sym_seg.args.vmsize, ""))
            for addr, sym in symtab[start:end]:
                # was f"(unknown):{sym}": prefix with the fileset entry name
                sname = f"{filename}:{sym}"
                self.symbols[sname] = addr - sym_seg.args.vmaddr + seg.args.vmaddr

    def load_symbols(self):
        """Read the SYMTAB command's nlist entries into ``self.symbols``."""
        self.symbols = {}
        cmd = self.get_cmd(MachOLoadCmdType.SYMTAB)
        nsyms = cmd.args.nsyms
        length = NList.sizeof() * nsyms
        self.io.seek(self.off + cmd.args.symoff)
        symdata = self.io.read(length)
        symbols = Array(nsyms, NList).parse(symdata)
        for i in symbols:
            off = cmd.args.stroff + i.n_strx
            self.io.seek(self.off + off)
            # Names are NUL-terminated; 1024 bytes is an assumed upper bound.
            name = self.io.read(1024).split(b"\x00")[0].decode("ascii")
            self.symbols[name] = i.n_value
if __name__ == "__main__":
    import sys

    # Parse the image; optionally merge symbols from a second Mach-O into
    # the "com.apple.kernel" fileset entry, then dump the symbol table.
    macho = MachO(open(sys.argv[1], "rb").read())
    if len(sys.argv) > 2:
        kernel_syms = MachO(open(sys.argv[2], "rb").read())
        macho.add_symbols("com.apple.kernel", kernel_syms)
    for address, symbol in sorted((v, k) for k, v in macho.symbols.items()):
        print(f"0x{address:x} {symbol}")
| 31.866667
| 113
| 0.577037
|
4a01b1d95534179931ef0919a400c09158b0453c
| 886
|
py
|
Python
|
Resene naloge/euler33.py
|
CadezDavid/ProjectEuler
|
9e11aa5782fb600c98eba9e04766b3bd79acea0e
|
[
"MIT"
] | null | null | null |
Resene naloge/euler33.py
|
CadezDavid/ProjectEuler
|
9e11aa5782fb600c98eba9e04766b3bd79acea0e
|
[
"MIT"
] | null | null | null |
Resene naloge/euler33.py
|
CadezDavid/ProjectEuler
|
9e11aa5782fb600c98eba9e04766b3bd79acea0e
|
[
"MIT"
] | null | null | null |
import fractions
# def krajsanje(n, m):
# list = []
# m = [i for i in str(m)]
# n = [i for i in str(n)]
# k = list(set(m).symmetric_difference(n))
# if min(m, n) / max(m, n) == min(k[0], k[1]) / max(k[0], k[1])
# list.append([m, n])
def odstrani_skupne(m, n):
    """Naively "cancel" the digits that m and n share.

    Returns False when either number contains a zero digit, or when the
    cancellation does not leave exactly one digit on each side; otherwise
    returns the ratio of the two leftover digits as a float.
    """
    sm, sn = str(m), str(n)
    if '0' in sm or '0' in sn:
        return False
    # digits surviving on each side after removing those present on the other
    left = [d for d in sm if d not in sn]
    right = [d for d in sn if d not in sm]
    if len(left) == 1 == len(right):
        return int(left[0]) / int(right[0])
    return False
# Project Euler 33: find the non-trivial "digit-cancelling" fractions m/n
# (m < n, two digits each) and print the product of all of them in lowest
# terms.  Renamed the accumulator `list` -> `pairs`: it shadowed the builtin.
produkt = fractions.Fraction(1, 1)
pairs = []
for m in range(10, 100):
    for n in range(m + 1, 100):
        # keep m/n when naive digit cancellation preserves its value
        if odstrani_skupne(m, n) == m / n:
            pairs.append([m, n])
            produkt *= fractions.Fraction(m, n)
print(pairs)
print(fractions.Fraction(produkt))
| 28.580645
| 67
| 0.549661
|
4a01b25fe4d6c5358c7fab4d04ee04219b9b53b1
| 23,620
|
py
|
Python
|
src/compiler/vhdl_gen.py
|
luinaudt/deparser
|
06d3c82ee151da4cef2823015d56bf867e0205f6
|
[
"Apache-2.0"
] | 7
|
2021-02-25T14:48:39.000Z
|
2022-03-05T06:46:01.000Z
|
src/compiler/vhdl_gen.py
|
luinaudt/deparser
|
06d3c82ee151da4cef2823015d56bf867e0205f6
|
[
"Apache-2.0"
] | null | null | null |
src/compiler/vhdl_gen.py
|
luinaudt/deparser
|
06d3c82ee151da4cef2823015d56bf867e0205f6
|
[
"Apache-2.0"
] | 1
|
2020-12-15T08:23:11.000Z
|
2020-12-15T08:23:11.000Z
|
from colorama import Fore, Style
from string import Template
from os import path, mkdir, scandir
from shutil import copyfile
import networkx as nx
from warnings import warn
import vhdl_util
VERSION = 0.1
class deparserHDL(object):
    """VHDL code generator for a packet deparser.

    Renders the deparser described by ``deparser`` (state machines, muxes
    and payload shifters) through the templates found in ``templateFolder``
    and writes the main entity plus its library files under ``outputDir``.
    """

    def __getlibrary(self):
        """set a dictionnary with library folder
        each folder is the <name> of an entity
        file component.vhdl components instantiation templates
        file entity.vhdl are placement template for components
        file module.vhdl are lib file to copy
        """
        self.lib = {}
        for d in scandir(self.tmplFolder):
            if d.is_dir():
                curPath = path.join(self.tmplFolder, d.name)
                # (module, component, entity) template paths for this name
                self.lib[d.name] = (path.join(curPath, "module.vhdl"),
                                    path.join(curPath, "component.vhdl"),
                                    path.join(curPath, "entity.vhdl"))

    def __init__(self, deparser, outputDir,
                 templateFolder,
                 phvBus,
                 baseName="deparser",
                 libDirName="lib",
                 clk="clk", reset_n="reset_n"):
        """Prepare output dirs, template library and substitution defaults.

        phvBus is a pair: [0] describes the header data bus (name/width and a
        header->(lsb, msb) map), [1] the validity bit bus.
        """
        self.clkName = clk
        self.enDep = "en_deparser"
        self.rstName = reset_n
        self.dep = deparser
        self.phvBus = phvBus
        self.headerBus = phvBus[0]["data"]
        self.busValidAssocPos = phvBus[1]["data"]
        self.entityName = baseName
        self.tmplFolder = templateFolder
        self.tmplFile = path.join(templateFolder, "deparser.vhdl")
        self.libDir = path.join(outputDir, libDirName)
        if not path.exists(self.libDir):
            mkdir(self.libDir)
        self.signals = {}
        self.entities = {}
        self.stateMachines = {}
        self.components = {}
        self.muxes = {}
        self.payloadShifters = {}
        self.__getlibrary()
        # top-level values substituted into the deparser.vhdl template
        self.dictSub = {'name': baseName,
                        'code': "",
                        'payloadConnect': "",
                        'payloadSize': deparser.busSize,
                        'outputSize': deparser.busSize,
                        'nbMuxes': deparser.nbStateMachine}

    def getVHDLParam(self):
        return self.dictSub

    def _setSignalStr(self):
        # render every registered signal declaration into dictSub["signals"]
        strSignal = ""
        sigTmpl = Template("signal $n : ${t}; \n")
        for n, t in self.signals.items():
            strSignal += sigTmpl.substitute({"n": n, "t": t})
        self.dictSub["signals"] = strSignal

    def __str__(self):
        """Render the full top-level VHDL file from the main template."""
        self.genInputs()
        self._setSignalStr()
        self._setEntitiesImplCode()
        self._setComponentsCode()
        self._setMuxesConnectionCode()
        self._setPayloadConnectionCode()
        with open(self.tmplFile, 'r') as myfile:
            tmpl = Template(myfile.read())
        return tmpl.safe_substitute(self.dictSub)

    def _setComponentsCode(self):
        """Collect component declarations; a value of False means the
        component template is used verbatim, otherwise it is instantiated
        once per stored parameter dictionary."""
        code = ""
        for n, d in self.components.items():
            if n not in self.lib:
                raise NameError("component {} does not exist "
                                "in library".format(n))
            if d is False:
                with open(self.lib[n][1], 'r') as f:
                    code += f.read()
            else:
                with open(self.lib[n][1], 'r') as f:
                    tmpl = Template(f.read())
                for n, dic in d.items():
                    code += tmpl.safe_substitute(dic)
        self.dictSub["components"] = code

    def _setEntitiesImplCode(self):
        """ Gen implementation for a component
        Component : component name
        tmplDict : template dictionnary
        Require a <component>_place.vhdl file in <component> dir
        """
        implCode = ""
        for c, d in self.entities.values():
            if c not in self.lib:
                raise NameError("component {} does not exist "
                                "in library".format(c))
            with open(self.lib[c][2], 'r') as f:
                tData = Template(f.read())
            implCode += tData.safe_substitute(d)
        self.dictSub["entities"] = implCode

    def writeTB(self, fileName):
        """Render the testbench template to fileName, wiring one input bus
        and one validity bit per header."""
        Tmpl = {"compVersion": VERSION,
                "name": self.entityName,
                "payloadSize": self.dictSub["payloadSize"],
                "outputSize": self.dictSub["outputSize"],
                "phvBus": self.dictSub["phvBus"],
                "phvValidity": self.dictSub["phvValidity"],
                "phvBusWidth": self.dictSub["phvBusWidth"],
                "phvValidityWidth": self.dictSub["phvValidityWidth"]}
        phvBus = ""
        phvBusIn = ""
        phvBusTmpl = "phvBus({} downto {}) <= {}_bus;\n"
        phvInTmpl = "{}_bus : in std_logic_vector({} downto 0);\n"
        for name, pos in self.headerBus.items():
            phvBusIn += phvInTmpl.format(name, pos[1] - pos[0])
            phvBus += phvBusTmpl.format(pos[1], pos[0], name)
        vBus = ""
        vBusIn = ""
        for name, pos in self.busValidAssocPos.items():
            vBusIn += "{}_valid : in std_logic;\n".format(name)
            vBus += "validityBus({}) <= {}_valid;\n".format(pos, name)
        Tmpl["setPhvBus"] = phvBus
        Tmpl["setValBus"] = vBus
        Tmpl["headerBuses"] = phvBusIn
        Tmpl["validityBits"] = vBusIn
        with open(path.join(self.tmplFolder, "deparser_tb.vhdl")) as inFile:
            TB = Template(inFile.read())
        with open(fileName, 'w') as outFile:
            outFile.write(TB.substitute(Tmpl))

    def writeFiles(self, mainFileName):
        """ export all files.
        mainFile + lib files in libFolder
        """
        for name, d in self.components.items():
            tF = self.lib[name][0]
            if d is False:
                oF = path.join(self.libDir,
                               "{}.vhdl".format(name))  # output lib file
                copyfile(tF, oF)
            else:
                with open(tF, 'r') as tmpl:
                    t = Template(tmpl.read())
                for n, dic in d.items():
                    oF = path.join(self.libDir,
                                   "{}.vhdl".format(n))  # output lib file
                    with open(oF, 'w') as outFile:
                        outFile.write(t.substitute(dic))
        with open(mainFileName, 'w') as outFile:
            outFile.write(str(self))

    def genInputs(self):
        # value assignments
        self.dictSub["phvBus"] = self.phvBus[0]["name"]
        self.dictSub["phvValidity"] = self.getValidBusName()
        self.dictSub["phvBusWidth"] = self.phvBus[0]["width"] - 1
        self.dictSub["phvValidityWidth"] = self.getNbHeaders() - 1

    def getValidBusName(self):
        return self.phvBus[1]["name"]

    def getNbHeaders(self):
        return self.phvBus[1]["width"]

    def appendCode(self, code):
        # append free-form code; warn when the same snippet was appended before
        oldCode = self.dictSub["code"]
        if code in oldCode:
            warn("append code already here : \n"
                 "oldCode : {}\n newCode : {}"
                 "\n".format(oldCode, code))
        oldCode += code
        self.dictSub["code"] = oldCode

    def _addVector(self, name, size):
        self._addSignal(name,
                        "std_logic_vector({} downto 0)".format(size - 1))

    def _addLogic(self, name):
        self._addSignal(name, "std_logic")

    def _addSignal(self, name, t):
        """ name : signal name
        t signal Type
        """
        if name in self.signals:
            raise NameError("signal {} already exist".format(name))
        self.signals[name] = t

    def _addEntity(self, name, tmplDict):
        """Add entity name with template file template
        and tmplDict
        error if name exists
        """
        if name in self.entities:
            raise NameError("entity {} already exist".format(name))
        self.entities[name] = tmplDict

    def getEntity(self, name):
        if name in self.entities:
            return self.entities[name]
        else:
            raise NameError("entity {} does not exist".format(name))

    def _connectVectors(self, dst, src):
        """ return the connection of 2 signals
        dst, src are tuples : (name, msb, lsb)
        """
        # when only a name is given, the whole signal is connected (no slice)
        tmplStr = "${dst}"
        dictTmpl = {"dst": dst[0],
                    "src": src[0]}
        if len(dst) == 3:
            tmplStr += "(${dMSB} downto ${dLSB})"
            dictTmpl["dLSB"] = dst[2]
            dictTmpl["dMSB"] = dst[1]
        tmplStr += " <= ${src}"
        if len(src) == 3:
            tmplStr += "(${sMSB} downto ${sLSB})"
            dictTmpl["sLSB"] = src[2]
            dictTmpl["sMSB"] = src[1]
        tmplStr += ";\n"
        tmpl = Template(tmplStr)
        return tmpl.substitute(dictTmpl)

    def _setPayloadConnectionCode(self):
        """Emit the payload wiring: a combinational process forwarding
        data/keep, and a clocked process selecting shifter controls from the
        header-validity pattern (one `when` per simple path of the deparse
        graph)."""
        code = Template("""
-- payload connections \n
process(payload_in_tkeep, payload_in_tdata, payload_in_tvalid) is
begin
${data}
if payload_in_tvalid = '1' then
${keepValid}
else
${keepUnvalid}
end if;
end process;
process(clk) is
begin
if rising_edge(clk) then
case $phvValidity is
${CtrlAssoc}
when others =>
${CtrlOthers}
end case;
end if;
end process;
""")
        codeCtrlO = ""
        codeCtrlAssoc = ""
        codeData = ""
        code1 = ""
        code2 = ""
        phvValWidth = self.dictSub["phvValidityWidth"]
        payloadBusWidth = self.dictSub["payloadSize"]
        paths = nx.all_simple_paths(self.dep.depG,
                                    self.dep.init,
                                    self.dep.last)
        for p in paths:
            tW = 0
            phv_val_list = ["0"] * (phvValWidth+1)
            # Counting total header sizes in tW
            for h in p:
                if h in self.headerBus:
                    tW += self.headerBus[h][1] - self.headerBus[h][0] + 1
                    # generate phv_val cond
                    phv_val_list[self.busValidAssocPos[h]] = "1"
            # validity bits are emitted MSB-first, hence the reversal
            codeCtrlAssoc += 'when "{}" =>\n'.format(''.join(phv_val_list[::-1]))
            # get payoadShift that is 0
            psList = list(self.payloadShifters.values())
            pos = int((tW % payloadBusWidth)/8)
            for i in range(pos):
                ps = psList[i]
                control = self.getEntity(ps[0])[1]["control"]
                ctrlW = self.getEntity(ps[0])[1]["wControl"]
                offset = i + len(psList) - pos
                # leading '1' selects the wrapped-around byte lane
                value = "'1' & {}".format(vhdl_util.int2vector(offset,
                                                               ctrlW - 1))
                codeCtrlAssoc += self._connectVectors((control, ),
                                                      (value, ))
            for j, i in enumerate(range(pos, len(psList))):
                ps = psList[i]
                control = self.getEntity(ps[0])[1]["control"]
                ctrlW = self.getEntity(ps[0])[1]["wControl"]
                value = "'0' & {}".format(vhdl_util.int2vector(j, ctrlW - 1))
                codeCtrlAssoc += self._connectVectors((control, ),
                                                      (value, ))
            # print(" mod {}\n cond : {}".format(int((tW % payloadBusWidth)/8),
            #                                    "".join(phv_val_list)))
        for ps in self.payloadShifters.values():
            codeData += self._connectVectors(ps[1]["inData"][1],
                                             ps[1]["inData"][0])
            code1 += self._connectVectors(ps[1]["inKeep"][1],
                                          ps[1]["inKeep"][0])
            code2 += self._connectVectors(ps[1]["inKeep"][1],
                                          ("(others => '0')", ))
            entity = self.getEntity(ps[0])
            codeCtrlO += self._connectVectors((entity[1]["control"], ),
                                              ("(others => '0')", ))
        payloadTmplDict = {"phvValidity": self.dictSub["phvValidity"],
                           "data": codeData,
                           "keepValid": code1,
                           "keepUnvalid": code2,
                           "CtrlOthers": codeCtrlO,
                           "CtrlAssoc": codeCtrlAssoc}
        self.dictSub['payloadConnect'] = code.safe_substitute(payloadTmplDict)

    def _setMuxesConnectionCode(self):
        """Wire each mux input slice to its header field slice in the PHV."""
        def getMuxConnectStr(muxNum):
            """ Generate the code to connect a Mux
            """
            code = ""
            _, connections = self.muxes[muxNum]
            entity = self._getMuxEntity(muxNum)
            pDst = ["", 0, 0]
            pSrc = ["", 0, 0]
            pDst = [entity["input"], 0, 0]
            width = entity["width"]
            for src, dst in connections.values():
                pDst[1] = int((dst+1)*width - 1)
                pDst[2] = int(dst * width)
                pSrc[1] = int(src[1] + width - 1)
                pSrc[2] = int(src[1])
                pSrc[0] = src[0]
                code += self._connectVectors(pDst, pSrc)
            return code
        allMuxStr = ""
        for n in self.muxes:
            allMuxStr += getMuxConnectStr(n)
        self.dictSub["muxes"] = allMuxStr

    def genPayloadShifter(self):
        # one payload shifter per output byte lane / state machine
        for i in range(self.dep.nbStateMachine):
            self._genPayloadShifter(i)

    def genMuxes(self):
        # one mux plus its driving state machine per output byte lane
        for i in range(self.dep.nbStateMachine):
            self._genMux(i)
            self._genStateMachine(i)

    def _getStMCompTmpl(self, num, name):
        """Gen template for a state machine
        """
        graph = self.dep.getStateMachine(num)
        # adjacency: state -> [(successor, edge attributes), ...]
        stateList = {}
        for u, v, d in graph.edges(data=True):
            if u not in stateList:
                stateList[u] = []
            stateList[u].append((v, d))

        def genStateTransitionCode(listTransition):
            def getStateTransition(name, cond):
                busAssoc = self.busValidAssocPos
                transitionTmpl = "NEXT_STATE <= {}; \n"
                condTmpl = "headerValid({}) = '1' then \n {} \n"
                tmp = transitionTmpl.format(name)
                # a labelled edge is guarded by that header's validity bit
                if "label" in cond:
                    tmp = condTmpl.format(busAssoc[cond["label"]],
                                          tmp)
                return tmp, ("label" in cond)
            transitionCode = "{} {}"
            condCodeTmpl = "if {}"
            condCode = ""
            noCondCode = ""
            for n, d in listTransition:
                code, cond = getStateTransition(n, d)
                if cond:
                    condCode += condCodeTmpl.format(code)
                    condCodeTmpl = "elsif {}"
                else:
                    noCondCode += code
            if len(condCode) > 0:
                condCode += "end if;\n"
            return transitionCode.format(noCondCode, condCode)
        tmplDict = {"compVersion": VERSION,
                    "name": name,
                    "initState": self.dep.init,
                    "lastState": self.dep.last,
                    "stateList": "({})".format(", "
                                               .join(list(graph.nodes))),
                    "initStateTransition":
                    genStateTransitionCode(stateList.pop(self.dep.init))}
        otherStateTransition = ""
        assocMuxIn = self.muxes[num][1]  # get ctrl val to assign for a state
        for k, struct in stateList.items():
            otherStateTransition += "when {} =>\n".format(k)
            stateMuxConv = vhdl_util.int2vector(assocMuxIn[k][1],
                                                "outputWidth")
            otherStateTransition += "output_reg <= {} ;\n".format(stateMuxConv)
            otherStateTransition += genStateTransitionCode(struct)
        tmplDict["otherStateTransition"] = otherStateTransition
        return tmplDict

    def _getStateMachineEntity(self, num):
        """Register (once) and return the template dict of state machine
        `num`; the backing component template is created on first use."""
        compName = "state_machine_{}".format(num)
        name = "stM_{}".format(num)
        nbInput = self.getNbHeaders()
        outWidth = self._getMuxEntity(num)["wControl"]
        output = self._getMuxEntity(num)["control"]
        if "state_machine" not in self.components:
            self.components["state_machine"] = {}
        if name not in self.entities:
            stComp = self.components["state_machine"]
            if compName not in stComp:
                stComp[compName] = self._getStMCompTmpl(num, compName)
            tmplDict = {"name": name,
                        "componentName": compName,
                        "nbHeader": nbInput,
                        "wControl": outWidth,
                        "clk": self.clkName,
                        "reset_n": self.rstName,
                        "start": "start_deparser",
                        "ready": "deparser_rdy_i({})".format(num),
                        "finish": "out_valid({})".format(num),
                        "headersValid": self.getValidBusName(),
                        "output": output}
            self._addEntity(name, ("state_machine", tmplDict))
        return self.getEntity(name)[1]

    def _genStateMachine(self, num):
        if num not in self.stateMachines:
            entity = self._getStateMachineEntity(num)
            self.stateMachines[num] = (entity["name"],)
        else:
            warn("trying to regenerate stateMachine {}".format(num))

    def _getMuxEntity(self, muxNum):
        """Function to get a mux entity name with
        nbIn as nb input and width as output size
        The mux name is generated such as being unique for
        a certain type of mux.
        if mux does not exist, add it to entities dictionnary
        """
        graph = self.dep.getStateMachine(muxNum)
        # init/last states carry no data, hence the -2
        nbInput = len(graph)-2
        if nbInput==0:
            nbInput=1
        outWidth = 8
        muxName = "mux_{}".format(muxNum)
        outputName = "muxes_o({})".format(muxNum)
        inputName = "muxes_{}_in".format(muxNum)
        controlName = "muxes_{}_ctrl".format(muxNum)
        if muxName not in self.entities:
            if "mux" not in self.components:
                self.components["mux"] = False
            dictMux = {"name": muxName,
                       "nbInput": nbInput,
                       "wControl": vhdl_util.getLog2In(nbInput),
                       "clk": self.clkName,
                       "width": outWidth,
                       "input": inputName,
                       "wInput": int(nbInput * outWidth),
                       "output": outputName,
                       "control": controlName}
            self._addEntity(muxName, ("mux", dictMux))
        return self.getEntity(muxName)[1]

    def _getPayloadShifterEntity(self, num):
        """Register (once) and return the template dict of payload shifter
        `num` (one byte lane of the payload bus)."""
        # graph = self.dep.getStateMachine(num)
        nbInput = int(self.dictSub['payloadSize']/8)
        width = 8
        name = "payloadShifter_{}".format(num)
        controlName = "payload_{}_ctrl".format(num)
        inDataName = "payload_shift_{}_data_in".format(num)
        inKeepName = "payload_shift_{}_keep_in".format(num)
        selDataName = "payload_o_data({})".format(num)
        selKeepName = "payload_o_keep({})".format(num)
        if name not in self.entities:
            if "payload_shifter" not in self.components:
                self.components["payload_shifter"] = False
            dictParam = {"name": name,
                         "nbInput": nbInput,
                         "width": width,
                         "dataWidth": int(nbInput * width),
                         "keepWidthIn": int(1 * nbInput),  # width on keepinput
                         "keepWidth": 1,
                         "wControl": vhdl_util.getLog2In(nbInput)+1,
                         "clk": self.clkName,
                         "control": controlName,
                         "inData": inDataName,
                         "inKeep": inKeepName,
                         "selKeep": selKeepName,
                         "selData": selDataName}
            self._addEntity(name, ("payload_shifter", dictParam))
        return self.getEntity(name)[1]

    def _genPayloadShifter(self, num):
        """Payload shifter
        """
        def genConnections(num, entity):
            """ Connection of the payload shifter
            Dictionnary key : input = (src, dst)
            src : tuple(signalName, MSB, LSB)
            dst : tuple(signalName, MSB, LSB)
            """
            connections = {}
            connections["inKeep"] = (("payload_in_tkeep", ),
                                     (entity["inKeep"], ))
            connections["inData"] = (("payload_in_tdata", ),
                                     (entity["inData"], ))
            return connections
        if num not in self.payloadShifters:
            entity = self._getPayloadShifterEntity(num)
            self._addVector(entity["control"], entity["wControl"])
            self._addVector(entity["inData"], entity["dataWidth"])
            self._addVector(entity["inKeep"], entity["keepWidthIn"])
            connections = genConnections(num, entity)
            self.payloadShifters[num] = (entity["name"], connections)
        else:
            warn("trying to regenerate payload shifter {}".format(num))

    def _genMux(self, muxNum):
        """ Mux is tuple : entityName, stateMachine assignments)
        """
        def genConnections(num):
            """ Connection :
            Dictionnary key = graph node name
            value : tuple(src, dst)
            src: tuple(signalName, start)
            dst: mux input number
            """
            connections = {}
            graph = self.dep.getStateMachine(num)
            i = 0
            for n, d in graph.nodes(data=True):
                # only data-carrying nodes (those with attributes) get an input
                if d != {}:
                    signalName = self.phvBus[0]["name"]
                    startPos = d["pos"][0] + self.headerBus[d["header"]][0]
                    connections[n] = ((signalName, startPos), i)
                    i += 1
            return connections
        if muxNum not in self.muxes:
            entity = self._getMuxEntity(muxNum)
            self._addVector(entity["control"], entity["wControl"])
            self._addVector(entity["input"], entity["wInput"])
            connections = genConnections(muxNum)
            self.muxes[muxNum] = (entity["name"], connections)
        else:
            warn("Trying to regenerate mux {}".format(muxNum))
def _validateInputs(funcIn):
    """Check a list of (got type, variable name, expected type) triples.

    Prints a colored warning for every mismatch and returns True only when
    every entry matches its expected type.
    """
    all_ok = True
    for got, var_name, expected in funcIn:
        if got == expected:
            continue
        # warn in yellow, resetting the terminal colour afterwards
        print(Fore.YELLOW + "Wrong {} type got {}"
              ", expected {} {}".format(var_name, got, expected,
                                        Style.RESET_ALL))
        all_ok = False
    return all_ok
def exportDeparserToVHDL(deparser, outputFolder, phvBus, baseName="deparser"):
    """Generate the VHDL deparser and its testbench under outputFolder.

    Validates the string arguments first and returns None when they have the
    wrong type; otherwise returns the configured deparserHDL generator.
    """
    checks = [(type(outputFolder), "outputFolder", str),
              (type(baseName), "baseName", str)]
    if not _validateInputs(checks):
        return
    if not path.exists(outputFolder):
        mkdir(outputFolder)
    main_file = path.join(outputFolder, baseName + ".vhdl")
    tb_file = path.join(outputFolder, "{}_tb.vhdl".format(baseName))
    generator = deparserHDL(deparser, outputFolder, 'library', phvBus, baseName)
    generator.genMuxes()
    generator.genPayloadShifter()
    generator.writeFiles(main_file)
    generator.writeTB(tb_file)
    return generator
| 39.76431
| 81
| 0.512151
|
4a01b3729f5904d4c48a50e0f33c0bd90cbb9916
| 20,842
|
py
|
Python
|
lingvo/tasks/lm/model.py
|
j-luo93/lingvo
|
7398974078391362f0c1b027164a8f33f88cf86b
|
[
"Apache-2.0"
] | 4
|
2019-01-08T02:59:38.000Z
|
2022-02-18T11:31:37.000Z
|
lingvo/tasks/lm/model.py
|
j-luo93/lingvo
|
7398974078391362f0c1b027164a8f33f88cf86b
|
[
"Apache-2.0"
] | null | null | null |
lingvo/tasks/lm/model.py
|
j-luo93/lingvo
|
7398974078391362f0c1b027164a8f33f88cf86b
|
[
"Apache-2.0"
] | 1
|
2019-07-02T14:09:42.000Z
|
2019-07-02T14:09:42.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LM models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip
import tensorflow as tf
import numpy as np
from lingvo.core import base_layer
from lingvo.core import base_model
from lingvo.core import lr_schedule
from lingvo.core import py_utils
from lingvo.tasks.lm import layers
class LanguageModel(base_model.BaseTask):
  """RNN language model task with optional HRR embeddings and chunk losses.

  Extends the stock lingvo LM task with:
    * optional `contiguous` mode that carries RNN state across consecutive
      batches via non-trainable `last_state_*` variables,
    * an isometric regularizer for HRR embedding parameters,
    * an annealed chunk-level loss when `lm.use_chunks` is set.
  """

  @classmethod
  def Params(cls):
    """Returns task params with LM-specific training knobs defined."""
    p = super(LanguageModel, cls).Params()
    p.Define('lm', layers.RnnLm.Params(), 'LM layer.')
    tp = p.train
    tp.Define(
        'max_lstm_gradient_norm', 0.0,
        'Clip gradient for vars in lstm layers by setting this value to '
        'something > 0.')
    tp.Define(
        'sum_loss_across_tokens_in_batch', False,
        'Sum the logP across predicted tokens in batch when set to True; '
        'average across predicted tokens in batch o/w (default).')
    tp.Define('isometric', 0.0, 'Weight for isometric constraint')
    tp.Define('chunk_loss_anneal', 0.0, 'Anneal weight for chunk loss to 1.0 at this many steps')
    # Piecewise-constant LR decay; values has one more entry than boundaries.
    tp.lr_schedule = lr_schedule.PiecewiseConstantLearningRateSchedule.Params(
    ).Set(
        boundaries=[350000, 500000, 600000], values=[1.0, 0.1, 0.01, 0.001])
    tp.vn_start_step = 20000
    tp.vn_std = 0.0
    tp.learning_rate = 0.001
    tp.l2_regularizer_weight = 1e-6
    tp.clip_gradient_norm_to_value = 1.0
    tp.grad_norm_to_clip_to_zero = 100.0
    # batch_size must match the input pipeline when p.contiguous is enabled,
    # since the carried-state variables are statically shaped by it.
    p.Define('batch_size', 20, 'Batch size')
    p.Define('contiguous', False, 'Flag')
    return p

  @base_layer.initializer
  def __init__(self, params):
    """Builds the LM child layer and per-layer carried-state variables."""
    super(LanguageModel, self).__init__(params)
    p = self.params
    assert p.lm.vocab_size == p.input.tokenizer.vocab_size, (
        'lm.vocab_size does not match input.tokenizer.vocab_size: %d vs %d' %
        (p.lm.vocab_size, p.input.tokenizer.vocab_size))
    with tf.variable_scope(p.name):
      # Construct the model.
      self.CreateChild('lm', p.lm)
      def get_weight_params():
        # Non-trainable buffer holding one RNN layer's state (m or c);
        # shape [1, batch, dim] so it can be tf.assign'ed wholesale.
        # NOTE(review): assumes rnn state width == emb.embedding_dim; confirm.
        return py_utils.WeightParams(
            shape=[1, p.batch_size, p.lm.emb.embedding_dim],
            init=py_utils.WeightInit.Constant(scale=np.zeros([p.batch_size, p.lm.emb.embedding_dim])),
            dtype=tf.float32,
            collections=[self.__class__.__name__ + '_vars'])
      # buffs = dict()
      # One (m, c) pair of carried-state variables per RNN layer, used only
      # when p.contiguous is set.
      for i in range(p.lm.rnns.num_layers):
        m = get_weight_params()
        c = get_weight_params()
        self.CreateVariable('last_state_%d_m' %i, m, trainable=False)
        self.CreateVariable('last_state_%d_c' %i, c, trainable=False)
        # buffs['last_state_%d_m' %i] = tf.Variable(np.zeros([p.batch_size, p.lm.emb.embedding_dim]), trainable=False, name='last_state_%d_m' %i, dtype=tf.float32)
        # buffs['last_state_%d_c' %i] = tf.Variable(np.zeros([p.batch_size, p.lm.emb.embedding_dim]), trainable=False, name='last_state_%d_c' %i, dtype=tf.float32)
      # self.buffs = buffs

  def _TrimIfPossibleThenTranspose(self, ids, paddings, labels, weights, chunk_ids=None):
    """Trims trailing all-padding timesteps (off TPU) and transposes to time-major.

    Returns [ids, paddings, labels, weights, chunk_ids], each transposed to
    [time, batch]; chunk_ids may be None.
    """
    data = (ids, paddings, labels, weights)
    if not py_utils.use_tpu():
      # Longest real (unpadded) sequence length in the batch.
      max_seq_len = tf.cast(
          tf.reduce_max(tf.reduce_sum(1.0 - paddings, 1)), tf.int32)
      data = (x[:, :max_seq_len] for x in data)
      if chunk_ids is not None:
        chunk_ids = tf.transpose(chunk_ids[:, :max_seq_len])
    return [tf.transpose(x) for x in data] + [chunk_ids]

  def FPropTower(self, theta, input_batch):
    """Computes per-tower metrics (xent, accuracy, optional aux losses)."""
    p = self.params
    chunk_ids = input_batch.chunk_ids if p.lm.use_chunks else None
    ids, paddings, labels_ids, weights, chunk_ids = self._TrimIfPossibleThenTranspose(
        input_batch.ids, input_batch.paddings, input_batch.labels,
        input_batch.weights, chunk_ids=chunk_ids)
    # NOTE(review): seqlen is computed but never used below.
    seqlen = tf.shape(ids)[0]
    batch_size = tf.shape(ids)[1]
    zero_state = self.lm.zero_state(batch_size)
    with tf.name_scope('prepare_state'):
      if p.contiguous:
        # Carry state across batches: start from the saved last_state_* vars
        # when input_batch.take_last_state is set, else from zero state.
        state0 = py_utils.NestedMap(rnn=[])
        for i in range(p.lm.rnns.num_layers):
          if p.is_eval:
            last_m = tf.reshape(self.theta['last_state_%d_m' %i], [p.batch_size, p.lm.emb.embedding_dim])
            last_c = tf.reshape(self.theta['last_state_%d_c' %i], [p.batch_size, p.lm.emb.embedding_dim])
          else:
            last_m = self.theta['last_state_%d_m' %i]
            last_c = self.theta['last_state_%d_c' %i]
          m = tf.cond(input_batch.take_last_state, lambda: last_m, lambda: zero_state.rnn[i].m)
          c = tf.cond(input_batch.take_last_state, lambda: last_c, lambda: zero_state.rnn[i].c)
          # c = tf.Print(c, [c])
          state0.rnn.append(py_utils.NestedMap(c=c, m=m))
      else:
        state0 = zero_state
    labels = py_utils.NestedMap(class_ids=labels_ids, class_weights=weights)
    xent_output, state1 = self.lm.FProp(theta.lm, ids, paddings, state0, labels=labels, chunk_ids=chunk_ids)
    # self.state1 = state1
    if p.contiguous:
      # Persist the final RNN state so the next batch can resume from it.
      # The assign ops are grouped into self.last_state_group_op; presumably
      # the training loop runs it alongside the train op — confirm at caller.
      assign_ops = list()
      for i in range(p.lm.rnns.num_layers):
        m = tf.reshape(state1.rnn[i].m, [1, p.batch_size, p.lm.emb.embedding_dim])
        c = tf.reshape(state1.rnn[i].c, [1, p.batch_size, p.lm.emb.embedding_dim])
        if not p.is_eval:
          state1.rnn[i].m = m
          state1.rnn[i].c = c
        assign_ops.append(tf.assign(self.vars['last_state_%i_m' %i], m))
        assign_ops.append(tf.assign(self.vars['last_state_%i_c' %i], c))
      self.last_state_group_op = tf.group(*assign_ops)
    # +1 to account for the end of sequence symbol.
    div = 2 if p.input.use_chunks else 1 # tags shouldn't be counted as words
    num_words = tf.cast(
        tf.reduce_sum(input_batch.word_count // div + tf.constant(1, dtype=tf.int32) * (1 - p.contiguous)),
        tf.float32)
    predicted_labels = tf.cast(xent_output.per_example_argmax, labels_ids.dtype)
    num_preds = xent_output.total_weight
    # Weighted token accuracy; 1e-4 guards against division by zero.
    mean_acc = tf.reduce_sum(
        tf.cast(tf.equal(labels_ids, predicted_labels), tf.float32) *
        weights) / (
            num_preds + 1e-4)
    if p.lm.emb.cls == layers.HRREmbeddingLayer:
      if p.train.isometric > 0.0:
        # Isometric regularizer: pushes the HRR parameter matrices towards
        # orthonormal rows (X @ X^T close to the identity).
        isometric_constraint = 0.0
        nr = p.lm.emb.num_roles
        # TODO(jmluo) rearrange it to divide the code according to three modes
        if 'F' in theta.lm.emb:
          F_wm = theta.lm.emb.F
          nr, nf, d = F_wm.get_shape().as_list()
          # F2d leads to overspefication of parameters in F
          F2d = tf.reshape(F_wm, [nr * nf, d])
          diff = tf.matmul(F2d, tf.transpose(F2d)) - tf.eye(nr * nf)
          # diff = tf.matmul(F_wm, tf.transpose(F_wm, perm=[0, 2, 1])) - tf.eye(nf)
          isometric_constraint += tf.reduce_sum(diff**2)
        if 'A' in theta.lm:
          # NOTE(review): diff is computed here but the penalty line below is
          # commented out, so the 'A' branch currently contributes nothing.
          d = theta.lm.A.get_shape().as_list()[0]
          A = tf.reshape(theta.lm.A, [d, 2, d])
          A1 = A[:, 0]
          A2 = A[:, 1]
          diff = tf.matmul(A1, tf.transpose(A2)) / 2
          # isometric_constraint += tf.reduce_sum(diff ** 2)
        if nr > 1 and 'r' in theta.lm.emb:
          r_wm = theta.lm.emb.r
          diff = tf.matmul(r_wm, tf.transpose(r_wm)) - tf.eye(nr)
          isometric_constraint += tf.reduce_sum(diff**2)
        if 'R' in theta.lm:
          R_wm = theta.lm.R
          diff = tf.matmul(R_wm, tf.transpose(R_wm)) - tf.eye(p.lm.num_sent_roles)
          isometric_constraint += tf.reduce_sum(diff**2)
        if p.lm.emb.mode == 'rs':
          assert 'rR' in theta.lm.emb
          rR = theta.lm.emb.rR
          diff = tf.matmul(rR, tf.transpose(rR)) - tf.eye(2)
          isometric_constraint += tf.reduce_sum(diff ** 2)
          rs_all = theta.lm.emb.rs.wm
          for rs in rs_all:
            rs = tf.reshape(rs, [-1, 2, 2])
            # Penalize non-unit rows and values away from {0, 1}.
            norm = tf.reduce_sum(rs ** 2, axis=-1)
            isometric_constraint += tf.reduce_sum((norm - 1.0) ** 2) + tf.reduce_sum((rs ** 2) * ((1 - rs) ** 2))
            # Penalize non-orthogonal row pairs (off-diagonal cosine sims).
            normalized_rs = tf.nn.l2_normalize(rs, axis=-1)
            dot = tf.matmul(normalized_rs, tf.transpose(normalized_rs, perm=[0, 2, 1]))
            isometric_constraint += tf.reduce_sum(((dot * (tf.ones([2, 2]) - tf.eye(2))) ** 2) * 0.5)
          tf.summary.histogram('rs', tf.stack(rs_all))
        isometric_loss = isometric_constraint * p.train.isometric
    if p.lm.use_chunks:# and not p.is_eval:
      with tf.name_scope('global_decode'):
        assert p.lm.num_sent_roles > 0
        total_chunk_loss = -tf.reduce_sum(xent_output.chunk_log_probs)
        avg_chunk_loss = total_chunk_loss / xent_output.num_chunks
        # Linear anneal from 0 to 1 over chunk_loss_anneal steps.
        global_step = tf.to_float(py_utils.GetOrCreateGlobalStep())
        temperature = tf.minimum(tf.constant(p.train.chunk_loss_anneal), global_step) / p.train.chunk_loss_anneal
        tf.summary.scalar('chunk/temperature', temperature)
        annealed_total_chunk_loss = temperature * total_chunk_loss
        annealed_avg_chunk_loss = temperature * avg_chunk_loss
        chunk_loss = annealed_avg_chunk_loss
    loss = xent_output.avg_xent
    if p.train.sum_loss_across_tokens_in_batch:
      loss = xent_output.total_xent
      if 'chunk_loss' in locals():
        # Match the main loss: use the summed rather than averaged chunk loss.
        chunk_loss = annealed_total_chunk_loss
    metrics = {
        'fraction_of_correct_next_step_preds': (mean_acc, num_preds),
        'log_pplx': (xent_output.avg_xent, num_preds),
        'log_pplx_per_word': (xent_output.total_xent / num_words, num_words),
        'num_predictions': (num_preds, 1),
        'num_words': (num_words, 1)
    }
    #tmp_loss = loss# + theta.dummy * theta.dummy
    # The aux losses are exported as metrics here; FProp below folds them
    # into self._loss.
    if 'isometric_loss' in locals():
      #tmp_loss += isometric_loss
      metrics['isometric'] = (isometric_loss, 1)
    if 'chunk_loss' in locals():
      #tmp_loss += chunk_loss
      metrics['chunk_loss'] = (chunk_loss, 1)
      metrics['annealed_total_chunk_loss'] = (annealed_total_chunk_loss, 1)
      metrics['annealed_avg_chunk_loss'] = (annealed_avg_chunk_loss, xent_output.num_chunks)
      metrics['total_chunk_loss'] = (total_chunk_loss, 1)
      metrics['avg_chunk_loss'] = (avg_chunk_loss, xent_output.num_chunks)
      metrics['num_chunks'] = (xent_output.num_chunks, 1)
    #metrics['loss'] = (tmp_loss, num_preds)
    if p.train.sum_loss_across_tokens_in_batch:
      metrics['loss'] = (loss, 1)
    else:
      metrics['loss'] = (loss, num_preds)
    metrics['batch_size'] = (tf.cast(batch_size, tf.float32), 1)
    return metrics

  def AdjustEvalMetrics(self, metrics):
    """Converts a summed loss back to a per-example loss for eval reporting."""
    with tf.name_scope('aggregate_loss'):
      if self.params.train.sum_loss_across_tokens_in_batch:
        loss, w = metrics['loss']
        loss = loss / metrics['batch_size'][0]
        metrics['loss'] = (loss, w)
    return metrics

  def FProp(self, theta, input_batch):
    """Runs the base FProp, then adds aux losses (isometric, chunk) to _loss."""
    metrics = super(LanguageModel, self).FProp(theta, input_batch)
    if 'isometric' in metrics:
      self._loss = self._loss + metrics['isometric'][0]
    if 'chunk_loss' in metrics:
      if self.params.train.sum_loss_across_tokens_in_batch:
        self._loss = self._loss + metrics['annealed_total_chunk_loss'][0] / metrics['batch_size'][0]
      else:
        self._loss = self._loss + metrics['annealed_avg_chunk_loss'][0]
    return metrics

  def AdjustGradients(self, var_grad):
    """Clip LSTM gradients.

    Args:
      var_grad: a `.NestedMap` of (variable, gradient). You can view
        `var_grad` as an ordered list of (key, (var, grad)) tuples. Every
        key of `var_grad` exists in `vmap`. Every variable in `vmap` that
        contributes to loss must exist in `var_grad`. Every var of `var_grad`
        must exist in `vmap`. `grad` is the corresponding gradient computed
        for `var`. `grad` is guaranteed to be not None.

    Returns:
      adjusted version of `var_grad` that has clipped the LSTM gradients
      if `self.params.max_lstm_gradient_norm` is set.
    """
    p = self.params
    if p.train.max_lstm_gradient_norm:
      # Clip only the RNN-stack gradients by global norm, leaving the rest
      # to the task-level clipping configured in Params().
      lstm_var_grad = var_grad.lm.rnns
      lstm_vars = lstm_var_grad.Transform(lambda x: x[0]).Flatten()
      lstm_grads = lstm_var_grad.Transform(lambda x: x[1]).Flatten()
      clipped_lstm_grads, _ = tf.clip_by_global_norm(
          lstm_grads, p.train.max_lstm_gradient_norm)
      var_grad.lm.rnns = var_grad.lm.rnns.Pack(
          list(zip(lstm_vars, clipped_lstm_grads)))
    return var_grad

  def Inference(self):
    """Constructs the inference subgraphs.

    Returns:
      {'subgraph_name': (fetches, feeds)}
    """
    subgraphs = {}
    with tf.name_scope('inference'):
      subgraphs['default'] = self._InferenceSubgraph_Default()
      subgraphs['rnn_step'] = self._InferenceSubgraph_RNNStep()
    return subgraphs

  def _InferenceSubgraph_Default(self):
    """Default inference subgraph.

    Returns:
      fetches: A dictionary of fetches, containing:
        log_pplx_per_token: A matrix of shape [batch, time]. [i, j]
          is i-th input text's j-th token's log prob.
        paddings: A matrix of shape [batch, time]. The padding mask.
        log_pplx_per_sample: A vector of shape [batch]. [i]
          is i-th input text's log prob.
        num_oovs_per_sample: A vector of shape [batch] counting the total number
          of out-of-vocabulary tokens in each input.
        tokens_from_labels: A vector of shape [batch] returning the predicted
          tokens as a sequence after mapping them back to strings from ids using
          the vocabulary.
        ids: A matrix of shape [batch, time]. [i, j]
          is i-th input text's j-th token's id.
      feeds: A dictionary of feeds, containing:
        text: A placeholder for a vector of strings.
    """
    p = self.params
    text = tf.placeholder(tf.string, shape=[None])
    # [batch, time]
    ids, labels, paddings = self.input_generator.StringsToIds(text)
    chunk_ids = None
    if p.lm.gold_chunks:
      # NOTE(review): `lm_inp` is not among this file's visible imports;
      # verify it is imported elsewhere or this branch raises NameError.
      ids, labels, paddings, chunk_ids = lm_inp.LmInput.GetChunks(ids, labels, paddings)
    lengths = tf.reduce_sum(tf.to_int32(1 - paddings), axis=1)
    tokens_from_labels = self.input_generator.IdsToStrings(labels, lengths)
    oovs = tf.equal(labels, self.input_generator.tokenizer.unk_id)
    num_oovs_per_sample = tf.to_int32(
        tf.reduce_sum(tf.to_float(oovs) * (1 - paddings), axis=1))
    # [time, batch]
    ids, paddings, labels, weights, chunk_ids = self._TrimIfPossibleThenTranspose(
        ids, paddings, labels, 1.0 - paddings, chunk_ids)
    batch_size = tf.shape(ids)[1]
    state0 = self.lm.zero_state(batch_size)
    # Two-level (sentence-role) models take an extra lower-level state and
    # return an extra output.
    if p.lm.num_sent_roles > 0 and not p.lm.global_decode:
      lower_state0 = self.lm.zero_state(batch_size)
      xent_output, _, _ = self.lm.FPropDefaultTheta(
          inputs=ids,
          paddings=paddings,
          state0=state0,
          lower_state0=lower_state0,
          labels=py_utils.NestedMap(class_ids=labels, class_weights=weights),
          chunk_ids=chunk_ids,
          ids=ids)
    else:
      xent_output, _ = self.lm.FPropDefaultTheta(
          inputs=ids,
          paddings=paddings,
          state0=state0,
          labels=py_utils.NestedMap(class_ids=labels, class_weights=weights),
          chunk_ids=chunk_ids,
          ids=ids)
    per_example_xent = py_utils.HasShape(xent_output.per_example_xent,
                                         tf.shape(ids))
    log_pplx_per_sample = tf.reduce_sum(
        per_example_xent * (1 - paddings), axis=0)
    fetches = {
        'log_pplx_per_token':  # [batch, time]
            tf.transpose(per_example_xent),
        'paddings':  # [batch, time]
            tf.transpose(paddings),
        'lengths':  # [batch]
            lengths,
        'log_pplx_per_sample':  # [batch]
            log_pplx_per_sample,
        'num_oovs_per_sample':  # [batch], int32
            num_oovs_per_sample,
        'tokens_from_labels':  # [batch], string
            tokens_from_labels,
        'ids':  # [batch, time], int32
            ids
    }
    feeds = {'text': text}
    # Also pass intermediate results
    if 'inter_res' in xent_output:
      inter_res = xent_output.inter_res
      for key in inter_res:
        new_key = 'inter_res.%s' %key
        assert new_key not in fetches
        fetches[new_key] = getattr(inter_res, key)
    return fetches, feeds

  def _InferenceSubgraph_RNNStep(self):
    """Inference subgraph for one rnn step.

    Returns:
      fetches: A dictionary of fetches, containing:
        zero_m_out_i: A matrix of shape [batch, output_size].
          m values of the i-th layer of zero recurrent state.
        zero_c_out_i: A matrix of shape [batch, hidden_size].
          c values of the i-th layer of zero recurrent state.
        logits: A matrix of shape [batch, num_candidates]. [i, j]
          is i-th input's j-th candidate's logit.
        m_out_i: A matrix of shape [batch, output_size].
          m values of the i-th layer of new recurrent state after one step.
        c_out_i: A matrix of shape [batch, hidden_size].
          c values of the i-th layer of new recurrent state after one step.
      feeds: A dictionary of feeds, containing:
        step_ids: A matrix of shape [batch, 1]. [i, 0]
          is the word id to run one step for the i-th input.
        candidate_ids: A 3D tensor of shape [batch, num_candidates, 2].
          [i, j, 0] = i just for indexing convenience.
          [i, j, 1] is the word id of the i-th input's j-th candidate.
        m_in_i: A matrix of shape [batch, output_size].
          m values of input recurrent state.
        c_in_i: A matrix of shape [batch, hidden_size].
          c values of input recurrent state.
    """
    fetches, feeds = {}, {}
    # Run one step with input ids and return logits.
    # [batch, 1]
    step_ids = tf.placeholder(tf.int32, [None, 1])
    feeds['step_ids'] = step_ids
    # Return logits only for certain candidate ids. This is to avoid returning
    # a big list of logits for all words.
    # This is a 3D tensor and it satisfies that:
    #   candidate_ids[i, j, 0] = i (just for indexing convenience)
    #   candidate_ids[i, j, 1] = the word id of the j-th candidate
    # [batch, num_candidates, 2]
    candidate_ids = tf.placeholder(tf.int32, [None, None, 2])
    feeds['candidate_ids'] = candidate_ids
    # Get initial zero states.
    batch_size = tf.shape(step_ids)[0]
    zero_state = self.lm.zero_state(batch_size)
    # Input LM state.
    state0 = zero_state.Transform(lambda x: tf.placeholder(tf.float32))
    # Run LM for one step
    step_ids_vec = tf.reshape(step_ids, [-1])
    step_paddings = tf.zeros(tf.shape(step_ids_vec), dtype=self.params.dtype)
    p = self.params
    lower_state0 = None
    if p.lm.num_sent_roles > 0 and not p.lm.global_decode:
      lower_zero_state0 = self.lm.lower_rnns.zero_state(batch_size)
      lower_state0 = lower_zero_state0.Transform(lambda x: tf.placeholder(tf.float32))
    res = self.lm.Step(self.lm.theta, step_ids_vec, step_paddings,
                       state0, lower_state0=lower_state0, step_inference=True) # TODO(jmluo) HACKY
    if p.lm.num_sent_roles > 0 and not p.lm.global_decode:
      out, state1, lower_state1 = res
      # add more feeds and fetches for lower level rnn
      feeds['lowerrnnstate:m'] = lower_state0.rnn[0].m
      feeds['lowerrnnstate:c'] = lower_state0.rnn[0].c
      fetches['lowerrnnstate:m'] = lower_state1.rnn[0].m
      fetches['lowerrnnstate:c'] = lower_state1.rnn[0].c
    else:
      out, state1 = res
    # Create feeds/fetches map for states.
    for i, (zero_s, s0, s1) in enumerate(
        zip(zero_state.rnn, state0.rnn, state1.rnn)):
      feeds['rnnstate:m_%02d' % i] = s0.m
      feeds['rnnstate:c_%02d' % i] = s0.c
      fetches['rnnstate:zero_m_%02d' % i] = zero_s.m
      fetches['rnnstate:zero_c_%02d' % i] = zero_s.c
      fetches['rnnstate:m_%02d' % i] = s1.m
      fetches['rnnstate:c_%02d' % i] = s1.c
    # Collect logits for candidates
    # [batch, num_candidates]
    # Log-softmax rather than raw logits: gather candidate probabilities,
    # then take the log.
    prob = tf.nn.softmax(out.logits)
    candidate_prob = tf.gather_nd(prob, candidate_ids)
    candidate_logits = tf.log(candidate_prob)
    fetches['logits'] = candidate_logits
    if 'gating_probs' in out:
      fetches['gating_probs'] = out.gating_probs
    if 'cce' in out:
      fetches['cce'] = out.cce
    # print('check here', fetches)
    return fetches, feeds
| 41.767535
| 161
| 0.647587
|
4a01b3b3187cd32561ac5627aa749d00d8bef6e7
| 2,127
|
py
|
Python
|
src/server/unix_user_handler.py
|
CDAT/libcdms
|
5ae832b52c8235ff859ce8e099347d42943bbd8a
|
[
"BSD-3-Clause"
] | null | null | null |
src/server/unix_user_handler.py
|
CDAT/libcdms
|
5ae832b52c8235ff859ce8e099347d42943bbd8a
|
[
"BSD-3-Clause"
] | 3
|
2018-05-28T01:12:59.000Z
|
2020-11-05T00:56:00.000Z
|
src/server/unix_user_handler.py
|
UV-CDAT/libcdms
|
5ae832b52c8235ff859ce8e099347d42943bbd8a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- Mode: Python; tab-width: 4 -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
# Copyright 1996, 1997 by Sam Rushing
# All Rights Reserved.
#
# This software is provided free for non-commercial use. If you are
# interested in using this software in a commercial context, or in
# purchasing support, please contact the author.
# Legacy RCS keyword-expansion id for this file.
RCS_ID = '$Id$'
# support for `~user/public_html'.
# NOTE(review): `regex` is the ancient pre-`re` standard-library module
# (emacs-style escaped groups), removed in Python 2.5 — this is legacy
# Python 1.x/2.x code and will not import on modern Python.
import regex
import string
import default_handler
import filesys
import os
import pwd
# Convenience aliases re-exported from default_handler.
split_path = default_handler.split_path
get_header = default_handler.get_header
# Matches '/~<user><rest>': group 1 = user name, group 2 = remainder of URI.
user_dir = regex.compile ('/~\([^/]+\)\(.*\)')
class unix_user_handler (default_handler.default_handler):
    """Serve `/~user/...` URLs from each Unix user's public_html directory.

    Delegates actual file serving to default_handler after swapping in a
    per-user filesystem object rooted at <home>/<public_html>.
    """

    def __init__ (self, public_html = 'public_html'):
        # Directory name under each user's home to serve from.
        self.public_html = public_html
        # Parent is constructed with no filesystem; one is installed per
        # request in handle_request below.
        default_handler.default_handler.__init__ (self, None)

    # cache userdir-filesystem objects
    # Class-level cache shared by all instances: user name -> filesystem.
    fs_cache = {}

    def match (self, request):
        """Return 1 iff the whole URI matches the '/~user...' pattern."""
        # Legacy `regex` API: match() returns the length of the match, so
        # comparing to len(uri) requires the pattern to consume the full URI.
        if user_dir.match (request.uri) == len(request.uri):
            return 1
        else:
            return 0

    def handle_request (self, request):
        """Serve the request out of the matched user's public_html tree."""
        # get the user name
        # Legacy `regex` API: group() reads the groups of the last match()
        # performed on this pattern object (done in match() above).
        user = user_dir.group(1)
        rest = user_dir.group(2)
        # special hack to catch those lazy URL typers
        if not rest:
            # Redirect '/~user' to '/~user/' with a 301.
            request['Location'] = 'http://%s/~%s/' % (
                request.channel.server.server_name,
                user
                )
            request.error (301)
            return
        # have we already built a userdir fs for this user?
        if self.fs_cache.has_key (user):
            fs = self.fs_cache[user]
        else:
            # no, well then, let's build one.
            # first, find out where the user directory is
            try:
                info = pwd.getpwnam (user)
            except KeyError:
                # Unknown user -> 404.
                request.error (404)
                return
            # pwd entry field 5 is the home directory.
            ud = info[5] + '/' + self.public_html
            if os.path.isdir (ud):
                fs = filesys.os_filesystem (ud)
                self.fs_cache[user] = fs
            else:
                request.error (404)
                return
        # fake out default_handler
        # NOTE(review): assigning to self.filesystem mutates shared handler
        # state per request — not safe if requests ever interleave; confirm
        # the server is strictly single-threaded.
        self.filesystem = fs
        # massage the request URI
        request.uri = '/' + rest
        return default_handler.default_handler.handle_request (self, request)

    def __repr__ (self):
        return '<Unix User Directory Handler at %08x [~user/%s, %d filesystems loaded]>' % (
            id(self),
            self.public_html,
            len(self.fs_cache)
            )
| 24.448276
| 86
| 0.686883
|
4a01b3d51a793b1f82b03886cc24dc79e45aad08
| 10,270
|
py
|
Python
|
numba/cuda/tests/cudapy/test_array.py
|
luk-f-a/numba
|
3a682bd827e416335e3574bc7b10f0ec69adb701
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2021-08-10T05:33:29.000Z
|
2021-08-10T05:33:29.000Z
|
numba/cuda/tests/cudapy/test_array.py
|
luk-f-a/numba
|
3a682bd827e416335e3574bc7b10f0ec69adb701
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
numba/cuda/tests/cudapy/test_array.py
|
luk-f-a/numba
|
3a682bd827e416335e3574bc7b10f0ec69adb701
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2021-01-31T18:58:54.000Z
|
2021-01-31T18:58:54.000Z
|
import numpy as np
from numba.cuda.testing import unittest, CUDATestCase
from numba.cuda.testing import skip_on_cudasim, skip_unless_cudasim
from numba import config, cuda
# Factory functions exercised by the *_array_like tests below. The simulator
# build omits cuda.mapped_array_like (only device/pinned variants are listed
# under ENABLE_CUDASIM).
if config.ENABLE_CUDASIM:
    ARRAY_LIKE_FUNCTIONS = (cuda.device_array_like, cuda.pinned_array_like)
else:
    ARRAY_LIKE_FUNCTIONS = (cuda.device_array_like, cuda.mapped_array_like,
                            cuda.pinned_array_like)
class TestCudaArray(CUDATestCase):
    """Tests for CUDA device-array construction, views, and *_array_like."""

    def test_gpu_array_zero_length(self):
        # Round-tripping an empty array must preserve shape/size.
        x = np.arange(0)
        dx = cuda.to_device(x)
        hx = dx.copy_to_host()
        self.assertEqual(x.shape, dx.shape)
        self.assertEqual(x.size, dx.size)
        self.assertEqual(x.shape, hx.shape)
        self.assertEqual(x.size, hx.size)

    def test_gpu_array_strided(self):
        @cuda.jit('void(double[:])')
        def kernel(x):
            i = cuda.grid(1)
            if i < x.shape[0]:
                x[i] = i

        # z is a strided (non-trivially aligned) view built over x's buffer.
        x = np.arange(10, dtype=np.double)
        y = np.ndarray(shape=10 * 8, buffer=x, dtype=np.byte)
        z = np.ndarray(9, buffer=y[4:-4], dtype=np.double)
        kernel[10, 10](z)
        self.assertTrue(np.allclose(z, list(range(9))))

    def test_gpu_array_interleaved(self):
        # NOTE(review): copykernel is defined but never launched — the launch
        # is in the commented-out block below.
        @cuda.jit('void(double[:], double[:])')
        def copykernel(x, y):
            i = cuda.grid(1)
            if i < x.shape[0]:
                x[i] = i
                y[i] = i

        x = np.arange(10, dtype=np.double)
        y = x[:-1:2]
        # z = x[1::2]
        # n = y.size
        # Transferring a non-contiguous view must be rejected.
        try:
            cuda.devicearray.auto_device(y)
        except ValueError:
            pass
        else:
            raise AssertionError("Should raise exception complaining the "
                                 "contiguous-ness of the array.")

        # Should we handle this use case?
        # assert z.size == y.size
        # copykernel[1, n](y, x)
        # print(y, z)
        # assert np.all(y == z)
        # assert np.all(y == list(range(n)))

    def test_auto_device_const(self):
        # Scalars are auto-promoted to 0-d device arrays.
        d, _ = cuda.devicearray.auto_device(2)
        self.assertTrue(np.all(d.copy_to_host() == np.array(2)))

    def _test_array_like_same(self, like_func, array):
        """
        Tests of *_array_like where shape, strides, dtype, and flags should
        all be equal.
        """
        array_like = like_func(array)
        self.assertEqual(array.shape, array_like.shape)
        self.assertEqual(array.strides, array_like.strides)
        self.assertEqual(array.dtype, array_like.dtype)
        self.assertEqual(array.flags['C_CONTIGUOUS'],
                         array_like.flags['C_CONTIGUOUS'])
        self.assertEqual(array.flags['F_CONTIGUOUS'],
                         array_like.flags['F_CONTIGUOUS'])

    def test_array_like_1d(self):
        d_a = cuda.device_array(10, order='C')
        for like_func in ARRAY_LIKE_FUNCTIONS:
            with self.subTest(like_func=like_func):
                self._test_array_like_same(like_func, d_a)

    def test_array_like_2d(self):
        d_a = cuda.device_array((10, 12), order='C')
        for like_func in ARRAY_LIKE_FUNCTIONS:
            with self.subTest(like_func=like_func):
                self._test_array_like_same(like_func, d_a)

    def test_array_like_2d_transpose(self):
        # NOTE(review): d_a is passed un-transposed, making this identical to
        # test_array_like_2d — `self._test_array_like_same(like_func, d_a.T)`
        # was presumably intended; confirm against upstream before changing.
        d_a = cuda.device_array((10, 12), order='C')
        for like_func in ARRAY_LIKE_FUNCTIONS:
            with self.subTest(like_func=like_func):
                self._test_array_like_same(like_func, d_a)

    def test_array_like_3d(self):
        d_a = cuda.device_array((10, 12, 14), order='C')
        for like_func in ARRAY_LIKE_FUNCTIONS:
            with self.subTest(like_func=like_func):
                self._test_array_like_same(like_func, d_a)

    def test_array_like_1d_f(self):
        d_a = cuda.device_array(10, order='F')
        for like_func in ARRAY_LIKE_FUNCTIONS:
            with self.subTest(like_func=like_func):
                self._test_array_like_same(like_func, d_a)

    def test_array_like_2d_f(self):
        d_a = cuda.device_array((10, 12), order='F')
        for like_func in ARRAY_LIKE_FUNCTIONS:
            with self.subTest(like_func=like_func):
                self._test_array_like_same(like_func, d_a)

    def test_array_like_2d_f_transpose(self):
        # NOTE(review): same concern as test_array_like_2d_transpose — the
        # transpose is never applied to d_a.
        d_a = cuda.device_array((10, 12), order='F')
        for like_func in ARRAY_LIKE_FUNCTIONS:
            with self.subTest(like_func=like_func):
                self._test_array_like_same(like_func, d_a)

    def test_array_like_3d_f(self):
        d_a = cuda.device_array((10, 12, 14), order='F')
        for like_func in ARRAY_LIKE_FUNCTIONS:
            with self.subTest(like_func=like_func):
                self._test_array_like_same(like_func, d_a)

    def _test_array_like_view(self, like_func, view, d_view):
        """
        Tests of device_array_like where the original array is a view - the
        strides should not be equal because a contiguous array is expected.
        """
        nb_like = like_func(d_view)
        self.assertEqual(d_view.shape, nb_like.shape)
        self.assertEqual(d_view.dtype, nb_like.dtype)

        # Use NumPy as a reference for the expected strides
        np_like = np.zeros_like(view)
        self.assertEqual(nb_like.strides, np_like.strides)
        self.assertEqual(nb_like.flags['C_CONTIGUOUS'],
                         np_like.flags['C_CONTIGUOUS'])
        self.assertEqual(nb_like.flags['F_CONTIGUOUS'],
                         np_like.flags['F_CONTIGUOUS'])

    def test_array_like_1d_view(self):
        shape = 10
        view = np.zeros(shape)[::2]
        d_view = cuda.device_array(shape)[::2]
        for like_func in ARRAY_LIKE_FUNCTIONS:
            with self.subTest(like_func=like_func):
                self._test_array_like_view(like_func, view, d_view)

    def test_array_like_1d_view_f(self):
        shape = 10
        view = np.zeros(shape, order='F')[::2]
        d_view = cuda.device_array(shape, order='F')[::2]
        for like_func in ARRAY_LIKE_FUNCTIONS:
            with self.subTest(like_func=like_func):
                self._test_array_like_view(like_func, view, d_view)

    def test_array_like_2d_view(self):
        shape = (10, 12)
        view = np.zeros(shape)[::2, ::2]
        d_view = cuda.device_array(shape)[::2, ::2]
        for like_func in ARRAY_LIKE_FUNCTIONS:
            with self.subTest(like_func=like_func):
                self._test_array_like_view(like_func, view, d_view)

    def test_array_like_2d_view_f(self):
        shape = (10, 12)
        view = np.zeros(shape, order='F')[::2, ::2]
        d_view = cuda.device_array(shape, order='F')[::2, ::2]
        for like_func in ARRAY_LIKE_FUNCTIONS:
            with self.subTest(like_func=like_func):
                self._test_array_like_view(like_func, view, d_view)

    @skip_on_cudasim('Numba and NumPy stride semantics differ for transpose')
    def test_array_like_2d_view_transpose_device(self):
        shape = (10, 12)
        d_view = cuda.device_array(shape)[::2, ::2].T
        for like_func in ARRAY_LIKE_FUNCTIONS:
            with self.subTest(like_func=like_func):
                # This is a special case (see issue #4974) because creating the
                # transpose creates a new contiguous allocation with different
                # strides. In this case, rather than comparing against NumPy,
                # we can only compare against expected values.
                like = like_func(d_view)
                self.assertEqual(d_view.shape, like.shape)
                self.assertEqual(d_view.dtype, like.dtype)
                self.assertEqual((40, 8), like.strides)
                self.assertTrue(like.flags['C_CONTIGUOUS'])
                self.assertFalse(like.flags['F_CONTIGUOUS'])

    @skip_unless_cudasim('Numba and NumPy stride semantics differ for '
                         'transpose')
    def test_array_like_2d_view_transpose_simulator(self):
        shape = (10, 12)
        view = np.zeros(shape)[::2, ::2].T
        d_view = cuda.device_array(shape)[::2, ::2].T
        for like_func in ARRAY_LIKE_FUNCTIONS:
            with self.subTest(like_func=like_func):
                # On the simulator, the transpose has different strides to on a
                # CUDA device (See issue #4974). Here we can compare strides
                # against NumPy as a reference.
                np_like = np.zeros_like(view)
                nb_like = like_func(d_view)
                self.assertEqual(d_view.shape, nb_like.shape)
                self.assertEqual(d_view.dtype, nb_like.dtype)
                self.assertEqual(np_like.strides, nb_like.strides)
                self.assertEqual(np_like.flags['C_CONTIGUOUS'],
                                 nb_like.flags['C_CONTIGUOUS'])
                self.assertEqual(np_like.flags['F_CONTIGUOUS'],
                                 nb_like.flags['F_CONTIGUOUS'])

    def test_array_like_2d_view_f_transpose(self):
        shape = (10, 12)
        view = np.zeros(shape, order='F')[::2, ::2].T
        d_view = cuda.device_array(shape, order='F')[::2, ::2].T
        for like_func in ARRAY_LIKE_FUNCTIONS:
            with self.subTest(like_func=like_func):
                self._test_array_like_view(like_func, view, d_view)

    @skip_on_cudasim('Kernel definitions not created in the simulator')
    def test_issue_4628(self):
        # CUDA Device arrays were reported as always being typed with 'A' order
        # so launching the kernel with a host array and then a device array
        # resulted in two definitions being compiled - one for 'C' order from
        # the host array, and one for 'A' order from the device array. With the
        # resolution of this issue, the order of the device array is also 'C',
        # so after the kernel launches there should only be one definition of
        # the function.
        @cuda.jit
        def func(A, out):
            i = cuda.grid(1)
            out[i] = A[i] * 2

        n = 128
        a = np.ones((n,))
        d_a = cuda.to_device(a)
        result = np.zeros((n,))

        func[1, 128](a, result)
        func[1, 128](d_a, result)

        self.assertEqual(1, len(func.definitions))
# Allow running this test module directly (outside the numba test runner).
if __name__ == '__main__':
    unittest.main()
| 40.433071
| 79
| 0.606232
|
4a01b477b935366f9261af54b85e8a4540ee66f7
| 5,045
|
py
|
Python
|
lazy/serialize/_multi.py
|
trisongz/lazycls
|
701bad1a358ed3bb136347d0c5eb81de3201f6a3
|
[
"MIT"
] | 2
|
2021-12-02T00:13:16.000Z
|
2022-02-26T11:18:33.000Z
|
lazy/serialize/_multi.py
|
trisongz/lazycls
|
701bad1a358ed3bb136347d0c5eb81de3201f6a3
|
[
"MIT"
] | null | null | null |
lazy/serialize/_multi.py
|
trisongz/lazycls
|
701bad1a358ed3bb136347d0c5eb81de3201f6a3
|
[
"MIT"
] | null | null | null |
"""
Serializers that depend on/combine with one another
"""
# Public re-export surface of this module.
# Fix: the last entry previously read 'JasonBGZ', which matches no class in
# this module (the class defined below is `JsonBGZ`), so
# `from lazy.serialize._multi import *` raised AttributeError.
__all__ = (
    'YamlBase64', 'YamlBGZ',
    'JsonBase64', 'JsonBGZ'
)
from typing import Dict, Any, List, Union
from .core import Defaults
from ._json import Json
from ._yaml import Yaml
from ._base import Base
class YamlBase64(Yaml):
    """YAML serializer whose wire format is base64-encoded YAML text."""

    @classmethod
    def dumps(cls, obj: Dict[Any, Any], dumper: str = Defaults.yaml_dumper, *args, default: Dict[Any, Any] = None, **kwargs) -> str:
        """Serialize ``obj`` to YAML, then base64-encode the YAML text."""
        yaml_text = cls._encode(obj, dumper = dumper, *args, default = default, **kwargs)
        return Base.b64_encode(yaml_text, *args, **kwargs)

    @classmethod
    def loads(cls, data: Union[str, bytes], loader: str = Defaults.yaml_loader, *args, **kwargs) -> Union[Dict[Any, Any], List[str]]:
        """Base64-decode ``data``, then parse the resulting YAML payload."""
        yaml_text = Base.b64_decode(data, *args, **kwargs)
        return cls._decode(yaml_text, loader = loader, *args, **kwargs)

    @classmethod
    async def async_dumps(cls, obj: Dict[Any, Any], dumper: str = Defaults.yaml_dumper, *args, default: Any = None, **kwargs) -> str:
        """Async variant of :meth:`dumps`; raises when no async backend exists."""
        if not cls.async_supported:
            raise Exception
        yaml_text = await cls._async_encode(obj, dumper = dumper, *args, default = default, **kwargs)
        return Base.b64_encode(yaml_text, *args, **kwargs)

    @classmethod
    async def async_loads(cls, data: Union[str, bytes], loader: str = Defaults.yaml_loader, *args, **kwargs) -> Union[Dict[Any, Any], List[str]]:
        """Async variant of :meth:`loads`; raises when no async backend exists."""
        if not cls.async_supported:
            raise Exception
        yaml_text = Base.b64_decode(data, *args, **kwargs)
        return await cls._async_decode(yaml_text, loader = loader, *args, **kwargs)
class YamlBGZ(Yaml):
    """YAML serializer whose wire format is base64-encoded gzipped YAML."""

    @classmethod
    def dumps(cls, obj: Dict[Any, Any], dumper: str = Defaults.yaml_dumper, *args, default: Dict[Any, Any] = None, **kwargs) -> str:
        """Serialize ``obj`` to YAML, then gzip + base64-encode the text."""
        yaml_text = cls._encode(obj, dumper = dumper, *args, default = default, **kwargs)
        return Base.b64_gzip_encode(yaml_text, *args, **kwargs)

    @classmethod
    def loads(cls, data: Union[str, bytes], loader: str = Defaults.yaml_loader, *args, **kwargs) -> Union[Dict[Any, Any], List[str]]:
        """Base64-decode and gunzip ``data``, then parse the YAML payload."""
        yaml_text = Base.b64_gzip_decode(data, *args, **kwargs)
        return cls._decode(yaml_text, loader = loader, *args, **kwargs)

    @classmethod
    async def async_dumps(cls, obj: Dict[Any, Any], dumper: str = Defaults.yaml_dumper, *args, default: Any = None, **kwargs) -> str:
        """Async variant of :meth:`dumps`; raises when no async backend exists."""
        if not cls.async_supported:
            raise Exception
        yaml_text = await cls._async_encode(obj, dumper = dumper, *args, default = default, **kwargs)
        return Base.b64_gzip_encode(yaml_text, *args, **kwargs)

    @classmethod
    async def async_loads(cls, data: Union[str, bytes], loader: str = Defaults.yaml_loader, *args, **kwargs) -> Union[Dict[Any, Any], List[str]]:
        """Async variant of :meth:`loads`; raises when no async backend exists."""
        if not cls.async_supported:
            raise Exception
        yaml_text = Base.b64_gzip_decode(data, *args, **kwargs)
        return await cls._async_decode(yaml_text, loader = loader, *args, **kwargs)
class JsonBase64(Json):
    """JSON serializer whose wire format is base64-encoded JSON text."""

    @classmethod
    def dumps(cls, obj: Dict[Any, Any], *args, default: Dict[Any, Any] = None, **kwargs) -> str:
        """Serialize ``obj`` to JSON, then base64-encode the JSON text."""
        json_text = cls._encode(obj, *args, default = default, **kwargs)
        return Base.b64_encode(json_text, *args, **kwargs)

    @classmethod
    def loads(cls, data: Union[str, bytes], *args, **kwargs) -> Union[Dict[Any, Any], List[str]]:
        """Base64-decode ``data``, then parse the resulting JSON payload."""
        json_text = Base.b64_decode(data, *args, **kwargs)
        return cls._decode(json_text, *args, **kwargs)

    @classmethod
    async def async_dumps(cls, obj: Dict[Any, Any], *args, default: Any = None, **kwargs) -> str:
        """Async variant of :meth:`dumps`; raises when no async backend exists."""
        if not cls.async_supported:
            raise Exception
        json_text = await cls._async_encode(obj, *args, default = default, **kwargs)
        return Base.b64_encode(json_text, *args, **kwargs)

    @classmethod
    async def async_loads(cls, data: Union[str, bytes], *args, **kwargs) -> Union[Dict[Any, Any], List[str]]:
        """Async variant of :meth:`loads`; raises when no async backend exists."""
        if not cls.async_supported:
            raise Exception
        json_text = Base.b64_decode(data, *args, **kwargs)
        return await cls._async_decode(json_text, *args, **kwargs)
class JsonBGZ(Json):
    """JSON serializer whose wire format is base64-encoded gzipped JSON."""

    @classmethod
    def dumps(cls, obj: Dict[Any, Any], *args, default: Dict[Any, Any] = None, **kwargs) -> str:
        """Serialize ``obj`` to JSON, then gzip + base64-encode the text."""
        json_text = cls._encode(obj, *args, default = default, **kwargs)
        return Base.b64_gzip_encode(json_text, *args, **kwargs)

    @classmethod
    def loads(cls, data: Union[str, bytes], *args, **kwargs) -> Union[Dict[Any, Any], List[str]]:
        """Base64-decode and gunzip ``data``, then parse the JSON payload."""
        json_text = Base.b64_gzip_decode(data, *args, **kwargs)
        return cls._decode(json_text, *args, **kwargs)

    @classmethod
    async def async_dumps(cls, obj: Dict[Any, Any], *args, default: Any = None, **kwargs) -> str:
        """Async variant of :meth:`dumps`; raises when no async backend exists."""
        if not cls.async_supported:
            raise Exception
        json_text = await cls._async_encode(obj, *args, default = default, **kwargs)
        return Base.b64_gzip_encode(json_text, *args, **kwargs)

    @classmethod
    async def async_loads(cls, data: Union[str, bytes], *args, **kwargs) -> Union[Dict[Any, Any], List[str]]:
        """Async variant of :meth:`loads`; raises when no async backend exists."""
        if not cls.async_supported:
            raise Exception
        json_text = Base.b64_gzip_decode(data, *args, **kwargs)
        return await cls._async_decode(json_text, *args, **kwargs)
| 44.254386
| 145
| 0.645193
|
4a01b6221aba07cbb41dae59227ed22f3d630822
| 681
|
py
|
Python
|
examples/cat.py
|
lggruspe/genbu
|
fd87ae31155eabc12aaf02ebfcf34b05c77dc428
|
[
"MIT"
] | 1
|
2021-05-01T15:35:19.000Z
|
2021-05-01T15:35:19.000Z
|
examples/cat.py
|
lggruspe/genbu
|
fd87ae31155eabc12aaf02ebfcf34b05c77dc428
|
[
"MIT"
] | null | null | null |
examples/cat.py
|
lggruspe/genbu
|
fd87ae31155eabc12aaf02ebfcf34b05c77dc428
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import sys
from genbu import Genbu, Param, combinators as comb, usage
def cat(path: Path) -> str:
    """Concatenate contents of path to stdout."""
    contents = path.read_text()
    return contents
# Command-line wrapper around `cat`: genbu parses argv into the function's
# parameters according to the Param specs below.
cli = Genbu(
    cat,
    params=[
        Param("path", ["-p", "--path"], comb.One(Path)),
        Param(
            "help_",
            ["-?", "-h", "--help"],
            comb.Emit(True),
            # When any help flag is seen, print usage and exit immediately.
            aggregator=lambda _: sys.exit(usage(cli)),
        ),
    ],
)
if __name__ == "__main__":
    try:
        print(cli.run())
    except Exception as exc:
        # Report errors in the conventional "prog: message" CLI style.
        name = " ".join(cli.complete_name())
        print(f"{name}: {exc}\nTry '{name} -h' for more information.")
| 21.967742
| 70
| 0.53304
|
4a01b623b1a720e132b35e08389234331b8b3c72
| 2,490
|
py
|
Python
|
src/schema2rst/inspectors/mysql.py
|
MindStrongHealth/schema2rst
|
50f4f6b238c2a4a16ce3ad067dbab698f1d19a41
|
[
"Apache-1.1"
] | 1
|
2020-11-19T17:49:35.000Z
|
2020-11-19T17:49:35.000Z
|
src/schema2rst/inspectors/mysql.py
|
MindStrongHealth/schema2rst
|
50f4f6b238c2a4a16ce3ad067dbab698f1d19a41
|
[
"Apache-1.1"
] | null | null | null |
src/schema2rst/inspectors/mysql.py
|
MindStrongHealth/schema2rst
|
50f4f6b238c2a4a16ce3ad067dbab698f1d19a41
|
[
"Apache-1.1"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2011 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from schema2rst.inspectors.base import SimpleInspector
class MySQLInspector(SimpleInspector):
    """Schema inspector that augments the base reflection with MySQL-specific
    metadata (table/column comments, collation, extras) read from
    ``information_schema``.

    NOTE(review): the queries below interpolate identifiers with ``%`` string
    formatting rather than bound parameters. The values come from the database's
    own introspection (schema/table/column names), so the injection risk is
    limited, but parameterized queries would still be safer — confirm against
    the SQLAlchemy version in use.
    """

    def get_tables(self, **kw):
        # Start from the generic table list, then attach MySQL table comments.
        tables = super(MySQLInspector, self).get_tables(**kw)
        for table in tables:
            query = ("""SELECT TABLE_COMMENT
                        FROM information_schema.Tables
                        WHERE TABLE_SCHEMA = '%s' AND TABLE_NAME = '%s'""" %
                     (self.default_schema_name, table['name']))
            r = self.bind.execute(query).fetchone()
            # MySQL appends "; InnoDB free: ..." noise to comments; strip it.
            table['fullname'] = re.sub('; InnoDB.*$', '', r[0])
            # A comment that is only engine noise means "no comment".
            if table['fullname'].startswith('InnoDB'):
                table['fullname'] = None
        return tables

    def get_columns(self, table_name, **kw):
        # Start from generic column reflection, then attach type/collation/
        # extra/comment details from information_schema.
        columns = super(MySQLInspector, self).get_columns(table_name, **kw)
        for column in columns:
            query = ("""SELECT COLUMN_TYPE, COLLATION_NAME,
                               EXTRA, COLUMN_COMMENT
                        FROM information_schema.Columns
                        WHERE TABLE_SCHEMA = '%s' AND TABLE_NAME = '%s' AND
                              COLUMN_NAME = '%s'""" %
                     (self.default_schema_name, table_name, column['name']))
            r = self.bind.execute(query).fetchone()
            column['type'] = r[0]

            options = []
            collation_name = r[1]
            # Only surface non-default collations (utf8_general_ci is assumed).
            if collation_name and collation_name != 'utf8_general_ci':
                options.append(collation_name)

            extra = r[2]
            if extra:
                options.append(extra)

            # Record foreign-key targets as "FK: table.column" annotations.
            for key in column['foreign_keys']:
                for refcolumn in key['referred_columns']:
                    msg = "FK: %s.%s" % (key['referred_table'], refcolumn)
                    options.append(msg)

            column.set_comment(r[3], options)
        return columns
| 38.307692
| 76
| 0.579518
|
4a01b7075267862df8fe9b44dca3848ded346f01
| 10,682
|
py
|
Python
|
digani/res/email/email_extractor.py
|
ZwEin27/dig-attribute-name-identification
|
669c2d4f66560e4085884a5d0c259390214ef69f
|
[
"Apache-2.0"
] | null | null | null |
digani/res/email/email_extractor.py
|
ZwEin27/dig-attribute-name-identification
|
669c2d4f66560e4085884a5d0c259390214ef69f
|
[
"Apache-2.0"
] | null | null | null |
digani/res/email/email_extractor.py
|
ZwEin27/dig-attribute-name-identification
|
669c2d4f66560e4085884a5d0c259390214ef69f
|
[
"Apache-2.0"
] | null | null | null |
import re
import json
from sets import Set
class EE(object):
    """Extractor of email addresses from text.

    The legal definition is in https://en.wikipedia.org/wiki/Email_address
    This class attempts to map purposefully obfuscated email addresses
    (e.g. "jane at gee mail dot com") to legal addresses.
    Users of this class should call EE.extract_email(), see documentation.
    """

    def __init__(self):
        # All regexes are built once here and reused across extract_* calls.
        self.common_domains = [
            "gmail",
            "gee mail",
            "g mail",
            "gml",
            "yahoo",
            "hotmail"
        ]
        self.common_domains_regex = "(?:" + "|".join(self.common_domains) + ")"

        # Obfuscated spellings that should all normalize to "gmail".
        # BUGFIX: the original list was missing commas, so "g mail" and "gml"
        # were implicitly concatenated into the single literal "g mailgml",
        # meaning neither synonym was ever rewritten to "gmail".
        self.gmail_synonyms = [
            "gee mail",
            "g mail",
            "gml"
        ]
        self.gmail_synonyms_regex = "(?:" + "|".join(self.gmail_synonyms) + ")"

        self.com_synonyms = [
            r"com\b",
            r"co\s*\.\s*\w\w\w?",
            r"co\s+dot\s+\w\w\w?"
        ]
        self.com_synonyms_regex = r"(?:" + "|".join(self.com_synonyms) + ")"

        # The intent here is to match things like "yahoo com", "yahoo dot com".
        # We require matching the com synonyms to avoid interpreting text that
        # contains "at yahoo" as part of a domain name.
        self.spelled_out_domain_regex = r"(?:" + self.common_domains_regex + "(?:(?:dot\s+|\.+|\,+|\s+)" + self.com_synonyms_regex + "))"

        # The many ways people write/obfuscate the "@" sign.
        self.at_regexes = [
            r"@",
            r"\(+@\)+",
            r"\[+@\]+",
            r"\(+(?:at|arroba)\)+",
            r"\[+(?:at|arroba)\]+",
            r"\{+(?:at|arroba)\}+",
            r"\s+(?:at|arroba)@",
            r"@at\s+",
            r"at\s+(?=" + self.spelled_out_domain_regex + ")",
            r"(?<=\w\w\w|\wat)\s+(?=" + self.spelled_out_domain_regex + ")",
            r"(?<=\w\w\w|\wat)\[\](?=" + self.spelled_out_domain_regex + "?" + ")"
        ]
        self.at_regex = "(?:" + r'|'.join(self.at_regexes) + ")"

        # People put junk between the "at" sign and the start of the domain.
        self.at_postfix_regexes = [
            ",+\s*",
            "\.+\s*"
        ]
        self.at_postfix_regex = "(?:" + r'|'.join(self.at_postfix_regexes) + ")?"
        self.full_at_regex = self.at_regex + self.at_postfix_regex + "\s*"

        # Character set defined by the standard (labels are 2+ chars here).
        self.basic_dns_label_regex = r"[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]"
        self.non_dns_regex = r"[^a-zA-Z0-9\-.]"

        # Sometimes people do things like maria at (yahoo) (dot) (com)
        self.wrapped_basic_dns_label_regexes = [
            self.basic_dns_label_regex,
            "\(+" + self.basic_dns_label_regex + "\)+",
            "\[+" + self.basic_dns_label_regex + "\]+"
        ]
        self.dns_label_regex = "(?:" + "|".join(self.wrapped_basic_dns_label_regexes) + ")"

        # People put all kinds of junk between the parts of a domain name.
        self.dot_regex = "[(\[]*dot[)\]]*"
        self.dns_separator_regexes = [
            "\s*\.+\s*",
            "[\.\s]+" + self.dot_regex + "[\.\s]+",
            "\(+(?:\.|" + self.dot_regex + ")+\)+",
            "\[+\.+\]+",
            "\{+\.+\}+",
            "\s+(?=" + self.com_synonyms_regex + ")"
        ]
        self.dns_separator_regex = "(?:" + ",*" + "|".join(self.dns_separator_regexes) + ",*" + ")"
        # Group 2 of the final email regex: the (possibly obfuscated) domain.
        self.dns_re = self.full_at_regex + r"(" + self.dns_label_regex + r"(?:" + self.dns_separator_regex + self.dns_label_regex + r")*)"

        # Regex for the user name part of legal addresses.
        # Assuming all text has been lowercased before.
        # From wikipedia: !#$%&'*+-/=?^_`{|}~ and dot are allowed (dot not at
        # the start). We require starting with alphanumeric because of all the
        # junk that often precedes addresses.
        self.full_username_regex = r"[a-z0-9]+(?:[-.!#$%&'*+/?^_`{|}~][a-z0-9]+)*"
        # The basic regex is for cases when there is no @ sign, i.e. heavy
        # obfuscation. No consecutive punctuation, to avoid grabbing emails
        # such as me.......LouiseHolland41@gmail
        self.basic_username_regex = r"(?:[a-z0-9]+(?:(?:[-+_.]|[(]?dot[)]?)[a-z0-9]+)*\s*)"
        # Use lookahead to find the @ immediately following the user name.
        self.strict_username_regex = r"(?:" + self.full_username_regex + r"(?=@))"
        # Group 1 of the final email regex: the user name.
        self.username_regex = r"(" + self.basic_username_regex + r"|" + self.strict_username_regex + r")"
        self.email_regex = self.username_regex + self.dns_re

    def clean_domain(self, regex_match):
        """Sanity-check and normalize an extracted domain, being conservative
        and throwing out suspicious domains (prefer precision to recall).

        :param regex_match: the output of our regex matching
        :type regex_match: string
        :return: normalized domain, or '' if the match looks bogus
        :rtype: string
        """
        result = regex_match
        result = re.sub(self.gmail_synonyms_regex, "gmail", result)
        result = re.sub("\s+", ".", result)
        result = re.sub(self.dot_regex, ".", result)
        result = re.sub(self.non_dns_regex, "", result)
        result = re.sub("\.+", ".", result)
        result = result.strip()
        # If the domain ends with one of the common domains, add .com at the end
        if re.match(self.common_domains_regex + "$", result):
            result += ".com"
        # All domains have to contain a .
        if result.find('.') < 0:
            return ''
        # If the domain contains gmail, it has to be exactly gmail.com.
        # This is drastic because of examples such as "at faithlynn1959@gmail. in call"
        if result.find('gmail') >= 0:
            if result != 'gmail.com':
                return ''
        return result

    @staticmethod
    def clean_username(string):
        """Normalize an extracted user name; return None if it looks bogus.

        :param string: raw regex capture for the user name
        :type string: string
        :return: cleaned user name or None
        :rtype: string or None
        """
        username = string.strip()
        username = re.sub("[(]?dot[)]?", '.', username)
        # paranoid sanity check to reject short user names.
        if len(username) < 4:
            return None
        return username

    def extract_domain(self, string):
        """Extract the domain part of an email address within a string.
        Separate method used for testing purposes only.

        :param string: text to search
        :return: list of cleaned domains (may contain '' for rejects)
        :rtype: list
        """
        matches = re.findall(self.dns_re, string, re.I)
        clean_results = []
        for m in matches:
            clean_results.append(self.clean_domain(m))
        return clean_results

    def extract_email(self, string, return_as_string=False):
        """Extract email addresses from *string*.

        :param string: the text to extract from
        :param return_as_string: whether to return the result as a string of
            comma-separated values or as a set
        :type return_as_string: Boolean
        :return: set of addresses (or a comma-joined string)
        """
        line = string.lower().replace('\n', ' ').replace('\r', '')
        line = re.sub(r"[*?]+", " ", line)
        line = re.sub(r"\\n", " ", line)
        line = re.sub(r"\s+g\s+mail\s+", " gmail ", line)
        matches = re.findall(self.email_regex, line)
        # Use the builtin set (compatible replacement for the legacy
        # Python 2 sets.Set still imported at module level).
        clean_results = set()
        for (u, d) in matches:
            domain = self.clean_domain(d)
            username = EE.clean_username(u)
            if domain and username:
                email = username + "@" + domain
                clean_results.add(email)
        if return_as_string:
            return ",".join(clean_results)
        else:
            return clean_results
# if __name__ == '__main__':
# # file = open('/Users/pszekely/Downloads/ht-email/ht-email.txt', 'r')
# # file = open('/Users/pszekely/Downloads/ht-email/jakarta.txt', 'r')
# # file = open('/Users/pszekely/Downloads/ht-email/test.txt', 'r')
# # file = open('/Users/pszekely/Downloads/ht-email/emails.txt', 'r')
#
# # line = "oikqlthi @ gmail commy GmaiL.. nude.ass33"
# # line = "@ashleyspecialselect@gmail .com"
# # line = "My personal gmail....wowboobs7"
# # line = "My personal gmail....cum2mom"
# # line = "[atmashraffreelancer gmail com]"
# # line = "\nSweetAbby90 at gmail\n" # this should be a separate pattern as it is all in one line
# # EE.extract_email(line)
#
# with open('../../training/emails_ground_truth.json') as gt_file:
# ground_truth = json.load(gt_file)
#
# correct = 0
# incorrect = 0
# not_recalled = 0
# incorrectly_extracted = []
# not_extracted = []
# for r in ground_truth:
# found = False
# sentence = r["sentence"]
# # print "as string: %s" % EE.extract_email(sentence, True)
# emails = EE.extract_email(sentence)
# if len(emails) == 0:
# print "~~~ no extractions"
# for e in emails:
# found = True
# if e in r["emails"]:
# correct += 1
# print "+++ %s" % e
# else:
# if len(r["emails"]) > 0:
# incorrect += 1
# r["extracted"] = e
# incorrectly_extracted.append(r)
# print "--- got: %s, expected: %s" % (e, r["emails"])
# print "\n"
# if not found and len(r["emails"]) > 0:
# not_recalled += 1
# r["extracted"] = ""
# not_extracted.append(r)
#
# print json.dumps(not_extracted, indent=4)
# print json.dumps(incorrectly_extracted, indent=4)
# print "\ncorrect %d, incorrect %d, not extracted: %d" % (correct, incorrect, not_recalled)
# print len(ground_truth)
| 39.858209
| 138
| 0.536791
|
4a01b7311ae380d50e1402b5c97a99328da0137f
| 470
|
py
|
Python
|
Scripts/vowel.py
|
yashrajmani/hacktoberfest2021
|
871c4ce3b5c0353e5f737f895ea79e46d3fedf20
|
[
"MIT"
] | 15
|
2021-10-03T08:27:22.000Z
|
2022-01-10T11:20:56.000Z
|
Scripts/vowel.py
|
yashrajmani/hacktoberfest2021
|
871c4ce3b5c0353e5f737f895ea79e46d3fedf20
|
[
"MIT"
] | 3
|
2021-10-03T08:23:57.000Z
|
2021-10-15T05:21:15.000Z
|
Scripts/vowel.py
|
yashrajmani/hacktoberfest2021
|
871c4ce3b5c0353e5f737f895ea79e46d3fedf20
|
[
"MIT"
] | 51
|
2021-10-03T08:20:44.000Z
|
2021-10-31T12:51:11.000Z
|
def count_letters(text):
    """Count vowels, consonants, uppercase and lowercase letters in *text*.

    Returns a tuple ``(vowels, consonants, upper, lower)``.

    Fixes a defect in the original script: non-alphabetic characters
    (digits, spaces, punctuation) were counted as lowercase consonants;
    here only alphabetic characters are classified.
    """
    vowels = consonants = upper = lower = 0
    for ch in text:
        if not ch.isalpha():
            continue  # skip digits/whitespace/punctuation entirely
        if ch.lower() in "aeiou":
            vowels += 1
        else:
            consonants += 1
        if ch.isupper():
            upper += 1
        else:
            lower += 1
    return vowels, consonants, upper, lower


if __name__ == "__main__":
    # Main guard added so importing this module no longer blocks on input().
    y = input("Enter a string:")
    v, c, u, l = count_letters(y)
    print("Number of vowels are:", v)
    print("Number of consonants are:", c)
    print("Number of upper case are:", u)
    print("Number of lower case are:", l)
| 18.8
| 39
| 0.442553
|
4a01b80ef7fd3042cb585acbdf15c7183cfdd0cf
| 2,991
|
py
|
Python
|
backend/env/shopstopbackend/api/migrations/0001_initial.py
|
heleneelling/TDT4140-PU
|
0dabe0873fae7ebd45737798157a2b1b57fc74e2
|
[
"MIT"
] | null | null | null |
backend/env/shopstopbackend/api/migrations/0001_initial.py
|
heleneelling/TDT4140-PU
|
0dabe0873fae7ebd45737798157a2b1b57fc74e2
|
[
"MIT"
] | null | null | null |
backend/env/shopstopbackend/api/migrations/0001_initial.py
|
heleneelling/TDT4140-PU
|
0dabe0873fae7ebd45737798157a2b1b57fc74e2
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.2 on 2020-02-11 09:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration.

    All models use ``managed = False``: Django records them but does not
    create or alter the underlying tables (they exist in an external schema).
    Do not edit by hand beyond comments.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Groups',
            fields=[
                ('groupid', models.AutoField(db_column='GroupID', primary_key=True, serialize=False)),
                ('groupname', models.CharField(db_column='GroupName', max_length=255)),
            ],
            options={
                'db_table': 'Groups',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            # Join table linking items to shopping lists, with a checked flag.
            name='Iteminlist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ischecked', models.IntegerField(db_column='IsChecked')),
            ],
            options={
                'db_table': 'ItemInList',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Items',
            fields=[
                ('itemid', models.AutoField(db_column='ItemID', primary_key=True, serialize=False)),
                ('itemname', models.CharField(blank=True, db_column='ItemName', max_length=255, null=True)),
            ],
            options={
                'db_table': 'Items',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            # Join table linking users to groups.
            name='Memberofgroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
            options={
                'db_table': 'MemberOfGroup',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Shoppinglists',
            fields=[
                ('listid', models.AutoField(db_column='ListID', primary_key=True, serialize=False)),
                ('listname', models.CharField(blank=True, db_column='ListName', max_length=255, null=True)),
                ('iscomplete', models.IntegerField(db_column='IsComplete')),
            ],
            options={
                'db_table': 'ShoppingLists',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Users',
            fields=[
                ('userid', models.AutoField(db_column='UserID', primary_key=True, serialize=False)),
                ('username', models.CharField(db_column='Username', max_length=255)),
                ('email', models.CharField(db_column='Email', max_length=255, unique=True)),
                ('avatar', models.CharField(blank=True, db_column='Avatar', max_length=255, null=True)),
                ('password', models.CharField(db_column='Password', max_length=255)),
            ],
            options={
                'db_table': 'Users',
                'managed': False,
            },
        ),
    ]
| 35.607143
| 114
| 0.506854
|
4a01b8445e27a0f667718077fc3e52adea63e1c0
| 17,317
|
py
|
Python
|
python/dgl/backend/pytorch/tensor.py
|
zheng-da/dgl
|
8091419468628039af1f9f4b5185bedbb26eb8f4
|
[
"Apache-2.0"
] | 2
|
2020-08-05T07:21:51.000Z
|
2021-02-20T10:22:23.000Z
|
python/dgl/backend/pytorch/tensor.py
|
zheng-da/dgl
|
8091419468628039af1f9f4b5185bedbb26eb8f4
|
[
"Apache-2.0"
] | 1
|
2019-02-06T02:02:41.000Z
|
2019-02-06T20:22:32.000Z
|
python/dgl/backend/pytorch/tensor.py
|
zheng-da/dgl-1
|
8091419468628039af1f9f4b5185bedbb26eb8f4
|
[
"Apache-2.0"
] | 3
|
2019-03-04T12:46:05.000Z
|
2019-08-14T18:53:19.000Z
|
from __future__ import absolute_import
from distutils.version import LooseVersion
import scipy # Weird bug in new pytorch when import scipy after import torch
import torch as th
import builtins
import numbers
from torch.utils import dlpack
from ... import ndarray as nd
from ..._deprecate import kernel as K
from ...function.base import TargetCode
from ...base import dgl_warning
# Warn (but do not fail) when running on a PyTorch older than the oldest
# version this backend is tested against.
if LooseVersion(th.__version__) < LooseVersion("1.5.0"):
    dgl_warning("Detected an old version of PyTorch. Suggest using torch>=1.5.0 "
                "for the best experience.")
def data_type_dict():
    """Map DGL dtype names to the corresponding torch dtype objects."""
    names = ['float16', 'float32', 'float64', 'uint8', 'int8',
             'int16', 'int32', 'int64', 'bool']
    return {name: getattr(th, name) for name in names}
def cpu():
    # Default CPU device for this backend.
    return th.device('cpu')
def tensor(data, dtype=None):
    # Wrap bare scalars in a list so th.as_tensor always receives a sequence.
    if isinstance(data, numbers.Number):
        data = [data]
    if isinstance(data, th.Tensor):
        # Preserve the source tensor's device (no copy when dtype matches).
        return th.as_tensor(data, dtype=dtype, device=data.device)
    else:
        return th.as_tensor(data, dtype=dtype)
def as_scalar(data):
    # Extract a Python scalar from a single-element tensor.
    return data.item()
def get_preferred_sparse_format():
    """Get the preferred sparse matrix format supported by the backend.
    Different backends have their preferred backend. This info is useful when
    constructing a sparse matrix.
    """
    return "coo"
def sparse_matrix(data, index, shape, force_format=False):
    fmt = index[0]
    if fmt != 'coo':
        raise TypeError('Pytorch backend only supports COO format. But got %s.' % fmt)
    spmat = th.sparse_coo_tensor(index[1], data, shape)
    # The second return value (a shuffle order) is unused for this backend.
    return spmat, None
def sparse_matrix_indices(spmat):
    return ('coo', spmat._indices())
# Introspection and device helpers: thin adapters from the DGL backend API
# onto torch tensors/devices.
def is_tensor(obj):
    return isinstance(obj, th.Tensor)
def shape(input):
    return input.shape
def dtype(input):
    return input.dtype
def ndim(input):
    return input.dim()
def context(input):
    # A tensor's "context" in DGL terms is its torch device.
    return input.device
def device_type(ctx):
    return th.device(ctx).type
def device_id(ctx):
    ctx = th.device(ctx)
    # Unindexed devices (plain 'cpu'/'cuda') are reported as device 0.
    if ctx.index is None:
        return 0
    else:
        return ctx.index
def to_backend_ctx(dglctx):
    # DGL device_type codes: 1 == CPU, 2 == CUDA.
    dev_type = dglctx.device_type
    if dev_type == 1:
        return th.device('cpu')
    elif dev_type == 2:
        return th.device('cuda', dglctx.device_id)
    else:
        raise ValueError('Unsupported DGL device context:', dglctx)
def astype(input, ty):
    return input.type(ty)
def asnumpy(input):
    # Sparse tensors must be densified before conversion to numpy.
    if isinstance(input, th.sparse.FloatTensor):
        return input.to_dense().cpu().detach().numpy()
    else:
        return input.cpu().detach().numpy()
def copy_to(input, ctx, **kwargs):
    ctx = th.device(ctx)
    if ctx.type == 'cpu':
        return input.cpu()
    elif ctx.type == 'cuda':
        # Select the target GPU before the copy when an index is given.
        if ctx.index is not None:
            th.cuda.set_device(ctx.index)
        return input.cuda(**kwargs)
    else:
        raise RuntimeError('Invalid context', ctx)
# Reduction / shape / creation wrappers. Several of these intentionally
# shadow Python builtins (sum, max, min, repeat, ...) — the DGL backend
# interface requires these exact names; use `builtins.*` for the originals.
def sum(input, dim, keepdims=False):
    return th.sum(input, dim=dim, keepdim=keepdims)
def reduce_sum(input):
    return input.sum()
def mean(input, dim):
    return th.mean(input, dim=dim)
def reduce_mean(input):
    return input.mean()
def max(input, dim):
    # NOTE: the second argmax array is not returned
    return th.max(input, dim=dim)[0]
def reduce_max(input):
    return input.max()
def min(input, dim):
    # NOTE: the second argmin array is not returned
    return th.min(input, dim=dim)[0]
def reduce_min(input):
    return input.min()
def argsort(input, dim, descending):
    return th.argsort(input, dim=dim, descending=descending)
def topk(input, k, dim, descending=True):
    return th.topk(input, k, dim, largest=descending)[0]
def argtopk(input, k, dim, descending=True):
    return th.topk(input, k, dim, largest=descending)[1]
def exp(input):
    return th.exp(input)
def sqrt(input):
    return th.sqrt(input)
def softmax(input, dim=-1):
    return th.softmax(input, dim=dim)
def cat(seq, dim):
    return th.cat(seq, dim=dim)
def stack(seq, dim):
    return th.stack(seq, dim=dim)
def split(input, sizes_or_sections, dim):
    return th.split(input, sizes_or_sections, dim)
def repeat(input, repeats, dim):
    return th.repeat_interleave(input, repeats, dim) # PyTorch 1.1
def gather_row(data, row_index):
    return th.index_select(data, 0, row_index.long())
def slice_axis(data, axis, begin, end):
    return th.narrow(data, axis, begin, end - begin)
def take(data, indices, dim):
    # Emulates numpy-style take: replace axis `dim` with indices' shape.
    new_shape = data.shape[:dim] + indices.shape + data.shape[dim+1:]
    return th.index_select(data, dim, indices.view(-1)).view(new_shape)
def narrow_row(x, start, stop):
    return x[start:stop]
def index_add_inplace(data, row_idx, value):
    data.index_add_(0, row_idx, value)
def scatter_row(data, row_index, value):
    return data.index_copy(0, row_index.long(), value)
def scatter_row_inplace(data, row_index, value):
    data[row_index.long()] = value
def squeeze(input, dim):
    return th.squeeze(input, dim)
def unsqueeze(input, dim):
    return th.unsqueeze(input, dim)
def reshape(input, shape):
    return th.reshape(input ,shape)
def swapaxes(input, axis1, axis2):
    return th.transpose(input, axis1, axis2)
def zeros(shape, dtype, ctx):
    return th.zeros(shape, dtype=dtype, device=ctx)
def zeros_like(input):
    return th.zeros_like(input)
def ones(shape, dtype, ctx):
    return th.ones(shape, dtype=dtype, device=ctx)
def uniform(shape, dtype, ctx, low, high):
    return th.empty(shape, dtype=dtype, device=ctx).uniform_(low, high)
def randint(shape, dtype, ctx, low, high):
    return th.randint(low, high, shape, dtype=dtype, device=ctx)
def pad_packed_tensor(input, lengths, value, l_min=None):
    """Pad a packed (row-concatenated) tensor into shape
    (batch, max_len, *feat_dims), filling unused slots with `value`.
    `l_min`, if given, is a lower bound on the padded length."""
    old_shape = input.shape
    if isinstance(lengths, th.Tensor):
        max_len = as_scalar(lengths.max())
    else:
        max_len = builtins.max(lengths)
    if l_min is not None:
        max_len = builtins.max(max_len, l_min)
    batch_size = len(lengths)
    device = input.device
    x = input.new(batch_size * max_len, *old_shape[1:])
    x.fill_(value)
    # index enumerates the destination row of each packed row: segment i
    # occupies rows [i*max_len, i*max_len + lengths[i]).
    index = []
    for i, l in enumerate(lengths):
        index.extend(range(i * max_len, i * max_len + l))
    index = th.tensor(index).to(device)
    return scatter_row(x, index, input).view(batch_size, max_len, *old_shape[1:])
def pack_padded_tensor(input, lengths):
    """Inverse of pad_packed_tensor: drop padding rows and concatenate the
    remaining rows of each segment."""
    batch_size, max_len = input.shape[:2]
    device = input.device
    index = []
    for i, l in enumerate(lengths):
        index.extend(range(i * max_len, i * max_len + l))
    index = th.tensor(index).to(device)
    return gather_row(input.view(batch_size * max_len, -1), index)
def boolean_mask(input, mask):
    """Select rows of *input* where *mask* is true, casting *mask* to a
    bool tensor first when it arrives with a non-bool dtype."""
    needs_cast = 'bool' not in str(mask.dtype)
    if needs_cast:
        mask = th.tensor(mask, dtype=th.bool)
    return input[mask]
# Elementwise / misc helpers.
def equal(x, y):
    return x == y
def logical_not(input):
    return ~input
def logical_and(input1, input2):
    return input1 & input2
def clone(input):
    return input.clone()
def clamp(data, min_val, max_val):
    return th.clamp(data, min_val, max_val)
def replace_inf_with_zero(x):
    return th.masked_fill(x, th.isinf(x), 0)
def unique(input):
    # torch.unique does not support bool tensors; route through int8.
    if input.dtype == th.bool:
        input = input.type(th.int8)
    return th.unique(input)
def full_1d(length, fill_value, dtype, ctx):
    return th.full((length,), fill_value, dtype=dtype, device=ctx)
def nonzero_1d(input):
    # Always return a 1-D index tensor, even when squeeze() yields a scalar.
    x = th.nonzero(input, as_tuple=False).squeeze()
    return x if x.dim() == 1 else x.view(-1)
def sort_1d(input):
    return th.sort(input)
def arange(start, stop, dtype=th.int64, ctx=None):
    return th.arange(start, stop, dtype=dtype, device=ctx)
def rand_shuffle(arr):
    # Random permutation of rows.
    idx = th.randperm(len(arr))
    return arr[idx]
# DLPack / numpy / DGL-ndarray conversion helpers. "zerocopy" holds only for
# tensors that are already contiguous; .contiguous() may copy otherwise.
def zerocopy_to_dlpack(input):
    return dlpack.to_dlpack(input.contiguous())
def zerocopy_from_dlpack(dlpack_tensor):
    return dlpack.from_dlpack(dlpack_tensor)
def zerocopy_to_numpy(input):
    # NOTE: not zerocopy
    return asnumpy(input)
def zerocopy_from_numpy(np_array):
    return th.as_tensor(np_array)
def zerocopy_to_dgl_ndarray(data):
    return nd.from_dlpack(dlpack.to_dlpack(data.contiguous()))
def zerocopy_to_dgl_ndarray_for_write(input):
    return zerocopy_to_dgl_ndarray(input)
def zerocopy_from_dgl_ndarray(data):
    if data.shape == (0,):
        # NOTE: PyTorch v1.5 does not accept DLPack object representing empty CUDA tensor.
        # Related issue: https://github.com/pytorch/pytorch/issues/41182
        # The issue will be fixed in v1.6 and later.
        return th.tensor([], dtype=getattr(th, data.dtype),
                         device=to_backend_ctx(data.ctx))
    else:
        return dlpack.from_dlpack(data.to_dlpack())
class BinaryReduce(th.autograd.Function):
    """Custom autograd Function for DGL's fused binary-op + reduce kernel
    (deprecated kernel path). Forward applies `binary_op` between lhs/rhs
    features over the graph and reduces with `reducer`; backward delegates
    to the matching kernel gradients."""
    @staticmethod
    def forward(ctx, reducer, binary_op, graph, lhs, rhs, lhs_data, rhs_data, out_data,
                out_size, lhs_map, rhs_map, out_map):
        lhs_data_nd = zerocopy_to_dgl_ndarray(lhs_data)
        rhs_data_nd = zerocopy_to_dgl_ndarray(rhs_data)
        feat_shape = K.infer_binary_feature_shape(binary_op, lhs_data_nd, rhs_data_nd)
        out_shape = feat_shape
        # 'dot' contracts the last feature dimension.
        if binary_op == 'dot':
            out_shape = feat_shape[:-1]
        out_data_nd = zerocopy_to_dgl_ndarray(out_data)
        # Mean is computed as sum followed by an explicit degree division below.
        K.binary_op_reduce(
            reducer if reducer != 'mean' else 'sum',
            binary_op, graph, lhs, rhs, lhs_data_nd, rhs_data_nd,
            out_data_nd, lhs_map[0], rhs_map[0], out_map[0])
        # normalize if mean reducer
        # NOTE(zihao): this is a temporary hack and we should have better solution in the future.
        if reducer == 'mean':
            degs = lhs_data.new_empty((out_data.shape[0],))
            degs_nd = zerocopy_to_dgl_ndarray(degs)
            # Count contributions per output row by sum-reducing a ones vector.
            if lhs != TargetCode.DST:  # src or edge
                target = lhs
                n = lhs_data.shape[0]
                in_map = lhs_map[0]
            else:  # rhs != TargetCode.DST
                target = rhs
                n = rhs_data.shape[0]
                in_map = rhs_map[0]
            in_ones = lhs_data.new_ones((n,))
            in_ones_nd = zerocopy_to_dgl_ndarray(in_ones)
            K.copy_reduce(
                'sum', graph, target, in_ones_nd, degs_nd, in_map, out_map[0])
            # reshape; clamp avoids division by zero for isolated nodes.
            degs = degs.reshape((out_data.shape[0],) + (1,) * (out_data.dim() - 1)).clamp(min=1)
            out_data = out_data / degs
        else:
            degs = None
        # save_for_backward can only save variables
        ctx.backward_cache = (reducer, binary_op, graph, lhs, rhs, lhs_map,
                              rhs_map, out_map, feat_shape, degs)
        ctx.save_for_backward(lhs_data, rhs_data, out_data)
        return out_data
    @staticmethod
    def backward(ctx, grad_out):
        reducer, binary_op, graph, lhs, rhs, lhs_map, rhs_map, out_map, \
            feat_shape, degs = ctx.backward_cache
        lhs_data, rhs_data, out_data = ctx.saved_tensors
        lhs_data_nd = zerocopy_to_dgl_ndarray(lhs_data)
        rhs_data_nd = zerocopy_to_dgl_ndarray(rhs_data)
        out_data_nd = zerocopy_to_dgl_ndarray(out_data)
        grad_lhs = None
        grad_rhs = None
        # Undo the mean normalization applied in forward.
        if reducer == 'mean':
            grad_out = grad_out / degs
        grad_out_nd = zerocopy_to_dgl_ndarray(grad_out)
        # Input index 5 is lhs_data, 6 is rhs_data (see forward signature).
        if ctx.needs_input_grad[5]:
            grad_lhs = grad_out.new_empty((lhs_data_nd.shape[0],) + feat_shape)
            K.backward_lhs_binary_op_reduce(
                reducer if reducer != 'mean' else 'sum',
                binary_op, graph, lhs, rhs, lhs_data_nd, rhs_data_nd,
                out_data_nd, grad_out_nd, zerocopy_to_dgl_ndarray(grad_lhs),
                lhs_map[1], rhs_map[1], out_map[1])
            grad_lhs = _reduce_grad(grad_lhs, lhs_data_nd.shape)
        if ctx.needs_input_grad[6]:
            grad_rhs = grad_out.new_empty((rhs_data_nd.shape[0],) + feat_shape)
            K.backward_rhs_binary_op_reduce(
                reducer if reducer != 'mean' else 'sum',
                binary_op, graph, lhs, rhs, lhs_data_nd, rhs_data_nd,
                out_data_nd, grad_out_nd, zerocopy_to_dgl_ndarray(grad_rhs),
                lhs_map[1], rhs_map[1], out_map[1])
            grad_rhs = _reduce_grad(grad_rhs, rhs_data_nd.shape)
        return None, None, None, None, None, grad_lhs, grad_rhs, None, None, None, \
            None, None
def binary_reduce(reducer, binary_op, graph, lhs, rhs, lhs_data, rhs_data,
                  out_size, lhs_map=(None, None), rhs_map=(None, None), out_map=(None, None)):
    # Public entry point: allocate the output buffer, then go through the
    # BinaryReduce autograd Function so gradients are wired up.
    lhs_data_nd = zerocopy_to_dgl_ndarray(lhs_data)
    rhs_data_nd = zerocopy_to_dgl_ndarray(rhs_data)
    feat_shape = K.infer_binary_feature_shape(binary_op, lhs_data_nd, rhs_data_nd)
    out_shape = feat_shape
    # 'dot' contracts the last feature dimension.
    if binary_op == 'dot':
        out_shape = feat_shape[:-1]
    out_data = lhs_data.new_empty((out_size,) + out_shape)
    return BinaryReduce.apply(
        reducer, binary_op, graph, lhs, rhs, lhs_data, rhs_data, out_data,
        out_size, lhs_map, rhs_map, out_map)
class CopyReduce(th.autograd.Function):
    """Custom autograd Function for DGL's copy + reduce kernel (deprecated
    kernel path): copies features from `target` (src/edge) and reduces them
    into the output with `reducer`."""
    @staticmethod
    def forward(ctx, reducer, graph, target, in_data, out_data, out_size, in_map,
                out_map):
        in_data_nd = zerocopy_to_dgl_ndarray(in_data)
        out_data_nd = zerocopy_to_dgl_ndarray(out_data)
        # Mean is computed as sum followed by an explicit degree division below.
        K.copy_reduce(
            reducer if reducer != 'mean' else 'sum',
            graph, target, in_data_nd, out_data_nd, in_map[0], out_map[0])
        # normalize if mean reducer
        # NOTE(zihao): this is a temporary hack and we should have better solution in the future.
        if reducer == 'mean':
            in_ones = in_data.new_ones((in_data.shape[0],))
            degs = in_data.new_empty((out_data.shape[0],))
            in_ones_nd = zerocopy_to_dgl_ndarray(in_ones)
            degs_nd = zerocopy_to_dgl_ndarray(degs)
            K.copy_reduce(
                'sum', graph, target, in_ones_nd, degs_nd, in_map[0], out_map[0])
            # reshape; clamp avoids division by zero for isolated nodes.
            degs = degs.reshape((out_data.shape[0],) + (1,) * (out_data.dim() - 1)).clamp(min=1)
            out_data = out_data / degs
        else:
            degs = None
        # save_for_backward can only save variables
        ctx.backward_cache = (reducer, graph, target, in_map, out_map, degs)
        ctx.save_for_backward(in_data, out_data)
        return out_data
    @staticmethod
    def backward(ctx, grad_out):
        reducer, graph, target, in_map, out_map, degs = ctx.backward_cache
        in_data, out_data = ctx.saved_tensors
        in_data_nd = zerocopy_to_dgl_ndarray(in_data)
        out_data_nd = zerocopy_to_dgl_ndarray(out_data)
        grad_in = None
        # Undo the mean normalization applied in forward.
        if reducer == 'mean':
            grad_out = grad_out / degs
        grad_out_nd = zerocopy_to_dgl_ndarray(grad_out)
        # Input index 3 is in_data (see forward signature).
        if ctx.needs_input_grad[3]:
            grad_in = grad_out.new_empty(in_data_nd.shape)
            K.backward_copy_reduce(
                reducer if reducer != 'mean' else 'sum',
                graph, target, in_data_nd, out_data_nd, grad_out_nd,
                zerocopy_to_dgl_ndarray(grad_in), in_map[1], out_map[1])
        return None, None, None, grad_in, None, None, None, None
def copy_reduce(reducer, graph, target, in_data, out_size, in_map=(None, None),
                out_map=(None, None)):
    # Public entry point: allocate the output buffer, then go through the
    # CopyReduce autograd Function so gradients are wired up.
    out_data = in_data.new_empty((out_size,) + in_data.shape[1:])
    return CopyReduce.apply(reducer, graph, target, in_data, out_data, out_size, in_map, out_map)
def _reduce_grad(grad, shape):
    """Reduce gradient on the broadcast dimension
    If there is broadcast in forward pass, gradients need to be reduced on
    broadcast dimension. This function checks the input tensor shape and
    gradient shape and perform the reduction.

    Parameters
    ----------
    grad: Tensor
        Gradient tensor
    shape: tuple
        Shape of input tensor

    Returns
    -------
    Tensor
    """
    # Compare feature shapes only; dim 0 is the batch/row dimension.
    grad_shape = grad.shape[1:]
    in_shape = shape[1:]
    if in_shape == grad_shape:
        # no need to reduce
        return grad
    num_to_squeeze = len(grad_shape) - len(in_shape)
    # pad inshape with leading 1s so both shapes have the same rank
    in_shape = (1,) * num_to_squeeze + in_shape
    # dims where the sizes differ are exactly the broadcast dims
    reduce_idx = th.nonzero(th.tensor(grad_shape) - th.tensor(in_shape))
    reduce_idx += 1  # skip batch dim
    grad = grad.sum(dim=tuple(reduce_idx), keepdim=True)
    return grad.view(shape)
def sync():
    # Pytorch performs computation synchronously, so no need for synchronization.
    pass
def attach_grad(x):
    # Reuse an existing grad buffer if present; otherwise start tracking grads.
    if x.grad is not None:
        x.grad.zero_()
        return x
    else:
        return x.requires_grad_()
def backward(x, head_gradient=None):
    # 1-element 1-D head gradients must be converted to scalars on old torch.
    if head_gradient is not None and head_gradient.shape[0] == 1 and len(head_gradient.shape) == 1:
        # Fix for torch 1.3.1
        head_gradient = th.tensor(head_gradient.item()).to(head_gradient.device)
    x.backward(head_gradient)
def grad(x):
    return x.grad
def is_no_grad(x):
    return x.grad is None or (x.grad == 0).all()
def is_recording():
    return th.is_grad_enabled()
class record_grad(object):
    # No-op context manager: torch records gradients by default, so nothing
    # needs to be toggled on entry or exit.
    def __init__(self):
        pass
    def __enter__(self):
        pass
    def __exit__(self, exc_type, exc_value, exc_traceback):
        pass
no_grad = th.no_grad
| 32.068519
| 99
| 0.650113
|
4a01b8839b9b9b6546fcafc4078d95b585237cf0
| 3,961
|
py
|
Python
|
sdk/python/pulumi_aws/licensemanager/association.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/licensemanager/association.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/licensemanager/association.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Association(pulumi.CustomResource):
    """AWS License Manager association resource
    (``aws:licensemanager/association:Association``).

    NOTE: auto-generated by the Pulumi Terraform Bridge (tfgen); keep
    manual edits minimal and mirror them in the generator if possible.
    """
    license_configuration_arn: pulumi.Output[str]
    """
    ARN of the license configuration.
    """
    resource_arn: pulumi.Output[str]
    """
    ARN of the resource associated with the license configuration.
    """
    def __init__(__self__, resource_name, opts=None, license_configuration_arn=None, resource_arn=None, __props__=None, __name__=None, __opts__=None):
        """
        Provides a License Manager association.
        > **Note:** License configurations can also be associated with launch templates by specifying the `license_specifications` block for an `ec2.LaunchTemplate`.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] license_configuration_arn: ARN of the license configuration.
        :param pulumi.Input[str] resource_arn: ARN of the resource associated with the license configuration.
        """
        # Legacy __name__/__opts__ aliases: deprecated but kept for
        # backwards compatibility with older SDK callers.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # opts.id set means "adopt an existing resource": properties then
        # come from __props__ (see `get` below), not from the arguments.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            if license_configuration_arn is None:
                raise TypeError("Missing required property 'license_configuration_arn'")
            __props__['license_configuration_arn'] = license_configuration_arn
            if resource_arn is None:
                raise TypeError("Missing required property 'resource_arn'")
            __props__['resource_arn'] = resource_arn
        super(Association, __self__).__init__(
            'aws:licensemanager/association:Association',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, license_configuration_arn=None, resource_arn=None):
        """
        Get an existing Association resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] license_configuration_arn: ARN of the license configuration.
        :param pulumi.Input[str] resource_arn: ARN of the resource associated with the license configuration.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()
        __props__["license_configuration_arn"] = license_configuration_arn
        __props__["resource_arn"] = resource_arn
        return Association(resource_name, opts=opts, __props__=__props__)

    # Map provider camelCase property names to Python snake_case and back.
    def translate_output_property(self, prop):
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 45.011364
| 165
| 0.69124
|
4a01b902c2553e671bf68dd7e97ae315eaa0af72
| 2,124
|
py
|
Python
|
synapse/logging/utils.py
|
dsonck92/synapse
|
2560b1b6b2f74b5724253396c0e3665fa1f7968c
|
[
"Apache-2.0"
] | 9,945
|
2015-01-02T07:41:06.000Z
|
2022-03-31T23:22:42.000Z
|
synapse/logging/utils.py
|
t2bot/synapse
|
62ca554ef09330cb88d46fca8296a859d0adc143
|
[
"Apache-2.0"
] | 9,320
|
2015-01-08T14:09:03.000Z
|
2022-03-31T21:11:24.000Z
|
synapse/logging/utils.py
|
t2bot/synapse
|
62ca554ef09330cb88d46fca8296a859d0adc143
|
[
"Apache-2.0"
] | 2,299
|
2015-01-31T22:16:29.000Z
|
2022-03-31T06:08:26.000Z
|
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from functools import wraps
from inspect import getcallargs
from typing import Callable, TypeVar, cast
_TIME_FUNC_ID = 0
def _log_debug_as_f(f, msg, msg_args):
name = f.__module__
logger = logging.getLogger(name)
if logger.isEnabledFor(logging.DEBUG):
lineno = f.__code__.co_firstlineno
pathname = f.__code__.co_filename
record = logger.makeRecord(
name=name,
level=logging.DEBUG,
fn=pathname,
lno=lineno,
msg=msg,
args=msg_args,
exc_info=None,
)
logger.handle(record)
F = TypeVar("F", bound=Callable)
def log_function(f: F) -> F:
    """Function decorator that logs every call to that function."""
    func_name = f.__name__

    @wraps(f)
    def wrapped(*args, **kwargs):
        logger = logging.getLogger(f.__module__)
        if logger.isEnabledFor(logging.DEBUG):

            def _truncate(value):
                # Keep log lines bounded: cap each rendered arg at 50 chars.
                text = str(value)
                return text if len(text) <= 50 else text[:50] + "..."

            bound = getcallargs(f, *args, **kwargs)
            rendered = ", ".join(
                "%s=%s" % (name, _truncate(value)) for name, value in bound.items()
            )
            _log_debug_as_f(
                f,
                "Invoked '%(func_name)s' with args: %(args)s",
                {"func_name": func_name, "args": rendered},
            )

        return f(*args, **kwargs)

    wrapped.__name__ = func_name
    return cast(F, wrapped)
| 27.584416
| 87
| 0.621469
|
4a01b926ca238816fd61a82aa136c160340de729
| 486
|
py
|
Python
|
Chap3/plotcapacity.py
|
thomaskamalakis/telecomsystems
|
476350a121debfd580dced0823b4bcf94f1f896d
|
[
"MIT"
] | 2
|
2022-01-12T11:46:39.000Z
|
2022-01-25T16:49:45.000Z
|
Chap3/plotcapacity.py
|
thomaskamalakis/telecomsystems
|
476350a121debfd580dced0823b4bcf94f1f896d
|
[
"MIT"
] | null | null | null |
Chap3/plotcapacity.py
|
thomaskamalakis/telecomsystems
|
476350a121debfd580dced0823b4bcf94f1f896d
|
[
"MIT"
] | 7
|
2021-09-17T13:09:07.000Z
|
2022-01-26T16:58:25.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 7 21:00:00 2019
@author: thomas
"""
import numpy as np
import matplotlib.pyplot as plt
# Use a serif font at a readable size for all figure text.
font = {'family': 'serif',
        'weight': 'normal',
        'size': 16}
plt.rc('font', **font)

# Crossover probabilities of the binary symmetric channel. Endpoints 0 and
# 1 are excluded because p*log2(p) is undefined there.
pe = np.arange(0.01, 1, 0.01)

# Maximum achievable rate: R_max = 1 - H(pe), with H the binary entropy.
Rmax = 1.0 + pe * np.log2(pe) + (1.0 - pe) * np.log2(1.0 - pe)

plt.figure(1)
plt.plot(pe, Rmax)
# Raw strings keep the LaTeX backslashes from being parsed as (invalid)
# escape sequences — fixes the DeprecationWarning on modern Python.
plt.xlabel(r'$p_\mathrm{e}$')
plt.ylabel(r'$R_\mathrm{max}$')
plt.tight_layout()
plt.savefig('Rmax.png')
| 18.692308
| 48
| 0.604938
|
4a01b9343b29a1538d35a8b6dfe3a50d1ddda9f5
| 1,997
|
py
|
Python
|
invenio_app_ils/document_requests/mail/messages.py
|
topless/invenio-app-ils
|
38f5a6b61cdeaf5fa5776613073fa46af28737a9
|
[
"MIT"
] | null | null | null |
invenio_app_ils/document_requests/mail/messages.py
|
topless/invenio-app-ils
|
38f5a6b61cdeaf5fa5776613073fa46af28737a9
|
[
"MIT"
] | 21
|
2018-11-02T14:19:53.000Z
|
2021-06-25T15:16:42.000Z
|
invenio_app_ils/document_requests/mail/messages.py
|
topless/invenio-app-ils
|
38f5a6b61cdeaf5fa5776613073fa46af28737a9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Document requests mail message objects."""
import os
from flask import current_app
from invenio_app_ils.mail.messages import BlockTemplatedMessage
class DocumentRequestMessage(BlockTemplatedMessage):
    """E-mail message for a document request, rendered from block templates.

    The concrete template is selected by ``action``; rejection actions are
    further specialized with the record's ``reject_reason``.
    """

    TEMPLATES_DIR = "invenio_app_ils_document_requests/mail"

    # Built-in action -> template mapping; overridden/extended by the
    # ILS_DOCUMENT_REQUEST_MAIL_TEMPLATES config variable.
    DEFAULT_TEMPLATES = dict(
        request_accepted="document_request_accept.html",
        request_rejected_user_cancel="document_request_reject_user_cancel.html",
        request_rejected_in_catalog="document_request_reject_in_catalog.html",
        request_rejected_not_found="document_request_reject_not_found.html",
    )

    def __init__(self, request, action=None, message_ctx=None, **kwargs):
        """Create an e-mail message based on the new doc request record.

        :param request: the document request record.
        :param action: template selector; ``request_rejected`` is expanded
            with the record's ``reject_reason``.
        :param message_ctx: extra context passed to the template.
        :raises NotImplementedError: if no action is given.
        :raises KeyError: if the (expanded) action has no template.
        """
        # Fix: `message_ctx={}` was a mutable default argument shared
        # across calls; default to None and normalize here instead.
        if message_ctx is None:
            message_ctx = {}
        self.request = request
        templates = dict(
            self.DEFAULT_TEMPLATES,
            **current_app.config["ILS_DOCUMENT_REQUEST_MAIL_TEMPLATES"]
        )
        if not action:
            raise NotImplementedError
        if action == "request_rejected":
            # Specialize the rejection template by the stored reason.
            reject_reason = request.get("reject_reason", "")
            action = "{}_{}".format(action, reject_reason.lower())
        if action not in templates:
            raise KeyError(
                "Invalid action argument `{0}` or not found in "
                "templates `{1}`.".format(action, list(templates.keys()))
            )
        super().__init__(
            template=os.path.join(self.TEMPLATES_DIR, templates[action]),
            ctx=dict(request=dict(request), **message_ctx, **kwargs),
            **kwargs
        )

    def dump(self):
        """Dump document request email data."""
        data = super().dump()
        data["document_request_pid"] = self.request["pid"]
        return data
| 32.737705
| 80
| 0.651978
|
4a01b98b66772b85a4103e4442074d7b8f2af602
| 4,437
|
py
|
Python
|
sol/test_parser.py
|
ianpreston/sol
|
469fff7f7aff8469e7a2731fbf4b06de0db78836
|
[
"MIT"
] | null | null | null |
sol/test_parser.py
|
ianpreston/sol
|
469fff7f7aff8469e7a2731fbf4b06de0db78836
|
[
"MIT"
] | null | null | null |
sol/test_parser.py
|
ianpreston/sol
|
469fff7f7aff8469e7a2731fbf4b06de0db78836
|
[
"MIT"
] | null | null | null |
import pytest
from sol.parser import SolParser, ParseError
from sol.lexer import Token, TokenType
from sol.ast import (
MsgAstNode,
IdentAstNode,
AssignAstNode,
ConstAstNode,
ConstType,
)
def test_msg_pass():
    """A simple `foo.bar` message pass parses into an argument-less MsgAstNode."""
    toks = [
        Token(TokenType.IDENT, 'foo'),
        Token(TokenType.DOT, '.'),
        Token(TokenType.IDENT, 'bar'),
        Token(TokenType.EOF, 'EOF'),
    ]
    node = SolParser(toks).parse_program().root

    assert isinstance(node, MsgAstNode)
    assert node.target.ident == 'foo'
    assert node.name.ident == 'bar'
    assert not node.args
def test_invalid_msg_pass():
    """Two adjacent identifiers with no dot between them are a parse error."""
    toks = [
        Token(TokenType.IDENT, 'foo'),
        Token(TokenType.IDENT, 'bar'),
        Token(TokenType.EOF, 'EOF'),
    ]
    with pytest.raises(ParseError):
        SolParser(toks).parse_program()
def test_nonsense():
    """An identifier followed directly by a string literal cannot parse."""
    toks = [
        Token(TokenType.IDENT, 'foo'),
        Token(TokenType.STRING, 'hello world'),
        Token(TokenType.EOF, 'EOF'),
    ]
    with pytest.raises(ParseError):
        SolParser(toks).parse_program()
def test_assignment():
    """`foo := bar.quux` parses to an assignment whose RHS is a message pass."""
    toks = [
        Token(TokenType.IDENT, 'foo'),
        Token(TokenType.OPER, ':='),
        Token(TokenType.IDENT, 'bar'),
        Token(TokenType.DOT, '.'),
        Token(TokenType.IDENT, 'quux'),
        Token(TokenType.EOF, 'EOF'),
    ]
    node = SolParser(toks).parse_program().root

    assert isinstance(node, AssignAstNode)
    assert isinstance(node.left, IdentAstNode)
    assert node.left.ident == 'foo'
    assert isinstance(node.right, MsgAstNode)
    assert node.right.target.ident == 'bar'
    assert node.right.name.ident == 'quux'
def test_assignment_string():
    """Assigning a string literal yields a STRING-typed constant node."""
    toks = [
        Token(TokenType.IDENT, 'str'),
        Token(TokenType.OPER, ':='),
        Token(TokenType.STRING, 'Hello, world!'),
        Token(TokenType.EOF, 'EOF'),
    ]
    node = SolParser(toks).parse_program().root

    assert isinstance(node, AssignAstNode)
    assert isinstance(node.left, IdentAstNode)
    assert node.left.ident == 'str'
    assert isinstance(node.right, ConstAstNode)
    assert node.right.const_type == ConstType.STRING
    assert node.right.const_value == 'Hello, world!'
def test_argument():
    """A parenthesized message pass is parsed as a single argument."""
    toks = [
        Token(TokenType.IDENT, 'foo'),
        Token(TokenType.DOT, '.'),
        Token(TokenType.IDENT, 'bar'),
        Token(TokenType.LPAREN, '('),
        Token(TokenType.IDENT, 'the_target'),
        Token(TokenType.DOT, '.'),
        Token(TokenType.IDENT, 'the_name'),
        Token(TokenType.RPAREN, ')'),
        Token(TokenType.EOF, 'EOF'),
    ]
    node = SolParser(toks).parse_program().root

    assert isinstance(node, MsgAstNode)
    assert node.target.ident == 'foo'
    assert node.name.ident == 'bar'
    assert len(node.args) == 1
    only_arg = node.args[0]
    assert isinstance(only_arg, MsgAstNode)
    assert only_arg.target.ident == 'the_target'
    assert only_arg.name.ident == 'the_name'
def test_incomplete_arguments():
    """A missing closing parenthesis in an argument list is a parse error."""
    toks = [
        Token(TokenType.IDENT, 'foo'),
        Token(TokenType.DOT, '.'),
        Token(TokenType.IDENT, 'bar'),
        Token(TokenType.LPAREN, '('),
        Token(TokenType.IDENT, 'the_target'),
        Token(TokenType.DOT, '.'),
        Token(TokenType.IDENT, 'the_name'),
        Token(TokenType.EOF, 'EOF'),
    ]
    with pytest.raises(ParseError):
        SolParser(toks).parse_program()
def test_multiple_const_arguments():
    """Comma-separated string literals parse into multiple constant args."""
    toks = [
        Token(TokenType.IDENT, 'foo'),
        Token(TokenType.DOT, '.'),
        Token(TokenType.IDENT, 'bar'),
        Token(TokenType.LPAREN, '('),
        Token(TokenType.STRING, 'first argument'),
        Token(TokenType.COMMA, ','),
        Token(TokenType.STRING, 'second argument'),
        Token(TokenType.RPAREN, ')'),
        Token(TokenType.EOF, 'EOF'),
    ]
    node = SolParser(toks).parse_program().root

    assert isinstance(node, MsgAstNode)
    assert node.target.ident == 'foo'
    assert node.name.ident == 'bar'
    assert len(node.args) == 2
    assert isinstance(node.args[0], ConstAstNode)
    assert isinstance(node.args[1], ConstAstNode)
    assert node.args[0].const_value == 'first argument'
    assert node.args[1].const_value == 'second argument'
| 26.254438
| 56
| 0.618211
|
4a01b98e2d8bf53902fb3537a86c9ef8ebcb8ff7
| 712
|
py
|
Python
|
Python implementation/CallKinDrape.py
|
chrkrogh/KinDrape
|
6a04c01470409741c8d7ab638636646aba24dcbe
|
[
"MIT"
] | 1
|
2022-03-04T08:27:49.000Z
|
2022-03-04T08:27:49.000Z
|
Python implementation/CallKinDrape.py
|
chrkrogh/KinDrape
|
6a04c01470409741c8d7ab638636646aba24dcbe
|
[
"MIT"
] | null | null | null |
Python implementation/CallKinDrape.py
|
chrkrogh/KinDrape
|
6a04c01470409741c8d7ab638636646aba24dcbe
|
[
"MIT"
] | 1
|
2021-06-04T13:52:17.000Z
|
2021-06-04T13:52:17.000Z
|
# Script to define input and call KinDrape
import KinDrape as kd
import time
# Input parameters for KinDrape:
#   d:       discretization distance
#   Grid:    dimensions of the fabric grid (rows, columns)
#   Org:     x, y origin point on the mold
#   Ang:     initial draping direction relative to the y-axis
#   OrgNode: origin node of the grid (row, column)

# Configuration: starting point on the north pole
d = 0.075
Grid = [24, 24]
Org = [-d / 2, -d / 2]
Ang = 0
OrgNode = [11, 11]

# Alternative configuration: starting point away from the north pole
# d = 0.075; Grid = [24,24]; Org = [0,-0.9]; Ang = 60; OrgNode = [3,3]

# Run the drape analysis and report the wall-clock time it took
start = time.time()
Node, Shear, ax, fig = kd.KinDrape(d, Grid, Org, Ang, OrgNode)
print('Time spent on drape analysis: ', time.time() - start, ' s')
| 30.956522
| 73
| 0.671348
|
4a01b9a1ec8ebceca25fbe8bba93749ac3bae972
| 7,795
|
py
|
Python
|
python/onshape_client/oas/models/bt_export_tessellated_edges_body890.py
|
toebes/onshape-clients
|
a26cf6a77cfc7901321e603d5a097e23eb51e35c
|
[
"MIT"
] | 14
|
2019-06-23T08:47:41.000Z
|
2021-11-29T16:28:45.000Z
|
python/onshape_client/oas/models/bt_export_tessellated_edges_body890.py
|
toebes/onshape-clients
|
a26cf6a77cfc7901321e603d5a097e23eb51e35c
|
[
"MIT"
] | 40
|
2019-05-22T14:39:46.000Z
|
2022-03-10T10:36:17.000Z
|
python/onshape_client/oas/models/bt_export_tessellated_edges_body890.py
|
toebes/onshape-clients
|
a26cf6a77cfc7901321e603d5a097e23eb51e35c
|
[
"MIT"
] | 24
|
2019-06-02T01:03:41.000Z
|
2022-03-29T13:25:36.000Z
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_export_tessellated_body3398
except ImportError:
bt_export_tessellated_body3398 = sys.modules[
"onshape_client.oas.models.bt_export_tessellated_body3398"
]
try:
from onshape_client.oas.models import bt_export_tessellated_edges_body890_all_of
except ImportError:
bt_export_tessellated_edges_body890_all_of = sys.modules[
"onshape_client.oas.models.bt_export_tessellated_edges_body890_all_of"
]
try:
from onshape_client.oas.models import bt_export_tessellated_edges_edge1364
except ImportError:
bt_export_tessellated_edges_edge1364 = sys.modules[
"onshape_client.oas.models.bt_export_tessellated_edges_edge1364"
]
class BTExportTessellatedEdgesBody890(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Composed (allOf) model for a tessellated-edges export body; see
    _composed_schemas at the bottom for the constituent schemas.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    allowed_values = {}

    validations = {}

    additional_properties_type = None

    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "bt_type": (str,),  # noqa: E501
            "edges": (
                [bt_export_tessellated_edges_edge1364.BTExportTessellatedEdgesEdge1364],
            ),  # noqa: E501
            "constituents": ([str],),  # noqa: E501
            "id": (str,),  # noqa: E501
        }

    @staticmethod
    def discriminator():
        # No discriminator: this composed model is not polymorphic.
        return None

    attribute_map = {
        "bt_type": "btType",  # noqa: E501
        "edges": "edges",  # noqa: E501
        "constituents": "constituents",  # noqa: E501
        "id": "id",  # noqa: E501
    }

    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
            "_composed_instances",
            "_var_name_to_model_instances",
            "_additional_properties_model_instances",
        ]
    )

    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """bt_export_tessellated_edges_body890.BTExportTessellatedEdgesBody890 - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _from_server (bool): True if the data is from the server
                                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            bt_type (str): [optional] # noqa: E501
            edges ([bt_export_tessellated_edges_edge1364.BTExportTessellatedEdgesEdge1364]): [optional] # noqa: E501
            constituents ([str]): [optional] # noqa: E501
            id (str): [optional] # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration

        constant_args = {
            "_check_type": _check_type,
            "_path_to_item": _path_to_item,
            "_from_server": _from_server,
            "_configuration": _configuration,
        }
        # This model has no required properties of its own; required_args
        # stays empty but the generated plumbing below is kept intact.
        required_args = {}
        # remove args whose value is Null because they are unset
        required_arg_names = list(required_args.keys())
        for required_arg_name in required_arg_names:
            if required_args[required_arg_name] is nulltype.Null:
                del required_args[required_arg_name]
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        composed_info = validate_get_composed_info(constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]

        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name in unused_args
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and not self._additional_properties_model_instances
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)

    @staticmethod
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error beause the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        return {
            "anyOf": [],
            "allOf": [
                bt_export_tessellated_body3398.BTExportTessellatedBody3398,
                bt_export_tessellated_edges_body890_all_of.BTExportTessellatedEdgesBody890AllOf,
            ],
            "oneOf": [],
        }
| 36.425234
| 117
| 0.62848
|
4a01b9d09f7f2f04e5b9790962caca29c0b42f5d
| 6,103
|
py
|
Python
|
detector/src/loss.py
|
cersar/2D_detection
|
620ccc1b120617ead1ccd98e27022f8746e82071
|
[
"Apache-2.0"
] | 3
|
2021-10-21T03:30:00.000Z
|
2022-03-02T13:12:19.000Z
|
detector/src/loss.py
|
cersar/2D_detection
|
620ccc1b120617ead1ccd98e27022f8746e82071
|
[
"Apache-2.0"
] | null | null | null |
detector/src/loss.py
|
cersar/2D_detection
|
620ccc1b120617ead1ccd98e27022f8746e82071
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras.losses import binary_crossentropy
from detector.src.box_utils import *
def focal_loss(
    y_true,
    y_pred,
    alpha=0.25,
    gamma=2.0,
    label_smoothing=0.0,
    from_logits=False,
) -> tf.Tensor:
    """Implements the focal loss function.

    Focal loss was first introduced in the RetinaNet paper
    (https://arxiv.org/pdf/1708.02002.pdf). Focal loss is extremely useful for
    classification when you have highly imbalanced classes. It down-weights
    well-classified examples and focuses on hard examples. The loss value is
    much high for a sample which is misclassified by the classifier as compared
    to the loss value corresponding to a well-classified example. One of the
    best use-cases of focal loss is its usage in object detection where the
    imbalance between the background class and other classes is extremely high.

    Args:
        y_true: true targets tensor.
        y_pred: predictions tensor.
        alpha: balancing factor.
        gamma: modulating factor.

    Returns:
        Weighted loss float `Tensor`. If `reduction` is `NONE`,this has the
        same shape as `y_true`; otherwise, it is scalar.
    """
    # NOTE(review): gamma=None slips past this falsy check and would only
    # fail later at tf.cast -- confirm callers always pass a number.
    if gamma and gamma < 0:
        raise ValueError("Value of gamma should be greater than or equal to zero.")

    tf.assert_equal(tf.shape(y_true),tf.shape(y_pred))

    y_pred = tf.convert_to_tensor(y_pred)
    y_true = tf.cast(y_true, dtype=y_pred.dtype)
    # assumes inputs are (batch, feature_size, num_class) -- TODO confirm
    num_class = y_pred.shape[2]
    feature_size = y_pred.shape[1]
    # Get the cross_entropy for each entry
    ce = binary_crossentropy(tf.reshape(y_true, (-1, 1)), tf.reshape(y_pred, (-1, 1)), from_logits=from_logits, label_smoothing=label_smoothing)
    ce = tf.reshape(ce, (-1, feature_size, num_class))
    # If logits are provided then convert the predictions into probabilities
    if from_logits:
        pred_prob = tf.sigmoid(y_pred)
    else:
        pred_prob = y_pred

    # p_t: probability the model assigns to the true class of each entry
    p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))
    alpha = tf.cast(alpha, dtype=y_true.dtype)
    # alpha balances the positive/negative example weighting
    alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
    gamma = tf.cast(gamma, dtype=y_true.dtype)
    # (1 - p_t)^gamma down-weights easy, well-classified examples
    modulating_factor = tf.pow((1.0 - p_t), gamma)

    loss = alpha_factor * modulating_factor * ce
    # mean over the class dimension
    loss = tf.reduce_mean(loss,-1)
    return loss
def smooth_l1_loss(box_out, box_label, anchors):
    """Smooth-L1 (Huber) loss between predicted box regressions and the
    anchor-encoded regression targets, averaged over the 4 coordinates.
    """
    # Encode ground-truth boxes relative to their anchors: center offsets
    # scaled by anchor size, and log-scaled width/height ratios.
    target = tf.stack(
        [
            (box_label[..., 0] - anchors[..., 0]) / anchors[..., 2],
            (box_label[..., 1] - anchors[..., 1]) / anchors[..., 3],
            tf.math.log(box_label[..., 2] / anchors[..., 2]),
            tf.math.log(box_label[..., 3] / anchors[..., 3]),
        ],
        axis=-1,
    )
    abs_err = tf.math.abs(box_out - target)
    # Quadratic region below |err| < 1, linear above.
    in_quadratic = tf.cast(abs_err < 1, tf.float32)
    per_coord = 0.5 * abs_err ** 2 * in_quadratic + (abs_err - 0.5) * (1 - in_quadratic)
    return tf.reduce_mean(per_coord, axis=-1)
def iou_loss(box_out, box_label, anchors, CFG):
    """IoU-family regression loss: 1 - IoU(predicted box, ground truth),
    with the IoU variant selected by CFG['iou_type']."""
    pred_corners = xywh_to_x1y1x2y2(decode_box_outputs(box_out, anchors))
    label_corners = xywh_to_x1y1x2y2(box_label)
    return 1 - compute_iou(pred_corners, label_corners, CFG['iou_type'])
def compute_cls_loss(cls_out, cls_label, CFG):
    """Classification loss dispatched by CFG['cls_loss'] (focal loss only)."""
    # One-hot with an extra background class, then drop the background
    # column so background anchors get an all-zero target vector.
    one_hot = tf.one_hot(tf.cast(cls_label, tf.int32), CFG['num_classes'] + 1)
    one_hot = one_hot[..., :-1]
    if CFG['cls_loss'] != 'focal_loss':
        raise ValueError('invalid cls_loss {}!'.format(CFG['cls_loss']))
    return focal_loss(
        one_hot,
        cls_out,
        alpha=CFG['focal_alpha'],
        gamma=CFG['focal_gamma'],
        label_smoothing=CFG['label_smooth'],
    )
def compute_reg_loss(box_out, box_label, anchors, CFG):
    """Bounding-box regression loss dispatched by CFG['reg_loss'].

    Supported values: 'smooth_l1' and 'iou'.

    :raises ValueError: if CFG['reg_loss'] names an unknown loss.
    """
    if CFG['reg_loss'] == 'smooth_l1':
        loss = smooth_l1_loss(box_out, box_label, anchors)
    elif CFG['reg_loss'] == 'iou':
        loss = iou_loss(box_out, box_label, anchors, CFG)
    else:
        # Bug fix: the original error referenced CFG['cls_loss'] here,
        # which mislabeled the failure (and raised KeyError when the
        # config had no 'cls_loss' entry at all).
        raise ValueError('invalid reg_loss {}!'.format(CFG['reg_loss']))
    return loss
def compute_detect_loss(labels,outputs,anchors,CFG):
    """Total detection loss over a batch: anchor-matched classification
    loss plus box-regression loss, weighted by CFG['cls_weight'] and
    CFG['reg_weight'].

    labels: per-image object list; channel 0 is the class id (-1 marks
        padded/invalid slots), channels 1:5 the ground-truth box
        (presumably xywh -- TODO confirm against the dataset encoder).
    outputs: per-anchor [class scores | box regression] concatenation.
    anchors: (feature_size, 4) anchor boxes shared across the batch.
    """
    eps = 1e-10  # NOTE(review): unused in this function
    cls_outputs,bbox_outputs = outputs[...,:CFG['num_classes']],outputs[...,CFG['num_classes']:]
    batch_size = tf.shape(cls_outputs)[0]
    feature_size = anchors.shape[0]
    cls_labels = labels[..., 0]
    box_labels = labels[..., 1:]
    cls_labels = tf.reshape(cls_labels, (-1, CFG['obj_max_num']))
    box_labels = tf.reshape(box_labels,(-1,CFG['obj_max_num'],4))
    # IoU of every anchor against every ground-truth slot in its image
    iou_anchor = compute_iou_array(anchors, box_labels)
    # Build (batch_idx, best_gt_idx) index pairs: each anchor is matched
    # to the ground-truth box with which it overlaps most
    ind1 = tf.reshape(tf.tile(tf.reshape(tf.range(batch_size), (-1, 1)), (1, feature_size)), (-1, 1))
    max_iou = tf.reduce_max(iou_anchor, axis=-1)
    max_ind = tf.argmax(iou_anchor, axis=-1, output_type=ind1.dtype)
    ind2 = tf.reshape(max_ind,(-1,1))
    matched_ind = tf.concat([ind1, ind2], -1)
    cls_labels = tf.gather_nd(cls_labels, matched_ind)
    box_labels = tf.gather_nd(box_labels, matched_ind)
    cls_labels = tf.reshape(cls_labels,(-1,feature_size))
    box_labels = tf.reshape(box_labels, (-1, feature_size,4))
    # Positive anchors (IoU > pos_thres) train both heads; negatives
    # (IoU < neg_thres) train only the classifier; the band in between is
    # ignored, as are padded label slots (class id -1)
    valid_mask = cls_labels != -1
    pos_mask = max_iou > CFG['pos_thres']
    neg_mask = max_iou < CFG['neg_thres']
    cls_mask = tf.logical_or(pos_mask,neg_mask)
    cls_mask = tf.logical_and(cls_mask, valid_mask)
    reg_mask = tf.logical_and(pos_mask, valid_mask)
    cls_mask = tf.cast(cls_mask, outputs.dtype)
    reg_mask = tf.cast(reg_mask, outputs.dtype)
    # Non-positive anchors are relabeled as background (id num_classes)
    cls_labels = cls_labels*reg_mask+(1-reg_mask)*CFG['num_classes']
    cls_loss = compute_cls_loss(cls_outputs, cls_labels, CFG)
    reg_loss = compute_reg_loss(bbox_outputs, box_labels, anchors, CFG)
    # Normalize per image by the number of positive anchors (at least 1)
    cls_loss = tf.reduce_sum(cls_loss*cls_mask,-1)/tf.clip_by_value(tf.reduce_sum(reg_mask,-1),clip_value_min=1.0,clip_value_max=np.inf)
    reg_loss = tf.math.divide_no_nan(tf.reduce_sum(reg_loss*reg_mask,-1),tf.reduce_sum(reg_mask,-1))
    # print(tf.reduce_sum(reg_mask,-1))
    # print('cls loss: {}, reg loss: {}'.format(tf.reduce_mean(CFG['cls_weight']*cls_loss),tf.reduce_mean(CFG['reg_weight']*reg_loss)))
    loss = tf.reduce_mean(CFG['cls_weight']*cls_loss+CFG['reg_weight']*reg_loss)
    return loss
| 39.121795
| 144
| 0.683271
|
4a01baff196a03bfd84e6178d214532bbba9ed8f
| 7,532
|
py
|
Python
|
mechroutines/es/runner/_wfn.py
|
keceli/mechdriver
|
978994ba5c77b6df00078b639c4482dacf269440
|
[
"Apache-2.0"
] | null | null | null |
mechroutines/es/runner/_wfn.py
|
keceli/mechdriver
|
978994ba5c77b6df00078b639c4482dacf269440
|
[
"Apache-2.0"
] | null | null | null |
mechroutines/es/runner/_wfn.py
|
keceli/mechdriver
|
978994ba5c77b6df00078b639c4482dacf269440
|
[
"Apache-2.0"
] | 8
|
2019-12-18T20:09:46.000Z
|
2020-11-14T16:37:28.000Z
|
""" Set runtime parameters for multireference electronic structure calculations
"""
import copy
import automol
import elstruct
from phydat import act_space
from mechanalyzer.inf import spc as sinfo
from mechanalyzer.inf import rxn as rinfo
from mechlib.amech_io import printer as ioprinter
# BUILD THE MULTIREFERENCE WAVEFUNCTION TO PASS MOLPRO ELSTRUCT
def update_kwargs_for_multireference(kwargs, cas_kwargs):
    """ Update the elstruct kwargs dict to handle the multiref params

        Merges the CASSCF-specific kwargs into a copy of the base kwargs.
        The 'gen_lines' sections are combined so that the CAS guess lines
        (section 1 of cas_kwargs) precede the caller's section-1 lines,
        while sections 2 and 3 come from the base kwargs unchanged.

        :param kwargs: base elstruct keyword arguments (must contain a
            'gen_lines' dict with sections 1, 2 and 3)
        :param cas_kwargs: multireference keyword arguments (must contain
            a 'gen_lines' dict with section 1)
        :rtype: dict
    """
    gen_lines = kwargs['gen_lines']
    cas_gen_lines = cas_kwargs['gen_lines']

    merged_gen_lines = {
        1: cas_gen_lines[1] + gen_lines[1],
        2: gen_lines[2],
        3: gen_lines[3]
    }

    new_kwargs = copy.deepcopy(kwargs)
    new_kwargs.update(cas_kwargs)
    # Bug fix: the merged sections belong under the 'gen_lines' key. The
    # original `new_kwargs.update(merged_gen_lines)` injected the integer
    # keys 1/2/3 at the top level of the kwargs dict and left 'gen_lines'
    # holding only the CAS sections from cas_kwargs.
    new_kwargs['gen_lines'] = merged_gen_lines

    return new_kwargs
def multireference_calculation_parameters(zma, spc_info, hs_spc_info,
                                          aspace, mod_thy_info, rxn_info=None):
    """ Prepares a keyword-argument dictionary that can be utilized by the
        elstruct library to perform multireference electronic structure
        calculations. The function is tasked with preparing two parts:
        (1) A string which is placed at the top of the input file that
        contains all neccessary commands for calculating a guess
        multireference wavefunction for a given program, and
        (2) Sets active-space casscf options used for the final part
        of the calculation that matches the user's desired electronic
        structure theory level

        Symmetry is also automatically turned off for all calculations

        Currently, this function only preps Molpro calculations

        :param aspace: pre-computed active space and guess string, or None
            to derive them from spc_info/rxn_info here
        :rtype: dict
    """
    if aspace is not None:
        # Active space + guess wavefunction supplied by the caller
        num_act_orb, num_act_elc, num_states, guess_str = aspace
        guess_lines = guess_str.splitlines()
        casscf_options = cas_options(
            zma, spc_info, num_act_elc, num_act_orb, num_states,
            add_two_closed=False)
        ioprinter.info_message('Using wfn guess from file...', newline=1)
    else:
        # Derive the active space from the reaction or species info
        _inf = rxn_info if rxn_info is not None else spc_info
        typ = 'ts' if rxn_info is not None else 'spc'
        num_act_orb, num_act_elc, num_states = active_space(
            _inf, typ=typ)

        # Build the elstruct CASSCF options list used to build the wfn guess
        # (1) Build wfn with active space
        # (2) Build wfn with active space + 2 closed orbitals for stability
        cas_opt = (
            cas_options(
                zma, spc_info, num_act_elc, num_act_orb, num_states,
                add_two_closed=False),
            cas_options(
                zma, spc_info, num_act_elc, num_act_orb, num_states,
                add_two_closed=True)
        )

        # Write string that has all the components for building the wfn guess
        guess_str = multiref_wavefunction_guess(
            zma, spc_info, hs_spc_info, mod_thy_info, cas_opt)
        guess_lines = guess_str.splitlines()

        # Set casscf options
        casscf_options = cas_opt[0]

        ioprinter.info_message(
            'Generating wfn guess from internal options...', newline=1)

    # Manipulate the opt kwargs to use the wavefunction guess
    cas_kwargs = {
        'casscf_options': casscf_options,
        'gen_lines': {1: guess_lines},
        'mol_options': ('nosym',)  # Turn off symmetry
    }

    return cas_kwargs
# BUILD THE CASSCF OPTIONS
def active_space(info_obj, typ='ts'):
    """ Determine the active space for the multireference MEP scan

        For a single species ('spc') the space is looked up by InChI; for
        a transition state ('ts') the reactant spaces are combined: orbital
        and electron counts are summed, state counts multiplied.

        :rtype: (int, int, int) -- (num_act_orb, num_act_elec, num_states)
    """

    def _lookup(ich, mul):
        """ Active-space parameters for one species, falling back to a
            (mult-1, mult-1, 1) space when the InChI is not tabulated.
        """
        if ich in act_space.DCT:
            entry = act_space.DCT[ich]
            return entry[0], entry[1], entry[2]
        return mul - 1, mul - 1, 1

    if typ == 'spc':
        num_act_orb, num_act_elec, num_states = _lookup(
            sinfo.value(info_obj, 'inchi'), sinfo.value(info_obj, 'mult'))
    elif typ == 'ts':
        num_act_orb, num_act_elec, num_states = 0, 0, 1
        for ich, mul in zip(rinfo.value(info_obj, 'inchi')[0],
                            rinfo.value(info_obj, 'mult')[0]):
            norb, nelec, nstat = _lookup(ich, mul)
            num_act_orb += norb
            num_act_elec += nelec
            num_states *= nstat

    return num_act_orb, num_act_elec, num_states
def cas_options(zma, spc_info, num_act_elc, num_act_orb, num_states,
                add_two_closed=False):
    """ Prepare the CASSCF options for a multireference wavefunction.

        :param zma: Z-matrix of the species
        :param spc_info: (inchi, charge, mult) info tuple
        :param num_act_elc: number of active electrons
        :param num_act_orb: number of active orbitals
        :param num_states: number of electronic states
        :param add_two_closed: move two closed orbitals into the valence space
            (used for wavefunction stability checks)
        :return: tuple of elstruct option strings
    """
    # Closed/occupied orbital counts follow from the total electron count
    formula = automol.zmat.formula(zma)
    nelec_total = automol.formula.electron_count(formula)
    num_closed = (nelec_total - num_act_elc) // 2
    num_occ = num_closed + num_act_orb
    if add_two_closed:
        num_closed -= 2

    # Spin (2S) and charge come straight from the species info
    spin = spc_info[2] - 1
    charge = spc_info[1]

    return (
        elstruct.option.specify(
            elstruct.Option.Scf.MAXITER_, 40),
        elstruct.option.specify(
            elstruct.Option.Casscf.OCC_, num_occ),
        elstruct.option.specify(
            elstruct.Option.Casscf.CLOSED_, num_closed),
        elstruct.option.specify(
            elstruct.Option.Casscf.WFN_, nelec_total, 1, spin, charge,
            num_states),
    )
# CONSTRUCT MULTIREFERENCE WAVEFUNCTION STRINGS FOR COMPLEX GUESSES
def multiref_wavefunction_guess(zma, spc_info, hs_spc_info,
                                mod_thy_info, casscf_options):
    """ Prepare wavefunction template for multireference electronic structure calcs.

        Builds an input-section string consisting of (1) a high-spin UHF
        calculation, (2) a low-spin CASSCF calculation with the first options
        set, and optionally (3) a second CASSCF calculation with a larger
        active space, to serve as an orbital guess for subsequent calculations.

        :param zma: Z-matrix of the species
        :param spc_info: (inchi, charge, mult) for the low-spin state
        :param hs_spc_info: (inchi, charge, mult) for the high-spin state
        :param mod_thy_info: modified theory info (prog, method, basis, orb type)
        :param casscf_options: sequence of one or two CASSCF option sets
        :return: concatenated guess-input string
    """
    # Set variables for the programs
    [_, charge, mul] = spc_info
    [_, _, high_mul] = hs_spc_info
    prog, _, basis, _ = mod_thy_info
    # Write a string for the high-spin UHF wfn calculation
    guess_str1 = elstruct.writer.energy(
        geo=zma,
        charge=charge,
        mult=high_mul,
        method='hf',
        basis=basis,
        prog=prog,
        orb_type='UU',
        mol_options=['nosym'],
    )
    guess_str1 += '\n\n'
    # NOTE(review): the [2:-6] slice strips program-specific header/footer lines
    # from the writer output (after the two appended blank lines) -- assumes a
    # fixed input-template layout; confirm against the elstruct writer format.
    guess_str1 = '\n'.join(guess_str1.splitlines()[2:-6])
    # Write a string for low-spin CASSCF wfn calc
    guess_str2 = elstruct.writer.energy(
        geo=zma,
        charge=charge,
        mult=mul,
        method='casscf',
        basis=basis,
        prog=prog,
        orb_type='RR',
        casscf_options=casscf_options[0],
        mol_options=['nosym'],
    )
    guess_str2 += '\n\n'
    # Same header/footer stripping as above
    guess_str2 = '\n'.join(guess_str2.splitlines()[2:-6])
    # Combine two strings together
    guess_str = guess_str1 + '\n' + guess_str2 + '\n'
    # Write a second string for low-spin, lg active space CASSCF wfn calc
    if len(casscf_options) > 1:
        guess_str3 = elstruct.writer.energy(
            geo=zma,
            charge=charge,
            mult=mul,
            method='casscf',
            basis=basis,
            prog=prog,
            orb_type='RR',
            casscf_options=casscf_options[1],
            mol_options=['nosym'],
        )
        guess_str3 += '\n\n'
        # Only the header is stripped here; the footer of the final section is kept
        guess_str3 = '\n'.join(guess_str3.splitlines()[2:])
        guess_str += guess_str3 + '\n'
    return guess_str
| 33.035088
| 83
| 0.630244
|
4a01bb09054485f5b1b61a5cdd7184d0456869f3
| 24,828
|
py
|
Python
|
code/ranking_experiments.py
|
clips/clinspell
|
9ec38827b367cfa814f2adce56f909db15052c3e
|
[
"MIT"
] | 74
|
2017-05-16T07:07:12.000Z
|
2022-02-06T10:00:55.000Z
|
code/ranking_experiments.py
|
clips/clinspell
|
9ec38827b367cfa814f2adce56f909db15052c3e
|
[
"MIT"
] | 8
|
2018-01-24T04:14:23.000Z
|
2021-08-24T21:50:18.000Z
|
code/ranking_experiments.py
|
clips/clinspell
|
9ec38827b367cfa814f2adce56f909db15052c3e
|
[
"MIT"
] | 18
|
2017-07-03T12:47:33.000Z
|
2021-09-08T18:31:44.000Z
|
# dependencies
from pyxdameraulevenshtein import damerau_levenshtein_distance
import fasttext
from doublemetaphone import dm
import numpy as np
# built-in packages
from functools import reduce
from statistics import mean
from math import log
import random
import json
import time
# development experiments
class Development(object):
    def __init__(self, parameters, language):
        """
        Configure the ranker and load its language resources.

        Loads the trained fastText embedding model, the corpus frequency list
        and the stopword list for the given language from the working directory.

        :param parameters: dict of ranking parameters (see attribute comments)
        :param language: corpus language, one of "en" or "nl"
        """
        assert language in ["en", "nl"]
        self.language = language
        # load frequency list
        pathtofrequencies = 'frequencies_' + language + '.json'
        # load trained fasttext model
        pathtomodel = 'embeddings_' + language + '.bin'
        # PHASE 1: vector composition of the context window
        self.comp_function = parameters['comp_function']  # item from ["sum", "mult", "max"]
        self.include_misspelling = parameters['include_misspelling']  # boolean
        self.include_oov_candidates = parameters['include_oov_candidates']  # boolean
        self.model = fasttext.load_model(pathtomodel)  # path to fasttext model
        # PHASE 2: context window shape and weighting
        self.window_size = parameters['window_size']  # number in range(0,11)
        self.reciprocal = parameters['reciprocal']  # boolean: weight tokens by 1/distance
        self.remove_stopwords = parameters['remove_stopwords']  # boolean
        self.stopwords = frozenset(json.load(open('stopwords_' + str(self.language) + '.json', 'r')))
        # PHASE 3: edit-distance penalty
        self.edit_distance = parameters['edit_distance']  # item from [1, 2, 3, 4]
        # PHASE 4: out-of-vocabulary candidate penalty
        self.oov_penalty = parameters['oov_penalty']  # oov penalty tuned with self.tune_oov()
        # OUTPUT
        self.ranking_method = parameters['ranking_method']  # item from ["context", "noisy_channel", "frequency",
        # "ensemble"]
        self.frequency_dict = json.load(open(pathtofrequencies, 'r'))  # path to frequency list
        self.k = parameters['k-best']  # positive natural number
@staticmethod
def comp_sum(vectors, reciprocal=False):
"""
:param vectors: vectors to be composed
:param reciprocal: if True, apply reciprocal weighting
:return: composed vector representation
"""
if not reciprocal:
composed_vector = np.sum(vectors, axis=0)
else:
weight_vector = np.reciprocal(np.arange(1., len(vectors) + 1))
weighted_vectors = []
for i, weight in enumerate(weight_vector):
weighted_vectors.append(vectors[i] * weight)
composed_vector = np.sum(weighted_vectors, axis=0)
return composed_vector
@staticmethod
def comp_mult(vectors, reciprocal=False):
"""
:param vectors: vectors to be composed
:param reciprocal: if True, apply reciprocal weighting
:return: composed vector representation
"""
if not reciprocal:
composed_vector = reduce(lambda x, y: x * y, vectors)
else:
weight_vector = np.reciprocal(np.arange(1., len(vectors) + 1))
weighted_vectors = []
for i, weight in enumerate(weight_vector):
weighted_vectors.append(vectors[i] * weight)
composed_vector = reduce(lambda x, y: x * y, weighted_vectors)
return composed_vector
@staticmethod
def comp_max(vectors, reciprocal=False):
"""
:param vectors: vectors to be composed
:param reciprocal: if True, apply reciprocal weighting
:return: composed vector representation
"""
if not reciprocal:
composed_vector = np.amax(vectors, axis=0)
else:
weight_vector = np.reciprocal(np.arange(1., len(vectors) + 1))
weighted_vectors = []
for i, weight in enumerate(weight_vector):
weighted_vectors.append(vectors[i] * weight)
composed_vector = np.amax(weighted_vectors, axis=0)
return composed_vector
@staticmethod
def normalize(vector):
"""
Normalizes a vector.
:param vector: a numpy array or list to normalize.
:return a normalized vector.
"""
if not vector.any():
return vector
return vector / np.linalg.norm(vector)
def vectorize(self, sequence, remove_oov=True):
"""
:param sequence: sequence to be vectorized
:param remove_oov: whether to vectorize oov tokens
:return: vectorized sequence
"""
if remove_oov:
sequence = [x for x in sequence if x in self.model.words]
return [np.array(self.model[x]) for x in sequence]
    @staticmethod
    def spell_score(misspelling, candidates, method=1):
        """
        Calculates the edit distance between a misspelling and each candidate according to the chosen method:
        1 = orthographic Damerau-Levenshtein distance only;
        2 = phonetic (double metaphone) distance only, with 0 mapped to 1;
        3 = mean of orthographic and phonetic distances;
        4 = squared weighted combination (2 * orthographic + phonetic) ** 2.
        NOTE(review): callers divide similarity scores by these values, and
        methods 1, 3 and 4 can return 0 when a candidate equals the misspelling
        -- confirm detected misspellings never coincide with a candidate.
        :param misspelling: misspelling
        :param candidates: list of candidates
        :param method: chosen method from [1, 2, 3, 4]
        :return: list of edit distances between misspelling and each candidate
        """
        lexical_scores = [damerau_levenshtein_distance(misspelling, candidate)
                          for candidate in candidates]
        if method == 1:
            return lexical_scores
        else:
            # dm() returns a (primary, secondary) double-metaphone pair;
            # only the primary encoding is compared
            phonetic_scores = [damerau_levenshtein_distance(dm(misspelling)[0], dm(candidate)[0])
                               for candidate in candidates]
            if method == 2:
                # map phonetic distance 0 to 1 so later divisions stay safe
                return [phonetic_score if phonetic_score != 0 else 1 for phonetic_score in phonetic_scores]
            elif method == 3:
                return [0.5 * (a + b) for a, b in zip(lexical_scores, phonetic_scores)]
            elif method == 4:
                return [(2 * a + b) ** 2 for a, b in zip(lexical_scores, phonetic_scores)]
            else:
                raise ValueError('Method must be element from [1, 2, 3, 4]')
def ranking_experiment(self, detection_list, detection_contexts, candidates_list):
"""
Experimental implementation of our context-sensitive ranking model.
:param detection_list: list of misspellings
:param detection_contexts: list of misspelling context tuples ('left context', 'right context')
:param candidates_list: list of candidate list per misspelling
:param r: loaded vector representations
:return: list with corrections or k-best corrections
"""
correction_list = []
for misspelling, context, candidates in zip(detection_list, detection_contexts, candidates_list):
# PHASE 1 AND 2: composition method and context weighting
processed_context = ['', '']
processed_context[0] = " ".join(context[0].split()[::-1][:self.window_size])
processed_context[1] = " ".join(context[1].split()[:self.window_size])
comp = self.comp_function
if comp == "sum":
comp_function = self.comp_sum
elif comp == "mult":
comp_function = self.comp_mult
else:
comp_function = self.comp_max
if self.remove_stopwords:
processed_context[0] = [t for t in processed_context[0] if t not in self.stopwords]
processed_context[1] = [t for t in processed_context[1] if t not in self.stopwords]
center = self.normalize(np.array(self.model[misspelling])) # create or call vector representation for misspelling
left_window = self.vectorize(processed_context[0], remove_oov=True) # take only in-voc tokens
right_window = self.vectorize(processed_context[1], remove_oov=True) # take only in-voc tokens
if left_window:
vectorized_left_window = comp_function(left_window, reciprocal=self.reciprocal)
else:
vectorized_left_window = np.zeros(len(self.model.dim))
if right_window:
vectorized_right_window = comp_function(right_window, reciprocal=self.reciprocal)
else:
vectorized_right_window = np.zeros(len(self.model.dim))
if self.include_misspelling:
vectorized_context = comp_function((vectorized_left_window, center, vectorized_right_window))
else:
vectorized_context = comp_function((vectorized_left_window, vectorized_right_window))
vectorized_context = self.normalize(vectorized_context)
candidate_vectors = []
remove_idxs = []
oov_idxs = []
# make vector representations of candidates
for i, candidate in enumerate(candidates):
if candidate in self.model.words:
candidate_vectors.append(self.normalize(np.array(self.model[candidate])))
else:
if self.include_oov_candidates:
candidate_vectors.append(self.normalize(np.array(self.model[candidate])))
oov_idxs.append(i)
else:
remove_idxs.append(i)
# update candidate list
candidates = [candidate for i, candidate in enumerate(candidates) if i not in remove_idxs]
# calculate cosine similarities
distances = [np.dot(vectorized_context, candidate) for candidate in candidate_vectors]
# PHASE 3: edit distance penalty
method = self.edit_distance
if method:
spell_scores = self.spell_score(misspelling, candidates, method=method)
distances = [a / b for a, b in zip(distances, spell_scores)]
# PHASE 4: oov criteria
if self.include_oov_candidates:
for i, d in enumerate(distances):
if i in oov_idxs:
distances[i] /= self.oov_penalty
# OUTPUT
if self.k == 1:
try:
correction_list.append(candidates[np.argmax(distances)])
except ValueError:
correction_list.append('')
elif self.k > 1:
correction_list.append([candidates[i] for i in np.argsort(distances)[::-1][:self.k]])
else:
raise ValueError('k must be positive natural number')
return correction_list
def noisychannel_ranking(self, detection_list, candidates_list):
"""
An approximate implementation of the ranking method described in (Lai et al. 2015)
:param detection_list: list of misspellings
:param candidates_list: list of candidate list per misspelling
:param frequency_dict: corpus frequencies from training data
:param k_best: if True, return k highest ranked candidates instead of single one
:return: list with corrections or k-best corrections
"""
correction_list = []
confidences = []
for misspelling, candidates in zip(detection_list, candidates_list):
score_list = []
for candidate in candidates:
orthographic_edit_distance = damerau_levenshtein_distance(misspelling, candidate)
phonetic_edit_distance = damerau_levenshtein_distance(dm(misspelling)[0], dm(candidate)[0])
spell_score = (2 * orthographic_edit_distance + phonetic_edit_distance) ** 2 # P(m|c)
try:
frequency = self.frequency_dict[candidate]
except KeyError:
frequency = 1
frequency_score = 1 / (1 + log(frequency)) # P(c)
score = spell_score * frequency_score # P(c|m) = P(m|c)*P(c)
score_list.append(score)
score_list = np.array(score_list)
if len(score_list) > 1:
sorted_distances = [score_list[i] for i in np.argsort(score_list)]
top1 = sorted_distances[0]
top2 = sorted_distances[1]
confidence = abs(top1 - top2) / top1
confidences.append(confidence)
else:
confidences.append(0)
if self.k == 1:
try:
correction_list.append(candidates[np.argmin(score_list)])
except ValueError:
correction_list.append('')
elif self.k > 1:
correction_list.append([candidates[i] for i in np.argsort(score_list)[:self.k]])
else:
raise ValueError('k must be positive natural number')
self.confidences = confidences
return correction_list
def frequency_baseline(self, detection_list, candidates_list):
"""
Majority frequency baseline
:param detection_list: list of misspellings
:param candidates_list: list of candidate list per misspelling
:return: list with corrections or k-best corrections
"""
correction_list = []
for misspelling, candidates in zip(detection_list, candidates_list):
candidates = [candidate for candidate in candidates if candidate in self.frequency_dict.keys()]
frequencies = [self.frequency_dict[candidate] for candidate in candidates]
if self.k == 1:
try:
correction_list.append(candidates[np.argmax(frequencies)])
except ValueError:
correction_list.append('')
elif self.k > 1:
correction_list.append([candidates[i] for i in np.argsort(frequencies)[::-1][:self.k]])
else:
raise ValueError('k must be positive natural number')
return correction_list
@staticmethod
def sub_sampling(correction_list, corrected_list, k=10):
"""
Calculates the correction accuracy averaged over k subsampled folds
:param correction_list: list of corrections
:param corrected_list: list of gold standard corrections
:param k: number of folds
:return: correction accuracy averaged over k subsampled folds
"""
length = len(correction_list)
all_idxs = list(range(length))
random.seed(0.56)
random.shuffle(all_idxs)
folds_length = length // k
heldout_parts = [all_idxs[folds_length*i:folds_length*(i+1)] for i in range(k)]
scores = []
for heldout_part in heldout_parts:
test_idxs = [i for i in all_idxs if i not in heldout_part]
corrects = 0
for i in test_idxs:
if correction_list[i] == corrected_list[i]:
corrects += 1
score = corrects/len(test_idxs)
scores.append(score)
return mean(scores)
    def conduct_experiment(self, devcorpus, candidates_list):
        """
        Streamlines experiments with the various ranking modules.
        Dispatches on self.ranking_method ("context", "noisy_channel",
        "frequency" or "ensemble") and stores inputs and outputs on the
        instance for later analysis (e.g. frequency_analysis()).
        :param devcorpus: devcorpus generated with make_devcorpus.py;
            element 0 holds gold corrections, 1 the misspellings, 2 the contexts
        :param candidates_list: list of candidate list per misspelling
        :return: (subsampled correction accuracy, list of corrections)
        """
        corrected_list = devcorpus[0]
        detection_list = devcorpus[1]
        detection_contexts = devcorpus[2]
        # keep inputs around for post-hoc analyses
        self.corrected_list = corrected_list
        self.detection_list = detection_list
        self.detection_contexts = detection_contexts
        self.candidates_list = candidates_list
        if self.ranking_method == 'context':
            correction_list = self.ranking_experiment(detection_list, detection_contexts, candidates_list)
        elif self.ranking_method == 'noisy_channel':
            correction_list = self.noisychannel_ranking(detection_list, candidates_list)
        elif self.ranking_method == 'frequency':
            correction_list = self.frequency_baseline(detection_list, candidates_list)
        elif self.ranking_method == 'ensemble':
            # context ranking by default, but defer to the noisy-channel answer
            # wherever the noisy channel is confident about its top candidate
            correction_list = self.ranking_experiment(detection_list, detection_contexts, candidates_list)
            correction_list_2 = self.noisychannel_ranking(detection_list, candidates_list)
            for i, confidence in enumerate(self.confidences):
                if confidence > 1.3:  # NOTE(review): apparently tuned threshold -- confirm
                    correction_list[i] = correction_list_2[i]
        else:
            raise ValueError('No valid ranking method given')
        score = self.sub_sampling(correction_list, corrected_list)
        self.correction_list = correction_list
        self.score = score
        return score, correction_list
    @staticmethod
    def grid_search(devcorpus, candidates_list, language):
        """
        Conduct grid search to find best parameters for a corpus containing only in-vector-vocabulary corrections.
        Iterates over composition function, misspelling inclusion, window size,
        reciprocal weighting, stopword removal and edit-distance method (2880
        combinations) and records the exact-match accuracy of each.
        :param devcorpus: devcorpus generated with make_devcorpus.py
        :param candidates_list: list of candidate list per misspelling
        :param language: language from ["en", "nl"]
        :return: dictionary with parameter settings as keys and their correction accuracy as values
        """
        # default parameters; the swept attributes are overwritten in the loops below
        parameters = {'comp_function': 'sum',
                      'include_misspelling': False,
                      'include_oov_candidates': False,
                      'window_size': 6,
                      'reciprocal': False,
                      'remove_stopwords': False,
                      'edit_distance': 1,
                      'oov_penalty': 1.5,
                      'ranking_method': 'context',
                      'k-best': 1}
        dev = Development(parameters, language)
        corrected_list = devcorpus[0]
        detection_list = devcorpus[1]
        detection_contexts = devcorpus[2]
        scores_dict = {}
        # wall-clock progress reporting per outer-loop iteration
        start_time = 0
        end_time = 0
        for comp_function in ["sum", "mult", "max"]:
            print("New run")
            run_time = end_time - start_time
            print("Last run took " + str(run_time) + " seconds")
            start_time = time.time()
            dev.comp_function = comp_function
            for include_misspelling in [True, False]:
                dev.include_misspelling = include_misspelling
                for window_size in range(11):
                    dev.window_size = window_size
                    for reciprocal in [True, False]:
                        dev.reciprocal = reciprocal
                        for remove_stopwords in [True, False]:
                            dev.remove_stopwords = remove_stopwords
                            for edit_distance in range(1, 5):
                                dev.edit_distance = edit_distance
                                correction_list = dev.ranking_experiment(detection_list,detection_contexts,
                                                                         candidates_list)
                                accuracy = len([c for i, c in enumerate(correction_list)
                                                if c == corrected_list[i]]) / len(correction_list)
                                parameters = (comp_function, include_misspelling, window_size, reciprocal,
                                              remove_stopwords, edit_distance)
                                scores_dict[parameters] = accuracy
            end_time = time.time()
        return scores_dict
    @staticmethod
    def tune_oov(devcorpus, candidates_list, best_parameters, language):
        """
        Conduct search for best oov penalty for corpus.
        Sweeps penalties 0.0, 0.1, ..., 2.9 with the context ranker configured
        by best_parameters and records the accuracy of each.
        :param devcorpus: devcorpus generated with make_devcorpus.py
        :param candidates_list: list of candidate list per misspelling
        :param best_parameters: best parameters for the devcorpus
        :param language: language from ["en", "nl"]
        :return: dictionary with oov penalties as keys and their correction accuracy as values
        """
        dev = Development(best_parameters, language)
        corrected_list = devcorpus[0]
        detection_list = devcorpus[1]
        detection_contexts = devcorpus[2]
        scores_dict = {}
        # candidate penalties: 0.0 up to 2.9 in steps of 0.1
        values = list(range(30))
        values = [value / 10 for value in values]
        for value in values:
            dev.oov_penalty = value
            correction_list = dev.ranking_experiment(detection_list, detection_contexts, candidates_list)
            accuracy = len([c for i, c in enumerate(correction_list)
                            if c == corrected_list[i]]) / len(correction_list)
            scores_dict[value] = accuracy
        return scores_dict
@staticmethod
def define_best_parameters(**kwargs):
"""
Calculates the best parameters or oov penalty averaged over several corpora
:param kwargs: dictionary with obligatory 'iv' key, with as value a list of scores_dicts calculated with
Development.grid_search()
if also 'oov' key, it calculates the optimal oov penalty for all 'iv' and 'oov' scores_dicts calculated with
Development.tune_oov()
:return: best parameters or oov penalty averaged over several corpora
"""
if "oov" not in kwargs.keys(): # grid search
averaged_scores_dict = {}
for scores_dict in kwargs['iv']:
for key in scores_dict:
try:
averaged_scores_dict[key].append(scores_dict[key])
except ValueError:
averaged_scores_dict[key] = [scores_dict[key]]
for key in averaged_scores_dict:
averaged_scores_dict[key] = mean(averaged_scores_dict[key])
inverse_dict = {v: k for k, v in averaged_scores_dict.items()}
best_parameters = inverse_dict[max(inverse_dict.keys())]
parameters_dict = {}
parameters_dict["comp_function"] = best_parameters[0]
parameters_dict["include_misspelling"] = best_parameters[1]
parameters_dict["window_size"] = best_parameters[2]
parameters_dict["reciprocal"] = best_parameters[3]
parameters_dict["remove_stopwords"] = best_parameters[4]
parameters_dict["edit_distance"] = best_parameters[5]
return parameters_dict
else: # tune oov penalty
averaged_scores_dict = {}
for scores_dict in kwargs['iv']:
for key in scores_dict:
try:
averaged_scores_dict[key].append(scores_dict[key])
except ValueError:
averaged_scores_dict[key] = [scores_dict[key]]
for key in averaged_scores_dict:
averaged_scores_dict[key] = mean(averaged_scores_dict[key])
for key in kwargs['oov']:
averaged_scores_dict[key] = (averaged_scores_dict[key]*9 + kwargs['oov']['key']) / 10 # weighted avg
inverse_dict = {v: k for k, v in averaged_scores_dict.items()}
best_parameter = inverse_dict[max(inverse_dict.keys())]
return best_parameter
def frequency_analysis(self):
"""
Calculates the correction accuracies for 3 scenarios: correct replacement has highest frequency (0), second
highest frequency (1), or lower relative frequency (2) of all candidates
:return: dictionary with correction accuracy per scenario
"""
scores_dict = {}
for j in [0, 1, 2]:
idxs = []
for i, candidates in enumerate(self.candidates_list):
frequencies = [self.frequency_dict[c] if c in self.frequency_dict.keys() else 1 for c in candidates]
frequencies = np.array(frequencies)
sorted_candidates = [candidates[i] for i in np.argsort(frequencies)[::-1]]
if j in [0, 1]:
try:
eligible_candidates = sorted_candidates[j]
except IndexError:
eligible_candidates = []
if self.corrected_list[i] in eligible_candidates:
idxs.append(i)
else:
try:
eligible_candidates = sorted_candidates[2:]
except IndexError:
eligible_candidates = []
if self.corrected_list[i] in eligible_candidates:
idxs.append(i)
correction_list = [x for i, x in enumerate(self.correction_list) if i in idxs]
corrected_list = [x for i, x in enumerate(self.corrected_list) if i in idxs]
accuracy = len([c for i, c in enumerate(correction_list) if c == corrected_list[i]]) / len(correction_list)
scores_dict[j] = accuracy
print('Top k')
print(j)
print('Amount of instances')
print(len(idxs))
print('Accuracy')
print(accuracy)
print(scores_dict)
return scores_dict
| 41.242525
| 126
| 0.600209
|
4a01bb56173c21c5ae85d8803263240e473f8934
| 1,391
|
py
|
Python
|
src/ralph_scrooge/models/base.py
|
xliiv/ralph_pricing
|
88a295b6f0af66ae03c145205ada99f17ab51dd0
|
[
"Apache-2.0"
] | null | null | null |
src/ralph_scrooge/models/base.py
|
xliiv/ralph_pricing
|
88a295b6f0af66ae03c145205ada99f17ab51dd0
|
[
"Apache-2.0"
] | null | null | null |
src/ralph_scrooge/models/base.py
|
xliiv/ralph_pricing
|
88a295b6f0af66ae03c145205ada99f17ab51dd0
|
[
"Apache-2.0"
] | 1
|
2021-11-15T21:21:17.000Z
|
2021-11-15T21:21:17.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django.db import models as db
from django.utils.translation import ugettext_lazy as _
from lck.django.choices import Choices
from lck.django.common.models import Named
class BaseUsageType(Choices):
    """Closed set of base-usage kinds; used as integer choices for
    BaseUsage.type."""
    _ = Choices.Choice  # lck Choices idiom: `_` declares each member below
    usage_type = _("Usage Type")
    team = _("Team")
    extra_cost = _("Extra Cost")
    pricing_service = _("Pricing Service")
    dynamic_extra_cost = _("Dynamic extra cost")
class BaseUsage(Named):
    """Common base model for cost sources (usage types, teams, extra costs,
    pricing services); see BaseUsageType for the kinds."""
    symbol = db.CharField(
        verbose_name=_("symbol"),
        max_length=255,
        default="",
        blank=True,
    )
    # Kind discriminator; set by concrete usages, not editable in the admin.
    type = db.PositiveIntegerField(
        verbose_name=_("type"),
        choices=BaseUsageType(),
        editable=False,
    )
    divide_by = db.IntegerField(
        verbose_name=_("Divide by"),
        help_text=_(
            "Divide value by 10 to the power of entered value. Ex. with "
            "divide by = 3 and value = 1 000 000, presented value is 1 000."
        ),
        default=0,
    )
    rounding = db.IntegerField(
        # BUGFIX: was verbose_name=("Value rounding") -- a plain string that
        # skipped the ugettext_lazy translation wrapper every other field uses.
        verbose_name=_("Value rounding"),
        help_text=_("Decimal places"),
        default=0,
    )

    class Meta:
        app_label = 'ralph_scrooge'

    def __unicode__(self):
        return self.name
| 25.759259
| 76
| 0.641984
|
4a01bde32b2da347d153fd3804ee629e68c7fcbf
| 60,482
|
py
|
Python
|
nmigen/hdl/ast.py
|
davidlattimore/nmigen
|
8fe319f065807f421b092e7ffb8b90748512bf8c
|
[
"BSD-2-Clause"
] | null | null | null |
nmigen/hdl/ast.py
|
davidlattimore/nmigen
|
8fe319f065807f421b092e7ffb8b90748512bf8c
|
[
"BSD-2-Clause"
] | null | null | null |
nmigen/hdl/ast.py
|
davidlattimore/nmigen
|
8fe319f065807f421b092e7ffb8b90748512bf8c
|
[
"BSD-2-Clause"
] | null | null | null |
from abc import ABCMeta, abstractmethod
import traceback
import warnings
import typing
from collections import OrderedDict
from collections.abc import Iterable, MutableMapping, MutableSet, MutableSequence
from enum import Enum
from .. import tracer
from .._utils import *
from .._unused import *
__all__ = [
"Shape", "signed", "unsigned",
"Value", "Const", "C", "AnyConst", "AnySeq", "Operator", "Mux", "Part", "Slice", "Cat", "Repl",
"Array", "ArrayProxy",
"Signal", "ClockSignal", "ResetSignal",
"UserValue",
"Sample", "Past", "Stable", "Rose", "Fell", "Initial",
"Statement", "Switch",
"Property", "Assign", "Assert", "Assume", "Cover",
"ValueKey", "ValueDict", "ValueSet", "SignalKey", "SignalDict", "SignalSet",
]
class DUID:
    """Deterministic Unique IDentifier.

    Instances are numbered in creation order by a class-level counter, so
    ``duid`` is unique per instance and reproducible across runs.
    """
    __counter = 0

    def __init__(self):
        self.duid = DUID.__counter
        DUID.__counter += 1
class Shape:
    """Bit width and signedness of a value.

    A ``Shape`` can be constructed using:
      * explicit bit width and signedness;
      * aliases :func:`signed` and :func:`unsigned`;
      * casting from a variety of objects.

    A ``Shape`` can be cast from:
      * an integer, where the integer specifies the bit width;
      * a range, where the result is wide enough to represent any element of the range, and is
        signed if any element of the range is signed;
      * an :class:`Enum` with all integer members or :class:`IntEnum`, where the result is wide
        enough to represent any member of the enumeration, and is signed if any member of
        the enumeration is signed.

    Parameters
    ----------
    width : int
        The number of bits in the representation, including the sign bit (if any).
    signed : bool
        If ``False``, the value is unsigned. If ``True``, the value is signed two's complement.
    """
    def __init__(self, width=1, signed=False):
        if not isinstance(width, int) or width < 0:
            raise TypeError("Width must be a non-negative integer, not {!r}"
                            .format(width))
        self.width = width
        self.signed = signed

    def __iter__(self):
        # Supports ``width, signed = shape`` unpacking.
        return iter((self.width, self.signed))

    @staticmethod
    def cast(obj, *, src_loc_at=0):
        """Cast ``obj`` to a ``Shape``; see the class docstring for accepted types."""
        if isinstance(obj, Shape):
            return obj
        if isinstance(obj, int):
            return Shape(obj)
        if isinstance(obj, tuple):
            # Legacy (width, signed) tuples still work but are deprecated.
            width, signed = obj
            warnings.warn("instead of `{tuple}`, use `{constructor}({width})`"
                          .format(constructor="signed" if signed else "unsigned", width=width,
                                  tuple=obj),
                          DeprecationWarning, stacklevel=2 + src_loc_at)
            return Shape(width, signed)
        if isinstance(obj, range):
            if len(obj) == 0:
                return Shape(0, obj.start < 0)
            # The widest representable element is either the start or the last
            # element (stop - step); signedness follows from either being negative.
            signed = obj.start < 0 or (obj.stop - obj.step) < 0
            width = max(bits_for(obj.start, signed),
                        bits_for(obj.stop - obj.step, signed))
            return Shape(width, signed)
        if isinstance(obj, type) and issubclass(obj, Enum):
            min_value = min(member.value for member in obj)
            max_value = max(member.value for member in obj)
            if not isinstance(min_value, int) or not isinstance(max_value, int):
                raise TypeError("Only enumerations with integer values can be used "
                                "as value shapes")
            signed = min_value < 0 or max_value < 0
            width = max(bits_for(min_value, signed), bits_for(max_value, signed))
            return Shape(width, signed)
        raise TypeError("Object {!r} cannot be used as value shape".format(obj))

    def __repr__(self):
        if self.signed:
            return "signed({})".format(self.width)
        else:
            return "unsigned({})".format(self.width)

    def __eq__(self, other):
        # (int, bool) tuples compare equal to the corresponding Shape for
        # backward compatibility with the deprecated tuple form above.
        if isinstance(other, tuple) and len(other) == 2:
            width, signed = other
            if isinstance(width, int) and isinstance(signed, bool):
                return self.width == width and self.signed == signed
            else:
                raise TypeError("Shapes may be compared with other Shapes and (int, bool) tuples, "
                                "not {!r}"
                                .format(other))
        if not isinstance(other, Shape):
            raise TypeError("Shapes may be compared with other Shapes and (int, bool) tuples, "
                            "not {!r}"
                            .format(other))
        return self.width == other.width and self.signed == other.signed
def unsigned(width):
    """Shorthand for ``Shape(width, signed=False)``: an unsigned value ``width`` bits wide."""
    return Shape(width, signed=False)
def signed(width):
    """Shorthand for ``Shape(width, signed=True)``: a two's-complement value ``width`` bits wide."""
    return Shape(width, signed=True)
class Value(metaclass=ABCMeta):
@staticmethod
def cast(obj):
"""Converts ``obj`` to an nMigen value.
Booleans and integers are wrapped into a :class:`Const`. Enumerations whose members are
all integers are converted to a :class:`Const` with a shape that fits every member.
"""
if isinstance(obj, Value):
return obj
if isinstance(obj, int):
return Const(obj)
if isinstance(obj, Enum):
return Const(obj.value, Shape.cast(type(obj)))
raise TypeError("Object {!r} cannot be converted to an nMigen value".format(obj))
def __init__(self, *, src_loc_at=0):
super().__init__()
self.src_loc = tracer.get_src_loc(1 + src_loc_at)
def __bool__(self):
raise TypeError("Attempted to convert nMigen value to Python boolean")
def __invert__(self):
return Operator("~", [self])
def __neg__(self):
return Operator("-", [self])
def __add__(self, other):
return Operator("+", [self, other])
def __radd__(self, other):
return Operator("+", [other, self])
def __sub__(self, other):
return Operator("-", [self, other])
def __rsub__(self, other):
return Operator("-", [other, self])
def __mul__(self, other):
return Operator("*", [self, other])
def __rmul__(self, other):
return Operator("*", [other, self])
def __check_divisor(self):
width, signed = self.shape()
if signed:
# Python's division semantics and Verilog's division semantics differ for negative
# divisors (Python uses div/mod, Verilog uses quo/rem); for now, avoid the issue
# completely by prohibiting such division operations.
raise NotImplementedError("Division by a signed value is not supported")
def __mod__(self, other):
other = Value.cast(other)
other.__check_divisor()
return Operator("%", [self, other])
def __rmod__(self, other):
self.__check_divisor()
return Operator("%", [other, self])
def __floordiv__(self, other):
other = Value.cast(other)
other.__check_divisor()
return Operator("//", [self, other])
def __rfloordiv__(self, other):
self.__check_divisor()
return Operator("//", [other, self])
def __check_shamt(self):
width, signed = self.shape()
if signed:
# Neither Python nor HDLs implement shifts by negative values; prohibit any shifts
# by a signed value to make sure the shift amount can always be interpreted as
# an unsigned value.
raise TypeError("Shift amount must be unsigned")
def __lshift__(self, other):
other = Value.cast(other)
other.__check_shamt()
return Operator("<<", [self, other])
def __rlshift__(self, other):
self.__check_shamt()
return Operator("<<", [other, self])
def __rshift__(self, other):
other = Value.cast(other)
other.__check_shamt()
return Operator(">>", [self, other])
def __rrshift__(self, other):
self.__check_shamt()
return Operator(">>", [other, self])
def __and__(self, other):
return Operator("&", [self, other])
def __rand__(self, other):
return Operator("&", [other, self])
def __xor__(self, other):
return Operator("^", [self, other])
def __rxor__(self, other):
return Operator("^", [other, self])
def __or__(self, other):
return Operator("|", [self, other])
def __ror__(self, other):
return Operator("|", [other, self])
def __eq__(self, other):
return Operator("==", [self, other])
def __ne__(self, other):
return Operator("!=", [self, other])
def __lt__(self, other):
return Operator("<", [self, other])
def __le__(self, other):
return Operator("<=", [self, other])
def __gt__(self, other):
return Operator(">", [self, other])
def __ge__(self, other):
return Operator(">=", [self, other])
def __abs__(self):
width, signed = self.shape()
if signed:
return Mux(self >= 0, self, -self)
else:
return self
def __len__(self):
return self.shape().width
def __getitem__(self, key):
n = len(self)
if isinstance(key, int):
if key not in range(-n, n):
raise IndexError(f"Index {key} is out of bounds for a {n}-bit value")
if key < 0:
key += n
return Slice(self, key, key + 1)
elif isinstance(key, slice):
start, stop, step = key.indices(n)
if step != 1:
return Cat(self[i] for i in range(start, stop, step))
return Slice(self, start, stop)
else:
raise TypeError("Cannot index value with {}".format(repr(key)))
    def as_unsigned(self):
        """Conversion to unsigned.

        Returns
        -------
        Value, out
            This ``Value`` reinterpreted as a unsigned integer.
        """
        return Operator("u", [self])

    def as_signed(self):
        """Conversion to signed.

        Returns
        -------
        Value, out
            This ``Value`` reinterpreted as a signed integer.
        """
        return Operator("s", [self])

    def bool(self):
        """Conversion to boolean.

        Returns
        -------
        Value, out
            ``1`` if any bits are set, ``0`` otherwise.
        """
        return Operator("b", [self])
    def any(self):
        """Check if any bits are ``1`` (OR reduction).

        Returns
        -------
        Value, out
            ``1`` if any bits are set, ``0`` otherwise.
        """
        return Operator("r|", [self])

    def all(self):
        """Check if all bits are ``1`` (AND reduction).

        Returns
        -------
        Value, out
            ``1`` if all bits are set, ``0`` otherwise.
        """
        return Operator("r&", [self])

    def xor(self):
        """Compute pairwise exclusive-or of every bit (parity).

        Returns
        -------
        Value, out
            ``1`` if an odd number of bits are set, ``0`` if an even number of bits are set.
        """
        return Operator("r^", [self])

    def implies(premise, conclusion):
        """Implication, i.e. ``~premise | conclusion``.

        Returns
        -------
        Value, out
            ``0`` if ``premise`` is true and ``conclusion`` is not, ``1`` otherwise.
        """
        return ~premise | conclusion
    def bit_select(self, offset, width):
        """Part-select with bit granularity.

        Selects a constant width but variable offset part of a ``Value``, such that successive
        parts overlap by all but 1 bit.

        Parameters
        ----------
        offset : Value, int
            Index of first selected bit.
        width : int
            Number of selected bits.

        Returns
        -------
        Part, out
            Selected part of the ``Value``
        """
        offset = Value.cast(offset)
        if type(offset) is Const and isinstance(width, int):
            # Constant offset: reduce to an ordinary slice instead of a Part.
            return self[offset.value:offset.value + width]
        return Part(self, offset, width, stride=1, src_loc_at=1)

    def word_select(self, offset, width):
        """Part-select with word granularity.

        Selects a constant width but variable offset part of a ``Value``, such that successive
        parts do not overlap.

        Parameters
        ----------
        offset : Value, int
            Index of first selected word.
        width : int
            Number of selected bits (the size of one word).

        Returns
        -------
        Part, out
            Selected part of the ``Value``
        """
        offset = Value.cast(offset)
        if type(offset) is Const and isinstance(width, int):
            # Constant offset: reduce to an ordinary slice instead of a Part.
            return self[offset.value * width:(offset.value + 1) * width]
        return Part(self, offset, width, stride=width, src_loc_at=1)
    def matches(self, *patterns):
        """Pattern matching.

        Matches against a set of patterns, which may be integers or bit strings, recognizing
        the same grammar as ``Case()``.

        Parameters
        ----------
        patterns : int or str
            Patterns to match against.

        Returns
        -------
        Value, out
            ``1`` if any pattern matches the value, ``0`` otherwise.
        """
        matches = []
        for pattern in patterns:
            if not isinstance(pattern, (int, str, Enum)):
                raise SyntaxError("Match pattern must be an integer, a string, or an enumeration, "
                                  "not {!r}"
                                  .format(pattern))
            if isinstance(pattern, str) and any(bit not in "01- \t" for bit in pattern):
                raise SyntaxError("Match pattern '{}' must consist of 0, 1, and - (don't care) "
                                  "bits, and may include whitespace"
                                  .format(pattern))
            if (isinstance(pattern, str) and
                    len("".join(pattern.split())) != len(self)):
                raise SyntaxError("Match pattern '{}' must have the same width as match value "
                                  "(which is {})"
                                  .format(pattern, len(self)))
            if isinstance(pattern, int) and bits_for(pattern) > len(self):
                # An integer pattern wider than the value can never match;
                # warn and skip it rather than emit a dead comparison.
                warnings.warn("Match pattern '{:b}' is wider than match value "
                              "(which has width {}); comparison will never be true"
                              .format(pattern, len(self)),
                              SyntaxWarning, stacklevel=3)
                continue
            if isinstance(pattern, str):
                pattern = "".join(pattern.split()) # remove whitespace
                # "-" (don't care) bits are removed from the comparison by masking.
                mask = int(pattern.replace("0", "1").replace("-", "0"), 2)
                pattern = int(pattern.replace("-", "0"), 2)
                matches.append((self & mask) == pattern)
            elif isinstance(pattern, int):
                matches.append(self == pattern)
            elif isinstance(pattern, Enum):
                matches.append(self == pattern.value)
            else:
                assert False
        if not matches:
            # No usable patterns remain (empty call, or all skipped): never matches.
            return Const(0)
        elif len(matches) == 1:
            return matches[0]
        else:
            # OR-reduce the individual per-pattern comparisons.
            return Cat(*matches).any()
    def shift_left(self, amount):
        """Shift left by constant amount.

        Parameters
        ----------
        amount : int
            Amount to shift by.

        Returns
        -------
        Value, out
            If the amount is positive, the input shifted left. Otherwise, the input shifted right.
        """
        if not isinstance(amount, int):
            raise TypeError("Shift amount must be an integer, not {!r}".format(amount))
        if amount < 0:
            return self.shift_right(-amount)
        # A constant left shift is just zero bits appended at the LSB end.
        if self.shape().signed:
            return Cat(Const(0, amount), self).as_signed()
        else:
            return Cat(Const(0, amount), self) # unsigned

    def shift_right(self, amount):
        """Shift right by constant amount.

        Parameters
        ----------
        amount : int
            Amount to shift by.

        Returns
        -------
        Value, out
            If the amount is positive, the input shifted right. Otherwise, the input shifted left.
        """
        if not isinstance(amount, int):
            raise TypeError("Shift amount must be an integer, not {!r}".format(amount))
        if amount < 0:
            return self.shift_left(-amount)
        # A constant right shift just drops the low bits; signedness is preserved.
        if self.shape().signed:
            return self[amount:].as_signed()
        else:
            return self[amount:] # unsigned
    def rotate_left(self, amount):
        """Rotate left by constant amount.

        Parameters
        ----------
        amount : int
            Amount to rotate by.

        Returns
        -------
        Value, out
            If the amount is positive, the input rotated left. Otherwise, the input rotated right.
        """
        if not isinstance(amount, int):
            raise TypeError("Rotate amount must be an integer, not {!r}".format(amount))
        amount %= len(self)  # negative amounts wrap around to the equivalent right rotation
        return Cat(self[-amount:], self[:-amount])

    def rotate_right(self, amount):
        """Rotate right by constant amount.

        Parameters
        ----------
        amount : int
            Amount to rotate by.

        Returns
        -------
        Value, out
            If the amount is positive, the input rotated right. Otherwise, the input rotated left.
        """
        if not isinstance(amount, int):
            raise TypeError("Rotate amount must be an integer, not {!r}".format(amount))
        amount %= len(self)  # negative amounts wrap around to the equivalent left rotation
        return Cat(self[amount:], self[:amount])
    def eq(self, value):
        """Assignment.

        Parameters
        ----------
        value : Value, in
            Value to be assigned.

        Returns
        -------
        Assign
            Assignment statement that can be used in combinatorial or synchronous context.
        """
        return Assign(self, value, src_loc_at=1)
    @abstractmethod
    def shape(self):
        """Bit width and signedness of a value.

        Returns
        -------
        Shape
            See :class:`Shape`.

        Examples
        --------
        >>> Signal(8).shape()
        Shape(width=8, signed=False)
        >>> Const(0xaa).shape()
        Shape(width=8, signed=False)
        """
        pass # :nocov:

    def _lhs_signals(self):
        # Overridden by values that may appear on the left-hand side of an assignment.
        raise TypeError("Value {!r} cannot be used in assignments".format(self))

    @abstractmethod
    def _rhs_signals(self):
        pass # :nocov:

    def _as_const(self):
        # Overridden by values (e.g. Const, Cat) that can be evaluated at elaboration time.
        raise TypeError("Value {!r} cannot be evaluated as constant".format(self))

    # ``==`` is overloaded above to build Operator nodes, so values cannot be
    # consistently hashable; explicitly mark the class unhashable.
    __hash__ = None
@final
class Const(Value):
    """A constant, literal integer value.

    Parameters
    ----------
    value : int
    shape : int or tuple or None
        Either an integer ``width`` or a tuple ``(width, signed)`` specifying the number of bits
        in this constant and whether it is signed (can represent negative values).
        ``shape`` defaults to the minimum possible width and signedness of ``value``.

    Attributes
    ----------
    width : int
    signed : bool
    """
    src_loc = None

    @staticmethod
    def normalize(value, shape):
        # Truncate ``value`` to ``width`` bits; for signed shapes, sign-extend the
        # Python integer so it carries the correct mathematical (negative) value.
        width, signed = shape
        mask = (1 << width) - 1
        value &= mask
        if signed and value >> (width - 1):
            value |= ~mask
        return value

    def __init__(self, value, shape=None, *, src_loc_at=0):
        # We deliberately do not call Value.__init__ here.
        self.value = int(value)
        if shape is None:
            # Minimum width and signedness able to represent ``value``.
            shape = Shape(bits_for(self.value), signed=self.value < 0)
        elif isinstance(shape, int):
            shape = Shape(shape, signed=self.value < 0)
        else:
            shape = Shape.cast(shape, src_loc_at=1 + src_loc_at)
        self.width, self.signed = shape
        self.value = self.normalize(self.value, shape)

    def shape(self):
        return Shape(self.width, self.signed)

    def _rhs_signals(self):
        return SignalSet()  # a constant reads no signals

    def _as_const(self):
        return self.value

    def __repr__(self):
        return "(const {}'{}d{})".format(self.width, "s" if self.signed else "", self.value)


C = Const  # shorthand
class AnyValue(Value, DUID):
    """Common base for :class:`AnyConst` and :class:`AnySeq`: a shaped value with no driver."""

    def __init__(self, shape, *, src_loc_at=0):
        super().__init__(src_loc_at=src_loc_at)
        self.width, self.signed = Shape.cast(shape, src_loc_at=1 + src_loc_at)
        if not isinstance(self.width, int) or self.width < 0:
            raise TypeError("Width must be a non-negative integer, not {!r}"
                            .format(self.width))

    def shape(self):
        return Shape(self.width, self.signed)

    def _rhs_signals(self):
        return SignalSet()  # reads no signals
@final
class AnyConst(AnyValue):
    """An :class:`AnyValue` printed as ``anyconst``."""
    def __repr__(self):
        sign_marker = "s" if self.signed else ""
        return f"(anyconst {self.width}'{sign_marker})"


@final
class AnySeq(AnyValue):
    """An :class:`AnyValue` printed as ``anyseq``."""
    def __repr__(self):
        sign_marker = "s" if self.signed else ""
        return f"(anyseq {self.width}'{sign_marker})"
@final
class Operator(Value):
    """An n-ary operator node applied to ``Value`` operands."""

    def __init__(self, operator, operands, *, src_loc_at=0):
        super().__init__(src_loc_at=1 + src_loc_at)
        self.operator = operator
        # All operands are cast, so plain ints/enums may be passed in by callers.
        self.operands = [Value.cast(op) for op in operands]

    def shape(self):
        def _bitwise_binary_shape(a_shape, b_shape):
            # Result shape of a bitwise binary operation: wide enough to represent
            # either operand exactly, signed if either operand is signed.
            a_bits, a_sign = a_shape
            b_bits, b_sign = b_shape
            if not a_sign and not b_sign:
                # both operands unsigned
                return Shape(max(a_bits, b_bits), False)
            elif a_sign and b_sign:
                # both operands signed
                return Shape(max(a_bits, b_bits), True)
            elif not a_sign and b_sign:
                # first operand unsigned (add sign bit), second operand signed
                return Shape(max(a_bits + 1, b_bits), True)
            else:
                # first signed, second operand unsigned (add sign bit)
                return Shape(max(a_bits, b_bits + 1), True)

        op_shapes = list(map(lambda x: x.shape(), self.operands))
        if len(op_shapes) == 1:
            (a_width, a_signed), = op_shapes
            if self.operator in ("+", "~"):
                return Shape(a_width, a_signed)
            if self.operator == "-":
                # Negating the most negative value needs one extra bit.
                return Shape(a_width + 1, True)
            if self.operator in ("b", "r|", "r&", "r^"):
                # Boolean conversion and reductions are always a single unsigned bit.
                return Shape(1, False)
            if self.operator == "u":
                return Shape(a_width, False)
            if self.operator == "s":
                return Shape(a_width, True)
        elif len(op_shapes) == 2:
            (a_width, a_signed), (b_width, b_signed) = op_shapes
            if self.operator in ("+", "-"):
                width, signed = _bitwise_binary_shape(*op_shapes)
                return Shape(width + 1, signed)  # one extra bit for carry/borrow
            if self.operator == "*":
                return Shape(a_width + b_width, a_signed or b_signed)
            if self.operator in ("//", "%"):
                assert not b_signed
                return Shape(a_width, a_signed)
            if self.operator in ("<", "<=", "==", "!=", ">", ">="):
                return Shape(1, False)
            if self.operator in ("&", "^", "|"):
                return _bitwise_binary_shape(*op_shapes)
            if self.operator == "<<":
                # Widen by the largest shift amount representable by the second operand.
                if b_signed:
                    extra = 2 ** (b_width - 1) - 1
                else:
                    extra = 2 ** (b_width) - 1
                return Shape(a_width + extra, a_signed)
            if self.operator == ">>":
                # Extra width only needed when the shift amount is signed.
                if b_signed:
                    extra = 2 ** (b_width - 1)
                else:
                    extra = 0
                return Shape(a_width + extra, a_signed)
        elif len(op_shapes) == 3:
            if self.operator == "m":
                # Mux: result must be able to hold either selected operand.
                s_shape, a_shape, b_shape = op_shapes
                return _bitwise_binary_shape(a_shape, b_shape)
        raise NotImplementedError("Operator {}/{} not implemented"
                                  .format(self.operator, len(op_shapes))) # :nocov:

    def _rhs_signals(self):
        return union(op._rhs_signals() for op in self.operands)

    def __repr__(self):
        return "({} {})".format(self.operator, " ".join(map(repr, self.operands)))
def Mux(sel, val1, val0):
    """Choose between two values.

    Parameters
    ----------
    sel : Value, in
        Selector.
    val1 : Value, in
    val0 : Value, in
        Input values.

    Returns
    -------
    Value, out
        Output ``Value``. If ``sel`` is asserted, the Mux returns ``val1``, else ``val0``.
    """
    selector = Value.cast(sel)
    # A multi-bit selector is first reduced to a single boolean bit.
    if len(selector) != 1:
        selector = selector.bool()
    return Operator("m", [selector, val1, val0])
@final
class Slice(Value):
    """A contiguous bit range ``[start:stop)`` of another ``Value``."""

    def __init__(self, value, start, stop, *, src_loc_at=0):
        if not isinstance(start, int):
            raise TypeError("Slice start must be an integer, not {!r}".format(start))
        if not isinstance(stop, int):
            raise TypeError("Slice stop must be an integer, not {!r}".format(stop))

        n = len(value)
        if start not in range(-(n+1), n+1):
            raise IndexError("Cannot start slice {} bits into {}-bit value".format(start, n))
        if start < 0:
            start += n  # normalize negative start
        if stop not in range(-(n+1), n+1):
            raise IndexError("Cannot stop slice {} bits into {}-bit value".format(stop, n))
        if stop < 0:
            stop += n  # normalize negative stop
        if start > stop:
            raise IndexError("Slice start {} must be less than slice stop {}".format(start, stop))

        super().__init__(src_loc_at=src_loc_at)
        self.value = Value.cast(value)
        self.start = start
        self.stop = stop

    def shape(self):
        # A slice is always unsigned (Shape's signedness defaults apply).
        return Shape(self.stop - self.start)

    def _lhs_signals(self):
        return self.value._lhs_signals()

    def _rhs_signals(self):
        return self.value._rhs_signals()

    def __repr__(self):
        return "(slice {} {}:{})".format(repr(self.value), self.start, self.stop)
@final
class Part(Value):
    """A constant-width part of ``value`` at a variable offset scaled by ``stride``."""

    def __init__(self, value, offset, width, stride=1, *, src_loc_at=0):
        if not isinstance(width, int) or width < 0:
            raise TypeError("Part width must be a non-negative integer, not {!r}".format(width))
        if not isinstance(stride, int) or stride <= 0:
            raise TypeError("Part stride must be a positive integer, not {!r}".format(stride))

        super().__init__(src_loc_at=src_loc_at)
        self.value = value
        self.offset = Value.cast(offset)
        self.width = width
        self.stride = stride

    def shape(self):
        return Shape(self.width)

    def _lhs_signals(self):
        return self.value._lhs_signals()

    def _rhs_signals(self):
        # Both the sliced value and the offset expression are read.
        return self.value._rhs_signals() | self.offset._rhs_signals()

    def __repr__(self):
        return "(part {} {} {} {})".format(repr(self.value), repr(self.offset),
                                           self.width, self.stride)
@final
class Cat(Value):
    """Concatenate values.

    Form a compound ``Value`` from several smaller ones by concatenation.
    The first argument occupies the lower bits of the result.
    The return value can be used on either side of an assignment, that
    is, the concatenated value can be used as an argument on the RHS or
    as a target on the LHS. If it is used on the LHS, it must solely
    consist of ``Signal`` s, slices of ``Signal`` s, and other concatenations
    meeting these properties. The bit length of the return value is the sum of
    the bit lengths of the arguments::

        len(Cat(args)) == sum(len(arg) for arg in args)

    Parameters
    ----------
    *args : Values or iterables of Values, inout
        ``Value`` s to be concatenated.

    Returns
    -------
    Value, inout
        Resulting ``Value`` obtained by concatentation.
    """
    def __init__(self, *args, src_loc_at=0):
        super().__init__(src_loc_at=src_loc_at)
        # Nested iterables are flattened; each element is cast to a Value.
        self.parts = [Value.cast(v) for v in flatten(args)]

    def shape(self):
        return Shape(sum(len(part) for part in self.parts))

    def _lhs_signals(self):
        return union((part._lhs_signals() for part in self.parts), start=SignalSet())

    def _rhs_signals(self):
        return union((part._rhs_signals() for part in self.parts), start=SignalSet())

    def _as_const(self):
        # Assemble the constant MSB-first: each earlier (lower) part ends up
        # in the lower bits after all the shifts.
        value = 0
        for part in reversed(self.parts):
            value <<= len(part)
            value |= part._as_const()
        return value

    def __repr__(self):
        return "(cat {})".format(" ".join(map(repr, self.parts)))
@final
class Repl(Value):
    """Replicate a value

    An input value is replicated (repeated) several times
    to be used on the RHS of assignments::

        len(Repl(s, n)) == len(s) * n

    Parameters
    ----------
    value : Value, in
        Input value to be replicated.
    count : int
        Number of replications.

    Returns
    -------
    Repl, out
        Replicated value.
    """
    def __init__(self, value, count, *, src_loc_at=0):
        if not isinstance(count, int) or count < 0:
            raise TypeError("Replication count must be a non-negative integer, not {!r}"
                            .format(count))

        super().__init__(src_loc_at=src_loc_at)
        self.value = Value.cast(value)
        self.count = count

    def shape(self):
        return Shape(len(self.value) * self.count)

    def _rhs_signals(self):
        return self.value._rhs_signals()

    def __repr__(self):
        return "(repl {!r} {})".format(self.value, self.count)
# @final
class Signal(Value, DUID):
    """A varying integer value.

    Parameters
    ----------
    shape : ``Shape``-castable object or None
        Specification for the number of bits in this ``Signal`` and its signedness (whether it
        can represent negative values). See ``Shape.cast`` for details.
        If not specified, ``shape`` defaults to 1-bit and non-signed.
    name : str
        Name hint for this signal. If ``None`` (default) the name is inferred from the variable
        name this ``Signal`` is assigned to.
    reset : int or integral Enum
        Reset (synchronous) or default (combinatorial) value.
        When this ``Signal`` is assigned to in synchronous context and the corresponding clock
        domain is reset, the ``Signal`` assumes the given value. When this ``Signal`` is unassigned
        in combinatorial context (due to conditional assignments not being taken), the ``Signal``
        assumes its ``reset`` value. Defaults to 0.
    reset_less : bool
        If ``True``, do not generate reset logic for this ``Signal`` in synchronous statements.
        The ``reset`` value is only used as a combinatorial default or as the initial value.
        Defaults to ``False``.
    attrs : dict
        Dictionary of synthesis attributes.
    decoder : function or Enum
        A function converting integer signal values to human-readable strings (e.g. FSM state
        names). If an ``Enum`` subclass is passed, it is concisely decoded using format string
        ``"{0.name:}/{0.value:}"``, or a number if the signal value is not a member of
        the enumeration.

    Attributes
    ----------
    width : int
    signed : bool
    name : str
    reset : int
    reset_less : bool
    attrs : dict
    decoder : function
    """

    def __init__(self, shape=None, *, name=None, reset=0, reset_less=False,
                 attrs=None, decoder=None, src_loc_at=0):
        super().__init__(src_loc_at=src_loc_at)

        if name is not None and not isinstance(name, str):
            raise TypeError("Name must be a string, not {!r}".format(name))
        # Fall back to inferring the name from the assignment target via the tracer.
        self.name = name or tracer.get_var_name(depth=2 + src_loc_at, default="$signal")

        if shape is None:
            shape = unsigned(1)  # default shape: 1-bit unsigned
        self.width, self.signed = Shape.cast(shape, src_loc_at=1 + src_loc_at)

        if isinstance(reset, Enum):
            reset = reset.value
        if not isinstance(reset, int):
            raise TypeError("Reset value has to be an int or an integral Enum")

        # A reset value that does not fit in the signal's width is only warned
        # about, not rejected.
        reset_width = bits_for(reset, self.signed)
        if reset != 0 and reset_width > self.width:
            warnings.warn("Reset value {!r} requires {} bits to represent, but the signal "
                          "only has {} bits"
                          .format(reset, reset_width, self.width),
                          SyntaxWarning, stacklevel=2 + src_loc_at)

        self.reset = reset
        self.reset_less = bool(reset_less)

        self.attrs = OrderedDict(() if attrs is None else attrs)

        # Passing an Enum subclass as ``shape`` implicitly uses it as the decoder.
        if decoder is None and isinstance(shape, type) and issubclass(shape, Enum):
            decoder = shape
        if isinstance(decoder, type) and issubclass(decoder, Enum):
            def enum_decoder(value):
                try:
                    return "{0.name:}/{0.value:}".format(decoder(value))
                except ValueError:
                    # Not a member of the enumeration: show the raw number.
                    return str(value)
            self.decoder = enum_decoder
            self._enum_class = decoder
        else:
            self.decoder = decoder
            self._enum_class = None

    # Not a @classmethod because nmigen.compat requires it.
    @staticmethod
    def like(other, *, name=None, name_suffix=None, src_loc_at=0, **kwargs):
        """Create Signal based on another.

        Parameters
        ----------
        other : Value
            Object to base this Signal on.
        """
        if name is not None:
            new_name = str(name)
        elif name_suffix is not None:
            new_name = other.name + str(name_suffix)
        else:
            new_name = tracer.get_var_name(depth=2 + src_loc_at, default="$like")
        kw = dict(shape=Value.cast(other).shape(), name=new_name)
        if isinstance(other, Signal):
            # Copy Signal-specific metadata; explicit kwargs still override it.
            kw.update(reset=other.reset, reset_less=other.reset_less,
                      attrs=other.attrs, decoder=other.decoder)
        kw.update(kwargs)
        return Signal(**kw, src_loc_at=1 + src_loc_at)

    def shape(self):
        return Shape(self.width, self.signed)

    def _lhs_signals(self):
        return SignalSet((self,))

    def _rhs_signals(self):
        return SignalSet((self,))

    def __repr__(self):
        return "(sig {})".format(self.name)
@final
class ClockSignal(Value):
    """Clock signal for a clock domain.

    Any ``ClockSignal`` is equivalent to ``cd.clk`` for a clock domain with the corresponding name.
    All of these signals ultimately refer to the same signal, but they can be manipulated
    independently of the clock domain, even before the clock domain is created.

    Parameters
    ----------
    domain : str
        Clock domain to obtain a clock signal for. Defaults to ``"sync"``.
    """
    def __init__(self, domain="sync", *, src_loc_at=0):
        super().__init__(src_loc_at=src_loc_at)
        if not isinstance(domain, str):
            raise TypeError("Clock domain name must be a string, not {!r}".format(domain))
        if domain == "comb":
            raise ValueError("Domain '{}' does not have a clock".format(domain))
        self.domain = domain

    def shape(self):
        return Shape(1)

    def _lhs_signals(self):
        return SignalSet((self,))

    def _rhs_signals(self):
        # Resolved to the actual domain clock by a later lowering pass.
        raise NotImplementedError("ClockSignal must be lowered to a concrete signal") # :nocov:

    def __repr__(self):
        return "(clk {})".format(self.domain)
@final
class ResetSignal(Value):
    """Reset signal for a clock domain.

    Any ``ResetSignal`` is equivalent to ``cd.rst`` for a clock domain with the corresponding name.
    All of these signals ultimately refer to the same signal, but they can be manipulated
    independently of the clock domain, even before the clock domain is created.

    Parameters
    ----------
    domain : str
        Clock domain to obtain a reset signal for. Defaults to ``"sync"``.
    allow_reset_less : bool
        If the clock domain is reset-less, act as a constant ``0`` instead of reporting an error.
    """
    def __init__(self, domain="sync", allow_reset_less=False, *, src_loc_at=0):
        super().__init__(src_loc_at=src_loc_at)
        if not isinstance(domain, str):
            raise TypeError("Clock domain name must be a string, not {!r}".format(domain))
        if domain == "comb":
            raise ValueError("Domain '{}' does not have a reset".format(domain))
        self.domain = domain
        self.allow_reset_less = allow_reset_less

    def shape(self):
        return Shape(1)

    def _lhs_signals(self):
        return SignalSet((self,))

    def _rhs_signals(self):
        # Resolved to the actual domain reset by a later lowering pass.
        raise NotImplementedError("ResetSignal must be lowered to a concrete signal") # :nocov:

    def __repr__(self):
        return "(rst {})".format(self.domain)
class Array(MutableSequence):
    """Addressable multiplexer.

    An array is similar to a ``list`` that can also be indexed by ``Value``s;
    indexing by an integer or a slice works the same as for Python lists,
    but indexing by a ``Value`` results in a proxy.

    The array proxy can be used as an ordinary ``Value``, i.e. participate in calculations and
    assignments, provided that all elements of the array are values. The array proxy also supports
    attribute access and further indexing, each returning another array proxy; this means that
    the results of indexing into arrays, arrays of records, and arrays of arrays can all
    be used as first-class values.

    It is an error to change an array or any of its elements after an array proxy was created.
    Changing the array directly will raise an exception. However, it is not possible to detect
    the elements being modified; if an element's attribute or element is modified after the proxy
    for it has been created, the proxy will refer to stale data.

    Examples
    --------

    Simple array::

        gpios = Array(Signal() for _ in range(10))
        with m.If(bus.we):
            m.d.sync += gpios[bus.addr].eq(bus.w_data)
        with m.Else():
            m.d.sync += bus.r_data.eq(gpios[bus.addr])

    Multidimensional array::

        mult = Array(Array(x * y for y in range(10)) for x in range(10))
        a = Signal.range(10)
        b = Signal.range(10)
        r = Signal(8)
        m.d.comb += r.eq(mult[a][b])

    Array of records::

        layout = [
            ("r_data", 16),
            ("r_en",   1),
        ]
        buses  = Array(Record(layout) for busno in range(4))
        master = Record(layout)
        m.d.comb += [
            buses[sel].r_en.eq(master.r_en),
            master.r_data.eq(buses[sel].r_data),
        ]
    """
    def __init__(self, iterable=()):
        self._inner = list(iterable)
        self._proxy_at = None  # source location of the first Value-indexing, for error reporting
        self._mutable = True

    def __getitem__(self, index):
        if isinstance(index, Value):
            if self._mutable:
                # Freeze the array on first Value-indexing; remember where for diagnostics.
                self._proxy_at = tracer.get_src_loc()
                self._mutable = False
            return ArrayProxy(self, index)
        else:
            return self._inner[index]

    def __len__(self):
        return len(self._inner)

    def _check_mutability(self):
        if not self._mutable:
            raise ValueError("Array can no longer be mutated after it was indexed with a value "
                             "at {}:{}".format(*self._proxy_at))

    def __setitem__(self, index, value):
        self._check_mutability()
        self._inner[index] = value

    def __delitem__(self, index):
        self._check_mutability()
        del self._inner[index]

    def insert(self, index, value):
        self._check_mutability()
        self._inner.insert(index, value)

    def __repr__(self):
        return "(array{} [{}])".format(" mutable" if self._mutable else "",
                                       ", ".join(map(repr, self._inner)))
@final
class ArrayProxy(Value):
    """The value produced by indexing an :class:`Array` with a ``Value`` index."""

    def __init__(self, elems, index, *, src_loc_at=0):
        super().__init__(src_loc_at=1 + src_loc_at)
        self.elems = elems
        self.index = Value.cast(index)

    def __getattr__(self, attr):
        # Attribute access distributes over the elements, yielding another proxy.
        return ArrayProxy([getattr(elem, attr) for elem in self.elems], self.index)

    def __getitem__(self, index):
        # Indexing likewise distributes over the elements.
        return ArrayProxy([elem[index] for elem in self.elems], self.index)

    def _iter_as_values(self):
        return (Value.cast(elem) for elem in self.elems)

    def shape(self):
        unsigned_width = signed_width = 0
        has_unsigned = has_signed = False
        for elem_width, elem_signed in (elem.shape() for elem in self._iter_as_values()):
            if elem_signed:
                has_signed = True
                signed_width = max(signed_width, elem_width)
            else:
                has_unsigned = True
                unsigned_width = max(unsigned_width, elem_width)
        # The shape of the proxy must be such that it preserves the mathematical value of the array
        # elements. I.e., shape-wise, an array proxy must be identical to an equivalent mux tree.
        # To ensure this holds, if the array contains both signed and unsigned values, make sure
        # that every unsigned value is zero-extended by at least one bit.
        if has_signed and has_unsigned and unsigned_width >= signed_width:
            # Array contains both signed and unsigned values, and at least one of the unsigned
            # values won't be zero-extended otherwise.
            return signed(unsigned_width + 1)
        else:
            # Array contains values of the same signedness, or else all of the unsigned values
            # are zero-extended.
            return Shape(max(unsigned_width, signed_width), has_signed)

    def _lhs_signals(self):
        signals = union((elem._lhs_signals() for elem in self._iter_as_values()),
                        start=SignalSet())
        return signals

    def _rhs_signals(self):
        # The index is also read when the proxy appears on the right-hand side.
        signals = union((elem._rhs_signals() for elem in self._iter_as_values()),
                        start=SignalSet())
        return self.index._rhs_signals() | signals

    def __repr__(self):
        return "(proxy (array [{}]) {!r})".format(", ".join(map(repr, self.elems)), self.index)
class UserValue(Value):
    """Value with custom lowering.

    A ``UserValue`` is a value whose precise representation does not have to be immediately known,
    which is useful in certain metaprogramming scenarios. Instead of providing fixed semantics
    upfront, it is kept abstract for as long as possible, only being lowered to a concrete nMigen
    value when required.

    Note that the ``lower`` method will only be called once; this is necessary to ensure that
    nMigen's view of representation of all values stays internally consistent. If the class
    deriving from ``UserValue`` is mutable, then it must ensure that after ``lower`` is called,
    it is not mutated in a way that changes its representation.

    The following is an incomplete list of actions that, when applied to an ``UserValue`` directly
    or indirectly, will cause it to be lowered, provided as an illustrative reference:
        * Querying the shape using ``.shape()`` or ``len()``;
        * Creating a similarly shaped signal using ``Signal.like``;
        * Indexing or iterating through individual bits;
        * Adding an assignment to the value to a ``Module`` using ``m.d.<domain> +=``.
    """
    def __init__(self, *, src_loc_at=0):
        super().__init__(src_loc_at=1 + src_loc_at)
        self.__lowered = None  # cached result of lower(); set on first _lazy_lower()

    @abstractmethod
    def lower(self):
        """Conversion to a concrete representation."""
        pass # :nocov:

    def _lazy_lower(self):
        # Memoize so lower() runs at most once, recursively resolving nested UserValues.
        if self.__lowered is None:
            lowered = self.lower()
            if isinstance(lowered, UserValue):
                lowered = lowered._lazy_lower()
            self.__lowered = Value.cast(lowered)
        return self.__lowered

    def shape(self):
        return self._lazy_lower().shape()

    def _lhs_signals(self):
        return self._lazy_lower()._lhs_signals()

    def _rhs_signals(self):
        return self._lazy_lower()._rhs_signals()
@final
class Sample(Value):
    """Value from the past.

    A ``Sample`` of an expression is equal to the value of the expression ``clocks`` clock edges
    of the ``domain`` clock back. If that moment is before the beginning of time, it is equal
    to the value of the expression calculated as if each signal had its reset value.
    """
    def __init__(self, expr, clocks, domain, *, src_loc_at=0):
        super().__init__(src_loc_at=1 + src_loc_at)
        self.value = Value.cast(expr)
        self.clocks = int(clocks)
        self.domain = domain
        # Only leaf values can be sampled; compound expressions are rejected.
        if not isinstance(self.value, (Const, Signal, ClockSignal, ResetSignal, Initial)):
            raise TypeError("Sampled value must be a signal or a constant, not {!r}"
                            .format(self.value))
        if self.clocks < 0:
            raise ValueError("Cannot sample a value {} cycles in the future"
                             .format(-self.clocks))
        if not (self.domain is None or isinstance(self.domain, str)):
            raise TypeError("Domain name must be a string or None, not {!r}"
                            .format(self.domain))

    def shape(self):
        return self.value.shape()

    def _rhs_signals(self):
        return SignalSet((self,))

    def __repr__(self):
        return "(sample {!r} @ {}[{}])".format(
            self.value, "<default>" if self.domain is None else self.domain, self.clocks)
def Past(expr, clocks=1, domain=None):
    """Value of ``expr`` ``clocks`` clock edges in the past."""
    return Sample(expr, clocks, domain)


def Stable(expr, clocks=0, domain=None):
    """``1`` if ``expr`` has the same value at the two most recent sample points."""
    return Sample(expr, clocks + 1, domain) == Sample(expr, clocks, domain)


def Rose(expr, clocks=0, domain=None):
    """``1`` if ``expr`` was ``0`` one sample earlier and is ``1`` at the current sample."""
    return ~Sample(expr, clocks + 1, domain) & Sample(expr, clocks, domain)


def Fell(expr, clocks=0, domain=None):
    """``1`` if ``expr`` was ``1`` one sample earlier and is ``0`` at the current sample."""
    return Sample(expr, clocks + 1, domain) & ~Sample(expr, clocks, domain)
@final
class Initial(Value):
    """Start indicator, for model checking.

    An ``Initial`` signal is ``1`` at the first cycle of model checking, and ``0`` at any other.
    """
    def __init__(self, *, src_loc_at=0):
        super().__init__(src_loc_at=src_loc_at)

    def shape(self):
        return Shape(1)

    def _rhs_signals(self):
        return SignalSet((self,))

    def __repr__(self):
        return "(initial)"
class _StatementList(list):
def __repr__(self):
return "({})".format(" ".join(map(repr, self)))
class Statement:
    """Base class for statements; records the source location of its creation."""

    def __init__(self, *, src_loc_at=0):
        self.src_loc = tracer.get_src_loc(1 + src_loc_at)

    @staticmethod
    def cast(obj):
        """Convert a statement or a (possibly nested) iterable of statements
        into a flat ``_StatementList``."""
        if isinstance(obj, Iterable):
            # Recursively cast and concatenate each element's statement list.
            return _StatementList(sum((Statement.cast(e) for e in obj), []))
        else:
            if isinstance(obj, Statement):
                return _StatementList([obj])
            else:
                raise TypeError("Object {!r} is not an nMigen statement".format(obj))
@final
class Assign(Statement):
    """An assignment of ``rhs`` to ``lhs``, created by ``Value.eq``."""

    def __init__(self, lhs, rhs, *, src_loc_at=0):
        super().__init__(src_loc_at=src_loc_at)
        self.lhs = Value.cast(lhs)
        self.rhs = Value.cast(rhs)

    def _lhs_signals(self):
        return self.lhs._lhs_signals()

    def _rhs_signals(self):
        # The LHS may itself read signals (e.g. a Part offset), so both sides contribute.
        return self.lhs._rhs_signals() | self.rhs._rhs_signals()

    def __repr__(self):
        return "(eq {!r} {!r})".format(self.lhs, self.rhs)
class UnusedProperty(UnusedMustUse):
    """Warning emitted when a :class:`Property` is created but never used."""
    pass
class Property(Statement, MustUse):
    """Base class for formal properties (``Assert``, ``Assume``, ``Cover``).

    Parameters
    ----------
    test : Value, in
        The condition the property refers to.
    _check, _en : Signal or None
        Internal bookkeeping signals; fresh reset-less signals are created when
        not supplied.
    """
    _MustUse__warning = UnusedProperty

    def __init__(self, test, *, _check=None, _en=None, src_loc_at=0):
        super().__init__(src_loc_at=src_loc_at)
        self.test = Value.cast(test)
        self._check = _check
        self._en = _en
        # Fix: both guards now consistently test the attributes; the original
        # checked ``self._check`` but the raw ``_en`` parameter (same effect,
        # inconsistent style).
        if self._check is None:
            self._check = Signal(reset_less=True, name="${}$check".format(self._kind))
            self._check.src_loc = self.src_loc
        if self._en is None:
            self._en = Signal(reset_less=True, name="${}$en".format(self._kind))
            self._en.src_loc = self.src_loc

    def _lhs_signals(self):
        return SignalSet((self._en, self._check))

    def _rhs_signals(self):
        return self.test._rhs_signals()

    def __repr__(self):
        return "({} {!r})".format(self._kind, self.test)
# Concrete property kinds; ``_kind`` selects the signal-name prefix and repr tag.
@final
class Assert(Property):
    _kind = "assert"


@final
class Assume(Property):
    _kind = "assume"


@final
class Cover(Property):
    _kind = "cover"
# @final
class Switch(Statement):
    """A multi-way branch on ``test``.

    Parameters
    ----------
    test : Value, in
        Value to switch on.
    cases : dict
        Maps case keys (``None`` for default, a single key, or a tuple of keys;
        keys may be ints, bit strings, or enum members) to statements.
    src_loc, case_src_locs
        Optional externally supplied source locations (see comments below).
    """
    def __init__(self, test, cases, *, src_loc=None, src_loc_at=0, case_src_locs=None):
        # Fix: the default for ``case_src_locs`` was a mutable ``{}``; use a
        # None sentinel instead (backward compatible — it was only ever read).
        if case_src_locs is None:
            case_src_locs = {}

        if src_loc is None:
            super().__init__(src_loc_at=src_loc_at)
        else:
            # Switch is a bit special in terms of location tracking because it is usually created
            # long after the control has left the statement that directly caused its creation.
            self.src_loc = src_loc
        # Switch is also a bit special in that its parts also have location information. It can't
        # be automatically traced, so whatever constructs a Switch may optionally provide it.
        self.case_src_locs = {}

        self.test = Value.cast(test)
        self.cases = OrderedDict()
        for orig_keys, stmts in cases.items():
            # Map: None -> (); key -> (key,); (key...) -> (key...)
            keys = orig_keys
            if keys is None:
                keys = ()
            if not isinstance(keys, tuple):
                keys = (keys,)
            # Map: 2 -> "0010"; "0010" -> "0010"
            new_keys = ()
            for key in keys:
                if isinstance(key, str):
                    key = "".join(key.split()) # remove whitespace
                elif isinstance(key, int):
                    key = format(key, "b").rjust(len(self.test), "0")
                elif isinstance(key, Enum):
                    key = format(key.value, "b").rjust(len(self.test), "0")
                else:
                    raise TypeError("Object {!r} cannot be used as a switch key"
                                    .format(key))
                # Internal invariant: every normalized key matches the test width.
                assert len(key) == len(self.test)
                new_keys = (*new_keys, key)
            if not isinstance(stmts, Iterable):
                stmts = [stmts]
            self.cases[new_keys] = Statement.cast(stmts)
            if orig_keys in case_src_locs:
                self.case_src_locs[new_keys] = case_src_locs[orig_keys]

    def _lhs_signals(self):
        signals = union((s._lhs_signals() for ss in self.cases.values() for s in ss),
                        start=SignalSet())
        return signals

    def _rhs_signals(self):
        signals = union((s._rhs_signals() for ss in self.cases.values() for s in ss),
                        start=SignalSet())
        return self.test._rhs_signals() | signals

    def __repr__(self):
        def case_repr(keys, stmts):
            stmts_repr = " ".join(map(repr, stmts))
            if keys == ():
                return "(default {})".format(stmts_repr)
            elif len(keys) == 1:
                return "(case {} {})".format(keys[0], stmts_repr)
            else:
                return "(case ({}) {})".format(" ".join(keys), stmts_repr)
        case_reprs = [case_repr(keys, stmts) for keys, stmts in self.cases.items()]
        return "(switch {!r} {})".format(self.test, " ".join(case_reprs))
class _MappedKeyCollection(metaclass=ABCMeta):
    """Mixin for collections that translate keys before storing them.

    Subclasses provide ``_map_key`` (public key -> hashable storage key)
    and ``_unmap_key`` (storage key -> public key).
    """
    @abstractmethod
    def _map_key(self, key):
        pass # :nocov:
    @abstractmethod
    def _unmap_key(self, key):
        pass # :nocov:
class _MappedKeyDict(MutableMapping, _MappedKeyCollection):
    """Dict-like container storing entries under mapped keys.

    Keys are translated with ``_map_key`` before storage and back with
    ``_unmap_key`` on iteration; a ``None`` key is passed through unmapped.
    Insertion order is preserved (OrderedDict storage).
    """
    def __init__(self, pairs=()):
        self._storage = OrderedDict()
        for key, value in pairs:
            self[key] = value
    def __getitem__(self, key):
        key = None if key is None else self._map_key(key)
        return self._storage[key]
    def __setitem__(self, key, value):
        key = None if key is None else self._map_key(key)
        self._storage[key] = value
    def __delitem__(self, key):
        key = None if key is None else self._map_key(key)
        del self._storage[key]
    def __iter__(self):
        for key in self._storage:
            if key is None:
                yield None
            else:
                yield self._unmap_key(key)
    def __eq__(self, other):
        # Same concrete type, same length, and pairwise-equal (key, value)
        # entries when both storages are iterated in sorted key order --
        # so insertion order does not affect equality.
        if not isinstance(other, type(self)):
            return False
        if len(self) != len(other):
            return False
        for ak, bk in zip(sorted(self._storage), sorted(other._storage)):
            if ak != bk:
                return False
            if self._storage[ak] != other._storage[bk]:
                return False
        return True
    def __len__(self):
        return len(self._storage)
    def __repr__(self):
        pairs = ["({!r}, {!r})".format(k, v) for k, v in self.items()]
        return "{}.{}([{}])".format(type(self).__module__, type(self).__name__,
                                    ", ".join(pairs))
class _MappedKeySet(MutableSet, _MappedKeyCollection):
    """Set-like container storing elements under mapped keys.

    Elements are translated with ``_map_key`` before storage (an OrderedDict
    used as an insertion-ordered set) and translated back with ``_unmap_key``
    on iteration.
    """
    def __init__(self, elements=()):
        self._storage = OrderedDict()
        for element in elements:
            self.add(element)
    def add(self, value):
        # The OrderedDict doubles as an ordered set; stored values are unused.
        self._storage[self._map_key(value)] = None
    def update(self, values):
        for item in values:
            self.add(item)
    def discard(self, value):
        if value not in self:
            return
        del self._storage[self._map_key(value)]
    def __contains__(self, value):
        return self._map_key(value) in self._storage
    def __iter__(self):
        # Snapshot the keys first so the set may be mutated while iterating.
        for mapped in list(self._storage):
            yield self._unmap_key(mapped)
    def __len__(self):
        return len(self._storage)
    def __repr__(self):
        elements = ", ".join(repr(item) for item in self)
        return "{}.{}({})".format(type(self).__module__, type(self).__name__,
                                  elements)
class ValueKey:
    """Wrapper making an nMigen Value usable as a dict key / set element.

    Keys compare *structurally*: two keys are equal when they wrap
    structurally identical expressions. Plain Value objects cannot be used
    directly because they overload ``==`` to build HDL expressions.
    """
    def __init__(self, value):
        self.value = Value.cast(value)
        if isinstance(self.value, Const):
            self._hash = hash(self.value.value)
        elif isinstance(self.value, (Signal, AnyValue)):
            self._hash = hash(self.value.duid)
        elif isinstance(self.value, (ClockSignal, ResetSignal)):
            self._hash = hash(self.value.domain)
        elif isinstance(self.value, Operator):
            self._hash = hash((self.value.operator,
                               tuple(ValueKey(o) for o in self.value.operands)))
        elif isinstance(self.value, Slice):
            self._hash = hash((ValueKey(self.value.value), self.value.start, self.value.stop))
        elif isinstance(self.value, Part):
            self._hash = hash((ValueKey(self.value.value), ValueKey(self.value.offset),
                              self.value.width, self.value.stride))
        elif isinstance(self.value, Cat):
            self._hash = hash(tuple(ValueKey(o) for o in self.value.parts))
        elif isinstance(self.value, ArrayProxy):
            self._hash = hash((ValueKey(self.value.index),
                              tuple(ValueKey(e) for e in self.value._iter_as_values())))
        elif isinstance(self.value, Sample):
            self._hash = hash((ValueKey(self.value.value), self.value.clocks, self.value.domain))
        elif isinstance(self.value, Initial):
            self._hash = 0
        else: # :nocov:
            raise TypeError("Object {!r} cannot be used as a key in value collections"
                            .format(self.value))
    def __hash__(self):
        return self._hash
    def __eq__(self, other):
        if type(other) is not ValueKey:
            return False
        if type(self.value) is not type(other.value):
            return False
        if isinstance(self.value, Const):
            return self.value.value == other.value.value
        elif isinstance(self.value, (Signal, AnyValue)):
            # Signals compare by identity, not by structure.
            return self.value is other.value
        elif isinstance(self.value, (ClockSignal, ResetSignal)):
            return self.value.domain == other.value.domain
        elif isinstance(self.value, Operator):
            return (self.value.operator == other.value.operator and
                    len(self.value.operands) == len(other.value.operands) and
                    all(ValueKey(a) == ValueKey(b)
                        for a, b in zip(self.value.operands, other.value.operands)))
        elif isinstance(self.value, Slice):
            return (ValueKey(self.value.value) == ValueKey(other.value.value) and
                    self.value.start == other.value.start and
                    self.value.stop == other.value.stop)
        elif isinstance(self.value, Part):
            return (ValueKey(self.value.value) == ValueKey(other.value.value) and
                    ValueKey(self.value.offset) == ValueKey(other.value.offset) and
                    self.value.width == other.value.width and
                    self.value.stride == other.value.stride)
        elif isinstance(self.value, Cat):
            # Bug fix: without the length check, Cats of different lengths with
            # a common prefix compared equal (zip truncates to the shorter one).
            return (len(self.value.parts) == len(other.value.parts) and
                    all(ValueKey(a) == ValueKey(b)
                        for a, b in zip(self.value.parts, other.value.parts)))
        elif isinstance(self.value, ArrayProxy):
            return (ValueKey(self.value.index) == ValueKey(other.value.index) and
                    len(self.value.elems) == len(other.value.elems) and
                    all(ValueKey(a) == ValueKey(b)
                        for a, b in zip(self.value._iter_as_values(),
                                        other.value._iter_as_values())))
        elif isinstance(self.value, Sample):
            # Bug fix: the original compared self.value.domain against itself,
            # so Samples in different clock domains wrongly compared equal.
            return (ValueKey(self.value.value) == ValueKey(other.value.value) and
                    self.value.clocks == other.value.clocks and
                    self.value.domain == other.value.domain)
        elif isinstance(self.value, Initial):
            return True
        else: # :nocov:
            raise TypeError("Object {!r} cannot be used as a key in value collections"
                            .format(self.value))
    def __lt__(self, other):
        if not isinstance(other, ValueKey):
            return False
        if type(self.value) != type(other.value):
            return False
        if isinstance(self.value, Const):
            return self.value < other.value
        elif isinstance(self.value, (Signal, AnyValue)):
            return self.value.duid < other.value.duid
        elif isinstance(self.value, Slice):
            # Bug fix: Slice has no ``end`` attribute; use ``stop`` as in
            # __init__/__eq__ above. (The conjunction-based ordering is kept
            # as-is for compatibility, even though it is not a total order.)
            return (ValueKey(self.value.value) < ValueKey(other.value.value) and
                    self.value.start < other.value.start and
                    self.value.stop < other.value.stop)
        else: # :nocov:
            # Bug fix: the original message string was never .format()-ted.
            raise TypeError("Object {!r} cannot be used as a key in value collections"
                            .format(self.value))
    def __repr__(self):
        return "<{}.ValueKey {!r}>".format(__name__, self.value)
class ValueDict(_MappedKeyDict):
    # Dict keyed by structural identity of nMigen values (wrapped in ValueKey).
    _map_key = ValueKey
    _unmap_key = lambda self, key: key.value
class ValueSet(_MappedKeySet):
    # Set of nMigen values, deduplicated by structural identity (ValueKey).
    _map_key = ValueKey
    _unmap_key = lambda self, key: key.value
class SignalKey:
    """Wrapper making a Signal/ClockSignal/ResetSignal usable as a dict key.

    Each kind of signal is interned to a small tuple: plain Signals by their
    DUID, clock/reset signals by their domain name, so ordinary ``==``/``<``
    and hashing work even though signals overload comparison operators.
    """
    def __init__(self, signal):
        self.signal = signal
        if isinstance(signal, Signal):
            self._intern = (0, signal.duid)
        elif type(signal) is ClockSignal:
            self._intern = (1, signal.domain)
        elif type(signal) is ResetSignal:
            self._intern = (2, signal.domain)
        else:
            raise TypeError("Object {!r} is not an nMigen signal".format(signal))
    def __hash__(self):
        return hash(self._intern)
    def __eq__(self, other):
        if type(other) is not SignalKey:
            return False
        return self._intern == other._intern
    def __lt__(self, other):
        if type(other) is not SignalKey:
            # Bug fix: the original formatted the undefined name ``signal``
            # here, raising NameError instead of the intended TypeError.
            raise TypeError("Object {!r} cannot be compared to a SignalKey".format(other))
        return self._intern < other._intern
    def __repr__(self):
        return "<{}.SignalKey {!r}>".format(__name__, self.signal)
class SignalDict(_MappedKeyDict):
    # Dict keyed by signal identity (wrapped in SignalKey).
    _map_key = SignalKey
    _unmap_key = lambda self, key: key.signal
class SignalSet(_MappedKeySet):
    # Set of signals, deduplicated by signal identity (SignalKey).
    _map_key = SignalKey
    _unmap_key = lambda self, key: key.signal
| 34.920323
| 183
| 0.578883
|
4a01bdf78d4cd2737597e62179d0c546db3fa385
| 8,258
|
py
|
Python
|
TikTok Video Generator/vidGen.py
|
ADH36/TikTok-Compilation-Video-Generator
|
375cdf41ee7e73b9e4bb8d6f0b9b6400892f136f
|
[
"MIT"
] | 199
|
2020-09-05T16:20:00.000Z
|
2022-03-31T15:57:25.000Z
|
TikTok Video Generator/vidGen.py
|
ozgurozkayaa/TikTok-Compilation-Video-Generator
|
9fce0e967682eb92a196f4ac4afa985eb593551c
|
[
"MIT"
] | 28
|
2020-09-24T22:01:14.000Z
|
2022-03-18T07:19:24.000Z
|
TikTok Video Generator/vidGen.py
|
ozgurozkayaa/TikTok-Compilation-Video-Generator
|
9fce0e967682eb92a196f4ac4afa985eb593551c
|
[
"MIT"
] | 56
|
2020-09-22T00:29:53.000Z
|
2022-03-20T19:22:51.000Z
|
import random
import os
import time
import shutil
import subprocess
import re
import cv2
from time import sleep
import datetime
from distutils.dir_util import copy_tree
import pickle
import settings
# Module-level state shared between the render worker and the UI; these are
# written by renderThread()/renderVideo() and presumably read by the rendering
# screen whenever a render_progress signal is emitted -- TODO confirm.
saved_videos = None             # list of video scripts found pending in temp_path
render_current_progress = None  # steps completed for the render in progress
render_max_progress = None      # total number of steps for the render in progress
render_message = None           # human-readable status line for the progress UI
#------------------------------------------C O M P I L A T I O N G E N E R A T O R------------------------------------------
#Getting Filename without extension and storing it into a list
def getFileNames(file_path):
    """Return the extension-less names of all entries in *file_path*."""
    names = []
    for entry in os.listdir(file_path):
        stem, _ext = os.path.splitext(entry)
        names.append(stem)
    return names
def deleteSkippedClips(clips):
    """Delete every clip file listed in *clips* from disk."""
    for clip_path in clips:
        os.remove(f'{clip_path}')
def deleteAllFilesInPath(path):
    """Remove every regular file directly inside *path* (subdirectories kept)."""
    for entry in os.listdir(path):
        full_path = os.path.join(path, entry)
        try:
            if os.path.isfile(full_path):
                os.unlink(full_path)
        except Exception as err:
            # Best-effort cleanup: report the failure and keep going.
            print(err)
def renderThread(renderingScreen):
    """Worker loop: every 5 s, scan temp_path for saved video scripts and render them.

    Each subdirectory of settings.temp_path containing a pickled ``vid.data``
    file is loaded, rendered with renderVideo(), optionally backed up, then
    deleted. Progress is reported to *renderingScreen* via Qt-style signals.
    Runs forever; intended to be started on a background thread.
    """
    global saved_videos
    while True:
        time.sleep(5)
        savedFiles = getFileNames(f'{settings.temp_path}')
        saved_videos = []
        save_names = []
        for file in savedFiles:
            try:
                # NOTE: pickle.load on local files -- only safe because the
                # data is produced by this application itself.
                with open(f'{settings.temp_path}/{file}/vid.data', 'rb') as pickle_file:
                    script = pickle.load(pickle_file)
                    saved_videos.append(script)
                    save_names.append(f'{settings.temp_path}/{file}')
            except FileNotFoundError:
                # Directory without a vid.data file: not a saved script, skip.
                pass
                #print("No vid.data file in %s" % file)
        renderingScreen.script_queue_update.emit()
        for i, video in enumerate(saved_videos):
            print(f'Rendering script {i + 1}/{len(saved_videos)}')
            t0 = datetime.datetime.now()
            renderVideo(video, renderingScreen)
            t1 = datetime.datetime.now()
            total = t1-t0
            print("Rendering Time %s" % total)
            if settings.backupVideos:
                backupName = save_names[i].replace(settings.temp_path, settings.backup_path)
                if os.path.exists(backupName):
                    print("Backup for video %s already exists" % backupName)
                else:
                    print("Making backup of video to %s" % backupName)
                    copy_tree(save_names[i], backupName)
            # The source folder is always removed after a successful render.
            print(f"Deleting video folder {save_names[i]}")
            shutil.rmtree(save_names[i])
            renderingScreen.update_backups.emit()
            # delete all the temp videos
            try:
                deleteAllFilesInPath(settings.vid_finishedvids)
            except Exception as e:
                print(e)
                print("Couldn't delete clips")
# Renders one compiled video script into a single mp4 via ffmpeg.
def renderVideo(video, rendering_screen):
    """Render *video* (a script of clips) into one concatenated mp4.

    Each used clip is rescaled/padded to 1920x1080 and resampled to a common
    FPS with ffmpeg, the results are concatenated via a concat.txt list, and
    a credits text file is written next to the final video. Progress is
    published through the module-level render_* globals and signals on
    *rendering_screen*.
    """
    global render_current_progress, render_max_progress, render_message
    t0 = datetime.datetime.now()
    clips = video.clips
    videoName = video.name
    # Prevent subprocess's destructor bookkeeping from interfering -- HACK.
    subprocess._cleanup = lambda: None
    credits = []
    streamers_in_cred = []
    render_current_progress = 0
    # see where render_current_progress += 1
    amount = 0
    for clip in clips:
        if clip.isUsed:
            amount += 1
    # Two progress steps per used clip, plus one audio step and one write step.
    render_max_progress = amount * 2 + 1 + 1
    render_message = "Beginning Rendering"
    rendering_screen.render_progress.emit()
    current_date = datetime.datetime.today().strftime("%m-%d-%Y__%H-%M-%S")
    toCombine = []
    fpsList = []
    # First pass: probe every clip's FPS so a common output FPS can be chosen.
    for i, clip in enumerate(clips):
        mp4 = clip.mp4
        mp4name = mp4
        mp4path = f"{mp4}.mp4"
        if len(mp4.split("/")) > 2:
            name = len(mp4.split("/"))
            mp4name = mp4.split("/")[name-1].replace(".mp4", "")
            mp4path = mp4[1:]
        cap=cv2.VideoCapture(mp4path)
        fps = cap.get(cv2.CAP_PROP_FPS)
        fpsList.append(fps)
    chosenFps = settings.fps
    if settings.useMinimumFps:
        chosenFps = int(min(fpsList))
    if settings.useMaximumFps:
        chosenFps = int(max(fpsList))
    print("Using Fps %s" % chosenFps)
    # render progress 1
    # Second pass: normalize each used clip and collect paths for concatenation.
    for i, clip in enumerate(clips):
        if clip.isUsed:
            name = clip.author_name
            mp4 = clip.mp4
            if name is not None and name not in streamers_in_cred and not clip.isUpload:
                credits.append(f"{clip.author_name}")
                streamers_in_cred.append(clip.author_name)
            final_duration = round(clip.vid_duration, 1)
            print(f"Rendering video ({i + 1}/{len(clips)}) to \"{settings.vid_finishedvids}\"/{mp4}_finished.mp4")
            mp4name = mp4
            mp4path = f"{mp4}.mp4"
            if len(mp4.split("/")) > 2:
                name = len(mp4.split("/"))
                mp4name = mp4.split("/")[name-1].replace(".mp4", "")
                mp4path = mp4[1:]
            if not clip.isInterval and not clip.isIntro:
                # Scale/pad to 1080p, then resample to the chosen FPS.
                os.system(f"ffmpeg -i \"{mp4path}\" -vf \"scale=1920:1080:force_original_aspect_ratio=decrease,pad=1920:1080:(ow-iw)/2:(oh-ih)/2,setsar=1\" \"{settings.vid_finishedvids}/{mp4name}temp.mp4\"")
                os.system(f"ffmpeg -i \"{settings.vid_finishedvids}/{mp4name}temp.mp4\" -filter:v fps=fps={chosenFps} \"{settings.vid_finishedvids}/{mp4name}_finished.mp4\"")
            path = f"'{os.path.dirname(os.path.realpath(__file__))}/{settings.vid_finishedvids}/{mp4name}_finished.mp4'"
            path = path.replace("\\", "/")
            #path = f"'{mp4path}'"
            #path = path.replace("\\", "/")
            toCombine.append(path)
            #os.system(f"ffmpeg -y -fflags genpts -i \"{mp4path}\" -vf \"ass=subtitleFile.ass, scale=1920:1080\" \"{settings.vid_finishedvids}/{mp4name}_finished.mp4\"")
            render_current_progress += 1
            render_message = f"Done Adding text to video ({i + 1}/{len(clips)})"
            rendering_screen.render_progress.emit()
            render_message = f"Adding clip to list ({i + 1}/{len(clips)})"
            rendering_screen.render_progress.emit()
            render_current_progress += 1
            render_message = f"Done Adding clip to list ({i + 1}/{len(clips)})"
            rendering_screen.render_progress.emit()
    # render progress 2
    render_message = "Creating audio loop"
    rendering_screen.render_progress.emit()
    #audio = AudioFileClip(f'{settings.asset_file_path}/Music/{musicFiles[0]}.mp3').fx(afx.volumex, float(video.background_volume))
    render_current_progress += 1
    render_message = "Done Creating audio loop"
    rendering_screen.render_progress.emit()
    # render progress 3
    render_message = "Writing final video"
    rendering_screen.render_progress.emit()
    sleep(5)
    vid_concat = open("concat.txt", "a")
    #Adding comment thread video clips and interval video file paths to text file for concatenating
    for files in toCombine:
        vid_concat.write(f"file {files}\n")
    vid_concat.close()
    os.system(f"ffmpeg -safe 0 -f concat -segment_time_metadata 1 -i concat.txt -vf select=concatdec_select -af aselect=concatdec_select,aresample=async=1 \"{settings.final_video_path}/{videoName}_{current_date}.mp4\"")
    #os.system(f"ffmpeg -f concat -safe 0 -i concat.txt -s 1920x1080 -c copy {settings.final_video_path}/TikTokMoments_{current_date}.mp4")
    # Truncate the concat list so the next render starts from an empty file.
    open("concat.txt", 'w').close()
    #final_vid_with_music.write_videofile(f'{settings.final_video_path}/TikTokMoments_{current_date}.mp4', fps=settings.fps, threads=16)
    render_current_progress += 1
    t1 = datetime.datetime.now()
    total = t1-t0
    render_message = "Done writing final video (%s)" % total
    rendering_screen.render_progress.emit()
    # Write the credits file alongside the final video.
    f= open(f"{settings.final_video_path}/{videoName}_{current_date}.txt","w+")
    f.write("A special thanks to the following: \n\n")
    for cred in credits:
        f.write(cred + "\n")
    f.close()
    sleep(10)
| 33.032
| 220
| 0.593848
|
4a01be0b8d95d183c319ac7bf960de5e3a204ee0
| 37
|
py
|
Python
|
sdk/template/azure-template/azure/template/_version.py
|
conniey/azure-sdk-for-python
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
[
"MIT"
] | 2
|
2019-05-17T21:24:53.000Z
|
2020-02-12T11:13:42.000Z
|
sdk/template/azure-template/azure/template/_version.py
|
conniey/azure-sdk-for-python
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
[
"MIT"
] | null | null | null |
sdk/template/azure-template/azure/template/_version.py
|
conniey/azure-sdk-for-python
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# matches SEMVER
VERSION = "0.0.13b1"
| 18.5
| 20
| 0.702703
|
4a01bf6d763de749dcf0615fa62b769d921afb30
| 1,898
|
py
|
Python
|
python/study_myself/12_json/steps.py
|
harkhuang/harkcode
|
1c9802bfc8d599e20ee9082eca14165a782ddf85
|
[
"MIT"
] | 3
|
2015-10-27T00:49:46.000Z
|
2019-04-19T08:14:46.000Z
|
python/study_myself/12_json/steps.py
|
harkhuang/harkcode
|
1c9802bfc8d599e20ee9082eca14165a782ddf85
|
[
"MIT"
] | 1
|
2018-11-05T07:54:55.000Z
|
2018-11-05T07:54:55.000Z
|
python/study_myself/12_json/steps.py
|
harkhuang/harkcode
|
1c9802bfc8d599e20ee9082eca14165a782ddf85
|
[
"MIT"
] | 1
|
2015-12-19T08:47:53.000Z
|
2015-12-19T08:47:53.000Z
|
# NOTE(review): this is a Python 2 script (bare `print` statement near the end).
# It computes a list of upgrade/downgrade steps between two version strings of
# the form "PYYYY-MM-DD" by stepping the middle ("main") component.
import json
import array
from pprint import pprint
# get the version depend trees
versions_deps = []
with open('data.json') as f:
    data = json.load(f)
    for (k,v) in data["version"].items():
        versions_deps.append(k)
        #pprint(k + '\n')
from_v = "P2019-06-02"
to_v = "P2019-05-03"
steps = list()
#step1 degrade to base line version
step1 = from_v[0:-2] + "00"
temp = []
temp.append("0") # 0:degrade
temp.append(from_v)
temp.append(step1)
steps.append(temp)
# step2 up or down main version
### get the main version number
step2 = from_v[6:8]
#pprint(step2)
if step2[0] == "0":
    step2 = step2[1]
#pprint (step2)
int_step2 = int(step2)
step3 = to_v[6:8]
#pprint(step3)
if step3[0] == "0":
    step3 = step3[1]
#pprint (step3)
int_step3 = int(step3)
temp = []
f = min(int_step3, int_step2)
t = max(int_step2, int_step3)
num = 0
for i in range(f, t):
    step_f = []
    step_t = []
    temp = []
    pos = 0
    if (int_step2 < int_step3):
        temp.append("1") # 1:up
    else:
        temp.append("0") # 0:down
    # NOTE(review): `int_step2 < int_step2` is always False -- almost certainly
    # meant `int_step2 < int_step3`, so the upgrade branches below never run.
    if i < 9 and int_step2 < int_step2:
        step_f = from_v[0:7] + str(i) + "-00"
        step_t = to_v[0:7] + str(i+1) + "-00"
    elif i > 9 and int_step2 < int_step2:
        step_f = from_v[0:6] + str(i) + "-00"
        step_t = to_v[0:6] + str( i+1) + "-00"
    if i < 9 and int_step2 > int_step3:
        step_f = from_v[0:7] + str(int_step2 - num) + "-00"
        step_t = to_v[0:7] + str(int_step2 - num - 1) + "-00"
        num = num +1
    # NOTE(review): `int_step2 > int_step2` is always False -- likely meant
    # `int_step2 > int_step3`. Also note i == 9 is handled by no branch, and
    # step_f/step_t stay empty lists when no branch fires.
    elif i > 9 and int_step2 > int_step2:
        step_f = from_v[0:6] + str(int_step2 - num) + "-00"
        step_t = to_v[0:6] + str(int_step2 - num -1) + "-00"
    temp.append(step_f)
    temp.append(step_t)
    steps.append(temp)
# step3 update min version
temp = []
temp.append("1")
temp.append(to_v[0:9] + "00")
temp.append(to_v)
steps.append(temp)
#print temp
print "from :" + from_v + " to " + to_v
#print steps
for i in steps:
    pprint(i)
| 22.595238
| 58
| 0.600632
|
4a01bf9213393a7406727a3730853ab12a23ba8f
| 5,268
|
py
|
Python
|
modin/data_management/functions/default_methods/groupby_default.py
|
ienkovich/modin
|
b7e1188f7ba01e3a313996e092a65b63f6b76fa0
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-05-19T04:01:17.000Z
|
2021-05-19T04:01:17.000Z
|
modin/data_management/functions/default_methods/groupby_default.py
|
ienkovich/modin
|
b7e1188f7ba01e3a313996e092a65b63f6b76fa0
|
[
"ECL-2.0",
"Apache-2.0"
] | 57
|
2021-01-22T15:52:03.000Z
|
2021-06-12T18:22:04.000Z
|
modin/data_management/functions/default_methods/groupby_default.py
|
ienkovich/modin
|
b7e1188f7ba01e3a313996e092a65b63f6b76fa0
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-01-29T12:12:42.000Z
|
2022-01-29T12:12:42.000Z
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
from .default import DefaultMethod
import pandas
class GroupBy:
    """Builders for default (pandas-executed) implementations of groupby ops."""
    # Names/callables that must be routed through the aggregate code path.
    agg_aliases = [
        "agg",
        "dict_agg",
        pandas.core.groupby.DataFrameGroupBy.agg,
        pandas.core.groupby.DataFrameGroupBy.aggregate,
    ]
    @classmethod
    def validate_by(cls, by):
        """Normalize *by* into a list usable by ``DataFrame.groupby``."""
        def try_cast_series(df):
            # Squeeze single-column frames to Series and strip the internal
            # "__reduced__" placeholder name back to None.
            if isinstance(df, pandas.DataFrame):
                df = df.squeeze(axis=1)
            if not isinstance(df, pandas.Series):
                return df
            if df.name == "__reduced__":
                df.name = None
            return df
        if isinstance(by, pandas.DataFrame):
            by = [try_cast_series(column) for _, column in by.items()]
        elif isinstance(by, pandas.Series):
            by = [try_cast_series(by)]
        elif isinstance(by, list):
            by = [try_cast_series(o) for o in by]
        return by
    @classmethod
    def inplace_applyier_builder(cls, key, func=None):
        # Wrap *key* so it is applied to a groupby object, optionally with a
        # pre-bound aggregation function/dict as its first positional argument.
        inplace_args = [] if func is None else [func]
        def inplace_applyier(grp, **func_kwargs):
            return key(grp, *inplace_args, **func_kwargs)
        return inplace_applyier
    @classmethod
    def get_func(cls, grp, key, **kwargs):
        # Resolve the aggregation callable: an explicit "agg_func" wins, then
        # a "func_dict" bound into *key*, otherwise *key* itself is wrapped.
        if "agg_func" in kwargs:
            return kwargs["agg_func"]
        elif "func_dict" in kwargs:
            return cls.inplace_applyier_builder(key, kwargs["func_dict"])
        else:
            return cls.inplace_applyier_builder(key)
    @classmethod
    def build_aggregate_method(cls, key):
        """Build a function performing ``df.groupby(...).agg(...)`` in pandas."""
        def fn(
            df,
            by,
            groupby_args,
            agg_args,
            axis=0,
            is_multi_by=None,
            drop=False,
            **kwargs
        ):
            by = cls.validate_by(by)
            grp = df.groupby(by, axis=axis, **groupby_args)
            agg_func = cls.get_func(grp, key, **kwargs)
            # A dict aggregation must go through .agg(); a callable is applied
            # to the groupby object directly.
            result = (
                grp.agg(agg_func, **agg_args)
                if isinstance(agg_func, dict)
                else agg_func(grp, **agg_args)
            )
            return result
        return fn
    @classmethod
    def build_groupby_reduce_method(cls, agg_func):
        """Build a function performing a groupby reduction in pandas."""
        def fn(
            df,
            by,
            axis,
            groupby_args,
            map_args,
            numeric_only=True,
            drop=False,
            **kwargs
        ):
            # Fast path: *by* is not frame-like (e.g. level names / arrays).
            if not isinstance(by, (pandas.Series, pandas.DataFrame)):
                by = cls.validate_by(by)
                return agg_func(
                    df.groupby(by=by, axis=axis, **groupby_args), **map_args
                )
            if numeric_only:
                df = df.select_dtypes(include="number")
            by = by.squeeze(axis=1)
            # If the grouping Series is a column of df itself, group by name.
            if (
                drop
                and isinstance(by, pandas.Series)
                and by.name in df
                and df[by.name].equals(by)
            ):
                by = by.name
            if isinstance(by, pandas.DataFrame):
                # Append any by-columns missing from df, then group by labels.
                df = pandas.concat([df] + [by[[o for o in by if o not in df]]], axis=1)
                by = list(by.columns)
            # Always group with as_index=True and reset afterwards if needed.
            groupby_args = groupby_args.copy()
            as_index = groupby_args.pop("as_index", True)
            groupby_args["as_index"] = True
            grp = df.groupby(by, axis=axis, **groupby_args)
            result = agg_func(grp, **map_args)
            if isinstance(result, pandas.Series):
                result = result.to_frame()
            if not as_index:
                if (
                    len(result.index.names) == 1 and result.index.names[0] is None
                ) or all([name in result.columns for name in result.index.names]):
                    drop = False
                elif kwargs.get("method") == "size":
                    drop = True
                result = result.reset_index(drop=not drop)
            if result.index.name == "__reduced__":
                result.index.name = None
            return result
        return fn
    @classmethod
    def is_aggregate(cls, key):
        """Return True when *key* should use the aggregate code path."""
        return key in cls.agg_aliases
    @classmethod
    def build_groupby(cls, func):
        """Dispatch to the aggregate or reduce builder depending on *func*."""
        if cls.is_aggregate(func):
            return cls.build_aggregate_method(func)
        return cls.build_groupby_reduce_method(func)
class GroupByDefault(DefaultMethod):
    """Default-to-pandas dispatcher for groupby functions."""
    OBJECT_TYPE = "GroupBy"
    @classmethod
    def register(cls, func, **kwargs):
        # Wrap *func* into a groupby applier and register it under its own name.
        return cls.call(GroupBy.build_groupby(func), fn_name=func.__name__, **kwargs)
| 31.73494
| 87
| 0.567008
|
4a01bfeb49e116f78ed8583ab43ac5749c422eb6
| 1,732
|
py
|
Python
|
DataPrepUtils/normalize.py
|
shreyanshchordia/Chatbot
|
abdbda328d25b6c0deecc840fff625451f92d871
|
[
"MIT"
] | null | null | null |
DataPrepUtils/normalize.py
|
shreyanshchordia/Chatbot
|
abdbda328d25b6c0deecc840fff625451f92d871
|
[
"MIT"
] | null | null | null |
DataPrepUtils/normalize.py
|
shreyanshchordia/Chatbot
|
abdbda328d25b6c0deecc840fff625451f92d871
|
[
"MIT"
] | null | null | null |
# function to normalize string
'''
Involves making tokens more seperable and distinguishable,
for example:
"What are y'all gonn' do tonight?I hope it's fun or I better leave now!"
> 'What are y all gonn do tonight ? I hope it s fun or I better leave now !'
'''
import re
def normalize_string(string):
    """Normalize *string*: pad sentence punctuation with spaces, drop every
    character outside a-zA-Z.!?, and collapse runs of whitespace."""
    padded = re.sub(r"([.!?])", r" \1 ", string)
    letters_only = re.sub(r"[^a-zA-Z.!?]+", " ", padded)
    return re.sub(r"\s+", " ", letters_only).strip()
# function to normalize data directory
def normalize_dir(data):
    """Normalize every string in the nested *data* directory.

    *data* maps class labels to lists of string lists; each string is run
    through normalize_string(). Entries that cannot be normalized are
    skipped and counted; a summary line is printed before returning the
    rebuilt directory.
    """
    total = 0
    failures = 0
    normalized = dict()
    for label in data.keys():
        for group in data[label]:
            cleaned = list()
            for text in group:
                total += 1
                try:
                    # here does the normalization occur
                    cleaned.append(normalize_string(text))
                except:
                    failures += 1
            normalized.setdefault(label, []).append(cleaned)
    print(f'{failures}/{total} were in invalid formats')
    return normalized
'''
When the structure of the data directory is somewhat like this:
{
'POLITICS' :
[
[Question, Answer_1, Answer_2, ...],
...
[Question, Answer_1, Answer_2, ...]
],
'GOSSIP' :
[
[Question, Answer_1, Answer_2, ...],
...
[Question, Answer_1, Answer_2, ...]
],
...
'SCIENCE' :
[
[Question, Answer_1, Answer_2, ...],
...
[Question, Answer_1, Answer_2, ...]
],
'LITERATURE' :
[
[Question, Answer_1, Answer_2, ...],
...
[Question, Answer_1, Answer_2, ...]
],
}
'''
| 23.093333
| 84
| 0.534642
|
4a01c0902e936efd2f571d102d672e286d6e6cc1
| 2,175
|
py
|
Python
|
videos/migrations/0001_initial.py
|
engineerscodes/PROTO-SEEK
|
f46a8a04f9a04151241015cc98cc79bfb4df0209
|
[
"MIT"
] | 1
|
2021-06-05T08:45:48.000Z
|
2021-06-05T08:45:48.000Z
|
videos/migrations/0001_initial.py
|
engineerscodes/PROTO-SEEK
|
f46a8a04f9a04151241015cc98cc79bfb4df0209
|
[
"MIT"
] | null | null | null |
videos/migrations/0001_initial.py
|
engineerscodes/PROTO-SEEK
|
f46a8a04f9a04151241015cc98cc79bfb4df0209
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.1 on 2021-05-27 10:36
import django.core.validators
from django.db import migrations, models
import videos.VideoSizeVal
class Migration(migrations.Migration):
    # Auto-generated initial schema: creates the videoUpload and Marks tables.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='videoUpload',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('captions', models.CharField(max_length=100)),
                ('username', models.CharField(max_length=100)),
                ('date', models.DateField(default='2001-04-12')),
                ('EventName', models.CharField(default='', max_length=50)),
                ('thumbnail', models.TextField()),
                # Uploads are size- and extension-validated (mp4/MOV/MKV).
                ('video', models.FileField(upload_to='videos/%y', validators=[videos.VideoSizeVal.file_size, django.core.validators.FileExtensionValidator(allowed_extensions=['mp4', 'MOV', 'MKV'])])),
                ('url_64encoding', models.CharField(default='/upload/videos/', max_length=2048)),
                ('Total_marks', models.IntegerField(default=0)),
                ('EventID', models.CharField(default='welcome newbie', max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Marks',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('videoId', models.CharField(max_length=250)),
                ('by_email', models.CharField(max_length=250)),
                ('marks', models.IntegerField(validators=[django.core.validators.MinValueValidator(0)])),
                ('moderator_email', models.CharField(max_length=250)),
                ('video_link', models.CharField(max_length=100000)),
                ('date', models.DateField(default='2001-04-12')),
                ('EventName', models.CharField(default='', max_length=50)),
                ('verfiyed', models.BooleanField(default=False)),
            ],
            options={
                # Each moderator may mark a given video at most once.
                'unique_together': {('videoId', 'moderator_email')},
            },
        ),
    ]
| 44.387755
| 200
| 0.585747
|
4a01c165b2af3ae5dc9cb9542f8197626df1cc0f
| 13,505
|
py
|
Python
|
torch/fx/experimental/fx2trt/lower.py
|
steffenerickson/pytorch
|
0b656c4c69ce77ecd9aace486e471917e4660746
|
[
"Intel"
] | null | null | null |
torch/fx/experimental/fx2trt/lower.py
|
steffenerickson/pytorch
|
0b656c4c69ce77ecd9aace486e471917e4660746
|
[
"Intel"
] | null | null | null |
torch/fx/experimental/fx2trt/lower.py
|
steffenerickson/pytorch
|
0b656c4c69ce77ecd9aace486e471917e4660746
|
[
"Intel"
] | null | null | null |
import dataclasses as dc
import logging
import typing as t
from typing import Type, Set, Optional
import tensorrt as trt
import torch
import torch.fx as fx
import torch.fx.experimental.fx_acc.acc_tracer as acc_tracer
import torch.nn as nn
from torch.fx.experimental.const_fold import split_const_subgraphs
from .fx2trt import (
TRTInterpreter,
)
from .input_tensor_spec import (
InputTensorSpec,
)
from .passes.fuse_pass import (
fuse_permute_linear,
fuse_permute_matmul,
fuse_unsqueeze_cat_sum,
)
from .passes.remove_duplicate_output_args import (
RemoveDuplicateOutputArgsFunc,
remove_duplicate_output_args,
)
from .split import (
Splitter,
SplitFunc,
)
from .tools.timing_cache_utils import (
TimingCacheManager,
)
from .trt_module import (
TRTModule,
)
from torch.fx.experimental.fx_acc import acc_normalizer
logger = logging.getLogger(__name__)
Input = t.Sequence[t.Any]
TModule = t.TypeVar("TModule", bound=nn.Module)
def lower_to_trt(
    module: nn.Module,
    input,
    max_batch_size: int = 2048,
    max_workspace_size=1 << 25,
    explicit_batch_dimension=False,
    fp16_mode=True,
    enable_fuse=True,
    verbose_log=False,
    timing_cache_prefix="",
    save_timing_cache=True,
    cuda_graph_batch_size=-1,
) -> nn.Module:
    """
    Takes in original module, input and lowering setting, run lowering workflow to turn module
    into lowered module, or so called TRTModule.

    Args:
        module: Original module for lowering.
        input: Input for module.
        max_batch_size: Maximum batch size (must be >= 1 to be set, 0 means not set)
        max_workspace_size: Maximum size of workspace given to TensorRT.
        explicit_batch_dimension: Use explicit batch dimension in TensorRT if set True, otherwise use implicit batch dimension.
        fp16_mode: fp16 config given to TRTModule.
        enable_fuse: Enable pass fusion during lowering if set to true. l=Lowering will try to find pattern defined
        in torch.fx.experimental.fx2trt.passes from original module, and replace with optimized pass before apply lowering.
        verbose_log: Enable verbose log for TensorRT if set True.
        timing_cache_prefix: Timing cache file name for timing cache used by fx2trt.
        save_timing_cache: Update timing cache with current timing cache data if set to True.
        cuda_graph_batch_size: Cuda graph batch size, default to be -1.

    Returns:
        A torch.nn.Module lowered by TensorRT.
    """
    # NOTE(review): cuda_graph_batch_size is accepted and documented above but
    # is not forwarded to LowerSetting below -- confirm whether it should be.
    lower_setting = LowerSetting(
        max_batch_size=max_batch_size,
        max_workspace_size=max_workspace_size,
        explicit_batch_dimension=explicit_batch_dimension,
        fp16_mode=fp16_mode,
        enable_fuse=enable_fuse,
        verbose_log=verbose_log,
        timing_cache_prefix=timing_cache_prefix,
        save_timing_cache=save_timing_cache,
    )
    lowerer = Lowerer.create(lower_setting=lower_setting)
    return lowerer(module, input)
@dc.dataclass
class LowerSetting:
    """
    Basic configuration for lowering stack.

    Args:
        max_batch_size: The maximum batch size which can be used at execution time,
        and also the batch size for which the ICudaEngine will be optimized.

        input_specs: Specs for inputs to engine, can either be a single size or a
        range defined by Min, Optimal, Max sizes.

        explicit_batch_dimension: Use explicit batch dimension during lowering.

        explicit_precision: Use explicit precision during lowering.

        fp16_mode: Enable FP16 dtype during lowering.

        int8_mode: Enable Int8 dtype during lowering.

        max_workspace_size: The maximum workspace size. The maximum GPU temporary
        memory which the TensorRT engine can use at execution time.

        strict_type_constraints: Require TensorRT engine to strictly follow data type
        setting at execution time.

        enable_fuse: Enable pass fuse duirng lowering, i.e. fuse multiple operations
        as (a->b->c->d)=>(e). Current available fuse source patterns are:
        sparse->matmul->add
        permute->linear
        permute->matmul
        unsqueeze->cat->sum

        enable_fuse_for_sparsity: Enable pass fuse for sparsity.

        verbose_log: Enable TensorRT engine verbose log mode.

        algo_selector: Enable TensorRT algorithm selector at execution time.

        timing_cache_prefix: TensorRT timing cache file path. TensorRT engine will use timing
        cache file at execution time if valid timing cache file is provided.

        save_timing_cache: Save updated timing cache data into timing cache file if the timing
        cache file is provided.

        ast_rewriter_allow_list (Optional[Set[nn.Module]]): Optional allow list of
        modules that need AST rewriting. This is aiming to eliminate input variable involve in
        exception checking control flow.

        leaf_module_list (Optional[Set[nn.Module]]): Optional leaf module list where
        modules will not be traced into.
    """
    max_batch_size: int = 2048
    input_specs: t.List[InputTensorSpec] = dc.field(default_factory=list)
    explicit_batch_dimension: bool = True
    explicit_precision: bool = False
    fp16_mode: bool = False
    int8_mode: bool = False
    max_workspace_size: int = 1 << 30
    strict_type_constraints: bool = False
    enable_fuse: bool = True
    # NOTE: no type annotation -> plain class attribute, NOT a dataclass field
    # (it does not appear in __init__). Presumably intentional -- confirm.
    enable_fuse_for_sparsity = False
    verbose_log: bool = False
    # NOTE: no type annotation -> plain class attribute, NOT a dataclass field.
    algo_selector = None
    timing_cache_prefix: str = ""
    save_timing_cache: bool = False
    ast_rewriter_allow_list: Optional[Set[Type[nn.Module]]] = None
    leaf_module_list: Optional[Set[Type[nn.Module]]] = None
@dc.dataclass
class LowerTrtInterpreter:
    """Callable that lowers one acc-traced split into a TensorRT engine."""
    # Lowering configuration (precision flags, workspace size, fuse passes, ...).
    lower_setting: LowerSetting
    # Loads/saves the per-split TensorRT timing-cache files.
    timing_cache_manager: TimingCacheManager
    @classmethod
    def create(cls, lower_setting):
        """Build an interpreter whose timing-cache manager is configured from *lower_setting*."""
        timing_cache_manager = TimingCacheManager(
            lower_setting.timing_cache_prefix, lower_setting.save_timing_cache
        )
        return LowerTrtInterpreter(lower_setting, timing_cache_manager)
    def __call__(self, mod, input, split_name):
        """Convert *mod* into a TensorRT engine.

        Args:
            mod: acc-traced ``fx.GraphModule`` for a single "acc" split.
            input: sample inputs, used to derive input specs when none were configured.
            split_name: split identifier; keys the timing cache and the
                algorithm-selector JSON file.

        Returns:
            The ``TRTInterpreter.run`` result (engine, input/output names,
            serialized timing cache).
        """
        # Prefer explicitly configured input specs; otherwise infer them from
        # the sample tensors.
        input_specs_val = (
            self.lower_setting.input_specs
            if self.lower_setting.input_specs
            else InputTensorSpec.from_tensors(input)
        )
        if self.lower_setting.enable_fuse:
            # Graph-level fusions (permute+matmul, permute+linear, unsqueeze+cat+sum).
            mod = fuse_permute_matmul(mod)
            mod = fuse_permute_linear(mod)
            mod = fuse_unsqueeze_cat_sum(mod)
        # Prepare algorithm selector and timing_cache for TRTInterpreter
        algo_selector = None
        if self.lower_setting.algo_selector:
            algo_selector = self.lower_setting.algo_selector(f"{split_name}.json")
        cache_data = None
        if self.timing_cache_manager:
            try:
                cache_data = self.timing_cache_manager.get_timing_cache_trt(split_name)
            except Exception as e:
                # A missing/corrupt cache only slows the build down; never fail lowering.
                logger.warning(f"Cannot load timing cache for {split_name}: {str(e)}")
                cache_data = None
        interpreter = TRTInterpreter(
            mod,
            input_specs=input_specs_val,
            explicit_batch_dimension=self.lower_setting.explicit_batch_dimension,
            explicit_precision=self.lower_setting.explicit_precision,
            logger_level=trt.Logger.VERBOSE
            if self.lower_setting.verbose_log
            else trt.Logger.WARNING,
        )
        interp_result = interpreter.run(
            max_batch_size=self.lower_setting.max_batch_size,
            max_workspace_size=self.lower_setting.max_workspace_size,
            fp16_mode=self.lower_setting.fp16_mode,
            int8_mode=self.lower_setting.int8_mode,
            strict_type_constraints=self.lower_setting.strict_type_constraints,
            algorithm_selector=algo_selector,
            timing_cache=cache_data,
        )
        # Update timing cache file if needed
        timing_cache = interp_result.serialized_cache
        if timing_cache and self.timing_cache_manager:
            self.timing_cache_manager.update_timing_cache(split_name, timing_cache)
        return interp_result
class LowerFunc:
    """Callable interface implemented by every fx2trt lowering function."""

    def __call__(
        self,
        module: fx.GraphModule,
        input: Input,
    ) -> nn.Module:
        """Lower *module* with fx2trt.

        Args:
            module: the module to be lowered
            input: a sample input driving the lowering

        Returns:
            The lowered ``nn.Module``.
        """
        raise NotImplementedError()
def fx2trt_lower(module: nn.Module, sample_input: t.Any) -> fx.GraphModule:
    """Lower *module* through fx2trt using a default ``LowerSetting``.

    TODO: @kefeilu: this function's body should be moved into the actual calling
    site in the model publisher workflow, since now the lowering function
    signature (`LowerFunc`) is encapsulated in the `Lowerer` callable class.
    """
    assert isinstance(
        module, fx.GraphModule
    ), f"Expecting fx.GraphModule, got: {type(module)}"
    logger.info(f"Module FX Graph: {module.graph}")
    settings = LowerSetting()
    lowerer = Lowerer.create(lower_setting=settings)
    lowered = lowerer(module, sample_input)
    assert isinstance(lowered, fx.GraphModule)
    return lowered
@dc.dataclass(frozen=True)
class Lowerer(LowerFunc):
    """Lowers a module using fx2trt.
    This is a composable class to facilitate fx2trt. A normal fx2trt process
    composes of the following passes to transform an `fx.GraphModule`:
        1. split - the input graph module is split into several sub-nets,
           running either via TensorRT, or via regular CUDA.
    For each split that need to run via TRT, the following passes are
    invoked:
        2. acc_trace - trace the module for TRT conversion
        3. `remove_duplicate_output_args` - since fx2TRT doesn't support duplicate arguments in
           an `fx.GraphModule`'s `output` node, this pass is needed to remove the duplicated
           output args from the split and also update the parent module to fix their uses
           accordingly.
        4. `TRTInterpreter` - runs the acc traced module through `TRTInterpreter`
           to build the TRT engine
        5. Wraps the executable TRT engine into `TRTModule`, which is an `nn.Module`.
        6. The lowered subnet is then set back onto the top-level module
    # TODO: @kefeilu: also incorporates a validator to do inference (and optionally)
    # result comparison along the way.
    Attributes:
        split: the fx2trt split function.
        acc_trace: trace function for TRT conversion.
        remove_duplicate_output_args: module transformation pass to remove duplicate args in
            a subnet's `output` node.
        trt_interpret: function to create and run `TRTInterpreter` to convert `fx.GraphModule`
            into a TensorRT engine.
        fp16: whether to lower weights/inputs to half precision.
    """
    split: SplitFunc
    acc_trace: t.Callable[[fx.GraphModule, Input], fx.GraphModule]
    remove_duplicate_output_args: RemoveDuplicateOutputArgsFunc
    trt_interpreter: LowerTrtInterpreter
    fp16: bool
    @classmethod
    def create(
        cls,
        lower_setting: LowerSetting,
    ) -> "Lowerer":
        """Instantiate a `Lowerer` instance."""
        return Lowerer(
            split=Splitter.create(not lower_setting.explicit_batch_dimension),
            acc_trace=lambda mod, input:
            acc_tracer.trace(
                mod,
                input,  # type: ignore[arg-type]
                ast_rewriter_allow_list=lower_setting.ast_rewriter_allow_list,
                leaf_module_list=lower_setting.leaf_module_list),  # type: ignore[arg-type]
            remove_duplicate_output_args=remove_duplicate_output_args,
            trt_interpreter=LowerTrtInterpreter.create(lower_setting),
            fp16=lower_setting.fp16_mode,
        )
    def __call__(
        self,
        module: nn.Module,
        input: Input,
        cuda_graph_batch_size: int = -1,
        skip_folding_node_fn: t.Optional[t.Callable[[fx.Node], bool]] = None,
    ) -> nn.Module:
        """See `LowerFunc` protocol"""
        if self.fp16:
            # Halve both the weights and any float32 sample inputs.
            module.eval().half()
            input = tuple(x.half() if x.dtype == torch.float32 else x for x in input)
        # Ensure ast_rewrite is done for input module before const_fold
        module = self.acc_trace(module, input)  # type: ignore[misc]
        # Fold constant subgraphs, then normalize to acc ops before splitting.
        const_split_mod = split_const_subgraphs(module, skip_folding_node_fn)
        const_split_mod.run_folding()
        acc_normalizer.normalize(const_split_mod, expect_nodes_have_shapes=False)
        split_module, splits = self.split(const_split_mod, input)  # type: ignore[arg-type]
        split_module.eval()  # type: ignore[attr-defined]
        # Lower every "acc" split to a TRT engine; other splits stay on CUDA.
        for _split in splits:  # type: ignore[attr-defined]
            if _split.device == "acc":
                # Ensure parent module is updated with the traced sub-net before running
                # remove_duplicate_output_args.
                self.remove_duplicate_output_args(_split.module, [_split.name])  # type: ignore[misc, operator]
                interp_res = self.trt_interpreter(
                    _split.module, _split.input, _split.name
                )
                trt_module = TRTModule(
                    engine=interp_res.engine,
                    input_names=interp_res.input_names,
                    output_names=interp_res.output_names,
                    cuda_graph_batch_size=cuda_graph_batch_size,
                )
                # Replace the sub-net on the parent with the executable TRT module.
                setattr(split_module, _split.name, trt_module)
        return split_module  # type: ignore[return-value]
| 36.206434
| 127
| 0.691077
|
4a01c1f3119f6d3a6d689edb6b8b0fd2f8d79444
| 185
|
wsgi
|
Python
|
heroku_deploy.wsgi
|
doublechiang/pcdvd_getuserpost
|
654aa385396792cf0c51570cff81c4921e8c8293
|
[
"Apache-2.0"
] | null | null | null |
heroku_deploy.wsgi
|
doublechiang/pcdvd_getuserpost
|
654aa385396792cf0c51570cff81c4921e8c8293
|
[
"Apache-2.0"
] | 4
|
2021-05-12T00:03:03.000Z
|
2021-06-14T23:29:26.000Z
|
heroku_deploy.wsgi
|
doublechiang/pcdvd_userpost_by_thread
|
654aa385396792cf0c51570cff81c4921e8c8293
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# WSGI entry point: exposes the Flask app as the module-level `application`
# callable that WSGI servers (mod_wsgi, gunicorn, ...) look for.
import sys
import os
# Make this file's directory importable regardless of the server's CWD.
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
# from QMFRacklog import app as application
from app import app as application
| 30.833333
| 62
| 0.789189
|
4a01c3499a364326143673f1c8d5448ca9366501
| 8,154
|
py
|
Python
|
jaseci_kit/jaseci_kit/modules/encoders/utils/tokenizer.py
|
Gorgeous-Patrick/jaseci
|
b423165fefbbc9574cd4467ee05728add7f47e5a
|
[
"MIT"
] | 6
|
2021-10-30T03:35:36.000Z
|
2022-02-10T02:06:18.000Z
|
jaseci_kit/jaseci_kit/modules/encoders/utils/tokenizer.py
|
Gorgeous-Patrick/jaseci
|
b423165fefbbc9574cd4467ee05728add7f47e5a
|
[
"MIT"
] | 85
|
2021-10-29T22:47:39.000Z
|
2022-03-31T06:11:52.000Z
|
jaseci_kit/jaseci_kit/modules/encoders/utils/tokenizer.py
|
Gorgeous-Patrick/jaseci
|
b423165fefbbc9574cd4467ee05728add7f47e5a
|
[
"MIT"
] | 12
|
2021-11-03T17:29:22.000Z
|
2022-03-30T16:01:53.000Z
|
import torch
from torch.utils.data import Dataset
# this class transforms data in required training format
# and inference for text and tokens
class SelectionDataset(Dataset):
    """Groups (context, candidates, labels) triples for training or inference.

    Each element of ``data_source`` is a dict with keys ``context`` (a
    one-element list holding the context text), ``candidates`` (candidate
    texts) and ``labels`` (ints parallel to ``candidates``).
    """
    def __init__(
        self,
        contexts,
        candidates,
        context_transform,
        candidate_transform,
        labels=None,
        mode="train",
    ):
        # Callables mapping raw text to ([token_ids], [masks]) pairs.
        self.context_transform = context_transform
        self.candidate_transform = candidate_transform
        self.data_source = []
        self.mode = mode
        if mode == "eval":
            # Inference: pair every context with the full candidate set.
            for text in contexts:
                group = {"context": None, "candidates": [], "labels": []}
                for cand in candidates:
                    group["candidates"].append(cand)
                    # below label is 0 for all,used for help in inference
                    group["labels"].append(0)
                group["context"] = [text]
                self.data_source.append(group)
        else:
            # Training: one group per (context, candidate, label) triple.
            group = {"context": None, "candidates": [], "labels": []}
            for text, cand, lbl in zip(contexts, candidates, labels):
                group = {"context": None, "candidates": [], "labels": []}
                # below code is to combine positive and negative sample
                # if lbl == 1 and len(group['candidates']) > 0:
                #     self.data_source.append(group)
                #     group = {
                #         'context': None,
                #         'candidates': [],
                #         'labels': []
                #     }
                group["candidates"].append(cand)
                group["labels"].append(lbl)
                group["context"] = [text]
                # if len(group['candidates']) > 0:
                self.data_source.append(group)
            group = {"context": None, "candidates": [], "labels": []}
        # Pad tiny training sets with synthetic samples so training always has
        # at least two groups to work with.
        if len(self.data_source) < 2 and mode != "eval":
            group["context"] = ["This is sample text"]
            group["candidates"].append("sampletext")
            group["labels"].append(1)
            if len(candidates) > 1:
                group["context"] = ["This is a sample text"]
                group["candidates"].append("notsampletext")
                group["labels"].append(0)
            self.data_source.append(group)
    def __len__(self):
        return len(self.data_source)
    def __getitem__(self, index):
        """Return (transformed_context, transformed_candidates, labels) for one group."""
        group = self.data_source[index]
        context, candidates, labels = (
            group["context"],
            group["candidates"],
            group["labels"],
        )
        transformed_context = self.context_transform(
            context
        )  # [token_ids],[seg_ids],[masks]
        transformed_candidates = self.candidate_transform(
            candidates
        )  # [token_ids],[seg_ids],[masks]
        ret = transformed_context, transformed_candidates, labels
        return ret
    def batchify_join_str(self, batch):
        """Collate a batch of __getitem__ outputs into long/float tensors."""
        (
            contexts_token_ids_list_batch,
            contexts_input_masks_list_batch,
            candidates_token_ids_list_batch,
            candidates_input_masks_list_batch,
        ) = ([], [], [], [])
        labels_batch = []
        for sample in batch:
            # Each sample is ((ctx_ids, ctx_masks), (cand_ids, cand_masks), labels).
            (contexts_token_ids_list, contexts_input_masks_list), (
                candidates_token_ids_list,
                candidates_input_masks_list,
            ) = sample[:2]
            contexts_token_ids_list_batch.append(contexts_token_ids_list)
            contexts_input_masks_list_batch.append(contexts_input_masks_list)
            candidates_token_ids_list_batch.append(candidates_token_ids_list)
            candidates_input_masks_list_batch.append(candidates_input_masks_list)
            labels_batch.append(sample[-1])
        long_tensors = [
            contexts_token_ids_list_batch,
            contexts_input_masks_list_batch,
            candidates_token_ids_list_batch,
            candidates_input_masks_list_batch,
        ]
        (
            contexts_token_ids_list_batch,
            contexts_input_masks_list_batch,
            candidates_token_ids_list_batch,
            candidates_input_masks_list_batch,
        ) = (torch.tensor(t, dtype=torch.long) for t in long_tensors)
        # Labels are float for use with BCE-style losses.
        labels_batch = torch.tensor(labels_batch, dtype=torch.float)
        return (
            contexts_token_ids_list_batch,
            contexts_input_masks_list_batch,
            candidates_token_ids_list_batch,
            candidates_input_masks_list_batch,
            labels_batch,
        )
# this class transforms data to generate embeddings
class EvalDataset(Dataset):
    """Single-group dataset used to generate embeddings for raw text.

    In ``"context"`` mode the whole *texts* argument becomes one entry; in any
    other mode each element of *texts* becomes its own entry. Either way the
    dataset always contains exactly one group (``len(self) == 1``).
    """

    def __init__(
        self, texts, context_transform=None, candidate_transform=None, mode="context"
    ):
        self.context_transform = context_transform
        self.candidate_transform = candidate_transform
        self.mode = mode
        entries = [texts] if mode == "context" else [text for text in texts]
        self.data_source = [{"text": entries}]

    def __len__(self):
        return len(self.data_source)

    def __getitem__(self, index):
        stored_texts = self.data_source[index]["text"]
        # Pick the transform matching the configured mode; each one returns
        # ([token_ids], [masks]).
        if self.mode == "context":
            return self.context_transform(stored_texts)
        return self.candidate_transform(stored_texts)

    def eval_str(self, batch):
        """Collate a batch of (token_ids, masks) pairs into two long tensors."""
        ids_batch = [sample[0] for sample in batch]
        masks_batch = [sample[1] for sample in batch]
        return (
            torch.tensor(ids_batch, dtype=torch.long),
            torch.tensor(masks_batch, dtype=torch.long),
        )
# this class is for creating token data for candidate
class SelectionSequentialTransform(object):
    """Tokenize each candidate text independently into fixed-length id/mask lists."""

    def __init__(self, tokenizer, max_len):
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __call__(self, texts):
        ids_per_text, masks_per_text = [], []
        for candidate in texts:
            encoded = self.tokenizer.encode_plus(
                candidate, padding="max_length", max_length=self.max_len, truncation=True
            )
            token_ids = encoded["input_ids"]
            attention_mask = encoded["attention_mask"]
            # The tokenizer was asked to pad/truncate, so both lists must come
            # back exactly max_len long.
            assert len(token_ids) == self.max_len
            assert len(attention_mask) == self.max_len
            ids_per_text.append(token_ids)
            masks_per_text.append(attention_mask)
        return ids_per_text, masks_per_text
# this class is for creating token data for context
class SelectionJoinTransform(object):
    """Tokenize a whole context (texts joined with newlines) into one
    fixed-length (ids, mask) pair, keeping only the *last* max_len tokens."""

    def __init__(self, tokenizer, max_len):
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.cls_id = self.tokenizer.convert_tokens_to_ids("[CLS]")
        self.sep_id = self.tokenizer.convert_tokens_to_ids("[SEP]")
        self.pad_id = 0

    def __call__(self, texts):
        # another option is to use [SEP], but here we follow the discussion at:
        # https://github.com/facebookresearch/ParlAI/issues/2306#issuecomment-599180186
        joined = "\n".join(texts)
        encoded = self.tokenizer.encode_plus(joined)
        token_ids = encoded["input_ids"][-self.max_len :]
        attention_mask = encoded["attention_mask"][-self.max_len :]
        # Tail-truncation may have cut off [CLS]; force the first token back
        # to [CLS] so the encoder always sees a well-formed sequence.
        token_ids[0] = self.cls_id
        token_ids = token_ids + [self.pad_id] * (self.max_len - len(token_ids))
        attention_mask = attention_mask + [0] * (self.max_len - len(attention_mask))
        assert len(token_ids) == self.max_len
        assert len(attention_mask) == self.max_len
        return token_ids, attention_mask
| 36.895928
| 87
| 0.597008
|
4a01c37a5e5bfb6de9b949c72f7f86d3598c8ee1
| 9,018
|
py
|
Python
|
test/functional/feature_signet.py
|
afco2021/afco_sourcecode
|
886fe458a4a7c7f4ac999a8bad64422ad1fd8736
|
[
"MIT"
] | null | null | null |
test/functional/feature_signet.py
|
afco2021/afco_sourcecode
|
886fe458a4a7c7f4ac999a8bad64422ad1fd8736
|
[
"MIT"
] | null | null | null |
test/functional/feature_signet.py
|
afco2021/afco_sourcecode
|
886fe458a4a7c7f4ac999a8bad64422ad1fd8736
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test basic signet functionality"""
from decimal import Decimal
from test_framework.test_framework import AFCOTestFramework
from test_framework.util import assert_equal
signet_blocks = [
'00000020f61eee3b63a380a477a063af32b2bbc97c9ff9f01f2c4225e973988108000000f575c83235984e7dc4afc1f30944c170462e84437ab6f2d52e16878a79e4678bd1914d5fae77031eccf4070001010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff025151feffffff0200f2052a010000001600149243f727dd5343293eb83174324019ec16c2630f0000000000000000776a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf94c4fecc7daa2490047304402205e423a8754336ca99dbe16509b877ef1bf98d008836c725005b3c787c41ebe46022047246e4467ad7cc7f1ad98662afcaf14c115e0095a227c7b05c5182591c23e7e01000120000000000000000000000000000000000000000000000000000000000000000000000000',
'00000020533b53ded9bff4adc94101d32400a144c54edc5ed492a3b26c63b2d686000000b38fef50592017cfafbcab88eb3d9cf50b2c801711cad8299495d26df5e54812e7914d5fae77031ecfdd0b0001010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff025251feffffff0200f2052a01000000160014fd09839740f0e0b4fc6d5e2527e4022aa9b89dfa0000000000000000776a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf94c4fecc7daa24900473044022031d64a1692cdad1fc0ced69838169fe19ae01be524d831b95fcf5ea4e6541c3c02204f9dea0801df8b4d0cd0857c62ab35c6c25cc47c930630dc7fe723531daa3e9b01000120000000000000000000000000000000000000000000000000000000000000000000000000',
'000000202960f3752f0bfa8858a3e333294aedc7808025e868c9dc03e71d88bb320000007765fcd3d5b4966beb338bba2675dc2cf2ad28d4ad1d83bdb6f286e7e27ac1f807924d5fae77031e81d60b0001010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff025351feffffff0200f2052a010000001600141e5fb426042692ae0e87c070e78c39307a5661c20000000000000000776a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf94c4fecc7daa2490047304402205de93694763a42954865bcf1540cb82958bc62d0ec4eee02070fb7937cd037f4022067f333753bce47b10bc25eb6e1f311482e994c862a7e0b2d41ab1c8679fd1b1101000120000000000000000000000000000000000000000000000000000000000000000000000000',
'00000020b06443a13ae1d3d50faef5ecad38c6818194dc46abca3e972e2aacdae800000069a5829097e80fee00ac49a56ea9f82d741a6af84d32b3bc455cf31871e2a8ac27924d5fae77031e9c91050001010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff025451feffffff0200f2052a0100000016001430db2f8225dcf7751361ab38735de08190318cb70000000000000000776a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf94c4fecc7daa2490047304402200936f5f9872f6df5dd242026ad52241a68423f7f682e79169a8d85a374eab9b802202cd2979c48b321b3453e65e8f92460db3fca93cbea8539b450c959f4fbe630c601000120000000000000000000000000000000000000000000000000000000000000000000000000',
'000000207ed403758a4f228a1939418a155e2ebd4ae6b26e5ffd0ae433123f7694010000542e80b609c5bc58af5bdf492e26d4f60cd43a3966c2e063c50444c29b3757a636924d5fae77031ee8601d0001010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff025551feffffff0200f2052a01000000160014edc207e014df34fa3885dff97d1129d356e1186a0000000000000000776a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf94c4fecc7daa24900473044022021a3656609f85a66a2c5672ed9322c2158d57251040d2716ed202a1fe14f0c12022057d68bc6611f7a9424a7e00bbf3e27e6ae6b096f60bac624a094bc97a59aa1ff01000120000000000000000000000000000000000000000000000000000000000000000000000000',
'000000205bea0a88d1422c3df08d766ad72df95084d0700e6f873b75dd4e986c7703000002b57516d33ed60c2bdd9f93d6d56140335834c837e68e5ba6e04287a7285633585924d5fae77031ed171960001010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff025651feffffff0200f2052a010000001600143ae612599cf96f2442ce572633e0251116eaa52f0000000000000000776a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf94c4fecc7daa24900473044022059a7c54de76bfdbb1dd44c78ea2dbd2bb4e97f4abad38965f41e76433e56423c022054bf17f04fe17415c0141f60eebd2b839200f574d8ad8d55a0917b92b0eb913401000120000000000000000000000000000000000000000000000000000000000000000000000000',
'00000020daf3b60d374b19476461f97540498dcfa2eb7016238ec6b1d022f82fb60100007a7ae65b53cb988c2ec92d2384996713821d5645ffe61c9acea60da75cd5edfa1a944d5fae77031e9dbb050001010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff025751feffffff0200f2052a01000000160014ef2dceae02e35f8137de76768ae3345d99ca68860000000000000000776a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf94c4fecc7daa2490047304402202b3f946d6447f9bf17d00f3696cede7ee70b785495e5498274ee682a493befd5022045fc0bcf9332243168b5d35507175f9f374a8eba2336873885d12aada67ea5f601000120000000000000000000000000000000000000000000000000000000000000000000000000',
'00000020457cc5f3c2e1a5655bc20e20e48d33e1b7ea68786c614032b5c518f0b6000000541f36942d82c6e7248275ff15c8933487fbe1819c67a9ecc0f4b70bb7e6cf672a944d5fae77031e8f39860001010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff025851feffffff0200f2052a0100000016001472a27906947c06d034b38ba2fa13c6391a4832790000000000000000776a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf94c4fecc7daa2490047304402202d62805ce60cbd60591f97f949b5ea5bd7e2307bcde343e6ea8394da92758e72022053a25370b0aa20da100189b7899a8f8675a0fdc60e38ece6b8a4f98edd94569e01000120000000000000000000000000000000000000000000000000000000000000000000000000',
'00000020a2eb61eb4f3831baa3a3363e1b42db4462663f756f07423e81ed30322102000077224de7dea0f8d0ec22b1d2e2e255f0a987b96fe7200e1a2e6373f48a2f5b7894954d5fae77031e36867e0001010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff025951feffffff0200f2052a01000000160014aa0ad9f26801258382e0734dceec03a4a75f60240000000000000000776a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf94c4fecc7daa2490047304402206fa0d59990eed369bd7375767c9a6c9369fae209152b8674e520da270605528c0220749eed3b12dbe3f583f505d21803e4aef59c8e24c5831951eafa4f15a8f92c4e01000120000000000000000000000000000000000000000000000000000000000000000000000000',
'00000020a868e8514be5e46dabd6a122132f423f36a43b716a40c394e2a8d063e1010000f4c6c717e99d800c699c25a2006a75a0c5c09f432a936f385e6fce139cdbd1a5e9964d5fae77031e7d026e0001010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff025a51feffffff0200f2052a01000000160014aaa671c82b138e3b8f510cd801e5f2bd0aa305940000000000000000776a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf94c4fecc7daa24900473044022042309f4c3c7a1a2ac8c24f890f962df1c0086cec10be0868087cfc427520cb2702201dafee8911c269b7e786e242045bb57cef3f5b0f177010c6159abae42f646cc501000120000000000000000000000000000000000000000000000000000000000000000000000000',
]
class SignetBasicTest(AFCOTestFramework):
    """Basic signet checks: mining info, acceptance of pregenerated blocks,
    rejection under a mismatched challenge, and network-magic logging."""
    def set_test_params(self):
        self.chain = "signet"
        self.num_nodes = 6
        self.setup_clean_chain = True
        # Nodes 0-1: trivially satisfiable challenge; 2-3: default challenge;
        # 4-5: incompatible 2-of-2 challenge (must reject blocks from 2-3's chain).
        shared_args1 = ["-signetchallenge=51"]  # OP_TRUE
        shared_args2 = []  # default challenge
        # we use the exact same challenge except we do it as a 2-of-2, which means it should fail
        shared_args3 = ["-signetchallenge=522103ad5e0edad18cb1f0fc0d28a3d4f1f3e445640337489abb10404f2d1e086be430210359ef5021964fe22d6f8e05b2463c9540ce96883fe3b278760f048f5189f2e6c452ae"]
        self.extra_args = [
            shared_args1, shared_args1,
            shared_args2, shared_args2,
            shared_args3, shared_args3,
        ]
    def run_test(self):
        self.log.info("basic tests using OP_TRUE challenge")
        self.log.info('getmininginfo')
        mining_info = self.nodes[0].getmininginfo()
        # Fresh chain: no blocks, no mempool, no hashrate yet.
        assert_equal(mining_info['blocks'], 0)
        assert_equal(mining_info['chain'], 'signet')
        assert 'currentblocktx' not in mining_info
        assert 'currentblockweight' not in mining_info
        assert_equal(mining_info['networkhashps'], Decimal('0'))
        assert_equal(mining_info['pooledtx'], 0)
        self.nodes[0].generate(1)
        self.log.info("pregenerated signet blocks check")
        height = 0
        # submitblock returns None on acceptance; height must advance each time.
        for block in signet_blocks:
            assert_equal(self.nodes[2].submitblock(block), None)
            height += 1
            assert_equal(self.nodes[2].getblockcount(), height)
        self.log.info("pregenerated signet blocks check (incompatible solution)")
        # Node 4 runs the 2-of-2 challenge, so the same block must be rejected.
        assert_equal(self.nodes[4].submitblock(signet_blocks[0]), 'bad-signet-blksig')
        self.log.info("test that signet logs the network magic on node start")
        with self.nodes[0].assert_debug_log(["Signet derived magic (message start)"]):
            self.restart_node(0)
if __name__ == '__main__':
SignetBasicTest().main()
| 120.24
| 666
| 0.91018
|
4a01c3ea33b7d1ca60ce8d7ce1b23f4a5bd4dad6
| 1,416
|
py
|
Python
|
setup.py
|
Dry8r3aD/penta-nps
|
a4c74a2cd90eb2f95158e2040b7eca7056b062db
|
[
"MIT"
] | 6
|
2016-09-25T07:26:22.000Z
|
2022-03-16T06:30:05.000Z
|
setup.py
|
Dry8r3aD/penta-nps
|
a4c74a2cd90eb2f95158e2040b7eca7056b062db
|
[
"MIT"
] | 14
|
2016-10-04T00:02:20.000Z
|
2017-02-22T03:06:21.000Z
|
setup.py
|
Dry8r3aD/penta-nps
|
a4c74a2cd90eb2f95158e2040b7eca7056b062db
|
[
"MIT"
] | 5
|
2016-10-06T04:53:32.000Z
|
2019-12-08T13:48:58.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Packaging script for nps (network packet simulator).
#
# BUGFIX: the original imported `setup` from setuptools and then re-imported
# it from distutils.core, silently shadowing the setuptools version and losing
# setuptools-only behavior. The distutils imports (and the unused
# `find_packages` / `install` imports) were removed.
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
import nps
# Console scripts installed onto the user's PATH.
SCRIPTS = ['bin/nps', 'bin/arp_mon']
# Get the long description from the README file
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name=nps.__NPS_MODULE_NAME__,
    packages=['nps', 'nps/tools'],
    scripts=SCRIPTS,
    version=nps.__NPS_VERSION__,
    description='network packet simulator',
    long_description=long_description,
    url='http://www.penatasecurity.com',
    author='morenice',
    author_email='hyounggu_lee@pentasecurity.com',
    license='MIT',
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Topic :: Software Development',
        'Topic :: System :: Networking',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
)
| 28.897959
| 63
| 0.629237
|
4a01c4a41c0015c92fb5661e02c46885765dae8e
| 6,143
|
py
|
Python
|
cherami_client/publisher.py
|
fakeNetflix/uber-repo-cherami-client-python
|
05274549a39e29748f7649c3455707d32fe08a15
|
[
"MIT"
] | null | null | null |
cherami_client/publisher.py
|
fakeNetflix/uber-repo-cherami-client-python
|
05274549a39e29748f7649c3455707d32fe08a15
|
[
"MIT"
] | null | null | null |
cherami_client/publisher.py
|
fakeNetflix/uber-repo-cherami-client-python
|
05274549a39e29748f7649c3455707d32fe08a15
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import threading
from six.moves import queue
from cherami_client.lib import cherami, cherami_input, util
from cherami_client.publisher_thread import PublisherThread
from cherami_client.reconfigure_thread import ReconfigureThread
class Publisher(object):
    """Publishes messages to a Cherami destination.

    Maintains one ``PublisherThread`` per publish endpoint advertised by the
    frontend, plus a ``ReconfigureThread`` that periodically refreshes the
    endpoint list. Messages are handed to the worker threads through a shared
    ``task_queue``.
    """

    def __init__(self,
                 logger,
                 path,
                 tchannel,
                 deployment_str,
                 headers,
                 timeout_seconds,
                 reconfigure_interval_seconds):
        self.logger = logger
        self.path = path
        self.tchannel = tchannel
        self.deployment_str = deployment_str
        self.headers = headers
        self.timeout_seconds = timeout_seconds
        # (PutMessage, callback) tuples consumed by the worker threads.
        self.task_queue = queue.Queue()
        # connection key -> PublisherThread for each active endpoint.
        self.workers = {}
        self.reconfigure_signal = threading.Event()
        self.reconfigure_interval_seconds = reconfigure_interval_seconds
        self.reconfigure_thread = None

    def _reconfigure(self):
        """Sync worker threads with the frontend's current TCHANNEL endpoints.

        Raises:
            Exception: if the frontend offers no TCHANNEL endpoints.
        """
        self.logger.info('publisher reconfiguration started')
        result = util.execute_frontend(
            self.tchannel, self.deployment_str, self.headers, self.timeout_seconds, 'readPublisherOptions',
            cherami.ReadPublisherOptionsRequest(
                path=self.path,
            ))
        hostAddresses = []
        for host_protocol in result.hostProtocols:
            if host_protocol.protocol == cherami.Protocol.TCHANNEL:
                hostAddresses = host_protocol.hostAddresses
                break
        if not hostAddresses:
            raise Exception("tchannel protocol is not supported by cherami server")
        host_connection_set = set(map(lambda h: util.get_connection_key(h), hostAddresses))
        existing_connection_set = set(self.workers.keys())
        missing_connection_set = host_connection_set - existing_connection_set
        extra_connection_set = existing_connection_set - host_connection_set
        # clean up: stop workers whose endpoint is no longer advertised
        for extra_conn in extra_connection_set:
            self.logger.info('cleaning up connection %s', extra_conn)
            self.workers[extra_conn].stop()
            del self.workers[extra_conn]
        # start up: spawn workers for newly advertised endpoints
        for missing_conn in missing_connection_set:
            self.logger.info('creating new connection %s', missing_conn)
            worker = PublisherThread(
                path=self.path,
                task_queue=self.task_queue,
                tchannel=self.tchannel,
                hostport=missing_conn,
                headers=self.headers,
                timeout_seconds=self.timeout_seconds,
                checksum_option=result.checksumOption
            )
            self.workers[missing_conn] = worker
            worker.start()
        self.logger.info('publisher reconfiguration succeeded')

    # open the publisher. If succeed, we can start to publish messages
    # Otherwise, we should retry opening (with backoff)
    def open(self):
        """Open the publisher; on failure, clean up partial state and re-raise."""
        try:
            self._reconfigure()
            self.reconfigure_thread = ReconfigureThread(
                interval_seconds=self.reconfigure_interval_seconds,
                reconfigure_signal=self.reconfigure_signal,
                reconfigure_func=self._reconfigure,
                logger=self.logger,
            )
            self.reconfigure_thread.start()
        except Exception as e:
            self.logger.exception('Failed to open publisher: %s', e)
            self.close()
            raise e

    # close the publisher
    def close(self):
        """Stop the reconfigure thread (if started) and all worker threads."""
        if self.reconfigure_thread:
            self.reconfigure_thread.stop()
        # BUGFIX: dict.itervalues() exists only on Python 2 and made close()
        # raise AttributeError on Python 3; .values() works on both (the
        # module already uses six for 2/3 compatibility).
        for worker in self.workers.values():
            worker.stop()

    # publish a message. Returns an ack(type is cherami.PutMessageAck)
    # the Status field of the ack indicates whether the publish was successful or not
    # id: an identifier client can use to identify messages \
    # (cherami doesn't care about this field but just pass through)
    # data: message payload
    # user context: user specified context to pass through
    def publish(self, id, data, userContext=None):
        """Publish one message and wait up to ``timeout_seconds`` for its ack."""
        done_signal = threading.Event()
        result = []

        def done_callback(r):
            result.append(r)
            done_signal.set()

        # publish and later on wait
        self.publish_async(id, data, done_callback, userContext)
        done = done_signal.wait(self.timeout_seconds)
        if not done:
            return util.create_timeout_message_ack(id)
        if len(result) == 0:
            return util.create_failed_message_ack(id, 'unexpected: callback does not carry result')
        return result[0]

    # asynchronously publish a message.
    # A callback function needs to be provided(it expects a cherami.PutMessageAck object as parameter)
    def publish_async(self, id, data, callback, userContext=None):
        """Enqueue a message for the worker threads; *callback* receives the ack."""
        # BUGFIX: avoid a shared mutable default argument ({}); normalize the
        # None default here so behavior for callers is unchanged.
        if userContext is None:
            userContext = {}
        msg = cherami_input.PutMessage(
            id=id,
            delayMessageInSeconds=0,
            data=data,
            userContext=userContext
        )
        self.task_queue.put((msg, callback))
| 39.88961
| 107
| 0.659775
|
4a01c4e24baac464ffdbd33dfa73231212f528ba
| 4,780
|
py
|
Python
|
bokeh/_testing/util/filesystem.py
|
jeisch/bokeh
|
6be4d5ebbec04117f2bb0693fe64dc664f8f1bb1
|
[
"BSD-3-Clause"
] | 1
|
2020-03-21T04:11:51.000Z
|
2020-03-21T04:11:51.000Z
|
bokeh/_testing/util/filesystem.py
|
jeisch/bokeh
|
6be4d5ebbec04117f2bb0693fe64dc664f8f1bb1
|
[
"BSD-3-Clause"
] | 2
|
2021-05-08T11:43:21.000Z
|
2021-05-10T19:16:43.000Z
|
bokeh/_testing/util/filesystem.py
|
jeisch/bokeh
|
6be4d5ebbec04117f2bb0693fe64dc664f8f1bb1
|
[
"BSD-3-Clause"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide functions for manipulating files and directories in tests.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import codecs
import errno
import os
import shutil
import sys
import tempfile
# External imports
# Bokeh imports
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'makedirs_ok_if_exists',
'TmpDir',
'with_directory_contents',
'with_temporary_file',
'WorkingDir',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def makedirs_ok_if_exists(path):
'''
'''
try:
os.makedirs(path)
except IOError as e: # pragma: no cover (py3 only)
if e.errno != errno.EEXIST:
raise e
except OSError as e: # pragma: no cover (py2 only)
if e.errno != errno.EEXIST:
raise e
return path
class TmpDir(object):
'''
'''
def __init__(self, prefix):
self._dir = tempfile.mkdtemp(prefix=prefix, dir=_LOCAL_TMP)
def __exit__(self, type, value, traceback):
try:
shutil.rmtree(path=self._dir)
except Exception as e:
# prefer original exception to rmtree exception
if value is None:
print("Exception cleaning up TmpDir %s: %s" % (self._dir, str(e)), file=sys.stderr)
raise e
else:
print("Failed to clean up TmpDir %s: %s" % (self._dir, str(e)), file=sys.stderr)
raise value
def __enter__(self):
return self._dir
def with_directory_contents(contents, func):
'''
'''
with (TmpDir(prefix="test-")) as dirname:
for filename, file_content in contents.items():
path = os.path.join(dirname, filename)
if file_content is None:
# make a directory
makedirs_ok_if_exists(path)
else:
makedirs_ok_if_exists(os.path.dirname(path))
with codecs.open(path, 'w', 'utf-8') as f:
f.write(file_content)
return func(os.path.realpath(dirname))
def with_file_contents(contents, func, dir=None, suffix=''):
'''
'''
def with_file_object(f):
f.write(contents.encode("UTF-8"))
f.flush()
# Windows will get mad if we try to rename it without closing,
# and some users of with_file_contents want to rename it.
f.close()
func(f.name)
with_temporary_file(with_file_object, dir=dir, suffix=suffix)
def with_temporary_file(func, dir=None, suffix=''):
'''
'''
if dir is None:
dir = _LOCAL_TMP
import tempfile
# Windows throws a permission denied if we use delete=True for
# auto-delete, and then try to open the file again ourselves
# with f.name. So we manually delete in the finally block
# below.
f = tempfile.NamedTemporaryFile(dir=dir, delete=False, suffix=suffix)
try:
func(f)
finally:
f.close()
os.remove(f.name)
class WorkingDir(object):
'''
'''
def __init__(self, pwd):
self._new = pwd
self._old = os.getcwd()
def __exit__(self, type, value, traceback):
os.chdir(self._old)
def __enter__(self):
os.chdir(self._new)
return self._new
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
_LOCAL_TMP = os.path.abspath("./build/tmp")
makedirs_ok_if_exists(_LOCAL_TMP)
| 29.506173
| 99
| 0.449582
|
4a01c568bfff51604ce570b79e0f232db1c1cad5
| 1,797
|
py
|
Python
|
clustergrammer/proc_df_labels.py
|
scvannost/clustergrammer-py
|
6e5987694e52c5cabfd25a90f0bc628d95c09140
|
[
"MIT"
] | 13
|
2017-03-30T18:03:06.000Z
|
2021-12-09T17:36:12.000Z
|
clustergrammer/proc_df_labels.py
|
scvannost/clustergrammer-py
|
6e5987694e52c5cabfd25a90f0bc628d95c09140
|
[
"MIT"
] | 14
|
2016-11-06T00:57:23.000Z
|
2022-03-25T00:51:07.000Z
|
clustergrammer/proc_df_labels.py
|
scvannost/clustergrammer-py
|
6e5987694e52c5cabfd25a90f0bc628d95c09140
|
[
"MIT"
] | 13
|
2016-11-05T22:21:00.000Z
|
2021-05-05T14:52:51.000Z
|
def main(df):
'''
1) check that rows are strings (in case of numerical names)
2) check for tuples, and in that case load tuples to categories
'''
import numpy as np
from ast import literal_eval as make_tuple
test = {}
test['row'] = df['mat'].index.tolist()
test['col'] = df['mat'].columns.tolist()
# if type( test_row ) is not str and type( test_row ) is not tuple:
found_tuple = {}
found_number = {}
for inst_rc in ['row','col']:
inst_name = test[inst_rc][0]
found_tuple[inst_rc] = False
found_number[inst_rc] = False
if type(inst_name) != tuple:
if type(inst_name) is int or type(inst_name) is float or type(inst_name) is np.int64:
found_number[inst_rc] = True
else:
check_open = inst_name[0]
check_comma = inst_name.find(',')
check_close = inst_name[-1]
if check_open == '(' and check_close == ')' and check_comma > 0 \
and check_comma < len(inst_name):
found_tuple[inst_rc] = True
# convert to tuple if necessary
#################################################
if found_tuple['row']:
row_names = df['mat'].index.tolist()
row_names = [make_tuple(x) for x in row_names]
df['mat'].index = row_names
if found_tuple['col']:
col_names = df['mat'].columns.tolist()
col_names = [make_tuple(x) for x in col_names]
df['mat'].columns = col_names
# convert numbers to string if necessary
#################################################
if found_number['row']:
row_names = df['mat'].index.tolist()
row_names = [str(x) for x in row_names]
df['mat'].index = row_names
if found_number['col']:
col_names = df['mat'].columns.tolist()
col_names = [str(x) for x in col_names]
df['mat'].columns = col_names
return df
| 28.983871
| 91
| 0.597663
|
4a01c6645cdd4827e46964019e573b6cf25a2107
| 1,013
|
py
|
Python
|
from_python_community/is_happy_ip.py
|
ZaytsevNS/python_practice
|
109e14923a2ddeacc5360fd72947275afd2159e3
|
[
"MIT"
] | null | null | null |
from_python_community/is_happy_ip.py
|
ZaytsevNS/python_practice
|
109e14923a2ddeacc5360fd72947275afd2159e3
|
[
"MIT"
] | null | null | null |
from_python_community/is_happy_ip.py
|
ZaytsevNS/python_practice
|
109e14923a2ddeacc5360fd72947275afd2159e3
|
[
"MIT"
] | null | null | null |
# Условие:
# Найти «счастливый» ip. Он считается счастливым, если сумма двух байтов с левой стороны равняются сумме двух байтов с правой стороны.
# На вход функции всегда идёт строка с ipv4 адресом.
# Пример:
# is_happy_ip("255.255.255.255") -> True
# is_happy_ip("0.0.0.1") -> False
# is_happy_ip("101.78.170.9") -> True
import unittest
def is_happy_ip(ip: str) -> bool:
list_num = [int(i) for i in ip.split('.')]
if len(list_num) < 4:
return False
if sum(list_num[0:2]) == sum(list_num[2:4]):
return True
return False
class TestIsHappyIp(unittest.TestCase):
def test_one(self):
""" Should return True """
self.assertEqual(True, is_happy_ip('255.255.255.255'))
self.assertEqual(True, is_happy_ip('101.78.170.9'))
def test_two(self):
""" Should return False """
self.assertEqual(False, is_happy_ip('0.0.0.1'))
self.assertEqual(False, is_happy_ip('1.2.3'))
if __name__ == '__main__':
unittest.main()
| 28.138889
| 135
| 0.63771
|
4a01c70ad20038568712260355c5c0200d7d6187
| 379
|
py
|
Python
|
materials/sp20/hw/hw09/tests/q2_4.py
|
ds-modules/Deepnote-demo
|
548c12ced6cae774ecd0036aa1e8bb833af6472c
|
[
"BSD-3-Clause"
] | null | null | null |
materials/sp20/hw/hw09/tests/q2_4.py
|
ds-modules/Deepnote-demo
|
548c12ced6cae774ecd0036aa1e8bb833af6472c
|
[
"BSD-3-Clause"
] | null | null | null |
materials/sp20/hw/hw09/tests/q2_4.py
|
ds-modules/Deepnote-demo
|
548c12ced6cae774ecd0036aa1e8bb833af6472c
|
[
"BSD-3-Clause"
] | null | null | null |
test = {
'name': 'q2_4',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> 118 < np.mean(bootstrap_mean_based_estimates) < 126
True
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| 16.478261
| 65
| 0.361478
|
4a01ca28d74407355dcc04564275e7698bfa9f2e
| 375
|
py
|
Python
|
myssite/polls/urls.py
|
hotai1806/web-server-django
|
47ff074bce0a192749cb9b246bb700b97cc194d3
|
[
"Unlicense"
] | null | null | null |
myssite/polls/urls.py
|
hotai1806/web-server-django
|
47ff074bce0a192749cb9b246bb700b97cc194d3
|
[
"Unlicense"
] | null | null | null |
myssite/polls/urls.py
|
hotai1806/web-server-django
|
47ff074bce0a192749cb9b246bb700b97cc194d3
|
[
"Unlicense"
] | null | null | null |
from django.urls import path
from . import views
app_name = "polls"
urlpatterns = [
# # ex: /polls/5/vote/
path('', views.IndexView.as_view(), name='index'),
path('<int:question_id>/vote/', views.vote, name='vote'),
path('<int:pk>',views.DetailView.as_view(),name ='detail'),
path('<int:pk>/results/',views.ResultsView.as_view(),name ='results'),
]
| 28.846154
| 74
| 0.637333
|
4a01ca2f25f5df0f4142049dae8c84c0f786d873
| 20,232
|
py
|
Python
|
fewshot_re_kit/multiadversarial_framework.py
|
snji-khjuria/RelationClassificationFewShotModels
|
a5047f44a57a81ab3281bf1290fa149a4c456486
|
[
"MIT"
] | null | null | null |
fewshot_re_kit/multiadversarial_framework.py
|
snji-khjuria/RelationClassificationFewShotModels
|
a5047f44a57a81ab3281bf1290fa149a4c456486
|
[
"MIT"
] | null | null | null |
fewshot_re_kit/multiadversarial_framework.py
|
snji-khjuria/RelationClassificationFewShotModels
|
a5047f44a57a81ab3281bf1290fa149a4c456486
|
[
"MIT"
] | null | null | null |
import os
import sklearn.metrics
import numpy as np
import sys
import time
from . import sentence_encoder
from . import data_loader
import torch
from torch import autograd, optim, nn
from torch.autograd import Variable
from torch.nn import functional as F
from itertools import chain
from numpy.random import uniform
class FewShotAdversarialREModel(nn.Module):
def __init__(self):
'''
sentence_encoder: Sentence encoder
You need to set self.cost as your own loss function.
'''
nn.Module.__init__(self)
#self.cost = nn.NLLLoss()
self.cost = nn.CrossEntropyLoss()
#self.cost = nn.NLLLoss()
def forward(self, support, query, N, K, Q):
'''
support: Inputs of the support set.
query: Inputs of the query set.
N: Num of classes
K: Num of instances for each class in the support set
Q: Num of instances for each class in the query set
return: logits, pred
'''
raise NotImplementedError
def disc_loss(self, logits, label):
N = logits.size(-1)
return self.cost(logits.view(-1, N), label.view(-1))
def gen_loss(self, logits, label):
N = logits.size(-1)
return -1*self.cost(logits.view(-1, N), label.view(-1))
def accuracy(self, pred, label):
'''
pred: Prediction results with whatever size
label: Label with whatever size
return: [Accuracy] (A single value)
'''
return torch.mean((pred.view(-1) == label.view(-1)).type(torch.FloatTensor))
class FewShotAdversarialREFramework:
def __init__(self, train_data_loader, val_data_loader, test_data_loader):
'''
train_data_loader: DataLoader for training.
val_data_loader: DataLoader for validating.
test_data_loader: DataLoader for testing.
'''
self.train_data_loader = train_data_loader
self.val_data_loader = val_data_loader
self.test_data_loader = test_data_loader
def __load_model__(self, ckpt):
'''
ckpt: Path of the checkpoint
return: Checkpoint dict
'''
if os.path.isfile(ckpt):
checkpoint = torch.load(ckpt)
print("Successfully loaded checkpoint '%s'" % ckpt)
return checkpoint
else:
raise Exception("No checkpoint found at '%s'" % ckpt)
def item(self, x):
'''
PyTorch before and after 0.4
'''
torch_version = torch.__version__.split('.')
if int(torch_version[0]) == 0 and int(torch_version[1]) < 4:
return x[0]
else:
return x.item()
def create_task_stats(self, support, B, N, K, D):
support = support.view(B, 1, N * K, D)
max_pool_op = nn.AvgPool2d((K, 5), (K, 2))
support = max_pool_op(support)
task_stats = support.view(B, -1)
noise_vector = self.noise(B)
gen_input = torch.cat([task_stats, noise_vector], 1)
return gen_input
def noise(self, size):
n = torch.FloatTensor(size, 200).normal_().cuda()
# n = Variable(torch.cuda.randn(size, 200))
return n
def produce_query_set(self, generator, support, N, K, D):
support = support.view(-1, N, K, D)
B, _, _, _ = support.size()
gen_input = self.create_task_stats(support, B, N, K, D)
gen_output = generator(gen_input)
gen_output = gen_output.view(B, N*K, D)
return gen_output
def augment_support_set(self, generator, support, N, K, D):
support = support.view(-1, N, K, D)
#print("Support is ", support.size())
support_saved = support
B, _, _, _ = support.size()
gen_input = self.create_task_stats(support, B, N, K, D)
gen_output = generator(gen_input)
gen_output = gen_output.view(B, N, K, D)
resulting_support = torch.cat([support_saved, gen_output], 1)
resulting_support = resulting_support.view(-1, D)
return resulting_support
def train_disc(self, sentence_encoder, support, query, gen_model, disc_model, N_for_train, K, Q):
encoded_support = sentence_encoder(support)
_, D = encoded_support.size()
#print("encoded support set size is ", encoded_support.size())
#import sys
#sys.exit(1)
augmented_support = self.augment_support_set(gen_model, encoded_support, N_for_train, K, D)
encoded_query = sentence_encoder(query)
# print("encoded support set is ", encoded_support.size())
# print("augmented support set is ", augmented_support.size())
# print("encoded query is ", encoded_query.size())
# print("Batch size is ", B)
# print("size K is ", K)
# print("Dim is ", D)
# print("N before is ", N_for_train)
N_train = N_for_train*2
N_queries = N_for_train * Q
# print("training with classes ", N_train)
# print("training with queries ", N_queries)
logits, pred = disc_model(augmented_support, encoded_query, N_train, K, N_queries, True)
return logits, pred
# print("logits size is ", logits.size())
def train_gen(self, sentence_encoder, support, query, gen_model, disc_model, N_for_train, K):
gen_support_encoded = sentence_encoder(support)
_, D = gen_support_encoded.size()
augmented_support = self.augment_support_set(gen_model, gen_support_encoded, N_for_train, K, D)
fake_queries = self.produce_query_set(gen_model, gen_support_encoded, N_for_train, K, D)
gen_logits, gen_pred = disc_model(augmented_support, fake_queries, N_for_train*2, N_for_train*K, K, True)
return gen_logits, gen_pred
def train2(self, disc_model, gen_model, sentence_encoder,
model_name,
B, N_for_train, N_for_eval, K, Q, disc_learning_rate,
ckpt_dir='./checkpoint',
test_result_dir='./test_result',
learning_rate=1e-1,
lr_step_size=20000,
weight_decay=1e-5,
train_iter=30000,
val_iter=100,
val_step=200,
test_iter=3000,
gen_learning_rate=1e-6,
cuda=True,
pretrain_model=None,
optimizer=optim.SGD):
'''
model: a FewShotREModel instance
model_name: Name of the model
B: Batch size
N: Num of classes for each batch
K: Num of instances for each class in the support set
Q: Num of instances for each class in the query set
ckpt_dir: Directory of checkpoints
test_result_dir: Directory of test results
learning_rate: Initial learning rate
lr_step_size: Decay learning rate every lr_step_size steps
weight_decay: Rate of decaying weight
train_iter: Num of iterations of training
val_iter: Num of iterations of validating
val_step: Validate every val_step steps
test_iter: Num of iterations of testing
cuda: Use CUDA or not
pretrain_model: Pre-trained checkpoint path
'''
for i in range(100):
# learning_rate = 10** uniform(-6, -3)
# Init
disc_parameters = chain(disc_model.parameters(), sentence_encoder.parameters())
disc_parameters_to_optimize = filter(lambda x: x.requires_grad, disc_parameters)
gen_parameters_to_optimize = filter(lambda x: x.requires_grad, gen_model.parameters())
disc_optimizer = optim.Adam(disc_parameters_to_optimize, disc_learning_rate)
gen_optimizer = optim.Adam(gen_parameters_to_optimize, gen_learning_rate)
start_iter = 0
if cuda:
disc_model = disc_model.cuda()
gen_model = gen_model.cuda()
disc_model.train()
gen_model.train()
# Training
best_acc = 0
not_best_count = 0 # Stop training after several epochs without improvement.
# iter_loss = 0.0
# iter_right = 0.0
iter_sample = 0.0
disc_iter_loss = 0.0
disc_iter_right = 0.0
gen_iter_loss = 0.0
gen_iter_right = 0.0
for it in range(start_iter, start_iter + train_iter):
support, query, label_disc = self.train_data_loader.next_batch(B, N_for_train, K, Q)
logits_disc, pred_disc = self.train_disc(sentence_encoder, support, query, gen_model, disc_model,
N_for_train, K, Q)
disc_loss = disc_model.disc_loss(logits_disc, label_disc)
disc_right = disc_model.accuracy(pred_disc, label_disc)
disc_optimizer.zero_grad()
disc_loss.backward()
disc_optimizer.step()
disc_iter_loss += self.item(disc_loss.data)
disc_iter_right += self.item(disc_right.data)
iter_sample += 1
gen_support, gen_query, label_gen = self.train_data_loader.next_batch(B, N_for_train, K, Q)
# label_gen = produce_labels_for_generator(B, K)
label_gen = torch.ones((B, K)).new_full((B, K), N_for_train, dtype=torch.int64).cuda()
logits_gen, pred_gen = self.train_gen(sentence_encoder, support, query, gen_model, disc_model,
N_for_train,
K)
gen_loss = gen_model.gen_loss(logits_gen, label_gen)
gen_right = gen_model.accuracy(pred_gen, label_gen)
gen_optimizer.zero_grad()
gen_loss.backward()
gen_optimizer.step()
gen_iter_loss += self.item(gen_loss.data)
gen_iter_right += self.item(gen_right.data)
if it >= 400:
break
print('learning_rate: {} | disc loss: {:f}, gen loss: {:f} accuracy: {:f}%'.format(disc_learning_rate,
disc_iter_loss / iter_sample,
gen_iter_loss / iter_sample,
100 * disc_iter_right / iter_sample) + '\r')
return
#0.00044487909314557
def train(self, disc_model, gen_model, sentence_encoder,
model_name,
B, N_for_train, N_for_eval, K, Q,
ckpt_dir='./checkpoint',
test_result_dir='./test_result',
learning_rate=1e-1,
lr_step_size=30000,
weight_decay=1e-5,
train_iter=20000,
val_iter=1000,
val_step=300,
test_iter=3000,
disc_learning_rate=1e-4,
gen_learning_rate=1e-12,
cuda=True,
pretrain_model=None,
optimizer=optim.SGD):
'''
model: a FewShotREModel instance
model_name: Name of the model
B: Batch size
N: Num of classes for each batch
K: Num of instances for each class in the support set
Q: Num of instances for each class in the query set
ckpt_dir: Directory of checkpoints
test_result_dir: Directory of test results
learning_rate: Initial learning rate
lr_step_size: Decay learning rate every lr_step_size steps
weight_decay: Rate of decaying weight
train_iter: Num of iterations of training
val_iter: Num of iterations of validating
val_step: Validate every val_step steps
test_iter: Num of iterations of testing
cuda: Use CUDA or not
pretrain_model: Pre-trained checkpoint path
'''
print("Start training...")
# Init
disc_parameters = chain(disc_model.parameters(), sentence_encoder.parameters())
disc_parameters_to_optimize = filter(lambda x: x.requires_grad, disc_parameters)
gen_parameters_to_optimize = filter(lambda x: x.requires_grad, gen_model.parameters())
disc_optimizer = optim.Adam(disc_parameters_to_optimize, disc_learning_rate, amsgrad=True,
weight_decay=weight_decay)
gen_optimizer = optim.Adam(gen_parameters_to_optimize, gen_learning_rate, amsgrad=True,
weight_decay=weight_decay)
start_iter = 0
if cuda:
disc_model = disc_model.cuda()
gen_model = gen_model.cuda()
disc_model.train()
gen_model.train()
# Training
best_acc = 0
not_best_count = 0 # Stop training after several epochs without improvement.
# iter_loss = 0.0
# iter_right = 0.0
iter_sample = 0.0
disc_iter_loss = 0.0
disc_iter_right = 0.0
gen_iter_loss = 0.0
gen_iter_right = 0.0
with autograd.detect_anomaly():
for it in range(start_iter, start_iter + train_iter):
support, query, label_disc = self.train_data_loader.next_batch(B, N_for_train, K, Q)
logits_disc, pred_disc = self.train_disc(sentence_encoder, support, query, gen_model, disc_model,
N_for_train, K, Q)
disc_loss = disc_model.disc_loss(logits_disc, label_disc)
disc_right = disc_model.accuracy(pred_disc, label_disc)
disc_optimizer.zero_grad()
disc_loss.backward()
nn.utils.clip_grad_norm(disc_parameters_to_optimize, 10)
disc_optimizer.step()
disc_iter_loss += self.item(disc_loss.data)
disc_iter_right += self.item(disc_right.data)
iter_sample += 1
gen_support, gen_query, label_gen = self.train_data_loader.next_batch(B, N_for_train, K, Q)
# label_gen = produce_labels_for_generator(B, K)
label_gen = torch.ones((B, N_for_train*K)).new_full((B, N_for_train*K), N_for_train, dtype=torch.int64).cuda()
logits_gen, pred_gen = self.train_gen(sentence_encoder, support, query, gen_model, disc_model,
N_for_train, K)
gen_loss = gen_model.gen_loss(logits_gen, label_gen)
gen_right = gen_model.accuracy(pred_gen, label_gen)
gen_optimizer.zero_grad()
gen_loss.backward()
nn.utils.clip_grad_norm(gen_parameters_to_optimize, 10)
gen_optimizer.step()
gen_iter_loss += self.item(gen_loss.data)
gen_iter_right += self.item(gen_right.data)
sys.stdout.write('step: {} | disc loss: {:f}, gen loss: {:f} accuracy: {:f}%'.format(it + 1,
disc_iter_loss / iter_sample,
gen_iter_loss / iter_sample,
100 * disc_iter_right / iter_sample) + '\r')
# sys.stdout.write('step: {0:4} | disc loss: {1:2.6f}, gen loss: {1:2.6f} accuracy: {2:3.2f}%'.format(it + 1, disc_iter_loss / iter_sample, gen_iter_loss / iter_sample,
# 100 * disc_iter_right / iter_sample) + '\r')
sys.stdout.flush()
if it % val_step == 0:
disc_iter_loss = 0.
disc_iter_right = 0.
gen_iter_loss = 0.
gen_iter_right = 0.
iter_sample = 0.
if (it + 1) % val_step == 0:
acc = self.do_eval(disc_model, sentence_encoder, B, N_for_eval, K, Q, val_iter)
testing_acc = self.do_eval(disc_model, sentence_encoder, B, N_for_eval, K, Q, val_iter, True)
print("Validation acc: ", acc)
print("Testing acc: ", testing_acc)
disc_model.train()
if acc > best_acc:
print('Best checkpoint')
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
save_path = os.path.join(ckpt_dir, model_name + ".pth.tar")
torch.save({'state_dict': disc_model.state_dict()}, save_path)
best_acc = acc
print("\n####################\n")
print("Finish training " + model_name)
test_acc = self.eval(disc_model, B, N_for_eval, K, Q, test_iter,
ckpt=os.path.join(ckpt_dir, model_name + '.pth.tar'))
print("Test accuracy: {}".format(test_acc))
def do_eval(self,
model, sentence_encoder,
B, N, K, Q,
eval_iter, testing=False,
ckpt=None):
'''
model: a FewShotREModel instance
B: Batch size
N: Num of classes for each batch
K: Num of instances for each class in the support set
Q: Num of instances for each class in the query set
eval_iter: Num of iterations
ckpt: Checkpoint path. Set as None if using current model parameters.
return: Accuracy
'''
print("")
model.eval()
if testing==False:
eval_dataset = self.val_data_loader
else:
eval_dataset = self.test_data_loader
iter_right = 0.0
iter_sample = 0.0
for it in range(eval_iter):
support, query, label = eval_dataset.next_batch(B, N, K, Q)
support = sentence_encoder(support)
query = sentence_encoder(query)
# print("support set is ", support.size())
# print("query set is ", query.size())
logits, pred = model(support, query, N, K, N * Q)
right = model.accuracy(pred, label)
iter_right += self.item(right.data)
iter_sample += 1
sys.stdout.write(
'[EVAL] step: {0:4} | accuracy: {1:3.2f}%'.format(it + 1, 100 * iter_right / iter_sample) + '\r')
sys.stdout.flush()
print("")
return iter_right / iter_sample
def eval(self,
model, sentence_encoder,
B, N, K, Q,
eval_iter,
ckpt=None):
'''
model: a FewShotREModel instance
B: Batch size
N: Num of classes for each batch
K: Num of instances for each class in the support set
Q: Num of instances for each class in the query set
eval_iter: Num of iterations
ckpt: Checkpoint path. Set as None if using current model parameters.
return: Accuracy
'''
print("")
model.eval()
if ckpt is None:
eval_dataset = self.val_data_loader
else:
checkpoint = self.__load_model__(ckpt)
model.load_state_dict(checkpoint['state_dict'])
eval_dataset = self.test_data_loader
iter_right = 0.0
iter_sample = 0.0
for it in range(eval_iter):
support, query, label = eval_dataset.next_batch(B, N, K, Q)
support = sentence_encoder(support)
query = sentence_encoder(query)
# print("support set is ", support.size())
# print("query set is ", query.size())
logits, pred = model(support, query, N, K, N * Q)
right = model.accuracy(pred, label)
iter_right += self.item(right.data)
iter_sample += 1
sys.stdout.write(
'[EVAL] step: {0:4} | accuracy: {1:3.2f}%'.format(it + 1, 100 * iter_right / iter_sample) + '\r')
sys.stdout.flush()
print("")
return iter_right / iter_sample
| 44.271335
| 184
| 0.564304
|
4a01cb19e48cb06ea6831681ce677d691c2fee27
| 208
|
py
|
Python
|
sar_objects/__init__.py
|
goldmanm/atmospheric-sar-comparison
|
a0d84a27b0fd23a1ed592a6bc859e8d5b054fc47
|
[
"MIT"
] | null | null | null |
sar_objects/__init__.py
|
goldmanm/atmospheric-sar-comparison
|
a0d84a27b0fd23a1ed592a6bc859e8d5b054fc47
|
[
"MIT"
] | null | null | null |
sar_objects/__init__.py
|
goldmanm/atmospheric-sar-comparison
|
a0d84a27b0fd23a1ed592a6bc859e8d5b054fc47
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from atkinson2007 import sar as sar_a
from vereecken2009 import sar as sar_v
from orlando2003 import sar as sar_o
from mereau2000 import sar as sar_m
sars = [sar_a,sar_v,sar_o,sar_m]
| 26
| 38
| 0.769231
|
4a01cc86e7227bac8d7a7762643ec704b0b8e337
| 7,223
|
py
|
Python
|
Python scripts/Rookie/pathing.py
|
shartrooper/My-python-scripts
|
5c3a8db4ed9a75bd9ab4b29153a788d9e6c5d28c
|
[
"MIT"
] | null | null | null |
Python scripts/Rookie/pathing.py
|
shartrooper/My-python-scripts
|
5c3a8db4ed9a75bd9ab4b29153a788d9e6c5d28c
|
[
"MIT"
] | null | null | null |
Python scripts/Rookie/pathing.py
|
shartrooper/My-python-scripts
|
5c3a8db4ed9a75bd9ab4b29153a788d9e6c5d28c
|
[
"MIT"
] | null | null | null |
from pathlib import Path
myFiles = ['accounts.txt', 'details.csv', 'invite.docx']
for filename in myFiles:
print(Path(r'C:\Users\Al', filename))
#C:\Users\Al\accounts.txt
#C:\Users\Al\details.csv
#C:\Users\Al\invite.docx
Path('spam', 'bacon', 'eggs')
#WindowsPath('spam/bacon/eggs')
str(Path('spam', 'bacon', 'eggs'))
'spam\\bacon\\eggs'
Path('spam') / 'bacon' / 'eggs'
#WindowsPath('spam/bacon/eggs')
Path('spam') / Path('bacon/eggs')
#WindowsPath('spam/bacon/eggs')
Path('spam') / Path('bacon', 'eggs')
#WindowsPath('spam/bacon/eggs')
homeFolder = Path('C:/Users/Al')
subFolder = Path('spam')
homeFolder / subFolder
WindowsPath('C:/Users/Al/spam')
str(homeFolder / subFolder)
'C:\\Users\\Al\\spam'
#The Current Working Directory
import os
Path.cwd()
#WindowsPath('C:/Users/Al/AppData/Local/Programs/Python/Python37')'
os.chdir('C:\\Windows\\System32')
Path.cwd()
#WindowsPath('C:/Windows/System32')
#The Home Directory
Path.home()
#WindowsPath('C:/Users/Al')
#Creating New Folders Using the os.makedirs() Function
import os
os.makedirs('C:\\delicious\\walnut\\waffles')
Path(r'C:\Users\Al\spam').mkdir()
# Handling Absolute and Relative Paths
Path.cwd()
WindowsPath('C:/Users/Al/AppData/Local/Programs/Python/Python37')
Path.cwd().is_absolute()
True
Path('spam/bacon/eggs').is_absolute()
False
os.path.abspath('.')
'C:\\Users\\Al\\AppData\\Local\\Programs\\Python\\Python37'
os.path.abspath('.\\Scripts')
'C:\\Users\\Al\\AppData\\Local\\Programs\\Python\\Python37\\Scripts'
os.path.isabs('.')
False
os.path.isabs(os.path.abspath('.'))
True
os.path.relpath('C:\\Windows', 'C:\\')
'Windows'
os.path.relpath('C:\\Windows', 'C:\\spam\\eggs')
'..\\..\\Windows'
#Getting the Parts of a File Path
p = Path('C:/Users/Al/spam.txt')
p.anchor
'C:\\'
p.parent # This is a Path object, not a string.
WindowsPath('C:/Users/Al')
p.name
'spam.txt'
p.stem
'spam'
p.suffix
'.txt'
p.drive
'C:'
##The parents attribute (which is different from the parent attribute) evaluates to the ancestor folders of a Path object with an integer index:
Path.cwd()
#WindowsPath('C:/Users/Al/AppData/Local/Programs/Python/Python37')
Path.cwd().parents[0]
#WindowsPath('C:/Users/Al/AppData/Local/Programs/Python')
Path.cwd().parents[1]
#WindowsPath('C:/Users/Al/AppData/Local/Programs')
Path.cwd().parents[2]
#WindowsPath('C:/Users/Al/AppData/Local')
Path.cwd().parents[3]
#WindowsPath('C:/Users/Al/AppData')
Path.cwd().parents[4]
#WindowsPath('C:/Users/Al')
Path.cwd().parents[5]
#WindowsPath('C:/Users')
Path.cwd().parents[6]
#WindowsPath('C:/')
#The older os.path module also has similar functions for getting the different parts of a path written in a string value.
calcFilePath = 'C:\\Windows\\System32\\calc.exe'
os.path.basename(calcFilePath)
#'calc.exe'
os.path.dirname(calcFilePath)
#'C:\\Windows\\System32'
#If you need a path’s dir name and base name together, you can just call os.path.split() to get a tuple
calcFilePath = 'C:\\Windows\\System32\\calc.exe'
os.path.split(calcFilePath)
#('C:\\Windows\\System32', 'calc.exe')
'''
Also, note that os.path.split() does not take a file path and return a list of strings of each folder.
For that, use the split() string method and split on the string in os.sep. (Note that sep is in os, not os.path.)
The os.sep variable is set to the correct folder-separating slash for the computer running the program, '\\' on Windows
and '/' on macOS and Linux, and splitting on it will return a list of the individual folders.
'''
calcFilePath.split(os.sep)
#['C:', 'Windows', 'System32', 'calc.exe']
##Finding File Sizes and Folder Contents
'''
The os.path module provides functions for finding the size of a file in bytes and the files and folders inside a given folder:
Calling os.path.getsize(path) will return the size in bytes of the file in the path argument.
Calling os.listdir(path) will return a list of filename strings for each file in the path argument. (Note that this function is in the os module, not os.path.)
'''
os.path.getsize('C:\\Windows\\System32\\calc.exe')
#27648
os.listdir('C:\\Windows\\System32')
#['0409', '12520437.cpx', '12520850.cpx', '5U877.ax', 'aaclient.dll',
#--snip--
#'xwtpdui.dll', 'xwtpw32.dll', 'zh-CN', 'zh-HK', 'zh-TW', 'zipfldr.dll']
totalSize = 0
for filename in os.listdir('C:\\Windows\\System32'):
totalSize = totalSize + os.path.getsize(os.path.join('C:\\Windows\\System32', filename))
print(totalSize)
##Modifying a List of Files Using Glob Patterns
p = Path('C:/Users/Al/Desktop')
p.glob('*')
#<generator object Path.glob at 0x000002A6E389DED0>
#The asterisk (*) stands for “multiple of any characters,”
#so p.glob('*') returns a generator of all files in the path stored in p.
'''
# Example: exploring a folder with pathlib glob patterns.
# NOTE(review): the original fragment was a pasted REPL transcript with
# unbalanced parentheses (e.g. ``list(p.glob('*.txt')``), bare un-commented
# output lines, and variables queried before they were assigned. Rewritten
# as a runnable script; the expected REPL output is kept in comments.
from pathlib import Path

p = Path('C:/Users/Al/Desktop')

# The asterisk (*) matches every entry directly inside the folder.
list(p.glob('*'))  # Make a list from the generator.
# [WindowsPath('C:/Users/Al/Desktop/1.png'), WindowsPath('C:/Users/Al/
# Desktop/22-ap.pdf'), WindowsPath('C:/Users/Al/Desktop/cat.jpg'),
# --snip--
# WindowsPath('C:/Users/Al/Desktop/zzz.txt')]

list(p.glob('*.txt'))  # Lists all text files.
# [WindowsPath('C:/Users/Al/Desktop/foo.txt'),
# --snip--
# WindowsPath('C:/Users/Al/Desktop/zzz.txt')]

# In contrast with the asterisk, the question mark (?) stands for any
# single character:
list(p.glob('project?.docx'))
# [WindowsPath('C:/Users/Al/Desktop/project1.docx'), WindowsPath('C:/Users/Al/
# Desktop/project2.docx'),
# --snip--
# WindowsPath('C:/Users/Al/Desktop/project9.docx')]

# Combined: files with any name and any three-character extension whose
# middle character is an 'x'.
list(p.glob('*.?x?'))
# [WindowsPath('C:/Users/Al/Desktop/calc.exe'), WindowsPath('C:/Users/Al/
# Desktop/foo.txt'),
# --snip--
# WindowsPath('C:/Users/Al/Desktop/zzz.txt')]

# Glob results can be iterated directly.
for textFilePathObj in p.glob('*.txt'):
    print(textFilePathObj)  # Prints the Path object as a string.
    # Do something with the text file here.
# C:\Users\Al\Desktop\foo.txt
# C:\Users\Al\Desktop\spam.txt
# C:\Users\Al\Desktop\zzz.txt

# Checking Path Validity
# Many Python functions will crash with an error if you supply them with a
# path that does not exist. Path objects offer:
#   p.exists()  -> True if the path exists, False otherwise.
#   p.is_file() -> True if the path exists and is a file.
#   p.is_dir()  -> True if the path exists and is a directory.

# Assign the paths BEFORE querying them (the original transcript used
# these names before they were defined).
winDir = Path('C:/Windows')
notExistsDir = Path('C:/This/Folder/Does/Not/Exist')
calcFile = Path('C:/Windows/System32/calc.exe')

# You can also determine whether a DVD or flash drive is currently
# attached to the computer by checking for its path with exists().
winDir.exists()        # True
winDir.is_dir()        # True
notExistsDir.exists()  # False
calcFile.is_file()     # True
calcFile.is_dir()      # False
| 29.125
| 164
| 0.684065
|
4a01cc8e250b59a0b35cb25434c70589866d9d5e
| 28,702
|
py
|
Python
|
sqlite_utils/cli.py
|
tomviner/sqlite-utils
|
367082e787101fb90901ef3214804ab23a92ce46
|
[
"Apache-2.0"
] | 1
|
2020-09-26T23:57:02.000Z
|
2020-09-26T23:57:02.000Z
|
sqlite_utils/cli.py
|
simonwiles/sqlite-utils
|
367082e787101fb90901ef3214804ab23a92ce46
|
[
"Apache-2.0"
] | null | null | null |
sqlite_utils/cli.py
|
simonwiles/sqlite-utils
|
367082e787101fb90901ef3214804ab23a92ce46
|
[
"Apache-2.0"
] | null | null | null |
import base64
import click
from click_default_group import DefaultGroup
from datetime import datetime
import hashlib
import pathlib
import sqlite_utils
from sqlite_utils.db import AlterError
import itertools
import json
import sys
import csv as csv_std
import tabulate
from .utils import sqlite3, decode_base64_values
# Column types accepted by the ``create-table`` command; user input is
# upper-cased before being checked against this tuple.
VALID_COLUMN_TYPES = ("INTEGER", "TEXT", "FLOAT", "BLOB")
def output_options(fn):
    """Decorator adding the shared output-format options (--nl, --arrays,
    -c/--csv, --no-headers, -t/--table, -f/--fmt, --json-cols) to a command.
    """
    # Apply in reversed order so the options appear in declaration order
    # in --help output (each click.option wraps the previous result).
    for decorator in reversed(
        (
            click.option(
                "--nl",
                help="Output newline-delimited JSON",
                is_flag=True,
                default=False,
            ),
            click.option(
                "--arrays",
                help="Output rows as arrays instead of objects",
                is_flag=True,
                default=False,
            ),
            click.option("-c", "--csv", is_flag=True, help="Output CSV"),
            click.option("--no-headers", is_flag=True, help="Omit CSV headers"),
            click.option("-t", "--table", is_flag=True, help="Output as a table"),
            click.option(
                "-f",
                "--fmt",
                help="Table format - one of {}".format(
                    ", ".join(tabulate.tabulate_formats)
                ),
                default="simple",
            ),
            click.option(
                "--json-cols",
                help="Detect JSON cols and output them as JSON, not escaped strings",
                is_flag=True,
                default=False,
            ),
        )
    ):
        fn = decorator(fn)
    return fn
# Root command group. DefaultGroup makes ``query`` the default subcommand,
# so ``sqlite-utils data.db "select ..."`` works without naming it.
@click.group(cls=DefaultGroup, default="query", default_if_no_args=True)
@click.version_option()
def cli():
    "Commands for interacting with a SQLite database"
    pass
@cli.command()
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.option(
    "--fts4", help="Just show FTS4 enabled tables", default=False, is_flag=True
)
@click.option(
    "--fts5", help="Just show FTS5 enabled tables", default=False, is_flag=True
)
@click.option(
    "--counts", help="Include row counts per table", default=False, is_flag=True
)
@output_options
@click.option(
    "--columns",
    help="Include list of columns for each table",
    is_flag=True,
    default=False,
)
@click.option(
    "--schema",
    help="Include schema for each table",
    is_flag=True,
    default=False,
)
def tables(
    path,
    fts4,
    fts5,
    counts,
    nl,
    arrays,
    csv,
    no_headers,
    table,
    fmt,
    json_cols,
    columns,
    schema,
    # Not a CLI option: the ``views`` command re-invokes this callback with
    # views=True to list views instead of tables.
    views=False,
):
    """List the tables in the database"""
    db = sqlite_utils.Database(path)
    headers = ["view" if views else "table"]
    if counts:
        headers.append("count")
    if columns:
        headers.append("columns")
    if schema:
        headers.append("schema")
    # Generator so row counts / schemas are computed lazily per table.
    def _iter():
        if views:
            items = db.view_names()
        else:
            items = db.table_names(fts4=fts4, fts5=fts5)
        for name in items:
            row = [name]
            if counts:
                row.append(db[name].count)
            if columns:
                cols = [c.name for c in db[name].columns]
                if csv:
                    # CSV cells cannot hold lists; join column names instead.
                    row.append("\n".join(cols))
                else:
                    row.append(cols)
            if schema:
                row.append(db[name].schema)
            yield row
    # Dispatch on output format: table > csv > JSON (default).
    if table:
        print(tabulate.tabulate(_iter(), headers=headers, tablefmt=fmt))
    elif csv:
        writer = csv_std.writer(sys.stdout)
        if not no_headers:
            writer.writerow(headers)
        for row in _iter():
            writer.writerow(row)
    else:
        for line in output_rows(_iter(), headers, nl, arrays, json_cols):
            click.echo(line)
@cli.command()
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.option(
    "--counts", help="Include row counts per view", default=False, is_flag=True
)
@output_options
@click.option(
    "--columns",
    help="Include list of columns for each view",
    is_flag=True,
    default=False,
)
@click.option(
    "--schema",
    help="Include schema for each view",
    is_flag=True,
    default=False,
)
def views(
    path,
    counts,
    nl,
    arrays,
    csv,
    no_headers,
    table,
    fmt,
    json_cols,
    columns,
    schema,
):
    """List the views in the database"""
    # Delegates to the ``tables`` command's callback with views=True; the
    # FTS filters do not apply to views so they are pinned to False.
    tables.callback(
        path=path,
        fts4=False,
        fts5=False,
        counts=counts,
        nl=nl,
        arrays=arrays,
        csv=csv,
        no_headers=no_headers,
        table=table,
        fmt=fmt,
        json_cols=json_cols,
        columns=columns,
        schema=schema,
        views=True,
    )
@cli.command()
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
def vacuum(path):
    """Run VACUUM against the database"""
    # VACUUM rebuilds the database file, reclaiming free pages.
    sqlite_utils.Database(path).vacuum()
@cli.command()
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument("tables", nargs=-1)
@click.option("--no-vacuum", help="Don't run VACUUM", default=False, is_flag=True)
def optimize(path, tables, no_vacuum):
    """Optimize all FTS tables and then run VACUUM - should shrink the database file"""
    db = sqlite_utils.Database(path)
    # If no tables were named, optimize every FTS4 and FTS5 table.
    if not tables:
        tables = db.table_names(fts4=True) + db.table_names(fts5=True)
    # Run all optimizations in a single transaction.
    with db.conn:
        for table in tables:
            db[table].optimize()
    if not no_vacuum:
        db.vacuum()
@cli.command(name="rebuild-fts")
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument("tables", nargs=-1)
def rebuild_fts(path, tables):
    """Rebuild specific FTS tables, or all FTS tables if none are specified"""
    db = sqlite_utils.Database(path)
    # Default to every FTS4 and FTS5 table in the database.
    if not tables:
        tables = db.table_names(fts4=True) + db.table_names(fts5=True)
    # Rebuild all of them inside a single transaction.
    with db.conn:
        for table in tables:
            db[table].rebuild_fts()
# NOTE(review): this is a byte-for-byte duplicate of the ``vacuum`` command
# defined earlier in this module; registering it again simply replaces the
# earlier registration under the same "vacuum" name. One of the two
# definitions should be removed.
@cli.command()
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
def vacuum(path):
    """Run VACUUM against the database"""
    sqlite_utils.Database(path).vacuum()
@cli.command(name="add-column")
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument("table")
@click.argument("col_name")
@click.argument(
    "col_type",
    type=click.Choice(
        ["integer", "float", "blob", "text", "INTEGER", "FLOAT", "BLOB", "TEXT"]
    ),
    required=False,
)
@click.option(
    "--fk", type=str, required=False, help="Table to reference as a foreign key"
)
@click.option(
    "--fk-col",
    type=str,
    required=False,
    help="Referenced column on that foreign key table - if omitted will automatically use the primary key",
)
@click.option(
    "--not-null-default",
    type=str,
    required=False,
    help="Add NOT NULL DEFAULT 'TEXT' constraint",
)
def add_column(path, table, col_name, col_type, fk, fk_col, not_null_default):
    "Add a column to the specified table"
    db = sqlite_utils.Database(path)
    # col_type may be None, in which case the library picks its default type.
    db[table].add_column(
        col_name, col_type, fk=fk, fk_col=fk_col, not_null_default=not_null_default
    )
@cli.command(name="add-foreign-key")
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument("table")
@click.argument("column")
@click.argument("other_table", required=False)
@click.argument("other_column", required=False)
def add_foreign_key(path, table, column, other_table, other_column):
    """
    Add a new foreign key constraint to an existing table. Example usage:
    $ sqlite-utils add-foreign-key my.db books author_id authors id
    WARNING: Could corrupt your database! Back up your database file first.
    """
    db = sqlite_utils.Database(path)
    try:
        db[table].add_foreign_key(column, other_table, other_column)
    except AlterError as e:
        # ClickException expects its message as a string; pass str(e) rather
        # than the exception object so the rendered error is always readable.
        raise click.ClickException(str(e))
@cli.command(name="index-foreign-keys")
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
def index_foreign_keys(path):
    """
    Ensure every foreign key column has an index on it.
    """
    # Delegates entirely to the library; creates indexes only where missing.
    db = sqlite_utils.Database(path)
    db.index_foreign_keys()
@cli.command(name="create-index")
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument("table")
@click.argument("column", nargs=-1, required=True)
@click.option("--name", help="Explicit name for the new index")
@click.option("--unique", help="Make this a unique index", default=False, is_flag=True)
@click.option(
    "--if-not-exists",
    help="Ignore if index already exists",
    default=False,
    is_flag=True,
)
def create_index(path, table, column, name, unique, if_not_exists):
    "Add an index to the specified table covering the specified columns"
    db = sqlite_utils.Database(path)
    # ``column`` is a tuple of one or more column names (nargs=-1).
    db[table].create_index(
        column, index_name=name, unique=unique, if_not_exists=if_not_exists
    )
@cli.command(name="enable-fts")
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument("table")
@click.argument("column", nargs=-1, required=True)
@click.option("--fts4", help="Use FTS4", default=False, is_flag=True)
@click.option("--fts5", help="Use FTS5", default=False, is_flag=True)
@click.option("--tokenize", help="Tokenizer to use, e.g. porter")
@click.option(
    "--create-triggers",
    help="Create triggers to update the FTS tables when the parent table changes.",
    default=False,
    is_flag=True,
)
def enable_fts(path, table, column, fts4, fts5, tokenize, create_triggers):
    "Enable FTS for specific table and columns"
    # FTS5 is the default; --fts4 selects the older implementation.
    fts_version = "FTS5"
    if fts4 and fts5:
        # NOTE(review): this error path returns normally, so the process
        # exits with status 0 despite the invalid flag combination.
        click.echo("Can only use one of --fts4 or --fts5", err=True)
        return
    elif fts4:
        fts_version = "FTS4"
    db = sqlite_utils.Database(path)
    db[table].enable_fts(
        column,
        fts_version=fts_version,
        tokenize=tokenize,
        create_triggers=create_triggers,
    )
@cli.command(name="populate-fts")
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument("table")
@click.argument("column", nargs=-1, required=True)
def populate_fts(path, table, column):
    "Re-populate FTS for specific table and columns"
    db = sqlite_utils.Database(path)
    db[table].populate_fts(column)
@cli.command(name="disable-fts")
@click.argument(
    "path",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument("table")
def disable_fts(path, table):
    "Disable FTS for specific table"
    db = sqlite_utils.Database(path)
    db[table].disable_fts()
@cli.command(name="enable-wal")
@click.argument(
    "path",
    nargs=-1,
    type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
def enable_wal(path):
    "Enable WAL for database files"
    # ``path`` is a tuple (nargs=-1): the command accepts multiple files.
    for path_ in path:
        sqlite_utils.Database(path_).enable_wal()
@cli.command(name="disable-wal")
@click.argument(
    "path",
    nargs=-1,
    type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
def disable_wal(path):
    "Disable WAL for database files"
    # ``path`` is a tuple (nargs=-1): the command accepts multiple files.
    for path_ in path:
        sqlite_utils.Database(path_).disable_wal()
def insert_upsert_options(fn):
    """Decorator adding the arguments/options shared by ``insert`` and
    ``upsert`` (path, table, json_file, --pk, --nl, --csv, --tsv,
    --batch-size, --alter, --not-null, --default)."""
    # Apply in reversed order so the parameters appear in declaration order
    # in --help output.
    for decorator in reversed(
        (
            click.argument(
                "path",
                type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
                required=True,
            ),
            click.argument("table"),
            click.argument("json_file", type=click.File(), required=True),
            click.option(
                "--pk", help="Columns to use as the primary key, e.g. id", multiple=True
            ),
            click.option("--nl", is_flag=True, help="Expect newline-delimited JSON"),
            click.option("-c", "--csv", is_flag=True, help="Expect CSV"),
            click.option("--tsv", is_flag=True, help="Expect TSV"),
            click.option(
                "--batch-size", type=int, default=100, help="Commit every X records"
            ),
            click.option(
                "--alter",
                is_flag=True,
                help="Alter existing table to add any missing columns",
            ),
            click.option(
                "--not-null",
                multiple=True,
                help="Columns that should be created as NOT NULL",
            ),
            click.option(
                "--default",
                multiple=True,
                type=(str, str),
                help="Default value that should be set for a column",
            ),
        )
    ):
        fn = decorator(fn)
    return fn
def insert_upsert_implementation(
    path,
    table,
    json_file,
    pk,
    nl,
    csv,
    tsv,
    batch_size,
    alter,
    upsert,
    ignore=False,
    replace=False,
    truncate=False,
    not_null=None,
    default=None,
):
    """Shared worker behind the ``insert`` and ``upsert`` commands.

    Parses json_file as JSON, newline-delimited JSON, CSV or TSV and bulk
    loads the records into ``table``, creating it if necessary.
    """
    db = sqlite_utils.Database(path)
    # Flags are booleans, so their sum counts how many formats were requested.
    if (nl + csv + tsv) >= 2:
        raise click.ClickException("Use just one of --nl, --csv or --tsv")
    # A single --pk is passed as a scalar; multiple values stay a tuple
    # (compound primary key).
    if pk and len(pk) == 1:
        pk = pk[0]
    if csv or tsv:
        dialect = "excel-tab" if tsv else "excel"
        reader = csv_std.reader(json_file, dialect=dialect)
        # First row supplies the column names for every following row.
        headers = next(reader)
        docs = (dict(zip(headers, row)) for row in reader)
    elif nl:
        # One JSON document per line, parsed lazily.
        docs = (json.loads(line) for line in json_file)
    else:
        docs = json.load(json_file)
        # A single top-level object is treated as a one-record list.
        if isinstance(docs, dict):
            docs = [docs]
    extra_kwargs = {"ignore": ignore, "replace": replace, "truncate": truncate}
    if not_null:
        extra_kwargs["not_null"] = set(not_null)
    if default:
        # --default pairs arrive as ((column, value), ...) tuples.
        extra_kwargs["defaults"] = dict(default)
    if upsert:
        extra_kwargs["upsert"] = upsert
    # Apply {"$base64": true, ...} decoding, if needed
    docs = (decode_base64_values(doc) for doc in docs)
    db[table].insert_all(
        docs, pk=pk, batch_size=batch_size, alter=alter, **extra_kwargs
    )
@cli.command()
@insert_upsert_options
@click.option(
    "--ignore", is_flag=True, default=False, help="Ignore records if pk already exists"
)
@click.option(
    "--replace",
    is_flag=True,
    default=False,
    help="Replace records if pk already exists",
)
@click.option(
    "--truncate",
    is_flag=True,
    default=False,
    help="Truncate table before inserting records, if table already exists",
)
def insert(
    path,
    table,
    json_file,
    pk,
    nl,
    csv,
    tsv,
    batch_size,
    alter,
    ignore,
    replace,
    truncate,
    not_null,
    default,
):
    """
    Insert records from JSON file into a table, creating the table if it
    does not already exist.
    Input should be a JSON array of objects, unless --nl or --csv is used.
    """
    # Thin wrapper: all parsing/loading lives in insert_upsert_implementation.
    insert_upsert_implementation(
        path,
        table,
        json_file,
        pk,
        nl,
        csv,
        tsv,
        batch_size,
        alter=alter,
        upsert=False,
        ignore=ignore,
        replace=replace,
        truncate=truncate,
        not_null=not_null,
        default=default,
    )
@cli.command()
@insert_upsert_options
def upsert(
    path, table, json_file, pk, nl, csv, tsv, batch_size, alter, not_null, default
):
    """
    Upsert records based on their primary key. Works like 'insert' but if
    an incoming record has a primary key that matches an existing record
    the existing record will be updated.
    """
    # Thin wrapper: identical to ``insert`` except upsert=True.
    insert_upsert_implementation(
        path,
        table,
        json_file,
        pk,
        nl,
        csv,
        tsv,
        batch_size,
        alter=alter,
        upsert=True,
        not_null=not_null,
        default=default,
    )
@cli.command(name="create-table")
@click.argument(
    "path",
    type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument("table")
@click.argument("columns", nargs=-1, required=True)
@click.option("--pk", help="Column to use as primary key")
@click.option(
    "--not-null",
    multiple=True,
    help="Columns that should be created as NOT NULL",
)
@click.option(
    "--default",
    multiple=True,
    type=(str, str),
    help="Default value that should be set for a column",
)
@click.option(
    "--fk",
    multiple=True,
    type=(str, str, str),
    help="Column, other table, other column to set as a foreign key",
)
@click.option(
    "--ignore",
    is_flag=True,
    help="If table already exists, do nothing",
)
@click.option(
    "--replace",
    is_flag=True,
    help="If table already exists, replace it",
)
def create_table(path, table, columns, pk, not_null, default, fk, ignore, replace):
    # Fixed: docstring (shown as --help text) was copy-pasted from
    # create-index and described the wrong operation.
    "Add a table with the specified columns. Columns should be specified using name, type pairs."
    db = sqlite_utils.Database(path)
    # ``columns`` arrives flat: name1 type1 name2 type2 ...
    if len(columns) % 2 == 1:
        raise click.ClickException(
            "columns must be an even number of 'name' 'type' pairs"
        )
    coltypes = {}
    columns = list(columns)
    while columns:
        name = columns.pop(0)
        ctype = columns.pop(0)
        if ctype.upper() not in VALID_COLUMN_TYPES:
            raise click.ClickException(
                "column types must be one of {}".format(VALID_COLUMN_TYPES)
            )
        coltypes[name] = ctype.upper()
    # Does table already exist?
    if table in db.table_names():
        if ignore:
            return
        elif replace:
            db[table].drop()
        else:
            raise click.ClickException(
                'Table "{}" already exists. Use --replace to delete and replace it.'.format(
                    table
                )
            )
    db[table].create(
        coltypes, pk=pk, not_null=not_null, defaults=dict(default), foreign_keys=fk
    )
@cli.command(name="drop-table")
@click.argument(
    "path",
    type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument("table")
def drop_table(path, table):
    "Drop the specified table"
    db = sqlite_utils.Database(path)
    # Check first so a missing table produces a friendly CLI error rather
    # than a raw SQLite exception.
    if table in db.table_names():
        db[table].drop()
    else:
        raise click.ClickException('Table "{}" does not exist'.format(table))
@cli.command(name="create-view")
@click.argument(
    "path",
    type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument("view")
@click.argument("select")
@click.option(
    "--ignore",
    is_flag=True,
    help="If view already exists, do nothing",
)
@click.option(
    "--replace",
    is_flag=True,
    help="If view already exists, replace it",
)
def create_view(path, view, select, ignore, replace):
    "Create a view for the provided SELECT query"
    db = sqlite_utils.Database(path)
    # Does view already exist?
    if view in db.view_names():
        if ignore:
            return
        elif replace:
            db[view].drop()
        else:
            raise click.ClickException(
                'View "{}" already exists. Use --replace to delete and replace it.'.format(
                    view
                )
            )
    db.create_view(view, select)
@cli.command(name="drop-view")
@click.argument(
    "path",
    type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument("view")
def drop_view(path, view):
    "Drop the specified view"
    db = sqlite_utils.Database(path)
    # Check first so a missing view produces a friendly CLI error rather
    # than a raw SQLite exception.
    if view in db.view_names():
        db[view].drop()
    else:
        raise click.ClickException('View "{}" does not exist'.format(view))
@cli.command()
@click.argument(
    "path",
    type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument("sql")
@output_options
@click.option("-r", "--raw", is_flag=True, help="Raw output, first column of first row")
@click.option(
    "-p",
    "--param",
    multiple=True,
    type=(str, str),
    help="Named :parameters for SQL query",
)
@click.option(
    "--load-extension",
    multiple=True,
    help="SQLite extensions to load",
)
def query(
    path,
    sql,
    nl,
    arrays,
    csv,
    no_headers,
    table,
    fmt,
    json_cols,
    raw,
    param,
    load_extension,
):
    "Execute SQL query and return the results as JSON"
    db = sqlite_utils.Database(path)
    if load_extension:
        # Extension loading must be explicitly enabled before load_extension().
        db.conn.enable_load_extension(True)
        for ext in load_extension:
            db.conn.load_extension(ext)
    # Run inside a transaction so INSERT/UPDATE statements are committed.
    with db.conn:
        cursor = db.execute(sql, dict(param))
        if cursor.description is None:
            # This was an update/insert
            headers = ["rows_affected"]
            cursor = [[cursor.rowcount]]
        else:
            headers = [c[0] for c in cursor.description]
        # Output dispatch: raw > table > csv > JSON (default).
        if raw:
            data = cursor.fetchone()[0]
            # Bytes go straight to the binary stdout buffer; everything
            # else is stringified.
            if isinstance(data, bytes):
                sys.stdout.buffer.write(data)
            else:
                sys.stdout.write(str(data))
        elif table:
            print(tabulate.tabulate(list(cursor), headers=headers, tablefmt=fmt))
        elif csv:
            writer = csv_std.writer(sys.stdout)
            if not no_headers:
                writer.writerow(headers)
            for row in cursor:
                writer.writerow(row)
        else:
            for line in output_rows(cursor, headers, nl, arrays, json_cols):
                click.echo(line)
@cli.command()
@click.argument(
    "path",
    type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument("dbtable")
@output_options
@click.pass_context
def rows(ctx, path, dbtable, nl, arrays, csv, no_headers, table, fmt, json_cols):
    "Output all rows in the specified table"
    # Delegates to the ``query`` command with a generated SELECT; the table
    # name is wrapped in [] to tolerate spaces/reserved words.
    ctx.invoke(
        query,
        path=path,
        sql="select * from [{}]".format(dbtable),
        nl=nl,
        arrays=arrays,
        csv=csv,
        no_headers=no_headers,
        table=table,
        fmt=fmt,
        json_cols=json_cols,
    )
@cli.command(name="insert-files")
@click.argument(
    "path",
    type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument("table")
@click.argument(
    "file_or_dir",
    nargs=-1,
    required=True,
    type=click.Path(file_okay=True, dir_okay=True, allow_dash=True),
)
@click.option(
    "-c",
    "--column",
    type=str,
    multiple=True,
    help="Column definitions for the table",
)
@click.option("--pk", type=str, help="Column to use as primary key")
@click.option("--alter", is_flag=True, help="Alter table to add missing columns")
@click.option("--replace", is_flag=True, help="Replace files with matching primary key")
@click.option("--upsert", is_flag=True, help="Upsert files with matching primary key")
@click.option("--name", type=str, help="File name to use")
def insert_files(path, table, file_or_dir, column, pk, alter, replace, upsert, name):
    """
    Insert one or more files using BLOB columns in the specified table
    Example usage:
    \b
    sqlite-utils insert-files pics.db images *.gif \\
        -c name:name \\
        -c content:content \\
        -c content_hash:sha256 \\
        -c created:ctime_iso \\
        -c modified:mtime_iso \\
        -c size:size \\
        --pk name
    """
    # Default column set and primary key when none are given.
    if not column:
        column = ["path:path", "content:content", "size:size"]
    if not pk:
        pk = "path"
    # Expands directories recursively; "-" means read from stdin.
    # NOTE(review): the loop variable ``path`` here shadows the database
    # path parameter, but only inside this nested function.
    def yield_paths_and_relative_paths():
        for f_or_d in file_or_dir:
            path = pathlib.Path(f_or_d)
            if f_or_d == "-":
                yield "-", "-"
            elif path.is_dir():
                for subpath in path.rglob("*"):
                    if subpath.is_file():
                        yield subpath, subpath.relative_to(path)
            elif path.is_file():
                yield path, path
    # Load all paths so we can show a progress bar
    paths_and_relative_paths = list(yield_paths_and_relative_paths())
    with click.progressbar(paths_and_relative_paths) as bar:
        def to_insert():
            for path, relative_path in bar:
                row = {}
                lookups = FILE_COLUMNS
                if path == "-":
                    stdin_data = sys.stdin.buffer.read()
                    # We only support a subset of columns for this case
                    lookups = {
                        "name": lambda p: name or "-",
                        "path": lambda p: name or "-",
                        "content": lambda p: stdin_data,
                        "sha256": lambda p: hashlib.sha256(stdin_data).hexdigest(),
                        "md5": lambda p: hashlib.md5(stdin_data).hexdigest(),
                        "size": lambda p: len(stdin_data),
                    }
                # Each -c value is "column_name:type"; a bare name means the
                # column name doubles as the lookup type.
                for coldef in column:
                    if ":" in coldef:
                        colname, coltype = coldef.rsplit(":", 1)
                    else:
                        colname, coltype = coldef, coldef
                    try:
                        value = lookups[coltype](path)
                        row[colname] = value
                    except KeyError:
                        raise click.ClickException(
                            "'{}' is not a valid column definition - options are {}".format(
                                coltype, ", ".join(lookups.keys())
                            )
                        )
                    # Special case for --name
                    if coltype == "name" and name:
                        row[colname] = name
                yield row
        db = sqlite_utils.Database(path)
        with db.conn:
            db[table].insert_all(
                to_insert(), pk=pk, alter=alter, replace=replace, upsert=upsert
            )
# Lookup table mapping ``insert-files`` column types to extractor callables.
# Each callable receives a pathlib.Path and returns the value to store.
FILE_COLUMNS = {
    "name": lambda p: p.name,
    "path": lambda p: str(p),
    "fullpath": lambda p: str(p.resolve()),
    "sha256": lambda p: hashlib.sha256(p.resolve().read_bytes()).hexdigest(),
    "md5": lambda p: hashlib.md5(p.resolve().read_bytes()).hexdigest(),
    "mode": lambda p: p.stat().st_mode,
    "content": lambda p: p.resolve().read_bytes(),
    "mtime": lambda p: p.stat().st_mtime,
    "ctime": lambda p: p.stat().st_ctime,
    "mtime_int": lambda p: int(p.stat().st_mtime),
    "ctime_int": lambda p: int(p.stat().st_ctime),
    "mtime_iso": lambda p: datetime.utcfromtimestamp(p.stat().st_mtime).isoformat(),
    "ctime_iso": lambda p: datetime.utcfromtimestamp(p.stat().st_ctime).isoformat(),
    "size": lambda p: p.stat().st_size,
}
def output_rows(iterator, headers, nl, arrays, json_cols):
    """Yield rows serialized as JSON text lines.

    With nl=True each line is a standalone JSON document; otherwise the
    lines together form one JSON array ("[" on the first line, "]" on the
    last, commas in between). arrays=False zips each row with headers into
    an object; json_cols=True re-parses string cells that look like JSON.
    """
    # Look one row ahead so we know when we are on the final row and can
    # close the array / omit the trailing comma.
    rows_iter, lookahead = itertools.tee(iterator, 2)
    next(lookahead, None)
    is_first = True
    for row, upcoming in itertools.zip_longest(rows_iter, lookahead):
        at_end = upcoming is None
        data = row
        if json_cols:
            # Any cell holding a valid JSON string is emitted as JSON.
            data = [maybe_json(value) for value in data]
        if not arrays:
            data = dict(zip(headers, data))
        serialized = json.dumps(data, default=json_binary)
        if nl:
            prefix = comma = suffix = ""
        else:
            prefix = "[" if is_first else " "
            comma = "" if at_end else ","
            suffix = "]" if at_end else ""
        yield prefix + serialized + comma + suffix
        is_first = False
def maybe_json(value):
    """Parse *value* as JSON when it is a string that looks like a JSON
    object or array; otherwise return it unchanged."""
    if not isinstance(value, str):
        return value
    candidate = value.strip()
    # Only strings opening with '{' or '[' are candidates for parsing.
    if candidate[:1] not in ("{", "["):
        return value
    try:
        return json.loads(candidate)
    except ValueError:
        # Looked like JSON but failed to parse - keep the original string.
        return value
def json_binary(value):
    """json.dumps ``default=`` hook: encode bytes values as a
    {"$base64": True, "encoded": ...} object; reject anything else."""
    if not isinstance(value, bytes):
        # Signal json.dumps that this type is not serializable.
        raise TypeError
    encoded = base64.b64encode(value).decode("latin-1")
    return {"$base64": True, "encoded": encoded}
| 28.16683
| 107
| 0.596927
|
4a01cc9bdb42270ace4a7aa0dc2b78940a2450b0
| 10,744
|
py
|
Python
|
newcomers_guide/tests/test_review_data.py
|
DarwishMenna/pathways-backend
|
e9825e0373c586ce8f07ee8b70aecc7de679fb41
|
[
"BSD-3-Clause"
] | 12
|
2017-08-30T18:21:00.000Z
|
2021-12-09T04:04:17.000Z
|
newcomers_guide/tests/test_review_data.py
|
DarwishMenna/pathways-backend
|
e9825e0373c586ce8f07ee8b70aecc7de679fb41
|
[
"BSD-3-Clause"
] | 424
|
2017-08-08T18:32:14.000Z
|
2022-03-30T21:42:51.000Z
|
newcomers_guide/tests/test_review_data.py
|
DarwishMenna/pathways-backend
|
e9825e0373c586ce8f07ee8b70aecc7de679fb41
|
[
"BSD-3-Clause"
] | 7
|
2017-09-29T21:14:37.000Z
|
2019-12-30T21:07:37.000Z
|
from django.test import TestCase
from utility.review_data import compare_data, partition_files
class PartitionFilesForReview(TestCase):
    """Tests for partition_files(): pairing reference-language and
    target-language content files found under the same path; paths with
    only one of the two languages are dropped."""
    def test_includes_file_in_reference_language(self):
        result = partition_files('en', 'fr', {
            'somePath': ['en.content.md', 'fr.content.md'],
        })
        self.assertEqual(result[0].reference_file, 'somePath/en.content.md')
    def test_includes_file_in_target_language(self):
        result = partition_files('en', 'fr', {
            'somePath': ['en.content.md', 'fr.content.md'],
        })
        self.assertEqual(result[0].target_file, 'somePath/fr.content.md')
    def test_ignore_if_only_reference_file_exists(self):
        result = partition_files('en', 'fr', {
            'somePath': ['en.content.md'],
        })
        self.assertEqual(len(result), 0)
    def test_ignore_if_only_target_file_exists(self):
        result = partition_files('en', 'fr', {
            'somePath': ['fr.content.md'],
        })
        self.assertEqual(len(result), 0)
class CompareDataForReviewTests(TestCase):
    """Tests for compare_data(): structural comparison of translated text
    against reference text (headings, bullets, numbered lists, breaks,
    links, emails, phone numbers).

    Fixed: assertRegexpMatches is a deprecated alias removed in Python
    3.12; replaced with assertRegex, matching the assertNotRegex already
    used below.
    """
    def test_detects_heading_missing_at_start_of_file(self):
        target_text = 'Heading'
        reference_text = '# Heading'
        result = compare_data(target_text, reference_text)
        self.assertRegex(result, r'contains 0 headings, reference has 1')
    def test_ignores_matching_heading(self):
        target_text = 'some text.\n#Heading'
        reference_text = 'some text.\n#Heading'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, '')
    def test_detects_heading_missing(self):
        target_text = 'some text.\nHeading'
        reference_text = 'some text.\n#Heading'
        result = compare_data(target_text, reference_text)
        self.assertRegex(result, r'contains 0 headings, reference has 1')
    def test_detects_extra_heading(self):
        target_text = 'some text.\n#Heading'
        reference_text = 'some text.\nHeading'
        result = compare_data(target_text, reference_text)
        self.assertRegex(result, r'contains 1 headings, reference has 0')
    def test_ignores_hash_signs_within_lines(self):
        target_text = 'foo bar baz'
        reference_text = 'foo # bar baz'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, '')
    def test_detects_bullet_missing_at_start_of_file(self):
        target_text = 'Bullet'
        reference_text = '* Bullet'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, 'contains 0 bullets, reference has 1')
    def test_detects_bullet_missing(self):
        target_text = 'some text.Bullet'
        reference_text = 'some text.\n* Bullet'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, 'contains 0 bullets, reference has 1')
    def test_detects_minus_bullet_missing(self):
        target_text = 'some text.Bullet'
        reference_text = 'some text.\n- Bullet'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, 'contains 0 bullets, reference has 1')
    def test_detects_plus_bullet_missing(self):
        target_text = 'some text.Bullet'
        reference_text = 'some text.\n+ Bullet'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, 'contains 0 bullets, reference has 1')
    def test_ignored_difference_between_bullet_types(self):
        target_text = 'some text.\n* Bullet'
        reference_text = 'some text.\n+ Bullet'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, '')
    def test_detects_extra_bullet(self):
        target_text = 'some text.\n* Bullet'
        reference_text = 'some text.\nBullet'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, 'contains 1 bullets, reference has 0')
    def test_ignores_bullet_within_lines(self):
        target_text = 'some text. Not a bullet'
        reference_text = 'some text. * Not a bullet'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, '')
    def test_detects_numbered_list_item_is_missing(self):
        target_text = 'some text.\nList item'
        reference_text = 'some text.\n12. List item'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, 'contains 0 numbered list items, reference has 1')
    def test_detects_extra_numbered_list_item(self):
        target_text = 'some text.\n12. List item'
        reference_text = 'some text.\nList item'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, 'contains 1 numbered list items, reference has 0')
    def test_ignores_numbered_list_item_within_line(self):
        target_text = 'some text. 12. List item'
        reference_text = 'some text. List item'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, '')
    def test_detects_missing_paragraph_break(self):
        target_text = 'One paragraph. A second paragraph'
        reference_text = 'One paragraph.\n\nA second paragraph'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, 'contains 0 paragraph breaks, reference has 1')
    def test_detects_extra_paragraph_break(self):
        target_text = 'One paragraph.\n\nA second paragraph'
        # NOTE(review): 'nA' here looks like a mangled '\n'; as written the
        # reference contains no break, which still matches the expectation.
        reference_text = 'One paragraph. nA second paragraph'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, 'contains 1 paragraph breaks, reference has 0')
    def test_detects_missing_line_break(self):
        target_text = 'One paragraph. A second paragraph'
        # NOTE(review): trailing-space count distinguishes this from the
        # next test (markdown line break = two trailing spaces) - confirm
        # the intended whitespace here.
        reference_text = 'One paragraph. \nA second paragraph'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, 'contains 0 line breaks, reference has 1')
    def test_one_trailing_space_is_not_considered_line_break(self):
        target_text = 'One paragraph. A second paragraph'
        reference_text = 'One paragraph. \nA second paragraph'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, '')
    def test_two_newlines_is_not_considered_line_break(self):
        target_text = 'One paragraph. A second paragraph'
        reference_text = 'One paragraph. \n\nA second paragraph'
        result = compare_data(target_text, reference_text)
        self.assertNotRegex(result, r'line breaks')
    def test_ignores_matching_urls(self):
        target_text = 'http://www.foo.com'
        reference_text = 'http://www.foo.com'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, '')
    def test_ignores_stuff_on_the_next_line(self):
        target_text = 'http://www.foo.com\nFoo'
        reference_text = 'http://www.foo.com\nBar'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, '')
    def test_detects_different_urls(self):
        target_text = 'http://www.foo.com'
        reference_text = 'http://www.bar.com'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, ('contains link http://www.foo.com\n'
                                  'the reference has http://www.bar.com')
                         )
    def test_detects_missing_url(self):
        target_text = ''
        reference_text = 'http://www.bar.com'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, 'missing link http://www.bar.com, it\'s there in the reference')
    def test_detects_extra_url(self):
        target_text = 'http://www.bar.com'
        reference_text = ''
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, 'extra link http://www.bar.com is not there in the reference')
    def test_ignores_matching_email_addresses(self):
        target_text = 'user@foo.com'
        reference_text = 'user@foo.com'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, '')
    def test_detects_different_email_addresses(self):
        target_text = 'user@foo.com'
        reference_text = 'user@bar.com'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, ('contains email address user@foo.com\n'
                                  'the reference has user@bar.com')
                         )
    def test_detects_missing_email_address(self):
        target_text = ''
        reference_text = 'user@foo.com'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, 'missing email address user@foo.com, it\'s there in the reference')
    def test_detects_extra_email_address(self):
        target_text = 'user@foo.com'
        reference_text = ''
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, 'extra email address user@foo.com is not there in the reference')
    def test_ignores_matching_phone_numbers(self):
        target_text = '888-888-8888'
        reference_text = '888-888-8888'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, '')
    def test_detects_different_phone_number(self):
        target_text = '888-888-8888'
        reference_text = '111-888-8888'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, ('contains phone number 888-888-8888\n'
                                  'the reference has 111-888-8888')
                         )
    def test_detects_mising_phone_number(self):
        target_text = ''
        reference_text = '111-888-8888'
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, 'missing phone number 111-888-8888, it\'s there in the reference')
    def test_detects_extra_phone_number(self):
        target_text = '111-888-8888'
        reference_text = ''
        result = compare_data(target_text, reference_text)
        self.assertEqual(result, 'extra phone number 111-888-8888 is not there in the reference')
    def test_multiple_differences_are_joined_with_newline(self):
        target_text = 'some text http://www.example.com 1-800-345-6789'
        reference_text = 'some different text http://www.example2.com 1-800-987-6543'
        result = compare_data(target_text, reference_text)
        self.assertEqual(
            result,
            ('contains link http://www.example.com\n'
             'the reference has http://www.example2.com\n\n'
             'contains phone number 1-800-345-6789\n'
             'the reference has 1-800-987-6543')
        )
| 43.497976
| 100
| 0.671258
|
4a01cd396f5f5f97b51ed2201dc1895ce7d7b21a
| 2,764
|
py
|
Python
|
Tools/c-analyzer/c_analyzer/__init__.py
|
mshumko/cpython
|
c0590c0033e86f98cdf5f2ca6898656f98ab4053
|
[
"0BSD"
] | 4
|
2020-11-28T08:49:09.000Z
|
2020-12-13T03:43:55.000Z
|
Tools/c-analyzer/c_analyzer/__init__.py
|
mshumko/cpython
|
c0590c0033e86f98cdf5f2ca6898656f98ab4053
|
[
"0BSD"
] | 12
|
2020-12-01T05:40:22.000Z
|
2022-03-01T18:00:53.000Z
|
Tools/c-analyzer/c_analyzer/__init__.py
|
mshumko/cpython
|
c0590c0033e86f98cdf5f2ca6898656f98ab4053
|
[
"0BSD"
] | null | null | null |
from c_parser import (
parse_files as _parse_files,
)
from c_parser.info import (
KIND,
TypeDeclaration,
filter_by_kind,
collate_by_kind_group,
resolve_parsed,
)
from . import (
analyze as _analyze,
datafiles as _datafiles,
)
from .info import Analysis
def analyze(filenames, **kwargs):
    """Analyze the declarations in the given files and return an Analysis.

    Keyword args are forwarded to iter_analysis_results() / iter_decls().
    """
    # Typo fixes: the parameter was spelled "filenmes" while the body read
    # "filenames", and the call targeted the misspelled "iter_analyis_results"
    # -- both were NameErrors at call time.
    results = iter_analysis_results(filenames, **kwargs)
    return Analysis.from_results(results)
def iter_analysis_results(filenames, *,
                          known=None,
                          **kwargs
                          ):
    """Yield (decl, analyzed) for each declaration parsed from the files.

    known: optional already-known type data, passed through to analyze_decls().
    Remaining keyword args are forwarded to iter_decls().
    """
    # Typo fix: the parameter was spelled "filenmes" while the body used
    # "filenames", raising NameError on every call.
    decls = iter_decls(filenames, **kwargs)
    yield from analyze_decls(decls, known)
def iter_decls(filenames, *,
               kinds=None,
               parse_files=_parse_files,
               **kwargs
               ):
    """Yield the resolved declarations parsed from the given files.

    kinds: optional subset of KIND.DECLS to keep; defaults to all decl kinds.
    parse_files: parser callable; falls back to the default when falsy.
    """
    selected = KIND.DECLS if kinds is None else (KIND.DECLS & set(kinds))
    parser = parse_files or _parse_files
    for item in filter_by_kind(parser(filenames, **kwargs), selected):
        yield resolve_parsed(item)
def analyze_decls(decls, known, *,
                  analyze_resolved=None,
                  handle_unresolved=True,
                  relroot=None,
                  ):
    """Analyze each declaration and yield (decl, resolved) pairs.

    known: data about already-known types, loaded via _datafiles.get_known().
    handle_unresolved: when true, unresolved type dependencies are an error.
    """
    knowntypes, knowntypespecs = _datafiles.get_known(
        known,
        handle_unresolved=handle_unresolved,
        analyze_resolved=analyze_resolved,
        relroot=relroot,
    )
    # Materialize: the iterable is consumed twice (collation + the final loop).
    decls = list(decls)
    collated = collate_by_kind_group(decls)
    # Type declarations are analyzed up front (analyze_type_decls below) so the
    # final loop can reuse their results via the `types` mapping.
    types = {decl: None for decl in collated['type']}
    typespecs = _analyze.get_typespecs(types)
    def analyze_decl(decl):
        # Closure over the typespec/known-type tables built above.
        return _analyze.analyze_decl(
            decl,
            typespecs,
            knowntypespecs,
            types,
            knowntypes,
            analyze_resolved=analyze_resolved,
        )
    _analyze.analyze_type_decls(types, analyze_decl, handle_unresolved)
    for decl in decls:
        if decl in types:
            resolved = types[decl]
        else:
            resolved = analyze_decl(decl)
            if resolved and handle_unresolved:
                # Sanity check: every type dependency must have been resolved.
                typedeps, _ = resolved
                if not isinstance(typedeps, TypeDeclaration):
                    if not typedeps or None in typedeps:
                        raise NotImplementedError((decl, resolved))
        yield decl, resolved
#######################################
# checks
def check_all(analysis, checks, *, failfast=False):
    """Run every check against the analysis, yielding (data, failure) pairs.

    Pairs whose failure is None are suppressed.  With failfast=True the first
    real failure is yielded, followed by a (None, None) sentinel, and the
    generator stops.
    """
    for check in checks or ():
        for data, failure in check(analysis):
            if failure is None:
                continue
            yield data, failure
            if failfast:
                # Sentinel marks an early stop, then abandon all checks.
                yield None, None
                return
| 26.576923
| 71
| 0.573444
|
4a01cd40d102946142d7649191bc4c11f8bf230e
| 443
|
py
|
Python
|
sedldata/migrate/versions/954def4c1b92_identifier_for_data_load.py
|
OpenDataServices/sedldata
|
c7f3b13969bb9c9a494a5fadf1456cc85e9bf2cc
|
[
"BSD-3-Clause"
] | null | null | null |
sedldata/migrate/versions/954def4c1b92_identifier_for_data_load.py
|
OpenDataServices/sedldata
|
c7f3b13969bb9c9a494a5fadf1456cc85e9bf2cc
|
[
"BSD-3-Clause"
] | null | null | null |
sedldata/migrate/versions/954def4c1b92_identifier_for_data_load.py
|
OpenDataServices/sedldata
|
c7f3b13969bb9c9a494a5fadf1456cc85e9bf2cc
|
[
"BSD-3-Clause"
] | 1
|
2019-01-20T19:39:11.000Z
|
2019-01-20T19:39:11.000Z
|
"""identifier for data load
Revision ID: 954def4c1b92
Revises: 601c0b812f50
Create Date: 2018-08-22 13:56:11.419775
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '954def4c1b92'        # this migration's id
down_revision = '601c0b812f50'   # migration applied immediately before this one
branch_labels = None
depends_on = None
def upgrade():
    """Add the load_name text column to the data table."""
    load_name_column = sa.Column('load_name', sa.Text)
    op.add_column('data', load_name_column)
def downgrade():
    """Drop the load_name column added by upgrade()."""
    op.drop_column('data', 'load_name')
| 17.72
| 58
| 0.735892
|
4a01cd779e19d62891c24dd75df5c97f5016849f
| 549
|
py
|
Python
|
languages/models.py
|
bastoune57/gokiting_back_end
|
f3edcbeede292713349b28f2390b5d57e1420f8e
|
[
"MIT"
] | null | null | null |
languages/models.py
|
bastoune57/gokiting_back_end
|
f3edcbeede292713349b28f2390b5d57e1420f8e
|
[
"MIT"
] | null | null | null |
languages/models.py
|
bastoune57/gokiting_back_end
|
f3edcbeede292713349b28f2390b5d57e1420f8e
|
[
"MIT"
] | null | null | null |
from django.db import models
from .fields import LanguageField
# Create your models here.
class Language (models.Model):
    """
    A language associated with an instructor user.

    Each row links one user to one language; the (user, language) pair is
    unique, so a user cannot list the same language twice.
    """
    # Owning user; deleting the user cascades to their language rows.
    user = models.ForeignKey(
        'instructors.User',
        on_delete=models.CASCADE,
        blank=False,
        related_name='languages',
    )
    # Language value as defined by the custom LanguageField.
    language = LanguageField(blank=False)

    def __str__(self):
        return self.language

    class Meta:
        # make the user/language a unique pair in the db
        unique_together = ("user", "language")
| 24.954545
| 57
| 0.639344
|
4a01ce0f082c4036e643a52aae0a64ca444f8803
| 15,002
|
py
|
Python
|
projects/image_chat/transresnet_multimodal/transresnet_multimodal.py
|
harunpehlivan/ParlAI
|
e1f2942feb8f158964477f4a46bc2c4c741b2ccd
|
[
"MIT"
] | 1
|
2019-07-25T17:30:18.000Z
|
2019-07-25T17:30:18.000Z
|
projects/image_chat/transresnet_multimodal/transresnet_multimodal.py
|
abisee/ParlAI
|
5507d4745ca23b23af311673a6b0d1b7e72eb5cd
|
[
"MIT"
] | null | null | null |
projects/image_chat/transresnet_multimodal/transresnet_multimodal.py
|
abisee/ParlAI
|
5507d4745ca23b23af311673a6b0d1b7e72eb5cd
|
[
"MIT"
] | 1
|
2019-07-28T14:53:18.000Z
|
2019-07-28T14:53:18.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Transresnet Multimodal Model (https://arxiv.org/abs/1811.00945)."""
from parlai.core.dict import DictionaryAgent
from parlai.core.utils import round_sigfigs
from .modules import TransresnetMultimodalModel
from projects.personality_captions.transresnet.transresnet import TransresnetAgent
import torch
from torch import optim
import random
import os
import numpy as np
import tqdm
from collections import deque
class TransresnetMultimodalAgent(TransresnetAgent):
    """
    Model from "Engaging Image Chat: Modeling Personality in Grounded Dialogue".

    See paper for more details: (https://arxiv.org/abs/1811.00945)

    An extension of the model from https://arxiv.org/abs/1810.10665; given
    an image, personality, and dialogue history, predicts the next utterance
    in a dialogue.
    """

    ######################################
    # Initialization and argument parsers
    ######################################
    @staticmethod
    def add_cmdline_args(argparser):
        """Add command-line args, including the personality-override option.

        Returns the argument group so callers can extend it.
        """
        TransresnetMultimodalModel.add_cmdline_args(argparser)
        TransresnetAgent.add_cmdline_args(argparser)
        arg_group = argparser.add_argument_group("TransresnetMultimodal Arguments")
        argparser.add_argument(
            "--personality-override",
            type=str,
            default=None,
            help="for use in other tasks where no personality "
            "is given. This will give the model a personality "
            "(whichever is specifed).",
        )
        argparser.add_argument(
            "--personalities-path",
            type=str,
            default=None,
            help="Path to personalities list",
        )
        # Also expose DictionaryAgent options (presumably for text
        # tokenization -- confirm against DictionaryAgent usage).
        DictionaryAgent.add_cmdline_args(argparser)
        return arg_group
    def __init__(self, opt, shared=None):
        """Initialize the agent: per-round metrics, optimizer, history."""
        super().__init__(opt, shared)
        # Metrics are tracked separately for each dialogue round.
        self.metrics = {
            k: {"hits@1/100": 0.0, "loss": 0.0, "num_samples": 0, "med_rank": []}
            for k in ["first_round", "second_round", "third_round+"]
        }
        if shared is None:
            # Only optimize parameters that require gradients (some may be
            # frozen, e.g. the text encoder).
            self.optimizer = optim.Adam(
                filter(lambda p: p.requires_grad, self.model.parameters()),
                self.opt["learningrate"],
            )
        else:
            # Shared agents reuse the parent's optimizer.
            self.optimizer = shared["optimizer"]
        self.history = deque(maxlen=None)
        self.personality_override = opt.get("personality_override")
    def _build_model(self, path=None):
        """Create the model, loading weights from the first available source.

        Priority: --init-model, then --model-file, then the given path;
        if none exists, the model keeps its fresh initialization.
        """
        init_model_path = None
        if self.opt.get("init_model") and os.path.isfile(self.opt["init_model"]):
            init_model_path = self.opt["init_model"]
        elif self.opt.get("model_file") and os.path.isfile(self.opt["model_file"]):
            init_model_path = self.opt["model_file"]
        elif path is not None:
            init_model_path = path
        print("Creating or loading model")
        self.model = TransresnetMultimodalModel(
            self.opt, self.personalities_list, self.dict
        )
        if init_model_path is not None:
            self.load(init_model_path)
        if self.use_cuda:
            self.model.cuda()
    def _setup_cands(self):
        """Load fixed candidates and their encodings (override for this model).

        Encodings are cached next to the candidate file and reloaded when
        present; otherwise they are computed with this model's text encoder.
        """
        self.fixed_cands = None
        self.fixed_cands_enc = None
        if self.fcp is not None:
            with open(self.fcp) as f:
                self.fixed_cands = [c.replace("\n", "") for c in f.readlines()]
            cands_enc_file = "{}.cands_enc".format(self.fcp)
            print("loading saved cand encodings")
            if os.path.isfile(cands_enc_file):
                # Load onto CPU regardless of where they were saved.
                self.fixed_cands_enc = torch.load(
                    cands_enc_file, map_location=lambda cpu, _: cpu
                )
            else:
                print("Extracting cand encodings")
                self.model.eval()
                pbar = tqdm.tqdm(
                    total=len(self.fixed_cands),
                    unit="cand",
                    unit_scale=True,
                    desc="Extracting candidate encodings",
                )
                fixed_cands_enc = []
                # NOTE(review): this range stops 50 short of the end, so up to
                # the last 50 candidates are never encoded -- looks like an
                # off-by-one; confirm before relying on the cached encodings.
                for _, batch in enumerate(
                    [
                        self.fixed_cands[i:i + 50]
                        for i in range(0, len(self.fixed_cands) - 50, 50)
                    ]
                ):
                    embedding = self.model.forward_text_encoder(batch).detach()
                    fixed_cands_enc.append(embedding)
                    pbar.update(50)
                self.fixed_cands_enc = torch.cat(fixed_cands_enc, 0)
                torch.save(self.fixed_cands_enc, cands_enc_file)
    def share(self):
        """Share internal state with other agents; also shares the optimizer."""
        shared = super().share()
        shared["optimizer"] = self.optimizer
        return shared
    def observe(self, observation):
        """
        Observe an observation.

        Additionally retrieves the dialogue history for the observation.

        :param observation:
            observation

        :return:
            the observation, with dialogue history included.
        """
        # get_dialogue_history prepends self.history to the observation text.
        self.observation = self.get_dialogue_history(observation)
        return self.observation
    def train_step(self, valid_obs, image_feats, personalities, dialogue_histories):
        """
        Model train step.

        :param valid_obs:
            list of valid observations
        :param image_feats:
            list of image features, one per example
        :param personalities:
            list of personalities, one per example
        :param dialogue_histories:
            list of dialogue histories, one per example

        :return:
            the total loss and the number of correct examples
        """
        self.model.train()
        # When an example has several labels, one is sampled at random.
        labels = [random.choice(v["labels"]) for v in valid_obs]
        loss, num_correct, _ = self.model(
            image_feats,
            personalities,
            dialogue_histories,
            labels,
            batchsize=len(valid_obs),
        )
        return loss, num_correct
    def eval_step(self, valid_obs, image_feats, personalities, dialogue_histories):
        """
        Model eval step.

        :param valid_obs:
            list of valid observations
        :param image_feats:
            list of image features, one per example
        :param personalities:
            list of personalities, one per example
        :param dialogue_histories:
            list of dialogue histories, one per example

        :return:
            the total loss, number of correct examples,
            the ranked position of each correct caption,
            and the ranked lists of candidates (one per example)
        """
        self.model.eval()
        med_rank = None
        chosen_responses = None
        candidates_encoded = None
        if self.fixed_cands is not None:
            # Use the globally fixed candidate set and its cached encodings.
            candidates_encoded = self.fixed_cands_enc
            candidates = self.fixed_cands
        else:
            # Per-example candidate sets supplied by the task.
            candidates = [v["label_candidates"] for v in valid_obs]
        chosen_responses = self.model.choose_best_response(
            image_feats,
            personalities,
            dialogue_histories,
            candidates,
            candidates_encoded=candidates_encoded,
            # -1 means "rank all candidates"; fixed cands return the top 100.
            k=-1 if self.fixed_cands is None else 100,
            batchsize=len(valid_obs),
        )
        # Loss is not computed during evaluation.
        loss = -1
        if self.fixed_cands is not None:
            # With fixed cands there are no per-example labels to score against.
            num_correct = 0
        else:
            labels = [v.get("eval_labels") for v in valid_obs]
            equality_list = [
                1 if chosen_responses[i][0] in labels[i] else 0
                for i in range(len(labels))
            ]
            # calculate med ranks: best (lowest) rank of any gold label in the
            # model's ranked candidate list, 1-based.
            med_rank = []
            for i, e_list in enumerate(chosen_responses):
                lowest_rank = len(e_list) + 1
                for c in labels[i]:
                    lowest_rank = min(lowest_rank, e_list.index(c) + 1)
                med_rank.append(lowest_rank)
            num_correct = sum(equality_list)
        return loss, num_correct, med_rank, chosen_responses
def batch_act(self, observations):
"""
Act on a batch of observations.
:param observations:
list of observations
:return:
A list of acts, one for each observation
"""
is_training = any(["labels" in obs for obs in observations])
valid_obs, valid_indexes = self.filter_valid_obs(observations, is_training)
image_feats = self.extract_image_feats(valid_obs)
personalities, dialogue_histories, dialogue_round = self.extract_texts(
valid_obs
)
chosen_responses = None
med_rank = None
if is_training:
self.optimizer.zero_grad()
loss, num_correct = self.train_step(
valid_obs, image_feats, personalities, dialogue_histories
)
loss.backward()
self.optimizer.step()
else:
loss, num_correct, med_rank, chosen_responses = self.eval_step(
valid_obs, image_feats, personalities, dialogue_histories
)
self.update_metrics(loss, num_correct, len(valid_obs), dialogue_round, med_rank)
result = [
{"text": "No Response During Traiing", "id": self.getID()}
for _ in range(len(observations))
]
if chosen_responses is not None:
for i, index_obs in enumerate(valid_indexes):
result[index_obs]["text"] = chosen_responses[i][0]
result[index_obs]["text_candidates"] = chosen_responses[i]
return result
    def extract_texts(self, obs):
        """
        Extract the personalities and dialogue histories from observations.

        Additionally determine which dialogue round we are in.

        Note that this function assumes that the personality is the
        last line of the `text` field in the observation.

        :param obs:
            list of observations

        :return:
            a list of personalities, a list of dialogue histories, and the
            current dialogue round (either first, second, or third+)
        """
        splits = [v.get("text").split("\n") for v in obs]
        if self.personality_override:
            # Append the override so it becomes the "last line" personality.
            splits = [s + [self.personality_override] for s in splits]
        personalities = [t[-1] for t in splits]
        dialogue_histories = None
        dialogue_round = "first_round"
        # The round is inferred from the first example's line count; all lines
        # before the personality constitute the dialogue history.
        if len(splits[0]) >= 2:
            dialogue_round = "second_round" if len(splits[0]) == 2 else "third_round+"
            dialogue_histories = ["\n".join(t[:-1]) for t in splits]
        return personalities, dialogue_histories, dialogue_round
    def get_dialogue_history(self, obs):
        """
        Get dialogue history for an observation.

        Prepends the accumulated history to the `text` field, records the
        label as the newest history entry, and clears the history at the end
        of an episode.

        :param obs:
            observation

        :return:
            the observation with the dialogue history in the `text` field
        """
        if len(self.history) > 0:
            obs["text"] = "\n".join(self.history) + "\n" + obs["text"]
        # The (sampled) label becomes part of the history for the next turn.
        if "labels" in obs:
            self.history.append(random.choice(obs["labels"]))
        elif "eval_labels" in obs:
            self.history.append(random.choice(obs["eval_labels"]))
        if obs.get("episode_done", True):
            # end of this episode, clear the history
            self.history.clear()
        return obs
    def update_metrics(
        self, loss, num_correct, num_samples, dialogue_round, med_rank=None
    ):
        """
        Update Metrics.

        Overriden to include dialogue round

        :param loss:
            float loss
        :param num_correct:
            number of examples for which chosen caption is correct
        :param num_samples:
            total number of examples
        :param dialogue_round:
            which metrics bucket to update (first/second/third+ round)
        :param med_rank:
            rank of correct caption for each example
        """
        self.metrics[dialogue_round]["hits@1/100"] += num_correct
        self.metrics[dialogue_round]["loss"] += loss
        self.metrics[dialogue_round]["num_samples"] += num_samples
        if med_rank:
            self.metrics[dialogue_round]["med_rank"] += med_rank
    def receive_metrics(self, metrics_dict):
        """
        Receive the metrics from validation.

        Unfreeze text encoder weights after a certain number of rounds without improvement.
        Override to account for different dialogue rounds.

        :param metrics_dict:
            the metrics dictionary
        """
        if "tasks" in metrics_dict:
            metrics_dict = metrics_dict["tasks"]["internal:comment_battle:imageDialog"]
        if self.freeze_patience != -1 and self.is_frozen:
            m_key = "hits@1/100"
            ms = [
                metrics_dict[r].get(m_key, -1)
                for r in ["first_round", "second_round", "third_round+"]
            ]
            # NOTE(review): rounds that are missing the metric contribute -1 to
            # the numerator but are excluded from the denominator -- confirm
            # this averaging is intended.
            m = sum(ms) / len([m for m in ms if m >= 0])
            if m > self.freeze_best_metric:
                self.freeze_impatience = 0
                self.freeze_best_metric = m
                print("performance not good enough to unfreeze the model.")
            else:
                self.freeze_impatience += 1
                print("Growing impatience for unfreezing")
                if self.freeze_impatience >= self.freeze_patience:
                    # Patience exhausted: reload best checkpoint and unfreeze
                    # the text encoder for fine-tuning.
                    self.is_frozen = False
                    print(
                        "Reached impatience for fine tuning. "
                        "Reloading the best model so far."
                    )
                    self._build_model(self.model_file)
                    if self.use_cuda:
                        self.model = self.model.cuda()
                    print("Unfreezing.")
                    self.model.unfreeze_text_encoder()
                    print("Done")
    def reset(self):
        """Reset the agent; additionally clears the dialogue history."""
        super().reset()
        self.history.clear()
def reset_metrics(self):
"""Reset per-dialogue round metrics."""
for v in self.metrics.values():
v["hits@1/100"] = 0.0
v["loss"] = 0.0
v["num_samples"] = 0.0
if "med_rank" in v:
v["med_rank"] = []
    def report(self):
        """Report per-dialogue round metrics.

        Returns a dict keyed by round with averaged hits@1/100, averaged loss,
        and (when tracked) the median rank of the correct caption.
        """
        m = {k: {} for k in ["first_round", "second_round", "third_round+"]}
        for k, v in self.metrics.items():
            # Rounds that saw no samples report an empty dict.
            if v["num_samples"] > 0:
                m[k]["hits@1/100"] = round_sigfigs(
                    v["hits@1/100"] / v["num_samples"], 4
                )
                m[k]["loss"] = round_sigfigs(v["loss"] / v["num_samples"], 4)
                if "med_rank" in v:
                    m[k]["med_rank"] = np.median(v["med_rank"])
        return m
| 35.804296
| 91
| 0.571724
|
4a01ce183f8b50647ceafa95cd72c53c90edab8e
| 8,031
|
py
|
Python
|
spacy/tests/pipeline/test_entity_ruler.py
|
lyq0930/spaCy
|
d030e04c235c2c16916fe66ed5892158c21fd245
|
[
"BSD-3-Clause",
"MIT"
] | 1
|
2020-09-27T13:51:57.000Z
|
2020-09-27T13:51:57.000Z
|
spacy/tests/pipeline/test_entity_ruler.py
|
lyq0930/spaCy
|
d030e04c235c2c16916fe66ed5892158c21fd245
|
[
"BSD-3-Clause",
"MIT"
] | 1
|
2021-06-22T13:32:07.000Z
|
2021-06-23T09:15:29.000Z
|
spacy/tests/pipeline/test_entity_ruler.py
|
ezorita/spaCy
|
66f1d29b4effb0b355268832fdcff21c279658bc
|
[
"BSD-3-Clause",
"MIT"
] | null | null | null |
import pytest
from spacy import registry
from spacy.tokens import Span
from spacy.language import Language
from spacy.pipeline import EntityRuler
from spacy.errors import MatchPatternError
from thinc.api import NumpyOps, get_current_ops
@pytest.fixture
def nlp():
    """Provide a fresh, empty Language pipeline for each test."""
    return Language()
@pytest.fixture
@registry.misc("entity_ruler_patterns")
def patterns():
    """Shared test patterns; also registered in the misc registry so
    initialize() can look them up by name."""
    return [
        {"label": "HELLO", "pattern": "hello world"},
        {"label": "BYE", "pattern": [{"LOWER": "bye"}, {"LOWER": "bye"}]},
        {"label": "HELLO", "pattern": [{"ORTH": "HELLO"}]},
        {"label": "COMPLEX", "pattern": [{"ORTH": "foo", "OP": "*"}]},
        {"label": "TECH_ORG", "pattern": "Apple", "id": "a1"},
        {"label": "TECH_ORG", "pattern": "Microsoft", "id": "a2"},
    ]
@Language.component("add_ent")
def add_ent_component(doc):
    """Minimal component that pre-sets a single ORG entity on the doc."""
    doc.ents = [Span(doc, 0, 3, label="ORG")]
    return doc
def test_entity_ruler_init(nlp, patterns):
    """Patterns passed at construction are stored and applied in a pipeline."""
    ruler = EntityRuler(nlp, patterns=patterns)
    assert len(ruler) == len(patterns)
    assert len(ruler.labels) == 4
    assert "HELLO" in ruler
    assert "BYE" in ruler
    ruler = nlp.add_pipe("entity_ruler")
    ruler.add_patterns(patterns)
    doc = nlp("hello world bye bye")
    assert len(doc.ents) == 2
    assert doc.ents[0].label_ == "HELLO"
    assert doc.ents[1].label_ == "BYE"
def test_entity_ruler_no_patterns_warns(nlp):
    """Running a pattern-less ruler warns and sets no entities."""
    ruler = EntityRuler(nlp)
    assert len(ruler) == 0
    assert len(ruler.labels) == 0
    nlp.add_pipe("entity_ruler")
    assert nlp.pipe_names == ["entity_ruler"]
    with pytest.warns(UserWarning):
        doc = nlp("hello world bye bye")
    assert len(doc.ents) == 0
def test_entity_ruler_init_patterns(nlp, patterns):
    """initialize() accepts patterns directly or via the misc registry."""
    # initialize with patterns
    ruler = nlp.add_pipe("entity_ruler")
    assert len(ruler.labels) == 0
    ruler.initialize(lambda: [], patterns=patterns)
    assert len(ruler.labels) == 4
    doc = nlp("hello world bye bye")
    assert doc.ents[0].label_ == "HELLO"
    assert doc.ents[1].label_ == "BYE"
    nlp.remove_pipe("entity_ruler")
    # initialize with patterns from misc registry
    nlp.config["initialize"]["components"]["entity_ruler"] = {
        "patterns": {"@misc": "entity_ruler_patterns"}
    }
    ruler = nlp.add_pipe("entity_ruler")
    assert len(ruler.labels) == 0
    nlp.initialize()
    assert len(ruler.labels) == 4
    doc = nlp("hello world bye bye")
    assert doc.ents[0].label_ == "HELLO"
    assert doc.ents[1].label_ == "BYE"
def test_entity_ruler_init_clear(nlp, patterns):
    """Test that initialization clears previously added patterns."""
    ruler = nlp.add_pipe("entity_ruler")
    ruler.add_patterns(patterns)
    assert len(ruler.labels) == 4
    ruler.initialize(lambda: [])
    assert len(ruler.labels) == 0
def test_entity_ruler_clear(nlp, patterns):
    """Test that clear() removes all patterns and stops matching.

    (Docstring fixed: it previously said "initialization clears patterns",
    copied from the test above.)
    """
    ruler = nlp.add_pipe("entity_ruler")
    ruler.add_patterns(patterns)
    assert len(ruler.labels) == 4
    doc = nlp("hello world")
    assert len(doc.ents) == 1
    ruler.clear()
    assert len(ruler.labels) == 0
    doc = nlp("hello world")
    assert len(doc.ents) == 0
def test_entity_ruler_existing(nlp, patterns):
    """By default the ruler does not overwrite entities set upstream."""
    ruler = nlp.add_pipe("entity_ruler")
    ruler.add_patterns(patterns)
    nlp.add_pipe("add_ent", before="entity_ruler")
    doc = nlp("OH HELLO WORLD bye bye")
    assert len(doc.ents) == 2
    assert doc.ents[0].label_ == "ORG"
    assert doc.ents[1].label_ == "BYE"
def test_entity_ruler_existing_overwrite(nlp, patterns):
    """With overwrite_ents=True the ruler replaces upstream entities."""
    ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True})
    ruler.add_patterns(patterns)
    nlp.add_pipe("add_ent", before="entity_ruler")
    doc = nlp("OH HELLO WORLD bye bye")
    assert len(doc.ents) == 2
    assert doc.ents[0].label_ == "HELLO"
    assert doc.ents[0].text == "HELLO"
    assert doc.ents[1].label_ == "BYE"
def test_entity_ruler_existing_complex(nlp, patterns):
    """Overwriting also works for multi-token, operator-based patterns."""
    ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True})
    ruler.add_patterns(patterns)
    nlp.add_pipe("add_ent", before="entity_ruler")
    doc = nlp("foo foo bye bye")
    assert len(doc.ents) == 2
    assert doc.ents[0].label_ == "COMPLEX"
    assert doc.ents[1].label_ == "BYE"
    assert len(doc.ents[0]) == 2
    assert len(doc.ents[1]) == 2
def test_entity_ruler_entity_id(nlp, patterns):
    """A pattern's "id" is exposed on matched entities as ent_id_."""
    ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True})
    ruler.add_patterns(patterns)
    doc = nlp("Apple is a technology company")
    assert len(doc.ents) == 1
    assert doc.ents[0].label_ == "TECH_ORG"
    assert doc.ents[0].ent_id_ == "a1"
def test_entity_ruler_cfg_ent_id_sep(nlp, patterns):
    """A custom ent_id_sep is used in internal keys without changing output."""
    config = {"overwrite_ents": True, "ent_id_sep": "**"}
    ruler = nlp.add_pipe("entity_ruler", config=config)
    ruler.add_patterns(patterns)
    assert "TECH_ORG**a1" in ruler.phrase_patterns
    doc = nlp("Apple is a technology company")
    assert len(doc.ents) == 1
    assert doc.ents[0].label_ == "TECH_ORG"
    assert doc.ents[0].ent_id_ == "a1"
def test_entity_ruler_serialize_bytes(nlp, patterns):
    """Patterns round-trip through to_bytes()/from_bytes()."""
    ruler = EntityRuler(nlp, patterns=patterns)
    assert len(ruler) == len(patterns)
    assert len(ruler.labels) == 4
    ruler_bytes = ruler.to_bytes()
    new_ruler = EntityRuler(nlp)
    assert len(new_ruler) == 0
    assert len(new_ruler.labels) == 0
    new_ruler = new_ruler.from_bytes(ruler_bytes)
    assert len(new_ruler) == len(patterns)
    assert len(new_ruler.labels) == 4
    assert len(new_ruler.patterns) == len(ruler.patterns)
    for pattern in ruler.patterns:
        assert pattern in new_ruler.patterns
    assert sorted(new_ruler.labels) == sorted(ruler.labels)
def test_entity_ruler_serialize_phrase_matcher_attr_bytes(nlp, patterns):
    """The phrase_matcher_attr setting survives bytes serialization."""
    ruler = EntityRuler(nlp, phrase_matcher_attr="LOWER", patterns=patterns)
    assert len(ruler) == len(patterns)
    assert len(ruler.labels) == 4
    ruler_bytes = ruler.to_bytes()
    new_ruler = EntityRuler(nlp)
    assert len(new_ruler) == 0
    assert len(new_ruler.labels) == 0
    assert new_ruler.phrase_matcher_attr is None
    new_ruler = new_ruler.from_bytes(ruler_bytes)
    assert len(new_ruler) == len(patterns)
    assert len(new_ruler.labels) == 4
    assert new_ruler.phrase_matcher_attr == "LOWER"
def test_entity_ruler_validate(nlp):
    """validate=True raises MatchPatternError for malformed patterns."""
    ruler = EntityRuler(nlp)
    validated_ruler = EntityRuler(nlp, validate=True)
    valid_pattern = {"label": "HELLO", "pattern": [{"LOWER": "HELLO"}]}
    invalid_pattern = {"label": "HELLO", "pattern": [{"ASDF": "HELLO"}]}
    # invalid pattern raises error without validate
    with pytest.raises(ValueError):
        ruler.add_patterns([invalid_pattern])
    # valid pattern is added without errors with validate
    validated_ruler.add_patterns([valid_pattern])
    # invalid pattern raises error with validate
    with pytest.raises(MatchPatternError):
        validated_ruler.add_patterns([invalid_pattern])
def test_entity_ruler_properties(nlp, patterns):
    """The labels and ent_ids properties reflect the added patterns."""
    ruler = EntityRuler(nlp, patterns=patterns, overwrite_ents=True)
    assert sorted(ruler.labels) == sorted(["HELLO", "BYE", "COMPLEX", "TECH_ORG"])
    assert sorted(ruler.ent_ids) == ["a1", "a2"]
def test_entity_ruler_overlapping_spans(nlp):
    """Of two overlapping matches, only the earlier one is kept."""
    ruler = EntityRuler(nlp)
    patterns = [
        {"label": "FOOBAR", "pattern": "foo bar"},
        {"label": "BARBAZ", "pattern": "bar baz"},
    ]
    ruler.add_patterns(patterns)
    doc = ruler(nlp.make_doc("foo bar baz"))
    assert len(doc.ents) == 1
    assert doc.ents[0].label_ == "FOOBAR"
@pytest.mark.parametrize("n_process", [1, 2])
def test_entity_ruler_multiprocessing(nlp, n_process):
    """Entity IDs survive nlp.pipe with multiple processes.

    Fixes two bugs: get_current_ops is a function and must be *called* before
    the isinstance check (comparing the function object itself was always
    False, so the test body only ran for n_process < 2), and the parametrized
    n_process is now actually passed to nlp.pipe instead of a hard-coded 2.
    """
    # Multiprocessing is only exercised on the NumPy backend.
    if isinstance(get_current_ops(), NumpyOps) or n_process < 2:
        texts = ["I enjoy eating Pizza Hut pizza."]
        patterns = [{"label": "FASTFOOD", "pattern": "Pizza Hut", "id": "1234"}]
        ruler = nlp.add_pipe("entity_ruler")
        ruler.add_patterns(patterns)
        for doc in nlp.pipe(texts, n_process=n_process):
            for ent in doc.ents:
                assert ent.ent_id_ == "1234"
| 33.4625
| 82
| 0.668161
|
4a01ce51ca39f28a98688626d52c733891c44ef1
| 2,991
|
py
|
Python
|
wemake_python_styleguide/visitors/ast/complexity/nested.py
|
makarchuk/wemake-python-styleguide
|
f976eed24613fbd6ea29f1cc589d8f863165a7aa
|
[
"MIT"
] | null | null | null |
wemake_python_styleguide/visitors/ast/complexity/nested.py
|
makarchuk/wemake-python-styleguide
|
f976eed24613fbd6ea29f1cc589d8f863165a7aa
|
[
"MIT"
] | null | null | null |
wemake_python_styleguide/visitors/ast/complexity/nested.py
|
makarchuk/wemake-python-styleguide
|
f976eed24613fbd6ea29f1cc589d8f863165a7aa
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import ast
from typing import ClassVar
from wemake_python_styleguide.constants import (
NESTED_CLASSES_WHITELIST,
NESTED_FUNCTIONS_WHITELIST,
)
from wemake_python_styleguide.types import AnyFunctionDef, AnyNodes, final
from wemake_python_styleguide.violations.best_practices import (
NestedClassViolation,
NestedFunctionViolation,
)
from wemake_python_styleguide.visitors.base import BaseNodeVisitor
from wemake_python_styleguide.visitors.decorators import alias
@final
@alias('visit_any_function', (
'visit_FunctionDef',
'visit_AsyncFunctionDef',
))
class NestedComplexityVisitor(BaseNodeVisitor):
"""
Checks that structures are not nested.
We disallow to use nested functions and nested classes.
Because flat is better than nested.
We allow to nest function inside classes, that's called methods.
"""
_function_nodes: ClassVar[AnyNodes] = (
ast.FunctionDef,
ast.AsyncFunctionDef,
)
def _check_nested_function(self, node: AnyFunctionDef) -> None:
parent = getattr(node, 'parent', None)
is_inside_function = isinstance(parent, self._function_nodes)
if is_inside_function and node.name not in NESTED_FUNCTIONS_WHITELIST:
self.add_violation(NestedFunctionViolation(node, text=node.name))
def _check_nested_classes(self, node: ast.ClassDef) -> None:
parent = getattr(node, 'parent', None)
is_inside_class = isinstance(parent, ast.ClassDef)
is_inside_function = isinstance(parent, self._function_nodes)
if is_inside_class and node.name not in NESTED_CLASSES_WHITELIST:
self.add_violation(NestedClassViolation(node, text=node.name))
elif is_inside_function:
self.add_violation(NestedClassViolation(node, text=node.name))
def _check_nested_lambdas(self, node: ast.Lambda) -> None:
parent = getattr(node, 'parent', None)
if isinstance(parent, ast.Lambda):
self.add_violation(NestedFunctionViolation(node))
def visit_ClassDef(self, node: ast.ClassDef) -> None:
"""
Used to find nested classes in other classes and functions.
Uses ``NESTED_CLASSES_WHITELIST`` to respect some nested classes.
Raises:
NestedClassViolation
"""
self._check_nested_classes(node)
self.generic_visit(node)
def visit_any_function(self, node: AnyFunctionDef) -> None:
"""
Used to find nested functions.
Uses ``NESTED_FUNCTIONS_WHITELIST`` to respect some nested functions.
Raises:
NestedFunctionViolation
"""
self._check_nested_function(node)
self.generic_visit(node)
def visit_Lambda(self, node: ast.Lambda) -> None:
"""
Used to find nested ``lambda`` functions.
Raises:
NestedFunctionViolation
"""
self._check_nested_lambdas(node)
self.generic_visit(node)
| 30.835052
| 78
| 0.693748
|
4a01cefe757b9d583a8081a8083840161a303937
| 11,014
|
py
|
Python
|
src/lti/tool_config.py
|
revsys/lti
|
0df2470126a88b0c642de209cc2ff6388b657557
|
[
"MIT"
] | 1
|
2018-03-28T05:42:48.000Z
|
2018-03-28T05:42:48.000Z
|
src/lti/tool_config.py
|
revsys/lti
|
0df2470126a88b0c642de209cc2ff6388b657557
|
[
"MIT"
] | 1
|
2018-03-28T06:02:44.000Z
|
2018-03-28T06:02:44.000Z
|
src/elti/tool_config.py
|
CaptainAchilles/lti
|
cf8df0c8115d7e0dd02b4c87376db2b87d3726d7
|
[
"MIT"
] | 2
|
2018-03-28T05:52:46.000Z
|
2020-06-05T00:53:43.000Z
|
from collections import defaultdict
from lxml import etree, objectify
from .utils import InvalidLTIConfigError
# Attribute names ToolConfig accepts as keyword arguments and exposes as
# instance members (see ToolConfig.__init__).
VALID_ATTRIBUTES = [
    'title',
    'description',
    'launch_url',
    'secure_launch_url',
    'icon',
    'secure_icon',
    'cartridge_bundle',
    'cartridge_icon',
    'vendor_code',
    'vendor_name',
    'vendor_description',
    'vendor_url',
    'vendor_contact_email',
    'vendor_contact_name'
]
# XML namespace map for the Common Cartridge / basic LTI configuration schema.
NSMAP = {
    'blti': 'http://www.imsglobal.org/xsd/imsbasiclti_v1p0',
    'xsi': "http://www.w3.org/2001/XMLSchema-instance",
    'lticp': 'http://www.imsglobal.org/xsd/imslticp_v1p0',
    'lticm': 'http://www.imsglobal.org/xsd/imslticm_v1p0',
}
class ToolConfig(object):
    '''
    Object used to represent LTI configuration.

    Capable of creating and reading the Common Cartridge XML representation of
    LTI links as described here:
    http://www.imsglobal.org/LTI/v1p1/ltiIMGv1p1.html#_Toc319560470

    TODO: Usage description
    '''
    def __init__(self, **kwargs):
        '''
        Create a new ToolConfig with the given options.

        Recognized keyword arguments are the names in VALID_ATTRIBUTES plus
        'custom_params' and 'extensions'; any other name raises
        InvalidLTIConfigError.
        '''
        # Initialize all class accessors to None
        for attr in VALID_ATTRIBUTES:
            setattr(self, attr, None)
        # Dict-valued options default to defaultdicts returning None for
        # unknown keys.
        for attr in ['custom_params', 'extensions']:
            if attr in kwargs:
                attr_val = kwargs.pop(attr)
            else:
                attr_val = defaultdict(lambda: None)
            setattr(self, attr, attr_val)
        # Iterate over all provided options and save to class instance members
        for (key, val) in kwargs.items():
            if key in VALID_ATTRIBUTES:
                setattr(self, key, val)
            else:
                # NOTE(review): the message says "outcome request option" --
                # likely copy-pasted from OutcomeRequest; probably should read
                # "tool config option". Left unchanged here.
                raise InvalidLTIConfigError(
                    "Invalid outcome request option: {}".format(key)
                )
@staticmethod
def create_from_xml(xml):
'''
Create a ToolConfig from the given XML.
'''
config = ToolConfig()
config.process_xml(xml)
return config
def set_custom_param(self, key, val):
'''
Set a custom parameter to provided value.
'''
self.custom_params[key] = val
    def get_custom_param(self, key):
        '''
        Gets a custom parameter. If not yet set, returns None object.

        NOTE: with the default defaultdict, an unset key yields None (and is
        materialized in the dict by the lookup).
        '''
        return self.custom_params[key]
    def set_ext_params(self, ext_key, ext_params):
        '''
        Set the full extension parameter set for a specific vendor/platform.
        '''
        self.extensions[ext_key] = ext_params
    def get_ext_params(self, ext_key):
        '''
        Get extension parameters for the given extension. If not set, returns
        None (via the defaultdict; the lookup materializes the key).
        '''
        return self.extensions[ext_key]
def set_ext_param(self, ext_key, param_key, val):
'''
Set the provided parameter in a set of extension parameters.
'''
self.extensions.setdefault(ext_key, defaultdict(lambda: None))
self.extensions[ext_key][param_key] = val
def get_ext_param(self, ext_key, param_key):
'''
Get specific param in set of provided extension parameters.
'''
if ext_key in self.extensions:
return self.extensions[ext_key].get(param_key)
def process_xml(self, xml):
'''
Parse tool configuration data out of the Common Cartridge LTI link XML.
'''
root = objectify.fromstring(xml, parser=etree.XMLParser())
# Parse all children of the root node
for child in root.getchildren():
if 'title' in child.tag:
self.title = child.text
if 'description' in child.tag:
self.description = child.text
if 'secure_launch_url' in child.tag:
self.secure_launch_url = child.text
elif 'launch_url' in child.tag:
self.launch_url = child.text
if 'icon' in child.tag:
self.icon = child.text
if 'secure_icon' in child.tag:
self.secure_icon = child.text
if 'cartridge_bundle' in child.tag:
self.cartridge_bundle = child.attrib['identifierref']
if 'catridge_icon' in child.tag:
self.cartridge_icon = child.atrib['identifierref']
if 'vendor' in child.tag:
# Parse vendor tag
for v_child in child.getchildren():
if 'code' in v_child.tag:
self.vendor_code = v_child.text
if 'description' in v_child.tag:
self.vendor_description = v_child.text
if 'name' in v_child.tag:
self.vendor_name = v_child.text
if 'url' in v_child.tag:
self.vendor_url = v_child.text
if 'contact' in v_child.tag:
# Parse contact tag for email and name
for c_child in v_child:
if 'name' in c_child.tag:
self.vendor_contact_name = c_child.text
if 'email' in c_child.tag:
self.vendor_contact_email = c_child.text
if 'custom' in child.tag:
# Parse custom tags
for custom_child in child.getchildren():
self.custom_params[custom_child.attrib['name']] =\
custom_child.text
if 'extensions' in child.tag:
platform = child.attrib['platform']
properties = {}
# Parse extension tags
for ext_child in child.getchildren():
if 'property' in ext_child.tag:
properties[ext_child.attrib['name']] = ext_child.text
elif 'options' in ext_child.tag:
opt_name = ext_child.attrib['name']
options = {}
for option_child in ext_child.getchildren():
options[option_child.attrib['name']] =\
option_child.text
properties[opt_name] = options
self.set_ext_params(platform, properties)
def recursive_options(self, element, params):
for key, val in sorted(params.items()):
if isinstance(val, dict):
options_node = etree.SubElement(
element,
'{%s}%s' % (NSMAP['lticm'], 'options'),
name=key,
)
for key, val in sorted(val.items()):
self.recursive_options(options_node, {key: val})
else:
param_node = etree.SubElement(
element,
'{%s}%s' % (NSMAP['lticm'], 'property'),
name=key,
)
param_node.text = val
def to_xml(self, opts=defaultdict(lambda: None)):
'''
Generate XML from the current settings.
'''
if not self.launch_url or not self.secure_launch_url:
raise InvalidLTIConfigError('Invalid LTI configuration')
root = etree.Element(
'cartridge_basiclti_link',
attrib={
'{%s}%s' % (NSMAP['xsi'], 'schemaLocation'): 'http://www.imsglobal.org/xsd/imslticc_v1p0 http://www.imsglobal.org/xsd/lti/ltiv1p0/imslticc_v1p0.xsd http://www.imsglobal.org/xsd/imsbasiclti_v1p0 http://www.imsglobal.org/xsd/lti/ltiv1p0/imsbasiclti_v1p0p1.xsd http://www.imsglobal.org/xsd/imslticm_v1p0 http://www.imsglobal.org/xsd/lti/ltiv1p0/imslticm_v1p0.xsd http://www.imsglobal.org/xsd/imslticp_v1p0 http://www.imsglobal.org/xsd/lti/ltiv1p0/imslticp_v1p0.xsd',
'xmlns': 'http://www.imsglobal.org/xsd/imslticc_v1p0',
},
nsmap=NSMAP,
)
for key in ['title', 'description', 'launch_url', 'secure_launch_url']:
option = etree.SubElement(root, '{%s}%s' % (NSMAP['blti'], key))
option.text = getattr(self, key)
vendor_keys = ['name', 'code', 'description', 'url']
if any('vendor_' + key for key in vendor_keys) or\
self.vendor_contact_email:
vendor_node = etree.SubElement(
root,
'{%s}%s' % (NSMAP['blti'], 'vendor'),
)
for key in vendor_keys:
if getattr(self, 'vendor_' + key) is not None:
v_node = etree.SubElement(
vendor_node,
'{%s}%s' % (NSMAP['lticp'], key),
)
v_node.text = getattr(self, 'vendor_' + key)
if getattr(self, 'vendor_contact_email'):
v_node = etree.SubElement(
vendor_node,
'{%s}%s' % (NSMAP['lticp'], 'contact'),
)
c_name = etree.SubElement(
v_node,
'{%s}%s' % (NSMAP['lticp'], 'name'),
)
c_name.text = self.vendor_contact_name
c_email = etree.SubElement(
v_node,
'{%s}%s' % (NSMAP['lticp'], 'email'),
)
c_email.text = self.vendor_contact_email
# Custom params
if len(self.custom_params) != 0:
custom_node = etree.SubElement(
root,
'{%s}%s' % (NSMAP['blti'], 'custom'),
)
for (key, val) in sorted(self.custom_params.items()):
c_node = etree.SubElement(
custom_node,
'{%s}%s' % (NSMAP['lticm'], 'property'),
)
c_node.set('name', key)
c_node.text = val
# Extension params
if len(self.extensions) != 0:
for (key, params) in sorted(self.extensions.items()):
extension_node = etree.SubElement(
root,
'{%s}%s' % (NSMAP['blti'], 'extensions'),
platform=key,
)
self.recursive_options(extension_node, params)
if getattr(self, 'cartridge_bundle'):
identifierref = etree.SubElement(
root,
'cartridge_bundle',
identifierref=self.cartridge_bundle,
)
if getattr(self, 'cartridge_icon'):
identifierref = etree.SubElement(
root,
'cartridge_icon',
identifierref=self.cartridge_icon,
)
declaration = b'<?xml version="1.0" encoding="UTF-8"?>'
return declaration + etree.tostring(root, encoding='utf-8')
| 38.376307
| 479
| 0.516797
|
4a01cfe44a15d157be41d37b367cd1564f18b3f9
| 917
|
py
|
Python
|
apps/tasks/urls.py
|
Sravanksk/fyle-qbo-api
|
53f595170a073f245b9930bfce2ca07bdf998ce3
|
[
"MIT"
] | null | null | null |
apps/tasks/urls.py
|
Sravanksk/fyle-qbo-api
|
53f595170a073f245b9930bfce2ca07bdf998ce3
|
[
"MIT"
] | null | null | null |
apps/tasks/urls.py
|
Sravanksk/fyle-qbo-api
|
53f595170a073f245b9930bfce2ca07bdf998ce3
|
[
"MIT"
] | null | null | null |
"""fyle_qbo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from .views import TasksView, TasksByIdView, TasksByExpenseGroupIdView
# URL routes for the tasks app.
urlpatterns = [
    # NOTE(review): the empty path serves TasksByIdView — presumably the
    # task id arrives some other way (query params); confirm against the view.
    path('', TasksByIdView.as_view()),
    # Tasks belonging to a single expense group, keyed by its integer id.
    path('expense_group/<int:expense_group_id>/', TasksByExpenseGroupIdView.as_view()),
    # All tasks.
    path('all/', TasksView.as_view())
]
| 36.68
| 87
| 0.718648
|
4a01d06f5195438416debbf98d914807536e8b31
| 36
|
py
|
Python
|
backups/__init__.py
|
daqbroker/daqbrokerServer
|
e8d2b72b4e3ab12c26dfa7b52e9d77097ede3f33
|
[
"MIT"
] | null | null | null |
backups/__init__.py
|
daqbroker/daqbrokerServer
|
e8d2b72b4e3ab12c26dfa7b52e9d77097ede3f33
|
[
"MIT"
] | null | null | null |
backups/__init__.py
|
daqbroker/daqbrokerServer
|
e8d2b72b4e3ab12c26dfa7b52e9d77097ede3f33
|
[
"MIT"
] | null | null | null |
from .rsyncServer import rsyncServer
| 36
| 36
| 0.888889
|
4a01d2787e7eab53b55121d2fdd302121768a482
| 391
|
py
|
Python
|
Gallery/wsgi.py
|
willardsigei/P-Gallery
|
e2d2de52616eb133cd1bb209f179e57272ab5527
|
[
"MIT"
] | null | null | null |
Gallery/wsgi.py
|
willardsigei/P-Gallery
|
e2d2de52616eb133cd1bb209f179e57272ab5527
|
[
"MIT"
] | null | null | null |
Gallery/wsgi.py
|
willardsigei/P-Gallery
|
e2d2de52616eb133cd1bb209f179e57272ab5527
|
[
"MIT"
] | null | null | null |
"""
WSGI config for Gallery project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Gallery.settings")
application = get_wsgi_application()
| 24.4375
| 78
| 0.787724
|
4a01d29f3f096d62b7e12ffd227ab20a3adbdf70
| 24,893
|
py
|
Python
|
src/aiocasambi/unit.py
|
hellqvio86/aiocasambi
|
2efd61ed8f8425f3a4a5b257f3addb3face5a185
|
[
"MIT"
] | 7
|
2020-06-14T20:43:29.000Z
|
2021-09-30T23:36:09.000Z
|
src/aiocasambi/unit.py
|
hellqvio86/aiocasambi
|
2efd61ed8f8425f3a4a5b257f3addb3face5a185
|
[
"MIT"
] | 2
|
2022-01-04T12:59:06.000Z
|
2022-01-07T09:46:13.000Z
|
src/aiocasambi/unit.py
|
hellqvio86/aiocasambi
|
2efd61ed8f8425f3a4a5b257f3addb3face5a185
|
[
"MIT"
] | 5
|
2020-06-14T19:12:37.000Z
|
2022-01-03T12:42:46.000Z
|
"""State representation of Casanbi Unit"""
import logging
import re
from pprint import pformat
from typing import Tuple, Union
from colorsys import rgb_to_hsv
from .errors import AiocasambiException
LOGGER = logging.getLogger(__name__)
UNIT_STATE_OFF = "off"
UNIT_STATE_ON = "on"
class Unit:
    """State representation of a single Casambi unit (e.g. a luminaire).

    Holds the unit's identity, its reported control capabilities
    (dimmer, CCT, RGB, white channel) and the websocket controller used
    to send ``controlUnit`` messages.
    """
    def __init__(
        self,
        *,
        name: str,
        address: str,
        unit_id: int,
        network_id: int,
        wire_id: int,
        controller,
        controls: dict,
        value: float = 0,
        online: bool = True,
        enabled: bool = True,
        state: str = UNIT_STATE_OFF,
    ):
        self._name = name
        self._address = address
        self._unit_id = int(unit_id)
        self._network_id = network_id
        self._value = value
        self._state = state
        self._fixture_model = None
        self._fixture = None
        self._wire_id = wire_id
        self._controller = controller
        self._oem = None
        self._online = online
        self._enabled = enabled
        # Index the reported controls by their "type" field for O(1) lookup.
        self._controls = {control["type"]: control for control in controls}
    def _unit_id_as_int(self) -> int:
        """
        Return this unit's id coerced to int.

        Raises AiocasambiException for unsupported id types. Shared by all
        control methods (previously duplicated in each of them).
        """
        unit_id = self._unit_id
        if isinstance(unit_id, int):
            return unit_id
        if isinstance(unit_id, (str, float)):
            return int(unit_id)
        raise AiocasambiException(
            f"expected unit_id to be an integer, got: {unit_id}"
        )
    def _control_message(self, target_controls: dict) -> dict:
        """Build a 'controlUnit' websocket message for this unit."""
        return {
            "wire": self._wire_id,
            "method": "controlUnit",
            "id": self._unit_id_as_int(),
            "targetControls": target_controls,
        }
    @property
    def value(self) -> float:
        """
        Current dimmer value (0..1); 0 when no Dimmer control is known.
        """
        if "Dimmer" in self._controls:
            return self._controls["Dimmer"]["value"]
        err_msg = f"unit_id={self._unit_id} - value - "
        err_msg += f"Dimmer is missing in controls: {self._controls}"
        LOGGER.debug(err_msg)
        return 0
    @value.setter
    def value(self, value: float) -> None:
        """
        Set the cached value (0..1) and derive the on/off state from it.
        """
        LOGGER.debug(f"unit_id={self._unit_id} - value - setting value to: {value}")
        if value == 0:
            self._state = UNIT_STATE_OFF
            self._value = value
        elif 0 < value <= 1:
            self._state = UNIT_STATE_ON
            self._value = value
        else:
            raise AiocasambiException(f"invalid value {value} for {self}")
    @property
    def name(self) -> str:
        """Getter for name."""
        return self._name
    @name.setter
    def name(self, name: str) -> None:
        """Setter for name."""
        self._name = name
    @property
    def fixture_model(self) -> str:
        """Getter for fixture model."""
        return self._fixture_model
    @fixture_model.setter
    def fixture_model(self, fixture_model: str) -> None:
        """Setter for fixture model."""
        self._fixture_model = fixture_model
    @property
    def online(self) -> bool:
        """Getter for online."""
        return self._online
    @online.setter
    def online(self, online: bool) -> None:
        """Setter for online; logs transitions between online and offline."""
        if not self._online and online:
            LOGGER.info(f"unit_id={self._unit_id} - online - unit is back online")
        elif self._online and not online:
            LOGGER.debug(f"unit_id={self._unit_id} - online - Setting unit to offline")
        self._online = online
    @property
    def controls(self) -> dict:
        """Getter for controls state (dict keyed by control type)."""
        return self._controls
    @controls.setter
    def controls(self, controls: Union[list, dict]) -> None:
        """
        Setter for controls. Accepts a single control dict or a list of
        control dicts (the list is merged entry by entry).
        """
        if isinstance(controls, list):
            for control in controls:
                # Recursive call, merging one control at a time
                self.controls = control
        elif isinstance(controls, dict):
            LOGGER.debug(
                f"unit_id={self._unit_id} - setter controls - Adding following control to controls: {controls}"
            )
            self._controls[controls["type"]] = controls
    @property
    def oem(self) -> str:
        """Getter for oem."""
        return self._oem
    @oem.setter
    def oem(self, oem: str) -> None:
        """Setter for oem."""
        self._oem = oem
    @property
    def fixture(self) -> str:
        """Getter for fixture."""
        return self._fixture
    @fixture.setter
    def fixture(self, fixture: str) -> None:
        """Setter for fixture."""
        self._fixture = fixture
    @property
    def enabled(self) -> bool:
        """Getter for enabled."""
        return self._enabled
    @enabled.setter
    def enabled(self, enabled: bool) -> None:
        """Setter for enabled."""
        self._enabled = enabled
    @property
    def state(self) -> str:
        """Getter for state."""
        return self._state
    @state.setter
    def state(self, state: str) -> None:
        """Setter for state; forcing 'off' also zeroes the cached value."""
        if state == UNIT_STATE_OFF:
            self.value = 0
        self._state = state
    @property
    def unique_id(self) -> str:
        """Unique id combining network id and unit id."""
        return f"{self._network_id}-{self._unit_id}"
    @property
    def controller(self):
        """Getter for controller."""
        return self._controller
    @controller.setter
    def controller(self, controller):
        """Setter for controller."""
        self._controller = controller
    async def turn_unit_off(self) -> None:
        """Turn this unit off (Dimmer -> 0)."""
        message = self._control_message({"Dimmer": {"value": 0}})
        await self._controller.ws_send_message(message)
    async def turn_unit_on(self) -> None:
        """
        Turn this unit on (Dimmer -> 1).

        Response on ok:
        {'wire': 1, 'method': 'peerChanged', 'online': True}
        """
        message = self._control_message({"Dimmer": {"value": 1}})
        await self._controller.ws_send_message(message)
    async def set_unit_rgbw(
        self, *, color_value: Tuple[int, int, int, int]
    ) -> None:
        """
        Set RGBW color.

        `color_value` is (red, green, blue, white), each 0-255; the white
        channel is scaled to the 0-1 range the API expects.
        (Annotation fixed: this method unpacks four values, not three.)
        """
        (red, green, blue, white) = color_value
        white_value = white / 255.0
        # 'name': 'white', 'type': 'White', 'value': 0.0
        target_controls = {
            "RGB": {"rgb": f"rgb({red}, {green}, {blue})"},
            "Colorsource": {"source": "RGB"},
            "White": {"value": white_value},
        }
        message = self._control_message(target_controls)
        dbg_msg = f"Setting color to rgb({red}, {green}, {blue}, {white}) "
        dbg_msg += f"sending: {pformat(message)}"
        LOGGER.debug(f"unit_id={self._unit_id} - set_unit_rgbw - {dbg_msg}")
        await self._controller.ws_send_message(message)
    async def set_unit_rgb(
        self, *, color_value: Tuple[int, int, int], send_rgb_format=False
    ) -> None:
        """
        Set RGB color, sent either as rounded hue/sat (default) or as an
        rgb() string when send_rgb_format is True.
        """
        (red, green, blue) = color_value
        (hue, sat, value) = rgb_to_hsv(red, green, blue)
        if send_rgb_format:
            rgb_control = {"rgb": f"rgb({red}, {green}, {blue})"}
        else:
            rgb_control = {"hue": round(hue, 1), "sat": round(sat, 1)}
        target_controls = {
            "RGB": rgb_control,
            "Colorsource": {"source": "RGB"},
        }
        message = self._control_message(target_controls)
        dbg_msg = f"Setting color to rgb({red}, {green}, {blue}) "
        dbg_msg += f"- (hue: {hue}, sat: {sat}, value: {value}) - "
        dbg_msg += f"- send_rgb_format: {send_rgb_format} - "
        dbg_msg += f"sending: {pformat(message)}"
        LOGGER.debug(f"unit_id={self._unit_id} - set_unit_rgb - {dbg_msg}")
        await self._controller.ws_send_message(message)
    async def set_unit_color_temperature(self, *, value: int, source="TW") -> None:
        """
        Set the unit's color temperature.

        `value` is kelvin, or mired when source == 'mired' (converted to
        kelvin and rounded up to the nearest 50 K, like the GUI). The target
        is clamped to the unit's supported CCT range.
        """
        target_value = value
        if source == "mired":
            # Convert mired to Kelvin (M = 1000000 / T)
            target_value = round(1000000 / value)
            # Round up to nearest 50 kelvin, like the GUI is doing
            if target_value % 50 != 0:
                target_value = int(target_value / 50) * 50 + 50
            dbg_msg = f"converting target value to {target_value}"
            dbg_msg += " (nearest 50 kelvin like GUI)"
            LOGGER.debug(
                f"unit_id={self._unit_id} - set_unit_color_temperature - {dbg_msg}"
            )
        # Get min and max temperature color in kelvin and clamp to them
        (cct_min, cct_max, _) = self.get_supported_color_temperature()
        if target_value < cct_min:
            dbg_msg = f"target_value: {target_value}"
            dbg_msg += " smaller than min supported temperature,"
            dbg_msg += " setting to min supported color temperature:"
            dbg_msg += f" {cct_min}"
            LOGGER.debug(
                f"unit_id={self._unit_id} - set_unit_color_temperature - {dbg_msg}"
            )
            target_value = cct_min
        elif target_value > cct_max:
            dbg_msg = f"target_value: {target_value}"
            dbg_msg += " larger than max supported temperature,"
            dbg_msg += " setting to max supported color temperature:"
            dbg_msg += f" {cct_max}"
            LOGGER.debug(
                f"unit_id={self._unit_id} - set_unit_color_temperature - {dbg_msg}"
            )
            target_value = cct_max
        target_controls = {
            "ColorTemperature": {"value": target_value},
            "Colorsource": {"source": "TW"},
        }
        message = self._control_message(target_controls)
        dbg_msg = f"value: {value}, source: {source} "
        dbg_msg += f"sending: {message}"
        LOGGER.debug(
            f"unit_id={self._unit_id} - set_unit_color_temperature - {dbg_msg}"
        )
        await self._controller.ws_send_message(message)
    async def set_unit_value(self, *, value: Union[float, int]) -> None:
        """
        Set the dimmer to `value` (0..1) and update the cached state.

        Response on ok:
        {'wire': 1, 'method': 'peerChanged', 'online': True}
        """
        if not 0 <= value <= 1:
            raise AiocasambiException("value needs to be between 0 and 1")
        message = self._control_message({"Dimmer": {"value": value}})
        self.value = value
        LOGGER.debug(f"unit_id={self._unit_id} - set_unit_value - value={value}")
        await self._controller.ws_send_message(message)
    def get_supported_color_temperature(self) -> Tuple[int, int, int]:
        """
        Return (min, max, current) color temperature in kelvin,
        (0, 0, 0) if color temperature is not supported.
        """
        cct_min = 0
        cct_max = 0
        current = 0
        if not self._controls:
            LOGGER.debug(f"unit_id={self._unit_id} control is None")
            # Bug fix: previously returned the *builtins* min/max here
            return (cct_min, cct_max, current)
        if "CCT" in self._controls and self._controls["CCT"]:
            cct_min = self._controls["CCT"]["min"]
            cct_max = self._controls["CCT"]["max"]
            current = self._controls["CCT"]["value"]
        dbg_msg = "returning "
        dbg_msg += f"min={cct_min} max={cct_max} current={current} "
        dbg_msg += f"for name={self.name}"
        LOGGER.debug(
            f"unit_id={self._unit_id} - get_supported_color_temperature - {dbg_msg}"
        )
        return (cct_min, cct_max, current)
    def get_max_mired(self) -> int:
        """
        Return the warmest supported value in mired.

        M = 1000000 / T, so the mired maximum corresponds to the
        kelvin minimum of the CCT control.
        """
        cct_min = self._controls["CCT"]["min"]
        result = round(1000000 / cct_min)
        dbg_msg = f"returning {result} (in kv {cct_min}) "
        dbg_msg += f"for name={self.name}"
        LOGGER.debug(f"unit_id={self._unit_id} - get_max_mired - {dbg_msg}")
        return result
    def get_min_mired(self) -> int:
        """
        Return the coolest supported value in mired.

        M = 1000000 / T, so the mired minimum corresponds to the
        kelvin maximum of the CCT control.
        """
        cct_max = self._controls["CCT"]["max"]
        result = round(1000000 / cct_max)
        dbg_msg = f"returning {result} (in kv {cct_max}) "
        dbg_msg += f"for name={self.name}"
        LOGGER.debug(f"unit_id={self._unit_id} - get_min_mired - {dbg_msg}")
        return result
    def get_color_temp(self) -> int:
        """
        Return the current color temperature in mired (M = 1000000 / kelvin).
        """
        cct_value = self._controls["CCT"]["value"]
        result = round(1000000 / cct_value)
        dbg_msg = f"returning {result} (in kv {cct_value}) "
        dbg_msg += f"for name={self.name}"
        LOGGER.debug(f"unit_id={self._unit_id} - get_color_temp - {dbg_msg}")
        return result
    def get_rgb_color(self) -> Tuple[int, int, int]:
        """
        Return the current (red, green, blue) tuple parsed from the Color
        control's 'rgb(r, g, b)' string; (0, 0, 0) if parsing fails.
        """
        red = 0
        green = 0
        blue = 0
        regexp = re.compile(
            r"rgb\(\s*(?P<red>\d+),\s+(?P<green>\d+),\s+(?P<blue>\d+)\)"
        )
        rgb_value = self._controls["Color"]["rgb"]
        match = regexp.match(rgb_value)
        if match:
            red = int(match.group("red"))
            green = int(match.group("green"))
            blue = int(match.group("blue"))
        else:
            err_msg = f"failed to parse rgb_value: {rgb_value}"
            LOGGER.error(f"unit_id={self._unit_id} - get_rgb_color - {err_msg}")
        dbg_msg = f"returning ({red}, {green}, {blue}) "
        dbg_msg += f"for name={self.name}"
        LOGGER.debug(f"unit_id={self._unit_id} - get_rgb_color - {dbg_msg}")
        return (red, green, blue)
    def get_rgbw_color(self) -> Tuple[int, int, int, int]:
        """
        Return (red, green, blue, white); white is scaled from the API's
        0-1 range back to 0-255.
        """
        (red, green, blue) = self.get_rgb_color()
        white = self._controls["White"]["value"]
        return (red, green, blue, int(round(white * 255, 0)))
    def supports_rgbw(self) -> bool:
        """
        Return True if the unit reports both a Color and a White control.
        """
        if not self._controls:
            LOGGER.debug(f"unit_id={self._unit_id} - supports_rgbw - controls is None")
            return False
        return "Color" in self._controls and "White" in self._controls
    def supports_rgb(self) -> bool:
        """
        Return True if the unit reports a Color control.
        """
        if not self._controls:
            LOGGER.debug(f"unit_id={self._unit_id} - supports_rgb - controls is None")
            return False
        return "Color" in self._controls
    def supports_color_temperature(self) -> bool:
        """
        Return True if the unit reports a CCT (color temperature) control.
        """
        if not self._controls:
            LOGGER.debug(
                f"unit_id={self._unit_id} - supports_color_temperature - controls is None"
            )
            return False
        return "CCT" in self._controls
    def supports_brightness(self) -> bool:
        """
        Return True if the unit reports a Dimmer control.
        """
        if not self._controls:
            LOGGER.debug(
                f"unit_id={self._unit_id} - supports_brightness - controls is None"
            )
            return False
        return "Dimmer" in self._controls
    def __repr__(self) -> str:
        """Return the representation."""
        result = f"<Unit {self._name}:"
        result += f"unit_id={self._unit_id} "
        result += f"address={self._address} "
        result += f"value={self._value} "
        result += f"state={self._state} "
        result += f"online={self._online} "
        result += f"network_id={self._network_id} "
        result += f"wire_id={self._wire_id}"
        if self._fixture:
            # Typo fix: was "fixure="
            result = f"{result} fixture={self._fixture}"
        if self._fixture_model:
            result = f"{result} fixture_model={self._fixture_model}"
        if self._oem:
            result = f"{result} oem={self._oem}"
        if self._controls:
            # Controls state is set, not None
            result = f"{result} supports_brightness="
            result = f"{result}{self.supports_brightness()}"
            result = f"{result} supports_color_temperature="
            result = f"{result}{self.supports_color_temperature()}"
            result = f"{result} supports_rgb="
            result = f"{result}{self.supports_rgb()}"
            result = f"{result} supports_rgbw="
            result = f"{result}{self.supports_rgbw()}"
            result = f"{result} controls={self._controls}"
        result = f"{result} >"
        return result
| 28.579793
| 111
| 0.496003
|
4a01d30c6bb9a136bf26ad187630bb8e8b30a50c
| 6,846
|
py
|
Python
|
core/migrations/0001_initial.py
|
luisza/vcl_django
|
43d04f7951cb8805502e51f6f6360c7ec63215cc
|
[
"Apache-2.0"
] | null | null | null |
core/migrations/0001_initial.py
|
luisza/vcl_django
|
43d04f7951cb8805502e51f6f6360c7ec63215cc
|
[
"Apache-2.0"
] | null | null | null |
core/migrations/0001_initial.py
|
luisza/vcl_django
|
43d04f7951cb8805502e51f6f6360c7ec63215cc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-02 07:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the VCL core app, auto-generated by Django 1.9.5.
    # NOTE(review): generated migration — prefer new migrations over editing
    # these operations by hand; db_table names mirror the legacy VCL schema.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Adminlevel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=10, unique=True)),
            ],
            options={
                'db_table': 'adminlevel',
            },
        ),
        migrations.CreateModel(
            name='Affiliation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40, unique=True)),
                ('shibname', models.CharField(blank=True, max_length=60, null=True)),
                ('dataupdatetext', models.TextField(db_column='dataUpdateText')),
                ('sitewwwaddress', models.CharField(blank=True, max_length=56, null=True)),
                ('helpaddress', models.CharField(blank=True, max_length=32, null=True)),
                ('shibonly', models.IntegerField()),
                ('theme', models.CharField(max_length=50)),
            ],
            options={
                'db_table': 'affiliation',
            },
        ),
        migrations.CreateModel(
            name='Continuations',
            fields=[
                ('id', models.CharField(max_length=255, primary_key=True, serialize=False)),
                ('userid', models.IntegerField()),
                ('expiretime', models.DateTimeField()),
                ('frommode', models.CharField(max_length=50)),
                ('tomode', models.CharField(max_length=50)),
                ('data', models.TextField()),
                ('multicall', models.IntegerField()),
                ('deletefromid', models.CharField(max_length=255)),
                # Self-referential parent link
                ('parentid', models.ForeignKey(blank=True, db_column='parentid', null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Continuations')),
            ],
            options={
                'db_table': 'continuations',
            },
        ),
        migrations.CreateModel(
            name='Documentation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True)),
                ('title', models.CharField(max_length=255)),
                ('data', models.TextField()),
            ],
            options={
                'db_table': 'documentation',
            },
        ),
        migrations.CreateModel(
            name='Imtype',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, unique=True)),
            ],
            options={
                'db_table': 'IMtype',
            },
        ),
        migrations.CreateModel(
            name='Module',
            fields=[
                ('id', models.SmallIntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=30, unique=True)),
                ('prettyname', models.CharField(max_length=70)),
                ('description', models.CharField(max_length=255)),
                ('perlpackage', models.CharField(max_length=150)),
            ],
            options={
                'db_table': 'module',
            },
        ),
        migrations.CreateModel(
            name='Osinstalltype',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True)),
            ],
            options={
                'db_table': 'OSinstalltype',
            },
        ),
        migrations.CreateModel(
            name='Platform',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, unique=True)),
            ],
            options={
                'db_table': 'platform',
            },
        ),
        migrations.CreateModel(
            name='Privnode',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                # Self-referential tree structure; uniqueness per parent is
                # enforced by the AlterUniqueTogether operation below.
                ('parent', models.ForeignKey(db_column='parent', on_delete=django.db.models.deletion.CASCADE, to='core.Privnode')),
            ],
            options={
                'db_table': 'privnode',
            },
        ),
        migrations.CreateModel(
            name='Sitemaintenance',
            fields=[
                ('id', models.SmallIntegerField(primary_key=True, serialize=False)),
                ('start', models.DateTimeField()),
                ('end', models.DateTimeField()),
                ('ownerid', models.IntegerField()),
                ('created', models.DateTimeField()),
                ('reason', models.TextField(blank=True, null=True)),
                ('usermessage', models.TextField()),
                ('informhoursahead', models.SmallIntegerField()),
                ('allowreservations', models.IntegerField()),
            ],
            options={
                'db_table': 'sitemaintenance',
            },
        ),
        migrations.CreateModel(
            name='State',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, unique=True)),
            ],
            options={
                'db_table': 'state',
            },
        ),
        migrations.CreateModel(
            name='Variable',
            fields=[
                ('id', models.SmallIntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=128, unique=True)),
                ('serialization', models.CharField(max_length=12)),
                ('value', models.TextField()),
                ('setby', models.CharField(blank=True, max_length=128, null=True)),
                ('timestamp', models.DateTimeField()),
            ],
            options={
                'db_table': 'variable',
            },
        ),
        migrations.AlterUniqueTogether(
            name='privnode',
            unique_together=set([('parent', 'name')]),
        ),
    ]
| 39.802326
| 163
| 0.513877
|
4a01d37056c1dd5f4d245a6c3159ff9aaaadc252
| 12,995
|
py
|
Python
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_system_resource_limits.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 17
|
2017-06-07T23:15:01.000Z
|
2021-08-30T14:32:36.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_system_resource_limits.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 32
|
2018-10-09T04:13:42.000Z
|
2020-05-11T07:20:28.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_system_resource_limits.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 11
|
2018-10-09T00:14:53.000Z
|
2021-11-03T10:54:09.000Z
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_resource_limits
short_description: Configure resource limits in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and resource_limits category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
system_resource_limits:
description:
- Configure resource limits.
default: null
type: dict
suboptions:
custom_service:
description:
- Maximum number of firewall custom services.
type: int
dialup_tunnel:
description:
- Maximum number of dial-up tunnels.
type: int
firewall_address:
description:
- Maximum number of firewall addresses (IPv4, IPv6, multicast).
type: int
firewall_addrgrp:
description:
- Maximum number of firewall address groups (IPv4, IPv6).
type: int
firewall_policy:
description:
- Maximum number of firewall policies (IPv4, IPv6, policy46, policy64, DoS-policy4, DoS-policy6, multicast).
type: int
ipsec_phase1:
description:
- Maximum number of VPN IPsec phase1 tunnels.
type: int
ipsec_phase1_interface:
description:
- Maximum number of VPN IPsec phase1 interface tunnels.
type: int
ipsec_phase2:
description:
- Maximum number of VPN IPsec phase2 tunnels.
type: int
ipsec_phase2_interface:
description:
- Maximum number of VPN IPsec phase2 interface tunnels.
type: int
log_disk_quota:
description:
- Log disk quota in MB.
type: int
onetime_schedule:
description:
- Maximum number of firewall one-time schedules.
type: int
proxy:
description:
- Maximum number of concurrent proxy users.
type: int
recurring_schedule:
description:
- Maximum number of firewall recurring schedules.
type: int
service_group:
description:
- Maximum number of firewall service groups.
type: int
session:
description:
- Maximum number of sessions.
type: int
sslvpn:
description:
- Maximum number of SSL-VPN.
type: int
user:
description:
- Maximum number of local users.
type: int
user_group:
description:
- Maximum number of user groups.
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure resource limits.
fortios_system_resource_limits:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
system_resource_limits:
custom_service: "3"
dialup_tunnel: "4"
firewall_address: "5"
firewall_addrgrp: "6"
firewall_policy: "7"
ipsec_phase1: "8"
ipsec_phase1_interface: "9"
ipsec_phase2: "10"
ipsec_phase2_interface: "11"
log_disk_quota: "12"
onetime_schedule: "13"
proxy: "14"
recurring_schedule: "15"
service_group: "16"
session: "17"
sslvpn: "18"
user: "19"
user_group: "20"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate the *fos* FortiOSAPI handle against the device in *data*.

    Reads 'host', 'username', 'password' and 'ssl_verify' from *data*; the
    optional 'https' key switches the transport (HTTPS stays on unless the
    key is present and falsy).
    """
    fos.debug('on')
    use_https = data.get('https', True)
    fos.https('on' if use_https else 'off')
    fos.login(data['host'], data['username'], data['password'],
              verify=data['ssl_verify'])
def filter_system_resource_limits_data(json):
    """Return a copy of *json* restricted to the options the FortiOS
    system/resource-limits endpoint understands, dropping None values."""
    option_list = ['custom_service', 'dialup_tunnel', 'firewall_address',
                   'firewall_addrgrp', 'firewall_policy', 'ipsec_phase1',
                   'ipsec_phase1_interface', 'ipsec_phase2', 'ipsec_phase2_interface',
                   'log_disk_quota', 'onetime_schedule', 'proxy',
                   'recurring_schedule', 'service_group', 'session',
                   'sslvpn', 'user', 'user_group']
    return {option: json[option]
            for option in option_list
            if option in json and json[option] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from snake_case to hyphen-case.

    FortiOS expects 'firewall-address' style keys while the Ansible module
    uses 'firewall_address'. Keys are renamed at dict level only; values
    (and list elements) are converted recursively; scalars pass through
    unchanged. Always use the return value.
    """
    if isinstance(data, list):
        # Rebuild the list. The original `for elem in data: elem = ...`
        # rebound the loop variable and discarded the converted element,
        # so dicts nested inside lists were never converted.
        return [underscore_to_hyphen(elem) for elem in data]
    if isinstance(data, dict):
        return {k.replace('_', '-'): underscore_to_hyphen(v)
                for k, v in data.items()}
    return data
def system_resource_limits(data, fos):
    """Push the resource-limits configuration to the device.

    Filters the playbook-supplied options down to the known set, converts
    snake_case option names to the hyphenated form FortiOS expects, then
    issues a set() against the 'system'/'resource-limits' CMDB path in the
    requested vdom. Returns the raw FortiOS response dict.
    """
    vdom = data['vdom']
    system_resource_limits_data = data['system_resource_limits']
    # Drop unknown/None options first, then rename keys (e.g. firewall_address -> firewall-address).
    filtered_data = underscore_to_hyphen(filter_system_resource_limits_data(system_resource_limits_data))
    return fos.set('system',
                   'resource-limits',
                   data=filtered_data,
                   vdom=vdom)
def is_successful_status(status):
    """Decide whether a FortiOS response dict represents success.

    Success is either an explicit "success" status, or a DELETE that hit a
    missing object (HTTP 404), which is treated as idempotently fine.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
    """Dispatch the requested 'system' category operation to the device.

    Returns the (is_error, has_changed, result) triple that main() feeds to
    exit_json/fail_json.

    NOTE(review): if data['system_resource_limits'] is None/empty, `resp`
    is never assigned and the return line raises UnboundLocalError. main()
    always supplies the key, but the guard looks unintentional — confirm
    against the upstream module before relying on it.
    """
    if data['system_resource_limits']:
        resp = system_resource_limits(data, fos)
    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Ansible entry point: parse arguments, connect to FortiOS, apply config.

    Two transports are supported: the modern HTTPAPI connection plugin
    (socket provided by Ansible) and the legacy fortiosapi library, chosen
    when host/username/password are all supplied explicitly.
    """
    # Argument spec mirrors the DOCUMENTATION block above; every resource
    # limit is an optional int nested under system_resource_limits.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "system_resource_limits": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "custom_service": {"required": False, "type": "int"},
                "dialup_tunnel": {"required": False, "type": "int"},
                "firewall_address": {"required": False, "type": "int"},
                "firewall_addrgrp": {"required": False, "type": "int"},
                "firewall_policy": {"required": False, "type": "int"},
                "ipsec_phase1": {"required": False, "type": "int"},
                "ipsec_phase1_interface": {"required": False, "type": "int"},
                "ipsec_phase2": {"required": False, "type": "int"},
                "ipsec_phase2_interface": {"required": False, "type": "int"},
                "log_disk_quota": {"required": False, "type": "int"},
                "onetime_schedule": {"required": False, "type": "int"},
                "proxy": {"required": False, "type": "int"},
                "recurring_schedule": {"required": False, "type": "int"},
                "service_group": {"required": False, "type": "int"},
                "session": {"required": False, "type": "int"},
                "sslvpn": {"required": False, "type": "int"},
                "user": {"required": False, "type": "int"},
                "user_group": {"required": False, "type": "int"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # HTTPAPI path: Ansible hands us a persistent connection socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_system(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: direct connection via the optional fortiosapi package.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_system(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
    main()
| 32.732997
| 128
| 0.587072
|
4a01d4e5c75d44d7afd9fa10688cd5ec3b5afd9b
| 20,919
|
py
|
Python
|
Lib/test/test_scope.py
|
Space0726/RustPython
|
7a89a4bcfddfaac8560bdb33d53621e1639b2e70
|
[
"MIT"
] | null | null | null |
Lib/test/test_scope.py
|
Space0726/RustPython
|
7a89a4bcfddfaac8560bdb33d53621e1639b2e70
|
[
"MIT"
] | null | null | null |
Lib/test/test_scope.py
|
Space0726/RustPython
|
7a89a4bcfddfaac8560bdb33d53621e1639b2e70
|
[
"MIT"
] | null | null | null |
import unittest
import weakref
from test.support import check_syntax_error, cpython_only
class ScopeTests(unittest.TestCase):
def testSimpleNesting(self):
def make_adder(x):
def adder(y):
return x + y
return adder
inc = make_adder(1)
plus10 = make_adder(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testExtraNesting(self):
def make_adder2(x):
def extra(): # check freevars passing through non-use scopes
def adder(y):
return x + y
return adder
return extra()
inc = make_adder2(1)
plus10 = make_adder2(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testSimpleAndRebinding(self):
def make_adder3(x):
def adder(y):
return x + y
x = x + 1 # check tracking of assignment to x in defining scope
return adder
inc = make_adder3(0)
plus10 = make_adder3(9)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testNestingGlobalNoFree(self):
def make_adder4(): # XXX add exta level of indirection
def nest():
def nest():
def adder(y):
return global_x + y # check that plain old globals work
return adder
return nest()
return nest()
global_x = 1
adder = make_adder4()
self.assertEqual(adder(1), 2)
global_x = 10
self.assertEqual(adder(-2), 8)
def testNestingThroughClass(self):
def make_adder5(x):
class Adder:
def __call__(self, y):
return x + y
return Adder()
inc = make_adder5(1)
plus10 = make_adder5(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testNestingPlusFreeRefToGlobal(self):
def make_adder6(x):
global global_nest_x
def adder(y):
return global_nest_x + y
global_nest_x = x
return adder
inc = make_adder6(1)
plus10 = make_adder6(10)
self.assertEqual(inc(1), 11) # there's only one global
self.assertEqual(plus10(-2), 8)
def testNearestEnclosingScope(self):
def f(x):
def g(y):
x = 42 # check that this masks binding in f()
def h(z):
return x + z
return h
return g(2)
test_func = f(10)
self.assertEqual(test_func(5), 47)
def testMixedFreevarsAndCellvars(self):
def identity(x):
return x
def f(x, y, z):
def g(a, b, c):
a = a + x # 3
def h():
# z * (4 + 9)
# 3 * 13
return identity(z * (b + y))
y = c + z # 9
return h
return g
g = f(1, 2, 3)
h = g(2, 4, 6)
self.assertEqual(h(), 39)
@unittest.expectedFailure # TODO RustPython
def testFreeVarInMethod(self):
def test():
method_and_var = "var"
class Test:
def method_and_var(self):
return "method"
def test(self):
return method_and_var
def actual_global(self):
return str("global")
def str(self):
return str(self)
return Test()
t = test()
self.assertEqual(t.test(), "var")
self.assertEqual(t.method_and_var(), "method")
self.assertEqual(t.actual_global(), "global")
method_and_var = "var"
class Test:
# this class is not nested, so the rules are different
def method_and_var(self):
return "method"
def test(self):
return method_and_var
def actual_global(self):
return str("global")
def str(self):
return str(self)
t = Test()
self.assertEqual(t.test(), "var")
self.assertEqual(t.method_and_var(), "method")
self.assertEqual(t.actual_global(), "global")
def testCellIsKwonlyArg(self):
# Issue 1409: Initialisation of a cell value,
# when it comes from a keyword-only parameter
def foo(*, a=17):
def bar():
return a + 5
return bar() + 3
self.assertEqual(foo(a=42), 50)
self.assertEqual(foo(), 25)
def testRecursion(self):
def f(x):
def fact(n):
if n == 0:
return 1
else:
return n * fact(n - 1)
if x >= 0:
return fact(x)
else:
raise ValueError("x must be >= 0")
self.assertEqual(f(6), 720)
@unittest.expectedFailure # TODO RustPython
def testUnoptimizedNamespaces(self):
check_syntax_error(self, """if 1:
def unoptimized_clash1(strip):
def f(s):
from sys import *
return getrefcount(s) # ambiguity: free or local
return f
""")
check_syntax_error(self, """if 1:
def unoptimized_clash2():
from sys import *
def f(s):
return getrefcount(s) # ambiguity: global or local
return f
""")
check_syntax_error(self, """if 1:
def unoptimized_clash2():
from sys import *
def g():
def f(s):
return getrefcount(s) # ambiguity: global or local
return f
""")
check_syntax_error(self, """if 1:
def f():
def g():
from sys import *
return getrefcount # global or local?
""")
def testLambdas(self):
f1 = lambda x: lambda y: x + y
inc = f1(1)
plus10 = f1(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(5), 15)
f2 = lambda x: (lambda : lambda y: x + y)()
inc = f2(1)
plus10 = f2(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(5), 15)
f3 = lambda x: lambda y: global_x + y
global_x = 1
inc = f3(None)
self.assertEqual(inc(2), 3)
f8 = lambda x, y, z: lambda a, b, c: lambda : z * (b + y)
g = f8(1, 2, 3)
h = g(2, 4, 6)
self.assertEqual(h(), 18)
@unittest.expectedFailure # TODO RustPython
def testUnboundLocal(self):
def errorInOuter():
print(y)
def inner():
return y
y = 1
def errorInInner():
def inner():
return y
inner()
y = 1
self.assertRaises(UnboundLocalError, errorInOuter)
self.assertRaises(NameError, errorInInner)
@unittest.expectedFailure # TODO RustPython
def testUnboundLocal_AfterDel(self):
# #4617: It is now legal to delete a cell variable.
# The following functions must obviously compile,
# and give the correct error when accessing the deleted name.
def errorInOuter():
y = 1
del y
print(y)
def inner():
return y
def errorInInner():
def inner():
return y
y = 1
del y
inner()
self.assertRaises(UnboundLocalError, errorInOuter)
self.assertRaises(NameError, errorInInner)
@unittest.expectedFailure # TODO RustPython
def testUnboundLocal_AugAssign(self):
# test for bug #1501934: incorrect LOAD/STORE_GLOBAL generation
exec("""if 1:
global_x = 1
def f():
global_x += 1
try:
f()
except UnboundLocalError:
pass
else:
fail('scope of global_x not correctly determined')
""", {'fail': self.fail})
def testComplexDefinitions(self):
def makeReturner(*lst):
def returner():
return lst
return returner
self.assertEqual(makeReturner(1,2,3)(), (1,2,3))
def makeReturner2(**kwargs):
def returner():
return kwargs
return returner
self.assertEqual(makeReturner2(a=11)()['a'], 11)
@unittest.expectedFailure # TODO RustPython
def testScopeOfGlobalStmt(self):
# Examples posted by Samuele Pedroni to python-dev on 3/1/2001
exec("""if 1:
# I
x = 7
def f():
x = 1
def g():
global x
def i():
def h():
return x
return h()
return i()
return g()
self.assertEqual(f(), 7)
self.assertEqual(x, 7)
# II
x = 7
def f():
x = 1
def g():
x = 2
def i():
def h():
return x
return h()
return i()
return g()
self.assertEqual(f(), 2)
self.assertEqual(x, 7)
# III
x = 7
def f():
x = 1
def g():
global x
x = 2
def i():
def h():
return x
return h()
return i()
return g()
self.assertEqual(f(), 2)
self.assertEqual(x, 2)
# IV
x = 7
def f():
x = 3
def g():
global x
x = 2
def i():
def h():
return x
return h()
return i()
return g()
self.assertEqual(f(), 2)
self.assertEqual(x, 2)
# XXX what about global statements in class blocks?
# do they affect methods?
x = 12
class Global:
global x
x = 13
def set(self, val):
x = val
def get(self):
return x
g = Global()
self.assertEqual(g.get(), 13)
g.set(15)
self.assertEqual(g.get(), 13)
""")
@unittest.expectedFailure # TODO RustPython
def testLeaks(self):
class Foo:
count = 0
def __init__(self):
Foo.count += 1
def __del__(self):
Foo.count -= 1
def f1():
x = Foo()
def f2():
return x
f2()
for i in range(100):
f1()
self.assertEqual(Foo.count, 0)
@unittest.expectedFailure # TODO RustPython
def testClassAndGlobal(self):
exec("""if 1:
def test(x):
class Foo:
global x
def __call__(self, y):
return x + y
return Foo()
x = 0
self.assertEqual(test(6)(2), 8)
x = -1
self.assertEqual(test(3)(2), 5)
looked_up_by_load_name = False
class X:
# Implicit globals inside classes are be looked up by LOAD_NAME, not
# LOAD_GLOBAL.
locals()['looked_up_by_load_name'] = True
passed = looked_up_by_load_name
self.assertTrue(X.passed)
""")
@unittest.expectedFailure # TODO RustPython
def testLocalsFunction(self):
def f(x):
def g(y):
def h(z):
return y + z
w = x + y
y += 3
return locals()
return g
d = f(2)(4)
self.assertIn('h', d)
del d['h']
self.assertEqual(d, {'x': 2, 'y': 7, 'w': 6})
def testLocalsClass(self):
# This test verifies that calling locals() does not pollute
# the local namespace of the class with free variables. Old
# versions of Python had a bug, where a free variable being
# passed through a class namespace would be inserted into
# locals() by locals() or exec or a trace function.
#
# The real bug lies in frame code that copies variables
# between fast locals and the locals dict, e.g. when executing
# a trace function.
def f(x):
class C:
x = 12
def m(self):
return x
locals()
return C
self.assertEqual(f(1).x, 12)
def f(x):
class C:
y = x
def m(self):
return x
z = list(locals())
return C
varnames = f(1).z
self.assertNotIn("x", varnames)
self.assertIn("y", varnames)
@cpython_only
def testLocalsClass_WithTrace(self):
# Issue23728: after the trace function returns, the locals()
# dictionary is used to update all variables, this used to
# include free variables. But in class statements, free
# variables are not inserted...
import sys
self.addCleanup(sys.settrace, sys.gettrace())
sys.settrace(lambda a,b,c:None)
x = 12
class C:
def f(self):
return x
self.assertEqual(x, 12) # Used to raise UnboundLocalError
def testBoundAndFree(self):
# var is bound and free in class
def f(x):
class C:
def m(self):
return x
a = x
return C
inst = f(3)()
self.assertEqual(inst.a, inst.m())
@cpython_only
def testInteractionWithTraceFunc(self):
import sys
def tracer(a,b,c):
return tracer
def adaptgetter(name, klass, getter):
kind, des = getter
if kind == 1: # AV happens when stepping from this line to next
if des == "":
des = "_%s__%s" % (klass.__name__, name)
return lambda obj: getattr(obj, des)
class TestClass:
pass
self.addCleanup(sys.settrace, sys.gettrace())
sys.settrace(tracer)
adaptgetter("foo", TestClass, (1, ""))
sys.settrace(None)
self.assertRaises(TypeError, sys.settrace)
@unittest.expectedFailure # TODO RustPython
def testEvalExecFreeVars(self):
def f(x):
return lambda: x + 1
g = f(3)
self.assertRaises(TypeError, eval, g.__code__)
try:
exec(g.__code__, {})
except TypeError:
pass
else:
self.fail("exec should have failed, because code contained free vars")
def testListCompLocalVars(self):
try:
print(bad)
except NameError:
pass
else:
print("bad should not be defined")
def x():
[bad for s in 'a b' for bad in s.split()]
x()
try:
print(bad)
except NameError:
pass
@unittest.expectedFailure # TODO RustPython
def testEvalFreeVars(self):
def f(x):
def g():
x
eval("x + 1")
return g
f(4)()
def testFreeingCell(self):
# Test what happens when a finalizer accesses
# the cell where the object was stored.
class Special:
def __del__(self):
nestedcell_get()
def testNonLocalFunction(self):
def f(x):
def inc():
nonlocal x
x += 1
return x
def dec():
nonlocal x
x -= 1
return x
return inc, dec
inc, dec = f(0)
self.assertEqual(inc(), 1)
self.assertEqual(inc(), 2)
self.assertEqual(dec(), 1)
self.assertEqual(dec(), 0)
def testNonLocalMethod(self):
def f(x):
class c:
def inc(self):
nonlocal x
x += 1
return x
def dec(self):
nonlocal x
x -= 1
return x
return c()
c = f(0)
self.assertEqual(c.inc(), 1)
self.assertEqual(c.inc(), 2)
self.assertEqual(c.dec(), 1)
self.assertEqual(c.dec(), 0)
@unittest.expectedFailure # TODO RustPython
def testGlobalInParallelNestedFunctions(self):
# A symbol table bug leaked the global statement from one
# function to other nested functions in the same block.
# This test verifies that a global statement in the first
# function does not affect the second function.
local_ns = {}
global_ns = {}
exec("""if 1:
def f():
y = 1
def g():
global y
return y
def h():
return y + 1
return g, h
y = 9
g, h = f()
result9 = g()
result2 = h()
""", local_ns, global_ns)
self.assertEqual(2, global_ns["result2"])
self.assertEqual(9, global_ns["result9"])
def testNonLocalClass(self):
def f(x):
class c:
nonlocal x
x += 1
def get(self):
return x
return c()
c = f(0)
self.assertEqual(c.get(), 1)
self.assertNotIn("x", c.__class__.__dict__)
def testNonLocalGenerator(self):
def f(x):
def g(y):
nonlocal x
for i in range(y):
x += 1
yield x
return g
g = f(0)
self.assertEqual(list(g(5)), [1, 2, 3, 4, 5])
def testNestedNonLocal(self):
def f(x):
def g():
nonlocal x
x -= 2
def h():
nonlocal x
x += 4
return x
return h
return g
g = f(1)
h = g()
self.assertEqual(h(), 3)
def testTopIsNotSignificant(self):
# See #9997.
def top(a):
pass
def b():
global a
@unittest.expectedFailure # TODO RustPython
def testClassNamespaceOverridesClosure(self):
# See #17853.
x = 42
class X:
locals()["x"] = 43
y = x
self.assertEqual(X.y, 43)
class X:
locals()["x"] = 43
del x
self.assertFalse(hasattr(X, "x"))
self.assertEqual(x, 42)
@cpython_only
def testCellLeak(self):
# Issue 17927.
#
# The issue was that if self was part of a cycle involving the
# frame of a method call, *and* the method contained a nested
# function referencing self, thereby forcing 'self' into a
# cell, setting self to None would not be enough to break the
# frame -- the frame had another reference to the instance,
# which could not be cleared by the code running in the frame
# (though it will be cleared when the frame is collected).
# Without the lambda, setting self to None is enough to break
# the cycle.
class Tester:
def dig(self):
if 0:
lambda: self
try:
1/0
except Exception as exc:
self.exc = exc
self = None # Break the cycle
tester = Tester()
tester.dig()
ref = weakref.ref(tester)
del tester
self.assertIsNone(ref())
if __name__ == '__main__':
unittest.main()
| 26.513308
| 84
| 0.455184
|
4a01d53b8d6eef419134054b8fcda2bcfc75c974
| 3,009
|
py
|
Python
|
VISUALIZE/examples/nb_nbody_specials.py
|
binary-husky/hmp2g
|
1a4f4093cd296f07348f4db4c7503aca6e1fb05c
|
[
"MIT"
] | 2
|
2022-02-25T12:04:55.000Z
|
2022-03-15T02:37:59.000Z
|
VISUALIZE/examples/nb_nbody_specials.py
|
binary-husky/hmp2g
|
1a4f4093cd296f07348f4db4c7503aca6e1fb05c
|
[
"MIT"
] | null | null | null |
VISUALIZE/examples/nb_nbody_specials.py
|
binary-husky/hmp2g
|
1a4f4093cd296f07348f4db4c7503aca6e1fb05c
|
[
"MIT"
] | null | null | null |
import numpy as np
import random, time
from scipy.integrate import ode
from UTILS.tensor_ops import distance_matrix, repeat_at, delta_matrix
from VISUALIZE.mcom import mcom
PI = np.pi
def run():
    """Integrate a four-body gravitational orbit with scipy's ODE solver and
    stream the trajectory, frame by frame, to the Threejs visualiser."""
    # Initialise the visualisation front-end (mcom bridge to the Threejs UI).
    可视化桥 = mcom(path='RECYCLE/v2d_logger/', draw_mode='Threejs')
    可视化桥.初始化3D(); 可视化桥.设置样式('star')  # init 3D scene, 'star' theme
    可视化桥.其他几何体之旋转缩放和平移('ball', 'SphereGeometry(1)', 0,0,0, 1,1,1, 0,0,0) # declare a sphere geometry
    # Initial parameters for the four bodies.
    colors = ['Yellow', 'Red', 'Blue', 'White','Yellow', 'Red', 'Blue', 'White','Yellow', 'Red', 'Blue', 'White','Yellow', 'Red', 'Blue', 'White']
    size = [0.04, 0.04, 0.04, 0.04]
    labels = ['A1', 'A2', 'A3', 'A4']
    位置 = np.array([            # positions, one (x, y, z) row per body
        [1.382857, 0,0],
        [0,0.157030,0],
        [-1.382857,0,0],
        [0,-0.157030,0],
    ])
    速度 = np.array([            # velocities, one (vx, vy, vz) row per body
        [0, 0.584873,0],
        [1.871935, 0,0],
        [0, -0.584873,0],
        [-1.871935, 0,0],
    ])
    质量 = np.array([1, 1, 1, 1])   # masses (normalised units)
    G = 1 # gravitational constant, normalised (SI value: 6.67e-11 N·m^2/kg^2)
    N体=len(位置)                   # number of bodies
    assert N体==len(位置) and N体==len(速度) and N体==4
    dT = 0.001 # integration time step
    def pack_y(位置, 速度):
        # Flatten (positions, velocities) into the 1-D state vector the solver uses.
        return np.concatenate((位置.reshape(-1), 速度.reshape(-1)))
    def unpack_y(y):
        # Inverse of pack_y: split the state vector back into two (N, 3) arrays.
        位置, 速度 = y.reshape(2, N体, 3)
        return 位置, 速度
    def f(t, y):
        # ODE right-hand side: d(state)/dt under pairwise Newtonian gravity.
        位置, 速度 = unpack_y(y)
        # F = G*M*m / r^2
        距离矩阵 = distance_matrix(位置) # pairwise distance matrix
        距离矩阵平方 = 距离矩阵 * 距离矩阵.T
        # Pairwise mass products M*m:
        质量矩阵 = repeat_at(tensor=质量, insert_dim=-1, n_times=N体)
        Mm矩阵 = 质量矩阵 * 质量矩阵.T
        # Force magnitudes (the epsilon guards against division by zero)
        引力标量矩阵 = G*Mm矩阵/(距离矩阵平方+1e-10)
        # Zero the diagonal: a body exerts no force on itself
        for i in range(N体): 引力标量矩阵[i,i] = 0
        # Force direction = direction of the displacement between bodies
        引力方向 = delta_matrix(位置) # shape (N, N, 3)
        # Normalise the directions, then scale by the force magnitudes
        引力方向 = 引力方向/(np.linalg.norm(引力方向,axis=-1, keepdims=True)+1e-10)
        引力 = 引力方向*repeat_at(tensor=引力标量矩阵, insert_dim=-1, n_times=3)
        引力合力 = 引力.sum(1)  # net force on each body
        # a = F/m
        加速度 = 引力合力/repeat_at(tensor=质量, insert_dim=-1, n_times=3)
        位置导数 = 速度
        速度导数 = 加速度
        y_dot = pack_y(位置导数, 速度导数)
        return y_dot
    r = ode(f).set_integrator('dop853') # lsoda is fast, dop853 is precise but slow!
    y0 = pack_y(位置, 速度); r.set_initial_value(y0, 0)
    while r.successful() and r.t < 100:
        时间 = int((r.t+dT)/dT)  # current step index
        位置, 速度 = unpack_y(r.integrate(r.t+dT))
        if 时间%10==0:  # render only every 10th step
            位置_实部 = 位置.real
            for body in range(N体):
                可视化桥.发送几何体('ball|%d|%s|%.2f'%(body, colors[body], size[body]), # core spec: "declared shape|geometry ID|colour|overall size"
                    位置_实部[body, 0], 位置_实部[body, 1], 位置_实部[body, 2], ro_x=0, ro_y=0, ro_z=0, # 6 DOF
                    opacity=1, label=labels[body], label_color='white', track_n_frame=2500, track_tension=0, track_color=colors[body], # trail colour: any js colour name or hex value
                )
            可视化桥.结束关键帧()  # mark end of this keyframe
        print('\r %.2f'%(r.t+dT), end='', flush=True)
if __name__ == "__main__":
    run()
    time.sleep(10000)  # keep the process alive so the viewer stays connected
| 34.988372
| 166
| 0.516451
|
4a01d719be3b849f7b66da421089b7d6d90de1d3
| 669
|
py
|
Python
|
Python/Closures and Decorators/Decorators 2 - Name Directory.py
|
guptamadhur/HackerRank
|
e0f1ba82296eea88d2b34132d3b1a28cd67ffa03
|
[
"MIT"
] | null | null | null |
Python/Closures and Decorators/Decorators 2 - Name Directory.py
|
guptamadhur/HackerRank
|
e0f1ba82296eea88d2b34132d3b1a28cd67ffa03
|
[
"MIT"
] | null | null | null |
Python/Closures and Decorators/Decorators 2 - Name Directory.py
|
guptamadhur/HackerRank
|
e0f1ba82296eea88d2b34132d3b1a28cd67ffa03
|
[
"MIT"
] | null | null | null |
# Author: Madhur Gupta
# Github: github.com/guptamadhur
# Project: Hacker Rank Practice Python
# Sample Input
#
# 3
# Mike Thomson 20 M
# Robert Bustle 32 M
# Andria Bustle 30 F
# Sample Output
#
# Mr. Mike Thomson
# Ms. Andria Bustle
# Mr. Robert Bustle
import operator
def age(x):
    """Sort key: a person's age (third field) as an int."""
    return int(x[2])


def person_lister(f):
    """Decorator: lift *f* from one person record to a whole list of people,
    applied in ascending order of age."""
    def inner(people):
        ordered = sorted(people, key=lambda person: int(person[2]))
        return map(f, ordered)
    return inner


@person_lister
def name_format(person):
    """Render one [first, last, age, sex] record as 'Mr./Ms. First Last'."""
    title = "Mr. " if person[3] == "M" else "Ms. "
    return title + person[0] + " " + person[1]
if __name__ == '__main__':
people = [input().split() for i in range(int(input()))]
print(*name_format(people), sep='\n')
| 19.676471
| 81
| 0.650224
|
4a01d7849121011178bd3a0d5a767fd9f9b3505e
| 579
|
py
|
Python
|
spotify_search/spotify_api.py
|
MiltonLn/spotify-tracks-pyconco2020
|
4a75b15852344f7dac066bea3c3e3abb1157d198
|
[
"MIT"
] | 1
|
2021-07-29T16:09:30.000Z
|
2021-07-29T16:09:30.000Z
|
spotify_search/spotify_api.py
|
MiltonLn/spotify-tracks-pyconco2020
|
4a75b15852344f7dac066bea3c3e3abb1157d198
|
[
"MIT"
] | null | null | null |
spotify_search/spotify_api.py
|
MiltonLn/spotify-tracks-pyconco2020
|
4a75b15852344f7dac066bea3c3e3abb1157d198
|
[
"MIT"
] | null | null | null |
import os
import requests
BASE_URL = "https://api.spotify.com/v1"
AUTH_TOKEN = os.getenv("SPOTIFY_AUTH_TOKEN")
def get_spotify_response(search_term, limit=None, search_type=None):
    """Query the Spotify search endpoint and return the decoded JSON body.

    Defaults to 'track' results, 10 per page; authenticates with the bearer
    token read from SPOTIFY_AUTH_TOKEN at import time.
    """
    query = {
        "q": search_term,
        "type": search_type or "track",
        "limit": limit or 10,
    }
    auth_headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": f"Bearer {AUTH_TOKEN}",
    }
    response = requests.get(f"{BASE_URL}/search", params=query, headers=auth_headers)
    return response.json()
| 22.269231
| 68
| 0.632124
|
4a01d8de083e651f9521b9396379d64ac1e39876
| 28,480
|
py
|
Python
|
test/with_dummyserver/test_https.py
|
benjaminp/urllib3
|
62ef68e49edf5dabde26732a154d0e925cef7301
|
[
"MIT"
] | null | null | null |
test/with_dummyserver/test_https.py
|
benjaminp/urllib3
|
62ef68e49edf5dabde26732a154d0e925cef7301
|
[
"MIT"
] | null | null | null |
test/with_dummyserver/test_https.py
|
benjaminp/urllib3
|
62ef68e49edf5dabde26732a154d0e925cef7301
|
[
"MIT"
] | null | null | null |
import datetime
import json
import logging
import ssl
import sys
import shutil
import warnings
import mock
import pytest
from dummyserver.testcase import HTTPSDummyServerTestCase, IPV6HTTPSDummyServerTestCase
from dummyserver.server import (
CLIENT_CERT,
CLIENT_INTERMEDIATE_PEM,
CLIENT_NO_INTERMEDIATE_PEM,
CLIENT_INTERMEDIATE_KEY,
DEFAULT_CA,
DEFAULT_CA_BAD,
DEFAULT_CERTS,
IPV6_ADDR_CERTS,
IPV6_ADDR_CA,
HAS_IPV6,
IP_SAN_CERTS,
IPV6_SAN_CERTS,
IPV6_SAN_CA,
PASSWORD_CLIENT_KEYFILE,
)
from test import (
onlyPy279OrNewer,
notSecureTransport,
notOpenSSL098,
requires_network,
requires_ssl_context_keyfile_password,
fails_on_travis_gce,
requiresTLSv1,
requiresTLSv1_1,
requiresTLSv1_2,
requiresTLSv1_3,
TARPIT_HOST,
SHORT_TIMEOUT,
LONG_TIMEOUT,
)
from urllib3 import HTTPSConnectionPool
from urllib3.connection import VerifiedHTTPSConnection, RECENT_DATE
from urllib3.exceptions import (
SSLError,
ConnectTimeoutError,
InsecureRequestWarning,
SystemTimeWarning,
InsecurePlatformWarning,
MaxRetryError,
ProtocolError,
)
from urllib3.packages import six
from urllib3.util.timeout import Timeout
import urllib3.util as util
# Retry failed tests
pytestmark = pytest.mark.flaky
ResourceWarning = getattr(
six.moves.builtins, "ResourceWarning", type("ResourceWarning", (), {})
)
log = logging.getLogger("urllib3.connectionpool")
log.setLevel(logging.NOTSET)
log.addHandler(logging.StreamHandler(sys.stdout))
TLSv1_CERTS = DEFAULT_CERTS.copy()
TLSv1_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLSv1", None)
TLSv1_1_CERTS = DEFAULT_CERTS.copy()
TLSv1_1_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLSv1_1", None)
TLSv1_2_CERTS = DEFAULT_CERTS.copy()
TLSv1_2_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLSv1_2", None)
TLSv1_3_CERTS = DEFAULT_CERTS.copy()
TLSv1_3_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLS", None)
class TestHTTPS(HTTPSDummyServerTestCase):
tls_protocol_name = None
def test_simple(self):
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA
) as https_pool:
r = https_pool.request("GET", "/")
assert r.status == 200, r.data
@fails_on_travis_gce
def test_dotted_fqdn(self):
with HTTPSConnectionPool(
self.host + ".", self.port, ca_certs=DEFAULT_CA
) as pool:
r = pool.request("GET", "/")
assert r.status == 200, r.data
    def test_client_intermediate(self, certs_dir):
        """Check that certificate chains work well with client certs

        We generate an intermediate CA from the root CA, and issue a client certificate
        from that intermediate CA. Since the server only knows about the root CA, we
        need to send it the certificate *and* the intermediate CA, so that it can check
        the whole chain.
        """
        # CLIENT_INTERMEDIATE_PEM bundles leaf cert + intermediate CA, so the
        # server can build a full chain back to the root it trusts.
        with HTTPSConnectionPool(
            self.host,
            self.port,
            key_file=str(certs_dir / CLIENT_INTERMEDIATE_KEY),
            cert_file=str(certs_dir / CLIENT_INTERMEDIATE_PEM),
            ca_certs=DEFAULT_CA,
        ) as https_pool:
            # The dummy server's /certificate endpoint echoes the client
            # cert's subject back as JSON.
            r = https_pool.request("GET", "/certificate")
            subject = json.loads(r.data.decode("utf-8"))
            assert subject["organizationalUnitName"].startswith("Testing cert")
def test_client_no_intermediate(self, certs_dir):
"""Check that missing links in certificate chains indeed break
The only difference with test_client_intermediate is that we don't send the
intermediate CA to the server, only the client cert.
"""
with HTTPSConnectionPool(
self.host,
self.port,
cert_file=str(certs_dir / CLIENT_NO_INTERMEDIATE_PEM),
key_file=str(certs_dir / CLIENT_INTERMEDIATE_KEY),
ca_certs=DEFAULT_CA,
) as https_pool:
try:
https_pool.request("GET", "/certificate", retries=False)
except SSLError as e:
if not (
"alert unknown ca" in str(e)
or "invalid certificate chain" in str(e)
or "unknown Cert Authority" in str(e)
or
# https://github.com/urllib3/urllib3/issues/1422
"connection closed via error" in str(e)
or "WSAECONNRESET" in str(e)
):
raise
except ProtocolError as e:
if not (
"An existing connection was forcibly closed by the remote host"
in str(e)
# Python 3.7.4+
or "WSAECONNRESET" in str(e) # Windows
or "EPIPE" in str(e) # macOS
or "ECONNRESET" in str(e) # OpenSSL
):
raise
@requires_ssl_context_keyfile_password
def test_client_key_password(self):
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
key_file=PASSWORD_CLIENT_KEYFILE,
cert_file=CLIENT_CERT,
key_password="letmein",
) as https_pool:
r = https_pool.request("GET", "/certificate")
subject = json.loads(r.data.decode("utf-8"))
assert subject["organizationalUnitName"].startswith("Testing server cert")
@requires_ssl_context_keyfile_password
def test_client_encrypted_key_requires_password(self):
with HTTPSConnectionPool(
self.host,
self.port,
key_file=PASSWORD_CLIENT_KEYFILE,
cert_file=CLIENT_CERT,
key_password=None,
) as https_pool:
with pytest.raises(MaxRetryError) as e:
https_pool.request("GET", "/certificate")
assert "password is required" in str(e.value)
assert isinstance(e.value.reason, SSLError)
def test_verified(self):
with HTTPSConnectionPool(
self.host, self.port, cert_reqs="CERT_REQUIRED", ca_certs=DEFAULT_CA
) as https_pool:
conn = https_pool._new_conn()
assert conn.__class__ == VerifiedHTTPSConnection
with mock.patch("warnings.warn") as warn:
r = https_pool.request("GET", "/")
assert r.status == 200
# Modern versions of Python, or systems using PyOpenSSL, don't
# emit warnings.
if (
sys.version_info >= (2, 7, 9)
or util.IS_PYOPENSSL
or util.IS_SECURETRANSPORT
):
assert not warn.called, warn.call_args_list
else:
assert warn.called
if util.HAS_SNI:
call = warn.call_args_list[0]
else:
call = warn.call_args_list[1]
error = call[0][1]
assert error == InsecurePlatformWarning
def test_verified_with_context(self):
ctx = util.ssl_.create_urllib3_context(cert_reqs=ssl.CERT_REQUIRED)
ctx.load_verify_locations(cafile=DEFAULT_CA)
with HTTPSConnectionPool(self.host, self.port, ssl_context=ctx) as https_pool:
conn = https_pool._new_conn()
assert conn.__class__ == VerifiedHTTPSConnection
with mock.patch("warnings.warn") as warn:
r = https_pool.request("GET", "/")
assert r.status == 200
# Modern versions of Python, or systems using PyOpenSSL, don't
# emit warnings.
if (
sys.version_info >= (2, 7, 9)
or util.IS_PYOPENSSL
or util.IS_SECURETRANSPORT
):
assert not warn.called, warn.call_args_list
else:
assert warn.called
if util.HAS_SNI:
call = warn.call_args_list[0]
else:
call = warn.call_args_list[1]
error = call[0][1]
assert error == InsecurePlatformWarning
def test_context_combines_with_ca_certs(self):
ctx = util.ssl_.create_urllib3_context(cert_reqs=ssl.CERT_REQUIRED)
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA, ssl_context=ctx
) as https_pool:
conn = https_pool._new_conn()
assert conn.__class__ == VerifiedHTTPSConnection
with mock.patch("warnings.warn") as warn:
r = https_pool.request("GET", "/")
assert r.status == 200
# Modern versions of Python, or systems using PyOpenSSL, don't
# emit warnings.
if (
sys.version_info >= (2, 7, 9)
or util.IS_PYOPENSSL
or util.IS_SECURETRANSPORT
):
assert not warn.called, warn.call_args_list
else:
assert warn.called
if util.HAS_SNI:
call = warn.call_args_list[0]
else:
call = warn.call_args_list[1]
error = call[0][1]
assert error == InsecurePlatformWarning
@onlyPy279OrNewer
@notSecureTransport # SecureTransport does not support cert directories
@notOpenSSL098 # OpenSSL 0.9.8 does not support cert directories
def test_ca_dir_verified(self, tmpdir):
# OpenSSL looks up certificates by the hash for their name, see c_rehash
# TODO infer the bytes using `cryptography.x509.Name.public_bytes`.
# https://github.com/pyca/cryptography/pull/3236
shutil.copyfile(DEFAULT_CA, str(tmpdir / "b6b9ccf9.0"))
with HTTPSConnectionPool(
self.host, self.port, cert_reqs="CERT_REQUIRED", ca_cert_dir=str(tmpdir)
) as https_pool:
conn = https_pool._new_conn()
assert conn.__class__ == VerifiedHTTPSConnection
with mock.patch("warnings.warn") as warn:
r = https_pool.request("GET", "/")
assert r.status == 200
assert not warn.called, warn.call_args_list
def test_invalid_common_name(self):
with HTTPSConnectionPool(
"127.0.0.1", self.port, cert_reqs="CERT_REQUIRED", ca_certs=DEFAULT_CA
) as https_pool:
with pytest.raises(MaxRetryError) as e:
https_pool.request("GET", "/")
assert isinstance(e.value.reason, SSLError)
assert "doesn't match" in str(
e.value.reason
) or "certificate verify failed" in str(e.value.reason)
def test_verified_with_bad_ca_certs(self):
with HTTPSConnectionPool(
self.host, self.port, cert_reqs="CERT_REQUIRED", ca_certs=DEFAULT_CA_BAD
) as https_pool:
with pytest.raises(MaxRetryError) as e:
https_pool.request("GET", "/")
assert isinstance(e.value.reason, SSLError)
assert "certificate verify failed" in str(e.value.reason), (
"Expected 'certificate verify failed', instead got: %r" % e.value.reason
)
def test_verified_without_ca_certs(self):
# default is cert_reqs=None which is ssl.CERT_NONE
with HTTPSConnectionPool(
self.host, self.port, cert_reqs="CERT_REQUIRED"
) as https_pool:
with pytest.raises(MaxRetryError) as e:
https_pool.request("GET", "/")
assert isinstance(e.value.reason, SSLError)
# there is a different error message depending on whether or
# not pyopenssl is injected
assert (
"No root certificates specified" in str(e.value.reason)
# PyPy sometimes uses all-caps here
or "certificate verify failed" in str(e.value.reason).lower()
or "invalid certificate chain" in str(e.value.reason)
), (
"Expected 'No root certificates specified', "
"'certificate verify failed', or "
"'invalid certificate chain', "
"instead got: %r" % e.value.reason
)
def test_no_ssl(self):
with HTTPSConnectionPool(self.host, self.port) as pool:
pool.ConnectionCls = None
with pytest.raises(SSLError):
pool._new_conn()
with pytest.raises(MaxRetryError) as cm:
pool.request("GET", "/", retries=0)
assert isinstance(cm.value.reason, SSLError)
def test_unverified_ssl(self):
""" Test that bare HTTPSConnection can connect, make requests """
with HTTPSConnectionPool(self.host, self.port, cert_reqs=ssl.CERT_NONE) as pool:
with mock.patch("warnings.warn") as warn:
r = pool.request("GET", "/")
assert r.status == 200
assert warn.called
# Modern versions of Python, or systems using PyOpenSSL, only emit
# the unverified warning. Older systems may also emit other
# warnings, which we want to ignore here.
calls = warn.call_args_list
assert InsecureRequestWarning in [x[0][1] for x in calls]
def test_ssl_unverified_with_ca_certs(self):
with HTTPSConnectionPool(
self.host, self.port, cert_reqs="CERT_NONE", ca_certs=DEFAULT_CA_BAD
) as pool:
with mock.patch("warnings.warn") as warn:
r = pool.request("GET", "/")
assert r.status == 200
assert warn.called
# Modern versions of Python, or systems using PyOpenSSL, only emit
# the unverified warning. Older systems may also emit other
# warnings, which we want to ignore here.
calls = warn.call_args_list
if (
sys.version_info >= (2, 7, 9)
or util.IS_PYOPENSSL
or util.IS_SECURETRANSPORT
):
category = calls[0][0][1]
elif util.HAS_SNI:
category = calls[1][0][1]
else:
category = calls[2][0][1]
assert category == InsecureRequestWarning
def test_assert_hostname_false(self):
with HTTPSConnectionPool(
"localhost", self.port, cert_reqs="CERT_REQUIRED", ca_certs=DEFAULT_CA
) as https_pool:
https_pool.assert_hostname = False
https_pool.request("GET", "/")
def test_assert_specific_hostname(self):
with HTTPSConnectionPool(
"localhost", self.port, cert_reqs="CERT_REQUIRED", ca_certs=DEFAULT_CA
) as https_pool:
https_pool.assert_hostname = "localhost"
https_pool.request("GET", "/")
def test_server_hostname(self):
with HTTPSConnectionPool(
"127.0.0.1",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
server_hostname="localhost",
) as https_pool:
conn = https_pool._new_conn()
conn.request("GET", "/")
# Assert the wrapping socket is using the passed-through SNI name.
# pyopenssl doesn't let you pull the server_hostname back off the
# socket, so only add this assertion if the attribute is there (i.e.
# the python ssl module).
if hasattr(conn.sock, "server_hostname"):
assert conn.sock.server_hostname == "localhost"
def test_assert_fingerprint_md5(self):
with HTTPSConnectionPool(
"localhost", self.port, cert_reqs="CERT_REQUIRED", ca_certs=DEFAULT_CA
) as https_pool:
https_pool.assert_fingerprint = (
"F2:06:5A:42:10:3F:45:1C:17:FE:E6:07:1E:8A:86:E5"
)
https_pool.request("GET", "/")
def test_assert_fingerprint_sha1(self):
with HTTPSConnectionPool(
"localhost", self.port, cert_reqs="CERT_REQUIRED", ca_certs=DEFAULT_CA
) as https_pool:
https_pool.assert_fingerprint = (
"92:81:FE:85:F7:0C:26:60:EC:D6:B3:BF:93:CF:F9:71:CC:07:7D:0A"
)
https_pool.request("GET", "/")
def test_assert_fingerprint_sha256(self):
with HTTPSConnectionPool(
"localhost", self.port, cert_reqs="CERT_REQUIRED", ca_certs=DEFAULT_CA
) as https_pool:
https_pool.assert_fingerprint = (
"C5:4D:0B:83:84:89:2E:AE:B4:58:BB:12:"
"F7:A6:C4:76:05:03:88:D8:57:65:51:F3:"
"1E:60:B0:8B:70:18:64:E6"
)
https_pool.request("GET", "/")
def test_assert_invalid_fingerprint(self):
with HTTPSConnectionPool(
"127.0.0.1", self.port, cert_reqs="CERT_REQUIRED", ca_certs=DEFAULT_CA
) as https_pool:
https_pool.assert_fingerprint = (
"AA:AA:AA:AA:AA:AAAA:AA:AAAA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA"
)
def _test_request(pool):
with pytest.raises(MaxRetryError) as cm:
pool.request("GET", "/", retries=0)
assert isinstance(cm.value.reason, SSLError)
_test_request(https_pool)
https_pool._get_conn()
# Uneven length
https_pool.assert_fingerprint = "AA:A"
_test_request(https_pool)
https_pool._get_conn()
# Invalid length
https_pool.assert_fingerprint = "AA"
_test_request(https_pool)
def test_verify_none_and_bad_fingerprint(self):
with HTTPSConnectionPool(
"127.0.0.1", self.port, cert_reqs="CERT_NONE", ca_certs=DEFAULT_CA_BAD
) as https_pool:
https_pool.assert_fingerprint = (
"AA:AA:AA:AA:AA:AAAA:AA:AAAA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA"
)
with pytest.raises(MaxRetryError) as cm:
https_pool.request("GET", "/", retries=0)
assert isinstance(cm.value.reason, SSLError)
def test_verify_none_and_good_fingerprint(self):
with HTTPSConnectionPool(
"127.0.0.1", self.port, cert_reqs="CERT_NONE", ca_certs=DEFAULT_CA_BAD
) as https_pool:
https_pool.assert_fingerprint = (
"92:81:FE:85:F7:0C:26:60:EC:D6:B3:BF:93:CF:F9:71:CC:07:7D:0A"
)
https_pool.request("GET", "/")
@notSecureTransport
def test_good_fingerprint_and_hostname_mismatch(self):
# This test doesn't run with SecureTransport because we don't turn off
# hostname validation without turning off all validation, which this
# test doesn't do (deliberately). We should revisit this if we make
# new decisions.
with HTTPSConnectionPool(
"127.0.0.1", self.port, cert_reqs="CERT_REQUIRED", ca_certs=DEFAULT_CA
) as https_pool:
https_pool.assert_fingerprint = (
"92:81:FE:85:F7:0C:26:60:EC:D6:B3:BF:93:CF:F9:71:CC:07:7D:0A"
)
https_pool.request("GET", "/")
@requires_network
def test_https_timeout(self):
timeout = Timeout(total=None, connect=SHORT_TIMEOUT)
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
timeout=timeout,
retries=False,
cert_reqs="CERT_REQUIRED",
) as https_pool:
with pytest.raises(ConnectTimeoutError):
https_pool.request("GET", "/")
timeout = Timeout(read=0.01)
with HTTPSConnectionPool(
self.host,
self.port,
timeout=timeout,
retries=False,
cert_reqs="CERT_REQUIRED",
) as https_pool:
https_pool.ca_certs = DEFAULT_CA
https_pool.assert_fingerprint = (
"92:81:FE:85:F7:0C:26:60:EC:D6:B3:BF:93:CF:F9:71:CC:07:7D:0A"
)
timeout = Timeout(total=None)
with HTTPSConnectionPool(
self.host, self.port, timeout=timeout, cert_reqs="CERT_NONE"
) as https_pool:
https_pool.request("GET", "/")
def test_tunnel(self):
""" test the _tunnel behavior """
timeout = Timeout(total=None)
with HTTPSConnectionPool(
self.host, self.port, timeout=timeout, cert_reqs="CERT_NONE"
) as https_pool:
conn = https_pool._new_conn()
try:
conn.set_tunnel(self.host, self.port)
conn._tunnel = mock.Mock()
https_pool._make_request(conn, "GET", "/")
conn._tunnel.assert_called_once_with()
finally:
conn.close()
@requires_network
def test_enhanced_timeout(self):
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
timeout=Timeout(connect=SHORT_TIMEOUT),
retries=False,
cert_reqs="CERT_REQUIRED",
) as https_pool:
conn = https_pool._new_conn()
try:
with pytest.raises(ConnectTimeoutError):
https_pool.request("GET", "/")
with pytest.raises(ConnectTimeoutError):
https_pool._make_request(conn, "GET", "/")
finally:
conn.close()
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
timeout=Timeout(connect=LONG_TIMEOUT),
retries=False,
cert_reqs="CERT_REQUIRED",
) as https_pool:
with pytest.raises(ConnectTimeoutError):
https_pool.request("GET", "/", timeout=Timeout(connect=SHORT_TIMEOUT))
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
timeout=Timeout(total=None),
retries=False,
cert_reqs="CERT_REQUIRED",
) as https_pool:
conn = https_pool._new_conn()
try:
with pytest.raises(ConnectTimeoutError):
https_pool.request(
"GET", "/", timeout=Timeout(total=None, connect=SHORT_TIMEOUT)
)
finally:
conn.close()
def test_enhanced_ssl_connection(self):
fingerprint = "92:81:FE:85:F7:0C:26:60:EC:D6:B3:BF:93:CF:F9:71:CC:07:7D:0A"
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
assert_fingerprint=fingerprint,
) as https_pool:
r = https_pool.request("GET", "/")
assert r.status == 200
@onlyPy279OrNewer
def test_ssl_correct_system_time(self):
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA
) as https_pool:
https_pool.cert_reqs = "CERT_REQUIRED"
https_pool.ca_certs = DEFAULT_CA
w = self._request_without_resource_warnings("GET", "/")
assert [] == w
@onlyPy279OrNewer
def test_ssl_wrong_system_time(self):
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA
) as https_pool:
https_pool.cert_reqs = "CERT_REQUIRED"
https_pool.ca_certs = DEFAULT_CA
with mock.patch("urllib3.connection.datetime") as mock_date:
mock_date.date.today.return_value = datetime.date(1970, 1, 1)
w = self._request_without_resource_warnings("GET", "/")
assert len(w) == 1
warning = w[0]
assert SystemTimeWarning == warning.category
assert str(RECENT_DATE) in warning.message.args[0]
def _request_without_resource_warnings(self, method, url):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA
) as https_pool:
https_pool.request(method, url)
return [x for x in w if not isinstance(x.message, ResourceWarning)]
def test_set_ssl_version_to_tls_version(self):
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA
) as https_pool:
https_pool.ssl_version = self.certs["ssl_version"]
r = https_pool.request("GET", "/")
assert r.status == 200, r.data
def test_set_cert_default_cert_required(self):
conn = VerifiedHTTPSConnection(self.host, self.port)
conn.set_cert()
assert conn.cert_reqs == ssl.CERT_REQUIRED
def test_tls_protocol_name_of_socket(self):
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA
) as https_pool:
conn = https_pool._get_conn()
try:
conn.connect()
if not hasattr(conn.sock, "version"):
pytest.skip("SSLSocket.version() not available")
assert conn.sock.version() == self.tls_protocol_name
finally:
conn.close()
# Re-runs the full TestHTTPS suite pinned to TLSv1.
@requiresTLSv1()
class TestHTTPS_TLSv1(TestHTTPS):
    tls_protocol_name = "TLSv1"
    certs = TLSv1_CERTS
# Re-runs the full TestHTTPS suite pinned to TLSv1.1.
@requiresTLSv1_1()
class TestHTTPS_TLSv1_1(TestHTTPS):
    tls_protocol_name = "TLSv1.1"
    certs = TLSv1_1_CERTS
# Re-runs the full TestHTTPS suite pinned to TLSv1.2.
@requiresTLSv1_2()
class TestHTTPS_TLSv1_2(TestHTTPS):
    tls_protocol_name = "TLSv1.2"
    certs = TLSv1_2_CERTS
# Re-runs the full TestHTTPS suite pinned to TLSv1.3 (via PROTOCOL_TLS).
@requiresTLSv1_3()
class TestHTTPS_TLSv1_3(TestHTTPS):
    tls_protocol_name = "TLSv1.3"
    certs = TLSv1_3_CERTS
class TestHTTPS_NoSAN:
    """Tests against a server whose certificate lacks a SAN entry."""
    def test_warning_for_certs_without_a_san(self, no_san_server):
        """Ensure that a warning is raised when the cert from the server has
        no Subject Alternative Name."""
        with mock.patch("warnings.warn") as warn:
            with HTTPSConnectionPool(
                no_san_server.host,
                no_san_server.port,
                cert_reqs="CERT_REQUIRED",
                ca_certs=no_san_server.ca_certs,
            ) as https_pool:
                r = https_pool.request("GET", "/")
                assert r.status == 200
                assert warn.called
class TestHTTPS_IPSAN(HTTPSDummyServerTestCase):
    """Tests against a server cert whose SAN contains an IP address."""
    certs = IP_SAN_CERTS
    def test_can_validate_ip_san(self):
        """Ensure that urllib3 can validate SANs with IP addresses in them."""
        try:
            import ipaddress  # noqa: F401
        except ImportError:
            pytest.skip("Only runs on systems with an ipaddress module")
        # NOTE(review): verification uses DEFAULT_CA while the server runs
        # with IP_SAN_CERTS — confirm the IP-SAN cert chains to the default CA.
        with HTTPSConnectionPool(
            "127.0.0.1", self.port, cert_reqs="CERT_REQUIRED", ca_certs=DEFAULT_CA
        ) as https_pool:
            r = https_pool.request("GET", "/")
            assert r.status == 200
class TestHTTPS_IPv6Addr(IPV6HTTPSDummyServerTestCase):
    """Tests connecting to a bracketed IPv6 literal host."""
    certs = IPV6_ADDR_CERTS
    @pytest.mark.skipif(not HAS_IPV6, reason="Only runs on IPv6 systems")
    def test_strip_square_brackets_before_validating(self):
        """Test that the fix for #760 works."""
        with HTTPSConnectionPool(
            "[::1]", self.port, cert_reqs="CERT_REQUIRED", ca_certs=IPV6_ADDR_CA
        ) as https_pool:
            r = https_pool.request("GET", "/")
            assert r.status == 200
class TestHTTPS_IPV6SAN(IPV6HTTPSDummyServerTestCase):
    """Tests against a server cert whose SAN contains an IPv6 address."""
    certs = IPV6_SAN_CERTS
    def test_can_validate_ipv6_san(self):
        """Ensure that urllib3 can validate SANs with IPv6 addresses in them."""
        try:
            import ipaddress  # noqa: F401
        except ImportError:
            pytest.skip("Only runs on systems with an ipaddress module")
        with HTTPSConnectionPool(
            "[::1]", self.port, cert_reqs="CERT_REQUIRED", ca_certs=IPV6_SAN_CA
        ) as https_pool:
            r = https_pool.request("GET", "/")
            assert r.status == 200
| 37.035111
| 88
| 0.587956
|
4a01da108fbba1954570581b20ae0af114c97d8f
| 2,001
|
py
|
Python
|
GUI/GtkGI/Applications.py
|
gcewing/PyGUI
|
58c6c38ccb8e66acdf98dea6b24bef1d9a03147c
|
[
"MIT"
] | 9
|
2019-07-15T19:03:27.000Z
|
2021-11-24T19:50:02.000Z
|
GUI/GtkGI/Applications.py
|
mnabeelp/PyGUI
|
58c6c38ccb8e66acdf98dea6b24bef1d9a03147c
|
[
"MIT"
] | 3
|
2019-09-11T13:22:10.000Z
|
2020-08-19T20:13:00.000Z
|
GUI/GtkGI/Applications.py
|
mnabeelp/PyGUI
|
58c6c38ccb8e66acdf98dea6b24bef1d9a03147c
|
[
"MIT"
] | 4
|
2020-02-23T16:50:06.000Z
|
2022-02-10T07:15:35.000Z
|
#
# Python GUI - Application class - Gtk
#
import sys
from gi.repository import Gtk, Gdk
from GUI.Globals import application
from GUI.GApplications import Application as GApplication
class Application(GApplication):
    """Gtk backend implementation of the PyGUI Application class.

    Bridges PyGUI's generic application protocol onto the Gtk main loop
    and the Gdk clipboard.
    """
    # Depth counter for nested entries into Gtk.main (not modified in the
    # code visible here — presumably maintained by the commented-out event
    # loop methods below; verify before removing).
    _in_gtk_main = 0
    def run(self):
        GApplication.run(self)
    def set_menus(self, menu_list):
        # After the generic bookkeeping, rebuild each window's menubar so
        # the new menu list is reflected in the UI.
        GApplication.set_menus(self, menu_list)
        for window in self._windows:
            window._gtk_update_menubar()
#	def handle_events(self):
#		#print "Application.handle_events: entering Gtk.main" ###
#		_call_with_excepthook(Gtk.main, Gtk.main_quit)
#		#print "Application.handle_events: returned from Gtk.main" ###
    def handle_next_event(self, modal_window = None):
        # Run one iteration of the Gtk main loop, re-raising any exception
        # captured by the temporary excepthook.
        _call_with_excepthook(Gtk.main_iteration)
#	def _quit(self):
#		self._quit_flag = True
#		Gtk.main_quit()
#	def _exit_event_loop(self):
#		Gtk.main_quit()
    def get_target_window(self):
        # The window holding toplevel focus is the target for commands.
        for window in self._windows:
            if window._gtk_outer_widget.has_toplevel_focus():
                return window
        return None
    def zero_windows_allowed(self):
        # Gtk apps may keep running with no windows open.
        return 0
    def query_clipboard(self):
        # True if the system clipboard currently holds text.
        return _gtk_clipboard.wait_is_text_available()
    def get_clipboard(self):
        return _gtk_clipboard.wait_for_text()
    def set_clipboard(self, data):
        _gtk_clipboard.set_text(data, len(data))
#------------------------------------------------------------------------------
# Process-wide handle on the system CLIPBOARD selection, shared by the
# Application clipboard methods above.
CLIPBOARD = Gdk.atom_intern("CLIPBOARD", False)
_gtk_clipboard = Gtk.Clipboard.get(CLIPBOARD)
#------------------------------------------------------------------------------
def _call_with_excepthook(proc, breakout = None):
# This function arranges for exceptions to be propagated
# across calls to the Gtk event loop functions.
exc_info = []
def excepthook(*args):
exc_info[:] = args
if breakout:
breakout()
old_excepthook = sys.excepthook
try:
sys.excepthook = excepthook
proc()
finally:
sys.excepthook = old_excepthook
if exc_info:
#print "_call_with_excepthook: raising", exc_info ###
raise exc_info[0], exc_info[1], exc_info[2]
| 24.703704
| 79
| 0.689155
|
4a01db9c0ba8c0ffc15524da90e21c23dc743e9f
| 7,110
|
py
|
Python
|
tests/chainer_tests/functions_tests/connection_tests/test_dilated_convolution_2d.py
|
mingxiaoh/chainer-v3
|
815ff00f5eaf7944d6e8a75662ff64a2fe046a4d
|
[
"BSD-3-Clause"
] | 7
|
2017-05-08T07:02:40.000Z
|
2018-12-02T18:35:39.000Z
|
tests/chainer_tests/functions_tests/connection_tests/test_dilated_convolution_2d.py
|
mingxiaoh/chainer-v3
|
815ff00f5eaf7944d6e8a75662ff64a2fe046a4d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/chainer_tests/functions_tests/connection_tests/test_dilated_convolution_2d.py
|
mingxiaoh/chainer-v3
|
815ff00f5eaf7944d6e8a75662ff64a2fe046a4d
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import mock
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer.functions.connection import dilated_convolution_2d
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*(testing.product({
    'c_contiguous': [True, False],
    'cover_all': [True, False],
    'x_dtype': [numpy.float32],
    'W_dtype': [numpy.float32],
}) + testing.product({
    'c_contiguous': [False],
    'cover_all': [False],
    'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
    'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
})))
class TestDilatedConvolution2DFunction(unittest.TestCase):
    """Forward-consistency (CPU vs GPU) and numerical-gradient tests for
    dilated 2-D convolution, parameterized over dtypes, contiguity and
    cover_all."""
    def setUp(self):
        # Fixed small problem: 3 in-channels, 2 out-channels, 3x3 kernel,
        # stride/pad/dilate all 2, input batch of shape (2, 3, 4, 3).
        in_channels = 3
        out_channels = 2
        kh, kw = (3, 3)
        self.stride = 2
        self.pad = 2
        self.dilate = 2
        self.use_cudnn = True
        self.W = numpy.random.normal(
            0, numpy.sqrt(1. / (kh * kw * in_channels)),
            (out_channels, in_channels, kh, kw)).astype(self.W_dtype)
        self.b = numpy.random.uniform(
            -1, 1, out_channels).astype(self.x_dtype)
        self.x = numpy.random.uniform(
            -1, 1, (2, 3, 4, 3)).astype(self.x_dtype)
        # cover_all changes the output spatial size, hence gy's shape.
        if self.cover_all:
            self.gy = numpy.random.uniform(-1, 1,
                                           (2, 2, 3, 2)).astype(self.x_dtype)
        else:
            self.gy = numpy.random.uniform(
                -1, 1, (2, 2, 2, 2)).astype(self.x_dtype)
        self.check_forward_options = {}
        self.check_backward_options = {'dtype': numpy.float64}
        # float16 needs looser tolerances.
        if self.x_dtype == numpy.float16 or self.W_dtype == numpy.float16:
            self.check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
            self.check_backward_options = {
                'dtype': numpy.float64, 'atol': 5e-4, 'rtol': 5e-3}
    @attr.gpu
    def test_forward_consistency(self, nobias=False):
        """CPU and GPU forward passes must agree within tolerance."""
        x_cpu = chainer.Variable(self.x)
        W_cpu = chainer.Variable(self.W)
        b_cpu = None if nobias else chainer.Variable(self.b)
        y_cpu = functions.dilated_convolution_2d(
            x_cpu, W_cpu, b_cpu, stride=self.stride, pad=self.pad,
            dilate=self.dilate, use_cudnn=self.use_cudnn,
            cover_all=self.cover_all)
        x_gpu = chainer.Variable(cuda.to_gpu(self.x))
        W_gpu = chainer.Variable(cuda.to_gpu(self.W))
        b_gpu = None if nobias else chainer.Variable(cuda.to_gpu(self.b))
        y_gpu = functions.dilated_convolution_2d(
            x_gpu, W_gpu, b_gpu, stride=self.stride, pad=self.pad,
            dilate=self.dilate, use_cudnn=self.use_cudnn,
            cover_all=self.cover_all)
        testing.assert_allclose(
            y_cpu.data, y_gpu.data.get(), **self.check_forward_options)
    @attr.gpu
    def test_forward_consistency_im2col(self):
        """Same consistency check with cudnn disabled (im2col path)."""
        self.use_cudnn = False
        self.test_forward_consistency()
    @attr.gpu
    def test_forward_consistency_im2col_nobias(self):
        """im2col path without a bias term."""
        self.use_cudnn = False
        self.test_forward_consistency(nobias=True)
    def check_backward(self, x_data, W_data, b_data, y_grad):
        """Numerical-gradient check; optionally on non-C-contiguous arrays."""
        xp = cuda.get_array_module(x_data)
        if not self.c_contiguous:
            # Fortran order / strided slices force the non-contiguous path.
            x_data = xp.asfortranarray(x_data)
            W_data = xp.asfortranarray(W_data)
            y_grad = xp.asfortranarray(y_grad)
            self.assertFalse(x_data.flags.c_contiguous)
            self.assertFalse(W_data.flags.c_contiguous)
            self.assertFalse(y_grad.flags.c_contiguous)
            if b_data is not None:
                b = xp.empty((len(b_data) * 2,), dtype=self.b.dtype)
                b[::2] = b_data
                b_data = b[::2]
                self.assertFalse(b_data.flags.c_contiguous)
        args = (x_data, W_data)
        if b_data is not None:
            args = args + (b_data,)
        gradient_check.check_backward(
            dilated_convolution_2d.DilatedConvolution2DFunction(
                self.stride, self.pad, self.dilate,
                self.use_cudnn, self.cover_all),
            args, y_grad, **self.check_backward_options)
    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.x, self.W, self.b, self.gy)
    @condition.retry(3)
    def test_backward_cpu_nobias(self):
        self.check_backward(self.x, self.W, None, self.gy)
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            cuda.to_gpu(self.b), cuda.to_gpu(self.gy))
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_nobias(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            None, cuda.to_gpu(self.gy))
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_im2col(self):
        self.use_cudnn = False
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            cuda.to_gpu(self.b), cuda.to_gpu(self.gy))
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_im2col_nobias(self):
        self.use_cudnn = False
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            None, cuda.to_gpu(self.gy))
@testing.parameterize(*testing.product({
    'use_cudnn': [True, False],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestDilatedConvolution2DCudnnCall(unittest.TestCase):
    """Verifies (via mock) that the cudnn kernels are invoked exactly when
    use_cudnn / dtype / cudnn version allow it."""
    def setUp(self):
        in_channels = 3
        out_channels = 2
        kh, kw = (3, 3)
        self.stride = 2
        self.pad = 2
        self.dilate = 2
        self.x = cuda.cupy.random.uniform(
            -1, 1, (2, 3, 4, 3)).astype(self.dtype)
        self.W = cuda.cupy.random.normal(
            0, numpy.sqrt(1. / (kh * kw * in_channels)),
            (out_channels, in_channels, kh, kw)).astype(self.dtype)
        self.gy = cuda.cupy.random.uniform(
            -1, 1, (2, 2, 2, 2)).astype(self.dtype)
        # cudnn handles float16 only from version 3000 onward.
        self.expect = self.use_cudnn and (
            cuda.cudnn.cudnn.getVersion() >= 3000 or
            self.dtype != numpy.float16)
    def forward(self):
        x = chainer.Variable(self.x)
        W = chainer.Variable(self.W)
        return functions.dilated_convolution_2d(
            x, W, None, stride=self.stride, pad=self.pad, dilate=self.dilate,
            use_cudnn=self.use_cudnn)
    def test_call_cudnn_forward(self):
        with mock.patch('cupy.cudnn.cudnn.convolutionForward') as func:
            self.forward()
            self.assertEqual(func.called, self.expect)
    def test_call_cudnn_backrward(self):
        # Backward-data entry point name depends on the cudnn version.
        y = self.forward()
        y.grad = self.gy
        if cuda.cudnn.cudnn.getVersion() >= 4000:
            name = 'cupy.cudnn.cudnn.convolutionBackwardData_v3'
        else:
            name = 'cupy.cudnn.cudnn.convolutionBackwardData_v2'
        with mock.patch(name) as func:
            y.backward()
            self.assertEqual(func.called, self.expect)
# Hook the module into chainer's test runner.
testing.run_module(__name__, __file__)
| 35.728643
| 77
| 0.611533
|
4a01dcbaabb698b9a9974cc29fdb37a240de8d90
| 7,677
|
py
|
Python
|
blueprints/genome_search.py
|
Ensembl/ensembl-2020-genome-search
|
ed0877ee3eedac135ac84809e9c44429a758ac82
|
[
"Apache-2.0"
] | 2
|
2020-02-06T13:25:53.000Z
|
2020-07-31T05:34:39.000Z
|
blueprints/genome_search.py
|
Ensembl/ensembl-2020-genome-search
|
ed0877ee3eedac135ac84809e9c44429a758ac82
|
[
"Apache-2.0"
] | 15
|
2019-07-09T16:30:13.000Z
|
2021-10-06T10:30:10.000Z
|
blueprints/genome_search.py
|
Ensembl/ensembl-2020-genome-search
|
ed0877ee3eedac135ac84809e9c44429a758ac82
|
[
"Apache-2.0"
] | 3
|
2020-06-23T17:21:37.000Z
|
2021-03-26T12:38:43.000Z
|
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flask import Blueprint, jsonify, make_response, abort, request
from flask import current_app as app
from flask_restful import Resource, Api, reqparse
from resources.ensembl_indexer import Tokenize
# Flask blueprint + Flask-RESTful API onto which the search resources below
# are registered.
search_bp = Blueprint('genome_search', __name__)
api = Api(search_bp)
class Search(Resource):
def get(self, **kwargs):
parser = reqparse.RequestParser(bundle_errors=True)
parser.add_argument('query', type=str, required=True, help="Missing 'query' param in the request.", location='args')
parser.add_argument('division', type=str, location='args')
parser.add_argument('exclude', type=str, location='args', action='append')
parser.add_argument('limit', type=int, location='args')
self.args = parser.parse_args()
# print(self.args)
tokenize = Tokenize()
self.tokens = tokenize.create_tokens(self.args.query)
# Split user query to work on each word
self.query_words = self.args.query.split()
grouped_by_match_position = {}
total_matches = 0
genome_keys = self._get_genome_keys()
for genome_key in genome_keys:
genome = self._get_genome(genome_key)
# Skip if genome search hit does not belong to user supplied division
if self.args.division is not None:
if not self._check_if_belongs_to_division(genome):
continue
# Skip if search hit is for a genome which is in exclusion list
if self.args.exclude is not None:
if self._check_if_in_exclude_list(genome):
continue
matched_positions = self._locate_match_positions(genome)
if any(matched_positions):
grouped_by_match_position = self._group_by_match_position(grouped_by_match_position, matched_positions)
total_matches += 1
response = self._prepare_response(grouped_by_match_position, total_matches)
return make_response(jsonify(response), 200)
def _get_genome_keys(self):
# Get Genome keys for every token in user query
genome_keys = []
for token in self.tokens:
genome_keys_of_token = app.indexes.search(token)
if genome_keys_of_token is None:
genome_keys.append(set())
else:
genome_keys.append(set(genome_keys_of_token))
# Create a set of Genome keys which are common to all the tokens in a user query
if any(genome_keys):
return set.intersection(*genome_keys)
else:
return set()
def _get_genome(self, genome_key):
return app.genome_store.get_genome(genome_key)
def _check_if_belongs_to_division(self, genome):
if self.args.division in [*app.config['VALID_DIVISIONS'].values()]:
if self.args.division in genome['division']:
return True
else:
return False
else:
return abort(400, {'error': 'Invalid division filter. Use values from {}'.format(', '.join([*app.config['VALID_DIVISIONS'].values()]))})
def _check_if_in_exclude_list(self, genome):
if genome.get('genome_id') in self.args.exclude:
return True
else:
return False
    def _locate_match_positions(self, genome):
        """Find where the query words match the genome's indexed name fields.

        For each configured indexable key, every query word is compared
        case-insensitively against the *prefix* of every word of the field
        value. Returns a dict with 'matches_info' (per-field match data) and
        'genome_info' (the genome itself), or an empty dict if nothing matched.
        """
        genome_with_matched_positions = {}
        for genome_key_to_index in app.config['KEYS_TO_INDEX']:
            if genome[genome_key_to_index] is not None:
                genome_name_words = genome[genome_key_to_index].split()
                match_positions = {}
                for query_word in self.query_words:
                    # Character offset of the current word inside the field value.
                    current_location = 0
                    for nth_word in range(len(genome_name_words)):
                        # Prefix match: query word equals the start of this word.
                        if query_word.lower() == genome_name_words[nth_word][:len(query_word)].lower():
                            # print('******Match*******')
                            # Record the 1-based index of the matching word.
                            match_positions.setdefault('match_in_nth_word', set()).add(nth_word + 1)
                            match_positions.setdefault('offsets', {})
                            # Keep only the longest match length seen at a given offset.
                            if current_location not in match_positions['offsets'].keys() or \
                                    match_positions['offsets'][current_location] < len(query_word):
                                match_positions['offsets'].update({current_location: len(query_word)})
                        # +1 for white space
                        current_location = current_location + len(genome_name_words[nth_word]) + 1
                if any(match_positions):
                    genome_with_matched_positions.setdefault('matches_info', {}).update({genome_key_to_index: match_positions})
                    genome_with_matched_positions['genome_info'] = genome
        return genome_with_matched_positions
def _group_by_match_position(self, grouped_by_match_position, genome_with_matched_positions):
group_number = min(list(set.union(*[match_info['match_in_nth_word'] for match_info in genome_with_matched_positions['matches_info'].values()])))
grouped_by_match_position.setdefault(group_number, []).append(genome_with_matched_positions)
return grouped_by_match_position
    def _prepare_response(self, grouped_genomes, total_matches):
        """Build the JSON-serialisable search response.

        Groups are emitted in ascending order of their earliest-match word
        position; each hit carries its genome identifiers plus the matched
        substring offsets/lengths. `self.args.limit`, when set, caps the
        total number of hits emitted across all groups.
        """
        response = {}
        genome_hit_count = 0
        for group_number, genomes in sorted(grouped_genomes.items()):
            group = []
            for genome in genomes:
                genome_hit_count += 1
                # Enforce the user-supplied result limit across all groups.
                if self.args.limit is not None and genome_hit_count > self.args.limit:
                    continue
                genome_hit = dict(
                    genome_id=genome['genome_info']['genome_id'],
                    reference_genome_id=genome['genome_info']['reference_genome_id'],
                    common_name=genome['genome_info']['common_name'],
                    scientific_name=genome['genome_info']['scientific_name'],
                    assembly_name=genome['genome_info']['assembly_name'],
                )
                # Flatten per-field offset/length data into matched_substrings.
                for match_in_genome_name_type, match_info in genome['matches_info'].items():
                    for offset, length in match_info['offsets'].items():
                        matched_substring = dict(
                            length=length,
                            offset=offset,
                            match=match_in_genome_name_type,
                        )
                        genome_hit.setdefault('matched_substrings', []).append(matched_substring)
                group.append(genome_hit)
            if group:
                response.setdefault('genome_matches', []).append(group)
        # If there are no matches, just respond with an empty list
        if 'genome_matches' not in response:
            response.setdefault('genome_matches', [])
        response['total_hits'] = total_matches
        return response
# Expose the Search resource at the API root path.
api.add_resource(Search, '/')
| 40.619048
| 152
| 0.621206
|
4a01dcc1e78709b99f149f9bc73b215b59dc3549
| 1,548
|
py
|
Python
|
SC001/Assignment4/best_photoshop_award.py
|
elven-liu/stanCode-projects
|
e42f84d3b7bb7422e8d755d9e81cb24cc5625670
|
[
"MIT"
] | null | null | null |
SC001/Assignment4/best_photoshop_award.py
|
elven-liu/stanCode-projects
|
e42f84d3b7bb7422e8d755d9e81cb24cc5625670
|
[
"MIT"
] | null | null | null |
SC001/Assignment4/best_photoshop_award.py
|
elven-liu/stanCode-projects
|
e42f84d3b7bb7422e8d755d9e81cb24cc5625670
|
[
"MIT"
] | null | null | null |
"""
File: best_photoshop_award.py
----------------------------------
This file creates a photoshopped image
that is going to compete for the Best
Photoshop Award for SC001.
Please put all the images you will use in the image_contest folder
and make sure to choose the right folder when loading your images.
"""
from simpleimage import SimpleImage
# Controls the threshold of detecting green screen pixel
THRESHOLD = 1.15
# Controls the upper bound for black pixel
BLACK_PIXEL = 120
def combine(bg, me):
    """Replace green-screen pixels of the figure image with background pixels.

    :param bg: SimpleImage, the background image
    :param me: SimpleImage, the green-screen figure image
    :return: SimpleImage, ``me`` with green-screen pixels replaced by ``bg``
    """
    for row in range(bg.height):
        for col in range(bg.width):
            fg_pixel = me.get_pixel(col, row)
            channel_sum = fg_pixel.red + fg_pixel.blue + fg_pixel.green
            channel_avg = channel_sum // 3
            # Green-dominant and bright enough to not be a dark/black pixel.
            is_green = fg_pixel.green > channel_avg * THRESHOLD
            is_bright_enough = channel_sum > BLACK_PIXEL
            if is_green and is_bright_enough:
                bg_pixel = bg.get_pixel(col, row)
                fg_pixel.red = bg_pixel.red
                fg_pixel.blue = bg_pixel.blue
                fg_pixel.green = bg_pixel.green
    return me
def main():
    """Photoshop my picture onto a Godzilla background via green-screen removal."""
    figure = SimpleImage('image_contest/Elven.jpeg')
    background = SimpleImage('image_contest/Godzilla.jpeg')
    # Resize the background so both images share the same dimensions.
    background.make_as_big_as(figure)
    combined_img = combine(background, figure)
    combined_img.show()


if __name__ == '__main__':
    main()
| 30.352941
| 96
| 0.656331
|
4a01dcda937fa8a9980d1129747452d02d95d030
| 10,035
|
py
|
Python
|
core/python/spirit/hamiltonian.py
|
MSallermann/spirit
|
d3b771bcbf2f1eb4b28d48899091c17a48f12c67
|
[
"MIT"
] | 46
|
2020-08-24T22:40:15.000Z
|
2022-02-28T06:54:54.000Z
|
core/python/spirit/hamiltonian.py
|
MSallermann/spirit
|
d3b771bcbf2f1eb4b28d48899091c17a48f12c67
|
[
"MIT"
] | null | null | null |
core/python/spirit/hamiltonian.py
|
MSallermann/spirit
|
d3b771bcbf2f1eb4b28d48899091c17a48f12c67
|
[
"MIT"
] | 4
|
2020-09-05T13:24:41.000Z
|
2021-11-06T07:46:47.000Z
|
"""
Hamiltonian
====================
Set the parameters of the Heisenberg Hamiltonian, such as external field or exchange interaction.
"""
import spirit.spiritlib as spiritlib
import ctypes
### Load Library
_spirit = spiritlib.load_spirit_library()
### DM vector chirality
CHIRALITY_BLOCH = 1
"""DMI Bloch chirality type for neighbour shells"""
CHIRALITY_NEEL = 2
"""DMI Neel chirality type for neighbour shells"""
CHIRALITY_BLOCH_INVERSE = -1
"""DMI Bloch chirality type for neighbour shells with opposite sign"""
CHIRALITY_NEEL_INVERSE = -2
"""DMI Neel chirality type for neighbour shells with opposite sign"""
### DDI METHOD
DDI_METHOD_NONE = 0
"""Dipole-dipole interaction: do not calculate"""
DDI_METHOD_FFT = 1
"""Dipole-dipole interaction: use FFT convolutions"""
DDI_METHOD_FMM = 2
"""Dipole-dipole interaction: use a fast multipole method (FMM)"""
DDI_METHOD_CUTOFF = 3
"""Dipole-dipole interaction: use a direct summation with a cutoff radius"""
### ---------------------------------- Set ----------------------------------

# ctypes binding: void Hamiltonian_Set_Boundary_Conditions(state, bool*, int, int)
_Set_Boundary_Conditions = _spirit.Hamiltonian_Set_Boundary_Conditions
_Set_Boundary_Conditions.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_bool),
                                     ctypes.c_int, ctypes.c_int]
_Set_Boundary_Conditions.restype = None


def set_boundary_conditions(p_state, boundaries, idx_image=-1, idx_chain=-1):
    """Set the boundary conditions along the translation directions [a, b, c].

    0 = open, 1 = periodical.
    """
    bool3 = ctypes.c_bool * 3
    _Set_Boundary_Conditions(ctypes.c_void_p(p_state), bool3(*boundaries),
                             ctypes.c_int(idx_image), ctypes.c_int(idx_chain))


_Set_Field = _spirit.Hamiltonian_Set_Field
_Set_Field.argtypes = [ctypes.c_void_p, ctypes.c_float, ctypes.POINTER(ctypes.c_float),
                       ctypes.c_int, ctypes.c_int]
_Set_Field.restype = None


def set_field(p_state, magnitude, direction, idx_image=-1, idx_chain=-1):
    """Set the (homogeneous) external magnetic field."""
    vec3 = ctypes.c_float * 3
    _Set_Field(ctypes.c_void_p(p_state), ctypes.c_float(magnitude), vec3(*direction),
               ctypes.c_int(idx_image), ctypes.c_int(idx_chain))


_Set_Anisotropy = _spirit.Hamiltonian_Set_Anisotropy
_Set_Anisotropy.argtypes = [ctypes.c_void_p, ctypes.c_float, ctypes.POINTER(ctypes.c_float),
                            ctypes.c_int, ctypes.c_int]
_Set_Anisotropy.restype = None


def set_anisotropy(p_state, magnitude, direction, idx_image=-1, idx_chain=-1):
    """Set the (homogeneous) magnetocrystalline anisotropy."""
    vec3 = ctypes.c_float * 3
    _Set_Anisotropy(ctypes.c_void_p(p_state), ctypes.c_float(magnitude), vec3(*direction),
                    ctypes.c_int(idx_image), ctypes.c_int(idx_chain))


_Set_Exchange = _spirit.Hamiltonian_Set_Exchange
_Set_Exchange.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.POINTER(ctypes.c_float),
                          ctypes.c_int, ctypes.c_int]
_Set_Exchange.restype = None


def set_exchange(p_state, n_shells, J_ij, idx_image=-1, idx_chain=-1):
    """Set the Exchange interaction in terms of neighbour shells."""
    vec = ctypes.c_float * n_shells
    _Set_Exchange(ctypes.c_void_p(p_state), ctypes.c_int(n_shells), vec(*J_ij),
                  ctypes.c_int(idx_image), ctypes.c_int(idx_chain))


_Set_DMI = _spirit.Hamiltonian_Set_DMI
_Set_DMI.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.POINTER(ctypes.c_float),
                     ctypes.c_int, ctypes.c_int, ctypes.c_int]
_Set_DMI.restype = None


def set_dmi(p_state, n_shells, D_ij, chirality=CHIRALITY_BLOCH, idx_image=-1, idx_chain=-1):
    """Set the Dzyaloshinskii-Moriya interaction in terms of neighbour shells."""
    vec = ctypes.c_float * n_shells
    _Set_DMI(ctypes.c_void_p(p_state), ctypes.c_int(n_shells), vec(*D_ij),
             ctypes.c_int(chirality), ctypes.c_int(idx_image), ctypes.c_int(idx_chain))


# Micromagnetics
# NOTE(review): _Set_Ms binds the SAME C symbol as _Set_Field_m below
# (Hamiltonian_Set_Field_Regions) but declares different argtypes — one of
# the two bindings is presumably wrong; confirm against the C API.
_Set_Ms = _spirit.Hamiltonian_Set_Field_Regions
_Set_Ms.argtypes = [ctypes.c_void_p, ctypes.c_float, ctypes.c_int,
                    ctypes.c_int, ctypes.c_int]
_Set_Ms.restype = None


def set_ms(p_state, Ms, region_id, idx_image=-1, idx_chain=-1):
    """Set the saturation magnetisation Ms for a region.

    NOTE(review): original docstring said "external magnetic field"; this
    wrapper binds Hamiltonian_Set_Field_Regions — verify intent.
    """
    _Set_Ms(ctypes.c_void_p(p_state), ctypes.c_float(Ms), ctypes.c_int(region_id),
            ctypes.c_int(idx_image), ctypes.c_int(idx_chain))


_Set_Field_m = _spirit.Hamiltonian_Set_Field_Regions
_Set_Field_m.argtypes = [ctypes.c_void_p, ctypes.c_float, ctypes.POINTER(ctypes.c_float), ctypes.c_int,
                         ctypes.c_int, ctypes.c_int]
_Set_Field_m.restype = None


def set_field_m(p_state, magnitude, normal, region_id, idx_image=-1, idx_chain=-1):
    """Set the external magnetic field for a region (micromagnetics)."""
    vec3 = ctypes.c_float * 3
    _Set_Field_m(ctypes.c_void_p(p_state), ctypes.c_float(magnitude), vec3(*normal), ctypes.c_int(region_id),
                 ctypes.c_int(idx_image), ctypes.c_int(idx_chain))


_Set_Anisotropy_m = _spirit.Hamiltonian_Set_Anisotropy_Regions
_Set_Anisotropy_m.argtypes = [ctypes.c_void_p, ctypes.c_float, ctypes.POINTER(ctypes.c_float), ctypes.c_int,
                              ctypes.c_int, ctypes.c_int]
_Set_Anisotropy_m.restype = None


def set_anisotropy_m(p_state, magnitude, normal, region_id, idx_image=-1, idx_chain=-1):
    """Set the magnetocrystalline anisotropy for a region (micromagnetics)."""
    vec3 = ctypes.c_float * 3
    _Set_Anisotropy_m(ctypes.c_void_p(p_state), ctypes.c_float(magnitude), vec3(*normal), ctypes.c_int(region_id),
                      ctypes.c_int(idx_image), ctypes.c_int(idx_chain))


_Set_Exchange_m = _spirit.Hamiltonian_Set_Exchange_Tensor
_Set_Exchange_m.argtypes = [ctypes.c_void_p, ctypes.c_float, ctypes.c_int,
                            ctypes.c_int, ctypes.c_int]
_Set_Exchange_m.restype = None


def set_exchange_m(p_state, exchange_tensor, region_id, idx_image=-1, idx_chain=-1):
    """Set the micromagnetic exchange tensor (scalar value) for a region.

    NOTE(review): original docstring was copy-pasted from the neighbour-shell
    setter; this binds Hamiltonian_Set_Exchange_Tensor.
    """
    _Set_Exchange_m(ctypes.c_void_p(p_state), ctypes.c_float(exchange_tensor), ctypes.c_int(region_id),
                    ctypes.c_int(idx_image), ctypes.c_int(idx_chain))


_Set_DMI_m = _spirit.Hamiltonian_Set_DMI_Tensor
_Set_DMI_m.argtypes = [ctypes.c_void_p, ctypes.c_float, ctypes.c_int,
                       ctypes.c_int, ctypes.c_int]
_Set_DMI_m.restype = None


def set_dmi_m(p_state, dmi_tensor, region_id, idx_image=-1, idx_chain=-1):
    """Set the micromagnetic DMI tensor (scalar value) for a region.

    NOTE(review): original docstring was copy-pasted from the neighbour-shell
    setter; this binds Hamiltonian_Set_DMI_Tensor.
    """
    _Set_DMI_m(ctypes.c_void_p(p_state), ctypes.c_float(dmi_tensor), ctypes.c_int(region_id),
               ctypes.c_int(idx_image), ctypes.c_int(idx_chain))


_Set_DDI = _spirit.Hamiltonian_Set_DDI
_Set_DDI.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.c_float,
                     ctypes.c_int, ctypes.c_int]
_Set_DDI.restype = None


def set_ddi(p_state, ddi_method, n_periodic_images=[4, 4, 4], radius=0.0, idx_image=-1, idx_chain=-1):
    """Set the dipolar interaction calculation method.

    - `ddi_method`: one of the DDI_METHOD_* integers defined above
    - `n_periodic_images`: the number of periodical images in the three translation
      directions, taken into account when boundaries in the corresponding
      direction are periodical
    - `radius`: the cutoff radius for the direct summation method

    NOTE(review): the mutable default `[4,4,4]` is safe here because it is
    never mutated, only unpacked.
    """
    vec3 = ctypes.c_int * 3
    _Set_DDI(ctypes.c_void_p(p_state), ctypes.c_int(ddi_method), vec3(*n_periodic_images), ctypes.c_float(radius),
             ctypes.c_int(idx_image), ctypes.c_int(idx_chain))
### ---------------------------------- Get ----------------------------------

_Get_Name = _spirit.Hamiltonian_Get_Name
_Get_Name.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
_Get_Name.restype = ctypes.c_char_p


def get_name(p_state, idx_image=-1, idx_chain=-1):
    """Returns a string containing the name of the Hamiltonian currently in use.

    NOTE(review): _Get_Name returns c_char_p, i.e. bytes under Python 3, so
    str() yields "b'...'"; .decode() may have been intended — confirm.
    """
    return str(_Get_Name(ctypes.c_void_p(p_state), ctypes.c_int(idx_image), ctypes.c_int(idx_chain)))


_Get_Boundary_Conditions = _spirit.Hamiltonian_Get_Boundary_Conditions
_Get_Boundary_Conditions.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_bool),
                                     ctypes.c_int, ctypes.c_int]
_Get_Boundary_Conditions.restype = None


def get_boundary_conditions(p_state, idx_image=-1, idx_chain=-1):
    """Returns an array of `shape(3)` containing the boundary conditions in the
    three translation directions `[a, b, c]` of the lattice.
    """
    # Output buffer filled by the C call.
    boundaries = (3 * ctypes.c_bool)()
    _Get_Boundary_Conditions(ctypes.c_void_p(p_state), boundaries,
                             ctypes.c_int(idx_image), ctypes.c_int(idx_chain))
    return [bc for bc in boundaries]
_Get_Field = _spirit.Hamiltonian_Get_Field
_Get_Field.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_float),
                       ctypes.POINTER(ctypes.c_float), ctypes.c_int, ctypes.c_int]
_Get_Field.restype = None


def get_field(p_state, idx_image=-1, idx_chain=-1):
    """Returns the magnitude and an array of `shape(3)` containing the direction of
    the external magnetic field.
    """
    # Output buffers filled by the C call.
    magnitude = (1 * ctypes.c_float)()
    normal = (3 * ctypes.c_float)()
    _Get_Field(ctypes.c_void_p(p_state), magnitude, normal,
               ctypes.c_int(idx_image), ctypes.c_int(idx_chain))
    # BUG FIX: float() cannot convert a ctypes array directly (TypeError);
    # extract the single element before converting.
    return float(magnitude[0]), [n for n in normal]
_Get_DDI = _spirit.Hamiltonian_Get_DDI
_Get_DDI.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
_Get_DDI.restype = ctypes.c_float


def get_ddi(p_state, idx_image=-1, idx_chain=-1):
    """Returns the cutoff radius of the DDI (direct-summation method)."""
    return float(_Get_DDI(ctypes.c_void_p(p_state), ctypes.c_int(idx_image),
                          ctypes.c_int(idx_chain)))
| 48.245192
| 115
| 0.688889
|
4a01dcfbb511321e882d8816fc55bf762a7cd589
| 2,019
|
py
|
Python
|
src/app/confusion_matrix_calculation_app.py
|
LiuYuWei/service-structure-data-extract
|
6a3d74cfd71e27364dc1cfc8d42787b72e0f738d
|
[
"Apache-2.0"
] | null | null | null |
src/app/confusion_matrix_calculation_app.py
|
LiuYuWei/service-structure-data-extract
|
6a3d74cfd71e27364dc1cfc8d42787b72e0f738d
|
[
"Apache-2.0"
] | null | null | null |
src/app/confusion_matrix_calculation_app.py
|
LiuYuWei/service-structure-data-extract
|
6a3d74cfd71e27364dc1cfc8d42787b72e0f738d
|
[
"Apache-2.0"
] | null | null | null |
"""Confusion matrix calculation app."""
# coding=utf-8
# import relation package.
# import project package.
from config.config_setting import ConfigSetting
from src.service.confusion_matrix_calculation_service import ConfusionMatrixCalculationService
class ConfusionMatrixCalculationApp:
    """Application-layer facade over ConfusionMatrixCalculationService."""

    def __init__(self):
        """Set up logging, configuration, and the calculation service."""
        config_setting = ConfigSetting()
        self.log = config_setting.set_logger(
            "[Confusion_matrix_calculation_app]")
        self.config = config_setting.yaml_parser()
        self.confusion_matrix_calculation_service = ConfusionMatrixCalculationService()

    def get_confusion_matrix(self, y_true, y_pred):
        """Return the confusion-matrix payload for true/predicted labels."""
        service = self.confusion_matrix_calculation_service
        return service.confusion_matrix_calculation(y_true, y_pred)

    def get_confusion_matrix_html(self, y_true, y_pred):
        """Return an HTML rendering of the confusion matrix."""
        service = self.confusion_matrix_calculation_service
        return service.confusion_matrix_calculation_html(y_true, y_pred)

    def confusion_matrix_to_accuracy_value(self, confusion_matrix):
        """Compute the accuracy payload from an existing confusion matrix."""
        service = self.confusion_matrix_calculation_service
        return service.accuracy_calculation(confusion_matrix=confusion_matrix)

    def prediction_to_accuracy_value(self, y_true, y_pred):
        """Compute the accuracy payload directly from true/predicted labels."""
        matrix = self.get_confusion_matrix(y_true, y_pred)
        return self.confusion_matrix_to_accuracy_value(matrix)

    def confusion_matrix_to_precision_recall_value(self, confusion_matrix):
        """Compute precision/recall from an existing confusion matrix."""
        service = self.confusion_matrix_calculation_service
        return service.precision_recall_calculation(confusion_matrix=confusion_matrix)

    def prediction_to_precision_recall_value(self, y_true, y_pred):
        """Compute precision/recall directly from true/predicted labels."""
        matrix = self.get_confusion_matrix(y_true, y_pred)
        return self.confusion_matrix_to_precision_recall_value(matrix)
| 39.588235
| 94
| 0.747895
|
4a01dd326d90f2d2c58ad325933bdbb91d8ca582
| 257
|
py
|
Python
|
WikiTerms/getterms.py
|
SIREN-DST/SeedURLWork
|
6e74adbf584bdf5a2fdfb4ac258af124917ea2c6
|
[
"MIT"
] | null | null | null |
WikiTerms/getterms.py
|
SIREN-DST/SeedURLWork
|
6e74adbf584bdf5a2fdfb4ac258af124917ea2c6
|
[
"MIT"
] | null | null | null |
WikiTerms/getterms.py
|
SIREN-DST/SeedURLWork
|
6e74adbf584bdf5a2fdfb4ac258af124917ea2c6
|
[
"MIT"
] | 2
|
2019-05-01T08:31:32.000Z
|
2020-06-28T14:33:19.000Z
|
# NOTE(review): Python 2 code (`print c` statement; str.decode on byte-string
# lines) — will not run on Python 3 without porting.
# Extracts term lines from the NIST IR 7298r2 glossary text dump: the last
# character of the first line is remembered as a marker (`c`); when a line
# contains a byte that fails utf-8 round-tripping, the clean prefix is written
# to `terms` provided the line ends with the marker.
f=open('NIST.IR.7298r2.txt')
g=open('terms','w')
# i counts lines so the very first line can establish the marker character.
i=0
for line in f:
    k=''
    s=line.decode('utf-8').strip()
    if i==0:
        # Remember the last character of the first line as the marker.
        c=s[-1]
        print c
    i+=1
    for j in line:
        try:
            # Accumulate characters that encode cleanly (ascii-safe bytes).
            k+=j.encode('utf-8')
        except:
            # First problematic character: flush the accumulated prefix if the
            # stripped line ends with the marker, then stop scanning this line.
            # NOTE(review): indentation of this block reconstructed from a
            # whitespace-mangled source — verify against the original file.
            if s[-1]==c:
                g.write(k)
                g.write('\n')
            break
| 14.277778
| 31
| 0.521401
|
4a01dd48f27d2b0bb1465f87503ecc6edd2cc9a0
| 387
|
py
|
Python
|
cride/circles/migrations/0005_auto_20190227_2238.py
|
mpita/cride
|
5e8afb710d3ac704c9c3819e0f7ff762e60948ff
|
[
"MIT"
] | 1
|
2019-04-09T07:15:46.000Z
|
2019-04-09T07:15:46.000Z
|
cride/circles/migrations/0005_auto_20190227_2238.py
|
mpita/cride
|
5e8afb710d3ac704c9c3819e0f7ff762e60948ff
|
[
"MIT"
] | 7
|
2020-06-05T19:54:39.000Z
|
2022-03-11T23:41:06.000Z
|
cride/circles/migrations/0005_auto_20190227_2238.py
|
mpita/cride
|
5e8afb710d3ac704c9c3819e0f7ff762e60948ff
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.10 on 2019-02-27 22:38
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Membership field `userd_invitation` to `used_invitation` (typo fix)."""

    dependencies = [
        ('circles', '0004_auto_20190226_2153'),
    ]

    operations = [
        migrations.RenameField(
            model_name='membership',
            old_name='userd_invitation',
            new_name='used_invitation',
        ),
    ]
| 20.368421
| 48
| 0.609819
|
4a01dd8acbd5d161fe816414e429e1b875926216
| 825
|
py
|
Python
|
di/utils/inspection/module_factories/__init__.py
|
dlski/python-di
|
04dcdf58f3cf820e2d2ba5086e4e89822ae1f409
|
[
"MIT"
] | 8
|
2021-02-05T16:17:31.000Z
|
2022-03-03T00:01:33.000Z
|
di/utils/inspection/module_factories/__init__.py
|
dlski/python-di
|
04dcdf58f3cf820e2d2ba5086e4e89822ae1f409
|
[
"MIT"
] | null | null | null |
di/utils/inspection/module_factories/__init__.py
|
dlski/python-di
|
04dcdf58f3cf820e2d2ba5086e4e89822ae1f409
|
[
"MIT"
] | null | null | null |
from di.utils.inspection.module_factories.base import (
FactoryFilter,
FactoryFilterCascade,
FactoryItem,
)
from di.utils.inspection.module_factories.filters import (
AllFactoryFilter,
InternalsFactoryFilter,
InternalsOrAllFactoryFilter,
NonAbstractFactoryFilter,
NonDataclassFactoryFilter,
NonTypeFactoryFilter,
PublicFactoryFilter,
)
from di.utils.inspection.module_factories.inspector import ModuleFactoriesInspector
__all__ = [
# base
"FactoryFilter",
"FactoryFilterCascade",
"FactoryItem",
# filters
"AllFactoryFilter",
"InternalsFactoryFilter",
"InternalsOrAllFactoryFilter",
"NonAbstractFactoryFilter",
"NonDataclassFactoryFilter",
"NonTypeFactoryFilter",
"PublicFactoryFilter",
# inspector
"ModuleFactoriesInspector",
]
| 25
| 83
| 0.749091
|
4a01ddd4c6bf6871f305746a700326ae59d3bf7e
| 3,134
|
py
|
Python
|
tracker/filters/ball_kalman_filter.py
|
RoboCupULaval/TrackBots
|
4cf272a32a1933613a904a170d6fe84a9583ee02
|
[
"MIT"
] | 1
|
2017-08-21T19:58:00.000Z
|
2017-08-21T19:58:00.000Z
|
tracker/filters/ball_kalman_filter.py
|
RoboCupULaval/TrackBots
|
4cf272a32a1933613a904a170d6fe84a9583ee02
|
[
"MIT"
] | null | null | null |
tracker/filters/ball_kalman_filter.py
|
RoboCupULaval/TrackBots
|
4cf272a32a1933613a904a170d6fe84a9583ee02
|
[
"MIT"
] | 1
|
2020-08-07T01:13:48.000Z
|
2020-08-07T01:13:48.000Z
|
from tracker.filters.kalman_filter import KalmanFilter
import numpy as np
from tracker.constants import BallConst
class BallFilter(KalmanFilter):
    """Kalman filter tracking the ball's 2-D position and velocity.

    State vector x = [pos_x, vel_x, pos_y, vel_y] with a constant-velocity
    transition model; observations are the (x, y) position only. A scalar
    `confidence` is raised on each update and decayed on each predict.
    """

    # Tuning constants taken from the shared BallConst configuration.
    INITIAL_CONFIDENCE = BallConst.INITIAL_CONFIDENCE
    INITIAL_STATE_COVARIANCE = BallConst.INITIAL_STATE_COVARIANCE
    POSITION_PROCESS_COVARIANCE = BallConst.POSITION_PROCESS_COVARIANCE
    VELOCITY_PROCESS_COVARIANCE = BallConst.VELOCITY_PROCESS_COVARIANCE
    POSITION_OBSERVATION_COVARIANCE = BallConst.POSITION_OBSERVATION_COVARIANCE

    def __init__(self):
        # The base class is assumed to set up the filter matrices
        # (state_number, observable_state, F, H, P, ...) — see KalmanFilter.
        self.confidence = BallFilter.INITIAL_CONFIDENCE
        super().__init__()

    @property
    def pose(self):
        """Current (x, y) position estimate.

        NOTE(review): implicitly returns None when the filter is inactive
        (no explicit else branch), unlike `velocity` which is explicit.
        """
        if self.is_active:
            return np.array([self.x[0], self.x[2]]).flatten()

    @property
    def velocity(self):
        """Current (vx, vy) velocity estimate, or None when inactive."""
        if not self.is_active:
            return None
        else:
            return np.array([self.x[1], self.x[3]]).flatten()

    def transition_model(self, dt=0):
        """Constant-velocity state-transition matrix F for time step dt."""
        return np.array([[1, dt, 0, 0],  # Position x
                         [0, 1, 0, 0],  # Speed x
                         [0, 0, 1, dt],  # Position y
                         [0, 0, 0, 1]])  # Speed y

    def observation_model(self):
        """Observation matrix H selecting only the position components."""
        return np.array([[1, 0, 0, 0],  # Position x
                         [0, 0, 1, 0]])  # Position y

    def control_input_model(self, dt=0):
        """Control-input matrix B — zero: the ball receives no control input."""
        return np.zeros((self.state_number, self.observable_state))

    def initial_state_covariance(self):
        """Initial state covariance P0 = c * I."""
        return BallFilter.INITIAL_STATE_COVARIANCE * np.eye(self.state_number)

    def process_covariance(self):
        """Diagonal process-noise covariance Q (per position/velocity axis)."""
        return np.diag([BallFilter.POSITION_PROCESS_COVARIANCE,
                        BallFilter.VELOCITY_PROCESS_COVARIANCE,
                        BallFilter.POSITION_PROCESS_COVARIANCE,
                        BallFilter.VELOCITY_PROCESS_COVARIANCE])

    def observation_covariance(self):
        """Observation-noise covariance R = c * I."""
        return BallFilter.POSITION_OBSERVATION_COVARIANCE * np.eye(self.observable_state)

    def update(self, observation, t_capture):
        """Kalman correction step with a new position observation.

        Out-of-order frames (dt < 0) are silently discarded.
        """
        self.is_active = True
        self._increase_confidence()
        dt = t_capture - self.last_t_capture
        if dt < 0:
            return
        self.last_t_capture = t_capture
        self.last_observation = observation
        self.F = self.transition_model(dt)
        # Standard Kalman update: innovation y, innovation covariance S, gain K.
        y = observation - np.dot(self.H, self.x)
        self.S = np.dot(np.dot(self.H, self.P), self.H.T) + self.R
        K = np.dot(np.dot(self.P, self.H.T), np.linalg.inv(self.S))
        self.x = self.x + np.dot(K, y)
        self.P = np.dot((np.eye(self.state_number) - np.dot(K, self.H)), self.P)

    def predict(self, dt=0):
        """Kalman prediction step, advancing the state by dt."""
        self._decrease_confidence()
        self.F = self.transition_model(dt)
        self.B = self.control_input_model(dt)
        self.x = np.dot(self.F, self.x) + np.dot(self.B, self.u)
        self.P = np.dot(np.dot(self.F, self.P), self.F.T) + self.Q

    def _increase_confidence(self):
        # Clamp the confidence at 100.
        self.confidence += 1
        if self.confidence > 100:
            self.confidence = 100

    def _decrease_confidence(self):
        # Exponential decay; the < 0 clamp is unreachable for a non-negative
        # starting value but kept as a safeguard.
        self.confidence *= 0.95
        if self.confidence < 0:
            self.confidence = 0
| 33.340426
| 89
| 0.619336
|
4a01dfaf1d431a38d03737369b852e8918b49bed
| 7,944
|
py
|
Python
|
classifier/train_model.py
|
juandiegomarquez/hate-speech-and-offensive-language
|
865bb58052f84c132d4830b53ebc9f6034d9e28f
|
[
"MIT"
] | null | null | null |
classifier/train_model.py
|
juandiegomarquez/hate-speech-and-offensive-language
|
865bb58052f84c132d4830b53ebc9f6034d9e28f
|
[
"MIT"
] | null | null | null |
classifier/train_model.py
|
juandiegomarquez/hate-speech-and-offensive-language
|
865bb58052f84c132d4830b53ebc9f6034d9e28f
|
[
"MIT"
] | null | null | null |
# %% [markdown]
# # Hate Speech Model
#
# Trains a LinearSVC hate-speech classifier on the labeled tweet dataset and
# pickles the fitted vectorizers, selected-feature metadata and final model.
# Relies on the project-local `helpers` module for tokenization, POS tagging
# and miscellaneous feature extraction.
import pandas as pd
import numpy as np
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk import corpus
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import classification_report
from sklearn.svm import LinearSVC
import helpers

df = pd.read_csv('../data/labeled_data.csv')
tweets = df.tweet

# %% [markdown]
# ## Part 1: Compute full feature matrix
# Features: <br>
# - tfidf character n-gram values:
#     - character unigrams
#     - character bigrams
#     - character trigrams
# - tfidf part of speech values:
#     - pos unigrams
#     - pos bigrams
#     - pos trigrams
# - miscellaneous features:
#     - FKRA (Flesch-Kincaid Reading Age)
#         - (0.39 x num_words) + (11.8 x avg_syl) - 15.59
#     - FRE (Flesch Reading Ease)
#         - 206.835 - (1.015 x num_words) - (84.6 x avg_syl)
#     - total number of syllables
#     - avg number of syllables
#     - number of characters in words (excludes spaces, punctuation, etc)
#     - total number of characters in document
#     - number of terms (includes non-word terms such as punctuation, emojis, etc)
#     - number of words
#     - number of unique terms
#     - vader negative sentiment
#     - vader positive sentiment
#     - vader neutral sentiment
#     - vader compound sentiment
#     - number of hashtags
#     - number of urls
#     - number of mentions
#     - boolean retweet/non-retweet

# Standard English stopwords plus Twitter-specific noise tokens.
stopwords = corpus.stopwords.words("english")
additional_exclusions = ["#ff", "ff", "rt"]
stopwords.extend(additional_exclusions)

ngram_vectorizer = TfidfVectorizer(
    tokenizer=helpers.tokenize,
    preprocessor=helpers.preprocess,
    ngram_range=(1, 3),
    stop_words=stopwords,
    use_idf=True,
    smooth_idf=False,
    norm=None,
    decode_error='replace',
    max_features=10000,
    min_df=5,
    max_df=0.501
)

# Tfidf feature matrix
# Also save idf feature matrix
# NOTE(review): get_feature_names() is deprecated in newer scikit-learn
# (use get_feature_names_out()).
ngram_features = ngram_vectorizer.fit_transform(tweets).toarray()
ngram_vocab = {v:i for i, v in enumerate(ngram_vectorizer.get_feature_names())}
ngram_idf_values = ngram_vectorizer.idf_

# Compute parts of speech (pos) for all tweets
tweet_tags = helpers.get_pos_tags(tweets)

# Use the TFIDF vectorizer to get a token matrix for the POS tags
pos_vectorizer = TfidfVectorizer(
    tokenizer=None,
    lowercase=False,
    preprocessor=None,
    ngram_range=(1, 3),
    stop_words=None,
    use_idf=False,
    smooth_idf=False,
    norm=None,
    decode_error='replace',
    max_features=5000,
    min_df=5,
    max_df=0.501,
)

# POS feature matrix
pos_features = pos_vectorizer.fit_transform(pd.Series(tweet_tags)).toarray()
pos_vocab = {v:i for i, v in enumerate(pos_vectorizer.get_feature_names())}

# Names of the hand-crafted features computed by helpers.
misc_feature_names = ["FKRA",
                      "FRE",
                      "num_syllables",
                      "avg_syl_per_word",
                      "num_chars",
                      "num_chars_total",
                      "num_terms",
                      "num_words",
                      "num_unique_terms",
                      "sentiment_neg",
                      "sentiment_pos",
                      "sentiment_neu",
                      "sentiment_compound",
                      "num_hashtags",
                      "num_mentions",
                      "num_urls",
                      "is_retweet"
                      ]
misc_features = helpers.get_miscellaneous_features(tweets, features = misc_feature_names)

# Complete feature matrix
feature_matrix = np.concatenate([ngram_features,
                                 pos_features,
                                 misc_features
                                 ] ,axis=1)

# Invert the vocab dicts into ordered name lists (index -> feature name).
ngram_feature_names = ['']*len(ngram_vocab)
for k,v in ngram_vocab.items():
    ngram_feature_names[v] = k
pos_feature_names = ['']*len(pos_vocab)
for k,v in pos_vocab.items():
    pos_feature_names[v] = k
feature_names = ngram_feature_names + pos_feature_names + misc_feature_names

X = pd.DataFrame(feature_matrix)
y = df['class'].astype(int)

# %% [markdown]
# ## Part 2: Feature Selection and Train Model
# L2-regularized logistic regression selects the informative features.
feature_selector = SelectFromModel(
    LogisticRegression(
        class_weight='balanced',
        penalty='l2',
        C=0.01
    )
)
X_selected = feature_selector.fit_transform(X,y)
model = LinearSVC(class_weight='balanced',
                  C=0.01,
                  penalty='l2',
                  loss='squared_hinge',
                  multi_class='ovr'
                  ).fit(X_selected, y)
# NOTE(review): report is computed on the training data — this measures fit,
# not generalization.
y_preds = model.predict(X_selected)
report = classification_report(y, y_preds)
print(report)

# Feature indices
final_feature_indices = feature_selector.get_support(indices=True)
# Feature names
final_feature_list = [str(feature_names[i]) for i in final_feature_indices]

# %% [markdown]
# # Part 3: Package models
# Above we computed a full feature matrix, selected the best features, <br>
# trained a model, and generated a report to evaluate the model. <br>
# Below we will train a model with only the selected features from the <br>
# start and regenerate the report to verify the results are the same.

# Split the selected-feature list back into its three sections; the sentinel
# names 'z z z' and 'VBZ DT JJ' mark the end of the ngram and POS blocks.
# NOTE(review): this assumes both sentinels survived feature selection —
# fragile if the selector drops them.
select_ngram_features = final_feature_list[:final_feature_list.index('z z z')+1]
select_pos_features = final_feature_list[final_feature_list.index('z z z')+1:final_feature_list.index('VBZ DT JJ')+1]
select_misc_features = final_feature_list[final_feature_list.index('VBZ DT JJ')+1:]

with open("../pickled_models/final_misc_features.pkl", "wb") as file:
    pickle.dump(select_misc_features, file)

select_ngram_vocab = {v:i for i, v in enumerate(select_ngram_features)}
select_ngram_indices = final_feature_indices[:len(select_ngram_features)]

# Re-create the ngram vectorizer restricted to the selected vocabulary.
select_ngram_vectorizer = TfidfVectorizer(
    tokenizer=helpers.tokenize,
    preprocessor=helpers.preprocess,
    ngram_range=(1, 3),
    stop_words=stopwords,
    use_idf=False,
    smooth_idf=False,
    norm=None,
    decode_error='replace',
    min_df=1,
    max_df=1.0,
    vocabulary=select_ngram_vocab
)
with open('../pickled_models/ngram_model.pkl',"wb") as file:
    pickle.dump(select_ngram_vectorizer, file)
select_ngram_features = select_ngram_vectorizer.fit_transform(tweets).toarray()
# Apply the idf weights learned on the full vocabulary to the selected subset.
select_ngram_idf_values = ngram_idf_values[select_ngram_indices]
with open('../pickled_models/idf_model.pkl',"wb") as file:
    pickle.dump(select_ngram_idf_values, file)
final_ngram_features = select_ngram_features * select_ngram_idf_values

select_pos_vocab = {v:i for i, v in enumerate(select_pos_features)}
# Re-create the POS vectorizer restricted to the selected vocabulary.
select_pos_vectorizer = TfidfVectorizer(
    tokenizer=None,
    lowercase=False,
    preprocessor=None,
    ngram_range=(1, 3),
    stop_words=None,
    use_idf=False,
    smooth_idf=False,
    norm=None,
    decode_error='replace',
    min_df=1,
    max_df=1.0,
    vocabulary=select_pos_vocab
)
with open('../pickled_models/pos_model.pkl',"wb") as file:
    pickle.dump(select_pos_vectorizer, file)
final_pos_features = select_pos_vectorizer.fit_transform(tweet_tags).toarray()
final_misc_features = helpers.get_miscellaneous_features(tweets,
                                                        features=select_misc_features)
final_feature_matrix = np.concatenate([final_ngram_features,
                                       final_pos_features,
                                       final_misc_features
                                       ], axis=1)
X_final = pd.DataFrame(final_feature_matrix)
# Sanity check: predictions from the rebuilt pipeline should match Part 2.
y_preds_final = model.predict(X_final)
report = classification_report(y, y_preds_final)
print(report)

# %% [markdown]
# # Part 4: Save Model
with open('../pickled_models/final_model.pkl',"wb") as file:
    pickle.dump(model, file)

# %%
| 31.776
| 117
| 0.64854
|
4a01dfec96e4fc93f17e20190b1db79d7849fedc
| 1,774
|
py
|
Python
|
examples/pybullet/gym/pybullet_envs/deep_mimic/learning/exp_params.py
|
felipeek/bullet3
|
6a59241074720e9df119f2f86bc01765917feb1e
|
[
"Zlib"
] | 9,136
|
2015-01-02T00:41:45.000Z
|
2022-03-31T15:30:02.000Z
|
examples/pybullet/gym/pybullet_envs/deep_mimic/learning/exp_params.py
|
felipeek/bullet3
|
6a59241074720e9df119f2f86bc01765917feb1e
|
[
"Zlib"
] | 2,424
|
2015-01-05T08:55:58.000Z
|
2022-03-30T19:34:55.000Z
|
examples/pybullet/gym/pybullet_envs/deep_mimic/learning/exp_params.py
|
felipeek/bullet3
|
6a59241074720e9df119f2f86bc01765917feb1e
|
[
"Zlib"
] | 2,921
|
2015-01-02T10:19:30.000Z
|
2022-03-31T02:48:42.000Z
|
import json
import numpy as np
import pybullet_utils.math_util as MathUtil
class ExpParams(object):
    """Exploration parameters for a DeepMimic learning agent.

    Holds exploration rate, initial-action rate, noise levels and a softmax
    temperature; supports loading from a JSON dict and interpolation between
    two parameter sets.
    """

    # JSON keys recognised by load().
    RATE_KEY = 'Rate'
    INIT_ACTION_RATE_KEY = 'InitActionRate'
    NOISE_KEY = 'Noise'
    NOISE_INTERNAL_KEY = 'NoiseInternal'
    TEMP_KEY = 'Temp'

    def __init__(self):
        """Initialise all parameters with their default values."""
        self.rate = 0.2
        self.init_action_rate = 0
        self.noise = 0.1
        self.noise_internal = 0
        self.temp = 0.1

    def __str__(self):
        """Return a human-readable, one-parameter-per-line summary."""
        # Renamed from `str` to avoid shadowing the builtin.
        text = ''
        text += '{}: {:.2f}\n'.format(self.RATE_KEY, self.rate)
        text += '{}: {:.2f}\n'.format(self.INIT_ACTION_RATE_KEY, self.init_action_rate)
        text += '{}: {:.2f}\n'.format(self.NOISE_KEY, self.noise)
        text += '{}: {:.2f}\n'.format(self.NOISE_INTERNAL_KEY, self.noise_internal)
        text += '{}: {:.2f}\n'.format(self.TEMP_KEY, self.temp)
        return text

    def load(self, json_data):
        """Overwrite parameters from a JSON dict; absent keys keep their values."""
        if (self.RATE_KEY in json_data):
            self.rate = json_data[self.RATE_KEY]
        if (self.INIT_ACTION_RATE_KEY in json_data):
            self.init_action_rate = json_data[self.INIT_ACTION_RATE_KEY]
        if (self.NOISE_KEY in json_data):
            self.noise = json_data[self.NOISE_KEY]
        if (self.NOISE_INTERNAL_KEY in json_data):
            self.noise_internal = json_data[self.NOISE_INTERNAL_KEY]
        if (self.TEMP_KEY in json_data):
            self.temp = json_data[self.TEMP_KEY]

    def lerp(self, other, t):
        """Return parameters interpolated between self (t=0) and other (t=1).

        The temperature is interpolated in log space; everything else linearly.
        """
        lerp_params = ExpParams()
        lerp_params.rate = MathUtil.lerp(self.rate, other.rate, t)
        lerp_params.init_action_rate = MathUtil.lerp(self.init_action_rate, other.init_action_rate, t)
        lerp_params.noise = MathUtil.lerp(self.noise, other.noise, t)
        lerp_params.noise_internal = MathUtil.lerp(self.noise_internal, other.noise_internal, t)
        lerp_params.temp = MathUtil.log_lerp(self.temp, other.temp, t)
        return lerp_params
| 31.678571
| 98
| 0.692221
|
4a01e001a17d48f24c245b508b3ee18cf59e3f01
| 12,068
|
py
|
Python
|
plugin.py
|
heggink/domoticz-icloud
|
a6c68f83e09ae5c8f248892d8ea7433d8aeb3cdd
|
[
"MIT"
] | 1
|
2018-09-04T06:50:17.000Z
|
2018-09-04T06:50:17.000Z
|
plugin.py
|
heggink/domoticz-icloud
|
a6c68f83e09ae5c8f248892d8ea7433d8aeb3cdd
|
[
"MIT"
] | 2
|
2018-10-29T07:54:46.000Z
|
2019-07-02T19:50:17.000Z
|
plugin.py
|
heggink/domoticz-icloud
|
a6c68f83e09ae5c8f248892d8ea7433d8aeb3cdd
|
[
"MIT"
] | null | null | null |
"""
<plugin key="AppleDevices" name="Apple iCloud Device Presence" author="heggink" version="0.0.2">
<params>
<param field="Username" label="Apple user name" width="150px" required="true" default="username"/>
<param field="Password" label="Apple Password" width="150px" required="true" default="password"/>
<param field="Mode1" label="Apple device name" width="150px" required="true" default="device"/>
<param field="Mode2" label="Poll period (s)" width="150px" required="true" default="30"/>
<param field="Mode6" label="Debug" width="75px">
<options>
<option label="True" value="Debug"/>
<option label="False" value="Normal" default="true" />
</options>
</param>
</params>
</plugin>
"""
import Domoticz
#import datetime
from pyicloud import PyiCloudService
from geopy.distance import vincenty
import time
#v0.0.1:
class BasePlugin:
    """Domoticz plugin: polls iCloud for one device's position and maintains a
    geofence switch (unit 1) and a distance sensor (unit 2)."""

    def __init__(self):
        self.deviceName = ' '
        self.username = ''
        self.password = ''
        self.id = ''
        self.pollPeriod = 0
        self.pollCount = 0
        self.home = (0, 0)            # lat/lon of home, read from Settings
        self.circleLatitude = 0
        self.circleLongitude = 0
        self.away = (1, 1)            # fallback position when no fix is available
        self.mindist = 0.1            # geofence radius / min reportable change (miles)
        self.lastloc = 0
        self.lastdist = 100000000     # sentinel: no distance reported yet
        self.difdist = 0
        self.count = 0

    def onStart(self):
        Domoticz.Log("onStart called")
        if Parameters["Mode6"] == "Debug":
            Domoticz.Debugging(1)
        # BUGFIX: assign deviceName before logging it (original logged the
        # placeholder value first).
        self.deviceName = Parameters["Mode1"]
        Domoticz.Debug('devicename established: ' + self.deviceName)
        if "iCloud" not in Images:
            Domoticz.Debug('Icons Created...')
            Domoticz.Image('iCloud.zip').Create()
        # BUGFIX: look up the icon id unconditionally so iconPID is defined
        # even when the image already existed.
        iconPID = Images["iCloud"].ID
        self.username = Parameters["Username"]
        self.password = Parameters["Password"]
        # The home location is stored as "lat;lon" in the Domoticz settings.
        if "Location" not in Settings:
            Domoticz.Log("Location not set in Preferences")
            return False
        loc = Settings["Location"].split(";")
        # BUGFIX: validate before converting (the original converted with
        # float() first, so its None-check could never trigger).
        if len(loc) < 2 or not loc[0] or not loc[1]:
            Domoticz.Log("Unable to parse coordinates")
            return False
        self.home = (float(loc[0]), float(loc[1]))
        Domoticz.Debug("Coordinates from Domoticz: " + str(loc[0]) + ";" + str(loc[1]))
        # Initialise the iCloud service and create units if iCloud knows the device.
        api = PyiCloudService(self.username, self.password)
        if api is None:
            Domoticz.Log('Error Authenticating iCloud or Connection Problem...')
            Domoticz.Log('Please Use Correct Credentials and Restart The Plugin!')
        else:
            for rdev in api.devices:
                dev = str(rdev)
                Domoticz.Debug('Iterating device: [' + dev + '] to find [' + self.deviceName + ']')
                if self.deviceName in dev:
                    # BUGFIX: the original "if 1 and 2 not in Devices" only
                    # tested unit 2 ("1 and ..." is always truthy).
                    if 1 not in Devices and 2 not in Devices:
                        Domoticz.Debug(dev + ' matches ' + self.deviceName)
                        Domoticz.Device(Name='GFC', Unit=1, TypeName="Switch", Image=iconPID, Used=1).Create()
                        Domoticz.Device(Name='Distance', Unit=2, TypeName="Distance", Used=1).Create()
                        Domoticz.Debug(str(self.deviceName))
                        Domoticz.Debug("Devices created.")
        self.pollPeriod = int(int(Parameters["Mode2"]) / 10)   # heartbeats between polls
        self.pollCount = self.pollPeriod - 1                   # poll on the first heartbeat
        Domoticz.Debug('PollPeriod: ' + str(self.pollPeriod))
        Domoticz.Debug('PollCount: ' + str(self.pollCount))
        Domoticz.Heartbeat(10)

    def onStop(self):
        Domoticz.Log("onStop called")

    def onConnect(self, Connection, Status, Description):
        Domoticz.Log("onConnect called")

    def onMessage(self, Connection, Data, Status, Extra):
        Domoticz.Log("onMessage called")

    def onCommand(self, Unit, Command, Level, Hue):
        Domoticz.Log("onCommand called for Unit " + str(Unit) + ": Parameter '" + str(Command) + "', Level: " + str(Level))
        Command = Command.strip()
        action, sep, params = Command.partition(' ')
        action = action.capitalize()
        params = params.capitalize()
        if Command == 'Off':
            UpdateDevice(Unit, 0, 'Off')
        elif Command == 'On':
            UpdateDevice(Unit, 1, 'On')

    def onNotification(self, Name, Subject, Text, Status, Priority, Sound, ImageFile):
        Domoticz.Log("Notification: " + Name + "," + Subject + "," + Text + "," + Status + "," + str(Priority) + "," + Sound + "," + ImageFile)

    def onDisconnect(self, Connection):
        Domoticz.Log("onDisconnect called")

    def _report(self, dist):
        """Push distance (unit 2, metres) and geofence state (unit 1) to
        Domoticz and remember the reported distance (miles).

        Extracted from onHeartbeat, where the original duplicated this logic
        three times.
        """
        Domoticz.Debug('Update dist: ' + str(self.difdist))
        UpdateDevice(2, 1, str(dist * 1609))   # miles -> metres
        if dist <= self.mindist:
            Domoticz.Debug('Switching device ON: ' + self.deviceName)
            UpdateDevice(1, 1, 'On')
        else:
            if (self.lastdist > self.mindist) and (self.lastdist != 100000000):
                Domoticz.Debug('Device OFF but already reported OFF so no action: ' + self.deviceName)
            else:
                Domoticz.Debug('Switching device OFF: ' + self.deviceName)
                UpdateDevice(1, 0, 'Off')
        # BUGFIX: always store on self (one branch of the original assigned a
        # local "lastdist", losing the state).
        self.lastdist = dist

    def onHeartbeat(self):
        Domoticz.Debug("onHeartBeat called:" + str(self.pollCount) + "/" + str(self.pollPeriod))
        if self.pollCount >= self.pollPeriod:
            Domoticz.Debug("Checking iCloud...")
            api = PyiCloudService(self.username, self.password)
            for rdev in api.devices:
                dev = str(rdev)
                Domoticz.Debug('Iterating device: [' + dev + '] to find [' + self.deviceName + ']')
                if self.deviceName == dev:
                    curr_loc = rdev.location()
                    if curr_loc is None:
                        # BUGFIX: the original called Domoticz.log (no such
                        # method) and referenced an undefined name "away".
                        Domoticz.Log('Unable to find location for ' + dev)
                        dist = vincenty(self.home, self.away).miles
                    else:
                        latitude = float(curr_loc['latitude'])
                        longitude = float(curr_loc['longitude'])
                        dist = vincenty(self.home, (latitude, longitude)).miles
                        Domoticz.Debug('I got location for ' + self.deviceName + ' of lat: ' + str(latitude) + ', long: ' + ', ' + str(longitude) + ' Finished: ' + str(curr_loc['locationFinished']) + ', Distance: ' + str(dist) + ' miles')
                    self.difdist = abs(dist - self.lastdist)
                    self.count = self.count + 1
                    if self.lastdist == 100000000:
                        # First reading after start: always report.
                        Domoticz.Debug('Starting up so update location')
                        self._report(dist)
                    elif self.count >= 120:
                        # Periodic forced refresh; reset the cycle counter.
                        Domoticz.Debug('Counter hit, update location')
                        self.count = 0
                        self._report(dist)
                    elif self.difdist > self.mindist:
                        # Moved far enough since the last report.
                        Domoticz.Debug('Difdist > mindist, update location')
                        self._report(dist)
                    else:
                        Domoticz.Debug('No update needed as within last distance')
            self.pollCount = 0  # reset poll counter
        else:
            self.pollCount = self.pollCount + 1
# Domoticz instantiates the plugin once at module load; the functions below are
# the module-level hooks the framework calls, each delegating to the instance.
global _plugin
_plugin = BasePlugin()


def onStart():
    global _plugin
    _plugin.onStart()


def onStop():
    global _plugin
    _plugin.onStop()


def onConnect(Connection, Status, Description):
    global _plugin
    _plugin.onConnect(Connection, Status, Description)


def onMessage(Connection, Data, Status, Extra):
    global _plugin
    _plugin.onMessage(Connection, Data, Status, Extra)


def onCommand(Unit, Command, Level, Hue):
    global _plugin
    _plugin.onCommand(Unit, Command, Level, Hue)


def onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile):
    global _plugin
    _plugin.onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile)


def onDisconnect(Connection):
    global _plugin
    _plugin.onDisconnect(Connection)


def onHeartbeat():
    global _plugin
    _plugin.onHeartbeat()


def UpdateDevice(Unit, nValue, sValue):
    """Update a Domoticz device, but only when its value actually changed."""
    # Make sure that the Domoticz device still exists (they can be deleted) before updating it
    if (Unit in Devices):
        # Skip the update when both values are unchanged to avoid log spam.
        if (Devices[Unit].nValue != nValue) or (Devices[Unit].sValue != sValue):
            Devices[Unit].Update(nValue=nValue, sValue=str(sValue))
            Domoticz.Debug("Update "+str(nValue)+":'"+str(sValue)+"' ("+Devices[Unit].Name+")")
    return
# Generic helper functions
def DumpConfigToLog():
    """Dump all non-empty plugin parameters and every known device to the
    Domoticz debug log; useful when diagnosing configuration problems."""
    for x in Parameters:
        if Parameters[x] != "":
            Domoticz.Debug( "'" + x + "':'" + str(Parameters[x]) + "'")
    Domoticz.Debug("Device count: " + str(len(Devices)))
    for x in Devices:
        Domoticz.Debug("Device: " + str(x) + " - " + str(Devices[x]))
        Domoticz.Debug("Device ID: '" + str(Devices[x].ID) + "'")
        Domoticz.Debug("Device Name: '" + Devices[x].Name + "'")
        Domoticz.Debug("Device nValue: " + str(Devices[x].nValue))
        Domoticz.Debug("Device sValue: '" + Devices[x].sValue + "'")
        Domoticz.Debug("Device LastLevel: " + str(Devices[x].LastLevel))
    return
| 43.1
| 250
| 0.530577
|
4a01e23a5810ba4244de53857502d63622b7e55f
| 517
|
py
|
Python
|
readthedocs/api/v2/proxied_urls.py
|
tkoyama010/readthedocs.org
|
aac8fb39586db902d9fbb51b639dd281c819dae2
|
[
"MIT"
] | null | null | null |
readthedocs/api/v2/proxied_urls.py
|
tkoyama010/readthedocs.org
|
aac8fb39586db902d9fbb51b639dd281c819dae2
|
[
"MIT"
] | 7
|
2020-07-07T21:17:17.000Z
|
2022-01-13T02:31:42.000Z
|
readthedocs/api/v2/proxied_urls.py
|
mondeja/readthedocs.org
|
fb01c6d9d78272e3f4fd146697e8760c04e4fbb6
|
[
"MIT"
] | null | null | null |
"""
Proxied API URLs.
Served from the same domain docs are served,
so they can make use of features that require to have access to their cookies.
"""
from django.conf.urls import include, url
from .views.proxied import ProxiedFooterHTML
from readthedocs.search.proxied_api import ProxiedPageSearchAPIView
api_footer_urls = [
url(r'footer_html/', ProxiedFooterHTML.as_view(), name='footer_html'),
url(r'docsearch/$', ProxiedPageSearchAPIView.as_view(), name='doc_search'),
]
urlpatterns = api_footer_urls
| 27.210526
| 79
| 0.775629
|
4a01e2f3168d7493f2ba8487f74f02d739e96d03
| 5,049
|
py
|
Python
|
Qubie_code/python/robotcontrol.py
|
OldETC/Bipedal_Qubie
|
a789ea84ce83c6e135bb55d48fa63464ed958632
|
[
"Apache-2.0"
] | null | null | null |
Qubie_code/python/robotcontrol.py
|
OldETC/Bipedal_Qubie
|
a789ea84ce83c6e135bb55d48fa63464ed958632
|
[
"Apache-2.0"
] | null | null | null |
Qubie_code/python/robotcontrol.py
|
OldETC/Bipedal_Qubie
|
a789ea84ce83c6e135bb55d48fa63464ed958632
|
[
"Apache-2.0"
] | null | null | null |
""" RobotControl.py
the joints are assigned to two servo driver boards
They start at the top of the chest.
shoulder, bicep, elbow, wrist, claw, hip, thigh, knee, ankelroll anklepitch.
The head nod and turn are in to top two slots for the right side.
"""
""" Apache license"""
"""
Copyright 2020 H. L. Howell
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from adafruit_servokit import ServoKit
from board import SCL, SDA
import busio
from adafruit_pca9685 import PCA9685
from array import *
#import numpy as nmp
#the global parameters for controlling the joints.
shoulder=0
bicep=1
elbow=2
wrist=3
grip=4
hip=5
thigh=6
knee=7
ankleroll=8
anklepitch=9
LEFT=0x41
RIGHT=0x40
headnod=13
headturn=14
#setup for working ranges for servos in degrees enforced by hardware design
# follows the servo numbers used for the joints
ranges= [
# default min max minPW maxPW range el# side designation
[135, 105, 270, 500, 2500, 270], # 0 right shoulder
[217, 9, 270, 500, 2500, 270], # 1 right bicep
[135, 0, 270, 500, 2500, 270], # 2 right elbow
[135, 0, 180, 500, 2500, 180], # 3 right wrist
[ 33, 33, 100, 500, 2500, 180], # 4 right grip
[135, 125, 144, 500, 2500, 270], # 5 right hip
[135, 125, 144, 500, 2500, 270], # 6 right thigh
[135, 0, 270, 500, 2500, 270], # 7 right knee
[135, 125, 159, 500, 2500, 270], # 8 right foot roll
[135, 92, 192, 500, 2500, 270], # 9 right foot pitch
[ 0, 0, 0, 0, 0, 0], # 10 reserved
[ 0, 0, 0, 0, 0, 0], # 11 reserved
[ 0, 0, 0, 0, 0, 0], # 12 reserved
[ 90, 0, 180, 500, 2500, 180], # 13 head nod
[ 90, 0, 180, 500, 2500, 180], # 14 head turn
[ 0, 0, 0, 0, 0, 0], # 15 reserved
[135, 105, 270, 500, 2500, 270], # 16 left shoulder
[217, 9, 270, 500, 2500, 270], # 17 left bicep
[135, 0, 270, 500, 2500, 270], # 18 left elbow
[135, 0, 180, 500, 2500, 180], # 19 left wrist
[ 20, 0, 75, 500, 2500, 180], # 20 left grip
[135, 125, 144, 500, 2500, 270], # 21 left hip
[135, 0, 270, 500, 2500, 270], # 22 left thigh
[135, 0, 270, 500, 2500, 270], # 23 left knee
[135, 125, 159, 500, 2500, 270], # 24 left foot roll
[135, 92, 192, 500, 2500, 270], # 25 left foot pitch
[ 0, 0, 0, 0, 0, 0], # 26 reserved
[ 0, 0, 0, 0, 0, 0], # 27 reserved
[ 0, 0, 0, 0, 0, 0], # 28 reserved
[ 0, 0, 0, 0, 0, 0], # 29 reserved
[ 0, 0, 0, 0, 0, 0], # 30 reserved
[ 0, 0, 0, 0, 0, 0], # 31 reserved
]
#creating the instances of the control software
# Set channels to the number of servo channels on your kit.
# 8 for FeatherWing, 16 for Shield/HAT/Bonnet.
# Set the PWM frequency to 200.
Left = ServoKit(channels=16,address=LEFT)
Right= ServoKit(channels=16,address=RIGHT)
def setup():
    """Configure PWM pulse-width and actuation ranges for both servo boards,
    using rows 0-15 of `ranges` for the right board and 16-31 for the left."""
    Left.PWMfrequency=200
    j=0;
    # setup the PCA code for the servo controlls.
    # NOTE(review): range(0, 15) covers channels 0-14 only; channel 15 (an
    # all-zero reserved row) is skipped while reserved rows 10-12 are not —
    # confirm whether skipping the last channel is intentional.
    for i in range (0,15):
        Right.servo[i].set_pulse_width_range(ranges[j][3],ranges[j][4])
        Right.servo[i].actuation_range=ranges[j][5]
        Left.servo[i].set_pulse_width_range(ranges[j+16][3],ranges[j+16][4])
        Left.servo[i].actuation_range=ranges[j+16][5]
        j=j+1
        # print ('j=',j,'\n')
setup()

# Map the joint names a user may type to servo channel numbers; replaces the
# original ten-branch if/elif chain.
JOINT_NUMBERS = {
    'shoulder': 0, 'bicep': 1, 'elbow': 2, 'wrist': 3, 'grip': 4,
    'hip': 5, 'thigh': 6, 'knee': 7, 'ankleroll': 8, 'anklepitch': 9,
    'nod': 13, 'turn': 14,
}

# Interactive console loop: prompt for side, joint and angle, then drive the
# matching servo.  Entering 'q' at any prompt — or an unknown joint — exits.
jnum = 0
while True:
    side = input('side:')
    if side == 'q':
        break
    joint = input('joint:')
    if joint == 'q':
        break
    if joint in ('nod', 'turn'):
        side = 'right'          # the head servos live on the right-side board
    if joint not in JOINT_NUMBERS:
        break                   # unknown joint ends the session (as before)
    jnum = JOINT_NUMBERS[joint]
    angle = input('angle:')
    if angle == 'q':
        break
    if side == 'right':
        Right.servo[jnum].angle = int(angle)
        print("moved Right\n")
    elif side == 'left':
        Left.servo[jnum].angle = int(angle)
        print("moved Left\n")
    print("side:", side, "jnum:", jnum, "angle:", angle, "\n")
| 31.55625
| 80
| 0.581303
|
4a01e3ced4559a80cbfb9a9688e71e0bb63f1f75
| 6,842
|
py
|
Python
|
examples/segregation.py
|
cash/dworp
|
b7b0280183d4fa54b65bf5a3d4008bf64b7434dc
|
[
"BSD-3-Clause"
] | null | null | null |
examples/segregation.py
|
cash/dworp
|
b7b0280183d4fa54b65bf5a3d4008bf64b7434dc
|
[
"BSD-3-Clause"
] | null | null | null |
examples/segregation.py
|
cash/dworp
|
b7b0280183d4fa54b65bf5a3d4008bf64b7434dc
|
[
"BSD-3-Clause"
] | null | null | null |
import argparse
import dworp
import logging
import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
class Household(dworp.Agent):
    """Household agent for the Schelling segregation model."""

    def __init__(self, agent_id, color, x, y, similarity):
        super().__init__(agent_id, 0)
        self.color = color
        self.x = x
        self.y = y
        self.happy = None
        self.similarity = similarity

    def __repr__(self):
        return "Household({}, {}, ({},{}), happy={})".format(
            self.agent_id, self.color, self.x, self.y, self.happy)

    def init(self, start_time, env):
        # Record initial happiness before the first step.
        self.happy = self.check_happiness(env)

    def step(self, new_time, env):
        # An unhappy household relocates to a random empty cell.
        if not self.check_happiness(env):
            env.move(self)

    def complete(self, current_time, env):
        # Re-evaluate happiness after everyone has moved this round.
        self.happy = self.check_happiness(env)

    def check_happiness(self, env):
        """True when the same-colored share of neighbors meets the threshold."""
        nearby = env.grid.neighbors(self.x, self.y)
        same_colored = sum(1 for other in nearby if other.color == self.color)
        return same_colored >= self.similarity * len(nearby)
class ColorAssigner:
    """Assigns one of two colors to a household with equal probability."""

    def __init__(self, rng, color1, color2):
        self.rng = rng
        self.color1 = color1
        self.color2 = color2

    def assign(self):
        """Draw a color: color1 with probability 0.5, otherwise color2."""
        return self.color1 if self.rng.uniform() < 0.5 else self.color2
class HouseholdFactory:
    """Creates Household agents with sequential ids and random colors."""

    def __init__(self, rng, similarity, color1, color2):
        self.namer = dworp.IdentifierHelper.get()
        self.similarity = similarity
        self.colorer = ColorAssigner(rng, color1, color2)

    def create(self, x, y):
        """Return a new Household placed at grid cell (x, y)."""
        new_id = next(self.namer)
        new_color = self.colorer.assign()
        return Household(new_id, new_color, x, y, self.similarity)
class SegregationEnvironment(dworp.Environment):
    """Environment that owns the grid and relocates unhappy households."""

    def __init__(self, grid, rng):
        super().__init__(0)
        self.grid = grid
        self.rng = rng

    def move(self, agent):
        """Relocate *agent* to a uniformly drawn unoccupied cell."""
        new_x, new_y = agent.x, agent.y
        # Rejection-sample until an empty cell is found (the agent's own cell
        # is occupied, so the loop always draws at least once).
        while self.grid.occupied(new_x, new_y):
            new_x = self.rng.randint(0, self.grid.width)
            new_y = self.rng.randint(0, self.grid.height)
        self.grid.move(agent.x, agent.y, new_x, new_y)
        agent.x = new_x
        agent.y = new_y

    def init(self, start_time):
        pass

    def step(self, new_time, agents):
        pass
class SegObserver(dworp.Observer):
    """Prints the percentage of happy agents to stdout at each stage."""

    def start(self, time, agents, env):
        print("Starting: {}% agents happy".format(self.get_happiness(agents)))

    def step(self, time, agents, env):
        print("Step {}: {}% agents happy".format(time, self.get_happiness(agents)))

    def done(self, agents, env):
        print("Ending: {}% agents happy".format(self.get_happiness(agents)))

    @staticmethod
    def get_happiness(agents):
        """Percentage (0-100) of agents whose `happy` flag is set."""
        happy_count = sum(agent.happy for agent in agents)
        return 100 * happy_count / float(len(agents))
class SegTerminator(dworp.Terminator):
    """Stop when everyone is happy"""
    def test(self, time, agents, env):
        # 100% happiness means no agent wants to move, so the run can end.
        return SegObserver.get_happiness(agents) >= 100
class HeatmapPlotObserver(dworp.Observer):
    """Plot the segregation grid as a live seaborn heatmap.

    Cells are encoded as -1 (colors[0]), 0 (empty, drawn white) and
    +1 (colors[1]).
    """
    def __init__(self, colors):
        self.data = None          # numpy buffer mirroring the grid, allocated in start()
        self.colors = colors
        cmap = matplotlib.colors.ListedColormap([colors[0], "white", colors[1]])
        self.options = {
            'cmap': cmap, 'cbar': False, 'linewidths': 0.2,
            'xticklabels': False, 'yticklabels': False
        }
    def start(self, time, agents, env):
        self.data = np.zeros(env.grid.data.shape)
        plt.ion()  # interactive mode so the figure refreshes between steps
        self.plot(env.grid)
    def step(self, time, agents, env):
        self.plot(env.grid)
    def plot(self, grid):
        """Render current grid occupancy into self.data and draw the heatmap."""
        for x in range(self.data.shape[0]):
            for y in range(self.data.shape[1]):
                if grid.data[x, y] is None:
                    self.data[x, y] = 0
                elif grid.data[x, y].color == self.colors[0]:
                    self.data[x, y] = -1
                else:
                    self.data[x, y] = 1
        sns.heatmap(self.data, **self.options)
class SegregationParams:
    """Bundle of the simulation's tunable parameters."""

    def __init__(self, density, similarity, grid_size, seed, colors):
        self.density = density          # probability a cell is occupied
        self.similarity = similarity    # happiness threshold (fraction)
        self.grid_width, self.grid_height = grid_size[0], grid_size[1]
        self.seed = seed                # RNG seed for reproducibility
        self.colors = colors            # the two household colors
class SegregationSimulation(dworp.TwoStageSimulation):
    """Simulation with two stages (moving and then happiness test)."""
    def __init__(self, params, observer):
        self.params = params
        self.rng = np.random.RandomState(params.seed)
        factory = HouseholdFactory(self.rng, params.similarity, params.colors[0], params.colors[1])
        time = dworp.InfiniteTime()  # run until the terminator fires
        scheduler = dworp.RandomOrderScheduler(self.rng)
        terminator = SegTerminator()

        agents = []
        grid = dworp.Grid(params.grid_width, params.grid_height)
        env = SegregationEnvironment(grid, self.rng)
        # Populate each cell independently with probability `density`.
        for x in range(params.grid_width):
            for y in range(params.grid_height):
                if self.rng.uniform() < params.density:
                    agent = factory.create(x, y)
                    grid.set(agent, x, y)
                    agents.append(agent)

        super().__init__(agents, env, time, scheduler, observer, terminator)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARN)
# parse command line
parser = argparse.ArgumentParser()
parser.add_argument("--density", help="density of agents (1-99)", default=95, type=int)
parser.add_argument("--similar", help="desired similarity (0-100)", default=30, type=int)
parser.add_argument("--size", help="grid size formatted as XXXxYYY", default="50x50")
parser.add_argument("--seed", help="seed of RNG", default=42, type=int)
args = parser.parse_args()
# prepare parameters of simulation
assert(1 <= args.density <= 99)
assert(0 <= args.similar <= 100)
density = args.density / float(100)
similarity = args.similar / float(100)
grid_size = [int(dim) for dim in args.size.split("x")]
seed = args.seed
colors = ["blue", "orange"]
params = SegregationParams(density, similarity, grid_size, seed, colors)
# create and run one realization of the simulation
observer = dworp.ChainedObserver(
SegObserver(),
HeatmapPlotObserver(colors),
dworp.PauseObserver(delay=1, start=True, matplotlib=True)
)
sim = SegregationSimulation(params, observer)
sim.run()
| 32.894231
| 99
| 0.618971
|
4a01e3df53e47737eb040efc126fb30f853b4be5
| 3,002
|
py
|
Python
|
ssguan/ignitor/utility/reflect.py
|
samuelbaizg/ssguan
|
97def0609d61e40472554464470758b5fb9eca35
|
[
"Apache-2.0"
] | 1
|
2015-07-14T14:24:05.000Z
|
2015-07-14T14:24:05.000Z
|
ssguan/ignitor/utility/reflect.py
|
samuelbaizg/ssguan
|
97def0609d61e40472554464470758b5fb9eca35
|
[
"Apache-2.0"
] | null | null | null |
ssguan/ignitor/utility/reflect.py
|
samuelbaizg/ssguan
|
97def0609d61e40472554464470758b5fb9eca35
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2015 www.suishouguan.com
#
# Licensed under the Private License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/samuelbaizg/ssguan/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Reflect utilities.
"""
import functools
import inspect
import six
from ssguan.ignitor.base.error import NoFoundError
def wrap_func(func, *args, **kwargs):
    """Return a partial of *func* bound to *args*/*kwargs*, carrying over
    func's metadata (__name__, __module__, __doc__) when it has any."""
    wrapped = functools.partial(func, *args, **kwargs)
    try:
        functools.update_wrapper(wrapped, func)
    except AttributeError:
        # A func that is itself a functools.partial lacks __name__,
        # __module__ and __doc__, so update_wrapper() fails; skip metadata.
        pass
    return wrapped
def import_module(path):
    """
    Import and return a module (or a module attribute) from a dotted path.

    :param str path: absolute dotted path; "pkg.mod.attr" resolves attribute
        "attr" on module "pkg.mod", a bare name imports the module itself.
    :raises NoFoundError: when the path cannot be imported or resolved.
    """
    try:
        if "." in path:
            modpath, attr_name = path.rsplit('.', 1)
            mod = __import__(modpath, None, None, [''])
            mod = getattr(mod, attr_name)
        else:
            mod = __import__(path, None, None, [''])
    except Exception as exc:
        # FIX: narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # propagate; chain the original cause for easier debugging.
        raise NoFoundError("module", path) from exc
    return mod
def get_function_path(func, bound_to=None):
    """
    Get received func path (as string), to import func later with `import_string`.

    Builds "module[.Class].name".  Strings pass through unchanged and
    non-callables are returned as-is.
    """
    if isinstance(func, six.string_types):
        return func
    # static and class methods expose the underlying function via __func__
    if hasattr(func, '__func__'):
        real_func = func.__func__
    elif callable(func):
        real_func = func
    else:
        return func

    func_path = []
    module = getattr(real_func, '__module__', '__main__')
    if module:
        func_path.append(module)

    # If the caller did not say what func is bound to, try to discover the
    # bound instance/class from the method object itself.
    if not bound_to:
        try:
            bound_to = six.get_method_self(func)
        except AttributeError:
            pass

    if bound_to:
        # Use the class name itself for classes, the instance's class otherwise.
        if isinstance(bound_to, six.class_types):
            func_path.append(bound_to.__name__)
        else:
            func_path.append(bound_to.__class__.__name__)

    func_path.append(real_func.__name__)
    return '.'.join(func_path)
def get_function_args(func):
    """
    Return inspect.FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
    kwonlydefaults, annotations) describing *func*'s signature.
    """
    return inspect.getfullargspec(func)
def eval_script(script, **kwargs):
    """
    Execute a python script and return its result.

    - '' (empty string)  -> True
    - a non-string       -> called as ``script(**kwargs)``
    - any other string   -> evaluated with eval(), kwargs as the namespace

    WARNING: eval() executes arbitrary code — never pass untrusted input.
    """
    if script == '':
        return True
    # FIX: isinstance() instead of ``type(script) != str`` (handles str
    # subclasses and is the idiomatic type check).
    if not isinstance(script, str):
        return script(**kwargs)
    return eval(script, kwargs)
| 28.590476
| 100
| 0.616589
|
4a01e3ffcf08c8fa639deb059ac1f8d196401458
| 6,324
|
py
|
Python
|
entrypoint.py
|
vulnbe/openvas-docker
|
4d9e56a3feebe240d24dc873b2f2b4a14244a320
|
[
"MIT"
] | 1
|
2020-02-15T08:46:19.000Z
|
2020-02-15T08:46:19.000Z
|
entrypoint.py
|
vulnbe/openvas-docker
|
4d9e56a3feebe240d24dc873b2f2b4a14244a320
|
[
"MIT"
] | null | null | null |
entrypoint.py
|
vulnbe/openvas-docker
|
4d9e56a3feebe240d24dc873b2f2b4a14244a320
|
[
"MIT"
] | 4
|
2019-10-17T20:09:04.000Z
|
2021-10-10T09:47:42.000Z
|
#!/usr/bin/env python3
import os
import logging
import subprocess
import argparse
from time import sleep
from shlex import quote
from gvm_client import GVM_client, Task
env_ov_passwd = 'OV_PASSWD'
env_ov_run_tasks = 'OV_AUTORUN_TASKS'
env_ov_save_reports = 'OV_AUTOSAVE_REPORTS'
redis_conf = '/etc/openvas-redis.conf'
redis_socket = '/tmp/redis.sock'
gvm_socket = '/var/run/gvmd.sock'
ov_user = 'admin'
overrides_path = '/overrides'
reports_path = '/reports'
configs_path = '/configs'
targets_path = '/targets'
tasks_path = '/tasks'
openvassd_wait_secs = 60
gvmd_wait_secs = 6
gvmd_connect_tries = 10
task_wait_secs = 15
loglevel = logging.INFO
def create_user(username, password):
    """Create a GVM user via gvmd; both values are shell-quoted before use."""
    logging.log(logging.INFO, 'Creating user {}...'.format(username))
    command = 'gvmd --create-user {} --password={}'.format(quote(username), quote(password))
    os.system(command)
def delete_user(username):
    """Delete a GVM user via gvmd; the name is shell-quoted before use."""
    logging.log(logging.INFO, 'Deleting user {}...'.format(username))
    command = 'gvmd --delete-user {}'.format(quote(username))
    os.system(command)
def ping_postgres():
    """Return True when the local Postgres (port 5432) accepts connections."""
    try:
        subprocess.check_call(['pg_isready', '-h', 'localhost', '-p', '5432'])
    except Exception:
        # FIX: narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed while polling.
        logging.error('Unable to reach Postgres')
        return False
    logging.info('Postgres is running')
    return True
def run_postgres():
    """Start Postgres via its init script (if not up) and block until it answers."""
    if not ping_postgres():
        subprocess.Popen(['/etc/init.d/postgresql', 'start']).wait()
    while not ping_postgres():
        logging.info('Waiting for postgres to start...')
        sleep(1)
def stop_postgres():
    """Stop Postgres via its init script and block until it stops responding."""
    if ping_postgres():
        subprocess.Popen(['/etc/init.d/postgresql', 'stop']).wait()
    while ping_postgres():
        logging.info('Waiting for Postgres to stop...')
        sleep(1)
def ping_redis():
    """Return True when the Redis instance behind the unix socket answers PONG."""
    try:
        response = subprocess.check_output(['redis-cli', '-s', redis_socket, 'ping']).decode('utf-8')
    except Exception:
        # FIX: narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed while polling.
        logging.error('Unable to reach Redis')
        return False
    logging.info('Ping redis: {}'.format(response))
    # redis-cli terminates its reply with a newline.
    return response == 'PONG\n'
def run_redis():
    """Start Redis with the OpenVAS config (if not up) and wait until it answers."""
    if not ping_redis():
        subprocess.Popen(['redis-server', redis_conf])
    while not ping_redis():
        logging.info('Waiting for redis...')
        sleep(1)
def stop_redis():
    """Ask Redis (via the unix socket) to persist to disk and shut down."""
    try:
        logging.info('Shutdown Redis: {}'.format(subprocess.check_output(['redis-cli','-s', redis_socket, 'SHUTDOWN', 'SAVE']).decode('utf-8')))
    except Exception as ex:
        # SHUTDOWN closes the connection, so redis-cli may exit non-zero even
        # on success; log and continue.
        logging.error('Shutdown Redis error: {}'.format(ex))
def stop_process(process: subprocess.Popen):
    """Interrupt *process* (SIGINT) and wait for it to exit.

    Errors from processes that are already gone are deliberately ignored —
    this is a best-effort shutdown helper.
    """
    try:
        process.send_signal(subprocess.signal.SIGINT)
        process.wait()
    except Exception:
        # FIX: narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate during shutdown.
        pass
def task_can_be_runned(task: Task):
    """True when *task* exists and is in a state from which it may be (re)started."""
    return task is not None and task.status in ('New', 'Done', 'Stopped')
def task_runned(task: Task):
    """True when *task* exists and is currently running or queued to run."""
    return task is not None and task.status in ('Running', 'Requested')
if __name__ == '__main__':
logging.basicConfig(level=loglevel)
parser = argparse.ArgumentParser()
parser.add_argument('--create-cache', dest='create_cache', default=False, action='store_true')
parser.add_argument('--only-run-tasks', dest='only_run_tasks', default=False, action='store_true')
args = parser.parse_args()
if args.create_cache:
run_postgres()
run_redis()
admin_pass = 'cache'
create_user(ov_user, admin_pass)
openvassd_proc = subprocess.Popen(['openvassd', '-f'])
sleep(openvassd_wait_secs)
gvmd_proc = subprocess.Popen(['gvmd', '-f'])
processor = GVM_client(
socket_path=gvm_socket,
user=ov_user,
password=admin_pass,
loglevel=loglevel)
processor.wait_connection(connection_tries=gvmd_connect_tries, secs_before_attempt=gvmd_wait_secs)
processor.wait_sync(interval=20)
delete_user(ov_user)
stop_process(openvassd_proc)
stop_process(gvmd_proc)
stop_postgres()
stop_redis()
else:
admin_pass = os.environ.get(env_ov_passwd)
if not args.only_run_tasks:
run_postgres()
run_redis()
if admin_pass != None:
create_user(ov_user, admin_pass)
else:
print('Admin password hasn\'t specified')
print('Please pass admin password via {} env variable'.format(env_ov_passwd))
exit(1)
supervisor_proc = subprocess.Popen(['supervisord','-n', '-c', '/etc/openvas-supervisor.conf'])
try:
processor = GVM_client(
socket_path=gvm_socket,
user=ov_user,
password=admin_pass,
loglevel=loglevel)
processor.wait_connection(connection_tries=gvmd_connect_tries, secs_before_attempt=gvmd_wait_secs)
processor.wait_sync()
if not args.only_run_tasks:
processor.import_configs(configs_path)
processor.import_targets(targets_path)
processor.import_tasks(tasks_path)
processor.import_reports(reports_path)
processor.import_overrides(overrides_path)
if os.environ.get(env_ov_run_tasks, ''):
tasks = processor.get_tasks()
for task in tasks:
for run_try in range(1, 4):
_task = processor.get_task(task.id)
if task_can_be_runned(_task):
logging.info('#{} try to run task: {}'.format(run_try, task.name))
if task_runned(_task) or processor.run_task(task.id):
logging.info('Waiting for task: {}'.format(task.name))
while True:
sleep(task_wait_secs)
_task = processor.get_task(task.id)
if _task != None and _task.status == 'Done':
if os.environ.get(env_ov_save_reports, '') and _task.last_report != None:
try:
processor.save_report(_task.last_report.id, reports_path)
except Exception as ex:
logging.error('Saving report error: {}'.format(ex))
break
elif _task != None and not task_runned(_task):
logging.error('Ignoring stopped/crashed task: {}'.format(task.name))
break
break
else:
logging.error('Error running task: {}'.format(task.name))
sleep(5)
else:
logging.error('Wrong task status: {}'.format(task.name))
sleep(5)
except Exception as ex:
logging.error('GVM_client error: {}'.format(ex))
if not args.only_run_tasks:
supervisor_proc.wait()
| 31.62
| 140
| 0.652435
|
4a01e4fa21d6aced0fe6ab436441be44d677e940
| 244
|
py
|
Python
|
webportal/webportal.py
|
sreenut/BlockChain-Framework
|
3abfd5420a977573e87e3f0afcd3e6354fa4884d
|
[
"MIT"
] | 8
|
2018-07-18T17:49:41.000Z
|
2022-03-17T01:30:44.000Z
|
webportal/webportal.py
|
sreenut/BlockChain-Framework
|
3abfd5420a977573e87e3f0afcd3e6354fa4884d
|
[
"MIT"
] | null | null | null |
webportal/webportal.py
|
sreenut/BlockChain-Framework
|
3abfd5420a977573e87e3f0afcd3e6354fa4884d
|
[
"MIT"
] | 1
|
2018-09-05T07:11:25.000Z
|
2018-09-05T07:11:25.000Z
|
from app import create_app, db
from app.models import User, MultichainNode, EthTx

# Application entry point used by `flask run` / `flask shell`.
app = create_app()


@app.shell_context_processor
def make_shell_context():
    """Expose the db handle and common models inside `flask shell` so they do
    not need to be imported manually."""
    return {'db': db, 'User': User, 'MultichainNode': MultichainNode, 'EthTx': EthTx}
| 27.111111
| 85
| 0.75
|
4a01e56ab91ea2d835427131c742fd73c336dfec
| 1,976
|
py
|
Python
|
fastreid/modeling/base_module.py
|
X-funbean/fast-reid
|
97ef9608ba696d116fafb3a8746842b8d0cf546e
|
[
"Apache-2.0"
] | null | null | null |
fastreid/modeling/base_module.py
|
X-funbean/fast-reid
|
97ef9608ba696d116fafb3a8746842b8d0cf546e
|
[
"Apache-2.0"
] | null | null | null |
fastreid/modeling/base_module.py
|
X-funbean/fast-reid
|
97ef9608ba696d116fafb3a8746842b8d0cf546e
|
[
"Apache-2.0"
] | null | null | null |
import torch
from torch import nn
from ..utils.events import get_event_storage
class BaseModule(nn.Module):
"""
A base Module for ReID models
"""
def __init__(self, cfg):
super().__init__()
self._cfg = cfg
assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)
self.register_buffer(
"pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(1, -1, 1, 1)
)
self.register_buffer(
"pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(1, -1, 1, 1)
)
@property
def device(self):
return self.pixel_mean.device
def preprocess_image(self, batched_inputs):
r"""
Normalize and batch the input images.
"""
if isinstance(batched_inputs, dict):
images = batched_inputs["images"].to(self.device)
elif isinstance(batched_inputs, torch.Tensor):
images = batched_inputs.to(self.device)
else:
raise TypeError(
"batched_inputs must be dict or torch.Tensor, but get {}".format(
type(batched_inputs)
)
)
images.sub_(self.pixel_mean).div_(self.pixel_std)
return images
@staticmethod
def log_accuracy(pred_class_logits, gt_classes, name="cls_accuracy", topk=(1,)):
"""
Log the accuracy metrics to EventStorage.
modified from fastreid/modeling/losses/cross_entropy_loss.py
"""
bsz = pred_class_logits.size(0)
maxk = max(topk)
_, pred_class = pred_class_logits.topk(maxk, 1, True, True)
pred_class = pred_class.t()
correct = pred_class.eq(gt_classes.view(1, -1).expand_as(pred_class))
ret = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(dim=0, keepdim=True)
ret.append(correct_k.mul_(1.0 / bsz))
storage = get_event_storage()
storage.put_scalar(name, ret[0])
| 31.870968
| 84
| 0.59666
|
4a01e5aa021549d1d0b039cc328d3f2935f46679
| 2,249
|
py
|
Python
|
uasparser2/imcache.py
|
redbrickmedia/uasparser2
|
13be6b361a4eba148341ca905c0a5d1c761e6411
|
[
"BSD-3-Clause"
] | 16
|
2015-01-13T12:13:16.000Z
|
2021-03-09T15:11:36.000Z
|
uasparser2/imcache.py
|
redbrickmedia/uasparser2
|
13be6b361a4eba148341ca905c0a5d1c761e6411
|
[
"BSD-3-Clause"
] | 1
|
2015-04-28T12:55:01.000Z
|
2017-10-12T20:47:09.000Z
|
uasparser2/imcache.py
|
redbrickmedia/uasparser2
|
13be6b361a4eba148341ca905c0a5d1c761e6411
|
[
"BSD-3-Clause"
] | 5
|
2015-08-07T12:51:47.000Z
|
2020-01-05T22:56:21.000Z
|
"""
In-memory caching module
Fast
Insert and get <= O(log n)
Powerful
Thread safe
Global and per key ttl
Size limit with LRU
Simple
Decorator with custom caching keys (TODO)
"""
import time
from threading import Lock
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
class CacheMissException(Exception):
pass
class SimpleCache(object):
CacheMissException = CacheMissException
def __init__(self, cache_size=100000, ttl=None):
if cache_size <= 0:
raise AttributeError('cache_size has to be None or int > 0')
if ttl is not None and ttl <= 0:
raise AttributeError('ttl has to be None or float > 0')
self._cache = OrderedDict()
self._max_cache_size = cache_size
self._default_ttl = float(ttl) if ttl else None
self._lock = Lock()
def put(self, key, val, ttl=None):
if ttl is not None and ttl <= 0:
raise AttributeError('ttl has to be None or float > 0')
ttl = ttl if ttl else self._default_ttl
expires = (float(ttl) + time.time()) if ttl else False
with self._lock:
try:
del self._cache[key]
except KeyError:
self._remove_lru()
self._cache[key] = (expires, val)
def _get_unlocked(self, key):
try:
val = self._cache[key]
except KeyError:
raise self.CacheMissException
if val[0] is not False and val[0] < time.time():
del self._cache[key]
raise self.CacheMissException
del self._cache[key]
self._cache[key] = val
return val[1]
def get(self, key):
with self._lock:
return self._get_unlocked(key)
def multiget(self, keys):
values = []
with self._lock:
for key in keys:
try:
values.append(self._get_unlocked(key))
except self.CacheMissException:
values.append(None)
return values
def _remove_lru(self):
if len(self._cache) >= self._max_cache_size:
self._cache.popitem(last=False)
| 24.445652
| 72
| 0.582926
|
4a01e8c22611a0bc96a100279b28c933c9fc3e4f
| 1,213
|
py
|
Python
|
bot.py
|
sgregg85/QuoteBot
|
3f8e3adc417e7286921f2c301ec444c86fb4b247
|
[
"Unlicense"
] | null | null | null |
bot.py
|
sgregg85/QuoteBot
|
3f8e3adc417e7286921f2c301ec444c86fb4b247
|
[
"Unlicense"
] | null | null | null |
bot.py
|
sgregg85/QuoteBot
|
3f8e3adc417e7286921f2c301ec444c86fb4b247
|
[
"Unlicense"
] | null | null | null |
import key #key.py file with keys from twitter
import tweepy
import requests
import json
import time
import random
auth = tweepy.OAuthHandler(key.consumer_key, key.consumer_secret)
auth.set_access_token(key.access_token, key.access_token_secret)
api = tweepy.API(auth)
def getQuote():
url = 'http://api.forismatic.com/api/1.0/?method=getQuote&lang=en&format=json'#p&jsonp=?'
response = requests.get(url)
if (response.ok): #if we get a valid response from server
try:
#convert response to a json object
jData = json.loads(response.content.decode('utf-8'))
quote = (jData['quoteText'])
if(jData['quoteAuthor'] == ''): #if no author, use Anonymous
author = "Anonymous"
else:
author = jData['quoteAuthor']
tweet = quote + " -" + author
if(len(tweet) > 140): #if the tweet is more than 140 characters, we need another quote
print("Quote too long")
getQuote()
api.update_status(tweet)
except ValueError: #try again if we get a valuerror
getQuote()
else:
response.raise_for_status() #print error from server
while True:
getQuote()
sleep_time = random.randrange(1800, 3601)
print("Sleeping for " + str(sleep_time) + " seconds.")
time.sleep(sleep_time)
| 25.808511
| 90
| 0.70981
|
4a01e97ef7ac42ab5c653f6395a73887807726b1
| 1,198
|
py
|
Python
|
phns/utils/phn.py
|
DeepLenin/phns
|
80fb48d032cd159782a5d96724e91540a55271ef
|
[
"MIT"
] | 5
|
2020-04-03T20:59:46.000Z
|
2020-07-08T17:40:40.000Z
|
phns/utils/phn.py
|
DeepLenin/phns
|
80fb48d032cd159782a5d96724e91540a55271ef
|
[
"MIT"
] | null | null | null |
phns/utils/phn.py
|
DeepLenin/phns
|
80fb48d032cd159782a5d96724e91540a55271ef
|
[
"MIT"
] | null | null | null |
class Phn:
PHONEMES = {}
def __new__(cls, val):
if isinstance(val, Phn):
return val
_phn = val.lower().replace("0", "")
if _phn not in cls.PHONEMES:
instance = super(Phn, cls).__new__(cls)
instance.__custom_init__(_phn)
cls.PHONEMES[_phn] = instance
return cls.PHONEMES[_phn]
def __custom_init__(self, phoneme):
self.__phoneme__ = phoneme
self.val, self.stress = self.process()
def process(self):
"""Splits cmu dict phoneme to phoneme and stress"""
digit = None
no_digits = []
for char in self.__phoneme__.lower():
if char.isdigit():
digit = int(char)
else:
no_digits.append(char)
return "".join(no_digits), digit
def __hash__(self):
return hash(self.val)
def __eq__(self, other):
return other and self.val == other.val
def __lt__(self, other):
return self.val < other.val
def __str__(self):
return self.val
def __repr__(self):
return f'Phn("{self.__phoneme__}")'
def __deepcopy__(self, memo={}):
return self
| 26.043478
| 59
| 0.558431
|
4a01e9d2f2555469f65149920674d9c3906d0537
| 10,075
|
py
|
Python
|
apps/authentication/api/tests/signup_tests.py
|
Nicolaad/onlineweb4
|
5942eaf907d6824d5384147627def9edefdb9946
|
[
"MIT"
] | null | null | null |
apps/authentication/api/tests/signup_tests.py
|
Nicolaad/onlineweb4
|
5942eaf907d6824d5384147627def9edefdb9946
|
[
"MIT"
] | null | null | null |
apps/authentication/api/tests/signup_tests.py
|
Nicolaad/onlineweb4
|
5942eaf907d6824d5384147627def9edefdb9946
|
[
"MIT"
] | null | null | null |
from django.core import mail
from django.urls import reverse
from django_dynamic_fixture import G
from onlineweb4.fields.recaptcha import mock_validate_recaptcha
from rest_framework import status
from apps.authentication.models import OnlineUser as User
from apps.authentication.models import RegisterToken
from apps.online_oidc_provider.test import OIDCTestCase
class SignupAPIURLTestCase(OIDCTestCase):
def setUp(self) -> None:
super().setUp()
self.password = "12345678"
self.create_user_data = {
"username": "testuser133",
"email": "test33@example.org",
"password": self.password,
}
self.captcha_mock = {"recaptcha": "--captcha-mock--"}
self.user_data_with_captcha = {**self.create_user_data, **self.captcha_mock}
self.user: User = G(User, username="test_user")
self.user.set_password(self.password)
self.user.save()
self.token = self.generate_access_token(self.user)
self.headers = {**self.generate_headers(), **self.bare_headers}
self.url = reverse("users-list")
self.id_url = lambda _id: reverse("users-detail", args=[_id])
self.change_password_url = lambda _id: reverse(
"users-change-password", args=[_id]
)
def test_signup_http_get_returns_200(self):
response = self.client.get(self.url)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_signup_not_all_required_params_returns_400(self):
response = self.client.post(self.url, **self.bare_headers)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
@mock_validate_recaptcha()
def test_signup_without_recaptcha_returns_400(self, _):
response = self.client.post(
self.url, data=self.create_user_data, **self.bare_headers
)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
@mock_validate_recaptcha()
def test_signup_success_returns_201(self, _):
response = self.client.post(
self.url, data=self.user_data_with_captcha, **self.bare_headers
)
self.assertEqual(status.HTTP_201_CREATED, response.status_code)
@mock_validate_recaptcha()
def test_signup_success_returns_correct_data(self, _):
response = self.client.post(
self.url, data=self.user_data_with_captcha, **self.bare_headers
)
self.assertEqual(status.HTTP_201_CREATED, response.status_code)
data = self.create_user_data
created_user = response.json()
self.assertEqual(created_user["username"], data["username"])
self.assertEqual(created_user["email"], data["email"])
# Password should not be returned back to user
self.assertEqual(created_user.get("password"), None)
@mock_validate_recaptcha()
def test_signup_twice_with_same_data_returns_400(self, _):
self.client.post(
self.url, data=self.user_data_with_captcha, **self.bare_headers
)
response = self.client.post(
self.url, data=self.user_data_with_captcha, **self.bare_headers
)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
@mock_validate_recaptcha()
def test_signup_twice_with_same_email_returns_400(self, _):
first_user = self.user_data_with_captcha
second_user = {
**self.captcha_mock,
"username": "testuser456",
"email": self.create_user_data.get("email"),
"password": "securepassword",
}
response_1 = self.client.post(self.url, data=first_user, **self.bare_headers)
response_2 = self.client.post(self.url, data=second_user, **self.bare_headers)
self.assertEqual(status.HTTP_201_CREATED, response_1.status_code)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response_2.status_code)
@mock_validate_recaptcha()
def test_signup_twice_with_same_username_returns_400(self, _):
first_user = self.user_data_with_captcha
second_user = {
**self.captcha_mock,
"username": self.create_user_data.get("username"),
"email": "test44@example.org",
"password": "securepassword",
}
response_1 = self.client.post(self.url, data=first_user, **self.bare_headers)
response_2 = self.client.post(self.url, data=second_user, **self.bare_headers)
self.assertEqual(status.HTTP_201_CREATED, response_1.status_code)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response_2.status_code)
@mock_validate_recaptcha()
def test_signup_success_sets_user_as_inactive(self, _):
self.client.post(
self.url, data=self.user_data_with_captcha, **self.bare_headers
)
user = User.objects.get(username=self.create_user_data.get("username"))
self.assertFalse(user.is_active)
@mock_validate_recaptcha()
def test_signup_password_checks_out(self, _):
self.client.post(
self.url, data=self.user_data_with_captcha, **self.bare_headers
)
user = User.objects.get(username=self.create_user_data.get("username"))
self.assertNotEqual(user.password, self.password)
self.assertTrue(user.check_password(self.password))
@mock_validate_recaptcha()
def test_signup_success_sends_verification_email(self, _):
self.client.post(
self.url, data=self.user_data_with_captcha, **self.bare_headers
)
self.assertEqual(mail.outbox[0].subject, "Verifiser din konto")
@mock_validate_recaptcha()
def test_signup_success_verification_link_sets_user_as_active(self, _):
self.client.post(
self.url, data=self.user_data_with_captcha, **self.bare_headers
)
register_token = RegisterToken.objects.get(
email=self.create_user_data.get("email")
)
verify_url = reverse("auth_verify", args=[register_token.token])
self.client.get(verify_url)
user = User.objects.get(username=self.create_user_data.get("username"))
self.assertTrue(user.is_active)
def test_user_update_their_name(self):
new_first_name = "Ola Kari"
new_last_name = "Nordmann"
response = self.client.patch(
self.id_url(self.user.id),
{"first_name": new_first_name, "last_name": new_last_name},
**self.headers
)
self.user.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json().get("first_name"), new_first_name)
self.assertEqual(response.json().get("last_name"), new_last_name)
def test_change_password(self):
new_password = "the_most_secure_password"
response = self.client.put(
self.change_password_url(self.user.id),
{
"current_password": self.password,
"new_password": new_password,
"new_password_confirm": new_password,
},
**self.headers
)
self.user.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertTrue(self.user.check_password(new_password))
def test_change_password_wrong_confirm(self):
new_password = "the_most_secure_password"
response = self.client.put(
self.change_password_url(self.user.id),
{
"current_password": self.password,
"new_password": new_password,
"new_password_confirm": "some_random_shit",
},
**self.headers
)
self.user.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json().get("non_field_errors"), ["Passordene stemmer ikke overens"]
)
self.assertFalse(self.user.check_password(new_password))
def test_change_password_wrong_current_password(self):
new_password = "the_most_secure_password"
response = self.client.put(
self.change_password_url(self.user.id),
{
"current_password": "some_random_shit",
"new_password": new_password,
"new_password_confirm": new_password,
},
**self.headers
)
self.user.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json().get("current_password"), ["Nåværende passord stemmer ikke"]
)
self.assertFalse(self.user.check_password(new_password))
def test_change_password_new_password_missing(self):
new_password = "the_most_secure_password"
response = self.client.put(
self.change_password_url(self.user.id),
{"current_password": self.password, "new_password_confirm": new_password},
**self.headers
)
self.user.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json().get("new_password"), ["Dette feltet er påkrevd."]
)
self.assertFalse(self.user.check_password(new_password))
def test_change_password_new_password_invalid(self):
new_password = "123"
response = self.client.put(
self.change_password_url(self.user.id),
{
"current_password": self.password,
"new_password": new_password,
"new_password_confirm": new_password,
},
**self.headers
)
self.user.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json().get("new_password"),
["Passordet er for kort. Det må bestå av minst 8 tegn."],
)
self.assertFalse(self.user.check_password(new_password))
| 38.162879
| 88
| 0.659553
|
4a01ed411251ff5235e234f60b7cd6441594ef5b
| 28,151
|
py
|
Python
|
perses/tests/test_relative.py
|
hannahbrucemacdonald/perses
|
6b43d200501e587b352dce5aaefef38e4145048b
|
[
"MIT"
] | null | null | null |
perses/tests/test_relative.py
|
hannahbrucemacdonald/perses
|
6b43d200501e587b352dce5aaefef38e4145048b
|
[
"MIT"
] | null | null | null |
perses/tests/test_relative.py
|
hannahbrucemacdonald/perses
|
6b43d200501e587b352dce5aaefef38e4145048b
|
[
"MIT"
] | null | null | null |
###########################################
# IMPORTS
###########################################
from simtk.openmm import app
from simtk import unit, openmm
import numpy as np
import os
from nose.tools import nottest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from perses.annihilation.relative import HybridTopologyFactory
from perses.rjmc.geometry import FFAllAngleGeometryEngine
from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine, SystemGenerator, TopologyProposal
from perses.tests import utils
import openeye.oechem as oechem
from openmmtools import alchemy
from openmmtools.states import ThermodynamicState, SamplerState, CompoundThermodynamicState
import openmmtools.mcmc as mcmc
import openmmtools.cache as cache
from unittest import skipIf
import pymbar.timeseries as timeseries
import copy
import pymbar
istravis = os.environ.get('TRAVIS', None) == 'true'
try:
cache.global_context_cache.platform = openmm.Platform.getPlatformByName("Reference")
except Exception:
cache.global_context_cache.platform = openmm.Platform.getPlatformByName("Reference")
#############################################
# CONSTANTS
#############################################
kB = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA
temperature = 300.0 * unit.kelvin
kT = kB * temperature
beta = 1.0/kT
CARBON_MASS = 12.01
ENERGY_THRESHOLD = 1e-1
REFERENCE_PLATFORM = openmm.Platform.getPlatformByName("CPU")
def run_hybrid_endpoint_overlap(topology_proposal, current_positions, new_positions):
"""
Test that the variance of the perturbation from lambda={0,1} to the corresponding nonalchemical endpoint is not
too large.
Parameters
----------
topology_proposal : perses.rjmc.TopologyProposal
TopologyProposal object describing the transformation
current_positions : np.array, unit-bearing
Positions of the initial system
new_positions : np.array, unit-bearing
Positions of the new system
Returns
-------
hybrid_endpoint_results : list
list of [df, ddf, N_eff] for 1 and 0
"""
#create the hybrid system:
#hybrid_factory = HybridTopologyFactory(topology_proposal, current_positions, new_positions, use_dispersion_correction=True)
hybrid_factory = HybridTopologyFactory(topology_proposal, current_positions, new_positions, use_dispersion_correction=False) # DEBUG
#get the relevant thermodynamic states:
nonalchemical_zero_thermodynamic_state, nonalchemical_one_thermodynamic_state, lambda_zero_thermodynamic_state, lambda_one_thermodynamic_state = utils.generate_endpoint_thermodynamic_states(
hybrid_factory.hybrid_system, topology_proposal)
nonalchemical_thermodynamic_states = [nonalchemical_zero_thermodynamic_state, nonalchemical_one_thermodynamic_state]
alchemical_thermodynamic_states = [lambda_zero_thermodynamic_state, lambda_one_thermodynamic_state]
#create an MCMCMove, BAOAB with default parameters (but don't restart if we encounter a NaN)
mc_move = mcmc.LangevinDynamicsMove(n_restart_attempts=0, n_steps=100)
initial_sampler_state = SamplerState(hybrid_factory.hybrid_positions, box_vectors=hybrid_factory.hybrid_system.getDefaultPeriodicBoxVectors())
hybrid_endpoint_results = []
all_results = []
for lambda_state in (0, 1):
result, non, hybrid = run_endpoint_perturbation(alchemical_thermodynamic_states[lambda_state],
nonalchemical_thermodynamic_states[lambda_state], initial_sampler_state,
mc_move, 100, hybrid_factory, lambda_index=lambda_state)
all_results.append(non)
all_results.append(hybrid)
print('lambda {} : {}'.format(lambda_state,result))
hybrid_endpoint_results.append(result)
calculate_cross_variance(all_results)
return hybrid_endpoint_results
def calculate_cross_variance(all_results):
"""
Calculates the overlap (df and ddf) between the non-alchemical state at lambda=0 to the hybrid state at lambda=1 and visa versa
These ensembles are not expected to have good overlap, as they are of explicitly different system, but provides a benchmark of appropriate dissimilarity
"""
if len(all_results) != 4:
return
else:
non_a = all_results[0]
hybrid_a = all_results[1]
non_b = all_results[2]
hybrid_b = all_results[3]
print('CROSS VALIDATION')
[df, ddf] = pymbar.EXP(non_a - hybrid_b)
print('df: {}, ddf: {}'.format(df, ddf))
[df, ddf] = pymbar.EXP(non_b - hybrid_a)
print('df: {}, ddf: {}'.format(df, ddf))
return
def check_result(results, threshold=3.0, neffmin=10):
"""
Ensure results are within threshold standard deviations and Neff_max > neffmin
Parameters
----------
results : list
list of [df, ddf, Neff_max]
threshold : float, default 3.0
the standard deviation threshold
neff_min : float, default 10
the minimum number of acceptable samples
"""
[df, ddf, t0, N_eff] = results
if N_eff < neffmin:
raise Exception("Number of effective samples %f was below minimum of %f" % (N_eff, neffmin))
if ddf > threshold:
raise Exception("Standard deviation of %f exceeds threshold of %f" % (ddf, threshold))
def test_networkx_proposal_order():
"""
This test fails with a 'no topical torsions found' error with the old ProposalOrderTools
"""
pairs=[('pentane','propane')]
for pair in pairs:
print('{} -> {}'.format(pair[0],pair[1]))
test_simple_overlap(pair[0],pair[1])
print('{} -> {}'.format(pair[1],pair[0]))
test_simple_overlap(pair[1],pair[0])
def test_explosion():
"""
This test fails with ridiculous DeltaF if the alchemical factory is misbehaving
"""
pairs = [['2-phenyl ethanol', 'benzene']]
for pair in pairs:
print('{} -> {}'.format(pair[0],pair[1]))
test_simple_overlap(pair[0],pair[1])
print('{} -> {}'.format(pair[1],pair[0]))
test_simple_overlap(pair[1],pair[0])
def test_vacuum_overlap_with_constraints():
"""
Test that constraints do not cause problems for the hybrid factory in vacuum
"""
test_simple_overlap('2-phenyl ethanol', 'benzene', forcefield_kwargs={'constraints' : app.HBonds})
def test_valence_overlap():
"""
Test hybrid factory vacuum overlap with valence terms only
"""
system_generator_kwargs = {
'particle_charge' : False, 'exception_charge' : False, 'particle_epsilon' : False, 'exception_epsilon' : False, 'torsions' : True,
}
test_simple_overlap('2-phenyl ethanol', 'benzene', system_generator_kwargs=system_generator_kwargs)
def test_bonds_angles_overlap():
"""
Test hybrid factory vacuum overlap with bonds and angles
"""
system_generator_kwargs = {
'particle_charge' : False, 'exception_charge' : False, 'particle_epsilon' : False, 'exception_epsilon' : False, 'torsions' : False,
}
test_simple_overlap('2-phenyl ethanol', 'benzene', system_generator_kwargs=system_generator_kwargs)
def test_sterics_overlap():
"""
Test hybrid factory vacuum overlap with valence terms and sterics only
"""
system_generator_kwargs = {
'particle_charge' : False, 'exception_charge' : False, 'particle_epsilon' : True, 'exception_epsilon' : True, 'torsions' : True,
}
test_simple_overlap('2-phenyl ethanol', 'benzene', system_generator_kwargs=system_generator_kwargs)
def test_simple_overlap_pairs(pairs=None):
"""
Test to run pairs of small molecule perturbations in vacuum, using test_simple_overlap, both forward and backward.
Parameters
----------
pairs : list of lists of str, optional, default=None
Pairs of IUPAC names to test.
If None, will test a default set:
[['pentane','butane'],['fluorobenzene', 'chlorobenzene'],['benzene', 'catechol'],['benzene','2-phenyl ethanol'],['imatinib','nilotinib']]
pentane <-> butane is adding a methyl group
fluorobenzene <-> chlorobenzene perturbs one halogen to another, with no adding or removing of atoms
benzene <-> catechol perturbing molecule in two positions simultaneously
benzene <-> 2-phenyl ethanol addition of 3 heavy atom group
"""
if pairs is None:
pairs = [['pentane','butane'],['fluorobenzene', 'chlorobenzene'],['benzene', 'catechol'],['benzene','2-phenyl ethanol']] #'imatinib' --> 'nilotinib' atom mapping is bad
for pair in pairs:
print('{} -> {}'.format(pair[0],pair[1]))
test_simple_overlap(pair[0],pair[1])
# now running the reverse
print('{} -> {}'.format(pair[1],pair[0]))
test_simple_overlap(pair[1],pair[0])
@nottest #this is, in fact, a helper function that is called in other working tests
@skipIf(istravis, "Skip helper function on travis")
def test_simple_overlap(name1='pentane', name2='butane', forcefield_kwargs=None, system_generator_kwargs=None):
"""Test that the variance of the hybrid -> real perturbation in vacuum is sufficiently small.
Parameters
----------
name1 : str
IUPAC name of initial molecule
name2 : str
IUPAC name of final molecule
forcefield_kwargs : dict, optional, default=None
If None, these parameters are fed to the SystemGenerator
Setting { 'constraints' : app.HBonds } will enable constraints to hydrogen
system_generator_kwargs : dict, optional, default=None
If None, these parameters are fed to the SystemGenerator
Setting { 'particle_charge' : False } will turn off particle charges in parameterized systems
Can also disable 'exception_charge', 'particle_epsilon', 'exception_epsilon', and 'torsions' by setting to False
"""
topology_proposal, current_positions, new_positions = utils.generate_solvated_hybrid_test_topology(current_mol_name=name1, proposed_mol_name=name2, vacuum = True)
results = run_hybrid_endpoint_overlap(topology_proposal, current_positions, new_positions)
for idx, lambda_result in enumerate(results):
try:
check_result(lambda_result)
except Exception as e:
message = "pentane->butane failed at lambda %d \n" % idx
message += str(e)
raise Exception(message)
@skipIf(istravis, "Skip expensive test on travis")
def test_hostguest_overlap():
"""Test that the variance of the endpoint->nonalchemical perturbation is sufficiently small for host-guest system in vacuum"""
topology_proposal, current_positions, new_positions = utils.generate_vacuum_hostguest_proposal()
results = run_hybrid_endpoint_overlap(topology_proposal, current_positions, new_positions)
for idx, lambda_result in enumerate(results):
try:
check_result(lambda_result)
except Exception as e:
message = "pentane->butane failed at lambda %d \n" % idx
message += str(e)
raise Exception(message)
@skipIf(istravis, "Skip broken test on travis")
@nottest # at the moment, the mapping between imatinib and nilotinib is faulty
def test_difficult_overlap():
"""Test that the variance of the endpoint->nonalchemical perturbation is sufficiently small for imatinib->nilotinib in solvent"""
name1 = 'imatinib'
name2 = 'nilotinib'
print(name1, name2)
topology_proposal, solvated_positions, new_positions = utils.generate_solvated_hybrid_test_topology(current_mol_name=name1, proposed_mol_name=name2)
results = run_hybrid_endpoint_overlap(topology_proposal, solvated_positions, new_positions)
for idx, lambda_result in enumerate(results):
try:
check_result(lambda_result)
except Exception as e:
message = "solvated imatinib->nilotinib failed at lambda %d \n" % idx
message += str(e)
raise Exception(message)
print(name2, name1)
topology_proposal, solvated_positions, new_positions = utils.generate_solvated_hybrid_test_topology(current_mol_name=name2, proposed_mol_name=name1)
results = run_hybrid_endpoint_overlap(topology_proposal, solvated_positions, new_positions)
for idx, lambda_result in enumerate(results):
try:
check_result(lambda_result)
except Exception as e:
message = "solvated imatinib->nilotinib failed at lambda %d \n" % idx
message += str(e)
raise Exception(message)
def run_endpoint_perturbation(lambda_thermodynamic_state, nonalchemical_thermodynamic_state, initial_hybrid_sampler_state, mc_move, n_iterations, factory,
lambda_index=0, print_work=False, write_system=False, write_state=False, write_trajectories=False):
"""
Parameters
----------
lambda_thermodynamic_state : ThermodynamicState
The thermodynamic state corresponding to the hybrid system at a lambda endpoint
nonalchemical_thermodynamic_state : ThermodynamicState
The nonalchemical thermodynamic state for the relevant endpoint
initial_hybrid_sampler_state : SamplerState
Starting positions for the sampler. Must be compatible with lambda_thermodynamic_state
mc_move : MCMCMove
The MCMove that will be used for sampling at the lambda endpoint
n_iterations : int
The number of iterations
factory : HybridTopologyFactory
The hybrid topology factory
lambda_index : int, optional, default=0
The index, 0 or 1, at which to retrieve nonalchemical positions
print_work : bool, optional, default=False
If True, will print work values
write_system : bool, optional, default=False
If True, will write alchemical and nonalchemical System XML files
write_state : bool, optional, default=False
If True, write alchemical (hybrid) State XML files each iteration
write_trajectories : bool, optional, default=False
If True, will write trajectories
Returns
-------
df : float
Free energy difference between alchemical and nonalchemical systems, estimated with EXP
ddf : float
Standard deviation of estimate, corrected for correlation, from EXP estimator.
"""
import mdtraj as md
#run an initial minimization:
mcmc_sampler = mcmc.MCMCSampler(lambda_thermodynamic_state, initial_hybrid_sampler_state, mc_move)
mcmc_sampler.minimize(max_iterations=20)
new_sampler_state = mcmc_sampler.sampler_state
if write_system:
with open(f'hybrid{lambda_index}-system.xml', 'w') as outfile:
outfile.write(openmm.XmlSerializer.serialize(lambda_thermodynamic_state.system))
with open(f'nonalchemical{lambda_index}-system.xml', 'w') as outfile:
outfile.write(openmm.XmlSerializer.serialize(nonalchemical_thermodynamic_state.system))
#initialize work array
w = np.zeros([n_iterations])
non_potential = np.zeros([n_iterations])
hybrid_potential = np.zeros([n_iterations])
#run n_iterations of the endpoint perturbation:
hybrid_trajectory = unit.Quantity(np.zeros([n_iterations, lambda_thermodynamic_state.system.getNumParticles(), 3]), unit.nanometers) # DEBUG
nonalchemical_trajectory = unit.Quantity(np.zeros([n_iterations, nonalchemical_thermodynamic_state.system.getNumParticles(), 3]), unit.nanometers) # DEBUG
for iteration in range(n_iterations):
# Generate a new sampler state for the hybrid system
mc_move.apply(lambda_thermodynamic_state, new_sampler_state)
# Compute the hybrid reduced potential at the new sampler state
hybrid_context, integrator = cache.global_context_cache.get_context(lambda_thermodynamic_state)
new_sampler_state.apply_to_context(hybrid_context, ignore_velocities=True)
hybrid_reduced_potential = lambda_thermodynamic_state.reduced_potential(hybrid_context)
if write_state:
state = hybrid_context.getState(getPositions=True, getParameters=True)
state_xml = openmm.XmlSerializer.serialize(state)
with open(f'state{iteration}_l{lambda_index}.xml', 'w') as outfile:
outfile.write(state_xml)
# Construct a sampler state for the nonalchemical system
if lambda_index == 0:
nonalchemical_positions = factory.old_positions(new_sampler_state.positions)
elif lambda_index == 1:
nonalchemical_positions = factory.new_positions(new_sampler_state.positions)
else:
raise ValueError("The lambda index needs to be either one or zero for this to be meaningful")
nonalchemical_sampler_state = SamplerState(nonalchemical_positions, box_vectors=new_sampler_state.box_vectors)
if write_trajectories:
state = hybrid_context.getState(getPositions=True)
hybrid_trajectory[iteration,:,:] = state.getPositions(asNumpy=True)
nonalchemical_trajectory[iteration,:,:] = nonalchemical_positions
# Compute the nonalchemical reduced potential
nonalchemical_context, integrator = cache.global_context_cache.get_context(nonalchemical_thermodynamic_state)
nonalchemical_sampler_state.apply_to_context(nonalchemical_context, ignore_velocities=True)
nonalchemical_reduced_potential = nonalchemical_thermodynamic_state.reduced_potential(nonalchemical_context)
# Compute and store the work
w[iteration] = nonalchemical_reduced_potential - hybrid_reduced_potential
non_potential[iteration] = nonalchemical_reduced_potential
hybrid_potential[iteration] = hybrid_reduced_potential
if print_work:
print(f'{iteration:8d} {hybrid_reduced_potential:8.3f} {nonalchemical_reduced_potential:8.3f} => {w[iteration]:8.3f}')
if write_trajectories:
if lambda_index == 0:
nonalchemical_mdtraj_topology = md.Topology.from_openmm(factory._topology_proposal.old_topology)
elif lambda_index == 1:
nonalchemical_mdtraj_topology = md.Topology.from_openmm(factory._topology_proposal.new_topology)
md.Trajectory(hybrid_trajectory / unit.nanometers, factory.hybrid_topology).save(f'hybrid{lambda_index}.pdb')
md.Trajectory(nonalchemical_trajectory / unit.nanometers, nonalchemical_mdtraj_topology).save(f'nonalchemical{lambda_index}.pdb')
# Analyze data and return results
[t0, g, Neff_max] = timeseries.detectEquilibration(w)
w_burned_in = w[t0:]
[df, ddf] = pymbar.EXP(w_burned_in)
ddf_corrected = ddf * np.sqrt(g)
results = [df, ddf_corrected, t0, Neff_max]
return results, non_potential, hybrid_potential
def compare_energies(mol_name="naphthalene", ref_mol_name="benzene"):
    """
    Map a molecule onto itself (identity atom map at both lambda endpoints)
    and check that the two endpoint reduced potentials agree.
    """
    from openmmtools import alchemy, states
    from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine, TopologyProposal
    from perses.annihilation.relative import HybridTopologyFactory
    import simtk.openmm as openmm
    from perses.utils.openeye import createSystemFromIUPAC
    from openmoltools.openeye import iupac_to_oemol,generate_conformers

    # Build single-conformer OEMols for the target and reference molecules.
    oemol = generate_conformers(iupac_to_oemol(mol_name), max_confs=1)
    m, system, positions, topology = createSystemFromIUPAC(mol_name)
    ref_oemol = generate_conformers(iupac_to_oemol(ref_mol_name), max_confs=1)

    # Map one of the rings.
    ring_map = SmallMoleculeSetProposalEngine._get_mol_atom_map(oemol, ref_oemol)

    # Use the mapped atoms to map the molecule onto itself: the same atoms at
    # lambda=0 and lambda=1, the same positions, and a contiguous atom map.
    identity_map = {atom_index : atom_index for atom_index in ring_map.values()}

    # Assemble a topology proposal whose old and new states are the same system.
    top_proposal = TopologyProposal(new_topology=topology, new_system=system, old_topology=topology, old_system=system, new_to_old_atom_map=identity_map, new_chemical_state_key="n1", old_chemical_state_key='n2')
    factory = HybridTopologyFactory(top_proposal, positions, positions)
    alchemical_system = factory.hybrid_system
    alchemical_positions = factory.hybrid_positions

    platform = openmm.Platform.getPlatformByName("Reference")
    _, _, alch_zero_state, alch_one_state = utils.generate_endpoint_thermodynamic_states(alchemical_system, top_proposal)

    # Evaluate the reduced potential at each endpoint state.
    reduced_potentials = []
    for endpoint_state in (alch_zero_state, alch_one_state):
        integrator = openmm.VerletIntegrator(1)
        context = endpoint_state.create_context(integrator, platform)
        sampler_state = states.SamplerState(positions=alchemical_positions, box_vectors=alchemical_system.getDefaultPeriodicBoxVectors())
        sampler_state.apply_to_context(context)
        reduced_potentials.append(endpoint_state.reduced_potential(context))
        del context, integrator

    # Both endpoints describe the same physical system, so energies must match.
    assert abs(reduced_potentials[0] - reduced_potentials[1]) < 1e-6
def test_compare_energies():
    """Run the identity-map energy comparison over several molecule pairs."""
    pairs = (("naphthalene", "benzene"), ("pentane", "propane"), ("biphenyl", "benzene"))
    for mol, ref in pairs:
        compare_energies(mol_name=mol, ref_mol_name=ref)
def test_position_output():
    """
    Check that the hybrid factory reproduces the old and new system positions
    that were used to construct it.
    """
    from perses.annihilation.relative import HybridTopologyFactory
    import numpy as np

    # Generate a topology proposal plus endpoint positions.
    topology_proposal, old_positions, new_positions = utils.generate_solvated_hybrid_test_topology()
    factory = HybridTopologyFactory(topology_proposal, old_positions, new_positions)

    # Project the hybrid positions back onto each endpoint and compare in nm.
    for reference, projected in (
            (old_positions, factory.old_positions(factory.hybrid_positions)),
            (new_positions, factory.new_positions(factory.hybrid_positions))):
        assert np.all(np.isclose(reference.in_units_of(unit.nanometers), projected.in_units_of(unit.nanometers)))
def test_generate_endpoint_thermodynamic_states():
    """
    Test whether the hybrid system zero and one thermodynamic states have the
    appropriate lambda values.

    Raises
    ------
    Exception
        Naming the offending lambda parameter and its actual value, if either
        endpoint state is not fully at lambda = 0 or lambda = 1 respectively.
    """
    topology_proposal, current_positions, new_positions = utils.generate_solvated_hybrid_test_topology(current_mol_name='propane', proposed_mol_name='pentane', vacuum = False)
    hybrid_factory = HybridTopologyFactory(topology_proposal, current_positions, new_positions, use_dispersion_correction=True)

    # Get the relevant thermodynamic states:
    _, _, lambda_zero_thermodynamic_state, lambda_one_thermodynamic_state = utils.generate_endpoint_thermodynamic_states(hybrid_factory.hybrid_system, topology_proposal)

    # Check the lambda parameters for each endpoint state.
    lambda_protocol = ['lambda_sterics_core','lambda_electrostatics_core','lambda_sterics_insert','lambda_electrostatics_insert','lambda_sterics_delete','lambda_electrostatics_delete']
    for value in lambda_protocol:
        if getattr(lambda_zero_thermodynamic_state, value) != 0.:
            # Bug fix: the message previously reported the lambda-one state's
            # value here instead of the offending lambda-zero value.
            raise Exception('Interaction {} not set to 0. at lambda = 0. {} set to {}'.format(value,value, getattr(lambda_zero_thermodynamic_state, value)))
        if getattr(lambda_one_thermodynamic_state, value) != 1.:
            raise Exception('Interaction {} not set to 1. at lambda = 1. {} set to {}'.format(value,value, getattr(lambda_one_thermodynamic_state, value)))
def HybridTopologyFactory_energies(current_mol = 'toluene', proposed_mol = '1,2-bis(trifluoromethyl) benzene'):
    """
    Test whether the difference in the nonalchemical zero and alchemical zero
    states is the forward valence energy.  Also test for the one states.

    Parameters
    ----------
    current_mol : str, default 'toluene'
        IUPAC name of the molecule at the lambda = 0 endpoint.
    proposed_mol : str, default '1,2-bis(trifluoromethyl) benzene'
        IUPAC name of the molecule at the lambda = 1 endpoint.

    Raises
    ------
    AssertionError
        If either endpoint energy mismatch exceeds ENERGY_THRESHOLD.
    """
    from perses.tests.utils import generate_solvated_hybrid_test_topology, generate_endpoint_thermodynamic_states
    import openmmtools.cache as cache

    # Just test the solvated system
    top_proposal, old_positions, _ = generate_solvated_hybrid_test_topology(current_mol_name = current_mol, proposed_mol_name = proposed_mol)

    # Remove the dispersion correction.
    # NOTE(review): force index 3 is assumed to be the NonbondedForce in both
    # systems -- confirm against the system construction order.
    top_proposal._old_system.getForce(3).setUseDispersionCorrection(False)
    top_proposal._new_system.getForce(3).setUseDispersionCorrection(False)

    # Run geometry engine to generate old and new positions.
    _geometry_engine = FFAllAngleGeometryEngine(metadata=None, use_sterics=False, n_bond_divisions=100, n_angle_divisions=180, n_torsion_divisions=360, verbose=True, storage=None, bond_softening_constant=1.0, angle_softening_constant=1.0, neglect_angles = False)
    _new_positions, _lp = _geometry_engine.propose(top_proposal, old_positions, beta)
    _lp_rev = _geometry_engine.logp_reverse(top_proposal, _new_positions, old_positions, beta)

    # Make the hybrid system, reset the CustomNonbondedForce cutoff.
    HTF = HybridTopologyFactory(top_proposal, old_positions, _new_positions)
    hybrid_system = HTF.hybrid_system

    nonalch_zero, nonalch_one, alch_zero, alch_one = generate_endpoint_thermodynamic_states(hybrid_system, top_proposal)

    # Compute reduced energies for the nonalchemical endpoints and both
    # alchemical endpoint states: (state, positions, box vectors) triples.
    attrib_list = [(nonalch_zero, old_positions, top_proposal._old_system.getDefaultPeriodicBoxVectors()),
                   (alch_zero, HTF._hybrid_positions, hybrid_system.getDefaultPeriodicBoxVectors()),
                   (alch_one, HTF._hybrid_positions, hybrid_system.getDefaultPeriodicBoxVectors()),
                   (nonalch_one, _new_positions, top_proposal._new_system.getDefaultPeriodicBoxVectors())]

    rp_list = []
    for (state, pos, box_vectors) in attrib_list:
        context, integrator = cache.global_context_cache.get_context(state)
        samplerstate = SamplerState(positions = pos, box_vectors = box_vectors)
        samplerstate.apply_to_context(context)
        rp = state.reduced_potential(context)
        rp_list.append(rp)

    # Valence energy definitions: energy the geometry engine added when
    # proposing the new atoms (forward) and the corresponding reverse quantity.
    forward_added_valence_energy = _geometry_engine.forward_final_context_reduced_potential - _geometry_engine.forward_atoms_with_positions_reduced_potential
    reverse_subtracted_valence_energy = _geometry_engine.reverse_final_context_reduced_potential - _geometry_engine.reverse_atoms_with_positions_reduced_potential

    nonalch_zero_rp, alch_zero_rp, alch_one_rp, nonalch_one_rp = rp_list[0], rp_list[1], rp_list[2], rp_list[3]
    # print(f"Difference between zeros: {nonalch_zero_rp - alch_zero_rp}; forward added: {forward_added_valence_energy}")
    # print(f"Difference between ones: {nonalch_zero_rp - alch_zero_rp}; forward added: {forward_added_valence_energy}")

    # The nonalchemical and alchemical endpoints must differ by exactly the
    # valence energy contribution within ENERGY_THRESHOLD.
    assert abs(nonalch_zero_rp - alch_zero_rp + forward_added_valence_energy) < ENERGY_THRESHOLD, f"The zero state alchemical and nonalchemical energy absolute difference {abs(nonalch_zero_rp - alch_zero_rp + forward_added_valence_energy)} is greater than the threshold of {ENERGY_THRESHOLD}."
    assert abs(nonalch_one_rp - alch_one_rp + reverse_subtracted_valence_energy) < ENERGY_THRESHOLD, f"The one state alchemical and nonalchemical energy absolute difference {abs(nonalch_one_rp - alch_one_rp + reverse_subtracted_valence_energy)} is greater than the threshold of {ENERGY_THRESHOLD}."

    print(f"Abs difference in zero alchemical vs nonalchemical systems: {abs(nonalch_zero_rp - alch_zero_rp + forward_added_valence_energy)}")
    print(f"Abs difference in one alchemical vs nonalchemical systems: {abs(nonalch_one_rp - alch_one_rp + reverse_subtracted_valence_energy)}")
def test_HybridTopologyFactory_energies(molecule_perturbation_list = [['naphthalene', 'benzene'], ['pentane', 'propane'], ['biphenyl', 'benzene']]):
    """
    For each (current, proposed) molecule pair, verify that the nonalchemical
    and alchemical endpoint energies differ by the valence energy the geometry
    engine added (zero state) or subtracted (one state).
    """
    # NOTE: the mutable default list is only iterated, never mutated.
    for molecule_pair in molecule_perturbation_list:
        print(f"\tconduct energy comparison for {molecule_pair[0]} --> {molecule_pair[1]}")
        HybridTopologyFactory_energies(current_mol=molecule_pair[0], proposed_mol=molecule_pair[1])
| 49.824779
| 298
| 0.73802
|
4a01ed9991fec5fbb21754f21d0bde97d71a3959
| 10,096
|
py
|
Python
|
_setup/py2/make/_make.py
|
urbaniak/rjsmin
|
50edd2fbf3a4fcd75dc579cdde416927aef9124f
|
[
"Apache-2.0"
] | 11
|
2016-09-14T21:59:47.000Z
|
2019-01-28T21:58:15.000Z
|
_setup/py2/make/_make.py
|
openstack/deb-python-rcssmin
|
eda03a6cb109deed481e0bc31811e775e96c9d5e
|
[
"Apache-2.0"
] | null | null | null |
_setup/py2/make/_make.py
|
openstack/deb-python-rcssmin
|
eda03a6cb109deed481e0bc31811e775e96c9d5e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: ascii -*-
#
# Copyright 2007, 2008, 2009, 2010, 2011
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
==================
Simple make base
==================
Simple make base.
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
import sys as _sys
from _setup import term as _term
class Failure(SystemExit):
    """Raised to abort the build with a failure message (subclasses SystemExit)."""
def fail(reason):
    """Abort the current target by raising `Failure` carrying *reason*."""
    raise Failure(reason)
def warn(message, name=None):
    """Emit a warning via the terminal helper, optionally prefixed by *name*."""
    prefix = "%s:" % name if name else ''
    _term.red("%(NAME)sWarning: %(msg)s", NAME=prefix, msg=message)
def fatal(reason):
    """Write *reason* to stderr and terminate immediately with exit code 1."""
    _sys.stderr.write("%s\n" % reason)
    _sys.exit(1)
class Target(object):
    """
    Base class for make targets.

    Class attributes:
      NAME   -- public target name; None keeps the class out of the registry
      DEPS   -- sequence of dependency target names, or None
      HIDDEN -- True hides the target from the help listing
      ERROR  -- failure message, set by the runner when the target fails
    """

    NAME = None
    DEPS = None
    HIDDEN = False
    ERROR = None

    def __init__(self, runner):
        """Store the owning runner, then invoke the `init` hook."""
        self.runner = runner
        self.init()

    def init(self):
        """Initialization hook for subclasses; default is a no-op."""

    def run(self):
        """Execution hook for subclasses; default is a no-op."""

    def clean(self, scm=True, dist=False):
        """Cleanup hook for subclasses; default is a no-op."""
class _Runner(object):
    """
    Runner

    Collects `Target` subclasses from the given modules and executes them
    (including their dependencies). The instance cache in ``_itdict`` ensures
    a target runs at most once across chained invocations.
    """

    def __init__(self, *targetscollection):
        """
        Initialization

        Scans each module for Target subclasses with a non-None NAME.
        Without arguments, ``__main__`` is scanned; the default targets
        module is always added.
        """
        tdict = {}
        if not targetscollection:
            import __main__
            targetscollection = [__main__]
        from _setup.make import default_targets
        if default_targets not in targetscollection:
            # NOTE(review): when modules are passed explicitly,
            # targetscollection is a tuple here, so .append() would raise
            # AttributeError -- verify whether that path is ever taken.
            targetscollection.append(default_targets)
        for targets in targetscollection:
            for value in vars(targets).values():
                if isinstance(value, type) and issubclass(value, Target) and \
                        value.NAME is not None:
                    if value.NAME in tdict:
                        # Resolve name clashes in favor of the most derived class.
                        if issubclass(value, tdict[value.NAME]):
                            pass # override base target
                        elif issubclass(tdict[value.NAME], value):
                            continue # found base later. ignore
                        else:
                            warn('Ambiguous target name', value.NAME)
                            continue
                    tdict[value.NAME] = value
        self._tdict = tdict
        self._itdict = {}  # instance cache shared across __call__ invocations

    def print_help(self):
        """ Print make help: non-hidden targets with wrapped descriptions. """
        import textwrap as _textwrap
        targets = self.targetinfo()
        keys = []
        for key, info in targets.items():
            if not info['hide']:
                keys.append(key)
        keys.sort()
        length = max(map(len, keys))
        info = []
        for key in keys:
            # Pad the name to a fixed column, then wrap the description.
            info.append("%s%s" % (
                (key + " " * length)[:length + 2],
                _textwrap.fill(
                    targets[key]['desc'].strip(),
                    subsequent_indent=" " * (length + 2)
                ),
            ))
        print "Available targets:\n\n" + "\n".join(info)

    def targetinfo(self):
        """
        Extract target information

        Returns a dict mapping target name to
        ``{'desc': str, 'hide': bool, 'deps': tuple}``.
        """
        result = {}
        for name, cls in self._tdict.items():
            result[name] = {
                'desc': cls.__doc__ or "no description",
                'hide': cls.HIDDEN,
                'deps': cls.DEPS or (),
            }
        return result

    def _topleveltargets(self):
        """ Find all top level targets (targets nothing else depends on). """
        rev = {} # key is a dep of [values]
        all_ = self.targetinfo()
        for target, info in all_.items():
            for dep in info['deps']:
                if dep not in all_:
                    fatal("Unknown target '%s' (dep of %s) -> exit" % (
                        dep, target
                    ))
                rev.setdefault(dep, []).append(target)
        # NOTE(review): every list appended above is non-empty, so `not info`
        # never holds and this comprehension always yields [] -- the top-level
        # detection looks broken; confirm the intended semantics.
        return [target for target, info in rev.items() if not info]

    def _run(self, target, seen=None):
        """
        Run a target

        Dependencies are executed first (via __call__). Returns True on
        success; on failure `target.ERROR` carries the message. SystemExit
        and MemoryError propagate unchanged.
        """
        if target.DEPS:
            self(*target.DEPS, **{'seen': seen})
        if not target.HIDDEN:
            _term.yellow(">>> %(name)s", name=target.NAME)
        try:
            result = target.run()
        except KeyboardInterrupt:
            result, target.ERROR = False, "^C -> exit"
        except Failure, e:
            result, target.ERROR = False, "%s: %s" % (target.NAME, e)
        except (SystemExit, MemoryError):
            raise
        except:
            import traceback
            target.ERROR = "%s errored:\n%s" % (target.NAME, ''.join(
                traceback.format_exception(*_sys.exc_info())
            ))
            result = False
        else:
            # A run() hook returning None counts as success.
            if result is None:
                result = True
        return result

    def _clean(self, target, scm, dist, seen=None):
        """
        Run a target's clean() hook (dependencies first).

        Same success/failure protocol as _run().
        """
        if target.DEPS:
            self.run_clean(
                *target.DEPS, **{'scm': scm, 'dist': dist, 'seen': seen}
            )
        try:
            result = target.clean(scm, dist)
        except KeyboardInterrupt:
            result, target.ERROR = False, "^C -> exit"
        except Failure, e:
            result, target.ERROR = False, "%s: %s" % (target.NAME, e)
        except (SystemExit, MemoryError):
            raise
        except:
            import traceback
            target.ERROR = "%s errored:\n%s" % (target.NAME, ''.join(
                traceback.format_exception(*_sys.exc_info())
            ))
            result = False
        else:
            # A clean() hook returning None counts as success.
            if result is None:
                result = True
        return result

    def _make_init(self, seen):
        """
        Make init mapper

        Returns a function mapping a target name to a fresh instance, or to
        None if the name was already handed out (so each target is processed
        at most once per run). Unknown names abort via fatal().
        """
        def init(target):
            """ Return initialized target """
            if target not in seen:
                try:
                    seen[target] = self._tdict[target](self)
                except KeyError:
                    fatal("Unknown target '%s' -> exit" % target)
            else:
                seen[target] = None
            return seen[target]
        return init

    def run_clean(self, *targets, **kwargs):
        """
        Run clean on targets

        Keyword arguments: ``seen`` (instance cache), ``scm`` (also remove
        SCM-managed files), ``dist`` (distribution clean). With no explicit
        targets, all known targets are cleaned, top-level ones first.
        """
        def pop(name, default=None):
            """ Pop a kwarg, mapping an explicit None to the default. """
            if name in kwargs:
                value = kwargs[name]
                del kwargs[name]
                if value is None:
                    return default
                return value
            else:
                return default
        seen = pop('seen', {})
        scm = pop('scm', True)
        dist = pop('dist', False)
        if kwargs:
            raise TypeError('Unknown keyword parameters')
        if not targets:
            top_targets = self._topleveltargets()
            targets = self.targetinfo()
            for item in top_targets:
                del targets[item]
            targets = targets.keys()
            targets.sort()
            top_targets.sort()
            targets = top_targets + targets
        init = self._make_init(seen)
        for name in targets:
            target = init(name)
            if target is not None:
                if not self._clean(target, scm=scm, dist=dist, seen=seen):
                    msg = target.ERROR
                    if msg is None:
                        msg = "Clean target %s returned error -> exit" % name
                    fatal(msg)

    def __call__(self, *targets, **kwargs):
        """
        Run targets

        Each named target (and its dependencies) is run once per `seen`
        cache; any failure aborts the process via fatal().
        """
        if 'seen' in kwargs:
            seen = kwargs['seen']
            del kwargs['seen']
        else:
            seen = None
        if seen is None:
            seen = self._itdict
        if kwargs:
            raise TypeError('Unknown keyword parameters')
        init = self._make_init(seen)
        for name in targets:
            target = init(name)
            if target is not None:
                if not self._run(target, seen):
                    msg = target.ERROR
                    if msg is None:
                        msg = "Target %s returned error -> exit" % name
                    fatal(msg)
def main(*args, **kwargs):
    """
    main(argv=None, *args, name=None)

    Entry point: parse the command line and execute the requested targets.
    With no targets given on the command line, a help listing is printed.

    :Parameters:
      `argv` : sequence
        Command line arguments. If omitted or ``None``, ``sys.argv[1:]`` is
        used instead.

      `args` : ``tuple``
        Modules containing targets. If empty, ``__main__`` is used; the
        runner always adds the `_setup.make` module itself for the default
        targets.

      `name` : ``str``
        Name of the executing module. If omitted or ``None``, ``'__main__'``
        is assumed. If the effective name is not ``'__main__'``, nothing is
        executed.
    """
    name = kwargs.pop('name', None)
    if name is None:
        name = '__main__'

    if 'argv' in kwargs:
        # Explicit argv keyword: prepend it to the module list positionally.
        args = (kwargs.pop('argv'),) + args
    elif not args:
        args = (None,)

    if kwargs:
        raise TypeError("Unrecognized keyword arguments for main()")

    if name != '__main__':
        return

    argv, args = args[0], args[1:]
    if argv is None:
        argv = _sys.argv[1:]
    runner = _Runner(*args)
    if argv:
        runner(*argv)
    else:
        runner.print_help()
| 29.781711
| 78
| 0.512183
|
4a01edb012c6bb0ab5a64538417ecd8391351849
| 391
|
py
|
Python
|
compiled/construct/switch_else_only.py
|
smarek/ci_targets
|
c5edee7b0901fd8e7f75f85245ea4209b38e0cb3
|
[
"MIT"
] | 4
|
2017-04-08T12:55:11.000Z
|
2020-12-05T21:09:31.000Z
|
compiled/construct/switch_else_only.py
|
smarek/ci_targets
|
c5edee7b0901fd8e7f75f85245ea4209b38e0cb3
|
[
"MIT"
] | 7
|
2018-04-23T01:30:33.000Z
|
2020-10-30T23:56:14.000Z
|
compiled/construct/switch_else_only.py
|
smarek/ci_targets
|
c5edee7b0901fd8e7f75f85245ea4209b38e0cb3
|
[
"MIT"
] | 6
|
2017-04-08T11:41:14.000Z
|
2020-10-30T22:47:31.000Z
|
from construct import *
from construct.lib import *
# Substructure used as the default ("else") branch of the 'ut' switch below:
# a single field holding four raw bytes.
switch_else_only__data = Struct(
    'value' / FixedSized(4, GreedyBytes),
)

# Parser exercising Switch constructs whose case maps are empty, so only the
# default ("else") branch can ever match.
switch_else_only = Struct(
    'opcode' / Int8sb,
    # Empty case dict: always falls through to the default parser.
    'prim_byte' / Switch(this.opcode, {}, default=Int8sb),
    'indicator' / FixedSized(4, GreedyBytes),
    # LazyBound defers evaluation of the referenced substructure.
    'ut' / Switch(this.indicator, {}, default=LazyBound(lambda: switch_else_only__data)),
)

# Top-level schema exported by this generated module.
_schema = switch_else_only
| 24.4375
| 86
| 0.734015
|
4a01eeff6c492a3e7ffb6034acd26210d81d8655
| 1,437
|
py
|
Python
|
2017/quals/2017-re-johnny-boy/chall/generate_image.py
|
tonghuaroot/google-ctf
|
46cb7676b4f8e1957bec352e76fa8f335cee1bd5
|
[
"Apache-2.0"
] | 2,757
|
2018-04-28T21:41:36.000Z
|
2022-03-29T06:33:36.000Z
|
2017/quals/2017-re-johnny-boy/chall/generate_image.py
|
tonghuaroot/google-ctf
|
46cb7676b4f8e1957bec352e76fa8f335cee1bd5
|
[
"Apache-2.0"
] | 20
|
2019-07-23T15:29:32.000Z
|
2022-01-21T12:53:04.000Z
|
2017/quals/2017-re-johnny-boy/chall/generate_image.py
|
tonghuaroot/google-ctf
|
46cb7676b4f8e1957bec352e76fa8f335cee1bd5
|
[
"Apache-2.0"
] | 449
|
2018-05-09T05:54:05.000Z
|
2022-03-30T14:54:18.000Z
|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import Image
import ImageDraw
except ImportError:
from PIL import Image
from PIL import ImageDraw
import sys
def chunks(l, n):
    """Yield successive n-element slices of l (the last may be shorter)."""
    pos = 0
    while pos < len(l):
        yield l[pos:pos + n]
        pos += n
if len(sys.argv) != 2:
print 'Usage: {} <flag>'.format(sys.argv[0])
txt = 'Congratulations!\n The flag is:\n{}'.format(sys.argv[1])
image = Image.new('RGBA', (128, 64), (0, 0, 0))
draw = ImageDraw.Draw(image)
draw.text((3, 0), txt, (255, 255, 255))
res = []
for pixels in chunks(list(image.getdata()), 8):
byte = ['1' if x[0] == 255 else '0' for x in pixels]
res.append(int("".join(byte), 2))
line = '{'
for b in res:
line += '{}, '.format(hex(b))
line += '}'
with open('bitmaps_tmpl.h') as f:
content = f.read().replace('__REPLACE_WITH_FLAG__', line)
with open('bitmaps.h', 'w') as f:
f.write(content)
| 28.176471
| 74
| 0.663883
|
4a01ef4e3fdca074d9def32111ae87071a765149
| 275
|
py
|
Python
|
tests/test_merge_the_tools.py
|
yxtay/code-ex
|
aabcfd4741067a8515fe75441c10853b5724bc01
|
[
"MIT"
] | null | null | null |
tests/test_merge_the_tools.py
|
yxtay/code-ex
|
aabcfd4741067a8515fe75441c10853b5724bc01
|
[
"MIT"
] | null | null | null |
tests/test_merge_the_tools.py
|
yxtay/code-ex
|
aabcfd4741067a8515fe75441c10853b5724bc01
|
[
"MIT"
] | null | null | null |
import pytest
from src.merge_the_tools import merge_the_tools
cases = [
("AABCAAADA", 3, ["AB", "CA", "AD"]),
]
@pytest.mark.parametrize("string, k,expected", cases)
def test(string, k, expected):
result = merge_the_tools(string, k)
assert result == expected
| 19.642857
| 53
| 0.68
|
4a01f0a75a11e6b8b86a903aad114c039eab5fec
| 2,575
|
py
|
Python
|
infoblox_netmri/api/remote/models/report_job_run_remote.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/api/remote/models/report_job_run_remote.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/api/remote/models/report_job_run_remote.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class ReportJobRunRemote(RemoteModel):
    """
    Report Job information for reports that have been run on the system in the past 7 days.

    |  ``id:`` The internal NetMRI identifier for the report job.
    |  ``attribute type:`` number

    |  ``report_id:`` The internal NetMRI identifier for a specific report.
    |  ``attribute type:`` number

    |  ``status:`` The report running status.
    |  ``attribute type:`` string

    |  ``start_time:`` The date and time the report job started running.
    |  ``attribute type:`` datetime

    |  ``is_foreground:`` Value to indicate the report is being run in the NetMRI GUI.
    |  ``attribute type:`` number

    |  ``cancel_time:`` The date and time the report job was canceled.
    |  ``attribute type:`` datetime

    |  ``created_at:`` The date and time the report job was created.
    |  ``attribute type:`` datetime

    |  ``updated_at:`` The date and time the report job was updated.
    |  ``attribute type:`` datetime

    |  ``report_type:`` The report job type to indicate if a report was scheduled or run on demand.
    |  ``attribute type:`` string

    |  ``job_priority:`` The report job priority, lower priority are processed first.
    |  ``attribute type:`` number

    |  ``size:`` The file size of a completed report.
    |  ``attribute type:`` number

    |  ``ext_job_id:`` The system process id for the report job.
    |  ``attribute type:`` number

    |  ``auth_user_id:`` The internal NetMRI user id that created the Report Job.
    |  ``attribute type:`` number

    |  ``last_checkin:`` The date and time the report job last changed status.
    |  ``attribute type:`` datetime

    |  ``report_job_specification_id:`` The internal NetMRI identifier for the associated Report Job Specification.
    |  ``attribute type:`` number
    """

    # Attribute names exposed by this remote model; presumably consumed by
    # the RemoteModel base to map API response fields -- TODO confirm.
    properties = ("id",
                  "report_id",
                  "status",
                  "start_time",
                  "is_foreground",
                  "cancel_time",
                  "created_at",
                  "updated_at",
                  "report_type",
                  "job_priority",
                  "size",
                  "ext_job_id",
                  "auth_user_id",
                  "last_checkin",
                  "report_job_specification_id",
                  )
| 28.932584
| 115
| 0.560388
|
4a01f0bc3138f2184e600a3be4208dbc386051e5
| 2,414
|
py
|
Python
|
setup.py
|
bbc/rd-apmm-python-lib-mediagrains
|
84c9de511cc53418c277867eaf143f2cc8730d02
|
[
"ECL-2.0",
"Apache-2.0"
] | 6
|
2018-03-26T23:49:34.000Z
|
2021-12-23T10:06:09.000Z
|
setup.py
|
bbc/rd-apmm-python-lib-mediagrains
|
84c9de511cc53418c277867eaf143f2cc8730d02
|
[
"ECL-2.0",
"Apache-2.0"
] | 34
|
2018-03-21T16:45:10.000Z
|
2022-03-28T13:27:34.000Z
|
setup.py
|
bbc/rd-apmm-python-lib-mediagrains
|
84c9de511cc53418c277867eaf143f2cc8730d02
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright 2018 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setuptools import setup
# Mapping of distributed package name -> source directory for setuptools.
packages = {
    'mediagrains': 'mediagrains',
    'mediagrains.hypothesis': 'mediagrains/hypothesis',
    'mediagrains.comparison': 'mediagrains/comparison',
    'mediagrains.utils': 'mediagrains/utils',
    'mediagrains.asyncio': 'mediagrains/asyncio',
    'mediagrains.numpy': 'mediagrains/numpy',
    'mediagrains.tools': 'mediagrains/tools',
    'mediagrains.patterngenerators': 'mediagrains/patterngenerators',
    'mediagrains.patterngenerators.video': 'mediagrains/patterngenerators/video',
    'mediagrains.patterngenerators.audio': 'mediagrains/patterngenerators/audio'
}

# Runtime requirements, passed to install_requires below.
packages_required = [
    "mediajson >=2.0.0",
    "mediatimestamp >=2.1.0",
    "frozendict >= 1.2",
    'numpy >= 1.17.2',
    'mypy',
    'deprecated >= 1.2.6',
    "bitstring"
]

# NOTE(review): deps_required is defined but never passed to setup() -- verify intent.
deps_required = []

package_names = list(packages.keys())

# Command-line entry points exposed by the package.
console_scripts = [
    'wrap_video_in_gsf=mediagrains.tools:wrap_video_in_gsf',
    'wrap_audio_in_gsf=mediagrains.tools:wrap_audio_in_gsf',
    'extract_gsf_essence=mediagrains.tools:extract_gsf_essence',
    'gsf_probe=mediagrains.tools:gsf_probe'
]

setup(name="mediagrains",
      version="2.16.1",
      python_requires='>=3.6.0',
      description="Simple utility for grain-based media",
      url='https://github.com/bbc/rd-apmm-python-lib-mediagrains',
      author='James Weaver',
      author_email='james.barrett@bbc.co.uk',
      license='Apache 2',
      packages=package_names,
      package_dir=packages,
      # Ship the py.typed marker so type checkers use the inline annotations.
      package_data={name: ['py.typed'] for name in package_names},
      install_requires=packages_required,
      entry_points={
          'console_scripts': console_scripts
      },
      data_files=[],
      long_description="""
Simple python library for dealing with grain data in a python-native format.
""")
| 33.068493
| 81
| 0.71251
|
4a01f2d853854df14aeb3e9d79b919cec8082bec
| 4,964
|
py
|
Python
|
cms/admin/settingsadmin.py
|
Mario-Kart-Felix/django-cms
|
6d68439fe7fd59d000f99e27c1f2135a3f9c816a
|
[
"BSD-3-Clause"
] | 1,110
|
2020-10-08T15:12:08.000Z
|
2022-03-31T23:08:37.000Z
|
cms/admin/settingsadmin.py
|
rpep/django-cms
|
53dddb106f45963f9f8393d434b4313fa3bbdf54
|
[
"BSD-3-Clause"
] | 410
|
2020-10-08T13:24:53.000Z
|
2022-03-31T23:18:04.000Z
|
cms/admin/settingsadmin.py
|
rpep/django-cms
|
53dddb106f45963f9f8393d434b4313fa3bbdf54
|
[
"BSD-3-Clause"
] | 405
|
2020-10-08T21:22:30.000Z
|
2022-03-30T20:58:14.000Z
|
import copy
import json
from functools import update_wrapper
from urllib.parse import urlparse
from django.urls import re_path
from django.contrib import admin
from django.contrib.admin import ModelAdmin
from django.contrib.auth.admin import csrf_protect_m
from django.db import transaction
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseBadRequest
from django.http.request import QueryDict
from django.utils.translation import override
from cms.admin.forms import RequestToolbarForm
from cms.models import UserSettings
from cms.toolbar.toolbar import CMSToolbar
from cms.utils.page import get_page_from_request
from cms.utils.urlutils import admin_reverse
class SettingsAdmin(ModelAdmin):
    """
    Admin for per-user CMS settings (``UserSettings``). Also exposes two
    toolbar support endpoints: a session store and a view that renders the
    CMS toolbar for a given origin URL.
    """

    def get_urls(self):
        """Return the custom URL patterns handled by this model admin."""
        def wrap(view):
            # Route the view through the admin site's permission checks.
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)

        info = self.model._meta.app_label, self.model._meta.model_name

        return [
            # session_store performs its own is_staff check, so it is not
            # wrapped in admin_view here.
            re_path(r'^session_store/$',
                self.session_store,
                name='%s_%s_session_store' % info),
            re_path(r'^cms-toolbar/$',
                wrap(self.get_toolbar),
                name='%s_%s_get_toolbar' % info),
            re_path(r'^$',
                wrap(self.change_view),
                name='%s_%s_change' % info),
            re_path(r'^(.+)/$',
                wrap(self.change_view),
                name='%s_%s_change' % info),
        ]

    @csrf_protect_m
    @transaction.atomic
    def change_view(self, request, id=None):
        """
        Edit the requesting user's settings.

        The ``id`` from the URL is ignored; the object is always looked up by
        ``request.user``. Falls back to the add view when the user has no
        settings row yet.
        """
        model = self.model
        try:
            obj = model.objects.get(user=request.user)
        except model.DoesNotExist:
            return self.add_view(request)
        return super().change_view(request, str(obj.pk))

    def session_store(self, request):
        """
        either POST or GET

        POST should have a settings parameter, whose value is stored verbatim
        in the session. Always responds with the current session value as
        JSON; non-staff users always get an empty string back.
        """
        if not request.user.is_staff:
            return HttpResponse(json.dumps(""),
                                content_type="application/json")
        if request.method == "POST":
            request.session['cms_settings'] = request.POST['settings']
            request.session.save()
        return HttpResponse(
            json.dumps(request.session.get('cms_settings', '')),
            content_type="application/json"
        )

    def get_toolbar(self, request):
        """Render and return the CMS toolbar HTML for the requested origin URL."""
        form = RequestToolbarForm(request.GET or None)
        if not form.is_valid():
            return HttpResponseBadRequest('Invalid parameters')

        form_data = form.cleaned_data
        cms_path = form_data.get('cms_path') or request.path_info
        origin_url = urlparse(cms_path)
        attached_obj = form_data.get('attached_obj')
        current_page = get_page_from_request(request, use_path=origin_url.path, clean_path=True)

        # Reject a mismatch between the claimed object and the page actually
        # resolved from the origin path.
        if attached_obj and current_page and not (attached_obj == current_page):
            return HttpResponseBadRequest('Generic object does not match current page')

        data = QueryDict(query_string=origin_url.query, mutable=True)

        placeholders = request.GET.getlist("placeholders[]")
        if placeholders:
            data.setlist('placeholders[]', placeholders)

        # Re-point a shallow copy of the request at the origin URL before
        # building the toolbar, so toolbar state reflects that page.
        request = copy.copy(request)
        request.GET = data
        request.current_page = current_page
        request.toolbar = CMSToolbar(request, request_path=origin_url.path, _async=True)
        request.toolbar.set_object(attached_obj or current_page)
        return HttpResponse(request.toolbar.render())

    def save_model(self, request, obj, form, change):
        # Settings rows always belong to the requesting user.
        obj.user = request.user
        obj.save()

    def response_post_save_change(self, request, obj):
        #
        # When the user changes his language setting, we need to do two things:
        # 1. Change the language-prefix for the sideframed admin view
        # 2. Reload the whole window so that the new language affects the
        #    toolbar, etc.
        #
        # To do this, we first redirect the sideframe to the correct new, URL,
        # but we pass a GET param 'reload_window', which instructs JS on that
        # page to strip (to avoid infinite redirection loops) that param then
        # reload the whole window again.
        #
        with override(obj.language):
            post_url = admin_reverse(
                'cms_usersettings_change',
                args=[obj.id, ],
                current_app=self.admin_site.name
            )
        return HttpResponseRedirect("{0}?reload_window".format(post_url))

    def has_change_permission(self, request, obj=None):
        # Users may only change their own settings object.
        if obj and obj.user == request.user:
            return True
        return False

    def get_model_perms(self, request):
        """
        Return empty perms dict thus hiding the model from admin index.
        """
        return {}


# Register the admin so the custom endpoints above are served.
admin.site.register(UserSettings, SettingsAdmin)
| 35.457143
| 96
| 0.639807
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.